From 9e9638677a35384e9acd12a1ecca1390fdf72b3e Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Wed, 14 Aug 2024 15:43:00 -0700 Subject: [PATCH 001/188] feat: autoscaling for MonoVertex (#1927) Signed-off-by: Derek Wang Signed-off-by: Sidhant Kohli Signed-off-by: Vigith Maurice Signed-off-by: Keran Yang Co-authored-by: Sidhant Kohli Co-authored-by: Vigith Maurice Co-authored-by: Keran Yang --- hack/generate-proto.sh | 2 + .../numaflow/v1alpha1/mono_vertex_types.go | 22 ++ pkg/apis/numaflow/v1alpha1/pipeline_types.go | 2 + pkg/apis/proto/mvtxdaemon/mvtxdaemon.pb.go | 299 ++++++++++++++ pkg/apis/proto/mvtxdaemon/mvtxdaemon.pb.gw.go | 156 ++++++++ pkg/apis/proto/mvtxdaemon/mvtxdaemon.proto | 47 +++ .../proto/mvtxdaemon/mvtxdaemon_grpc.pb.go | 126 ++++++ .../server/service/rater/pod_tracker.go | 5 +- pkg/metrics/metrics.go | 2 +- pkg/mvtxdaemon/client/doc.go | 26 ++ pkg/mvtxdaemon/client/grpc_client.go | 63 +++ pkg/mvtxdaemon/client/grpc_client_test.go | 17 + pkg/mvtxdaemon/client/interface.go | 29 ++ pkg/mvtxdaemon/client/restful_client.go | 91 +++++ pkg/mvtxdaemon/client/restful_client_test.go | 17 + pkg/mvtxdaemon/server/daemon_server.go | 28 +- pkg/mvtxdaemon/server/metrics.go | 4 +- pkg/mvtxdaemon/server/service/mvtx_service.go | 119 ++++++ .../server/service/rater/goleak_test.go | 28 ++ pkg/mvtxdaemon/server/service/rater/helper.go | 123 ++++++ .../server/service/rater/helper_test.go | 288 ++++++++++++++ .../server/service/rater/options.go | 53 +++ .../server/service/rater/pod_tracker.go | 178 +++++++++ .../server/service/rater/pod_tracker_test.go | 141 +++++++ pkg/mvtxdaemon/server/service/rater/rater.go | 270 +++++++++++++ .../server/service/rater/rater_test.go | 134 +++++++ .../service/rater/timestamped_counts.go | 73 ++++ .../service/rater/timestamped_counts_test.go | 64 +++ pkg/reconciler/cmd/start.go | 13 +- pkg/reconciler/metrics.go | 25 +- pkg/reconciler/monovertex/controller.go | 31 +- pkg/reconciler/monovertex/scaling/doc.go | 25 ++ 
pkg/reconciler/monovertex/scaling/options.go | 57 +++ pkg/reconciler/monovertex/scaling/scaling.go | 366 ++++++++++++++++++ .../monovertex/scaling/scaling_test.go | 17 + pkg/reconciler/vertex/scaling/doc.go | 2 +- pkg/reconciler/vertex/scaling/scaling.go | 10 +- .../rater => shared/util}/uniq_str_list.go | 2 +- .../util}/uniq_str_list_test.go | 2 +- rust/Cargo.lock | 33 +- rust/monovertex/Cargo.toml | 4 +- rust/monovertex/src/forwarder.rs | 41 +- rust/monovertex/src/lib.rs | 2 +- rust/monovertex/src/metrics.rs | 254 ++++++++---- server/apis/interface.go | 1 + server/apis/v1/handler.go | 113 ++++-- server/cmd/server/start.go | 1 + server/cmd/server/start_test.go | 4 +- server/routes/routes.go | 2 + 49 files changed, 3255 insertions(+), 157 deletions(-) create mode 100644 pkg/apis/proto/mvtxdaemon/mvtxdaemon.pb.go create mode 100644 pkg/apis/proto/mvtxdaemon/mvtxdaemon.pb.gw.go create mode 100644 pkg/apis/proto/mvtxdaemon/mvtxdaemon.proto create mode 100644 pkg/apis/proto/mvtxdaemon/mvtxdaemon_grpc.pb.go create mode 100644 pkg/mvtxdaemon/client/doc.go create mode 100644 pkg/mvtxdaemon/client/grpc_client.go create mode 100644 pkg/mvtxdaemon/client/grpc_client_test.go create mode 100644 pkg/mvtxdaemon/client/interface.go create mode 100644 pkg/mvtxdaemon/client/restful_client.go create mode 100644 pkg/mvtxdaemon/client/restful_client_test.go create mode 100644 pkg/mvtxdaemon/server/service/mvtx_service.go create mode 100644 pkg/mvtxdaemon/server/service/rater/goleak_test.go create mode 100644 pkg/mvtxdaemon/server/service/rater/helper.go create mode 100644 pkg/mvtxdaemon/server/service/rater/helper_test.go create mode 100644 pkg/mvtxdaemon/server/service/rater/options.go create mode 100644 pkg/mvtxdaemon/server/service/rater/pod_tracker.go create mode 100644 pkg/mvtxdaemon/server/service/rater/pod_tracker_test.go create mode 100644 pkg/mvtxdaemon/server/service/rater/rater.go create mode 100644 pkg/mvtxdaemon/server/service/rater/rater_test.go create mode 100644 
pkg/mvtxdaemon/server/service/rater/timestamped_counts.go create mode 100644 pkg/mvtxdaemon/server/service/rater/timestamped_counts_test.go create mode 100644 pkg/reconciler/monovertex/scaling/doc.go create mode 100644 pkg/reconciler/monovertex/scaling/options.go create mode 100644 pkg/reconciler/monovertex/scaling/scaling.go create mode 100644 pkg/reconciler/monovertex/scaling/scaling_test.go rename pkg/{daemon/server/service/rater => shared/util}/uniq_str_list.go (99%) rename pkg/{daemon/server/service/rater => shared/util}/uniq_str_list_test.go (99%) diff --git a/hack/generate-proto.sh b/hack/generate-proto.sh index 614ded3e8a..bf970ce318 100755 --- a/hack/generate-proto.sh +++ b/hack/generate-proto.sh @@ -75,6 +75,8 @@ gen-protoc(){ gen-protoc pkg/apis/proto/daemon/daemon.proto +gen-protoc pkg/apis/proto/mvtxdaemon/mvtxdaemon.proto + gen-protoc pkg/apis/proto/isb/message.proto gen-protoc pkg/apis/proto/wmb/wmb.proto diff --git a/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go index b4a372ae45..ac64c7e2a6 100644 --- a/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go +++ b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go @@ -123,6 +123,16 @@ func (mv MonoVertex) GetDaemonDeploymentName() string { return fmt.Sprintf("%s-mv-daemon", mv.Name) } +func (mv MonoVertex) GetDaemonServiceURL() string { + // Note: the format of the URL is also used in `server/apis/v1/handler.go` + // Do not change it without updating the handler. 
+ return fmt.Sprintf("%s.%s.svc:%d", mv.GetDaemonServiceName(), mv.Namespace, MonoVertexDaemonServicePort) +} + +func (mv MonoVertex) Scalable() bool { + return !mv.Spec.Scale.Disabled +} + func (mv MonoVertex) GetDaemonServiceObj() *corev1.Service { labels := map[string]string{ KeyPartOf: Project, @@ -518,6 +528,18 @@ func (mvs *MonoVertexStatus) MarkPhaseRunning() { mvs.MarkPhase(MonoVertexPhaseRunning, "", "") } +// IsHealthy indicates whether the MonoVertex is in healthy status +func (mvs *MonoVertexStatus) IsHealthy() bool { + switch mvs.Phase { + case MonoVertexPhaseFailed: + return false + case MonoVertexPhaseRunning: + return mvs.IsReady() + default: + return false + } +} + // +kubebuilder:object:root=true // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type MonoVertexList struct { diff --git a/pkg/apis/numaflow/v1alpha1/pipeline_types.go b/pkg/apis/numaflow/v1alpha1/pipeline_types.go index b6fe89e85a..010b53bf20 100644 --- a/pkg/apis/numaflow/v1alpha1/pipeline_types.go +++ b/pkg/apis/numaflow/v1alpha1/pipeline_types.go @@ -200,6 +200,8 @@ func (p Pipeline) GetDaemonDeploymentName() string { } func (p Pipeline) GetDaemonServiceURL() string { + // Note: the format of the URL is also used in `server/apis/v1/handler.go` + // Do not change it without updating the handler. return fmt.Sprintf("%s.%s.svc:%d", p.GetDaemonServiceName(), p.Namespace, DaemonServicePort) } diff --git a/pkg/apis/proto/mvtxdaemon/mvtxdaemon.pb.go b/pkg/apis/proto/mvtxdaemon/mvtxdaemon.pb.go new file mode 100644 index 0000000000..10d3e2350c --- /dev/null +++ b/pkg/apis/proto/mvtxdaemon/mvtxdaemon.pb.go @@ -0,0 +1,299 @@ +// +//Copyright 2022 The Numaproj Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. 
+//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v5.27.2 +// source: pkg/apis/proto/mvtxdaemon/mvtxdaemon.proto + +package mvtxdaemon + +import ( + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// MonoVertexMetrics is used to provide information about the mono vertex including processing rate. 
+type MonoVertexMetrics struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MonoVertex string `protobuf:"bytes,1,opt,name=monoVertex,proto3" json:"monoVertex,omitempty"` + // Processing rate in the past period of time, 1m, 5m, 15m, default + ProcessingRates map[string]*wrapperspb.DoubleValue `protobuf:"bytes,2,rep,name=processingRates,proto3" json:"processingRates,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Pending in the past period of time, 1m, 5m, 15m, default + Pendings map[string]*wrapperspb.Int64Value `protobuf:"bytes,3,rep,name=pendings,proto3" json:"pendings,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *MonoVertexMetrics) Reset() { + *x = MonoVertexMetrics{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MonoVertexMetrics) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MonoVertexMetrics) ProtoMessage() {} + +func (x *MonoVertexMetrics) ProtoReflect() protoreflect.Message { + mi := &file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MonoVertexMetrics.ProtoReflect.Descriptor instead. 
+func (*MonoVertexMetrics) Descriptor() ([]byte, []int) { + return file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_rawDescGZIP(), []int{0} +} + +func (x *MonoVertexMetrics) GetMonoVertex() string { + if x != nil { + return x.MonoVertex + } + return "" +} + +func (x *MonoVertexMetrics) GetProcessingRates() map[string]*wrapperspb.DoubleValue { + if x != nil { + return x.ProcessingRates + } + return nil +} + +func (x *MonoVertexMetrics) GetPendings() map[string]*wrapperspb.Int64Value { + if x != nil { + return x.Pendings + } + return nil +} + +type GetMonoVertexMetricsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Metrics *MonoVertexMetrics `protobuf:"bytes,1,opt,name=metrics,proto3" json:"metrics,omitempty"` +} + +func (x *GetMonoVertexMetricsResponse) Reset() { + *x = GetMonoVertexMetricsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMonoVertexMetricsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMonoVertexMetricsResponse) ProtoMessage() {} + +func (x *GetMonoVertexMetricsResponse) ProtoReflect() protoreflect.Message { + mi := &file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMonoVertexMetricsResponse.ProtoReflect.Descriptor instead. 
+func (*GetMonoVertexMetricsResponse) Descriptor() ([]byte, []int) { + return file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_rawDescGZIP(), []int{1} +} + +func (x *GetMonoVertexMetricsResponse) GetMetrics() *MonoVertexMetrics { + if x != nil { + return x.Metrics + } + return nil +} + +var File_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto protoreflect.FileDescriptor + +var file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_rawDesc = []byte{ + 0x0a, 0x2a, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x6d, 0x76, 0x74, 0x78, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2f, 0x6d, 0x76, 0x74, 0x78, + 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x6d, 0x76, + 0x74, 0x78, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0x96, 0x03, 0x0a, 0x11, 0x4d, 0x6f, 0x6e, 0x6f, 0x56, 0x65, 0x72, 0x74, + 0x65, 0x78, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x6d, 0x6f, 0x6e, + 0x6f, 0x56, 0x65, 0x72, 0x74, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6d, + 0x6f, 0x6e, 0x6f, 0x56, 0x65, 0x72, 0x74, 0x65, 0x78, 0x12, 0x5c, 0x0a, 0x0f, 0x70, 0x72, 0x6f, + 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x52, 0x61, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x6d, 0x76, 0x74, 0x78, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, + 0x4d, 0x6f, 0x6e, 0x6f, 0x56, 0x65, 0x72, 0x74, 0x65, 0x78, 0x4d, 0x65, 0x74, 0x72, 
0x69, 0x63, + 0x73, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x52, 0x61, 0x74, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x69, + 0x6e, 0x67, 0x52, 0x61, 0x74, 0x65, 0x73, 0x12, 0x47, 0x0a, 0x08, 0x70, 0x65, 0x6e, 0x64, 0x69, + 0x6e, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6d, 0x76, 0x74, 0x78, + 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x4d, 0x6f, 0x6e, 0x6f, 0x56, 0x65, 0x72, 0x74, 0x65, + 0x78, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, + 0x1a, 0x60, 0x0a, 0x14, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x52, 0x61, + 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x32, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x6f, 0x75, 0x62, + 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x1a, 0x58, 0x0a, 0x0d, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x31, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x57, 0x0a, 0x1c, + 0x47, 0x65, 0x74, 0x4d, 0x6f, 0x6e, 0x6f, 0x56, 0x65, 0x72, 0x74, 0x65, 0x78, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x07, + 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x6d, 0x76, 0x74, 0x78, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x4d, 0x6f, 0x6e, 0x6f, 0x56, + 0x65, 0x72, 0x74, 0x65, 0x78, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x07, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x32, 0x8c, 0x01, 0x0a, 0x17, 0x4d, 0x6f, 0x6e, 0x6f, 0x56, 0x65, + 0x72, 0x74, 0x65, 0x78, 0x44, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x12, 0x71, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x4d, 0x6f, 0x6e, 0x6f, 0x56, 0x65, 0x72, 0x74, + 0x65, 0x78, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x1a, 0x28, 0x2e, 0x6d, 0x76, 0x74, 0x78, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x47, + 0x65, 0x74, 0x4d, 0x6f, 0x6e, 0x6f, 0x56, 0x65, 0x72, 0x74, 0x65, 0x78, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x17, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x11, 0x12, 0x0f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x42, 0x38, 0x5a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x6e, 0x75, 0x6d, 0x61, 0x70, 0x72, 0x6f, 0x6a, 0x2f, 0x6e, 0x75, 0x6d, 0x61, + 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x6d, 0x76, 0x74, 0x78, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_rawDescOnce sync.Once + file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_rawDescData = file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_rawDesc +) + +func file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_rawDescGZIP() []byte { + 
file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_rawDescOnce.Do(func() { + file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_rawDescData) + }) + return file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_rawDescData +} + +var file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_goTypes = []any{ + (*MonoVertexMetrics)(nil), // 0: mvtxdaemon.MonoVertexMetrics + (*GetMonoVertexMetricsResponse)(nil), // 1: mvtxdaemon.GetMonoVertexMetricsResponse + nil, // 2: mvtxdaemon.MonoVertexMetrics.ProcessingRatesEntry + nil, // 3: mvtxdaemon.MonoVertexMetrics.PendingsEntry + (*wrapperspb.DoubleValue)(nil), // 4: google.protobuf.DoubleValue + (*wrapperspb.Int64Value)(nil), // 5: google.protobuf.Int64Value + (*emptypb.Empty)(nil), // 6: google.protobuf.Empty +} +var file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_depIdxs = []int32{ + 2, // 0: mvtxdaemon.MonoVertexMetrics.processingRates:type_name -> mvtxdaemon.MonoVertexMetrics.ProcessingRatesEntry + 3, // 1: mvtxdaemon.MonoVertexMetrics.pendings:type_name -> mvtxdaemon.MonoVertexMetrics.PendingsEntry + 0, // 2: mvtxdaemon.GetMonoVertexMetricsResponse.metrics:type_name -> mvtxdaemon.MonoVertexMetrics + 4, // 3: mvtxdaemon.MonoVertexMetrics.ProcessingRatesEntry.value:type_name -> google.protobuf.DoubleValue + 5, // 4: mvtxdaemon.MonoVertexMetrics.PendingsEntry.value:type_name -> google.protobuf.Int64Value + 6, // 5: mvtxdaemon.MonoVertexDaemonService.GetMonoVertexMetrics:input_type -> google.protobuf.Empty + 1, // 6: mvtxdaemon.MonoVertexDaemonService.GetMonoVertexMetrics:output_type -> mvtxdaemon.GetMonoVertexMetricsResponse + 6, // [6:7] is the sub-list for method output_type + 5, // [5:6] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list 
for field type_name +} + +func init() { file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_init() } +func file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_init() { + if File_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*MonoVertexMetrics); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*GetMonoVertexMetricsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_goTypes, + DependencyIndexes: file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_depIdxs, + MessageInfos: file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_msgTypes, + }.Build() + File_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto = out.File + file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_rawDesc = nil + file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_goTypes = nil + file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_depIdxs = nil +} diff --git a/pkg/apis/proto/mvtxdaemon/mvtxdaemon.pb.gw.go b/pkg/apis/proto/mvtxdaemon/mvtxdaemon.pb.gw.go new file mode 100644 index 0000000000..97c8075676 --- /dev/null +++ b/pkg/apis/proto/mvtxdaemon/mvtxdaemon.pb.gw.go @@ -0,0 +1,156 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: pkg/apis/proto/mvtxdaemon/mvtxdaemon.proto + +/* +Package mvtxdaemon is a reverse proxy. 
+ +It translates gRPC into RESTful JSON APIs. +*/ +package mvtxdaemon + +import ( + "context" + "io" + "net/http" + + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/emptypb" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = metadata.Join + +func request_MonoVertexDaemonService_GetMonoVertexMetrics_0(ctx context.Context, marshaler runtime.Marshaler, client MonoVertexDaemonServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq emptypb.Empty + var metadata runtime.ServerMetadata + + msg, err := client.GetMonoVertexMetrics(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_MonoVertexDaemonService_GetMonoVertexMetrics_0(ctx context.Context, marshaler runtime.Marshaler, server MonoVertexDaemonServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq emptypb.Empty + var metadata runtime.ServerMetadata + + msg, err := server.GetMonoVertexMetrics(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterMonoVertexDaemonServiceHandlerServer registers the http handlers for service MonoVertexDaemonService to "mux". +// UnaryRPC :call MonoVertexDaemonServiceServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. 
Consider using RegisterMonoVertexDaemonServiceHandlerFromEndpoint instead. +func RegisterMonoVertexDaemonServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server MonoVertexDaemonServiceServer) error { + + mux.Handle("GET", pattern_MonoVertexDaemonService_GetMonoVertexMetrics_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/mvtxdaemon.MonoVertexDaemonService/GetMonoVertexMetrics", runtime.WithHTTPPathPattern("/api/v1/metrics")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_MonoVertexDaemonService_GetMonoVertexMetrics_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_MonoVertexDaemonService_GetMonoVertexMetrics_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterMonoVertexDaemonServiceHandlerFromEndpoint is same as RegisterMonoVertexDaemonServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterMonoVertexDaemonServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.DialContext(ctx, endpoint, opts...) 
+ if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterMonoVertexDaemonServiceHandler(ctx, mux, conn) +} + +// RegisterMonoVertexDaemonServiceHandler registers the http handlers for service MonoVertexDaemonService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterMonoVertexDaemonServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterMonoVertexDaemonServiceHandlerClient(ctx, mux, NewMonoVertexDaemonServiceClient(conn)) +} + +// RegisterMonoVertexDaemonServiceHandlerClient registers the http handlers for service MonoVertexDaemonService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "MonoVertexDaemonServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "MonoVertexDaemonServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "MonoVertexDaemonServiceClient" to call the correct interceptors. 
+func RegisterMonoVertexDaemonServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client MonoVertexDaemonServiceClient) error { + + mux.Handle("GET", pattern_MonoVertexDaemonService_GetMonoVertexMetrics_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/mvtxdaemon.MonoVertexDaemonService/GetMonoVertexMetrics", runtime.WithHTTPPathPattern("/api/v1/metrics")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_MonoVertexDaemonService_GetMonoVertexMetrics_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_MonoVertexDaemonService_GetMonoVertexMetrics_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_MonoVertexDaemonService_GetMonoVertexMetrics_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "metrics"}, "")) +) + +var ( + forward_MonoVertexDaemonService_GetMonoVertexMetrics_0 = runtime.ForwardResponseMessage +) diff --git a/pkg/apis/proto/mvtxdaemon/mvtxdaemon.proto b/pkg/apis/proto/mvtxdaemon/mvtxdaemon.proto new file mode 100644 index 0000000000..512b5bf515 --- /dev/null +++ b/pkg/apis/proto/mvtxdaemon/mvtxdaemon.proto @@ -0,0 +1,47 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +syntax = "proto3"; + +option go_package = "github.com/numaproj/numaflow/pkg/apis/proto/mvtxdaemon"; + +import "google/api/annotations.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/wrappers.proto"; + +package mvtxdaemon; + +// MonoVertexMetrics is used to provide information about the mono vertex including processing rate. +message MonoVertexMetrics { + string monoVertex = 1; + // Processing rate in the past period of time, 1m, 5m, 15m, default + map processingRates = 2; + // Pending in the past period of time, 1m, 5m, 15m, default + map pendings = 3; +} + +message GetMonoVertexMetricsResponse { + MonoVertexMetrics metrics = 1; +} + +// MonoVertexDaemonService is a grpc service that is used to provide APIs for giving any MonoVertex information. +service MonoVertexDaemonService { + + rpc GetMonoVertexMetrics (google.protobuf.Empty) returns (GetMonoVertexMetricsResponse) { + option (google.api.http).get = "/api/v1/metrics"; + }; + +} \ No newline at end of file diff --git a/pkg/apis/proto/mvtxdaemon/mvtxdaemon_grpc.pb.go b/pkg/apis/proto/mvtxdaemon/mvtxdaemon_grpc.pb.go new file mode 100644 index 0000000000..1dd50188d7 --- /dev/null +++ b/pkg/apis/proto/mvtxdaemon/mvtxdaemon_grpc.pb.go @@ -0,0 +1,126 @@ +// +//Copyright 2022 The Numaproj Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. 
+//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v5.27.2 +// source: pkg/apis/proto/mvtxdaemon/mvtxdaemon.proto + +package mvtxdaemon + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + MonoVertexDaemonService_GetMonoVertexMetrics_FullMethodName = "/mvtxdaemon.MonoVertexDaemonService/GetMonoVertexMetrics" +) + +// MonoVertexDaemonServiceClient is the client API for MonoVertexDaemonService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type MonoVertexDaemonServiceClient interface { + GetMonoVertexMetrics(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*GetMonoVertexMetricsResponse, error) +} + +type monoVertexDaemonServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewMonoVertexDaemonServiceClient(cc grpc.ClientConnInterface) MonoVertexDaemonServiceClient { + return &monoVertexDaemonServiceClient{cc} +} + +func (c *monoVertexDaemonServiceClient) GetMonoVertexMetrics(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*GetMonoVertexMetricsResponse, error) { + out := new(GetMonoVertexMetricsResponse) + err := c.cc.Invoke(ctx, MonoVertexDaemonService_GetMonoVertexMetrics_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MonoVertexDaemonServiceServer is the server API for MonoVertexDaemonService service. +// All implementations must embed UnimplementedMonoVertexDaemonServiceServer +// for forward compatibility +type MonoVertexDaemonServiceServer interface { + GetMonoVertexMetrics(context.Context, *emptypb.Empty) (*GetMonoVertexMetricsResponse, error) + mustEmbedUnimplementedMonoVertexDaemonServiceServer() +} + +// UnimplementedMonoVertexDaemonServiceServer must be embedded to have forward compatible implementations. +type UnimplementedMonoVertexDaemonServiceServer struct { +} + +func (UnimplementedMonoVertexDaemonServiceServer) GetMonoVertexMetrics(context.Context, *emptypb.Empty) (*GetMonoVertexMetricsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetMonoVertexMetrics not implemented") +} +func (UnimplementedMonoVertexDaemonServiceServer) mustEmbedUnimplementedMonoVertexDaemonServiceServer() { +} + +// UnsafeMonoVertexDaemonServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to MonoVertexDaemonServiceServer will +// result in compilation errors. 
+type UnsafeMonoVertexDaemonServiceServer interface { + mustEmbedUnimplementedMonoVertexDaemonServiceServer() +} + +func RegisterMonoVertexDaemonServiceServer(s grpc.ServiceRegistrar, srv MonoVertexDaemonServiceServer) { + s.RegisterService(&MonoVertexDaemonService_ServiceDesc, srv) +} + +func _MonoVertexDaemonService_GetMonoVertexMetrics_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(emptypb.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MonoVertexDaemonServiceServer).GetMonoVertexMetrics(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: MonoVertexDaemonService_GetMonoVertexMetrics_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MonoVertexDaemonServiceServer).GetMonoVertexMetrics(ctx, req.(*emptypb.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +// MonoVertexDaemonService_ServiceDesc is the grpc.ServiceDesc for MonoVertexDaemonService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var MonoVertexDaemonService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "mvtxdaemon.MonoVertexDaemonService", + HandlerType: (*MonoVertexDaemonServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetMonoVertexMetrics", + Handler: _MonoVertexDaemonService_GetMonoVertexMetrics_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "pkg/apis/proto/mvtxdaemon/mvtxdaemon.proto", +} diff --git a/pkg/daemon/server/service/rater/pod_tracker.go b/pkg/daemon/server/service/rater/pod_tracker.go index c693630941..bcda964ebc 100644 --- a/pkg/daemon/server/service/rater/pod_tracker.go +++ b/pkg/daemon/server/service/rater/pod_tracker.go @@ -28,6 +28,7 @@ import ( "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" "github.com/numaproj/numaflow/pkg/shared/logging" + "github.com/numaproj/numaflow/pkg/shared/util" ) // podInfoSeparator is used as a separator to split the pod key @@ -41,7 +42,7 @@ type PodTracker struct { pipeline *v1alpha1.Pipeline log *zap.SugaredLogger httpClient metricsHttpClient - activePods *UniqueStringList + activePods *util.UniqueStringList refreshInterval time.Duration } @@ -55,7 +56,7 @@ func NewPodTracker(ctx context.Context, p *v1alpha1.Pipeline, opts ...PodTracker }, Timeout: time.Second, }, - activePods: NewUniqueStringList(), + activePods: util.NewUniqueStringList(), refreshInterval: 30 * time.Second, // Default refresh interval for updating the active pod set } diff --git a/pkg/metrics/metrics.go b/pkg/metrics/metrics.go index be3b34aff7..f4e394004e 100644 --- a/pkg/metrics/metrics.go +++ b/pkg/metrics/metrics.go @@ -28,10 +28,10 @@ const ( LabelISBService = "isbsvc" LabelPipeline = "pipeline" LabelVertex = "vertex" - LabelMonoVertex = "mono_vertex" LabelVertexReplicaIndex = "replica" LabelVertexType = "vertex_type" LabelPartitionName = "partition_name" + LabelMonoVertexName = "mvtx_name" 
LabelReason = "reason" ) diff --git a/pkg/mvtxdaemon/client/doc.go b/pkg/mvtxdaemon/client/doc.go new file mode 100644 index 0000000000..8e7e39073b --- /dev/null +++ b/pkg/mvtxdaemon/client/doc.go @@ -0,0 +1,26 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package client is used to create the MonoVertex daemon service client. +// +// There are 2 clients available. +// +// 1. gRPC client +// func NewGRPCClient(address string) (MonoVertexDaemonClient, error) +// +// 2. RESTful client +// func NewRESTfulClient(address string) (MonoVertexDaemonClient, error) +package client diff --git a/pkg/mvtxdaemon/client/grpc_client.go b/pkg/mvtxdaemon/client/grpc_client.go new file mode 100644 index 0000000000..c7fb80d6fc --- /dev/null +++ b/pkg/mvtxdaemon/client/grpc_client.go @@ -0,0 +1,63 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package client + +import ( + "context" + "crypto/tls" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/protobuf/types/known/emptypb" + + "github.com/numaproj/numaflow/pkg/apis/proto/mvtxdaemon" +) + +type grpcClient struct { + client mvtxdaemon.MonoVertexDaemonServiceClient + conn *grpc.ClientConn +} + +var _ MonoVertexDaemonClient = (*grpcClient)(nil) + +func NewGRPCClient(address string) (MonoVertexDaemonClient, error) { + config := &tls.Config{ + InsecureSkipVerify: true, + } + conn, err := grpc.Dial(address, grpc.WithTransportCredentials(credentials.NewTLS(config))) + if err != nil { + return nil, err + } + daemonClient := mvtxdaemon.NewMonoVertexDaemonServiceClient(conn) + return &grpcClient{conn: conn, client: daemonClient}, nil +} + +func (dc *grpcClient) GetMonoVertexMetrics(ctx context.Context) (*mvtxdaemon.MonoVertexMetrics, error) { + if rspn, err := dc.client.GetMonoVertexMetrics(ctx, &emptypb.Empty{}); err != nil { + return nil, err + } else { + return rspn.Metrics, nil + } +} + +// Close function closes the gRPC connection, it has to be called after a daemon client has finished all its jobs. +func (dc *grpcClient) Close() error { + if dc.conn != nil { + return dc.conn.Close() + } + return nil +} diff --git a/pkg/mvtxdaemon/client/grpc_client_test.go b/pkg/mvtxdaemon/client/grpc_client_test.go new file mode 100644 index 0000000000..b11582d4fd --- /dev/null +++ b/pkg/mvtxdaemon/client/grpc_client_test.go @@ -0,0 +1,17 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client diff --git a/pkg/mvtxdaemon/client/interface.go b/pkg/mvtxdaemon/client/interface.go new file mode 100644 index 0000000000..71a1a4aeaf --- /dev/null +++ b/pkg/mvtxdaemon/client/interface.go @@ -0,0 +1,29 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + "io" + + "github.com/numaproj/numaflow/pkg/apis/proto/mvtxdaemon" +) + +type MonoVertexDaemonClient interface { + io.Closer + GetMonoVertexMetrics(ctx context.Context) (*mvtxdaemon.MonoVertexMetrics, error) +} diff --git a/pkg/mvtxdaemon/client/restful_client.go b/pkg/mvtxdaemon/client/restful_client.go new file mode 100644 index 0000000000..7409e19734 --- /dev/null +++ b/pkg/mvtxdaemon/client/restful_client.go @@ -0,0 +1,91 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package client + +import ( + "context" + "crypto/tls" + "fmt" + "io" + "net/http" + "strings" + "time" + + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + + "github.com/numaproj/numaflow/pkg/apis/proto/mvtxdaemon" +) + +var ( + // Use JSONPb to unmarshal the response, it is needed to unmarshal the response with google.protobuf.* data types. + jsonMarshaller = new(runtime.JSONPb) +) + +type restfulClient struct { + hostURL string + httpClient *http.Client +} + +var _ MonoVertexDaemonClient = (*restfulClient)(nil) + +func NewRESTfulClient(address string) (MonoVertexDaemonClient, error) { + if !strings.HasPrefix(address, "https://") { + address = "https://" + address + } + client := &restfulClient{ + hostURL: address, + httpClient: &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + }, + Timeout: time.Second * 1, + }, + } + return client, nil +} + +func (rc *restfulClient) Close() error { + return nil +} + +func unmarshalResponse[T any](r *http.Response) (*T, error) { + if r.StatusCode >= 300 { + return nil, fmt.Errorf("unexpected response %v: %s", r.StatusCode, r.Status) + } + data, err := io.ReadAll(r.Body) + if err != nil { + return nil, fmt.Errorf("failed to read data from response body, %w", err) + } + var t T + if err := jsonMarshaller.Unmarshal(data, &t); err != nil { + return nil, fmt.Errorf("failed to unmarshal response body to %T, %w", t, err) + } + return &t, nil +} + +func (rc *restfulClient) GetMonoVertexMetrics(ctx context.Context) (*mvtxdaemon.MonoVertexMetrics, error) { + resp, err := rc.httpClient.Get(fmt.Sprintf("%s/api/v1/metrics", rc.hostURL)) + if err != nil { + return nil, fmt.Errorf("failed to call get mono vertex metrics RESTful API, %w", err) + } + defer func() { _ = resp.Body.Close() }() + if res, err := unmarshalResponse[mvtxdaemon.GetMonoVertexMetricsResponse](resp); err != nil { + return nil, err + } else { + return res.Metrics, nil + } +} diff --git 
a/pkg/mvtxdaemon/client/restful_client_test.go b/pkg/mvtxdaemon/client/restful_client_test.go new file mode 100644 index 0000000000..b11582d4fd --- /dev/null +++ b/pkg/mvtxdaemon/client/restful_client_test.go @@ -0,0 +1,17 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client diff --git a/pkg/mvtxdaemon/server/daemon_server.go b/pkg/mvtxdaemon/server/daemon_server.go index 29553493e6..f6ce1c9ec7 100644 --- a/pkg/mvtxdaemon/server/daemon_server.go +++ b/pkg/mvtxdaemon/server/daemon_server.go @@ -37,7 +37,9 @@ import ( "github.com/numaproj/numaflow" "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" - "github.com/numaproj/numaflow/pkg/apis/proto/daemon" + "github.com/numaproj/numaflow/pkg/apis/proto/mvtxdaemon" + "github.com/numaproj/numaflow/pkg/mvtxdaemon/server/service" + rateServer "github.com/numaproj/numaflow/pkg/mvtxdaemon/server/service/rater" "github.com/numaproj/numaflow/pkg/shared/logging" sharedtls "github.com/numaproj/numaflow/pkg/shared/tls" ) @@ -57,6 +59,8 @@ func (ds *daemonServer) Run(ctx context.Context) error { var ( err error ) + // rater is used to calculate the processing rate of the mono vertex + rater := rateServer.NewRater(ctx, ds.monoVtx) // Start listener var conn net.Listener @@ -73,11 +77,11 @@ func (ds *daemonServer) Run(ctx context.Context) error { } tlsConfig := &tls.Config{Certificates: []tls.Certificate{*cer}, MinVersion: tls.VersionTLS12} - grpcServer, err := ds.newGRPCServer() + 
grpcServer, err := ds.newGRPCServer(rater) if err != nil { return fmt.Errorf("failed to create grpc server: %w", err) } - httpServer := ds.newHTTPServer(ctx, v1alpha1.DaemonServicePort, tlsConfig) + httpServer := ds.newHTTPServer(ctx, v1alpha1.MonoVertexDaemonServicePort, tlsConfig) conn = tls.NewListener(conn, tlsConfig) // Cmux is used to support servicing gRPC and HTTP1.1+JSON on the same port @@ -89,15 +93,22 @@ func (ds *daemonServer) Run(ctx context.Context) error { go func() { _ = httpServer.Serve(httpL) }() go func() { _ = tcpm.Serve() }() + // Start the rater + go func() { + if err := rater.Start(ctx); err != nil { + log.Panic(fmt.Errorf("failed to start the rater: %w", err)) + } + }() + version := numaflow.GetVersion() - mono_vertex_info.WithLabelValues(version.Version, version.Platform, ds.monoVtx.Name).Set(1) + monoVertexInfo.WithLabelValues(version.Version, version.Platform, ds.monoVtx.Name).Set(1) log.Infof("MonoVertex daemon server started successfully on %s", address) <-ctx.Done() return nil } -func (ds *daemonServer) newGRPCServer() (*grpc.Server, error) { +func (ds *daemonServer) newGRPCServer(rater rateServer.MonoVtxRatable) (*grpc.Server, error) { // "Prometheus histograms are a great way to measure latency distributions of your RPCs. // However, since it is a bad practice to have metrics of high cardinality the latency monitoring metrics are disabled by default. // To enable them please call the following in your server initialization code:" @@ -111,6 +122,11 @@ func (ds *daemonServer) newGRPCServer() (*grpc.Server, error) { } grpcServer := grpc.NewServer(sOpts...) 
grpc_prometheus.Register(grpcServer) + mvtxService, err := service.NewMoveVertexService(ds.monoVtx, rater) + if err != nil { + return nil, err + } + mvtxdaemon.RegisterMonoVertexDaemonServiceServer(grpcServer, mvtxService) return grpcServer, nil } @@ -133,7 +149,7 @@ func (ds *daemonServer) newHTTPServer(ctx context.Context, port int, tlsConfig * return key, true }), ) - if err := daemon.RegisterDaemonServiceHandlerFromEndpoint(ctx, gwmux, endpoint, dialOpts); err != nil { + if err := mvtxdaemon.RegisterMonoVertexDaemonServiceHandlerFromEndpoint(ctx, gwmux, endpoint, dialOpts); err != nil { log.Errorw("Failed to register daemon handler on HTTP Server", zap.Error(err)) } mux := http.NewServeMux() diff --git a/pkg/mvtxdaemon/server/metrics.go b/pkg/mvtxdaemon/server/metrics.go index f0aa155c31..f3c0c30796 100644 --- a/pkg/mvtxdaemon/server/metrics.go +++ b/pkg/mvtxdaemon/server/metrics.go @@ -24,9 +24,9 @@ import ( ) var ( - mono_vertex_info = promauto.NewGaugeVec(prometheus.GaugeOpts{ + monoVertexInfo = promauto.NewGaugeVec(prometheus.GaugeOpts{ Subsystem: "monovtx", Name: "build_info", Help: "A metric with a constant value '1', labeled by Numaflow binary version and platform, as well as the mono vertex name", - }, []string{metrics.LabelVersion, metrics.LabelPlatform, metrics.LabelMonoVertex}) + }, []string{metrics.LabelVersion, metrics.LabelPlatform, metrics.LabelMonoVertexName}) ) diff --git a/pkg/mvtxdaemon/server/service/mvtx_service.go b/pkg/mvtxdaemon/server/service/mvtx_service.go new file mode 100644 index 0000000000..40a2b2972c --- /dev/null +++ b/pkg/mvtxdaemon/server/service/mvtx_service.go @@ -0,0 +1,119 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package service + +import ( + "context" + "crypto/tls" + "fmt" + "net/http" + "time" + + "go.uber.org/zap" + "google.golang.org/protobuf/types/known/emptypb" + "google.golang.org/protobuf/types/known/wrapperspb" + + "github.com/prometheus/common/expfmt" + + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" + "github.com/numaproj/numaflow/pkg/apis/proto/mvtxdaemon" + "github.com/numaproj/numaflow/pkg/metrics" + raterPkg "github.com/numaproj/numaflow/pkg/mvtxdaemon/server/service/rater" + "github.com/numaproj/numaflow/pkg/shared/logging" +) + +// MonoVtxPendingMetric is the metric emitted from the MonoVtx lag reader for pending stats +// Note: Please keep consistent with the definitions in rust/monovertex/sc/metrics.rs +const MonoVtxPendingMetric = "monovtx_pending" + +type MoveVertexService struct { + mvtxdaemon.UnimplementedMonoVertexDaemonServiceServer + monoVtx *v1alpha1.MonoVertex + httpClient *http.Client + rater raterPkg.MonoVtxRatable +} + +var _ mvtxdaemon.MonoVertexDaemonServiceServer = (*MoveVertexService)(nil) + +// NewMoveVertexService returns a new instance of MoveVertexService +func NewMoveVertexService( + monoVtx *v1alpha1.MonoVertex, + rater raterPkg.MonoVtxRatable, +) (*MoveVertexService, error) { + mv := MoveVertexService{ + monoVtx: monoVtx, + httpClient: &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + }, + Timeout: time.Second * 3, + }, + rater: rater, + } + return &mv, nil +} + +func (mvs *MoveVertexService) GetMonoVertexMetrics(ctx context.Context, empty *emptypb.Empty) 
(*mvtxdaemon.GetMonoVertexMetricsResponse, error) { + resp := new(mvtxdaemon.GetMonoVertexMetricsResponse) + collectedMetrics := new(mvtxdaemon.MonoVertexMetrics) + collectedMetrics.MonoVertex = mvs.monoVtx.Name + collectedMetrics.Pendings = mvs.getPending(ctx) + collectedMetrics.ProcessingRates = mvs.rater.GetRates() + resp.Metrics = collectedMetrics + return resp, nil +} + +// getPending returns the pending count for the mono vertex +func (mvs *MoveVertexService) getPending(ctx context.Context) map[string]*wrapperspb.Int64Value { + log := logging.FromContext(ctx) + headlessServiceName := mvs.monoVtx.GetHeadlessServiceName() + pendingMap := make(map[string]*wrapperspb.Int64Value) + + // Get the headless service name + // We can query the metrics endpoint of the (i)th pod to obtain this value. + // example for 0th pod : https://simple-mono-vertex-mv-0.simple-mono-vertex-mv-headless:2469/metrics + url := fmt.Sprintf("https://%s-mv-0.%s.%s.svc:%v/metrics", mvs.monoVtx.Name, headlessServiceName, mvs.monoVtx.Namespace, v1alpha1.MonoVertexMetricsPort) + if res, err := mvs.httpClient.Get(url); err != nil { + log.Debugf("Error reading the metrics endpoint, it might be because of mono vertex scaling down to 0: %f", err.Error()) + return nil + } else { + // expfmt Parser from prometheus to parse the metrics + textParser := expfmt.TextParser{} + result, err := textParser.TextToMetricFamilies(res.Body) + if err != nil { + log.Errorw("Error in parsing to prometheus metric families", zap.Error(err)) + return nil + } + + // Get the pending messages + if value, ok := result[MonoVtxPendingMetric]; ok { + metricsList := value.GetMetric() + for _, metric := range metricsList { + labels := metric.GetLabel() + lookback := "" + for _, label := range labels { + if label.GetName() == metrics.LabelPeriod { + lookback = label.GetValue() + break + } + } + pendingMap[lookback] = wrapperspb.Int64(int64(metric.Gauge.GetValue())) + } + } + } + return pendingMap +} diff --git 
a/pkg/mvtxdaemon/server/service/rater/goleak_test.go b/pkg/mvtxdaemon/server/service/rater/goleak_test.go new file mode 100644 index 0000000000..58abf6f6be --- /dev/null +++ b/pkg/mvtxdaemon/server/service/rater/goleak_test.go @@ -0,0 +1,28 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rater + +import ( + "testing" + + "go.uber.org/goleak" +) + +// apply go leak verification to all tests in this package +func TestMain(m *testing.M) { + goleak.VerifyTestMain(m) +} diff --git a/pkg/mvtxdaemon/server/service/rater/helper.go b/pkg/mvtxdaemon/server/service/rater/helper.go new file mode 100644 index 0000000000..0973b3c5cb --- /dev/null +++ b/pkg/mvtxdaemon/server/service/rater/helper.go @@ -0,0 +1,123 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package rater + +import ( + "time" + + sharedqueue "github.com/numaproj/numaflow/pkg/shared/queue" +) + +const ( + // indexNotFound is returned when the start index cannot be found in the queue. + indexNotFound = -1 +) + +// UpdateCount updates the count for a given timestamp in the queue. +func UpdateCount(q *sharedqueue.OverflowQueue[*TimestampedCounts], time int64, podReadCounts *PodReadCount) { + items := q.Items() + + // find the element matching the input timestamp and update it + for _, i := range items { + if i.timestamp == time { + i.Update(podReadCounts) + return + } + } + + // if we cannot find a matching element, it means we need to add a new timestamped count to the queue + tc := NewTimestampedCounts(time) + tc.Update(podReadCounts) + q.Append(tc) +} + +// CalculateRate calculates the rate of a MonoVertex for a given lookback period. +func CalculateRate(q *sharedqueue.OverflowQueue[*TimestampedCounts], lookbackSeconds int64) float64 { + counts := q.Items() + if len(counts) <= 1 { + return 0 + } + startIndex := findStartIndex(lookbackSeconds, counts) + // we consider the last but one element as the end index because the last element might be incomplete + // we can be sure that the last but one element in the queue is complete. + endIndex := len(counts) - 2 + if startIndex == indexNotFound { + return 0 + } + + // time diff in seconds. 
+ timeDiff := counts[endIndex].timestamp - counts[startIndex].timestamp + if timeDiff == 0 { + // if the time difference is 0, we return 0 to avoid division by 0 + // this should not happen in practice because we are using a 10s interval + return 0 + } + + delta := float64(0) + for i := startIndex; i < endIndex; i++ { + // calculate the difference between the current and previous pod count snapshots + delta += calculatePodDelta(counts[i], counts[i+1]) + } + return delta / float64(timeDiff) +} + +// findStartIndex finds the index of the first element in the queue that is within the lookback seconds +func findStartIndex(lookbackSeconds int64, counts []*TimestampedCounts) int { + n := len(counts) + now := time.Now().Truncate(CountWindow).Unix() + if n < 2 || now-counts[n-2].timestamp > lookbackSeconds { + // if the second last element is already outside the lookback window, we return indexNotFound + return indexNotFound + } + + startIndex := n - 2 + left := 0 + right := n - 2 + lastTimestamp := now - lookbackSeconds + for left <= right { + mid := left + (right-left)/2 + if counts[mid].timestamp >= lastTimestamp { + startIndex = mid + right = mid - 1 + } else { + left = mid + 1 + } + } + return startIndex +} + +// calculatePodDelta calculates the difference between the current and previous pod count snapshots +func calculatePodDelta(tc1, tc2 *TimestampedCounts) float64 { + delta := float64(0) + if tc1 == nil || tc2 == nil { + // we calculate delta only when both input timestamped counts are non-nil + return delta + } + prevPodReadCount := tc1.PodCountSnapshot() + currPodReadCount := tc2.PodCountSnapshot() + for podName, readCount := range currPodReadCount { + currCount := readCount + prevCount := prevPodReadCount[podName] + // pod delta will be equal to current count in case of restart + podDelta := currCount + if currCount >= prevCount { + podDelta = currCount - prevCount + } + delta += podDelta + } + return delta +} diff --git 
a/pkg/mvtxdaemon/server/service/rater/helper_test.go b/pkg/mvtxdaemon/server/service/rater/helper_test.go new file mode 100644 index 0000000000..6ac878244c --- /dev/null +++ b/pkg/mvtxdaemon/server/service/rater/helper_test.go @@ -0,0 +1,288 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rater + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + + sharedqueue "github.com/numaproj/numaflow/pkg/shared/queue" +) + +const TestTime = 1620000000 + +func TestUpdateCount(t *testing.T) { + t.Run("givenTimeExistsPodExistsCountAvailable_whenUpdate_thenUpdatePodPartitionCount", func(t *testing.T) { + q := sharedqueue.New[*TimestampedCounts](1800) + tc := NewTimestampedCounts(TestTime) + tc.Update(&PodReadCount{"pod1", 10.0}) + q.Append(tc) + + UpdateCount(q, TestTime, &PodReadCount{"pod1", 20.0}) + + assert.Equal(t, 1, q.Length()) + assert.Equal(t, 20.0, q.Items()[0].podReadCounts["pod1"]) + }) + + t.Run("givenTimeExistsPodNotExistsCountAvailable_whenUpdate_thenAddPodCount", func(t *testing.T) { + q := sharedqueue.New[*TimestampedCounts](1800) + tc := NewTimestampedCounts(TestTime) + tc.Update(&PodReadCount{"pod1", 20.0}) + q.Append(tc) + + UpdateCount(q, TestTime, &PodReadCount{"pod2", 10.0}) + + assert.Equal(t, 1, q.Length()) + assert.Equal(t, 20.0, q.Items()[0].podReadCounts["pod1"]) + assert.Equal(t, 10.0, q.Items()[0].podReadCounts["pod2"]) + }) + + 
t.Run("givenTimeExistsPodExistsCountNotAvailable_whenUpdate_thenNotUpdatePod", func(t *testing.T) { + q := sharedqueue.New[*TimestampedCounts](1800) + tc := NewTimestampedCounts(TestTime) + tc.Update(&PodReadCount{"pod1", 10.0}) + q.Append(tc) + + UpdateCount(q, TestTime, nil) + + assert.Equal(t, 1, q.Length()) + assert.Equal(t, 1, len(q.Items()[0].podReadCounts)) + assert.Equal(t, 10.0, q.Items()[0].podReadCounts["pod1"]) + }) + + t.Run("givenTimeExistsPodNotExistsCountNotAvailable_whenUpdate_thenNoUpdate", func(t *testing.T) { + q := sharedqueue.New[*TimestampedCounts](1800) + tc := NewTimestampedCounts(TestTime) + tc.Update(&PodReadCount{"pod1", 10.0}) + q.Append(tc) + + UpdateCount(q, TestTime, nil) + + assert.Equal(t, 1, q.Length()) + assert.Equal(t, 10.0, q.Items()[0].podReadCounts["pod1"]) + }) + + t.Run("givenTimeNotExistsCountAvailable_whenUpdate_thenAddNewItem", func(t *testing.T) { + q := sharedqueue.New[*TimestampedCounts](1800) + tc := NewTimestampedCounts(TestTime) + tc.Update(&PodReadCount{"pod1", 10.0}) + q.Append(tc) + + UpdateCount(q, TestTime+1, &PodReadCount{"pod1", 20.0}) + + assert.Equal(t, 2, q.Length()) + assert.Equal(t, 10.0, q.Items()[0].podReadCounts["pod1"]) + assert.Equal(t, 20.0, q.Items()[1].podReadCounts["pod1"]) + }) + + t.Run("givenTimeNotExistsCountNotAvailable_whenUpdate_thenAddEmptyItem", func(t *testing.T) { + q := sharedqueue.New[*TimestampedCounts](1800) + tc := NewTimestampedCounts(TestTime) + tc.Update(&PodReadCount{"pod1", 10.0}) + q.Append(tc) + + UpdateCount(q, TestTime+1, nil) + + assert.Equal(t, 2, q.Length()) + assert.Equal(t, 10.0, q.Items()[0].podReadCounts["pod1"]) + assert.Equal(t, 0, len(q.Items()[1].podReadCounts)) + }) +} + +func TestCalculateRate(t *testing.T) { + t.Run("givenCollectedTimeLessThanTwo_whenCalculateRate_thenReturnZero", func(t *testing.T) { + q := sharedqueue.New[*TimestampedCounts](1800) + // no data + assert.Equal(t, 0.0, CalculateRate(q, 10)) + + // only one data + now := time.Now() + tc1 := 
NewTimestampedCounts(now.Truncate(CountWindow).Unix() - 20) + tc1.Update(&PodReadCount{"pod1", 5.0}) + q.Append(tc1) + assert.Equal(t, 0.0, CalculateRate(q, 10)) + }) + + t.Run("singlePod_givenCountIncreases_whenCalculateRate_thenReturnRate", func(t *testing.T) { + q := sharedqueue.New[*TimestampedCounts](1800) + now := time.Now() + + tc1 := NewTimestampedCounts(now.Truncate(CountWindow).Unix() - 20) + tc1.Update(&PodReadCount{"pod1", 5.0}) + q.Append(tc1) + tc2 := NewTimestampedCounts(now.Truncate(CountWindow).Unix() - 10) + tc2.Update(&PodReadCount{"pod1", 10.0}) + q.Append(tc2) + tc3 := NewTimestampedCounts(now.Truncate(CountWindow).Unix()) + tc3.Update(&PodReadCount{"pod1", 20.0}) + q.Append(tc3) + + // no enough data collected within lookback seconds, expect rate 0 + assert.Equal(t, 0.0, CalculateRate(q, 5)) + // no enough data collected within lookback seconds, expect rate 0 + assert.Equal(t, 0.0, CalculateRate(q, 15)) + // tc1 and tc2 are used to calculate the rate + assert.Equal(t, 0.5, CalculateRate(q, 25)) + // tc1 and tc2 are used to calculate the rate + assert.Equal(t, 0.5, CalculateRate(q, 100)) + }) + + t.Run("singlePod_givenCountDecreases_whenCalculateRate_thenReturnRate", func(t *testing.T) { + q := sharedqueue.New[*TimestampedCounts](1800) + now := time.Now() + + tc1 := NewTimestampedCounts(now.Truncate(CountWindow).Unix() - 30) + tc1.Update(&PodReadCount{"pod1", 200.0}) + q.Append(tc1) + tc2 := NewTimestampedCounts(now.Truncate(CountWindow).Unix() - 20) + tc2.Update(&PodReadCount{"pod1", 100.0}) + q.Append(tc2) + tc3 := NewTimestampedCounts(now.Truncate(CountWindow).Unix() - 10) + tc3.Update(&PodReadCount{"pod1", 50.0}) + q.Append(tc3) + tc4 := NewTimestampedCounts(now.Truncate(CountWindow).Unix()) + tc4.Update(&PodReadCount{"pod1", 80.0}) + q.Append(tc4) + + // no enough data collected within lookback seconds, expect rate 0 + assert.Equal(t, 0.0, CalculateRate(q, 5)) + // no enough data collected within lookback seconds, expect rate 0 + 
assert.Equal(t, 0.0, CalculateRate(q, 15)) + // tc2 and tc3 are used to calculate the rate + assert.Equal(t, 5.0, CalculateRate(q, 25)) + // tc1, 2 and 3 are used to calculate the rate + assert.Equal(t, 7.5, CalculateRate(q, 35)) + // tc1, 2 and 3 are used to calculate the rate + assert.Equal(t, 7.5, CalculateRate(q, 100)) + }) + + t.Run("multiplePods_givenCountIncreases_whenCalculateRate_thenReturnRate", func(t *testing.T) { + q := sharedqueue.New[*TimestampedCounts](1800) + now := time.Now() + + tc1 := NewTimestampedCounts(now.Truncate(CountWindow).Unix() - 30) + tc1.Update(&PodReadCount{"pod1", 50.0}) + tc1.Update(&PodReadCount{"pod2", 100.0}) + q.Append(tc1) + tc2 := NewTimestampedCounts(now.Truncate(CountWindow).Unix() - 20) + tc2.Update(&PodReadCount{"pod1", 100.0}) + tc2.Update(&PodReadCount{"pod2", 200.0}) + q.Append(tc2) + tc3 := NewTimestampedCounts(now.Truncate(CountWindow).Unix() - 10) + tc3.Update(&PodReadCount{"pod1", 200.0}) + tc3.Update(&PodReadCount{"pod2", 300.0}) + q.Append(tc3) + + // no enough data collected within lookback seconds, expect rate 0 + assert.Equal(t, 0.0, CalculateRate(q, 5)) + // no enough data collected within lookback seconds, expect rate 0 + assert.Equal(t, 0.0, CalculateRate(q, 15)) + // no enough data collected within lookback seconds, expect rate 0 + assert.Equal(t, 0.0, CalculateRate(q, 25)) + // tc1 and tc2 are used to calculate the rate + assert.Equal(t, 15.0, CalculateRate(q, 35)) + }) + + t.Run("multiplePods_givenCountDecreases_whenCalculateRate_thenReturnRate", func(t *testing.T) { + q := sharedqueue.New[*TimestampedCounts](1800) + now := time.Now() + + tc1 := NewTimestampedCounts(now.Truncate(CountWindow).Unix() - 30) + tc1.Update(&PodReadCount{"pod1", 200.0}) + tc1.Update(&PodReadCount{"pod2", 300.0}) + q.Append(tc1) + tc2 := NewTimestampedCounts(now.Truncate(CountWindow).Unix() - 20) + tc2.Update(&PodReadCount{"pod1", 100.0}) + tc2.Update(&PodReadCount{"pod2", 200.0}) + q.Append(tc2) + tc3 := 
NewTimestampedCounts(now.Truncate(CountWindow).Unix() - 10) + tc3.Update(&PodReadCount{"pod1", 50.0}) + tc3.Update(&PodReadCount{"pod2", 100.0}) + q.Append(tc3) + + // no enough data collected within lookback seconds, expect rate 0 + assert.Equal(t, 0.0, CalculateRate(q, 5)) + // no enough data collected within lookback seconds, expect rate 0 + assert.Equal(t, 0.0, CalculateRate(q, 15)) + // no enough data collected within lookback seconds, expect rate 0 + assert.Equal(t, 0.0, CalculateRate(q, 25)) + // tc1 and tc2 are used to calculate the rate + assert.Equal(t, 30.0, CalculateRate(q, 35)) + }) + + t.Run("multiplePods_givenOnePodRestarts_whenCalculateRate_thenReturnRate", func(t *testing.T) { + q := sharedqueue.New[*TimestampedCounts](1800) + now := time.Now() + + tc1 := NewTimestampedCounts(now.Truncate(CountWindow).Unix() - 30) + tc1.Update(&PodReadCount{"pod1", 50.0}) + tc1.Update(&PodReadCount{"pod2", 300.0}) + q.Append(tc1) + tc2 := NewTimestampedCounts(now.Truncate(CountWindow).Unix() - 20) + tc2.Update(&PodReadCount{"pod1", 100.0}) + tc2.Update(&PodReadCount{"pod2", 200.0}) + q.Append(tc2) + tc3 := NewTimestampedCounts(now.Truncate(CountWindow).Unix() - 10) + tc3.Update(&PodReadCount{"pod1", 200.0}) + tc3.Update(&PodReadCount{"pod2", 100.0}) + q.Append(tc3) + + // no enough data collected within lookback seconds, expect rate 0 + assert.Equal(t, 0.0, CalculateRate(q, 5)) + // no enough data collected within lookback seconds, expect rate 0 + assert.Equal(t, 0.0, CalculateRate(q, 15)) + // no enough data collected within lookback seconds, expect rate 0 + assert.Equal(t, 0.0, CalculateRate(q, 25)) + // tc1 and tc2 are used to calculate the rate + assert.Equal(t, 25.0, CalculateRate(q, 35)) + }) + + t.Run("multiplePods_givenPodsComeAndGo_whenCalculateRate_thenReturnRate", func(t *testing.T) { + q := sharedqueue.New[*TimestampedCounts](1800) + now := time.Now() + + tc1 := NewTimestampedCounts(now.Truncate(time.Second*10).Unix() - 30) + 
tc1.Update(&PodReadCount{"pod1", 200.0}) + tc1.Update(&PodReadCount{"pod2", 90.0}) + tc1.Update(&PodReadCount{"pod3", 50.0}) + q.Append(tc1) + tc2 := NewTimestampedCounts(now.Truncate(time.Second*10).Unix() - 20) + tc2.Update(&PodReadCount{"pod1", 100.0}) + tc2.Update(&PodReadCount{"pod2", 200.0}) + q.Append(tc2) + tc3 := NewTimestampedCounts(now.Truncate(CountWindow).Unix() - 10) + tc3.Update(&PodReadCount{"pod1", 50.0}) + tc3.Update(&PodReadCount{"pod2", 300.0}) + tc3.Update(&PodReadCount{"pod4", 100.0}) + q.Append(tc3) + + tc4 := NewTimestampedCounts(now.Truncate(CountWindow).Unix()) + tc4.Update(&PodReadCount{"pod2", 400.0}) + tc4.Update(&PodReadCount{"pod3", 200.0}) + tc4.Update(&PodReadCount{"pod100", 200.0}) + q.Append(tc4) + + // vertex rate + assert.Equal(t, 0.0, CalculateRate(q, 5)) + assert.Equal(t, 0.0, CalculateRate(q, 15)) + assert.Equal(t, 25.0, CalculateRate(q, 25)) + assert.Equal(t, 23.0, CalculateRate(q, 35)) + assert.Equal(t, 23.0, CalculateRate(q, 100)) + }) +} diff --git a/pkg/mvtxdaemon/server/service/rater/options.go b/pkg/mvtxdaemon/server/service/rater/options.go new file mode 100644 index 0000000000..03fdc53d04 --- /dev/null +++ b/pkg/mvtxdaemon/server/service/rater/options.go @@ -0,0 +1,53 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rater + +type options struct { + // Number of workers working on collecting counts of processed messages. 
+ workers int + // Time in milliseconds, each element in the work queue will be picked up in an interval of this period of time. + taskInterval int +} + +type Option func(*options) + +func defaultOptions() *options { + // A simple example of how these numbers work together: + // Assuming we have 200 tasks, we have 20 workers, each worker will be responsible for approximately 10 tasks during one iteration. + // The task interval is 5 seconds, which means each task need to be picked up by a worker every 5 seconds. + // Hence, a worker needs to finish processing 1 task in 0.5 second. + // Translating to numaflow language, for a 200-pod pipeline, a worker needs to finish scraping 1 pod in 0.5 second, which is a reasonable number. + return &options{ + workers: 20, + // ensure that each task is picked up at least once within a CountWindow by defining taskInterval as half of CountWindow. + // if a CountWindow misses one pod, when calculating the delta with the next window, for that specific pod, + // we will count the total processed count as delta, which is wrong and eventually leads to incorrect high processing rate. + taskInterval: int(CountWindow.Milliseconds() / 2), + } +} + +func WithWorkers(n int) Option { + return func(o *options) { + o.workers = n + } +} + +func WithTaskInterval(n int) Option { + return func(o *options) { + o.taskInterval = n + } +} diff --git a/pkg/mvtxdaemon/server/service/rater/pod_tracker.go b/pkg/mvtxdaemon/server/service/rater/pod_tracker.go new file mode 100644 index 0000000000..1a8a3fd2b4 --- /dev/null +++ b/pkg/mvtxdaemon/server/service/rater/pod_tracker.go @@ -0,0 +1,178 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rater + +import ( + "crypto/tls" + "fmt" + "net/http" + "strconv" + "strings" + "time" + + "go.uber.org/zap" + "golang.org/x/net/context" + + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" + "github.com/numaproj/numaflow/pkg/shared/logging" + "github.com/numaproj/numaflow/pkg/shared/util" +) + +// podInfoSeparator is used as a separator to split the pod key +// to get the pipeline name, vertex name, and pod index. +// "*" is chosen because it is not allowed in all the above fields. +const podInfoSeparator = "*" + +// PodTracker maintains a set of active pods for a MonoVertex +// It periodically sends http requests to pods to check if they are still active +type PodTracker struct { + monoVertex *v1alpha1.MonoVertex + log *zap.SugaredLogger + httpClient metricsHttpClient + activePods *util.UniqueStringList + refreshInterval time.Duration +} +type PodTrackerOption func(*PodTracker) + +func NewPodTracker(ctx context.Context, mv *v1alpha1.MonoVertex, opts ...PodTrackerOption) *PodTracker { + pt := &PodTracker{ + monoVertex: mv, + log: logging.FromContext(ctx).Named("PodTracker"), + httpClient: &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + }, + Timeout: time.Second, + }, + activePods: util.NewUniqueStringList(), + refreshInterval: 30 * time.Second, // Default refresh interval for updating the active pod set + } + + for _, opt := range opts { + if opt != nil { + opt(pt) + } + } + return pt +} + +// WithRefreshInterval sets how often to refresh the rate metrics. 
+func WithRefreshInterval(d time.Duration) PodTrackerOption { + return func(r *PodTracker) { + r.refreshInterval = d + } +} + +func (pt *PodTracker) Start(ctx context.Context) error { + pt.log.Debugf("Starting tracking active pods for MonoVertex %s...", pt.monoVertex.Name) + go pt.trackActivePods(ctx) + return nil +} + +func (pt *PodTracker) trackActivePods(ctx context.Context) { + ticker := time.NewTicker(pt.refreshInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + pt.log.Infof("Context is cancelled. Stopping tracking active pods for MonoVertex %s...", pt.monoVertex.Name) + return + case <-ticker.C: + pt.updateActivePods() + } + } +} + +// updateActivePods checks the status of all pods and updates the activePods set accordingly. +func (pt *PodTracker) updateActivePods() { + for i := 0; i < int(pt.monoVertex.Spec.Scale.GetMaxReplicas()); i++ { + podName := fmt.Sprintf("%s-mv-%d", pt.monoVertex.Name, i) + podKey := pt.getPodKey(i) + if pt.isActive(podName) { + pt.activePods.PushBack(podKey) + } else { + pt.activePods.Remove(podKey) + } + } + pt.log.Debugf("Finished updating the active pod set: %v", pt.activePods.ToString()) +} + +func (pt *PodTracker) getPodKey(index int) string { + // podKey is used as a unique identifier for the pod, it is used by worker to determine the count of processed messages of the pod. + // we use the monoVertex name and the pod index to create a unique identifier. + // For example, if the monoVertex name is "simple-mono-vertex" and the pod index is 0, the podKey will be "simple-mono-vertex*0". + // This way, we can easily identify the pod based on its key. + return strings.Join([]string{pt.monoVertex.Name, fmt.Sprintf("%d", index)}, podInfoSeparator) +} + +// IsActive returns true if the pod is active, false otherwise. 
+func (pt *PodTracker) IsActive(podKey string) bool { + return pt.activePods.Contains(podKey) +} + +func (pt *PodTracker) isActive(podName string) bool { + headlessSvc := pt.monoVertex.GetHeadlessServiceName() + // using the MonoVertex headless service to check if a pod exists or not. + // example for 0th pod: https://simple-mono-vertex-mv-0.simple-mono-vertex-mv-headless.default.svc:2469/metrics + url := fmt.Sprintf("https://%s.%s.%s.svc:%v/metrics", podName, headlessSvc, pt.monoVertex.Namespace, v1alpha1.MonoVertexMetricsPort) + resp, err := pt.httpClient.Head(url) + if err != nil { + pt.log.Debugf("Sending HEAD request to pod %s is unsuccessful: %v, treating the pod as inactive", podName, err) + return false + } + pt.log.Debugf("Sending HEAD request to pod %s is successful, treating the pod as active", podName) + _ = resp.Body.Close() + return true +} + +// GetActivePodsCount returns the number of active pods. +func (pt *PodTracker) GetActivePodsCount() int { + return pt.activePods.Length() +} + +// podInfo represents the information of a pod that is used for tracking the processing rate +type podInfo struct { + monoVertexName string + replica int + podName string +} + +func (pt *PodTracker) GetPodInfo(key string) (*podInfo, error) { + pi := strings.Split(key, podInfoSeparator) + if len(pi) != 2 { + return nil, fmt.Errorf("invalid key %q", key) + } + replica, err := strconv.Atoi(pi[1]) + if err != nil { + return nil, fmt.Errorf("invalid replica in key %q", key) + } + return &podInfo{ + monoVertexName: pi[0], + replica: replica, + podName: strings.Join([]string{pi[0], "mv", pi[1]}, "-"), + }, nil +} + +// LeastRecentlyUsed returns the least recently used pod from the active pod list. +// if there are no active pods, it returns an empty string. 
+func (pt *PodTracker) LeastRecentlyUsed() string { + if e := pt.activePods.Front(); e != "" { + pt.activePods.MoveToBack(e) + return e + } + return "" +} diff --git a/pkg/mvtxdaemon/server/service/rater/pod_tracker_test.go b/pkg/mvtxdaemon/server/service/rater/pod_tracker_test.go new file mode 100644 index 0000000000..3338fd3ecf --- /dev/null +++ b/pkg/mvtxdaemon/server/service/rater/pod_tracker_test.go @@ -0,0 +1,141 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package rater + +import ( + "bytes" + "context" + "fmt" + "io" + "log" + "net/http" + "strconv" + "strings" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" +) + +type trackerMockHttpClient struct { + podsCount int32 + lock *sync.RWMutex +} + +func (m *trackerMockHttpClient) setPodsCount(count int32) { + m.lock.Lock() + defer m.lock.Unlock() + m.podsCount = count +} + +func (m *trackerMockHttpClient) Get(url string) (*http.Response, error) { + return nil, nil +} + +func (m *trackerMockHttpClient) Head(url string) (*http.Response, error) { + m.lock.Lock() + defer m.lock.Unlock() + for i := 0; i < int(m.podsCount); i++ { + if strings.Contains(url, "p-mv-"+strconv.Itoa(i)+".p-mv-headless.default.svc:2469/metrics") { + return &http.Response{ + StatusCode: 200, + Body: io.NopCloser(bytes.NewReader([]byte(``)))}, nil + } + } + + return nil, fmt.Errorf("pod not found") +} + +func TestPodTracker_Start(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + lookBackSeconds := uint32(30) + defer cancel() + pipeline := &v1alpha1.MonoVertex{ + ObjectMeta: metav1.ObjectMeta{ + Name: "p", + Namespace: "default", + }, + Spec: v1alpha1.MonoVertexSpec{ + Scale: v1alpha1.Scale{LookbackSeconds: &lookBackSeconds}, + }, + } + tracker := NewPodTracker(ctx, pipeline, WithRefreshInterval(time.Second)) + tracker.httpClient = &trackerMockHttpClient{ + podsCount: 10, + lock: &sync.RWMutex{}, + } + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + if err := tracker.Start(ctx); err != nil { + log.Fatalf("failed to start tracker: %v", err) + } + }() + + for tracker.GetActivePodsCount() != 10 { + select { + case <-ctx.Done(): + t.Fatalf("incorrect active pods %v", ctx.Err()) + default: + time.Sleep(100 * time.Millisecond) + } + } + + tracker.httpClient.(*trackerMockHttpClient).setPodsCount(5) + + for 
tracker.GetActivePodsCount() != 5 { + select { + case <-ctx.Done(): + t.Fatalf("incorrect active pods %v", ctx.Err()) + default: + time.Sleep(100 * time.Millisecond) + } + } + cancel() + wg.Wait() + + assert.Equal(t, "p*0", tracker.LeastRecentlyUsed()) + assert.Equal(t, "p*1", tracker.LeastRecentlyUsed()) + assert.Equal(t, true, tracker.IsActive("p*4")) + assert.Equal(t, false, tracker.IsActive("p*5")) +} + +func TestPodTracker_GetPodInfo(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + defer cancel() + tracker := NewPodTracker(ctx, nil, WithRefreshInterval(time.Second)) + // error scenario - more than 3 fields + podInfo, err := tracker.GetPodInfo("p*v*0*1") + assert.Nilf(t, podInfo, "podInfo should be nil") + assert.ErrorContains(t, err, "invalid key") + + // common scenario - get the pod info + podInfo, err = tracker.GetPodInfo("p*0") + assert.Nilf(t, err, "error should be nil") + assert.Equal(t, "p", podInfo.monoVertexName) + assert.Equal(t, "p-mv-0", podInfo.podName) + + // common scenario - incorrect the pod info + podInfo, err = tracker.GetPodInfo("p*avc") + assert.Nilf(t, podInfo, "podInfo should be nil") + assert.ErrorContains(t, err, "invalid replica in key") +} diff --git a/pkg/mvtxdaemon/server/service/rater/rater.go b/pkg/mvtxdaemon/server/service/rater/rater.go new file mode 100644 index 0000000000..d160edd838 --- /dev/null +++ b/pkg/mvtxdaemon/server/service/rater/rater.go @@ -0,0 +1,270 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rater + +import ( + "crypto/tls" + "fmt" + "net/http" + "time" + + "github.com/prometheus/common/expfmt" + "go.uber.org/zap" + "golang.org/x/net/context" + "google.golang.org/protobuf/types/known/wrapperspb" + + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" + "github.com/numaproj/numaflow/pkg/shared/logging" + sharedqueue "github.com/numaproj/numaflow/pkg/shared/queue" +) + +const CountWindow = time.Second * 10 +const MonoVtxReadMetricName = "monovtx_read_total" + +// MonoVtxRatable is the interface for the Rater struct. +type MonoVtxRatable interface { + Start(ctx context.Context) error + GetRates() map[string]*wrapperspb.DoubleValue +} + +var _ MonoVtxRatable = (*Rater)(nil) + +// metricsHttpClient interface for the GET/HEAD call to metrics endpoint. +// Had to add this an interface for testing +type metricsHttpClient interface { + Get(url string) (*http.Response, error) + Head(url string) (*http.Response, error) +} + +// fixedLookbackSeconds always maintain rate metrics for the following lookback seconds (1m, 5m, 15m) +var fixedLookbackSeconds = map[string]int64{"1m": 60, "5m": 300, "15m": 900} + +// Rater is a struct that maintains information about the processing rate of the MonoVertex. +// It monitors the number of processed messages for each pod in a MonoVertex and calculates the rate. 
+type Rater struct { + monoVertex *v1alpha1.MonoVertex + httpClient metricsHttpClient + log *zap.SugaredLogger + // podTracker keeps track of active pods and their counts + podTracker *PodTracker + // timestampedPodCounts is a queue of timestamped counts for the MonoVertex + timestampedPodCounts *sharedqueue.OverflowQueue[*TimestampedCounts] + // userSpecifiedLookBackSeconds is the user-specified lookback seconds for that MonoVertex + userSpecifiedLookBackSeconds int64 + options *options +} + +// PodReadCount is a struct to maintain count of messages read by a pod of MonoVertex +type PodReadCount struct { + // pod name of the pod + name string + // represents the count of messages read by the pod + readCount float64 +} + +// Name returns the pod name +func (p *PodReadCount) Name() string { + return p.name +} + +// ReadCount returns the value of the messages read by the Pod +func (p *PodReadCount) ReadCount() float64 { + return p.readCount +} + +func NewRater(ctx context.Context, mv *v1alpha1.MonoVertex, opts ...Option) *Rater { + rater := Rater{ + monoVertex: mv, + httpClient: &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + }, + Timeout: time.Second * 1, + }, + log: logging.FromContext(ctx).Named("Rater"), + options: defaultOptions(), + } + + rater.podTracker = NewPodTracker(ctx, mv) + // maintain the total counts of the last 30 minutes(1800 seconds) since we support 1m, 5m, 15m lookback seconds. + rater.timestampedPodCounts = sharedqueue.New[*TimestampedCounts](int(1800 / CountWindow.Seconds())) + rater.userSpecifiedLookBackSeconds = int64(mv.Spec.Scale.GetLookbackSeconds()) + + for _, opt := range opts { + if opt != nil { + opt(rater.options) + } + } + return &rater +} + +// Function monitor() defines each of the worker's jobs. 
+// It waits for keys in the channel, and starts a monitoring job +func (r *Rater) monitor(ctx context.Context, id int, keyCh <-chan string) { + r.log.Infof("Started monitoring worker %v", id) + for { + select { + case <-ctx.Done(): + r.log.Infof("Stopped monitoring worker %v", id) + return + case key := <-keyCh: + if err := r.monitorOnePod(ctx, key, id); err != nil { + r.log.Errorw("Failed to monitor a pod", zap.String("pod", key), zap.Error(err)) + } + } + } +} + +// monitorOnePod monitors a single pod and updates the rate metrics for the given pod. +func (r *Rater) monitorOnePod(ctx context.Context, key string, worker int) error { + log := logging.FromContext(ctx).With("worker", fmt.Sprint(worker)).With("podKey", key) + log.Debugf("Working on key: %s", key) + pInfo, err := r.podTracker.GetPodInfo(key) + if err != nil { + return err + } + var podReadCount *PodReadCount + if r.podTracker.IsActive(key) { + podReadCount = r.getPodReadCounts(pInfo.podName) + if podReadCount == nil { + log.Debugf("Failed retrieving total podReadCounts for pod %s", pInfo.podName) + } + } else { + log.Debugf("Pod %s does not exist, updating it with nil...", pInfo.podName) + podReadCount = nil + } + now := time.Now().Add(CountWindow).Truncate(CountWindow).Unix() + UpdateCount(r.timestampedPodCounts, now, podReadCount) + return nil +} + +// getPodReadCounts returns the total number of messages read by the pod +// It fetches the total pod read counts from the Prometheus metrics endpoint. 
+func (r *Rater) getPodReadCounts(podName string) *PodReadCount { + headlessServiceName := r.monoVertex.GetHeadlessServiceName() + // scrape the read total metric from pod metric port + // example for 0th pod: https://simple-mono-vertex-mv-0.simple-mono-vertex-mv-headless.default.svc:2469/metrics + url := fmt.Sprintf("https://%s.%s.%s.svc:%v/metrics", podName, headlessServiceName, r.monoVertex.Namespace, v1alpha1.MonoVertexMetricsPort) + resp, err := r.httpClient.Get(url) + if err != nil { + r.log.Errorf("[MonoVertex name %s, pod name %s]: failed reading the metrics endpoint, %v", r.monoVertex.Name, podName, err.Error()) + return nil + } + defer resp.Body.Close() + + textParser := expfmt.TextParser{} + result, err := textParser.TextToMetricFamilies(resp.Body) + if err != nil { + r.log.Errorf("[MonoVertex name %s, pod name %s]: failed parsing to prometheus metric families, %v", r.monoVertex.Name, podName, err.Error()) + return nil + } + + if value, ok := result[MonoVtxReadMetricName]; ok && value != nil && len(value.GetMetric()) > 0 { + metricsList := value.GetMetric() + // Each pod should be emitting only one metric with this name, so we should be able to take the first value + // from the results safely. + // We use Untyped here as the counter metric family shows up as untyped from the rust client + // TODO(MonoVertex): Check further on this to understand why not type is counter + podReadCount := &PodReadCount{podName, metricsList[0].Untyped.GetValue()} + return podReadCount + } else { + r.log.Errorf("[MonoVertex name %s, pod name %s]: failed getting the read total metric, the metric is not available.", r.monoVertex.Name, podName) + return nil + } +} + +// GetRates returns the rate metrics for the MonoVertex. +// It calculates the rate metrics for the given lookback seconds. 
+func (r *Rater) GetRates() map[string]*wrapperspb.DoubleValue { + r.log.Debugf("Current timestampedPodCounts for MonoVertex %s is: %v", r.monoVertex.Name, r.timestampedPodCounts) + var result = make(map[string]*wrapperspb.DoubleValue) + // calculate rates for each lookback seconds + for n, i := range r.buildLookbackSecondsMap() { + rate := CalculateRate(r.timestampedPodCounts, i) + result[n] = wrapperspb.Double(rate) + } + r.log.Debugf("Got rates for MonoVertex %s: %v", r.monoVertex.Name, result) + return result +} + +func (r *Rater) buildLookbackSecondsMap() map[string]int64 { + lookbackSecondsMap := map[string]int64{"default": r.userSpecifiedLookBackSeconds} + for k, v := range fixedLookbackSeconds { + lookbackSecondsMap[k] = v + } + return lookbackSecondsMap +} + +func (r *Rater) Start(ctx context.Context) error { + r.log.Infof("Starting rater...") + keyCh := make(chan string) + ctx, cancel := context.WithCancel(logging.WithLogger(ctx, r.log)) + defer cancel() + + go func() { + err := r.podTracker.Start(ctx) + if err != nil { + r.log.Errorw("Failed to start pod tracker", zap.Error(err)) + } + }() + + // Worker group + for i := 1; i <= r.options.workers; i++ { + go r.monitor(ctx, i, keyCh) + } + + // Function assign() sends the least recently used podKey to the channel so that it can be picked up by a worker. + assign := func() { + if e := r.podTracker.LeastRecentlyUsed(); e != "" { + keyCh <- e + return + } + } + + // Following for loop keeps calling assign() function to assign monitoring tasks to the workers. + // It makes sure each element in the list will be assigned every N milliseconds. + for { + select { + case <-ctx.Done(): + r.log.Info("Shutting down monitoring job assigner") + return nil + default: + assign() + // Make sure each of the key will be assigned at least every taskInterval milliseconds. 
+ sleep(ctx, time.Millisecond*time.Duration(func() int { + l := r.podTracker.GetActivePodsCount() + if l == 0 { + return r.options.taskInterval + } + result := r.options.taskInterval / l + if result > 0 { + return result + } + return 1 + }())) + } + } +} + +// sleep function uses a select statement to check if the context is canceled before sleeping for the given duration +// it helps ensure the sleep will be released when the context is canceled, allowing the goroutine to exit gracefully +func sleep(ctx context.Context, duration time.Duration) { + select { + case <-ctx.Done(): + case <-time.After(duration): + } +} diff --git a/pkg/mvtxdaemon/server/service/rater/rater_test.go b/pkg/mvtxdaemon/server/service/rater/rater_test.go new file mode 100644 index 0000000000..26b7e2f3bb --- /dev/null +++ b/pkg/mvtxdaemon/server/service/rater/rater_test.go @@ -0,0 +1,134 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package rater + +import ( + "bytes" + "fmt" + "io" + "log" + "net/http" + "sync" + "testing" + "time" + + "golang.org/x/net/context" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" +) + +type raterMockHttpClient struct { + podOneCount int64 + podTwoCount int64 + lock *sync.RWMutex +} + +func (m *raterMockHttpClient) Get(url string) (*http.Response, error) { + m.lock.Lock() + defer m.lock.Unlock() + if url == "https://p-mv-0.p-mv-headless.default.svc:2469/metrics" { + m.podOneCount = m.podOneCount + 20 + resp := &http.Response{ + StatusCode: 200, + // we use the default monovertex forwarder metric name "monovtx_read_total" is used to retrieve the metric + Body: io.NopCloser(bytes.NewReader([]byte(fmt.Sprintf(` +# HELP monovtx_read A Counter to keep track of the total number of messages read from the source. +# TYPE monovtx_read counter +monovtx_read_total{mvtx_name="simple-mono-vertex",mvtx_replica="0"} %d +`, m.podOneCount))))} + return resp, nil + } else if url == "https://p-mv-1.p-mv-headless.default.svc:2469/metrics" { + m.podTwoCount = m.podTwoCount + 60 + resp := &http.Response{ + StatusCode: 200, + Body: io.NopCloser(bytes.NewReader([]byte(fmt.Sprintf(` +# HELP monovtx_read A Counter to keep track of the total number of messages read from the source. 
+# TYPE monovtx_read counter +monovtx_read_total{mvtx_name="simple-mono-vertex",mvtx_replica="1"} %d +`, m.podTwoCount))))} + return resp, nil + } else { + return nil, nil + } +} + +func (m *raterMockHttpClient) Head(url string) (*http.Response, error) { + m.lock.Lock() + defer m.lock.Unlock() + if url == "https://p-mv-0.p-mv-headless.default.svc:2469/metrics" { + return &http.Response{ + StatusCode: 200, + Body: io.NopCloser(bytes.NewReader([]byte(``)))}, nil + } else if url == "https://p-mv-1.p-mv-headless.default.svc:2469/metrics" { + return &http.Response{ + StatusCode: 200, + Body: io.NopCloser(bytes.NewReader([]byte(``)))}, nil + } else { + return nil, fmt.Errorf("unknown url: %s", url) + } +} + +// TestRater_Start tests the rater by mocking the http client +// we mock the metrics endpoint of the pods and increment the read count by 20 for pod one, and 60 for pod two, +// then we verify that the rate calculator is able to calculate a positive rate for the vertex +// note: this test doesn't test the accuracy of the calculated rate, the calculation is tested by helper_test.go +func TestRater_Start(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*29) + lookBackSeconds := uint32(30) + defer cancel() + pipeline := &v1alpha1.MonoVertex{ + ObjectMeta: metav1.ObjectMeta{ + Name: "p", + Namespace: "default", + }, + Spec: v1alpha1.MonoVertexSpec{ + Scale: v1alpha1.Scale{LookbackSeconds: &lookBackSeconds}, + }, + } + r := NewRater(ctx, pipeline, WithTaskInterval(1000)) + podTracker := NewPodTracker(ctx, pipeline, WithRefreshInterval(time.Second*1)) + podTracker.httpClient = &raterMockHttpClient{podOneCount: 0, podTwoCount: 0, lock: &sync.RWMutex{}} + r.httpClient = &raterMockHttpClient{podOneCount: 0, podTwoCount: 0, lock: &sync.RWMutex{}} + r.podTracker = podTracker + + timer := time.NewTimer(60 * time.Second) + succeedChan := make(chan struct{}) + go func() { + if err := r.Start(ctx); err != nil { + log.Fatalf("failed to start 
rater: %v", err) + } + }() + go func() { + for { + if r.GetRates()["default"].GetValue() <= 0 { + time.Sleep(time.Second) + } else { + succeedChan <- struct{}{} + break + } + } + }() + select { + case <-succeedChan: + time.Sleep(time.Second) + break + case <-timer.C: + t.Fatalf("timed out waiting for rate to be calculated") + } + timer.Stop() +} diff --git a/pkg/mvtxdaemon/server/service/rater/timestamped_counts.go b/pkg/mvtxdaemon/server/service/rater/timestamped_counts.go new file mode 100644 index 0000000000..ee2a13519b --- /dev/null +++ b/pkg/mvtxdaemon/server/service/rater/timestamped_counts.go @@ -0,0 +1,73 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package rater + +import ( + "fmt" + "sync" +) + +// TimestampedCounts track the total count of processed messages for a list of pods at a given timestamp +type TimestampedCounts struct { + // timestamp in seconds is the time when the count is recorded + timestamp int64 + // the key of podReadCounts represents the pod name, the value represents a count of messages processed by the pod + podReadCounts map[string]float64 + lock *sync.RWMutex +} + +func NewTimestampedCounts(t int64) *TimestampedCounts { + return &TimestampedCounts{ + timestamp: t, + podReadCounts: make(map[string]float64), + lock: new(sync.RWMutex), + } +} + +// Update updates the count of processed messages for a pod +func (tc *TimestampedCounts) Update(podReadCount *PodReadCount) { + tc.lock.Lock() + defer tc.lock.Unlock() + if podReadCount == nil { + // we choose to skip updating when podReadCounts is nil, instead of removing the pod from the map. + // imagine if the getPodReadCounts call fails to scrape the readCount metric, and it's NOT because the pod is down. + // in this case getPodReadCounts returns nil. + // if we remove the pod from the map and then the next scrape successfully gets the readCount, we can reach a state that in the timestamped counts, + // for this single pod, at t1, readCount is 123456, at t2, the map doesn't contain this pod and t3, readCount is 123457. + // when calculating the rate, as we sum up deltas among timestamps, we will get 123457 total delta instead of the real delta 1. + // one occurrence of such case can lead to extremely high rate and mess up the autoscaling. + // hence we'd rather keep the readCount as it is to avoid wrong rate calculation. 
+ return + } + tc.podReadCounts[podReadCount.Name()] = podReadCount.ReadCount() +} + +// PodCountSnapshot returns a copy of podReadCounts +// it's used to ensure the returned map is not modified by other goroutines +func (tc *TimestampedCounts) PodCountSnapshot() map[string]float64 { + tc.lock.RLock() + defer tc.lock.RUnlock() + return tc.podReadCounts +} + +// String returns a string representation of the TimestampedCounts +// it's used for debugging purpose +func (tc *TimestampedCounts) String() string { + tc.lock.RLock() + defer tc.lock.RUnlock() + return fmt.Sprintf("{timestamp: %d, podReadCounts: %v}", tc.timestamp, tc.podReadCounts) +} diff --git a/pkg/mvtxdaemon/server/service/rater/timestamped_counts_test.go b/pkg/mvtxdaemon/server/service/rater/timestamped_counts_test.go new file mode 100644 index 0000000000..75d0eda781 --- /dev/null +++ b/pkg/mvtxdaemon/server/service/rater/timestamped_counts_test.go @@ -0,0 +1,64 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package rater + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewTimestampedCounts(t *testing.T) { + tc := NewTimestampedCounts(TestTime) + tc.Update(&PodReadCount{"pod1", 10.0}) + assert.Equal(t, int64(TestTime), tc.timestamp) + assert.Equal(t, 1, len(tc.podReadCounts)) + assert.Equal(t, "{timestamp: 1620000000, podReadCounts: map[pod1:10]}", tc.String()) +} + +func TestTimestampedCounts_Update(t *testing.T) { + tc := NewTimestampedCounts(TestTime) + tc.Update(&PodReadCount{"pod1", 10.0}) + assert.Equal(t, 10.0, tc.podReadCounts["pod1"]) + tc.Update(&PodReadCount{"pod1", 20.0}) + assert.Equal(t, 20.0, tc.podReadCounts["pod1"]) + tc.Update(&PodReadCount{"pod2", 30.0}) + assert.Equal(t, 30.0, tc.podReadCounts["pod2"]) + assert.Equal(t, 2, len(tc.podReadCounts)) + tc.Update(nil) + assert.Equal(t, 2, len(tc.podReadCounts)) + assert.Equal(t, 20, int(tc.podReadCounts["pod1"])) + assert.Equal(t, 30, int(tc.podReadCounts["pod2"])) + + tc.Update(&PodReadCount{"pod1", 10.0}) + assert.Equal(t, 10, int(tc.podReadCounts["pod1"])) + tc.Update(&PodReadCount{"pod2", 20.0}) + assert.Equal(t, 20, int(tc.podReadCounts["pod2"])) + + tc2 := NewTimestampedCounts(TestTime + 1) + tc2.Update(&PodReadCount{"pod1", 40.0}) + assert.Equal(t, 40.0, tc2.podReadCounts["pod1"]) + tc2.Update(&PodReadCount{"pod2", 10.0}) + assert.Equal(t, 10.0, tc2.podReadCounts["pod2"]) +} + +func TestTimestampedPodCounts_Snapshot(t *testing.T) { + tc := NewTimestampedCounts(TestTime) + tc.Update(&PodReadCount{"pod1", 10.0}) + tc.Update(&PodReadCount{"pod2", 20.0}) + assert.Equal(t, map[string]float64{"pod1": 10.0, "pod2": 20.0}, tc.PodCountSnapshot()) +} diff --git a/pkg/reconciler/cmd/start.go b/pkg/reconciler/cmd/start.go index f7fa5f295f..0565aefad1 100644 --- a/pkg/reconciler/cmd/start.go +++ b/pkg/reconciler/cmd/start.go @@ -40,6 +40,7 @@ import ( "github.com/numaproj/numaflow/pkg/reconciler" isbsvcctrl "github.com/numaproj/numaflow/pkg/reconciler/isbsvc" monovtxctrl 
"github.com/numaproj/numaflow/pkg/reconciler/monovertex" + mvtxscaling "github.com/numaproj/numaflow/pkg/reconciler/monovertex/scaling" plctrl "github.com/numaproj/numaflow/pkg/reconciler/pipeline" vertexctrl "github.com/numaproj/numaflow/pkg/reconciler/vertex" "github.com/numaproj/numaflow/pkg/reconciler/vertex/scaling" @@ -237,8 +238,9 @@ func Start(namespaced bool, managedNamespace string) { } // MonoVertex controller + mvtxAutoscaler := mvtxscaling.NewScaler(mgr.GetClient(), mvtxscaling.WithWorkers(20)) monoVertexController, err := controller.New(dfv1.ControllerMonoVertex, mgr, controller.Options{ - Reconciler: monovtxctrl.NewReconciler(mgr.GetClient(), mgr.GetScheme(), config, image, logger, mgr.GetEventRecorderFor(dfv1.ControllerMonoVertex)), + Reconciler: monovtxctrl.NewReconciler(mgr.GetClient(), mgr.GetScheme(), config, image, mvtxAutoscaler, logger, mgr.GetEventRecorderFor(dfv1.ControllerMonoVertex)), }) if err != nil { logger.Fatalw("Unable to set up MonoVertex controller", zap.Error(err)) @@ -276,9 +278,14 @@ func Start(namespaced bool, managedNamespace string) { logger.Fatalw("Unable to watch Deployments", zap.Error(err)) } - // Add autoscaling runner + // Add Vertex autoscaling runner if err := mgr.Add(LeaderElectionRunner(autoscaler.Start)); err != nil { - logger.Fatalw("Unable to add autoscaling runner", zap.Error(err)) + logger.Fatalw("Unable to add Vertex autoscaling runner", zap.Error(err)) + } + + // Add MonoVertex autoscaling runner + if err := mgr.Add(LeaderElectionRunner(mvtxAutoscaler.Start)); err != nil { + logger.Fatalw("Unable to add MonoVertex autoscaling runner", zap.Error(err)) } version := numaflow.GetVersion() diff --git a/pkg/reconciler/metrics.go b/pkg/reconciler/metrics.go index 5f92049f2d..ce96436556 100644 --- a/pkg/reconciler/metrics.go +++ b/pkg/reconciler/metrics.go @@ -45,6 +45,12 @@ var ( Help: "A metric to indicate whether the Pipeline is healthy. 
'1' means healthy, '0' means unhealthy", }, []string{metrics.LabelNamespace, metrics.LabelISBService}) + MonoVertexHealth = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Subsystem: "controller", + Name: "monovtx_health", + Help: "A metric to indicate whether the MonoVertex is healthy. '1' means healthy, '0' means unhealthy", + }, []string{metrics.LabelNamespace, metrics.LabelMonoVertexName}) + // JetStreamISBSvcReplicas indicates the replicas of a JetStream ISB Service. JetStreamISBSvcReplicas = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Subsystem: "controller", @@ -72,8 +78,25 @@ var ( Name: "vertex_current_replicas", Help: "A metric indicates the current replicas of a Vertex", }, []string{metrics.LabelNamespace, metrics.LabelPipeline, metrics.LabelVertex}) + + // MonoVertexDesiredReplicas indicates the desired replicas of a MonoVertex. + MonoVertexDesiredReplicas = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Subsystem: "controller", + Name: "monovtx_desired_replicas", + Help: "A metric indicates the desired replicas of a MonoVertex", + }, []string{metrics.LabelNamespace, metrics.LabelMonoVertexName}) + + // MonoVertexCurrentReplicas indicates the current replicas of a MonoVertex. 
+ MonoVertexCurrentReplicas = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Subsystem: "controller", + Name: "monovtx_current_replicas", + Help: "A metric indicates the current replicas of a MonoVertex", + }, []string{metrics.LabelNamespace, metrics.LabelMonoVertexName}) ) func init() { - ctrlmetrics.Registry.MustRegister(BuildInfo, ISBSvcHealth, PipelineHealth, JetStreamISBSvcReplicas, RedisISBSvcReplicas, VertexDesiredReplicas, VertexCurrentReplicas) + ctrlmetrics.Registry.MustRegister(BuildInfo, ISBSvcHealth, PipelineHealth, + MonoVertexHealth, JetStreamISBSvcReplicas, RedisISBSvcReplicas, + VertexDesiredReplicas, VertexCurrentReplicas, MonoVertexDesiredReplicas, + MonoVertexCurrentReplicas) } diff --git a/pkg/reconciler/monovertex/controller.go b/pkg/reconciler/monovertex/controller.go index 20246b8f9c..108580f3dc 100644 --- a/pkg/reconciler/monovertex/controller.go +++ b/pkg/reconciler/monovertex/controller.go @@ -39,6 +39,7 @@ import ( dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" "github.com/numaproj/numaflow/pkg/reconciler" + mvtxscaling "github.com/numaproj/numaflow/pkg/reconciler/monovertex/scaling" "github.com/numaproj/numaflow/pkg/shared/logging" sharedutil "github.com/numaproj/numaflow/pkg/shared/util" ) @@ -52,11 +53,12 @@ type monoVertexReconciler struct { image string logger *zap.SugaredLogger + scaler *mvtxscaling.Scaler recorder record.EventRecorder } -func NewReconciler(client client.Client, scheme *runtime.Scheme, config *reconciler.GlobalConfig, image string, logger *zap.SugaredLogger, recorder record.EventRecorder) reconcile.Reconciler { - return &monoVertexReconciler{client: client, scheme: scheme, config: config, image: image, logger: logger, recorder: recorder} +func NewReconciler(client client.Client, scheme *runtime.Scheme, config *reconciler.GlobalConfig, image string, scaler *mvtxscaling.Scaler, logger *zap.SugaredLogger, recorder record.EventRecorder) reconcile.Reconciler { + return &monoVertexReconciler{client: 
client, scheme: scheme, config: config, image: image, scaler: scaler, logger: logger, recorder: recorder} } func (mr *monoVertexReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { @@ -87,13 +89,28 @@ func (mr *monoVertexReconciler) Reconcile(ctx context.Context, req ctrl.Request) // reconcile does the real logic. func (mr *monoVertexReconciler) reconcile(ctx context.Context, monoVtx *dfv1.MonoVertex) (ctrl.Result, error) { log := logging.FromContext(ctx) + mVtxKey := mvtxscaling.KeyOfMonoVertex(*monoVtx) if !monoVtx.DeletionTimestamp.IsZero() { log.Info("Deleting mono vertex") + mr.scaler.StopWatching(mVtxKey) + // Clean up metrics + _ = reconciler.MonoVertexHealth.DeleteLabelValues(monoVtx.Namespace, monoVtx.Name) + _ = reconciler.MonoVertexDesiredReplicas.DeleteLabelValues(monoVtx.Namespace, monoVtx.Name) + _ = reconciler.MonoVertexCurrentReplicas.DeleteLabelValues(monoVtx.Namespace, monoVtx.Name) return ctrl.Result{}, nil } - monoVtx.Status.SetObservedGeneration(monoVtx.Generation) + // Set metrics + defer func() { + if monoVtx.Status.IsHealthy() { + reconciler.MonoVertexHealth.WithLabelValues(monoVtx.Namespace, monoVtx.Name).Set(1) + } else { + reconciler.MonoVertexHealth.WithLabelValues(monoVtx.Namespace, monoVtx.Name).Set(0) + } + }() + monoVtx.Status.SetObservedGeneration(monoVtx.Generation) + mr.scaler.StartWatching(mVtxKey) // TODO: handle lifecycle changes // Regular mono vertex change @@ -137,13 +154,19 @@ func (mr *monoVertexReconciler) reconcileNonLifecycleChanges(ctx context.Context } func (mr *monoVertexReconciler) reconcilePods(ctx context.Context, monoVtx *dfv1.MonoVertex) error { + desiredReplicas := monoVtx.GetReplicas() + // Set metrics + defer func() { + reconciler.MonoVertexDesiredReplicas.WithLabelValues(monoVtx.Namespace, monoVtx.Name).Set(float64(desiredReplicas)) + reconciler.MonoVertexCurrentReplicas.WithLabelValues(monoVtx.Namespace, monoVtx.Name).Set(float64(monoVtx.Status.Replicas)) + }() + log := 
logging.FromContext(ctx) existingPods, err := mr.findExistingPods(ctx, monoVtx) if err != nil { mr.markDeploymentFailedAndLogEvent(monoVtx, false, log, "FindExistingPodFailed", err.Error(), "Failed to find existing mono vertex pods", zap.Error(err)) return err } - desiredReplicas := monoVtx.GetReplicas() for replica := 0; replica < desiredReplicas; replica++ { podSpec, err := mr.buildPodSpec(monoVtx) if err != nil { diff --git a/pkg/reconciler/monovertex/scaling/doc.go b/pkg/reconciler/monovertex/scaling/doc.go new file mode 100644 index 0000000000..9a3d2ba933 --- /dev/null +++ b/pkg/reconciler/monovertex/scaling/doc.go @@ -0,0 +1,25 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package scaling provides the autoscaling capability for MonoVertex objects. +// +// A workqueue is implemented in this package to watch monovertices in the cluster, +// calculate the desired replica number for each of them periodically, and +// patch the MonoVertex spec. +// +// Function StartWatching() and StopWatching() are also provided in the package, +// so that monovertices can be added into and removed from the workqueue. +package scaling diff --git a/pkg/reconciler/monovertex/scaling/options.go b/pkg/reconciler/monovertex/scaling/options.go new file mode 100644 index 0000000000..10ed212181 --- /dev/null +++ b/pkg/reconciler/monovertex/scaling/options.go @@ -0,0 +1,57 @@ +/* +Copyright 2022 The Numaproj Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scaling + +type options struct { + // Number of workers working on autoscaling. + workers int + // Time in milliseconds, each element in the work queue will be picked up in an interval of this period of time. + taskInterval int + // size of the daemon clients cache. + clientsCacheSize int +} + +type Option func(*options) + +func defaultOptions() *options { + return &options{ + workers: 20, + taskInterval: 30000, + clientsCacheSize: 500, + } +} + +// WithWorkers sets the number of workers working on autoscaling. +func WithWorkers(n int) Option { + return func(o *options) { + o.workers = n + } +} + +// WithTaskInterval sets the interval of picking up a task from the work queue. +func WithTaskInterval(n int) Option { + return func(o *options) { + o.taskInterval = n + } +} + +// WithClientsCacheSize sets the size of the daemon clients cache. +func WithClientsCacheSize(n int) Option { + return func(o *options) { + o.clientsCacheSize = n + } +} diff --git a/pkg/reconciler/monovertex/scaling/scaling.go b/pkg/reconciler/monovertex/scaling/scaling.go new file mode 100644 index 0000000000..7557b236b4 --- /dev/null +++ b/pkg/reconciler/monovertex/scaling/scaling.go @@ -0,0 +1,366 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scaling + +import ( + "container/list" + "context" + "encoding/json" + "fmt" + "math" + "strings" + "sync" + "time" + + lru "github.com/hashicorp/golang-lru/v2" + "go.uber.org/zap" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + + dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" + "github.com/numaproj/numaflow/pkg/isb" + mvtxdaemonclient "github.com/numaproj/numaflow/pkg/mvtxdaemon/client" + "github.com/numaproj/numaflow/pkg/shared/logging" +) + +type Scaler struct { + client client.Client + monoVtxMap map[string]*list.Element + // List of the mono vertex namespaced name, format is "namespace/name" + monoVtxList *list.List + lock *sync.RWMutex + options *options + // Cache to store the vertex metrics such as pending message number + monoVtxMetricsCache *lru.Cache[string, int64] + mvtxDaemonClientsCache *lru.Cache[string, mvtxdaemonclient.MonoVertexDaemonClient] +} + +// NewScaler returns a Scaler instance. 
+func NewScaler(client client.Client, opts ...Option) *Scaler { + scalerOpts := defaultOptions() + for _, opt := range opts { + if opt != nil { + opt(scalerOpts) + } + } + s := &Scaler{ + client: client, + options: scalerOpts, + monoVtxMap: make(map[string]*list.Element), + monoVtxList: list.New(), + lock: new(sync.RWMutex), + } + // cache the clients + s.mvtxDaemonClientsCache, _ = lru.NewWithEvict[string, mvtxdaemonclient.MonoVertexDaemonClient](s.options.clientsCacheSize, func(key string, value mvtxdaemonclient.MonoVertexDaemonClient) { + _ = value.Close() + }) + monoVtxMetricsCache, _ := lru.New[string, int64](10000) + s.monoVtxMetricsCache = monoVtxMetricsCache + return s +} + +// Contains returns if the Scaler contains the key. +func (s *Scaler) Contains(key string) bool { + s.lock.RLock() + defer s.lock.RUnlock() + _, ok := s.monoVtxMap[key] + return ok +} + +// Length returns how many vertices are being watched for autoscaling +func (s *Scaler) Length() int { + s.lock.RLock() + defer s.lock.RUnlock() + return s.monoVtxList.Len() +} + +// StartWatching put a key (namespace/name) into the Scaler +func (s *Scaler) StartWatching(key string) { + s.lock.Lock() + defer s.lock.Unlock() + if _, ok := s.monoVtxMap[key]; !ok { + s.monoVtxMap[key] = s.monoVtxList.PushBack(key) + } +} + +// StopWatching stops autoscaling on the key (namespace/name) +func (s *Scaler) StopWatching(key string) { + s.lock.Lock() + defer s.lock.Unlock() + if e, ok := s.monoVtxMap[key]; ok { + _ = s.monoVtxList.Remove(e) + delete(s.monoVtxMap, key) + } +} + +// Function scale() defines each of the worker's job. 
+// It waits for keys in the channel, and starts a scaling job +func (s *Scaler) scale(ctx context.Context, id int, keyCh <-chan string) { + log := logging.FromContext(ctx) + log.Infof("Started MonoVertex autoscaling worker %v", id) + for { + select { + case <-ctx.Done(): + log.Infof("Stopped MonoVertex autoscaling worker %v", id) + return + case key := <-keyCh: + if err := s.scaleOneMonoVertex(ctx, key, id); err != nil { + log.Errorw("Failed to scale a MonoVertex", zap.String("monoVtx", key), zap.Error(err)) + } + } + } +} + +// scaleOneMonoVertex implements the detailed logic of scaling up/down a MonoVertex. +// +// desiredReplicas = currentReplicas * pending / (targetProcessingTime * rate) +func (s *Scaler) scaleOneMonoVertex(ctx context.Context, key string, worker int) error { + log := logging.FromContext(ctx).With("worker", fmt.Sprint(worker)).With("monoVtxKey", key) + log.Debugf("Working on key: %s.", key) + strs := strings.Split(key, "/") + if len(strs) != 2 { + return fmt.Errorf("invalid key %q", key) + } + namespace := strs[0] + monoVtxName := strs[1] + monoVtx := &dfv1.MonoVertex{} + if err := s.client.Get(ctx, client.ObjectKey{Namespace: namespace, Name: monoVtxName}, monoVtx); err != nil { + if apierrors.IsNotFound(err) { + s.StopWatching(key) + log.Info("No corresponding MonoVertex found, stopped watching.") + return nil + } + return fmt.Errorf("failed to query MonoVertex object of key %q, %w", key, err) + } + if !monoVtx.GetDeletionTimestamp().IsZero() { + s.StopWatching(key) + log.Debug("MonoVertex being deleted.") + return nil + } + if !monoVtx.Scalable() { + s.StopWatching(key) // Remove it in case it's watched. 
+ return nil + } + secondsSinceLastScale := time.Since(monoVtx.Status.LastScaledAt.Time).Seconds() + scaleDownCooldown := float64(monoVtx.Spec.Scale.GetScaleDownCooldownSeconds()) + scaleUpCooldown := float64(monoVtx.Spec.Scale.GetScaleUpCooldownSeconds()) + if secondsSinceLastScale < scaleDownCooldown && secondsSinceLastScale < scaleUpCooldown { + // Skip scaling without needing further calculation + log.Infof("Cooldown period, skip scaling.") + return nil + } + if monoVtx.Status.Phase != dfv1.MonoVertexPhaseRunning { + log.Infof("MonoVertex not in Running phase, skip scaling.") + return nil + } + // TODO: lifecycle + // if monoVtx.Spec.Lifecycle.GetDesiredPhase() != dfv1.MonoVertexPhaseRunning { + // log.Info("MonoVertex is pausing, skip scaling.") + // return nil + // } + if int(monoVtx.Status.Replicas) != monoVtx.GetReplicas() { + log.Infof("MonoVertex %s might be under processing, replicas mismatch, skip scaling.", monoVtx.Name) + return nil + } + + var err error + daemonClient, _ := s.mvtxDaemonClientsCache.Get(monoVtx.GetDaemonServiceURL()) + if daemonClient == nil { + daemonClient, err = mvtxdaemonclient.NewGRPCClient(monoVtx.GetDaemonServiceURL()) + if err != nil { + return fmt.Errorf("failed to get daemon service client for MonoVertex %s, %w", monoVtx.Name, err) + } + s.mvtxDaemonClientsCache.Add(monoVtx.GetDaemonServiceURL(), daemonClient) + } + + if monoVtx.Status.Replicas == 0 { // Was scaled to 0 + // Periodically wake them up from 0 replicas to 1, to peek for the incoming messages + if secondsSinceLastScale >= float64(monoVtx.Spec.Scale.GetZeroReplicaSleepSeconds()) { + log.Infof("MonoVertex %s has slept %v seconds, scaling up to peek.", monoVtx.Name, secondsSinceLastScale) + return s.patchMonoVertexReplicas(ctx, monoVtx, 1) + } else { + log.Infof("MonoVertex %q has slept %v seconds, hasn't reached zeroReplicaSleepSeconds (%v seconds), skip scaling.", monoVtx.Name, secondsSinceLastScale, monoVtx.Spec.Scale.GetZeroReplicaSleepSeconds()) + return nil + 
} + } + + vMetrics, err := daemonClient.GetMonoVertexMetrics(ctx) + if err != nil { + return fmt.Errorf("failed to get metrics of mono vertex key %q, %w", key, err) + } + totalRate := float64(0) + totalPending := int64(0) + rate, existing := vMetrics.ProcessingRates["default"] + // If rate is not available, we skip scaling. + if !existing || rate.GetValue() < 0 { // Rate not available + log.Infof("MonoVertex %s has no rate information, skip scaling.", monoVtxName) + return nil + } else { + totalRate = rate.GetValue() + } + pending, existing := vMetrics.Pendings["default"] + if !existing || pending.GetValue() < 0 || pending.GetValue() == isb.PendingNotAvailable { + // Pending not available, we don't do anything + log.Infof("MonoVertex %s has no pending messages information, skip scaling.", monoVtxName) + return nil + } else { + totalPending = pending.GetValue() + } + + desired := s.desiredReplicas(ctx, monoVtx, totalRate, totalPending) + log.Infof("Calculated desired replica number of MonoVertex %q is: %d.", monoVtx.Name, desired) + max := monoVtx.Spec.Scale.GetMaxReplicas() + min := monoVtx.Spec.Scale.GetMinReplicas() + if desired > max { + desired = max + log.Infof("Calculated desired replica number %d of MonoVertex %q is greater than max, using max %d.", monoVtxName, desired, max) + } + if desired < min { + desired = min + log.Infof("Calculated desired replica number %d of MonoVertex %q is smaller than min, using min %d.", monoVtxName, desired, min) + } + current := int32(monoVtx.GetReplicas()) + if current > max || current < min { // Someone might have manually scaled up/down the MonoVertex + return s.patchMonoVertexReplicas(ctx, monoVtx, desired) + } + maxAllowed := int32(monoVtx.Spec.Scale.GetReplicasPerScale()) + if desired < current { + diff := current - desired + if diff > maxAllowed { + diff = maxAllowed + } + if secondsSinceLastScale < scaleDownCooldown { + log.Infof("Cooldown period for scaling down, skip scaling.") + return nil + } + return 
s.patchMonoVertexReplicas(ctx, monoVtx, current-diff) // We scale down gradually + } + if desired > current { + diff := desired - current + if diff > maxAllowed { + diff = maxAllowed + } + if secondsSinceLastScale < scaleUpCooldown { + log.Infof("Cooldown period for scaling up, skip scaling.") + return nil + } + return s.patchMonoVertexReplicas(ctx, monoVtx, current+diff) // We scale up gradually + } + return nil +} + +func (s *Scaler) desiredReplicas(_ context.Context, monoVtx *dfv1.MonoVertex, processingRate float64, pending int64) int32 { + // Since pending contains the pending acks, if both totalRate and totalPending are 0, we scale down to 0 + if pending == 0 && processingRate == 0 { + return 0 + } + if processingRate == 0 { // Something is wrong, we don't do anything. + return int32(monoVtx.Status.Replicas) + } + + var desired int32 + // We calculate the time of finishing processing the pending messages, + // and then we know how many replicas are needed to get them done in target seconds. + desired = int32(math.Round(((float64(pending) / processingRate) / float64(monoVtx.Spec.Scale.GetTargetProcessingSeconds())) * float64(monoVtx.Status.Replicas))) + + // we only scale down to zero when the pending and rate are both zero. + if desired == 0 { + desired = 1 + } + if desired > int32(pending) && pending > 0 { // For some corner cases, we don't want to scale up to more than pending. + desired = int32(pending) + } + return desired +} + +// Start function starts the autoscaling worker group. +// Each worker keeps picking up scaling tasks (which contains mono vertex keys) to calculate the desired replicas, +// and patch the mono vertex spec with the new replica number if needed. 
+func (s *Scaler) Start(ctx context.Context) error { + log := logging.FromContext(ctx).Named("mvtx-autoscaler") + log.Info("Starting MonoVertex autoscaler...") + keyCh := make(chan string) + ctx, cancel := context.WithCancel(logging.WithLogger(ctx, log)) + defer cancel() + // Worker group + for i := 1; i <= s.options.workers; i++ { + go s.scale(ctx, i, keyCh) + } + + // Function assign() moves an element in the list from the front to the back, + // and send to the channel so that it can be picked up by a worker. + assign := func() { + s.lock.Lock() + defer s.lock.Unlock() + if s.monoVtxList.Len() == 0 { + return + } + e := s.monoVtxList.Front() + if key, ok := e.Value.(string); ok { + s.monoVtxList.MoveToBack(e) + keyCh <- key + } + } + + // Following for loop keeps calling assign() function to assign scaling tasks to the workers. + // It makes sure each element in the list will be assigned every N milliseconds. + for { + select { + case <-ctx.Done(): + log.Info("Shutting down mono vertex autoscaling job assigner.") + // clear the daemon clients cache + s.mvtxDaemonClientsCache.Purge() + return nil + default: + assign() + } + // Make sure each of the key will be assigned at most every N milliseconds. 
+ time.Sleep(time.Millisecond * time.Duration(func() int { + l := s.Length() + if l == 0 { + return s.options.taskInterval + } + result := s.options.taskInterval / l + if result > 0 { + return result + } + return 1 + }())) + } +} + +func (s *Scaler) patchMonoVertexReplicas(ctx context.Context, monoVtx *dfv1.MonoVertex, desiredReplicas int32) error { + log := logging.FromContext(ctx) + origin := monoVtx.Spec.Replicas + monoVtx.Spec.Replicas = ptr.To[int32](desiredReplicas) + body, err := json.Marshal(monoVtx) + if err != nil { + return fmt.Errorf("failed to marshal MonoVertex object to json, %w", err) + } + if err := s.client.Patch(ctx, monoVtx, client.RawPatch(types.MergePatchType, body)); err != nil && !apierrors.IsNotFound(err) { + return fmt.Errorf("failed to patch MonoVertex replicas, %w", err) + } + log.Infow("Auto scaling - mono vertex replicas changed.", zap.Int32p("from", origin), zap.Int32("to", desiredReplicas), zap.String("namespace", monoVtx.Namespace), zap.String("vertex", monoVtx.Name)) + return nil +} + +// KeyOfMonoVertex returns the unique key of a MonoVertex +func KeyOfMonoVertex(monoVtx dfv1.MonoVertex) string { + return fmt.Sprintf("%s/%s", monoVtx.Namespace, monoVtx.Name) +} diff --git a/pkg/reconciler/monovertex/scaling/scaling_test.go b/pkg/reconciler/monovertex/scaling/scaling_test.go new file mode 100644 index 0000000000..f0823fe1b5 --- /dev/null +++ b/pkg/reconciler/monovertex/scaling/scaling_test.go @@ -0,0 +1,17 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scaling diff --git a/pkg/reconciler/vertex/scaling/doc.go b/pkg/reconciler/vertex/scaling/doc.go index a5004a6899..37f3dec1b8 100644 --- a/pkg/reconciler/vertex/scaling/doc.go +++ b/pkg/reconciler/vertex/scaling/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package scaling provides the autoscaling capability for Numaflow. +// Package scaling provides the autoscaling capability for Vertex objects. // // A workqueue is implemented in this package to watch vertices in the cluster, // calculate the desired replica number for each of them periodically, and diff --git a/pkg/reconciler/vertex/scaling/scaling.go b/pkg/reconciler/vertex/scaling/scaling.go index 6384e12726..741f11a668 100644 --- a/pkg/reconciler/vertex/scaling/scaling.go +++ b/pkg/reconciler/vertex/scaling/scaling.go @@ -113,11 +113,11 @@ func (s *Scaler) StopWatching(key string) { // It waits for keys in the channel, and starts a scaling job func (s *Scaler) scale(ctx context.Context, id int, keyCh <-chan string) { log := logging.FromContext(ctx) - log.Infof("Started autoscaling worker %v", id) + log.Infof("Started Vertex autoscaling worker %v", id) for { select { case <-ctx.Done(): - log.Infof("Stopped scaling worker %v", id) + log.Infof("Stopped Vertex autoscaling worker %v", id) return case key := <-keyCh: if err := s.scaleOneVertex(ctx, key, id); err != nil { @@ -385,7 +385,7 @@ func (s *Scaler) desiredReplicas(_ context.Context, vertex *dfv1.Vertex, partiti if desired == 0 { desired = 1 } - if desired > int32(pending) { // For some corner cases, we don't want to scale up to more than pending. + if desired > int32(pending) && pending > 0 { // For some corner cases, we don't want to scale up to more than pending. 
desired = int32(pending) } // maxDesired is the max of all partitions @@ -400,8 +400,8 @@ func (s *Scaler) desiredReplicas(_ context.Context, vertex *dfv1.Vertex, partiti // Each worker keeps picking up scaling tasks (which contains vertex keys) to calculate the desired replicas, // and patch the vertex spec with the new replica number if needed. func (s *Scaler) Start(ctx context.Context) error { - log := logging.FromContext(ctx).Named("autoscaler") - log.Info("Starting autoscaler...") + log := logging.FromContext(ctx).Named("vertex-autoscaler") + log.Info("Starting vertex autoscaler...") keyCh := make(chan string) ctx, cancel := context.WithCancel(logging.WithLogger(ctx, log)) defer cancel() diff --git a/pkg/daemon/server/service/rater/uniq_str_list.go b/pkg/shared/util/uniq_str_list.go similarity index 99% rename from pkg/daemon/server/service/rater/uniq_str_list.go rename to pkg/shared/util/uniq_str_list.go index 8bc13f103d..4a5c09be85 100644 --- a/pkg/daemon/server/service/rater/uniq_str_list.go +++ b/pkg/shared/util/uniq_str_list.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package rater +package util import ( "container/list" diff --git a/pkg/daemon/server/service/rater/uniq_str_list_test.go b/pkg/shared/util/uniq_str_list_test.go similarity index 99% rename from pkg/daemon/server/service/rater/uniq_str_list_test.go rename to pkg/shared/util/uniq_str_list_test.go index 94f958e0cc..ab88096bf4 100644 --- a/pkg/daemon/server/service/rater/uniq_str_list_test.go +++ b/pkg/shared/util/uniq_str_list_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package rater +package util import ( "testing" diff --git a/rust/Cargo.lock b/rust/Cargo.lock index db2f0404bd..3b4bfaa19b 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -627,6 +627,12 @@ dependencies = [ "const-random", ] +[[package]] +name = "dtoa" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcbb2bf8e87535c23f7a8a321e364ce21462d0ff10cb6407820e8e96dfff6653" + [[package]] name = "dunce" version = "1.0.5" @@ -1566,12 +1572,12 @@ dependencies = [ "bytes", "chrono", "hyper-util", - "metrics", - "metrics-exporter-prometheus", "numaflow 0.1.0 (git+https://github.com/numaproj/numaflow-rs.git?branch=main)", "numaflow-models", "once_cell", + "parking_lot", "pep440_rs", + "prometheus-client", "prost", "prost-types", "rcgen", @@ -2050,6 +2056,29 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "prometheus-client" +version = "0.22.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "504ee9ff529add891127c4827eb481bd69dc0ebc72e9a682e187db4caa60c3ca" +dependencies = [ + "dtoa", + "itoa", + "parking_lot", + "prometheus-client-derive-encode", +] + +[[package]] +name = "prometheus-client-derive-encode" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "prost" version = "0.13.1" diff --git a/rust/monovertex/Cargo.toml b/rust/monovertex/Cargo.toml index 3e98b10d69..4efb9658b2 100644 --- a/rust/monovertex/Cargo.toml +++ b/rust/monovertex/Cargo.toml @@ -17,8 +17,6 @@ prost = "0.13.1" prost-types = "0.13.1" chrono = "0.4.31" base64 = "0.22.1" -metrics = { version = "0.23.0", default-features = false } -metrics-exporter-prometheus = { version = "0.15.3", default-features = false } tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } hyper-util = "0.1.6" tower = "0.4.13" @@ 
-33,6 +31,8 @@ serde = { version = "1.0.204", features = ["derive"] } semver = "1.0" pep440_rs = "0.6.6" backoff = { path = "../backoff" } +parking_lot = "0.12.3" +prometheus-client = "0.22.3" [dev-dependencies] tower = "0.4.13" diff --git a/rust/monovertex/src/forwarder.rs b/rust/monovertex/src/forwarder.rs index 2efd447784..f774cc80b0 100644 --- a/rust/monovertex/src/forwarder.rs +++ b/rust/monovertex/src/forwarder.rs @@ -1,15 +1,12 @@ use crate::config::config; use crate::error::{Error, Result}; use crate::message::Offset; -use crate::metrics::{ - FORWARDER_ACK_TOTAL, FORWARDER_READ_BYTES_TOTAL, FORWARDER_READ_TOTAL, FORWARDER_WRITE_TOTAL, - MONO_VERTEX_NAME, PARTITION_LABEL, REPLICA_LABEL, VERTEX_TYPE_LABEL, -}; +use crate::metrics; +use crate::metrics::forward_metrics; use crate::sink::{proto, SinkClient}; use crate::source::SourceClient; use crate::transformer::TransformerClient; use chrono::Utc; -use metrics::counter; use std::collections::HashMap; use tokio::task::JoinSet; use tokio::time::sleep; @@ -17,8 +14,6 @@ use tokio_util::sync::CancellationToken; use tracing::info; use tracing::log::warn; -const MONO_VERTEX_TYPE: &str = "mono_vertex"; - /// Forwarder is responsible for reading messages from the source, applying transformation if /// transformer is present, writing the messages to the sink, and then acknowledging the messages /// back to the source. 
@@ -38,15 +33,7 @@ impl Forwarder { transformer_client: Option, cln_token: CancellationToken, ) -> Result { - let common_labels = vec![ - ( - MONO_VERTEX_NAME.to_string(), - config().mono_vertex_name.clone(), - ), - (VERTEX_TYPE_LABEL.to_string(), MONO_VERTEX_TYPE.to_string()), - (REPLICA_LABEL.to_string(), config().replica.to_string()), - (PARTITION_LABEL.to_string(), "0".to_string()), - ]; + let common_labels = metrics::forward_metrics_labels().clone(); Ok(Self { source_client, @@ -76,14 +63,14 @@ impl Forwarder { // Read messages from the source let messages = result?; info!("Read batch size: {} and latency - {}ms", messages.len(), start_time.elapsed().as_millis()); - + // emit metrics + let msg_count = messages.len() as u64; // collect all the offsets as the transformer can drop (via filter) messages let offsets = messages.iter().map(|msg| msg.offset.clone()).collect::>(); - messages_count += messages.len() as u64; let bytes_count = messages.iter().map(|msg| msg.value.len() as u64).sum::(); - counter!(FORWARDER_READ_TOTAL, &self.common_labels).increment(messages_count); - counter!(FORWARDER_READ_BYTES_TOTAL, &self.common_labels).increment(bytes_count); + forward_metrics().monovtx_read_total.get_or_create(&self.common_labels).inc_by(msg_count); + forward_metrics().monovtx_read_bytes_total.get_or_create(&self.common_labels).inc_by(bytes_count); // Apply transformation if transformer is present let transformed_messages = if let Some(transformer_client) = &self.transformer_client { @@ -107,10 +94,14 @@ impl Forwarder { messages }; + let transformed_msg_count = transformed_messages.len() as u64; + forward_metrics().monovtx_sink_write_total.get_or_create(&self.common_labels).inc_by(transformed_msg_count); + // Write messages to the sink // TODO: should we retry writing? what if the error is transient? // we could rely on gRPC retries and say that any error that is bubbled up is worthy of non-0 exit. // we need to confirm this via FMEA tests. 
+ let mut retry_messages = transformed_messages; let mut attempts = 0; let mut error_map = HashMap::new(); @@ -125,7 +116,6 @@ impl Forwarder { .filter(|result| result.status != proto::Status::Success as i32) .map(|result| result.id.clone()) .collect(); - attempts += 1; if failed_ids.is_empty() { @@ -154,13 +144,12 @@ impl Forwarder { attempts, error_map ))); } - // Acknowledge the messages back to the source let start_time = tokio::time::Instant::now(); self.source_client.ack_fn(offsets).await?; info!("Ack latency - {}ms", start_time.elapsed().as_millis()); - - counter!(FORWARDER_ACK_TOTAL, &self.common_labels).increment(messages_count); + // increment the acked messages count metric + forward_metrics().monovtx_ack_total.get_or_create(&self.common_labels).inc_by(msg_count); } } // if the last forward was more than 1 second ago, forward a chunk print the number of messages forwarded @@ -173,6 +162,10 @@ impl Forwarder { messages_count = 0; last_forwarded_at = std::time::Instant::now(); } + forward_metrics() + .monovtx_processing_time + .get_or_create(&self.common_labels) + .observe(start_time.elapsed().as_micros() as f64); } Ok(()) } diff --git a/rust/monovertex/src/lib.rs b/rust/monovertex/src/lib.rs index 823ffc6870..f3864f7531 100644 --- a/rust/monovertex/src/lib.rs +++ b/rust/monovertex/src/lib.rs @@ -24,7 +24,7 @@ use tracing_subscriber::EnvFilter; /// - Send Acknowledgement back to the Source pub mod error; -pub mod metrics; +mod metrics; pub mod source; diff --git a/rust/monovertex/src/metrics.rs b/rust/monovertex/src/metrics.rs index f3d5421dbc..cd792d9693 100644 --- a/rust/monovertex/src/metrics.rs +++ b/rust/monovertex/src/metrics.rs @@ -1,15 +1,13 @@ -use std::future::ready; use std::net::SocketAddr; -use std::sync::Arc; +use std::sync::{Arc, OnceLock}; use std::time::Duration; +use axum::body::Body; use axum::extract::State; -use axum::http::StatusCode; +use axum::http::{Response, StatusCode}; use axum::response::IntoResponse; use axum::{routing::get, 
Router}; use axum_server::tls_rustls::RustlsConfig; -use metrics::describe_counter; -use metrics_exporter_prometheus::{Matcher, PrometheusBuilder, PrometheusHandle}; use rcgen::{generate_simple_self_signed, CertifiedKey}; use tokio::net::{TcpListener, ToSocketAddrs}; use tokio::sync::Mutex; @@ -17,22 +15,36 @@ use tokio::task::JoinHandle; use tokio::time; use tracing::{debug, error, info}; +use crate::config::config; use crate::error::Error; use crate::sink::SinkClient; use crate::source::SourceClient; use crate::transformer::TransformerClient; +use prometheus_client::encoding::text::encode; +use prometheus_client::metrics::counter::Counter; +use prometheus_client::metrics::family::Family; +use prometheus_client::metrics::gauge::Gauge; +use prometheus_client::metrics::histogram::{exponential_buckets, Histogram}; +use prometheus_client::registry::Registry; // Define the labels for the metrics -pub const MONO_VERTEX_NAME: &str = "vertex"; -pub const REPLICA_LABEL: &str = "replica"; -pub const PARTITION_LABEL: &str = "partition_name"; -pub const VERTEX_TYPE_LABEL: &str = "vertex_type"; +// Note: Please keep consistent with the definitions in MonoVertex daemon +pub const MONO_VERTEX_NAME_LABEL: &str = "mvtx_name"; +pub const REPLICA_LABEL: &str = "mvtx_replica"; +const PENDING_PERIOD_LABEL: &str = "period"; // Define the metrics -pub const FORWARDER_READ_TOTAL: &str = "forwarder_read_total"; -pub const FORWARDER_READ_BYTES_TOTAL: &str = "forwarder_read_bytes_total"; -pub const FORWARDER_ACK_TOTAL: &str = "forwarder_ack_total"; -pub const FORWARDER_WRITE_TOTAL: &str = "forwarder_write_total"; +// Note: We do not add a suffix to the metric name, as the suffix is inferred through the metric type +// by the prometheus client library +// refer: https://github.com/prometheus/client_rust/blob/master/src/registry.rs#L102 + +// Note: Please keep consistent with the definitions in MonoVertex daemon +const MONOVTX_READ_TOTAL: &str = "monovtx_read"; +const 
MONOVTX_READ_BYTES_TOTAL: &str = "monovtx_read_bytes"; +const MONOVTX_ACK_TOTAL: &str = "monovtx_ack"; +const MONOVTX_SINK_WRITE_TOTAL: &str = "monovtx_sink_write"; +const MONOVTX_PROCESSING_TIME: &str = "monovtx_processing_time"; +const MONOVTX_PENDING: &str = "monovtx_pending"; #[derive(Clone)] pub(crate) struct MetricsState { @@ -41,6 +53,151 @@ pub(crate) struct MetricsState { pub transformer_client: Option, } +/// The global register of all metrics. +#[derive(Default)] +pub struct GlobalRegistry { + // It is okay to use std mutex because we register each metric only one time. + pub registry: parking_lot::Mutex, +} + +impl GlobalRegistry { + fn new() -> Self { + GlobalRegistry { + // Create a new registry for the metrics + registry: parking_lot::Mutex::new(Registry::default()), + } + } +} + +/// GLOBAL_REGISTER is the static global registry which is initialized +// only once. +static GLOBAL_REGISTER: OnceLock = OnceLock::new(); + +/// global_registry is a helper function to get the GLOBAL_REGISTER +fn global_registry() -> &'static GlobalRegistry { + GLOBAL_REGISTER.get_or_init(GlobalRegistry::new) +} + +// TODO: let's do sub-registry for forwarder so tomorrow we can add sink and source metrics. +/// MonoVtxMetrics is a struct which is used for storing the metrics related to MonoVertex +// These fields are exposed as pub to be used by other modules for +// changing the value of the metrics +// Each metric is defined as family of metrics, which means that they can be +// differentiated by their label values assigned. +// The labels are provided in the form of Vec<(String, String) +// The second argument is the metric kind. 
+pub struct MonoVtxMetrics { + pub monovtx_read_total: Family, Counter>, + pub monovtx_read_bytes_total: Family, Counter>, + pub monovtx_ack_total: Family, Counter>, + pub monovtx_sink_write_total: Family, Counter>, + pub monovtx_processing_time: Family, Histogram>, + pub monovtx_pending: Family, Gauge>, +} + +/// impl the MonoVtxMetrics struct and create a new object +impl MonoVtxMetrics { + fn new() -> Self { + let monovtx_read_total = Family::, Counter>::default(); + let monovtx_ack_total = Family::, Counter>::default(); + let monovtx_read_bytes_total = Family::, Counter>::default(); + let monovtx_sink_write_total = Family::, Counter>::default(); + + let monovtx_processing_time = + Family::, Histogram>::new_with_constructor(|| { + Histogram::new(exponential_buckets(100.0, 60000000.0 * 15.0, 10)) + }); + let monovtx_pending = Family::, Gauge>::default(); + + let metrics = Self { + monovtx_read_total, + monovtx_read_bytes_total, + monovtx_ack_total, + monovtx_sink_write_total, + monovtx_processing_time, + monovtx_pending, + }; + + let mut registry = global_registry().registry.lock(); + // Register all the metrics to the global registry + registry.register( + MONOVTX_READ_TOTAL, + "A Counter to keep track of the total number of messages read from the source", + metrics.monovtx_read_total.clone(), + ); + registry.register( + MONOVTX_SINK_WRITE_TOTAL, + "A Counter to keep track of the total number of messages written to the sink", + metrics.monovtx_sink_write_total.clone(), + ); + registry.register( + MONOVTX_ACK_TOTAL, + "A Counter to keep track of the total number of messages acknowledged by the sink", + metrics.monovtx_ack_total.clone(), + ); + registry.register( + MONOVTX_PROCESSING_TIME, + "A Histogram to keep track of the total time taken to forward a chunk, the time is in microseconds", + metrics.monovtx_processing_time.clone(), + ); + registry.register( + MONOVTX_READ_BYTES_TOTAL, + "A Counter to keep track of the total number of bytes read from the source", 
+ metrics.monovtx_read_bytes_total.clone(), + ); + registry.register( + MONOVTX_PENDING, + "A Gauge to keep track of the total number of pending messages for the monovtx", + metrics.monovtx_pending.clone(), + ); + + metrics + } +} + +/// MONOVTX_METRICS is the MonoVtxMetrics object which stores the metrics +static MONOVTX_METRICS: OnceLock = OnceLock::new(); + +// forward_metrics is a helper function used to fetch the +// MonoVtxMetrics object +pub(crate) fn forward_metrics() -> &'static MonoVtxMetrics { + MONOVTX_METRICS.get_or_init(|| { + let metrics = MonoVtxMetrics::new(); + metrics + }) +} + +/// MONOVTX_METRICS_LABELS are used to store the common labels used in the metrics +static MONOVTX_METRICS_LABELS: OnceLock> = OnceLock::new(); + +// forward_metrics_labels is a helper function used to fetch the +// MONOVTX_METRICS_LABELS object +pub(crate) fn forward_metrics_labels() -> &'static Vec<(String, String)> { + crate::metrics::MONOVTX_METRICS_LABELS.get_or_init(|| { + let common_labels = vec![ + ( + MONO_VERTEX_NAME_LABEL.to_string(), + config().mono_vertex_name.clone(), + ), + (REPLICA_LABEL.to_string(), config().replica.to_string()), + ]; + common_labels + }) +} + +// metrics_handler is used to generate and return a snapshot of the +// current state of the metrics in the global registry +pub async fn metrics_handler() -> impl IntoResponse { + let state = global_registry().registry.lock(); + let mut buffer = String::new(); + encode(&mut buffer, &*state).unwrap(); + debug!("Exposing Metrics: {:?}", buffer); + Response::builder() + .status(StatusCode::OK) + .body(Body::from(buffer)) + .unwrap() +} + /// Collect and emit prometheus metrics. /// Metrics router and server over HTTP endpoint. 
// This is not used currently @@ -54,17 +211,11 @@ pub(crate) async fn start_metrics_http_server( where A: ToSocketAddrs + std::fmt::Debug, { - // setup_metrics_recorder should only be invoked once - let recorder_handle = setup_metrics_recorder()?; - - let metrics_app = metrics_router( - recorder_handle, - MetricsState { - source_client, - sink_client, - transformer_client, - }, - ); + let metrics_app = metrics_router(MetricsState { + source_client, + sink_client, + transformer_client, + }); let listener = TcpListener::bind(&addr) .await @@ -92,10 +243,7 @@ pub(crate) async fn start_metrics_https_server( .await .map_err(|e| Error::MetricsError(format!("Creating tlsConfig from pem: {}", e)))?; - // setup_metrics_recorder should only be invoked once - let recorder_handle = setup_metrics_recorder()?; - - let metrics_app = metrics_router(recorder_handle, metrics_state); + let metrics_app = metrics_router(metrics_state); axum_server::bind_rustls(addr, tls_config) .serve(metrics_app.into_make_service()) @@ -106,9 +254,9 @@ pub(crate) async fn start_metrics_https_server( } /// router for metrics and k8s health endpoints -fn metrics_router(recorder_handle: PrometheusHandle, metrics_state: MetricsState) -> Router { +fn metrics_router(metrics_state: MetricsState) -> Router { Router::new() - .route("/metrics", get(move || ready(recorder_handle.render()))) + .route("/metrics", get(metrics_handler)) .route("/livez", get(livez)) .route("/readyz", get(readyz)) .route("/sidecar-livez", get(sidecar_livez)) @@ -138,45 +286,6 @@ async fn sidecar_livez(State(mut state): State) -> impl IntoRespon StatusCode::NO_CONTENT } -/// setup the Prometheus metrics recorder. 
-fn setup_metrics_recorder() -> crate::Result { - // 1 micro-sec < t < 1000 seconds - let log_to_power_of_sqrt2_bins: [f64; 62] = (0..62) - .map(|i| 2_f64.sqrt().powf(i as f64)) - .collect::>() - .try_into() - .unwrap(); - - let prometheus_handle = PrometheusBuilder::new() - .set_buckets_for_metric( - Matcher::Full("fac_total_duration_micros".to_string()), // fac == forward-a-chunk - &log_to_power_of_sqrt2_bins, - ) - .map_err(|e| Error::MetricsError(format!("Prometheus install_recorder: {}", e)))? - .install_recorder() - .map_err(|e| Error::MetricsError(format!("Prometheus install_recorder: {}", e)))?; - - // Define forwarder metrics - describe_counter!( - FORWARDER_READ_TOTAL, - "Total number of Data Messages Read in the forwarder" - ); - describe_counter!( - FORWARDER_READ_BYTES_TOTAL, - "Total number of bytes read in the forwarder" - ); - describe_counter!( - FORWARDER_ACK_TOTAL, - "Total number of acknowledgments by the forwarder" - ); - describe_counter!( - FORWARDER_WRITE_TOTAL, - "Total number of Data Messages written by the forwarder" - ); - - Ok(prometheus_handle) -} - const MAX_PENDING_STATS: usize = 1800; // Pending info with timestamp @@ -319,7 +428,12 @@ async fn expose_pending_metrics( for (label, seconds) in &lookback_seconds_map { let pending = calculate_pending(*seconds, &pending_stats).await; if pending != -1 { - // TODO: emit it as a metric + let mut metric_labels = forward_metrics_labels().clone(); + metric_labels.push((PENDING_PERIOD_LABEL.to_string(), label.to_string())); + forward_metrics() + .monovtx_pending + .get_or_create(&metric_labels) + .set(pending); info!("Pending messages ({}): {}", label, pending); } } diff --git a/server/apis/interface.go b/server/apis/interface.go index ff41eb3ebe..2e2c87d30f 100644 --- a/server/apis/interface.go +++ b/server/apis/interface.go @@ -46,4 +46,5 @@ type Handler interface { GetMonoVertex(c *gin.Context) ListMonoVertexPods(c *gin.Context) CreateMonoVertex(c *gin.Context) + GetMonoVertexMetrics(c 
*gin.Context) } diff --git a/server/apis/v1/handler.go b/server/apis/v1/handler.go index 59dc00c810..fb53cfbe42 100644 --- a/server/apis/v1/handler.go +++ b/server/apis/v1/handler.go @@ -46,6 +46,7 @@ import ( dfv1versiond "github.com/numaproj/numaflow/pkg/client/clientset/versioned" dfv1clients "github.com/numaproj/numaflow/pkg/client/clientset/versioned/typed/numaflow/v1alpha1" daemonclient "github.com/numaproj/numaflow/pkg/daemon/client" + mvtdaemonclient "github.com/numaproj/numaflow/pkg/mvtxdaemon/client" "github.com/numaproj/numaflow/pkg/shared/util" "github.com/numaproj/numaflow/pkg/webhook/validator" "github.com/numaproj/numaflow/server/authn" @@ -89,14 +90,15 @@ func WithReadOnlyMode() HandlerOption { } type handler struct { - kubeClient kubernetes.Interface - metricsClient *metricsversiond.Clientset - numaflowClient dfv1clients.NumaflowV1alpha1Interface - daemonClientsCache *lru.Cache[string, daemonclient.DaemonClient] - dexObj *DexObject - localUsersAuthObject *LocalUsersAuthObject - healthChecker *HealthChecker - opts *handlerOptions + kubeClient kubernetes.Interface + metricsClient *metricsversiond.Clientset + numaflowClient dfv1clients.NumaflowV1alpha1Interface + daemonClientsCache *lru.Cache[string, daemonclient.DaemonClient] + mvtDaemonClientsCache *lru.Cache[string, mvtdaemonclient.MonoVertexDaemonClient] + dexObj *DexObject + localUsersAuthObject *LocalUsersAuthObject + healthChecker *HealthChecker + opts *handlerOptions } // NewHandler is used to provide a new instance of the handler type @@ -118,6 +120,9 @@ func NewHandler(ctx context.Context, dexObj *DexObject, localUsersAuthObject *Lo daemonClientsCache, _ := lru.NewWithEvict[string, daemonclient.DaemonClient](500, func(key string, value daemonclient.DaemonClient) { _ = value.Close() }) + mvtDaemonClientsCache, _ := lru.NewWithEvict[string, mvtdaemonclient.MonoVertexDaemonClient](500, func(key string, value mvtdaemonclient.MonoVertexDaemonClient) { + _ = value.Close() + }) o := 
defaultHandlerOptions() for _, opt := range opts { if opt != nil { @@ -125,14 +130,15 @@ func NewHandler(ctx context.Context, dexObj *DexObject, localUsersAuthObject *Lo } } return &handler{ - kubeClient: kubeClient, - metricsClient: metricsClient, - numaflowClient: numaflowClient, - daemonClientsCache: daemonClientsCache, - dexObj: dexObj, - localUsersAuthObject: localUsersAuthObject, - healthChecker: NewHealthChecker(ctx), - opts: o, + kubeClient: kubeClient, + metricsClient: metricsClient, + numaflowClient: numaflowClient, + daemonClientsCache: daemonClientsCache, + mvtDaemonClientsCache: mvtDaemonClientsCache, + dexObj: dexObj, + localUsersAuthObject: localUsersAuthObject, + healthChecker: NewHealthChecker(ctx), + opts: o, }, nil } @@ -434,7 +440,7 @@ func (h *handler) GetPipeline(c *gin.Context) { } // get pipeline lag - client, err := h.getDaemonClient(ns, pipeline) + client, err := h.getPipelineDaemonClient(ns, pipeline) if err != nil || client == nil { h.respondWithError(c, fmt.Sprintf("failed to get daemon service client for pipeline %q, %s", pipeline, err.Error())) return @@ -551,7 +557,9 @@ func (h *handler) DeletePipeline(c *gin.Context) { } // cleanup client after successfully deleting pipeline - h.daemonClientsCache.Remove(daemonSvcAddress(ns, pipeline)) + // NOTE: if a pipeline was deleted by not through UI, the cache will not be updated, + // the entry becomes invalid and will be evicted only after the cache is full. 
+ h.daemonClientsCache.Remove(pipelineDaemonSvcAddress(ns, pipeline)) c.JSON(http.StatusOK, NewNumaflowAPIResponse(nil, nil)) } @@ -739,7 +747,7 @@ func (h *handler) DeleteInterStepBufferService(c *gin.Context) { func (h *handler) ListPipelineBuffers(c *gin.Context) { ns, pipeline := c.Param("namespace"), c.Param("pipeline") - client, err := h.getDaemonClient(ns, pipeline) + client, err := h.getPipelineDaemonClient(ns, pipeline) if err != nil || client == nil { h.respondWithError(c, fmt.Sprintf("failed to get daemon service client for pipeline %q, %s", pipeline, err.Error())) return @@ -758,7 +766,7 @@ func (h *handler) ListPipelineBuffers(c *gin.Context) { func (h *handler) GetPipelineWatermarks(c *gin.Context) { ns, pipeline := c.Param("namespace"), c.Param("pipeline") - client, err := h.getDaemonClient(ns, pipeline) + client, err := h.getPipelineDaemonClient(ns, pipeline) if err != nil || client == nil { h.respondWithError(c, fmt.Sprintf("failed to get daemon service client for pipeline %q, %s", pipeline, err.Error())) return @@ -850,7 +858,7 @@ func (h *handler) GetVerticesMetrics(c *gin.Context) { return } - client, err := h.getDaemonClient(ns, pipeline) + client, err := h.getPipelineDaemonClient(ns, pipeline) if err != nil || client == nil { h.respondWithError(c, fmt.Sprintf("failed to get daemon service client for pipeline %q, %s", pipeline, err.Error())) return @@ -1013,7 +1021,7 @@ func (h *handler) GetPipelineStatus(c *gin.Context) { } // Get a new daemon client for the given pipeline - client, err := h.getDaemonClient(ns, pipeline) + client, err := h.getPipelineDaemonClient(ns, pipeline) if err != nil || client == nil { h.respondWithError(c, fmt.Sprintf("failed to get daemon service client for pipeline %q, %s", pipeline, err.Error())) return @@ -1118,6 +1126,25 @@ func (h *handler) ListMonoVertexPods(c *gin.Context) { c.JSON(http.StatusOK, NewNumaflowAPIResponse(nil, pods.Items)) } +// GetMonoVertexMetrics is used to provide information about one mono 
vertex, including processing rates. +func (h *handler) GetMonoVertexMetrics(c *gin.Context) { + ns, monoVertex := c.Param("namespace"), c.Param("mono-vertex") + + client, err := h.getMonoVertexDaemonClient(ns, monoVertex) + if err != nil || client == nil { + h.respondWithError(c, fmt.Sprintf("failed to get daemon service client for mono vertex %q, %s", monoVertex, err.Error())) + return + } + + metrics, err := client.GetMonoVertexMetrics(c) + if err != nil { + h.respondWithError(c, fmt.Sprintf("Failed to get the mono vertex metrics: namespace %q mono vertex %q: %s", ns, monoVertex, err.Error())) + return + } + + c.JSON(http.StatusOK, NewNumaflowAPIResponse(nil, metrics)) +} + // getAllNamespaces is a utility used to fetch all the namespaces in the cluster // except the kube system namespaces func getAllNamespaces(h *handler) ([]string, error) { @@ -1312,26 +1339,54 @@ func validatePipelinePatch(patch []byte) error { return nil } -func daemonSvcAddress(ns, pipeline string) string { - return fmt.Sprintf("%s.%s.svc:%d", fmt.Sprintf("%s-daemon-svc", pipeline), ns, dfv1.DaemonServicePort) +func pipelineDaemonSvcAddress(ns, pipelineName string) string { + // the format is consistent with what we defined in GetDaemonServiceURL in `pkg/apis/numaflow/v1alpha1/pipeline_types.go` + // do not change it without changing the other. + return fmt.Sprintf("%s.%s.svc:%d", fmt.Sprintf("%s-daemon-svc", pipelineName), ns, dfv1.DaemonServicePort) } -func (h *handler) getDaemonClient(ns, pipeline string) (daemonclient.DaemonClient, error) { - if dClient, ok := h.daemonClientsCache.Get(daemonSvcAddress(ns, pipeline)); !ok { +func monoVertexDaemonSvcAddress(ns, monoVertexName string) string { + // the format is consistent with what we defined in GetDaemonServiceURL in `pkg/apis/numaflow/v1alpha1/mono_vertex_types.go` + // do not change it without changing the other. 
+ return fmt.Sprintf("%s.%s.svc:%d", fmt.Sprintf("%s-mv-daemon-svc", monoVertexName), ns, dfv1.MonoVertexDaemonServicePort) +} + +func (h *handler) getPipelineDaemonClient(ns, pipeline string) (daemonclient.DaemonClient, error) { + if dClient, ok := h.daemonClientsCache.Get(pipelineDaemonSvcAddress(ns, pipeline)); !ok { var err error var c daemonclient.DaemonClient // Default to use gRPC client if strings.EqualFold(h.opts.daemonClientProtocol, "http") { - c, err = daemonclient.NewRESTfulDaemonServiceClient(daemonSvcAddress(ns, pipeline)) + c, err = daemonclient.NewRESTfulDaemonServiceClient(pipelineDaemonSvcAddress(ns, pipeline)) } else { - c, err = daemonclient.NewGRPCDaemonServiceClient(daemonSvcAddress(ns, pipeline)) + c, err = daemonclient.NewGRPCDaemonServiceClient(pipelineDaemonSvcAddress(ns, pipeline)) } if err != nil { return nil, err } - h.daemonClientsCache.Add(daemonSvcAddress(ns, pipeline), c) + h.daemonClientsCache.Add(pipelineDaemonSvcAddress(ns, pipeline), c) return c, nil } else { return dClient, nil } } + +func (h *handler) getMonoVertexDaemonClient(ns, mvtName string) (mvtdaemonclient.MonoVertexDaemonClient, error) { + if mvtDaemonClient, ok := h.mvtDaemonClientsCache.Get(monoVertexDaemonSvcAddress(ns, mvtName)); !ok { + var err error + var c mvtdaemonclient.MonoVertexDaemonClient + // Default to use gRPC client + if strings.EqualFold(h.opts.daemonClientProtocol, "http") { + c, err = mvtdaemonclient.NewRESTfulClient(monoVertexDaemonSvcAddress(ns, mvtName)) + } else { + c, err = mvtdaemonclient.NewGRPCClient(monoVertexDaemonSvcAddress(ns, mvtName)) + } + if err != nil { + return nil, err + } + h.mvtDaemonClientsCache.Add(monoVertexDaemonSvcAddress(ns, mvtName), c) + return c, nil + } else { + return mvtDaemonClient, nil + } +} diff --git a/server/cmd/server/start.go b/server/cmd/server/start.go index eccfca45a3..16e52eaf62 100644 --- a/server/cmd/server/start.go +++ b/server/cmd/server/start.go @@ -203,6 +203,7 @@ func CreateAuthRouteMap(baseHref 
string) authz.RouteMap { "GET:" + baseHref + "api/v1/namespaces/:namespace/mono-vertices": authz.NewRouteInfo(authz.ObjectMonoVertex, true), "GET:" + baseHref + "api/v1/namespaces/:namespace/mono-vertices/:mono-vertex": authz.NewRouteInfo(authz.ObjectMonoVertex, true), "GET:" + baseHref + "api/v1/namespaces/:namespace/mono-vertices/:mono-vertex/pods": authz.NewRouteInfo(authz.ObjectMonoVertex, true), + "GET:" + baseHref + "api/v1/namespaces/:namespace/mono-vertices/:mono-vertex/metrics": authz.NewRouteInfo(authz.ObjectMonoVertex, true), "POST:" + baseHref + "api/v1/namespaces/:namespace/mono-vertices": authz.NewRouteInfo(authz.ObjectMonoVertex, true), } } diff --git a/server/cmd/server/start_test.go b/server/cmd/server/start_test.go index 1df0d5b8f9..b8abd6291e 100644 --- a/server/cmd/server/start_test.go +++ b/server/cmd/server/start_test.go @@ -25,12 +25,12 @@ import ( func TestCreateAuthRouteMap(t *testing.T) { t.Run("empty base", func(t *testing.T) { got := CreateAuthRouteMap("") - assert.Equal(t, 28, len(got)) + assert.Equal(t, 29, len(got)) }) t.Run("customize base", func(t *testing.T) { got := CreateAuthRouteMap("abcdefg") - assert.Equal(t, 28, len(got)) + assert.Equal(t, 29, len(got)) for k := range got { assert.Contains(t, k, "abcdefg") } diff --git a/server/routes/routes.go b/server/routes/routes.go index deb0cbcb6e..68064e8c52 100644 --- a/server/routes/routes.go +++ b/server/routes/routes.go @@ -161,6 +161,8 @@ func v1Routes(ctx context.Context, r gin.IRouter, dexObj *v1.DexObject, localUse r.GET("/namespaces/:namespace/mono-vertices/:mono-vertex/pods", handler.ListMonoVertexPods) // Create a mono vertex. r.POST("/namespaces/:namespace/mono-vertices", handler.CreateMonoVertex) + // Get the metrics of a mono vertex. + r.GET("/namespaces/:namespace/mono-vertices/:mono-vertex/metrics", handler.GetMonoVertexMetrics) } // authMiddleware is the middleware for AuthN/AuthZ. 
From c4b5d05c24c189684043688fa657295bf4495dcd Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Thu, 15 Aug 2024 11:27:36 -0700 Subject: [PATCH 002/188] fix: default resources mutated when applying templates (#1948) Signed-off-by: Derek Wang --- .../numaflow/v1alpha1/container_builder.go | 2 +- .../numaflow/v1alpha1/vertex_types_test.go | 29 +++++++++++++++++++ 2 files changed, 30 insertions(+), 1 deletion(-) diff --git a/pkg/apis/numaflow/v1alpha1/container_builder.go b/pkg/apis/numaflow/v1alpha1/container_builder.go index 0730993246..dcded47e9c 100644 --- a/pkg/apis/numaflow/v1alpha1/container_builder.go +++ b/pkg/apis/numaflow/v1alpha1/container_builder.go @@ -25,7 +25,7 @@ func (b containerBuilder) init(req getContainerReq) containerBuilder { b.Image = req.image b.ImagePullPolicy = req.imagePullPolicy b.Name = CtrMain - b.Resources = req.resources + b.Resources = *req.resources.DeepCopy() b.VolumeMounts = req.volumeMounts return b } diff --git a/pkg/apis/numaflow/v1alpha1/vertex_types_test.go b/pkg/apis/numaflow/v1alpha1/vertex_types_test.go index a664fee1d0..4336e15e31 100644 --- a/pkg/apis/numaflow/v1alpha1/vertex_types_test.go +++ b/pkg/apis/numaflow/v1alpha1/vertex_types_test.go @@ -23,6 +23,7 @@ import ( "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" + resource "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" ) @@ -198,7 +199,15 @@ func TestGetPodSpec(t *testing.T) { {Name: "test-env", Value: "test-val"}, }, SideInputsStoreName: "test-store", + DefaultResources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + Limits: corev1.ResourceList{}, + }, } + t.Run("test source", func(t *testing.T) { testObj := testVertex.DeepCopy() testObj.Spec.Source = &Source{} @@ -215,6 +224,18 @@ func TestGetPodSpec(t *testing.T) { DNSPolicy: corev1.DNSClusterFirstWithHostNet, 
DNSConfig: &corev1.PodDNSConfig{Nameservers: []string{"aaa.aaa"}}, } + testObj.Spec.ContainerTemplate = &ContainerTemplate{ + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("200m"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("200m"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + }, + } s, err := testObj.GetPodSpec(req) assert.NoError(t, err) assert.NotNil(t, s.NodeSelector) @@ -252,6 +273,14 @@ func TestGetPodSpec(t *testing.T) { assert.Contains(t, s.Containers[0].Args, "--type="+string(VertexTypeSource)) assert.Equal(t, 1, len(s.InitContainers)) assert.Equal(t, CtrInit, s.InitContainers[0].Name) + assert.Equal(t, "200m", s.Containers[0].Resources.Requests.Cpu().String()) + assert.Equal(t, "200m", s.Containers[0].Resources.Limits.Cpu().String()) + assert.Equal(t, "200Mi", s.Containers[0].Resources.Requests.Memory().String()) + assert.Equal(t, "200Mi", s.Containers[0].Resources.Limits.Memory().String()) + assert.Equal(t, "100m", s.InitContainers[0].Resources.Requests.Cpu().String()) + assert.Equal(t, "100Mi", s.InitContainers[0].Resources.Requests.Memory().String()) + assert.Equal(t, "0", s.InitContainers[0].Resources.Limits.Cpu().String()) + assert.Equal(t, "0", s.InitContainers[0].Resources.Limits.Memory().String()) }) t.Run("test sink", func(t *testing.T) { From 7b85e89f307c2286c5b57daf3fb17d70a08107c0 Mon Sep 17 00:00:00 2001 From: Yashash H L Date: Fri, 16 Aug 2024 08:29:07 +0530 Subject: [PATCH 003/188] test: add mono vertex e2e tests (#1945) Signed-off-by: Yashash H L Signed-off-by: Keran Yang Co-authored-by: Keran Yang --- .github/workflows/ci.yaml | 2 +- pkg/reconciler/monovertex/controller.go | 20 ++-- test/diamond-e2e/diamond_test.go | 20 ++-- test/diamond-e2e/testdata/cycle-backward.yaml | 4 + test/diamond-e2e/testdata/cycle-to-self.yaml | 4 + ...-on-map-pipeline.yaml => 
join-on-map.yaml} | 4 + ...duce-pipeline.yaml => join-on-reduce.yaml} | 4 + test/diamond-e2e/testdata/join-on-sink.yaml | 5 +- test/e2e-api/redis.go | 6 +- test/e2e/functional_test.go | 31 +++-- test/e2e/testdata/even-odd.yaml | 14 +++ test/e2e/testdata/simple-fallback.yaml | 4 + test/e2e/testdata/udf-filtering.yaml | 4 + test/fixtures/e2e_suite.go | 27 +++-- test/fixtures/expect.go | 78 +++++++++---- test/fixtures/given.go | 70 +++++++++--- test/fixtures/redis.go | 7 +- test/fixtures/redis_check.go | 24 ++-- test/fixtures/util.go | 71 ++++++++++++ test/fixtures/when.go | 106 ++++++++++++++---- test/http-e2e/http_test.go | 10 +- .../testdata/http-source-with-auth.yaml | 4 + test/http-e2e/testdata/http-source.yaml | 4 + test/idle-source-e2e/idle_source_test.go | 8 +- .../testdata/idle-source-reduce-pipeline.yaml | 4 + .../testdata/kafka-pipeline.yaml | 4 + test/jetstream-e2e/jetstream_test.go | 5 +- .../testdata/jetstream-source-pipeline.yaml | 4 + test/monovertex-e2e/monovertex_test.go | 32 ++++++ .../mono-vertex-with-transformer.yaml | 24 ++++ test/nats-e2e/nats_test.go | 2 +- .../testdata/nats-source-pipeline.yaml | 4 + test/reduce-one-e2e/reduce_one_test.go | 28 ++--- .../testdata/complex-reduce-pipeline.yaml | 4 + .../complex-sliding-window-pipeline.yaml | 4 + .../simple-keyed-reduce-pipeline.yaml | 4 + .../simple-non-keyed-reduce-pipeline.yaml | 4 + .../testdata/simple-reduce-pipeline-wal.yaml | 4 + test/reduce-two-e2e/reduce_two_test.go | 28 ++--- .../reduce-stream/reduce-stream-go.yaml | 4 + .../reduce-stream/reduce-stream-java.yaml | 4 + ...ple-session-keyed-counter-pipeline-go.yaml | 4 + ...e-session-keyed-counter-pipeline-java.yaml | 4 + .../simple-session-sum-pipeline.yaml | 4 + .../sideinput-e2e_sink_source_test.go | 10 +- test/sideinputs-e2e/sideinput_test.go | 6 +- .../testdata/map-sideinput-pipeline.yaml | 4 + .../testdata/reduce-sideinput-pipeline.yaml | 4 + ...ideinput_sink.yaml => sideinput-sink.yaml} | 5 +- ...nput_source.yaml => 
sideinput-source.yaml} | 3 + .../testdata/source-filtering.yaml | 4 + test/transformer-e2e/transformer_test.go | 12 +- 52 files changed, 562 insertions(+), 192 deletions(-) rename test/diamond-e2e/testdata/{join-on-map-pipeline.yaml => join-on-map.yaml} (82%) rename test/diamond-e2e/testdata/{join-on-reduce-pipeline.yaml => join-on-reduce.yaml} (91%) create mode 100644 test/monovertex-e2e/monovertex_test.go create mode 100644 test/monovertex-e2e/testdata/mono-vertex-with-transformer.yaml rename test/sideinputs-e2e/testdata/{sideinput_sink.yaml => sideinput-sink.yaml} (89%) rename test/sideinputs-e2e/testdata/{sideinput_source.yaml => sideinput-source.yaml} (91%) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 064afdb38c..ba128ef042 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -151,7 +151,7 @@ jobs: fail-fast: false matrix: driver: [jetstream] - case: [e2e, diamond-e2e, transformer-e2e, kafka-e2e, http-e2e, nats-e2e, jetstream-e2e, sdks-e2e, reduce-one-e2e, reduce-two-e2e, udsource-e2e, api-e2e, sideinputs-e2e, idle-source-e2e] + case: [e2e, diamond-e2e, transformer-e2e, kafka-e2e, http-e2e, nats-e2e, jetstream-e2e, sdks-e2e, reduce-one-e2e, reduce-two-e2e, udsource-e2e, api-e2e, sideinputs-e2e, idle-source-e2e, monovertex-e2e] include: - driver: redis case: e2e diff --git a/pkg/reconciler/monovertex/controller.go b/pkg/reconciler/monovertex/controller.go index 108580f3dc..b345369c66 100644 --- a/pkg/reconciler/monovertex/controller.go +++ b/pkg/reconciler/monovertex/controller.go @@ -186,21 +186,21 @@ func (mr *monoVertexReconciler) reconcilePods(ctx context.Context, monoVtx *dfv1 } } if needToCreate { - labels := map[string]string{} + podLabels := map[string]string{} annotations := map[string]string{} if x := monoVtx.Spec.Metadata; x != nil { for k, v := range x.Annotations { annotations[k] = v } for k, v := range x.Labels { - labels[k] = v + podLabels[k] = v } } - labels[dfv1.KeyPartOf] = dfv1.Project - 
labels[dfv1.KeyManagedBy] = dfv1.ControllerMonoVertex - labels[dfv1.KeyComponent] = dfv1.ComponentMonoVertex - labels[dfv1.KeyAppName] = monoVtx.Name - labels[dfv1.KeyMonoVertexName] = monoVtx.Name + podLabels[dfv1.KeyPartOf] = dfv1.Project + podLabels[dfv1.KeyManagedBy] = dfv1.ControllerMonoVertex + podLabels[dfv1.KeyComponent] = dfv1.ComponentMonoVertex + podLabels[dfv1.KeyAppName] = monoVtx.Name + podLabels[dfv1.KeyMonoVertexName] = monoVtx.Name annotations[dfv1.KeyHash] = hash annotations[dfv1.KeyReplica] = strconv.Itoa(replica) // Defaults to udf @@ -209,7 +209,7 @@ func (mr *monoVertexReconciler) reconcilePods(ctx context.Context, monoVtx *dfv1 ObjectMeta: metav1.ObjectMeta{ Namespace: monoVtx.Namespace, Name: podNamePrefix + sharedutil.RandomLowerCaseString(5), - Labels: labels, + Labels: podLabels, Annotations: annotations, OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(monoVtx.GetObjectMeta(), dfv1.MonoVertexGroupVersionKind)}, }, @@ -392,10 +392,10 @@ func (mr *monoVertexReconciler) createOrUpdateDaemonDeployment(ctx context.Conte return nil } -func (r *monoVertexReconciler) findExistingPods(ctx context.Context, monoVtx *dfv1.MonoVertex) (map[string]corev1.Pod, error) { +func (mr *monoVertexReconciler) findExistingPods(ctx context.Context, monoVtx *dfv1.MonoVertex) (map[string]corev1.Pod, error) { pods := &corev1.PodList{} selector, _ := labels.Parse(dfv1.KeyComponent + "=" + dfv1.ComponentMonoVertex + "," + dfv1.KeyMonoVertexName + "=" + monoVtx.Name) - if err := r.client.List(ctx, pods, &client.ListOptions{Namespace: monoVtx.Namespace, LabelSelector: selector}); err != nil { + if err := mr.client.List(ctx, pods, &client.ListOptions{Namespace: monoVtx.Namespace, LabelSelector: selector}); err != nil { return nil, fmt.Errorf("failed to list mono vertex pods: %w", err) } result := make(map[string]corev1.Pod) diff --git a/test/diamond-e2e/diamond_test.go b/test/diamond-e2e/diamond_test.go index cdb98c7783..b30c72c7c0 100644 --- 
a/test/diamond-e2e/diamond_test.go +++ b/test/diamond-e2e/diamond_test.go @@ -45,7 +45,7 @@ func (s *DiamondSuite) TestJoinOnReducePipeline() { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) defer cancel() - w := s.Given().Pipeline("@testdata/join-on-reduce-pipeline.yaml"). + w := s.Given().Pipeline("@testdata/join-on-reduce.yaml"). When(). CreatePipelineAndWait() defer w.DeletePipelineAndWait() @@ -74,13 +74,13 @@ func (s *DiamondSuite) TestJoinOnReducePipeline() { }() // todo: this only tests for one occurrence: ideally should verify all w.Expect(). - SinkContains("sink", "40"). // per 10-second window: (10 * 2) * 2 atoi vertices - SinkContains("sink", "80") // per 10-second window: 10 * (1 + 3) * 2 atoi vertices + RedisSinkContains("join-on-reduce-sink", "40"). // per 10-second window: (10 * 2) * 2 atoi vertices + RedisSinkContains("join-on-reduce-sink", "80") // per 10-second window: 10 * (1 + 3) * 2 atoi vertices done <- struct{}{} } func (s *DiamondSuite) TestJoinOnMapPipeline() { - w := s.Given().Pipeline("@testdata/join-on-map-pipeline.yaml"). + w := s.Given().Pipeline("@testdata/join-on-map.yaml"). When(). CreatePipelineAndWait() defer w.DeletePipelineAndWait() @@ -93,8 +93,8 @@ func (s *DiamondSuite) TestJoinOnMapPipeline() { w.SendMessageTo(pipelineName, "in-1", NewHttpPostRequest().WithBody([]byte("2"))) w.Expect(). - SinkContains("sink", "1"). - SinkContains("sink", "2") + RedisSinkContains("join-on-map-sink", "1"). + RedisSinkContains("join-on-map-sink", "2") } func (s *DiamondSuite) TestJoinOnSinkVertex() { @@ -110,8 +110,8 @@ func (s *DiamondSuite) TestJoinOnSinkVertex() { w.SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("888888"))). 
SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("888889"))) - w.Expect().SinkContains("out", "888888") - w.Expect().SinkContains("out", "888889") + w.Expect().RedisSinkContains("join-on-sink-out", "888888") + w.Expect().RedisSinkContains("join-on-sink-out", "888889") } func (s *DiamondSuite) TestCycleToSelf() { @@ -136,7 +136,7 @@ func (s *DiamondSuite) TestCycleToSelf() { } } for i := 0; i < 10; i++ { - w.Expect().SinkContains("out", msgs[i]) + w.Expect().RedisSinkContains("cycle-to-self-out", msgs[i]) } } @@ -162,7 +162,7 @@ func (s *DiamondSuite) TestCycleBackward() { } } for i := 0; i < 10; i++ { - w.Expect().SinkContains("out", msgs[i]) + w.Expect().RedisSinkContains("cycle-backward-out", msgs[i]) } } diff --git a/test/diamond-e2e/testdata/cycle-backward.yaml b/test/diamond-e2e/testdata/cycle-backward.yaml index 5505af8e0f..7312fc6cce 100644 --- a/test/diamond-e2e/testdata/cycle-backward.yaml +++ b/test/diamond-e2e/testdata/cycle-backward.yaml @@ -26,6 +26,10 @@ spec: # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always + env: + - name: SINK_HASH_KEY + # The key is set in the format of "pipeline_name-vertex_name" + value: "cycle-backward-out" edges: - from: in to: cat diff --git a/test/diamond-e2e/testdata/cycle-to-self.yaml b/test/diamond-e2e/testdata/cycle-to-self.yaml index 063354d5ff..eaa7bf9cfa 100644 --- a/test/diamond-e2e/testdata/cycle-to-self.yaml +++ b/test/diamond-e2e/testdata/cycle-to-self.yaml @@ -22,6 +22,10 @@ spec: # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always + env: + - name: SINK_HASH_KEY + # The key is set in the format of "pipeline_name-vertex_name" + value: "cycle-to-self-out" edges: - from: in to: retry diff --git 
a/test/diamond-e2e/testdata/join-on-map-pipeline.yaml b/test/diamond-e2e/testdata/join-on-map.yaml similarity index 82% rename from test/diamond-e2e/testdata/join-on-map-pipeline.yaml rename to test/diamond-e2e/testdata/join-on-map.yaml index 286a075d8c..6ca69cda24 100644 --- a/test/diamond-e2e/testdata/join-on-map-pipeline.yaml +++ b/test/diamond-e2e/testdata/join-on-map.yaml @@ -32,6 +32,10 @@ spec: # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always + env: + - name: SINK_HASH_KEY + # The key is set in the format of "pipeline_name-vertex_name" + value: "join-on-map-sink" edges: - from: in-0 to: cat diff --git a/test/diamond-e2e/testdata/join-on-reduce-pipeline.yaml b/test/diamond-e2e/testdata/join-on-reduce.yaml similarity index 91% rename from test/diamond-e2e/testdata/join-on-reduce-pipeline.yaml rename to test/diamond-e2e/testdata/join-on-reduce.yaml index 2f2066a2f0..f0ec43e19f 100644 --- a/test/diamond-e2e/testdata/join-on-reduce-pipeline.yaml +++ b/test/diamond-e2e/testdata/join-on-reduce.yaml @@ -53,6 +53,10 @@ spec: # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always + env: + - name: SINK_HASH_KEY + # The key is set in the format of "pipeline_name-vertex_name" + value: "join-on-reduce-sink" edges: - from: in to: atoi-0 diff --git a/test/diamond-e2e/testdata/join-on-sink.yaml b/test/diamond-e2e/testdata/join-on-sink.yaml index 46a7294922..1f1a442b24 100644 --- a/test/diamond-e2e/testdata/join-on-sink.yaml +++ b/test/diamond-e2e/testdata/join-on-sink.yaml @@ -28,7 +28,10 @@ spec: # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always - + env: + 
- name: SINK_HASH_KEY + # The key is set in the format of "pipeline_name-vertex_name" + value: "join-on-sink-out" edges: - from: in to: even-or-odd diff --git a/test/e2e-api/redis.go b/test/e2e-api/redis.go index ea373634ac..228da31dec 100644 --- a/test/e2e-api/redis.go +++ b/test/e2e-api/redis.go @@ -18,7 +18,6 @@ package main import ( "context" - "fmt" "log" "net/http" "net/url" @@ -62,8 +61,7 @@ func (h *RedisController) GetMsgCountContains(w http.ResponseWriter, r *http.Req redisClient := h.getRedisClient() - pipelineName := r.URL.Query().Get("pipelineName") - sinkName := r.URL.Query().Get("sinkName") + keyName := r.URL.Query().Get("keyName") targetStr, err := url.QueryUnescape(r.URL.Query().Get("targetStr")) if err != nil { log.Println(err) @@ -71,7 +69,7 @@ func (h *RedisController) GetMsgCountContains(w http.ResponseWriter, r *http.Req return } - count, err := redisClient.HGet(context.Background(), fmt.Sprintf("%s:%s", pipelineName, sinkName), targetStr).Result() + count, err := redisClient.HGet(context.Background(), keyName, targetStr).Result() if err != nil { log.Println(err) diff --git a/test/e2e/functional_test.go b/test/e2e/functional_test.go index 573e0d6f8f..3e9c1d70dd 100644 --- a/test/e2e/functional_test.go +++ b/test/e2e/functional_test.go @@ -169,15 +169,14 @@ func (s *FunctionalSuite) TestUDFFiltering() { SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte(expect3))). 
SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte(expect4))) - w.Expect().SinkContains("out", expect3) - w.Expect().SinkContains("out", expect4) - w.Expect().SinkNotContains("out", expect0) - w.Expect().SinkNotContains("out", expect1) - w.Expect().SinkNotContains("out", expect2) + w.Expect().RedisSinkContains("udf-filtering-out", expect3) + w.Expect().RedisSinkContains("udf-filtering-out", expect4) + w.Expect().RedisSinkNotContains("udf-filtering-out", expect0) + w.Expect().RedisSinkNotContains("udf-filtering-out", expect1) + w.Expect().RedisSinkNotContains("udf-filtering-out", expect2) } func (s *FunctionalSuite) TestConditionalForwarding() { - // FIXME: flaky when redis is used as isb if strings.ToUpper(os.Getenv("ISBSVC")) == "REDIS" { s.T().SkipNow() @@ -196,17 +195,17 @@ func (s *FunctionalSuite) TestConditionalForwarding() { SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("888889"))). SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("not an integer"))) - w.Expect().SinkContains("even-sink", "888888") - w.Expect().SinkNotContains("even-sink", "888889") - w.Expect().SinkNotContains("even-sink", "not an integer") + w.Expect().RedisSinkContains("even-odd-even-sink", "888888") + w.Expect().RedisSinkNotContains("even-odd-even-sink", "888889") + w.Expect().RedisSinkNotContains("even-odd-even-sink", "not an integer") - w.Expect().SinkContains("odd-sink", "888889") - w.Expect().SinkNotContains("odd-sink", "888888") - w.Expect().SinkNotContains("odd-sink", "not an integer") + w.Expect().RedisSinkContains("even-odd-odd-sink", "888889") + w.Expect().RedisSinkNotContains("even-odd-odd-sink", "888888") + w.Expect().RedisSinkNotContains("even-odd-odd-sink", "not an integer") - w.Expect().SinkContains("number-sink", "888888") - w.Expect().SinkContains("number-sink", "888889") - w.Expect().SinkNotContains("number-sink", "not an integer") + w.Expect().RedisSinkContains("even-odd-number-sink", "888888") + 
w.Expect().RedisSinkContains("even-odd-number-sink", "888889") + w.Expect().RedisSinkNotContains("even-odd-number-sink", "not an integer") } func (s *FunctionalSuite) TestDropOnFull() { @@ -354,7 +353,7 @@ func (s *FunctionalSuite) TestFallbackSink() { // wait for all the pods to come up w.Expect().VertexPodsRunning() - w.Expect().SinkContains("output", "fallback-message") + w.Expect().RedisSinkContains("simple-fallback-output", "fallback-message") } func TestFunctionalSuite(t *testing.T) { diff --git a/test/e2e/testdata/even-odd.yaml b/test/e2e/testdata/even-odd.yaml index 0c3da8cdd5..7cc6822243 100644 --- a/test/e2e/testdata/even-odd.yaml +++ b/test/e2e/testdata/even-odd.yaml @@ -22,18 +22,32 @@ spec: # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always + env: + - name: SINK_HASH_KEY + # The key is set in the format of "pipeline_name-vertex_name" + value: "even-odd-even-sink" - name: odd-sink sink: udsink: container: + # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always + env: + - name: SINK_HASH_KEY + # The key is set in the format of "pipeline_name-vertex_name" + value: "even-odd-odd-sink" - name: number-sink sink: udsink: container: + # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always + env: + - name: SINK_HASH_KEY + # The key is set in the format of "pipeline_name-vertex_name" + value: "even-odd-number-sink" edges: - from: in to: even-or-odd diff --git a/test/e2e/testdata/simple-fallback.yaml b/test/e2e/testdata/simple-fallback.yaml index de76a06d59..0e39df2ddf 100644 --- a/test/e2e/testdata/simple-fallback.yaml +++ 
b/test/e2e/testdata/simple-fallback.yaml @@ -27,6 +27,10 @@ spec: # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always + env: + - name: SINK_HASH_KEY + # The key is set in the format of "pipeline_name-vertex_name" + value: "simple-fallback-output" edges: - from: in to: cat diff --git a/test/e2e/testdata/udf-filtering.yaml b/test/e2e/testdata/udf-filtering.yaml index 8cc332ddfc..1c8c79057c 100644 --- a/test/e2e/testdata/udf-filtering.yaml +++ b/test/e2e/testdata/udf-filtering.yaml @@ -20,6 +20,10 @@ spec: # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always + env: + - name: SINK_HASH_KEY + # The key is set in the format of "pipeline_name-vertex_name" + value: "udf-filtering-out" edges: - from: in to: p1 diff --git a/test/fixtures/e2e_suite.go b/test/fixtures/e2e_suite.go index 3012be8754..64b27e3eb9 100644 --- a/test/fixtures/e2e_suite.go +++ b/test/fixtures/e2e_suite.go @@ -80,12 +80,13 @@ spec: type E2ESuite struct { suite.Suite - restConfig *rest.Config - isbSvcClient flowpkg.InterStepBufferServiceInterface - pipelineClient flowpkg.PipelineInterface - vertexClient flowpkg.VertexInterface - kubeClient kubernetes.Interface - stopch chan struct{} + restConfig *rest.Config + isbSvcClient flowpkg.InterStepBufferServiceInterface + pipelineClient flowpkg.PipelineInterface + vertexClient flowpkg.VertexInterface + monoVertexClient flowpkg.MonoVertexInterface + kubeClient kubernetes.Interface + stopch chan struct{} } func (s *E2ESuite) SetupSuite() { @@ -98,6 +99,7 @@ func (s *E2ESuite) SetupSuite() { s.isbSvcClient = flowversiond.NewForConfigOrDie(s.restConfig).NumaflowV1alpha1().InterStepBufferServices(Namespace) s.pipelineClient = 
flowversiond.NewForConfigOrDie(s.restConfig).NumaflowV1alpha1().Pipelines(Namespace) s.vertexClient = flowversiond.NewForConfigOrDie(s.restConfig).NumaflowV1alpha1().Vertices(Namespace) + s.monoVertexClient = flowversiond.NewForConfigOrDie(s.restConfig).NumaflowV1alpha1().MonoVertices(Namespace) // Clean up resources if any s.deleteResources([]schema.GroupVersionResource{ @@ -182,12 +184,13 @@ func (s *E2ESuite) deleteResources(resources []schema.GroupVersionResource) { func (s *E2ESuite) Given() *Given { return &Given{ - t: s.T(), - isbSvcClient: s.isbSvcClient, - pipelineClient: s.pipelineClient, - vertexClient: s.vertexClient, - restConfig: s.restConfig, - kubeClient: s.kubeClient, + t: s.T(), + isbSvcClient: s.isbSvcClient, + pipelineClient: s.pipelineClient, + vertexClient: s.vertexClient, + monoVertexClient: s.monoVertexClient, + restConfig: s.restConfig, + kubeClient: s.kubeClient, } } diff --git a/test/fixtures/expect.go b/test/fixtures/expect.go index 6438959786..2e550653fe 100644 --- a/test/fixtures/expect.go +++ b/test/fixtures/expect.go @@ -32,32 +32,38 @@ import ( ) type Expect struct { - t *testing.T - isbSvcClient flowpkg.InterStepBufferServiceInterface - pipelineClient flowpkg.PipelineInterface - vertexClient flowpkg.VertexInterface - isbSvc *dfv1.InterStepBufferService - pipeline *dfv1.Pipeline - restConfig *rest.Config - kubeClient kubernetes.Interface + t *testing.T + isbSvcClient flowpkg.InterStepBufferServiceInterface + pipelineClient flowpkg.PipelineInterface + vertexClient flowpkg.VertexInterface + monoVertexClient flowpkg.MonoVertexInterface + isbSvc *dfv1.InterStepBufferService + pipeline *dfv1.Pipeline + monoVertex *dfv1.MonoVertex + restConfig *rest.Config + kubeClient kubernetes.Interface } -func (t *Expect) SinkContains(sinkName string, targetStr string, opts ...SinkCheckOption) *Expect { +// RedisSinkContains checks if the target string is written to the redis sink +// hashKey is the hash key environment variable set by the sink +// 
targetStr is the target string to check +func (t *Expect) RedisSinkContains(hashKey string, targetStr string, opts ...SinkCheckOption) *Expect { t.t.Helper() ctx := context.Background() - contains := RedisContains(ctx, t.pipeline.Name, sinkName, targetStr, opts...) - if !contains { - t.t.Fatalf("Expected redis contains target string %s written by pipeline %s, sink %s.", targetStr, t.pipeline.Name, sinkName) + if contains := redisContains(ctx, hashKey, targetStr, opts...); !contains { + t.t.Fatalf("Expected redis contains target string %s written under hash key %s.", targetStr, hashKey) } return t } -func (t *Expect) SinkNotContains(sinkName string, targetStr string, opts ...SinkCheckOption) *Expect { +// RedisSinkNotContains checks if the target string is not written to the redis sink +// hashKey is the hash key environment variable set by the sink +// targetStr is the target string to check +func (t *Expect) RedisSinkNotContains(hashKey string, targetStr string, opts ...SinkCheckOption) *Expect { t.t.Helper() ctx := context.Background() - notContains := RedisNotContains(ctx, t.pipeline.Name, sinkName, targetStr, opts...) 
- if !notContains { - t.t.Fatalf("Not expected redis contains target string %s written by pipeline %s, sink %s.", targetStr, t.pipeline.Name, sinkName) + if notContain := redisNotContains(ctx, hashKey, targetStr, opts...); !notContain { + t.t.Fatalf("Not expected redis contains target string %s written under hash key %s.", targetStr, hashKey) } return t } @@ -112,6 +118,14 @@ func (t *Expect) VertexPodsRunning() *Expect { return t } +func (t *Expect) MonoVertexPodsRunning() *Expect { + t.t.Helper() + if err := WaitForMonoVertexPodRunning(t.kubeClient, t.monoVertexClient, Namespace, t.monoVertex.Name, 2*time.Minute); err != nil { + t.t.Fatalf("Expected mono vertex %q pod running: %v", t.monoVertex.Name, err) + } + return t +} + func (t *Expect) VertexSizeScaledTo(v string, size int) *Expect { t.t.Helper() ctx := context.Background() @@ -141,6 +155,20 @@ func (t *Expect) VertexPodLogContains(vertexName, regex string, opts ...PodLogCh return t } +func (t *Expect) MonoVertexPodLogContains(regex string, opts ...PodLogCheckOption) *Expect { + t.t.Helper() + ctx := context.Background() + contains, err := MonoVertexPodLogContains(ctx, t.kubeClient, Namespace, t.monoVertex.Name, regex, opts...) 
+ if err != nil { + t.t.Fatalf("Failed to check mono vertex %q pod logs: %v", t.monoVertex.Name, err) + } + if !contains { + t.t.Fatalf("Expected mono vertex [%q] pod log to contain [%q] but didn't.", t.monoVertex.Name, regex) + } + t.t.Logf("Expected mono vertex %q pod contains %q", t.monoVertex.Name, regex) + return t +} + func (t *Expect) VertexPodLogNotContains(vertexName, regex string, opts ...PodLogCheckOption) *Expect { t.t.Helper() ctx := context.Background() @@ -178,13 +206,15 @@ func (t *Expect) DaemonPodLogContains(pipelineName, regex string, opts ...PodLog func (t *Expect) When() *When { return &When{ - t: t.t, - isbSvcClient: t.isbSvcClient, - pipelineClient: t.pipelineClient, - vertexClient: t.vertexClient, - isbSvc: t.isbSvc, - pipeline: t.pipeline, - restConfig: t.restConfig, - kubeClient: t.kubeClient, + t: t.t, + isbSvcClient: t.isbSvcClient, + pipelineClient: t.pipelineClient, + vertexClient: t.vertexClient, + monoVertexClient: t.monoVertexClient, + isbSvc: t.isbSvc, + pipeline: t.pipeline, + monoVertex: t.monoVertex, + restConfig: t.restConfig, + kubeClient: t.kubeClient, } } diff --git a/test/fixtures/given.go b/test/fixtures/given.go index 448f8e10f6..30a4ab01a7 100644 --- a/test/fixtures/given.go +++ b/test/fixtures/given.go @@ -21,23 +21,26 @@ import ( "strings" "testing" - dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" - flowpkg "github.com/numaproj/numaflow/pkg/client/clientset/versioned/typed/numaflow/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "sigs.k8s.io/yaml" + + dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" + flowpkg "github.com/numaproj/numaflow/pkg/client/clientset/versioned/typed/numaflow/v1alpha1" ) type Given struct { - t *testing.T - isbSvcClient flowpkg.InterStepBufferServiceInterface - pipelineClient flowpkg.PipelineInterface - vertexClient flowpkg.VertexInterface - isbSvc *dfv1.InterStepBufferService - pipeline *dfv1.Pipeline - 
restConfig *rest.Config - kubeClient kubernetes.Interface + t *testing.T + isbSvcClient flowpkg.InterStepBufferServiceInterface + pipelineClient flowpkg.PipelineInterface + vertexClient flowpkg.VertexInterface + monoVertexClient flowpkg.MonoVertexInterface + isbSvc *dfv1.InterStepBufferService + pipeline *dfv1.Pipeline + monoVertex *dfv1.MonoVertex + restConfig *rest.Config + kubeClient kubernetes.Interface } // creates an ISBSvc based on the parameter, this may be: @@ -76,6 +79,23 @@ func (g *Given) Pipeline(text string) *Given { return g } +// / creates a MonoVertex based on the parameter, this may be: +// +// 1. A file name if it starts with "@" +// 2. Raw YAML. +func (g *Given) MonoVertex(text string) *Given { + g.t.Helper() + g.monoVertex = &dfv1.MonoVertex{} + g.readResource(text, g.monoVertex) + l := g.monoVertex.GetLabels() + if l == nil { + l = map[string]string{} + } + l[Label] = LabelValue + g.monoVertex.SetLabels(l) + return g +} + func (g *Given) WithPipeline(p *dfv1.Pipeline) *Given { g.t.Helper() g.pipeline = p @@ -89,6 +109,18 @@ func (g *Given) WithPipeline(p *dfv1.Pipeline) *Given { return g } +func (g *Given) WithMonoVertex(mv *dfv1.MonoVertex) *Given { + g.t.Helper() + g.monoVertex = mv + l := g.monoVertex.GetLabels() + if l == nil { + l = map[string]string{} + } + l[Label] = LabelValue + g.monoVertex.SetLabels(l) + return g +} + func (g *Given) readResource(text string, v metav1.Object) { g.t.Helper() var file string @@ -122,13 +154,15 @@ func (g *Given) readResource(text string, v metav1.Object) { func (g *Given) When() *When { return &When{ - t: g.t, - isbSvcClient: g.isbSvcClient, - pipelineClient: g.pipelineClient, - vertexClient: g.vertexClient, - isbSvc: g.isbSvc, - pipeline: g.pipeline, - restConfig: g.restConfig, - kubeClient: g.kubeClient, + t: g.t, + isbSvcClient: g.isbSvcClient, + pipelineClient: g.pipelineClient, + vertexClient: g.vertexClient, + monoVertexClient: g.monoVertexClient, + isbSvc: g.isbSvc, + pipeline: g.pipeline, + 
monoVertex: g.monoVertex, + restConfig: g.restConfig, + kubeClient: g.kubeClient, } } diff --git a/test/fixtures/redis.go b/test/fixtures/redis.go index 7f0b4c6e7e..93d5d4996f 100644 --- a/test/fixtures/redis.go +++ b/test/fixtures/redis.go @@ -22,9 +22,10 @@ import ( "strconv" ) -// GetMsgCountContains returns number of occurrences of the targetStr in redis that are written by pipelineName, sinkName. -func GetMsgCountContains(pipelineName, sinkName, targetStr string) int { - str := InvokeE2EAPI("/redis/get-msg-count-contains?pipelineName=%s&sinkName=%s&targetStr=%s", pipelineName, sinkName, url.QueryEscape(targetStr)) +// getMsgCountContains returns the number of occurrences of the targetStr in redis +// that are written by under hash key keyName. +func getMsgCountContains(keyName, targetStr string) int { + str := InvokeE2EAPI("/redis/get-msg-count-contains?keyName=%s&targetStr=%s", keyName, url.QueryEscape(targetStr)) count, err := strconv.Atoi(str) if err != nil { panic(fmt.Sprintf("Can't parse string %s to an integer.", str)) diff --git a/test/fixtures/redis_check.go b/test/fixtures/redis_check.go index b73078a3e5..b784aa2d61 100644 --- a/test/fixtures/redis_check.go +++ b/test/fixtures/redis_check.go @@ -24,8 +24,8 @@ import ( // Retry checking redis every 5 seconds. const retryInterval = time.Second * 5 -// RedisNotContains verifies that there is no occurrence of targetStr in redis that is written by pipelineName, sinkName. -func RedisNotContains(ctx context.Context, pipelineName, sinkName, targetStr string, opts ...SinkCheckOption) bool { +// redisNotContains verifies that there is no occurrence of targetStr in redis that is written under hashKey. 
+func redisNotContains(ctx context.Context, hashKey, targetStr string, opts ...SinkCheckOption) bool { o := defaultRedisCheckOptions() for _, opt := range opts { if opt != nil { @@ -34,14 +34,13 @@ func RedisNotContains(ctx context.Context, pipelineName, sinkName, targetStr str } ctx, cancel := context.WithTimeout(ctx, o.timeout) defer cancel() - return runChecks(ctx, func() bool { - return !redisContains(pipelineName, sinkName, targetStr, 1) + return !redisContainsCount(hashKey, targetStr, 1) }) } -// RedisContains verifies that there are targetStr in redis written by pipelineName, sinkName. -func RedisContains(ctx context.Context, pipelineName, sinkName, targetStr string, opts ...SinkCheckOption) bool { +// redisContains verifies that there are targetStr in redis written under hashKey. +func redisContains(ctx context.Context, hashKey, targetStr string, opts ...SinkCheckOption) bool { o := defaultRedisCheckOptions() for _, opt := range opts { if opt != nil { @@ -50,15 +49,14 @@ func RedisContains(ctx context.Context, pipelineName, sinkName, targetStr string } ctx, cancel := context.WithTimeout(ctx, o.timeout) defer cancel() - return runChecks(ctx, func() bool { - return redisContains(pipelineName, sinkName, targetStr, o.count) + return redisContainsCount(hashKey, targetStr, o.count) }) } -func redisContains(pipelineName, sinkName, targetStr string, expectedCount int) bool { - // If number of matches is higher than expected, we treat it as passing the check. - return GetMsgCountContains(pipelineName, sinkName, targetStr) >= expectedCount +func redisContainsCount(hashKey, targetStr string, expectedCount int) bool { + // If the number of matches is higher than expected, we treat it as passing the check. + return getMsgCountContains(hashKey, targetStr) >= expectedCount } type redisCheckOptions struct { @@ -96,8 +94,8 @@ type CheckFunc func() bool // runChecks executes a performChecks function with retry strategy (retryInterval with timeout). 
// If performChecks doesn't pass within timeout, runChecks returns false indicating the checks have failed. // This is to mitigate the problem that we don't know exactly when a numaflow pipeline finishes processing our test data. -// Please notice such approach is not strictly accurate as there can be case where runChecks passes before pipeline finishes processing data. -// Which could result in false positive test results. e.g. checking data doesn't exist can pass before data gets persisted to redis. +// Please notice such an approach is not strictly accurate as there can be a case where runChecks passes before the pipeline finishes processing data. +// Which could result in false positive test results. E.g., checking data doesn't exist can pass before data gets persisted to redis. func runChecks(ctx context.Context, performChecks CheckFunc) bool { ticker := time.NewTicker(retryInterval) defer ticker.Stop() diff --git a/test/fixtures/util.go b/test/fixtures/util.go index 73b42b9f88..2c8c8a0ae1 100644 --- a/test/fixtures/util.go +++ b/test/fixtures/util.go @@ -230,6 +230,65 @@ func WaitForPipelineRunning(ctx context.Context, pipelineClient flowpkg.Pipeline } } +func WaitForMonoVertexRunning(ctx context.Context, monoVertexClient flowpkg.MonoVertexInterface, monoVertexName string, timeout time.Duration) error { + fieldSelector := "metadata.name=" + monoVertexName + opts := metav1.ListOptions{FieldSelector: fieldSelector} + watch, err := monoVertexClient.Watch(ctx, opts) + if err != nil { + return err + } + defer watch.Stop() + timeoutCh := make(chan bool, 1) + go func() { + time.Sleep(timeout) + timeoutCh <- true + }() + for { + select { + case event := <-watch.ResultChan(): + i, ok := event.Object.(*dfv1.MonoVertex) + if ok { + if i.Status.Phase == dfv1.MonoVertexPhaseRunning { + return nil + } + } else { + return fmt.Errorf("not monovertex") + } + case <-timeoutCh: + return fmt.Errorf("timeout after %v waiting for MonoVertex running", timeout) + } + } +} + +func 
WaitForMonoVertexPodRunning(kubeClient kubernetes.Interface, monoVertexClient flowpkg.MonoVertexInterface, namespace, monoVertexName string, timeout time.Duration) error { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + labelSelector := fmt.Sprintf("%s=%s,%s=%s", dfv1.KeyMonoVertexName, monoVertexName, dfv1.KeyComponent, dfv1.ComponentMonoVertex) + for { + select { + case <-ctx.Done(): + return fmt.Errorf("timeout after %v waiting for monovertex pod running", timeout) + default: + } + monoVertex, err := monoVertexClient.Get(ctx, monoVertexName, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("error getting the monovertex: %w", err) + } + podList, err := kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector}) + if err != nil { + return fmt.Errorf("error getting monovertex pod list: %w", err) + } + ok := len(podList.Items) > 0 && len(podList.Items) == monoVertex.GetReplicas() // pod number should equal to desired replicas + for _, p := range podList.Items { + ok = ok && p.Status.Phase == corev1.PodRunning + } + if ok { + return nil + } + time.Sleep(2 * time.Second) + } +} + func WaitForVertexPodRunning(kubeClient kubernetes.Interface, vertexClient flowpkg.VertexInterface, namespace, pipelineName, vertexName string, timeout time.Duration) error { ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() @@ -359,6 +418,18 @@ func VertexPodLogContains(ctx context.Context, kubeClient kubernetes.Interface, return PodsLogContains(ctx, kubeClient, namespace, regex, podList, opts...), nil } +func MonoVertexPodLogContains(ctx context.Context, kubeClient kubernetes.Interface, namespace, mvName, regex string, opts ...PodLogCheckOption) (bool, error) { + labelSelector := fmt.Sprintf("%s=%s,%s=%s", dfv1.KeyMonoVertexName, mvName, dfv1.KeyComponent, dfv1.ComponentMonoVertex) + podList, err := kubeClient.CoreV1().Pods(namespace).List(ctx, 
metav1.ListOptions{LabelSelector: labelSelector}) + if err != nil { + return false, fmt.Errorf("error getting monovertex pods: %w", err) + } + if len(podList.Items) == 0 { + return false, fmt.Errorf("no monovertex pods found") + } + return PodsLogContains(ctx, kubeClient, namespace, regex, podList, opts...), nil +} + func DaemonPodLogContains(ctx context.Context, kubeClient kubernetes.Interface, namespace, pipelineName, regex string, opts ...PodLogCheckOption) (bool, error) { labelSelector := fmt.Sprintf("%s=%s,%s=%s", dfv1.KeyPipelineName, pipelineName, dfv1.KeyComponent, dfv1.ComponentDaemon) podList, err := kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector, FieldSelector: "status.phase=Running"}) diff --git a/test/fixtures/when.go b/test/fixtures/when.go index 35295d6dfb..7ab85ae772 100644 --- a/test/fixtures/when.go +++ b/test/fixtures/when.go @@ -31,14 +31,16 @@ import ( ) type When struct { - t *testing.T - isbSvcClient flowpkg.InterStepBufferServiceInterface - pipelineClient flowpkg.PipelineInterface - vertexClient flowpkg.VertexInterface - isbSvc *dfv1.InterStepBufferService - pipeline *dfv1.Pipeline - restConfig *rest.Config - kubeClient kubernetes.Interface + t *testing.T + isbSvcClient flowpkg.InterStepBufferServiceInterface + pipelineClient flowpkg.PipelineInterface + vertexClient flowpkg.VertexInterface + monoVertexClient flowpkg.MonoVertexInterface + isbSvc *dfv1.InterStepBufferService + pipeline *dfv1.Pipeline + monoVertex *dfv1.MonoVertex + restConfig *rest.Config + kubeClient kubernetes.Interface portForwarderStopChannels map[string]chan struct{} streamLogsStopChannels map[string]chan struct{} @@ -101,6 +103,26 @@ func (w *When) CreatePipelineAndWait() *When { return w } +func (w *When) CreateMonoVertexAndWait() *When { + w.t.Helper() + if w.monoVertex == nil { + w.t.Fatal("No MonoVertex to create") + } + w.t.Log("Creating MonoVertex", w.monoVertex.Name) + ctx := context.Background() + i, err := 
w.monoVertexClient.Create(ctx, w.monoVertex, metav1.CreateOptions{}) + if err != nil { + w.t.Fatal(err) + } else { + w.monoVertex = i + } + // wait + if err := WaitForMonoVertexRunning(ctx, w.monoVertexClient, w.monoVertex.Name, defaultTimeout); err != nil { + w.t.Fatal(err) + } + return w +} + func (w *When) DeletePipelineAndWait() *When { w.t.Helper() if w.pipeline == nil { @@ -133,6 +155,38 @@ func (w *When) DeletePipelineAndWait() *When { } } +func (w *When) DeleteMonoVertexAndWait() *When { + w.t.Helper() + if w.monoVertex == nil { + w.t.Fatal("No MonoVertex to delete") + } + w.t.Log("Deleting MonoVertex", w.monoVertex.Name) + ctx := context.Background() + if err := w.monoVertexClient.Delete(ctx, w.monoVertex.Name, metav1.DeleteOptions{}); err != nil { + w.t.Fatal(err) + } + + timeout := defaultTimeout + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + labelSelector := fmt.Sprintf("%s=%s", dfv1.KeyMonoVertexName, w.monoVertex.Name) + for { + select { + case <-ctx.Done(): + w.t.Fatalf("Timeout after %v waiting for mono vertex pods terminating", timeout) + default: + } + podList, err := w.kubeClient.CoreV1().Pods(Namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector}) + if err != nil { + w.t.Fatalf("Error getting mono vertex pods: %v", err) + } + if len(podList.Items) == 0 { + return w + } + time.Sleep(2 * time.Second) + } +} + func (w *When) WaitForISBSvcReady() *When { w.t.Helper() ctx := context.Background() @@ -281,26 +335,30 @@ func (w *When) Exec(name string, args []string, block func(t *testing.T, output func (w *When) Given() *Given { return &Given{ - t: w.t, - isbSvcClient: w.isbSvcClient, - pipelineClient: w.pipelineClient, - vertexClient: w.vertexClient, - isbSvc: w.isbSvc, - pipeline: w.pipeline, - restConfig: w.restConfig, - kubeClient: w.kubeClient, + t: w.t, + isbSvcClient: w.isbSvcClient, + pipelineClient: w.pipelineClient, + vertexClient: w.vertexClient, + monoVertexClient: w.monoVertexClient, + 
isbSvc: w.isbSvc, + pipeline: w.pipeline, + monoVertex: w.monoVertex, + restConfig: w.restConfig, + kubeClient: w.kubeClient, } } func (w *When) Expect() *Expect { return &Expect{ - t: w.t, - isbSvcClient: w.isbSvcClient, - pipelineClient: w.pipelineClient, - vertexClient: w.vertexClient, - isbSvc: w.isbSvc, - pipeline: w.pipeline, - restConfig: w.restConfig, - kubeClient: w.kubeClient, + t: w.t, + isbSvcClient: w.isbSvcClient, + pipelineClient: w.pipelineClient, + vertexClient: w.vertexClient, + monoVertexClient: w.monoVertexClient, + isbSvc: w.isbSvc, + pipeline: w.pipeline, + monoVertex: w.monoVertex, + restConfig: w.restConfig, + kubeClient: w.kubeClient, } } diff --git a/test/http-e2e/http_test.go b/test/http-e2e/http_test.go index ce46e23aef..a5e7058dee 100644 --- a/test/http-e2e/http_test.go +++ b/test/http-e2e/http_test.go @@ -47,16 +47,16 @@ func (s *HTTPSuite) TestHTTPSourcePipeline() { w.SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("no-id"))). SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("no-id"))) // No x-numaflow-id, expect 2 outputs - w.Expect().SinkContains("out", "no-id", SinkCheckWithContainCount(2)) + w.Expect().RedisSinkContains("http-source-out", "no-id", SinkCheckWithContainCount(2)) w.SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("with-id")).WithHeader("x-numaflow-id", "101")). 
SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("with-id")).WithHeader("x-numaflow-id", "101")) // With same x-numaflow-id, expect 1 output - w.Expect().SinkContains("out", "with-id", SinkCheckWithContainCount(1)) + w.Expect().RedisSinkContains("http-source-out", "with-id", SinkCheckWithContainCount(1)) w.SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("with-id")).WithHeader("x-numaflow-id", "102")) // With a new x-numaflow-id, expect 2 outputs - w.Expect().SinkContains("out", "with-id", SinkCheckWithContainCount(2)) + w.Expect().RedisSinkContains("http-source-out", "with-id", SinkCheckWithContainCount(2)) } func (s *HTTPSuite) TestHTTPSourceAuthPipeline() { @@ -71,8 +71,8 @@ func (s *HTTPSuite) TestHTTPSourceAuthPipeline() { w.SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("no-auth"))). SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("with-auth")).WithHeader("Authorization", "Bearer faketoken")) - w.Expect().SinkContains("out", "with-auth") - w.Expect().SinkNotContains("out", "no-auth") + w.Expect().RedisSinkContains("http-auth-source-out", "with-auth") + w.Expect().RedisSinkNotContains("http-auth-source-out", "no-auth") } func TestHTTPSuite(t *testing.T) { diff --git a/test/http-e2e/testdata/http-source-with-auth.yaml b/test/http-e2e/testdata/http-source-with-auth.yaml index a96cd3b58e..4b27a58cee 100644 --- a/test/http-e2e/testdata/http-source-with-auth.yaml +++ b/test/http-e2e/testdata/http-source-with-auth.yaml @@ -18,6 +18,10 @@ spec: # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always + env: + - name: SINK_HASH_KEY + # The key is set in the format of "pipeline_name-vertex_name" + value: "http-auth-source-out" edges: - from: in to: out diff --git a/test/http-e2e/testdata/http-source.yaml b/test/http-e2e/testdata/http-source.yaml 
index 3f81fc1902..c67d366ddb 100644 --- a/test/http-e2e/testdata/http-source.yaml +++ b/test/http-e2e/testdata/http-source.yaml @@ -17,6 +17,10 @@ spec: # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always + env: + - name: SINK_HASH_KEY + # The key is set in the format of "pipeline_name-vertex_name" + value: "http-source-out" edges: - from: in to: out diff --git a/test/idle-source-e2e/idle_source_test.go b/test/idle-source-e2e/idle_source_test.go index faf5be1f00..ef5e3eff98 100644 --- a/test/idle-source-e2e/idle_source_test.go +++ b/test/idle-source-e2e/idle_source_test.go @@ -87,8 +87,8 @@ func (is *IdleSourceSuite) TestIdleKeyedReducePipelineWithHttpSource() { // since the key can be even or odd and the window duration is 10s // the sum should be 20(for even) and 40(for odd) w.Expect(). - SinkContains("sink", "20", SinkCheckWithTimeout(300*time.Second)). - SinkContains("sink", "40", SinkCheckWithTimeout(300*time.Second)) + RedisSinkContains("http-idle-source-sink", "20", SinkCheckWithTimeout(300*time.Second)). + RedisSinkContains("http-idle-source-sink", "40", SinkCheckWithTimeout(300*time.Second)) done <- struct{}{} } @@ -138,9 +138,9 @@ func (is *IdleSourceSuite) TestIdleKeyedReducePipelineWithKafkaSource() { }() // since the window duration is 10 second, so the count of event will be 20, when sending data to both partitions. - w.Expect().SinkContains("sink", "20", SinkCheckWithTimeout(300*time.Second)) + w.Expect().RedisSinkContains("kafka-idle-source-sink", "20", SinkCheckWithTimeout(300*time.Second)) // since the window duration is 10 second, so the count of event will be 10, when sending data to only one partition. 
- w.Expect().SinkContains("sink", "10", SinkCheckWithTimeout(300*time.Second)) + w.Expect().RedisSinkContains("kafka-idle-source-sink", "10", SinkCheckWithTimeout(300*time.Second)) done <- struct{}{} } diff --git a/test/idle-source-e2e/testdata/idle-source-reduce-pipeline.yaml b/test/idle-source-e2e/testdata/idle-source-reduce-pipeline.yaml index 68bd2f15b0..2fc2a0a89e 100644 --- a/test/idle-source-e2e/testdata/idle-source-reduce-pipeline.yaml +++ b/test/idle-source-e2e/testdata/idle-source-reduce-pipeline.yaml @@ -50,6 +50,10 @@ spec: # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always + env: + - name: SINK_HASH_KEY + # The key is set in the format of "pipeline_name-vertex_name" + value: "http-idle-source-sink" edges: - from: in to: atoi diff --git a/test/idle-source-e2e/testdata/kafka-pipeline.yaml b/test/idle-source-e2e/testdata/kafka-pipeline.yaml index fdc4e33bf0..3af968f58d 100644 --- a/test/idle-source-e2e/testdata/kafka-pipeline.yaml +++ b/test/idle-source-e2e/testdata/kafka-pipeline.yaml @@ -53,6 +53,10 @@ spec: # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always + env: + - name: SINK_HASH_KEY + # The key is set in the format of "pipeline_name-vertex_name" + value: "kafka-idle-source-sink" edges: - from: kafka-in to: count-event diff --git a/test/jetstream-e2e/jetstream_test.go b/test/jetstream-e2e/jetstream_test.go index 40687c1f17..06aa24d05a 100644 --- a/test/jetstream-e2e/jetstream_test.go +++ b/test/jetstream-e2e/jetstream_test.go @@ -21,8 +21,9 @@ package jetstream_e2e import ( "testing" - "github.com/numaproj/numaflow/test/fixtures" "github.com/stretchr/testify/suite" + + "github.com/numaproj/numaflow/test/fixtures" ) //go:generate kubectl -n numaflow-system delete 
statefulset nats --ignore-not-found=true @@ -47,7 +48,7 @@ func (ns *JetstreamSuite) TestJetstreamSource() { // wait for all the pods to come up w.Expect().VertexPodsRunning() - w.Expect().SinkContains("out", msgPayload, fixtures.SinkCheckWithContainCount(100)) + w.Expect().RedisSinkContains("jetstream-source-e2e-out", msgPayload, fixtures.SinkCheckWithContainCount(100)) } func TestJetstreamSuite(t *testing.T) { diff --git a/test/jetstream-e2e/testdata/jetstream-source-pipeline.yaml b/test/jetstream-e2e/testdata/jetstream-source-pipeline.yaml index 1f01be02db..c1a3766530 100644 --- a/test/jetstream-e2e/testdata/jetstream-source-pipeline.yaml +++ b/test/jetstream-e2e/testdata/jetstream-source-pipeline.yaml @@ -26,6 +26,10 @@ spec: # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always + env: + - name: SINK_HASH_KEY + # The key is set in the format of "pipeline_name-vertex_name" + value: "jetstream-source-e2e-out" edges: - from: in to: p1 diff --git a/test/monovertex-e2e/monovertex_test.go b/test/monovertex-e2e/monovertex_test.go new file mode 100644 index 0000000000..51d9135c56 --- /dev/null +++ b/test/monovertex-e2e/monovertex_test.go @@ -0,0 +1,32 @@ +package monovertex_e2e + +import ( + "testing" + + "github.com/stretchr/testify/suite" + + . "github.com/numaproj/numaflow/test/fixtures" +) + +type MonoVertexSuite struct { + E2ESuite +} + +func (s *MonoVertexSuite) TestMonoVertexWithTransformer() { + w := s.Given().MonoVertex("@testdata/mono-vertex-with-transformer.yaml"). + When().CreateMonoVertexAndWait() + defer w.DeleteMonoVertexAndWait() + + w.Expect().MonoVertexPodsRunning() + + // Expect the messages to be processed by the transformer. + w.Expect().MonoVertexPodLogContains("AssignEventTime", PodLogCheckOptionWithContainer("transformer")) + + // Expect the messages to reach the sink. 
+ w.Expect().RedisSinkContains("transformer-mono-vertex", "199") + w.Expect().RedisSinkContains("transformer-mono-vertex", "200") +} + +func TestMonoVertexSuite(t *testing.T) { + suite.Run(t, new(MonoVertexSuite)) +} diff --git a/test/monovertex-e2e/testdata/mono-vertex-with-transformer.yaml b/test/monovertex-e2e/testdata/mono-vertex-with-transformer.yaml new file mode 100644 index 0000000000..66f1bb34bd --- /dev/null +++ b/test/monovertex-e2e/testdata/mono-vertex-with-transformer.yaml @@ -0,0 +1,24 @@ +apiVersion: numaflow.numaproj.io/v1alpha1 +kind: MonoVertex +metadata: + name: transformer-mono-vertex +spec: + source: + udsource: + container: + image: quay.io/numaio/numaflow-go/source-simple-source:stable + imagePullPolicy: Always + transformer: + container: + image: quay.io/numaio/numaflow-go/mapt-assign-event-time:stable + imagePullPolicy: Always + sink: + udsink: + container: + # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink + image: quay.io/numaio/numaflow-go/redis-sink:stable + imagePullPolicy: Always + env: + - name: SINK_HASH_KEY + # Use the name of the mono vertex as the key + value: "transformer-mono-vertex" \ No newline at end of file diff --git a/test/nats-e2e/nats_test.go b/test/nats-e2e/nats_test.go index 504432214e..72d2c70e99 100644 --- a/test/nats-e2e/nats_test.go +++ b/test/nats-e2e/nats_test.go @@ -45,7 +45,7 @@ func (ns *NatsSuite) TestNatsSource() { w.Expect().VertexPodsRunning() PumpNatsSubject(subject, 100, 20*time.Millisecond, 10, "test-message") - w.Expect().SinkContains("out", "test-message", SinkCheckWithContainCount(100)) + w.Expect().RedisSinkContains("nats-source-e2e-out", "test-message", SinkCheckWithContainCount(100)) } func TestNatsSuite(t *testing.T) { diff --git a/test/nats-e2e/testdata/nats-source-pipeline.yaml b/test/nats-e2e/testdata/nats-source-pipeline.yaml index bab98eeb52..3dd0d965df 100644 --- a/test/nats-e2e/testdata/nats-source-pipeline.yaml +++ 
b/test/nats-e2e/testdata/nats-source-pipeline.yaml @@ -27,6 +27,10 @@ spec: # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always + env: + - name: SINK_HASH_KEY + # The key is set in the format of "pipeline_name-vertex_name" + value: "nats-source-e2e-out" edges: - from: in to: p1 diff --git a/test/reduce-one-e2e/reduce_one_test.go b/test/reduce-one-e2e/reduce_one_test.go index 6fa78f92f1..4cf592e1cf 100644 --- a/test/reduce-one-e2e/reduce_one_test.go +++ b/test/reduce-one-e2e/reduce_one_test.go @@ -72,8 +72,8 @@ func (r *ReduceSuite) TestSimpleKeyedReducePipeline() { // since the key can be even or odd and the window duration is 10s // the sum should be 20(for even) and 40(for odd) w.Expect(). - SinkContains("sink", "40"). - SinkContains("sink", "20") + RedisSinkContains("simple-sum-sink", "40"). + RedisSinkContains("simple-sum-sink", "20") done <- struct{}{} } @@ -117,7 +117,7 @@ func (r *ReduceSuite) TestSimpleNonKeyedReducePipeline() { // since there is no key, all the messages will be assigned to same window // the sum should be 60(since the window is 10s) - w.Expect().SinkContains("sink", "60") + w.Expect().RedisSinkContains("reduce-sum-sink", "60") done <- struct{}{} } @@ -161,7 +161,7 @@ func (r *ReduceSuite) TestComplexReducePipelineKeyedNonKeyed() { // since the key can be even or odd and the first window duration is 10s(which is keyed) // and the second window duration is 60s(non-keyed) // the sum should be 180(60 + 120) - w.Expect().SinkContains("sink", "180") + w.Expect().RedisSinkContains("complex-sum-sink", "180") done <- struct{}{} } @@ -219,10 +219,10 @@ func (r *ReduceSuite) TestSimpleReducePipelineFailOverUsingWAL() { }() w.Expect(). - SinkContains("sink", "38"). - SinkContains("sink", "76"). - SinkContains("sink", "120"). - SinkContains("sink", "240") + RedisSinkContains("even-odd-sum-sink", "38"). 
+ RedisSinkContains("even-odd-sum-sink", "76"). + RedisSinkContains("even-odd-sum-sink", "120"). + RedisSinkContains("even-odd-sum-sink", "240") done <- struct{}{} } @@ -280,12 +280,12 @@ func (r *ReduceSuite) TestComplexSlidingWindowPipeline() { // we only have to extend the timeout for the first output to be produced. for the rest, // we just need to wait for the default timeout for the rest of the outputs since its synchronous w.Expect(). - SinkContains("sink", "30"). - SinkContains("sink", "60"). - SinkNotContains("sink", "80"). - SinkContains("sink", "90"). - SinkContains("sink", "180"). - SinkNotContains("sink", "210") + RedisSinkContains("complex-sliding-sum-sink", "30"). + RedisSinkContains("complex-sliding-sum-sink", "60"). + RedisSinkNotContains("complex-sliding-sum-sink", "80"). + RedisSinkContains("complex-sliding-sum-sink", "90"). + RedisSinkContains("complex-sliding-sum-sink", "180"). + RedisSinkNotContains("complex-sliding-sum-sink", "210") done <- struct{}{} } diff --git a/test/reduce-one-e2e/testdata/complex-reduce-pipeline.yaml b/test/reduce-one-e2e/testdata/complex-reduce-pipeline.yaml index 0c745be3b6..d7a56c5f0e 100644 --- a/test/reduce-one-e2e/testdata/complex-reduce-pipeline.yaml +++ b/test/reduce-one-e2e/testdata/complex-reduce-pipeline.yaml @@ -57,6 +57,10 @@ spec: # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always + env: + - name: SINK_HASH_KEY + # The key is set in the format of "pipeline_name-vertex_name" + value: "complex-sum-sink" edges: - from: in to: atoi diff --git a/test/reduce-one-e2e/testdata/complex-sliding-window-pipeline.yaml b/test/reduce-one-e2e/testdata/complex-sliding-window-pipeline.yaml index 2eafdd6f85..8c19e22d72 100644 --- a/test/reduce-one-e2e/testdata/complex-sliding-window-pipeline.yaml +++ b/test/reduce-one-e2e/testdata/complex-sliding-window-pipeline.yaml @@ -89,6 +89,10 @@ 
spec: # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always + env: + - name: SINK_HASH_KEY + # The key is set in the format of "pipeline_name-vertex_name" + value: "complex-sliding-sum-sink" edges: - from: in to: atoi diff --git a/test/reduce-one-e2e/testdata/simple-keyed-reduce-pipeline.yaml b/test/reduce-one-e2e/testdata/simple-keyed-reduce-pipeline.yaml index 51e62d0613..25e1976331 100644 --- a/test/reduce-one-e2e/testdata/simple-keyed-reduce-pipeline.yaml +++ b/test/reduce-one-e2e/testdata/simple-keyed-reduce-pipeline.yaml @@ -46,6 +46,10 @@ spec: # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always + env: + - name: SINK_HASH_KEY + # The key is set in the format of "pipeline_name-vertex_name" + value: "simple-sum-sink" edges: - from: in to: atoi diff --git a/test/reduce-one-e2e/testdata/simple-non-keyed-reduce-pipeline.yaml b/test/reduce-one-e2e/testdata/simple-non-keyed-reduce-pipeline.yaml index 65b80c2dc0..d5a932a7c5 100644 --- a/test/reduce-one-e2e/testdata/simple-non-keyed-reduce-pipeline.yaml +++ b/test/reduce-one-e2e/testdata/simple-non-keyed-reduce-pipeline.yaml @@ -45,6 +45,10 @@ spec: # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always + env: + - name: SINK_HASH_KEY + # The key is set in the format of "pipeline_name-vertex_name" + value: "reduce-sum-sink" edges: - from: in to: atoi diff --git a/test/reduce-one-e2e/testdata/simple-reduce-pipeline-wal.yaml b/test/reduce-one-e2e/testdata/simple-reduce-pipeline-wal.yaml index e77479eaad..25a6770a5d 100644 --- a/test/reduce-one-e2e/testdata/simple-reduce-pipeline-wal.yaml +++ 
b/test/reduce-one-e2e/testdata/simple-reduce-pipeline-wal.yaml @@ -44,6 +44,10 @@ spec: # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always + env: + - name: SINK_HASH_KEY + # The key is set in the format of "pipeline_name-vertex_name" + value: "even-odd-sum-sink" edges: - from: in to: atoi diff --git a/test/reduce-two-e2e/reduce_two_test.go b/test/reduce-two-e2e/reduce_two_test.go index 3af75820fd..7a1815f2c2 100644 --- a/test/reduce-two-e2e/reduce_two_test.go +++ b/test/reduce-two-e2e/reduce_two_test.go @@ -83,9 +83,9 @@ func (r *ReduceSuite) testReduceStream(lang string) { // The reduce stream application summarizes the input messages and returns the sum when the sum is greater than 100. // Since we are sending 3s, the first returned message should be 102. // There should be no other values. - w.Expect().SinkContains("sink", "102") - w.Expect().SinkNotContains("sink", "99") - w.Expect().SinkNotContains("sink", "105") + w.Expect().RedisSinkContains(pipelineName+"-sink", "102") + w.Expect().RedisSinkNotContains(pipelineName+"-sink", "99") + w.Expect().RedisSinkNotContains(pipelineName+"-sink", "105") done <- struct{}{} } @@ -132,7 +132,7 @@ func (r *ReduceSuite) TestSimpleSessionPipeline() { } }() - w.Expect().SinkContains("sink", "1000") + w.Expect().RedisSinkContains("simple-session-sum-sink", "1000") done <- struct{}{} } @@ -190,11 +190,11 @@ func (r *ReduceSuite) testSimpleSessionKeyedPipeline(lang string) { } }() - w.Expect().SinkContains("sink", "5") - w.Expect().SinkNotContains("sink", "4", SinkCheckWithTimeout(20*time.Second)) - w.Expect().SinkNotContains("sink", "3", SinkCheckWithTimeout(20*time.Second)) - w.Expect().SinkNotContains("sink", "2", SinkCheckWithTimeout(20*time.Second)) - w.Expect().SinkNotContains("sink", "1", SinkCheckWithTimeout(20*time.Second)) + w.Expect().RedisSinkContains(pipelineName+"-sink", "5") 
+ w.Expect().RedisSinkNotContains(pipelineName+"-sink", "4", SinkCheckWithTimeout(20*time.Second)) + w.Expect().RedisSinkNotContains(pipelineName+"-sink", "3", SinkCheckWithTimeout(20*time.Second)) + w.Expect().RedisSinkNotContains(pipelineName+"-sink", "2", SinkCheckWithTimeout(20*time.Second)) + w.Expect().RedisSinkNotContains(pipelineName+"-sink", "1", SinkCheckWithTimeout(20*time.Second)) done <- struct{}{} } @@ -255,11 +255,11 @@ func (r *ReduceSuite) TestSimpleSessionPipelineFailOverUsingWAL() { }() w.Expect(). - SinkContains("sink", "5"). - SinkNotContains("sink", "4", SinkCheckWithTimeout(20*time.Second)). - SinkNotContains("sink", "3", SinkCheckWithTimeout(20*time.Second)). - SinkNotContains("sink", "2", SinkCheckWithTimeout(20*time.Second)). - SinkNotContains("sink", "1", SinkCheckWithTimeout(20*time.Second)) + RedisSinkContains("simple-session-counter-go-sink", "5"). + RedisSinkNotContains("simple-session-counter-go-sink", "4", SinkCheckWithTimeout(20*time.Second)). + RedisSinkNotContains("simple-session-counter-go-sink", "3", SinkCheckWithTimeout(20*time.Second)). + RedisSinkNotContains("simple-session-counter-go-sink", "2", SinkCheckWithTimeout(20*time.Second)). 
+ RedisSinkNotContains("simple-session-counter-go-sink", "1", SinkCheckWithTimeout(20*time.Second)) done <- struct{}{} } diff --git a/test/reduce-two-e2e/testdata/reduce-stream/reduce-stream-go.yaml b/test/reduce-two-e2e/testdata/reduce-stream/reduce-stream-go.yaml index 250c6c126f..d80138366e 100644 --- a/test/reduce-two-e2e/testdata/reduce-stream/reduce-stream-go.yaml +++ b/test/reduce-two-e2e/testdata/reduce-stream/reduce-stream-go.yaml @@ -42,6 +42,10 @@ spec: # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always + env: + - name: SINK_HASH_KEY + # The key is set in the format of "pipeline_name-vertex_name" + value: "reduce-stream-go-sink" edges: - from: in to: atoi diff --git a/test/reduce-two-e2e/testdata/reduce-stream/reduce-stream-java.yaml b/test/reduce-two-e2e/testdata/reduce-stream/reduce-stream-java.yaml index ebdd2b4c8a..b94e7fd649 100644 --- a/test/reduce-two-e2e/testdata/reduce-stream/reduce-stream-java.yaml +++ b/test/reduce-two-e2e/testdata/reduce-stream/reduce-stream-java.yaml @@ -42,6 +42,10 @@ spec: # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always + env: + - name: SINK_HASH_KEY + # The key is set in the format of "pipeline_name-vertex_name" + value: "reduce-stream-java-sink" edges: - from: in to: atoi diff --git a/test/reduce-two-e2e/testdata/session-reduce/simple-session-keyed-counter-pipeline-go.yaml b/test/reduce-two-e2e/testdata/session-reduce/simple-session-keyed-counter-pipeline-go.yaml index ff2ab4a2ee..a2d88efeb6 100644 --- a/test/reduce-two-e2e/testdata/session-reduce/simple-session-keyed-counter-pipeline-go.yaml +++ b/test/reduce-two-e2e/testdata/session-reduce/simple-session-keyed-counter-pipeline-go.yaml @@ -43,6 +43,10 @@ spec: # A redis sink for e2e 
testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always + env: + - name: SINK_HASH_KEY + # The key is set in the format of "pipeline_name-vertex_name" + value: "simple-session-counter-go-sink" edges: - from: in to: even-odd diff --git a/test/reduce-two-e2e/testdata/session-reduce/simple-session-keyed-counter-pipeline-java.yaml b/test/reduce-two-e2e/testdata/session-reduce/simple-session-keyed-counter-pipeline-java.yaml index 91a9d168ed..f468710f03 100644 --- a/test/reduce-two-e2e/testdata/session-reduce/simple-session-keyed-counter-pipeline-java.yaml +++ b/test/reduce-two-e2e/testdata/session-reduce/simple-session-keyed-counter-pipeline-java.yaml @@ -48,6 +48,10 @@ spec: # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always + env: + - name: SINK_HASH_KEY + # The key is set in the format of "pipeline_name-vertex_name" + value: "simple-session-counter-java-sink" edges: - from: in to: even-odd diff --git a/test/reduce-two-e2e/testdata/session-reduce/simple-session-sum-pipeline.yaml b/test/reduce-two-e2e/testdata/session-reduce/simple-session-sum-pipeline.yaml index 57f8cdadf4..6f1259e085 100644 --- a/test/reduce-two-e2e/testdata/session-reduce/simple-session-sum-pipeline.yaml +++ b/test/reduce-two-e2e/testdata/session-reduce/simple-session-sum-pipeline.yaml @@ -36,6 +36,10 @@ spec: # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always + env: + - name: SINK_HASH_KEY + # The key is set in the format of "pipeline_name-vertex_name" + value: "simple-session-sum-sink" edges: - from: in to: compute-sum diff --git a/test/sideinputs-e2e/sideinput-e2e_sink_source_test.go 
b/test/sideinputs-e2e/sideinput-e2e_sink_source_test.go index dfec57977c..be95e4c3f5 100644 --- a/test/sideinputs-e2e/sideinput-e2e_sink_source_test.go +++ b/test/sideinputs-e2e/sideinput-e2e_sink_source_test.go @@ -40,16 +40,14 @@ func (s *SideInputUDSSuite) setUpTests(pipeLineFile string) *When { } func (s *SideInputUDSSuite) TestSinkWithSideInput() { - // the side inputs feature is not supported with redis ISBSVC if strings.ToUpper(os.Getenv("ISBSVC")) == "REDIS" { s.T().SkipNow() } - w := s.setUpTests("@testdata/sideinput_sink.yaml") + w := s.setUpTests("@testdata/sideinput-sink.yaml") defer w.DeletePipelineAndWait() - w.Expect().SinkContains("redis-uds", "e2e-even", SinkCheckWithTimeout(2*time.Minute)) - + w.Expect().RedisSinkContains("sideinput-sink-test-redis-uds", "e2e-even", SinkCheckWithTimeout(2*time.Minute)) } func (s *SideInputUDSSuite) TestSourceWithSideInput() { @@ -59,9 +57,9 @@ func (s *SideInputUDSSuite) TestSourceWithSideInput() { s.T().SkipNow() } - w := s.setUpTests("@testdata/sideinput_source.yaml") + w := s.setUpTests("@testdata/sideinput-source.yaml") defer w.DeletePipelineAndWait() - w.Expect().SinkContains("redis-uds", "e2e-even", SinkCheckWithTimeout(2*time.Minute)) + w.Expect().RedisSinkContains("sideinput-source-test-redis-uds", "e2e-even", SinkCheckWithTimeout(2*time.Minute)) } diff --git a/test/sideinputs-e2e/sideinput_test.go b/test/sideinputs-e2e/sideinput_test.go index 1b4c18b2ac..8f1e156926 100644 --- a/test/sideinputs-e2e/sideinput_test.go +++ b/test/sideinputs-e2e/sideinput_test.go @@ -70,8 +70,8 @@ func (s *SideInputSuite) TestSimpleMapSideInputPipeline() { }() // map-even-data and map-odd-data message is generated based on map and side input data. 
- w.Expect().SinkContains("sink", "map-even-data") - w.Expect().SinkContains("sink", "map-odd-data") + w.Expect().RedisSinkContains("map-sideinput-pipeline-sink", "map-even-data") + w.Expect().RedisSinkContains("map-sideinput-pipeline-sink", "map-odd-data") done <- struct{}{} } @@ -114,7 +114,7 @@ func (s *SideInputSuite) TestSimpleReduceSideInputPipeline() { }() // here reduce-side-input text is generated based on reduce and side input data. - w.Expect().SinkContains("sink", "reduce_sideinput") + w.Expect().RedisSinkContains("reduce-sideinput-pipeline-sink", "reduce_sideinput") done <- struct{}{} } diff --git a/test/sideinputs-e2e/testdata/map-sideinput-pipeline.yaml b/test/sideinputs-e2e/testdata/map-sideinput-pipeline.yaml index 30079de488..cb3ea152ed 100644 --- a/test/sideinputs-e2e/testdata/map-sideinput-pipeline.yaml +++ b/test/sideinputs-e2e/testdata/map-sideinput-pipeline.yaml @@ -32,6 +32,10 @@ spec: # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always + env: + - name: SINK_HASH_KEY + # The key is set in the format of "pipeline_name-vertex_name" + value: "map-sideinput-pipeline-sink" edges: - from: in to: si-e2e diff --git a/test/sideinputs-e2e/testdata/reduce-sideinput-pipeline.yaml b/test/sideinputs-e2e/testdata/reduce-sideinput-pipeline.yaml index b5f253c0fe..028a20125e 100644 --- a/test/sideinputs-e2e/testdata/reduce-sideinput-pipeline.yaml +++ b/test/sideinputs-e2e/testdata/reduce-sideinput-pipeline.yaml @@ -49,6 +49,10 @@ spec: # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always + env: + - name: SINK_HASH_KEY + # The key is set in the format of "pipeline_name-vertex_name" + value: "reduce-sideinput-pipeline-sink" edges: - from: in to: atoi diff --git 
a/test/sideinputs-e2e/testdata/sideinput_sink.yaml b/test/sideinputs-e2e/testdata/sideinput-sink.yaml similarity index 89% rename from test/sideinputs-e2e/testdata/sideinput_sink.yaml rename to test/sideinputs-e2e/testdata/sideinput-sink.yaml index a9cd439d4a..f172b4ec31 100644 --- a/test/sideinputs-e2e/testdata/sideinput_sink.yaml +++ b/test/sideinputs-e2e/testdata/sideinput-sink.yaml @@ -26,8 +26,11 @@ spec: udsink: container: # see https://github.com/numaproj/numaflow-go/tree/main/pkg/sideinput/examples/sink_sideinput - image: quay.io/numaio/numaflow-go/redis-sink-with-sideinput:stable + image: quay.io/numaio/numaflow-go/redis-sink-with-sideinput:hash imagePullPolicy: Always + env: + - name: SINK_HASH_KEY + value: "sideinput-sink-test-redis-uds" sideInputs: - myticker diff --git a/test/sideinputs-e2e/testdata/sideinput_source.yaml b/test/sideinputs-e2e/testdata/sideinput-source.yaml similarity index 91% rename from test/sideinputs-e2e/testdata/sideinput_source.yaml rename to test/sideinputs-e2e/testdata/sideinput-source.yaml index 62754d05b1..6c21edd717 100644 --- a/test/sideinputs-e2e/testdata/sideinput_source.yaml +++ b/test/sideinputs-e2e/testdata/sideinput-source.yaml @@ -31,6 +31,9 @@ spec: # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always + env: + - name: SINK_HASH_KEY + value: "sideinput-source-test-redis-uds" sideInputs: - myticker edges: diff --git a/test/transformer-e2e/testdata/source-filtering.yaml b/test/transformer-e2e/testdata/source-filtering.yaml index 670e9f87fd..549fddec63 100644 --- a/test/transformer-e2e/testdata/source-filtering.yaml +++ b/test/transformer-e2e/testdata/source-filtering.yaml @@ -19,6 +19,10 @@ spec: # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: 
Always + env: + - name: SINK_HASH_KEY + # The key is set in the format of "pipeline_name-vertex_name" + value: "source-filtering-out" edges: - from: in to: out diff --git a/test/transformer-e2e/transformer_test.go b/test/transformer-e2e/transformer_test.go index 8f0a226c79..888b3ee3fb 100644 --- a/test/transformer-e2e/transformer_test.go +++ b/test/transformer-e2e/transformer_test.go @@ -61,11 +61,11 @@ func (s *TransformerSuite) TestSourceFiltering() { SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte(expect3))). SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte(expect4))) - w.Expect().SinkContains("out", expect3) - w.Expect().SinkContains("out", expect4) - w.Expect().SinkNotContains("out", expect0) - w.Expect().SinkNotContains("out", expect1) - w.Expect().SinkNotContains("out", expect2) + w.Expect().RedisSinkContains("source-filtering-out", expect3) + w.Expect().RedisSinkContains("source-filtering-out", expect4) + w.Expect().RedisSinkNotContains("source-filtering-out", expect0) + w.Expect().RedisSinkNotContains("source-filtering-out", expect1) + w.Expect().RedisSinkNotContains("source-filtering-out", expect2) } func (s *TransformerSuite) TestTimeExtractionFilter() { @@ -84,7 +84,7 @@ func (s *TransformerSuite) TestTimeExtractionFilter() { testMsgTwo := `{"id": 101, "msg": "test", "time": "2021-01-18T21:54:42.123Z", "desc": "A bad ID."}` w.SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte(testMsgTwo))) - w.Expect().SinkNotContains("out", testMsgTwo) + w.Expect().RedisSinkNotContains("time-extraction-filter-out", testMsgTwo) } func (s *TransformerSuite) TestBuiltinEventTimeExtractor() { From c14abd5de5cfc4d88f396c17231233b4e9fc2c5f Mon Sep 17 00:00:00 2001 From: Vedant Gupta <49195734+veds-g@users.noreply.github.com> Date: Fri, 16 Aug 2024 09:23:59 +0530 Subject: [PATCH 004/188] feat: Mono vertex UI (#1941) Signed-off-by: veds-g --- ui/src/components/common/Routes/index.tsx | 8 +- 
.../partials/VertexDetails/index.tsx | 28 +- .../partials/ProcessingRates/index.tsx | 14 +- .../partials/VertexUpdate/index.tsx | 8 +- ui/src/components/pages/MonoVertex/index.tsx | 224 +++++++ .../partials/MonoVertexStatus/index.tsx | 58 ++ .../partials/MonoVertexStatus/style.css | 11 + .../MonoVertexSummaryStatus/index.tsx | 135 ++++ .../MonoVertexSummaryStatus/style.css | 20 + ui/src/components/pages/MonoVertex/style.css | 14 + ui/src/components/pages/Namespace/index.tsx | 27 +- .../partials/MonoVertexCard/index.tsx | 580 ++++++++++++++++++ .../partials/MonoVertexCard/style.css | 10 + .../PipelineListing/index.tsx | 143 +++-- .../NamespaceListingWrapper/PipelinesTypes.ts | 14 + .../NamespaceListingWrapper/index.tsx | 2 + ui/src/components/pages/Pipeline/index.tsx | 3 +- .../pages/Pipeline/partials/Graph/index.tsx | 43 +- .../Graph/partials/CustomNode/index.tsx | 132 +++- .../Graph/partials/CustomNode/style.css | 261 ++++---- .../Graph/partials/NodeInfo/index.tsx | 1 + .../partials/NodeInfo/partials/Pods/index.tsx | 3 +- .../plugin/NumaflowMonitorApp/App.tsx | 8 - ui/src/components/plugin/Routes/Routes.tsx | 6 +- ui/src/images/monoVertex.svg | 28 + ui/src/images/transformer.svg | 10 + ui/src/types/declarations/graph.d.ts | 2 + ui/src/types/declarations/namespace.d.ts | 2 + ui/src/types/declarations/pipeline.d.ts | 39 ++ ui/src/types/declarations/pods.d.ts | 1 + .../fetchWrappers/clusterSummaryFetch.ts | 16 +- ui/src/utils/fetchWrappers/monoVertexFetch.ts | 119 ++++ .../fetchWrappers/monoVertexUpdateFetch.ts | 92 +++ .../fetchWrappers/namespaceK8sEventsFetch.ts | 8 +- .../fetchWrappers/namespaceSummaryFetch.ts | 83 ++- .../utils/fetcherHooks/monoVertexViewFetch.ts | 278 +++++++++ .../utils/fetcherHooks/podsViewFetch.test.ts | 3 + ui/src/utils/fetcherHooks/podsViewFetch.ts | 12 +- 38 files changed, 2192 insertions(+), 254 deletions(-) create mode 100644 ui/src/components/pages/MonoVertex/index.tsx create mode 100644 
ui/src/components/pages/MonoVertex/partials/MonoVertexStatus/index.tsx create mode 100644 ui/src/components/pages/MonoVertex/partials/MonoVertexStatus/style.css create mode 100644 ui/src/components/pages/MonoVertex/partials/MonoVertexSummaryStatus/index.tsx create mode 100644 ui/src/components/pages/MonoVertex/partials/MonoVertexSummaryStatus/style.css create mode 100644 ui/src/components/pages/MonoVertex/style.css create mode 100644 ui/src/components/pages/Namespace/partials/MonoVertexCard/index.tsx create mode 100644 ui/src/components/pages/Namespace/partials/MonoVertexCard/style.css create mode 100644 ui/src/images/monoVertex.svg create mode 100644 ui/src/images/transformer.svg create mode 100644 ui/src/utils/fetchWrappers/monoVertexFetch.ts create mode 100644 ui/src/utils/fetchWrappers/monoVertexUpdateFetch.ts create mode 100644 ui/src/utils/fetcherHooks/monoVertexViewFetch.ts diff --git a/ui/src/components/common/Routes/index.tsx b/ui/src/components/common/Routes/index.tsx index ec6f22be67..d52f24c574 100644 --- a/ui/src/components/common/Routes/index.tsx +++ b/ui/src/components/common/Routes/index.tsx @@ -2,6 +2,7 @@ import { useLocation } from "react-router-dom"; import { Cluster } from "../../pages/Cluster"; import { Namespaces } from "../../pages/Namespace"; import { Pipeline } from "../../pages/Pipeline"; +import { MonoVertex } from "../../pages/MonoVertex"; export interface RoutesProps { managedNamespace?: string; @@ -12,11 +13,14 @@ export function Routes(props: RoutesProps) { const query = new URLSearchParams(location.search); const ns = query.get("namespace") || ""; const pl = query.get("pipeline") || ""; + const type = query.get("type") || ""; const { managedNamespace } = props; if (managedNamespace) { - return pl ? ( + return type ? ( + + ) : pl ? 
( ) : ( @@ -25,6 +29,8 @@ export function Routes(props: RoutesProps) { if (ns === "" && pl === "") { return ; + } else if (type !== "") { + return ; } else if (pl !== "") { return ; } else { diff --git a/ui/src/components/common/SlidingSidebar/partials/VertexDetails/index.tsx b/ui/src/components/common/SlidingSidebar/partials/VertexDetails/index.tsx index 5d18a79eb3..6113140894 100644 --- a/ui/src/components/common/SlidingSidebar/partials/VertexDetails/index.tsx +++ b/ui/src/components/common/SlidingSidebar/partials/VertexDetails/index.tsx @@ -12,7 +12,8 @@ import { CloseModal } from "../CloseModal"; import sourceIcon from "../../../../../images/source.png"; import sinkIcon from "../../../../../images/sink.png"; import mapIcon from "../../../../../images/map.png"; -import reducIcon from "../../../../../images/reduce.png"; +import reduceIcon from "../../../../../images/reduce.png"; +import monoVertexIcon from "../../../../../images/monoVertex.svg"; import "./style.css"; @@ -27,6 +28,7 @@ export enum VertexType { SINK, MAP, REDUCE, + MONOVERTEX, } export interface VertexDetailsProps { @@ -71,6 +73,8 @@ export function VertexDetails({ setVertexType(VertexType.MAP); } else if (type === "sink") { setVertexType(VertexType.SINK); + } else if (type === "monoVertex") { + setVertexType(VertexType.MONOVERTEX); } setVertexSpec(vertexSpecs); }, [vertexSpecs, type]); @@ -98,7 +102,7 @@ export function VertexDetails({ return ( reduce vertex @@ -127,6 +131,17 @@ export function VertexDetails({ Sink Vertex ); + case VertexType.MONOVERTEX: + return ( + + mono vertex + Mono Vertex + + ); default: return ( @@ -246,6 +261,7 @@ export function VertexDetails({ namespaceId={namespaceId} pipelineId={pipelineId} vertexId={vertexId} + type={type} /> )} @@ -261,6 +277,7 @@ export function VertexDetails({ pipelineId={pipelineId} vertexId={vertexId} vertexSpec={vertexSpec} + type={type} setModalOnClose={handleUpdateModalClose} refresh={refresh} /> @@ -276,6 +293,7 @@ export function 
VertexDetails({ )} @@ -288,8 +306,10 @@ export function VertexDetails({ {tabValue === K8S_EVENTS_TAB_INDEX && ( diff --git a/ui/src/components/common/SlidingSidebar/partials/VertexDetails/partials/ProcessingRates/index.tsx b/ui/src/components/common/SlidingSidebar/partials/VertexDetails/partials/ProcessingRates/index.tsx index 8bcd790234..87b7c0574f 100644 --- a/ui/src/components/common/SlidingSidebar/partials/VertexDetails/partials/ProcessingRates/index.tsx +++ b/ui/src/components/common/SlidingSidebar/partials/VertexDetails/partials/ProcessingRates/index.tsx @@ -13,12 +13,14 @@ import "./style.css"; export interface ProcessingRatesProps { vertexId: string; pipelineId: string; + type: string; vertexMetrics: any; } export function ProcessingRates({ vertexMetrics, pipelineId, + type, vertexId, }: ProcessingRatesProps) { const [foundRates, setFoundRates] = useState([]); @@ -27,13 +29,15 @@ export function ProcessingRates({ if (!vertexMetrics || !pipelineId || !vertexId) { return; } - const vertexData = vertexMetrics[vertexId]; + const vertexData = + type === "monoVertex" ? [vertexMetrics] : vertexMetrics[vertexId]; if (!vertexData) { return; } const rates: PipelineVertexMetric[] = []; vertexData.forEach((item: any, index: number) => { - if (item.pipeline !== pipelineId || !item.processingRates) { + const key = type === "monoVertex" ? 
"monoVertex" : "pipeline"; + if (item?.[key] !== pipelineId || !item.processingRates) { return; // continue } rates.push({ @@ -64,7 +68,7 @@ export function ProcessingRates({ - Partition + {type !== "monoVertex" && Partition} 1m 5m 15m @@ -81,7 +85,9 @@ export function ProcessingRates({ {!!foundRates.length && foundRates.map((metric) => ( - {metric.partition} + {type !== "monoVertex" && ( + {metric.partition} + )} {metric.oneM}/sec {metric.fiveM}/sec {metric.fifteenM}/sec diff --git a/ui/src/components/common/SlidingSidebar/partials/VertexDetails/partials/VertexUpdate/index.tsx b/ui/src/components/common/SlidingSidebar/partials/VertexDetails/partials/VertexUpdate/index.tsx index 1163263cbc..a8b836a127 100644 --- a/ui/src/components/common/SlidingSidebar/partials/VertexDetails/partials/VertexUpdate/index.tsx +++ b/ui/src/components/common/SlidingSidebar/partials/VertexDetails/partials/VertexUpdate/index.tsx @@ -20,6 +20,7 @@ export interface VertexUpdateProps { pipelineId: string; vertexId: string; vertexSpec: any; + type: string; setModalOnClose?: (props: SpecEditorModalProps | undefined) => void; refresh: () => void; } @@ -30,6 +31,7 @@ export function VertexUpdate({ vertexId, vertexSpec, setModalOnClose, + type: vertexType, refresh, }: VertexUpdateProps) { const [loading, setLoading] = useState(false); @@ -225,7 +227,11 @@ export function VertexUpdate({ > >( + new Map() +); + +// TODO add health status + processing rate once implemented +export function MonoVertex({ namespaceId: nsIdProp }: MonoVertexProps) { + const location = useLocation(); + const query = new URLSearchParams(location.search); + const pipelineId = query.get("pipeline") || ""; + const nsIdParam = query.get("namespace") || ""; + const namespaceId = nsIdProp || nsIdParam; + const { addError, setSidebarProps } = useContext(AppContext); + const { + data, + loading: summaryLoading, + error, + refresh: summaryRefresh, + } = useMonoVertexSummaryFetch({ namespaceId, pipelineId, addError }); + + const { 
+ pipeline, + vertices, + pipelineErr, + loading, + refresh: graphRefresh, + } = useMonoVertexViewFetch(namespaceId, pipelineId, addError); + + const refresh = useCallback(() => { + graphRefresh(); + summaryRefresh(); + }, [graphRefresh, summaryRefresh]); + + const handleK8sEventsClick = useCallback(() => { + if (!namespaceId || !pipelineId || !setSidebarProps) { + return; + } + const vertexMap = new Map(); + setSidebarProps({ + type: SidebarType.NAMESPACE_K8s, + k8sEventsProps: { + namespaceId, + pipelineId: `${pipelineId} (MonoVertex)`, + headerText: "Pipeline K8s Events", + vertexFilterOptions: vertexMap, + }, + }); + }, [namespaceId, pipelineId, setSidebarProps, vertices]); + + const summarySections: SummarySection[] = useMemo(() => { + if (summaryLoading) { + return [ + { + type: SummarySectionType.CUSTOM, + customComponent: ( + + ), + }, + ]; + } + if (error) { + return [ + { + type: SummarySectionType.CUSTOM, + customComponent: ( + + ), + }, + ]; + } + if (!data) { + return []; + } + const pipelineData = data?.monoVertexData; + const pipelineStatus = pipelineData?.monoVertex?.status?.phase || UNKNOWN; + return [ + // pipeline collection + { + type: SummarySectionType.CUSTOM, + customComponent: ( + + ), + }, + { + type: SummarySectionType.CUSTOM, + customComponent: ( + + ), + }, + { + type: SummarySectionType.CUSTOM, + customComponent: ( +
+ K8s Events +
+ ), + }, + ]; + }, [summaryLoading, error, data, pipelineId, refresh]); + + const content = useMemo(() => { + if (pipelineErr) { + return ( + + + + + + ); + } + if (loading) { + return ( + + + + ); + } + return ( + + + + ); + }, [ + pipelineErr, + loading, + vertices, + pipeline, + namespaceId, + pipelineId, + refresh, + ]); + + return ( + + {content} + + } + /> + ); +} diff --git a/ui/src/components/pages/MonoVertex/partials/MonoVertexStatus/index.tsx b/ui/src/components/pages/MonoVertex/partials/MonoVertexStatus/index.tsx new file mode 100644 index 0000000000..04ebb4621a --- /dev/null +++ b/ui/src/components/pages/MonoVertex/partials/MonoVertexStatus/index.tsx @@ -0,0 +1,58 @@ +import React from "react"; +import Box from "@mui/material/Box"; +import { IconsStatusMap, StatusString } from "../../../../../utils"; + +import "./style.css"; + +export interface MonoVertexStatusProps { + status: any; +} + +export function MonoVertexStatus({ status }: MonoVertexStatusProps) { + return ( + + + STATUS + + + + {status} + {"healthy"} + + + {StatusString[status]} + + + {StatusString["healthy"]} + + + + + + + ); +} diff --git a/ui/src/components/pages/MonoVertex/partials/MonoVertexStatus/style.css b/ui/src/components/pages/MonoVertex/partials/MonoVertexStatus/style.css new file mode 100644 index 0000000000..ef1f338503 --- /dev/null +++ b/ui/src/components/pages/MonoVertex/partials/MonoVertexStatus/style.css @@ -0,0 +1,11 @@ +.pipeline-logo { + width: 2.4rem; +} + +.pipeline-logo-text { + color: #3C4348; + font-size: 1.4rem; + font-style: normal; + font-weight: 400; + margin-top: 0.2rem; +} diff --git a/ui/src/components/pages/MonoVertex/partials/MonoVertexSummaryStatus/index.tsx b/ui/src/components/pages/MonoVertex/partials/MonoVertexSummaryStatus/index.tsx new file mode 100644 index 0000000000..83b570d188 --- /dev/null +++ b/ui/src/components/pages/MonoVertex/partials/MonoVertexSummaryStatus/index.tsx @@ -0,0 +1,135 @@ +import React, { useCallback, useContext } from "react"; 
+import Box from "@mui/material/Box"; +import { useLocation } from "react-router-dom"; +import { SidebarType } from "../../../../common/SlidingSidebar"; +import { AppContextProps } from "../../../../../types/declarations/app"; +import { AppContext } from "../../../../../App"; +import { ViewType } from "../../../../common/SpecEditor"; + +import "./style.css"; + +export interface MonoVertexSummaryProps { + pipelineId: any; + pipeline: any; + refresh: () => void; +} + +export function MonoVertexSummaryStatus({ + pipelineId, + pipeline, + refresh, +}: MonoVertexSummaryProps) { + const location = useLocation(); + const query = new URLSearchParams(location.search); + const namespaceId = query.get("namespace") || ""; + const { setSidebarProps } = useContext(AppContext); + + const handleUpdateComplete = useCallback(() => { + refresh(); + if (!setSidebarProps) { + return; + } + // Close sidebar + setSidebarProps(undefined); + }, [setSidebarProps, refresh]); + + const handleSpecClick = useCallback(() => { + if (!namespaceId || !setSidebarProps) { + return; + } + setSidebarProps({ + type: SidebarType.PIPELINE_UPDATE, + specEditorProps: { + titleOverride: `View Pipeline: ${pipelineId}`, + initialYaml: pipeline, + namespaceId, + pipelineId, + viewType: ViewType.READ_ONLY, + // viewType: isReadOnly ? ViewType.READ_ONLY : ViewType.TOGGLE_EDIT, + onUpdateComplete: handleUpdateComplete, + }, + }); + }, [ + namespaceId, + pipelineId, + setSidebarProps, + pipeline, + handleUpdateComplete, + ]); + + return ( + + + SUMMARY + + +
+ Created On: +
+
+ + Last Updated On:{" "} + +
+ {/*
*/} + {/* Last Refresh: */} + {/* 2023-12-07T02:02:00Z*/} + {/*
*/} +
+ +
+ {pipeline?.metadata?.creationTimestamp} +
+
+ {pipeline?.status?.lastUpdated} +
+ {/*
*/} + {/* 2023-12-07T02:02:00Z*/} + {/*
*/} +
+ +
+ +
+ {`View Specs`} +
+
+
+
+
+
+
+ ); +} diff --git a/ui/src/components/pages/MonoVertex/partials/MonoVertexSummaryStatus/style.css b/ui/src/components/pages/MonoVertex/partials/MonoVertexSummaryStatus/style.css new file mode 100644 index 0000000000..eeaffa9d73 --- /dev/null +++ b/ui/src/components/pages/MonoVertex/partials/MonoVertexSummaryStatus/style.css @@ -0,0 +1,20 @@ +.pipeline-summary-text { + color: #393A3D; + font-size: 1.4rem; + font-style: normal; + font-weight: 400; + line-height: 2.35rem; +} + +.pipeline-summary-subtitle { + font-weight: 600; +} + +.pipeline-onclick-events { + color: #393A3D; + font-size: 1.4rem; + font-style: normal; + font-weight: 600; + text-decoration: underline; + cursor: pointer; +} diff --git a/ui/src/components/pages/MonoVertex/style.css b/ui/src/components/pages/MonoVertex/style.css new file mode 100644 index 0000000000..abd662f23a --- /dev/null +++ b/ui/src/components/pages/MonoVertex/style.css @@ -0,0 +1,14 @@ +.react-flow__edge-textwrapper { + cursor: pointer; +} + +.react-flow__edge-textbg { + fill: #fafafa; +} + +.pipeline-status-title { + font-weight: 600; + color: #393A3D; + font-size: 1.3rem; + font-style: normal; +} diff --git a/ui/src/components/pages/Namespace/index.tsx b/ui/src/components/pages/Namespace/index.tsx index 07d38682f5..a6fe5a1284 100644 --- a/ui/src/components/pages/Namespace/index.tsx +++ b/ui/src/components/pages/Namespace/index.tsx @@ -50,12 +50,19 @@ export function Namespaces({ namespaceId: nsIdProp }: NamespaceProps) { const nsIdParam = query.get("namespace") || ""; const namespaceId = nsIdProp || nsIdParam; const { setSidebarProps, addError } = useContext(AppContext); - const { data, pipelineRawData, isbRawData, loading, error, refresh } = - useNamespaceSummaryFetch({ - namespace: namespaceId || "", - loadOnRefresh: false, - addError, - }); + const { + data, + pipelineRawData, + isbRawData, + monoVertexRawData, + loading, + error, + refresh, + } = useNamespaceSummaryFetch({ + namespace: namespaceId || "", + loadOnRefresh: 
false, + addError, + }); const handleK8sEventsClick = useCallback(() => { if (!namespaceId || !setSidebarProps) { @@ -75,6 +82,11 @@ export function Namespaces({ namespaceId: nsIdProp }: NamespaceProps) { }); }); } + if (monoVertexRawData) { + Object.keys(monoVertexRawData).forEach((pipelineId) => { + pipelines.push(`${pipelineId} (MonoVertex)`); + }); + } setSidebarProps({ type: SidebarType.NAMESPACE_K8s, k8sEventsProps: { @@ -83,7 +95,7 @@ export function Namespaces({ namespaceId: nsIdProp }: NamespaceProps) { vertexFilterOptions: vertexMap, }, }); - }, [namespaceId, setSidebarProps, pipelineRawData]); + }, [namespaceId, setSidebarProps, pipelineRawData, monoVertexRawData]); const defaultPipelinesData = useMemo(() => { return [ @@ -280,6 +292,7 @@ export function Namespaces({ namespaceId: nsIdProp }: NamespaceProps) { data={data ? data : defaultNamespaceSummaryData} pipelineData={pipelineRawData} isbData={isbRawData} + monoVertexData={monoVertexRawData} refresh={refresh} /> ); diff --git a/ui/src/components/pages/Namespace/partials/MonoVertexCard/index.tsx b/ui/src/components/pages/Namespace/partials/MonoVertexCard/index.tsx new file mode 100644 index 0000000000..80cd548716 --- /dev/null +++ b/ui/src/components/pages/Namespace/partials/MonoVertexCard/index.tsx @@ -0,0 +1,580 @@ +import React, { useCallback, useContext, useEffect, useState } from "react"; +import Paper from "@mui/material/Paper"; +import { Link } from "react-router-dom"; +import { PipelineCardProps } from "../../../../../types/declarations/namespace"; +import { + Box, + Button, + CircularProgress, + Grid, + MenuItem, + Select, + SelectChangeEvent, +} from "@mui/material"; +import ArrowForwardIcon from "@mui/icons-material/ArrowForward"; +import { DeleteModal } from "../DeleteModal"; +import { + getAPIResponseError, + IconsStatusMap, + StatusString, + timeAgo, + UNKNOWN, + PAUSED, + RUNNING, + // PAUSING, + DELETING, + getBaseHref, +} from "../../../../../utils"; +import { useMonoVertexUpdateFetch 
} from "../../../../../utils/fetchWrappers/monoVertexUpdateFetch"; +import { AppContextProps } from "../../../../../types/declarations/app"; +import { AppContext } from "../../../../../App"; +import { SidebarType } from "../../../../common/SlidingSidebar"; +import { ViewType } from "../../../../common/SpecEditor"; +import pipelineIcon from "../../../../../images/pipeline.png"; + +import "./style.css"; + +export interface DeleteProps { + type: "pipeline"; + pipelineId?: string; +} + +export function MonoVertexCard({ + namespace, + data, + statusData, + refresh, +}: PipelineCardProps) { + const { setSidebarProps, host, isReadOnly } = + useContext(AppContext); + const [viewOption] = useState("view"); + const [editOption] = useState("edit"); + const [deleteOption] = useState("delete"); + const [deleteProps, setDeleteProps] = useState(); + const [statusPayload, setStatusPayload] = useState(undefined); + const [error, setError] = useState(undefined); + const [successMessage, setSuccessMessage] = useState( + undefined + ); + const [timerDateStamp, setTimerDateStamp] = useState(undefined); + const [timer, setTimer] = useState(undefined); + const [pipelineAbleToLoad, setPipelineAbleToLoad] = useState(false); + const { pipelineAvailable } = useMonoVertexUpdateFetch({ + namespaceId: namespace, + pipelineId: data?.name, + active: !pipelineAbleToLoad, + refreshInterval: 5000, // 5 seconds + }); + + useEffect(() => { + if (pipelineAvailable) { + setPipelineAbleToLoad(true); + } + }, [pipelineAvailable]); + + const handleUpdateComplete = useCallback(() => { + refresh(); + setPipelineAbleToLoad(false); + if (!setSidebarProps) { + return; + } + // Close sidebar + setSidebarProps(undefined); + }, [setSidebarProps, refresh]); + + const handleViewChange = useCallback( + (event: SelectChangeEvent) => { + if (event.target.value === "pipeline" && setSidebarProps) { + setSidebarProps({ + type: SidebarType.PIPELINE_UPDATE, + specEditorProps: { + titleOverride: `View Pipeline: 
${data?.name}`, + initialYaml: statusData?.monoVertex, + namespaceId: namespace, + pipelineId: data?.name, + viewType: ViewType.READ_ONLY, + onUpdateComplete: handleUpdateComplete, + }, + }); + } + }, + [setSidebarProps, handleUpdateComplete, data] + ); + + const handleEditChange = useCallback( + (event: SelectChangeEvent) => { + if (event.target.value === "pipeline" && setSidebarProps) { + setSidebarProps({ + type: SidebarType.PIPELINE_UPDATE, + specEditorProps: { + initialYaml: statusData?.monoVertex, + namespaceId: namespace, + pipelineId: data?.name, + viewType: ViewType.EDIT, + onUpdateComplete: handleUpdateComplete, + }, + }); + } + }, + [setSidebarProps, handleUpdateComplete, data] + ); + + const handleDeleteChange = useCallback( + (event: SelectChangeEvent) => { + if (event.target.value === "pipeline") { + setDeleteProps({ + type: "pipeline", + pipelineId: data?.name, + }); + } + }, + [data] + ); + + const handleDeleteComplete = useCallback(() => { + refresh(); + setDeleteProps(undefined); + }, [refresh]); + + const handeDeleteCancel = useCallback(() => { + setDeleteProps(undefined); + }, []); + + const pipelineStatus = statusData?.monoVertex?.status?.phase || UNKNOWN; + const handleTimer = useCallback(() => { + const dateString = new Date().toISOString(); + const time = timeAgo(dateString); + setTimerDateStamp(time); + const pauseTimer = setInterval(() => { + const time = timeAgo(dateString); + setTimerDateStamp(time); + }, 1000); + setTimer(pauseTimer); + }, []); + + const handlePlayClick = useCallback(() => { + handleTimer(); + setStatusPayload({ + spec: { + lifecycle: { + desiredPhase: RUNNING, + }, + }, + }); + }, []); + + const handlePauseClick = useCallback(() => { + handleTimer(); + setStatusPayload({ + spec: { + lifecycle: { + desiredPhase: PAUSED, + }, + }, + }); + }, []); + + useEffect(() => { + const patchStatus = async () => { + try { + const response = await fetch( + `${host}${getBaseHref()}/api/v1/namespaces/${namespace}/mono-vertices/${ + 
data?.name + }`, + { + method: "PATCH", + headers: { + "Content-Type": "application/json", + }, + body: JSON.stringify(statusPayload), + } + ); + const error = await getAPIResponseError(response); + if (error) { + setError(error); + } else { + refresh(); + setSuccessMessage("Status updated successfully"); + } + } catch (e: any) { + setError(e); + } + }; + if (statusPayload) { + patchStatus(); + } + }, [statusPayload, host]); + + useEffect(() => { + if ( + statusPayload?.spec?.lifecycle?.desiredPhase === PAUSED && + statusData?.monoVertex?.status?.phase === PAUSED + ) { + clearInterval(timer); + setStatusPayload(undefined); + } + if ( + statusPayload?.spec?.lifecycle?.desiredPhase === RUNNING && + statusData?.monoVertex?.status?.phase === RUNNING + ) { + clearInterval(timer); + setStatusPayload(undefined); + } + }, [statusData]); + + return ( + <> + + + pipeline icon + + + {data?.name} + + + {!isReadOnly && ( + + {error && statusPayload ? ( +
+ {error} +
+ ) : successMessage && + statusPayload && + ((statusPayload.spec.lifecycle.desiredPhase === PAUSED && + statusData?.monoVertex?.status?.phase !== PAUSED) || + (statusPayload.spec.lifecycle.desiredPhase === RUNNING && + statusData?.monoVertex?.status?.phase !== RUNNING)) ? ( +
+ {" "} + + + {statusPayload?.spec?.lifecycle?.desiredPhase === PAUSED + ? "Pipeline Pausing..." + : "Pipeline Resuming..."} + + + {timerDateStamp} + + +
+ ) : ( + "" + )} + + + +
+ )} + + {pipelineAbleToLoad ? ( + + ) : ( + + )} + +
+ + + + + Status: + Health: + + + Status + Health + + + {StatusString[pipelineStatus]} + {StatusString["healthy"]} + + + + + {isReadOnly && ( + + + + )} + {!isReadOnly && ( + + + + )} + {!isReadOnly && ( + + + + )} + + + {deleteProps && ( + + )} +
+ + ); +} diff --git a/ui/src/components/pages/Namespace/partials/MonoVertexCard/style.css b/ui/src/components/pages/Namespace/partials/MonoVertexCard/style.css new file mode 100644 index 0000000000..c0ad3f86d6 --- /dev/null +++ b/ui/src/components/pages/Namespace/partials/MonoVertexCard/style.css @@ -0,0 +1,10 @@ +.pipeline-card-name { + font-size: 2rem; + font-style: normal; + font-weight: 400; + color: #000; +} + +.pipeline-card-icon { + width: 2.4rem; +} \ No newline at end of file diff --git a/ui/src/components/pages/Namespace/partials/NamespaceListingWrapper/PipelineListing/index.tsx b/ui/src/components/pages/Namespace/partials/NamespaceListingWrapper/PipelineListing/index.tsx index 3ea6280f0e..18f48cd3f0 100644 --- a/ui/src/components/pages/Namespace/partials/NamespaceListingWrapper/PipelineListing/index.tsx +++ b/ui/src/components/pages/Namespace/partials/NamespaceListingWrapper/PipelineListing/index.tsx @@ -12,12 +12,14 @@ import { UNKNOWN, } from "../../../../../../utils"; import { ListingProps } from "../ISBListing"; -import { PipelineData } from "../PipelinesTypes"; +import { MonoVertexData, PipelineData } from "../PipelinesTypes"; import { PipelineCard } from "../../PipelineCard"; +import { MonoVertexCard } from "../../MonoVertexCard"; interface PipelineListingProps extends ListingProps { pipelineData: Map | undefined; isbData: any; + monoVertexData: Map | undefined; totalCount: number; } @@ -31,10 +33,11 @@ export function PipelineListing({ totalCount, search, isbData, + monoVertexData, }: PipelineListingProps) { - const [filteredPipelines, setFilteredPipelines] = useState( - [] - ); + const [filteredPipelines, setFilteredPipelines] = useState< + (PipelineData | MonoVertexData)[] + >([]); const [page, setPage] = useState(1); const [totalPages, setTotalPages] = useState( Math.ceil(totalCount / MAX_PAGE_SIZE) @@ -72,18 +75,30 @@ export function PipelineListing({ margin: "0.8rem 0 2.4rem 0", }} > - {filteredPipelines.map((p: PipelineData) => { - const 
isbName = pipelineData - ? pipelineData[p.name]?.pipeline?.spec - ?.interStepBufferServiceName || DEFAULT_ISB - : DEFAULT_ISB; + {filteredPipelines.map((p: PipelineData | MonoVertexData) => { + if (p?.pipeline) { + const isbName = pipelineData + ? pipelineData[p.name]?.pipeline?.spec + ?.interStepBufferServiceName || DEFAULT_ISB + : DEFAULT_ISB; + return ( + + + + ); + } return ( - - + @@ -91,52 +106,84 @@ export function PipelineListing({ })} ); - }, [filteredPipelines, namespace, refresh]); + }, [ + filteredPipelines, + namespace, + pipelineData, + isbData, + monoVertexData, + refresh, + ]); useEffect(() => { - let filtered: PipelineData[] = Object.values( + let filtered: (PipelineData | MonoVertexData)[] = Object.values( pipelineData ? pipelineData : {} ); + filtered = [ + ...filtered, + ...Object.values(monoVertexData ? monoVertexData : {}), + ]; if (search) { // Filter by search - filtered = filtered.filter((p: PipelineData) => p.name.includes(search)); + filtered = filtered.filter((p: PipelineData | MonoVertexData) => + p.name.includes(search) + ); } // Sorting if (orderBy.value === ALPHABETICAL_SORT) { - filtered?.sort((a: PipelineData, b: PipelineData) => { - if (orderBy.sortOrder === ASC) { - return a.name > b.name ? 1 : -1; - } else { - return a.name < b.name ? 1 : -1; + filtered?.sort( + ( + a: PipelineData | MonoVertexData, + b: PipelineData | MonoVertexData + ) => { + if (orderBy.sortOrder === ASC) { + return a.name > b.name ? 1 : -1; + } else { + return a.name < b.name ? 1 : -1; + } } - }); + ); } else if (orderBy.value === LAST_UPDATED_SORT) { - filtered?.sort((a: PipelineData, b: PipelineData) => { - if (orderBy.sortOrder === ASC) { - return a?.pipeline?.status?.lastUpdated > - b?.pipeline?.status?.lastUpdated - ? 1 - : -1; - } else { - return a?.pipeline?.status?.lastUpdated < - b?.pipeline?.status?.lastUpdated - ? 
1 - : -1; + filtered?.sort( + ( + a: PipelineData | MonoVertexData, + b: PipelineData | MonoVertexData + ) => { + const aType = a?.pipeline ? "pipeline" : "monoVertex"; + const bType = b?.pipeline ? "pipeline" : "monoVertex"; + if (orderBy.sortOrder === ASC) { + return Date.parse(a?.[aType]?.status?.lastUpdated) > + Date.parse(b?.[bType]?.status?.lastUpdated) + ? 1 + : -1; + } else { + return Date.parse(a?.[aType]?.status?.lastUpdated) < + Date.parse(b?.[bType]?.status?.lastUpdated) + ? 1 + : -1; + } } - }); + ); } else { - filtered?.sort((a: PipelineData, b: PipelineData) => { - if (orderBy.sortOrder === ASC) { - return Date.parse(a?.pipeline?.metadata?.creationTimestamp) > - Date.parse(b?.pipeline?.metadata?.creationTimestamp) - ? 1 - : -1; - } else { - return Date.parse(a?.pipeline?.metadata?.creationTimestamp) < - Date.parse(b?.pipeline?.metadata?.creationTimestamp) - ? 1 - : -1; + filtered?.sort( + ( + a: PipelineData | MonoVertexData, + b: PipelineData | MonoVertexData + ) => { + const aType = a?.pipeline ? "pipeline" : "monoVertex"; + const bType = b?.pipeline ? "pipeline" : "monoVertex"; + if (orderBy.sortOrder === ASC) { + return Date.parse(a?.[aType]?.metadata?.creationTimestamp) > + Date.parse(b?.[bType]?.metadata?.creationTimestamp) + ? 1 + : -1; + } else { + return Date.parse(a?.[aType]?.metadata?.creationTimestamp) < + Date.parse(b?.[bType]?.metadata?.creationTimestamp) + ? 1 + : -1; + } } - }); + ); } //Filter by health if (healthFilter !== ALL) { @@ -153,7 +200,8 @@ export function PipelineListing({ //Filter by status if (statusFilter !== ALL) { filtered = filtered.filter((p) => { - const currentStatus = p?.pipeline?.status?.phase || UNKNOWN; + const type = p?.pipeline ? 
"pipeline" : "monoVertex"; + const currentStatus = p?.[type]?.status?.phase || UNKNOWN; if (currentStatus.toLowerCase() === statusFilter.toLowerCase()) { return true; } else { @@ -183,6 +231,7 @@ export function PipelineListing({ page, pipelineData, isbData, + monoVertexData, orderBy, healthFilter, statusFilter, diff --git a/ui/src/components/pages/Namespace/partials/NamespaceListingWrapper/PipelinesTypes.ts b/ui/src/components/pages/Namespace/partials/NamespaceListingWrapper/PipelinesTypes.ts index e793005b9d..35bb2d4b42 100644 --- a/ui/src/components/pages/Namespace/partials/NamespaceListingWrapper/PipelinesTypes.ts +++ b/ui/src/components/pages/Namespace/partials/NamespaceListingWrapper/PipelinesTypes.ts @@ -16,6 +16,20 @@ interface Pipeline { status: Status; } +export interface MonoVertexData { + name: string; + status: string; + monoVertex: MonoVertex; +} + +interface MonoVertex { + kind: string; + apiVersion: string; + metadata: any; + spec: any; + status: any; +} + interface Status { conditions: Condition[]; phase: string; diff --git a/ui/src/components/pages/Namespace/partials/NamespaceListingWrapper/index.tsx b/ui/src/components/pages/Namespace/partials/NamespaceListingWrapper/index.tsx index 68aa0f4350..a7889a33c9 100644 --- a/ui/src/components/pages/Namespace/partials/NamespaceListingWrapper/index.tsx +++ b/ui/src/components/pages/Namespace/partials/NamespaceListingWrapper/index.tsx @@ -111,6 +111,7 @@ export function NamespaceListingWrapper({ data, pipelineData, isbData, + monoVertexData, refresh, }: NamespacePipelineListingProps) { const { setSidebarProps, isReadOnly } = @@ -433,6 +434,7 @@ export function NamespaceListingWrapper({ diff --git a/ui/src/components/pages/Pipeline/partials/Graph/index.tsx b/ui/src/components/pages/Pipeline/partials/Graph/index.tsx index a2ac1e38b2..7c5bd60160 100644 --- a/ui/src/components/pages/Pipeline/partials/Graph/index.tsx +++ b/ui/src/components/pages/Pipeline/partials/Graph/index.tsx @@ -64,6 +64,7 @@ import source 
from "../../../../../images/source.png"; import map from "../../../../../images/map.png"; import reduce from "../../../../../images/reduce.png"; import sink from "../../../../../images/sink.png"; +import monoVertex from "../../../../../images/monoVertex.svg"; import input from "../../../../../images/input0.svg"; import generator from "../../../../../images/generator0.svg"; @@ -161,6 +162,7 @@ const Flow = (props: FlowProps) => { refresh, namespaceId, data, + type, } = props; const onIsLockedChange = useCallback( @@ -317,7 +319,11 @@ const Flow = (props: FlowProps) => { fontSize: "1.4rem", }} onClick={handlePlayClick} - disabled={data?.pipeline?.status?.phase === RUNNING} + disabled={ + type === "monoVertex" + ? true + : data?.pipeline?.status?.phase === RUNNING + } > Resume @@ -333,8 +339,10 @@ const Flow = (props: FlowProps) => { }} onClick={handlePauseClick} disabled={ - data?.pipeline?.status?.phase === PAUSED || - data?.pipeline?.status?.phase === PAUSING + type === "monoVertex" + ? true + : data?.pipeline?.status?.phase === PAUSED || + data?.pipeline?.status?.phase === PAUSING } > Pause @@ -513,10 +521,18 @@ const Flow = (props: FlowProps) => { Legend -
- {"source"} -
Source
-
+ {type === "monoVertex" && ( +
+ {"monoVertex"} +
MonoVertex
+
+ )} + {type === "pipeline" && ( +
+ {"source"} +
Source
+
+ )} {isMap && (
{"map"} @@ -529,10 +545,12 @@ const Flow = (props: FlowProps) => {
Reduce
)} -
- {"sink"} -
Sink
-
+ {type === "pipeline" && ( +
+ {"sink"} +
Sink
+
+ )} {isSideInput && (
{"input"} @@ -569,7 +587,7 @@ const getHiddenValue = (edges: Edge[]) => { }; export default function Graph(props: GraphProps) { - const { data, namespaceId, pipelineId, refresh } = props; + const { data, namespaceId, pipelineId, type, refresh } = props; const { sidebarProps, setSidebarProps } = useContext(AppContext); @@ -833,6 +851,7 @@ export default function Graph(props: GraphProps) { refresh={refresh} namespaceId={namespaceId} data={data} + type={type} /> diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/CustomNode/index.tsx b/ui/src/components/pages/Pipeline/partials/Graph/partials/CustomNode/index.tsx index 1acd2e61b6..64718e4f12 100644 --- a/ui/src/components/pages/Pipeline/partials/Graph/partials/CustomNode/index.tsx +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/CustomNode/index.tsx @@ -3,6 +3,7 @@ import { FC, memo, useCallback, useContext, useMemo } from "react"; import { Tooltip } from "@mui/material"; import { Handle, NodeProps, Position } from "reactflow"; +import Box from "@mui/material/Box"; import { HighlightContext } from "../../index"; import { GeneratorColorContext } from "../../../../index"; import { HighlightContextProps } from "../../../../../../../types/declarations/graph"; @@ -11,6 +12,8 @@ import source from "../../../../../../../images/source.png"; import map from "../../../../../../../images/map.png"; import reduce from "../../../../../../../images/reduce.png"; import sink from "../../../../../../../images/sink.png"; +import monoVertex from "../../../../../../../images/monoVertex.svg"; +import transformer from "../../../../../../../images/transformer.svg"; import input0 from "../../../../../../../images/input0.svg"; import input1 from "../../../../../../../images/input1.svg"; import input2 from "../../../../../../../images/input2.svg"; @@ -148,17 +151,17 @@ const CustomNode: FC = ({ if (data?.type === "sideInput") { return ( {data?.name}
} + title={{data?.name}} arrow placement={"left"} > -
+ Spec View
} + title={Spec View} arrow placement={"bottom-start"} > -
= ({ height={16} style={{ alignSelf: "center" }} /> -
+ Show Edges} + title={Show Edges} arrow placement={"bottom-start"} > -
= ({ }} > --- -
+
= ({ id="2" position={Position.Right} /> - + ); } if (data?.type === "generator") { return ( -
= ({ onClick={(e) => e.stopPropagation()} > Generator -
+ ); } @@ -284,25 +287,89 @@ const CustomNode: FC = ({ setHighlightValues({}); }, [setHidden, setHighlightValues]); + // arrow for containers in monoVertex + const arrowSvg = useMemo(() => { + return ( + + + + + + ); + }, []); + return ( -
-
+ -
{data?.name}
- + {data?.type !== "monoVertex" && ( + {data?.name} + )} + {data?.type === "monoVertex" && ( + <> + {data?.name} + + Source Container} + arrow + placement={"left"} + > + + {"source-container"} + + + {arrowSvg} + {data?.nodeInfo?.source?.transformer && ( + Transformer Container
+ } + arrow + placement={"bottom"} + > + + {"transformer-container"} + + + )} + {data?.nodeInfo?.source?.transformer && arrowSvg} + Sink Container} + arrow + placement={"right"} + > + + {"sink-container"} + + + + + )} + {data?.podnum <= 1 ? "pod" : "pods"} -
+ } placement={"top-end"} arrow > -
+ {data?.type === "source" && ( {"source-vertex"} )} @@ -313,29 +380,32 @@ const CustomNode: FC = ({ {"reduce-vertex"} )} {data?.type === "sink" && {"sink-vertex"}} + {data?.type === "monoVertex" && ( + {"monoVertex"} + )} {data?.podnum} -
+ - {/*
+ {/* {"healthy"} -
*/} + */} -
Processing Rates
-
1 min: {data?.vertexMetrics?.ratePerMin}/sec
-
5 min: {data?.vertexMetrics?.ratePerFiveMin}/sec
-
15 min: {data?.vertexMetrics?.ratePerFifteenMin}/sec
-
+ + Processing Rates + 1 min: {data?.vertexMetrics?.ratePerMin}/sec + 5 min: {data?.vertexMetrics?.ratePerFiveMin}/sec + 15 min: {data?.vertexMetrics?.ratePerFifteenMin}/sec + } arrow placement={"bottom-end"} > -
+ {data?.vertexMetrics?.ratePerMin}/sec -
+ {(data?.type === "udf" || data?.type === "sink") && ( @@ -407,7 +477,7 @@ const CustomNode: FC = ({ /> ); })} - + {data?.nodeInfo?.sideInputs?.map((input: any, idx: number) => { return ( = ({ /> ); })} - + ); }; export default memo(CustomNode); diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/CustomNode/style.css b/ui/src/components/pages/Pipeline/partials/Graph/partials/CustomNode/style.css index f0a4a80594..9e9b094718 100644 --- a/ui/src/components/pages/Pipeline/partials/Graph/partials/CustomNode/style.css +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/CustomNode/style.css @@ -1,160 +1,193 @@ .node-rate { - display: flex; - width: 9rem; - height: 2.2rem; - border-radius: 2rem; - background: #D1DEE9; - color: #274C77; - font-family: "IBM Plex Sans", sans-serif; - font-size: 1.2rem; - font-style: normal; - font-weight: 500; - line-height: normal; - justify-content: center; - align-items: center; - position: absolute; - bottom: -12.5%; - right: 8%; - text-transform: lowercase; + display: flex; + width: 9rem; + height: 2.2rem; + border-radius: 2rem; + background: #d1dee9; + color: #274c77; + font-family: "IBM Plex Sans", sans-serif; + font-size: 1.2rem; + font-style: normal; + font-weight: 500; + line-height: normal; + justify-content: center; + align-items: center; + position: absolute; + bottom: -12.5%; + right: 8%; + text-transform: lowercase; } .node-pods { - display: flex; - width: 10rem; - height: 2.2rem; - border-radius: 2rem; - background: #D1DEE9; - color: #274C77; - font-family: "IBM Plex Sans", sans-serif; - font-size: 1.2rem; - font-style: normal; - font-weight: 700; - line-height: normal; - justify-content: space-evenly; - align-items: center; - position: absolute; - top: -13.5%; - left: 10%; - text-transform: lowercase; + display: flex; + width: 10rem; + height: 2.2rem; + border-radius: 2rem; + background: #d1dee9; + color: #274c77; + font-family: "IBM Plex Sans", sans-serif; + font-size: 1.2rem; + font-style: 
normal; + font-weight: 700; + line-height: normal; + justify-content: space-evenly; + align-items: center; + position: absolute; + top: -13.5%; + left: 10%; + text-transform: lowercase; } .node-pods > img { - width: 2rem; + width: 2rem; } .node-status { - width: 2.2rem; - height: 2.2rem; - border-radius: 2rem; - background: #D1DEE9; - display: flex; - justify-content: center; - align-items: center; - position: absolute; - bottom: -12.5%; - left: 8%; + width: 2.2rem; + height: 2.2rem; + border-radius: 2rem; + background: #d1dee9; + display: flex; + justify-content: center; + align-items: center; + position: absolute; + bottom: -12.5%; + left: 8%; } .node-tooltip { - font-size: 1.2rem; - font-weight: 600; - font-family: "Avenir", sans-serif; - cursor: pointer; + font-size: 1.2rem; + font-weight: 600; + font-family: "Avenir", sans-serif; + cursor: pointer; } .react-flow__node-input { - width: 25.2rem; - height: 7.8rem; - border-radius: 2rem; - border: 0.01rem solid #009EAC; - background: var(--boxes, #F8F8FB); - box-shadow: 0 2.4rem 4.8rem -0.8rem rgba(39, 76, 119, 0.16); - cursor: pointer; + width: 25.2rem; + height: 7.8rem; + border-radius: 2rem; + border: 0.01rem solid #009eac; + background: var(--boxes, #f8f8fb); + box-shadow: 0 2.4rem 4.8rem -0.8rem rgba(39, 76, 119, 0.16); + cursor: pointer; } .node-info { - display: flex; - width: 100%; - height: 100%; - flex-direction: column; - justify-content: center; - color: #274C77; - text-align: center; - font-family: "IBM Plex Sans", sans-serif; - font-size: 1.4rem; - font-style: normal; - font-weight: 500; - line-height: normal; - overflow: hidden; - white-space: nowrap; - text-overflow: ellipsis; + display: flex; + width: 100%; + height: 100%; + flex-direction: column; + justify-content: center; + color: #274c77; + text-align: center; + font-family: "IBM Plex Sans", sans-serif; + font-size: 1.4rem; + font-style: normal; + font-weight: 500; + line-height: normal; + overflow: hidden; + white-space: nowrap; + 
text-overflow: ellipsis; +} + +.node-info-mono { + display: flex; + width: 100%; + height: 50%; + flex-direction: column; + justify-content: center; + color: #274c77; + text-align: center; + font-family: "IBM Plex Sans", sans-serif; + font-size: 1.4rem; + font-style: normal; + font-weight: 500; + line-height: normal; + overflow: hidden; + white-space: nowrap; + text-overflow: ellipsis; +} + +.mono-vertex-img-wrapper { + height: 2rem; + width: 2rem; + border-radius: 50%; + border: 1px solid #d1dee9; + display: flex; + align-items: center; + justify-content: center; +} + +.mono-vertex-img { + height: 1rem; + width: 1rem; } .node-icon { - width: 1.144rem; - height: 1.144rem; - display: inline-block; - vertical-align: middle; - position: relative; - margin-left: 0.01rem; - margin-right: 0.01rem; - padding-bottom: 0.05rem; + width: 1.144rem; + height: 1.144rem; + display: inline-block; + vertical-align: middle; + position: relative; + margin-left: 0.01rem; + margin-right: 0.01rem; + padding-bottom: 0.05rem; } .node-podnum { - font-weight: bold; + font-weight: bold; } .react-flow__handle { - /*background : #8D9096;*/ - z-index: -1; - margin: -0.32rem; - height: 1.6rem; - width: 1.6rem; + /*background : #8D9096;*/ + z-index: -1; + margin: -0.32rem; + height: 1.6rem; + width: 1.6rem; } .sideInput_node { - display: flex; - cursor: pointer; + display: flex; + cursor: pointer; } .sideInput_node_ele { - width: 3.6rem; - height: 3rem; - background: #E0E0E0; - display: flex; - justify-content: center; - align-items: center; + width: 3.6rem; + height: 3rem; + background: #e0e0e0; + display: flex; + justify-content: center; + align-items: center; } .generator_handle { - top: 60%; - left: 82.5%; - background: none; + top: 60%; + left: 82.5%; + background: none; } .generator_node { - font-size: 1.6rem; - background: #F8F8FB; - padding: 1.6rem 2.4rem 1.6rem 2.4rem; - margin-left: -2.4rem; - border-radius: 2rem; - border: 0.1rem solid #DAE3E8; - color: #6B6C72; + font-size: 1.6rem; + 
background: #f8f8fb; + padding: 1.6rem 2.4rem 1.6rem 2.4rem; + margin-left: -2.4rem; + border-radius: 2rem; + border: 0.1rem solid #dae3e8; + color: #6b6c72; } .react-flow__handle-bottom { - z-index: -1; - margin: -0.32rem; - height: 1.6rem; - width: 1.6rem; - border: none; - bottom: -15%; - background: none !important; + z-index: -1; + margin: -0.32rem; + height: 1.6rem; + width: 1.6rem; + border: none; + bottom: -15%; + background: none !important; } .sideInput_handle { - bottom: -26% ; - position: absolute; - cursor: pointer; -} \ No newline at end of file + bottom: -26%; + position: absolute; + cursor: pointer; +} diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/index.tsx b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/index.tsx index 19ec973709..50182392b2 100644 --- a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/index.tsx +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/index.tsx @@ -145,6 +145,7 @@ export default function NodeInfo(props: NodeInfoProps) { namespaceId={namespaceId} pipelineId={pipelineId} vertexId={node.id} + type={node?.data?.type} /> )} diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/index.tsx b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/index.tsx index 9d2629fb8a..92cdccd453 100644 --- a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/index.tsx +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/index.tsx @@ -20,7 +20,7 @@ import { } from "../../../../../../../../../types/declarations/pods"; export function Pods(props: PodsProps) { - const { namespaceId, pipelineId, vertexId } = props; + const { namespaceId, pipelineId, vertexId, type } = props; if (!namespaceId || !pipelineId || !vertexId) { return ( @@ -41,6 +41,7 @@ export function Pods(props: PodsProps) { pipelineId, vertexId, selectedPod, + type, 
setSelectedPod, setSelectedContainer ); diff --git a/ui/src/components/plugin/NumaflowMonitorApp/App.tsx b/ui/src/components/plugin/NumaflowMonitorApp/App.tsx index 28bc5880e9..9b65030418 100644 --- a/ui/src/components/plugin/NumaflowMonitorApp/App.tsx +++ b/ui/src/components/plugin/NumaflowMonitorApp/App.tsx @@ -32,14 +32,6 @@ import "react-toastify/dist/ReactToastify.css"; const MAX_ERRORS = 6; function App(props: AppProps) { - // TODO remove, used for testing ns only installation - // const { systemInfo, error: systemInfoError } = { - // systemInfo: { - // namespaced: true, - // managedNamespace: "test", - // }, - // error: undefined, - // }; const { hostUrl = "", namespace = "" } = props; const pageRef = useRef(); const [pageWidth, setPageWidth] = useState(0); diff --git a/ui/src/components/plugin/Routes/Routes.tsx b/ui/src/components/plugin/Routes/Routes.tsx index 3584a366c9..d7a75529cb 100644 --- a/ui/src/components/plugin/Routes/Routes.tsx +++ b/ui/src/components/plugin/Routes/Routes.tsx @@ -1,6 +1,7 @@ import { useLocation } from "react-router-dom"; import { Namespaces } from "../../pages/Namespace"; import { Pipeline } from "../../pages/Pipeline"; +import { MonoVertex } from "../../pages/MonoVertex"; export interface RoutesProps { namespace: string; @@ -9,9 +10,12 @@ export function Routes(props: RoutesProps) { const location = useLocation(); const query = new URLSearchParams(location.search); const pl = query.get("pipeline") || ""; + const type = query.get("type") || ""; const { namespace } = props; - return pl ? ( + return type ? ( + + ) : pl ? 
( ) : ( diff --git a/ui/src/images/monoVertex.svg b/ui/src/images/monoVertex.svg new file mode 100644 index 0000000000..99c36ab144 --- /dev/null +++ b/ui/src/images/monoVertex.svg @@ -0,0 +1,28 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/ui/src/images/transformer.svg b/ui/src/images/transformer.svg new file mode 100644 index 0000000000..5f189d9e8a --- /dev/null +++ b/ui/src/images/transformer.svg @@ -0,0 +1,10 @@ + + + + + + + + + + \ No newline at end of file diff --git a/ui/src/types/declarations/graph.d.ts b/ui/src/types/declarations/graph.d.ts index 34a9571daa..bd5abe05d0 100644 --- a/ui/src/types/declarations/graph.d.ts +++ b/ui/src/types/declarations/graph.d.ts @@ -6,6 +6,7 @@ export interface GraphProps { data: GraphData; namespaceId: string | undefined; pipelineId: string | undefined; + type: "monoVertex" | "pipeline"; refresh: () => void; } @@ -37,6 +38,7 @@ export interface FlowProps { refresh: () => void; namespaceId: string | undefined; data: any; + type: string; } export interface HighlightContextProps { diff --git a/ui/src/types/declarations/namespace.d.ts b/ui/src/types/declarations/namespace.d.ts index 3c3582e14d..65473bae4f 100644 --- a/ui/src/types/declarations/namespace.d.ts +++ b/ui/src/types/declarations/namespace.d.ts @@ -28,6 +28,7 @@ export interface NamespaceSummaryFetchResult { data?: NamespaceSummaryData; pipelineRawData?: any; isbRawData?: any; + monoVertexRawData?: any; loading: boolean; error: any; refresh: () => void; @@ -50,6 +51,7 @@ export interface NamespacePipelineListingProps { data: NamespaceSummaryData; pipelineData?: Map; isbData?: any; + monoVertexData?: any; refresh: () => void; } diff --git a/ui/src/types/declarations/pipeline.d.ts b/ui/src/types/declarations/pipeline.d.ts index 31aa0de14a..09b8757387 100644 --- a/ui/src/types/declarations/pipeline.d.ts +++ b/ui/src/types/declarations/pipeline.d.ts @@ -109,6 +109,45 @@ export interface PipelineSummaryFetchResult { 
refresh: () => void; } +export interface MonoVertex { + spec: any; + metadata: any; + status?: any; +} + +export interface MonoVertexSpec { + replicas: number; + source: any; + sink: any; + scale: any; +} + +export interface MonoVertexMetrics { + ratePerMin: string; + ratePerFiveMin: string; + ratePerFifteenMin: string; + podMetrics: any[]; + error: boolean; +} + +export interface MonoVertexSummary { + name: string; + status: string; + lag?: number; + monoVertex: MonoVertex; +} + +export interface MonoVertexMergeSummaryData { + monoVertexData: MonoVertexSummary; +} + +export interface MonoVertexSummaryFetchResult { + data?: MonoVertexMergeSummaryData; + loading: boolean; + error: any; + refresh: () => void; +} + export interface PipelineUpdateFetchResult { pipelineAvailable: boolean; } diff --git a/ui/src/types/declarations/pods.d.ts b/ui/src/types/declarations/pods.d.ts index eabc2d8988..31d551da0f 100644 --- a/ui/src/types/declarations/pods.d.ts +++ b/ui/src/types/declarations/pods.d.ts @@ -11,6 +11,7 @@ export interface PodsProps { namespaceId: string; pipelineId: string; vertexId: string; + type: string; } export interface PodContainerSpec { diff --git a/ui/src/utils/fetchWrappers/clusterSummaryFetch.ts b/ui/src/utils/fetchWrappers/clusterSummaryFetch.ts index e7de7408a5..ea606f2b02 100644 --- a/ui/src/utils/fetchWrappers/clusterSummaryFetch.ts +++ b/ui/src/utils/fetchWrappers/clusterSummaryFetch.ts @@ -33,14 +33,22 @@ const rawDataToClusterSummary = ( rawData.forEach((ns: any) => { // Pipeline counts - const nsPipelinesHealthyCount = ns.pipelineSummary?.active?.Healthy || 0; - const nsPipelinesWarningCount = ns.pipelineSummary?.active?.Warning || 0; - const nsPipelinesCriticalCount = ns.pipelineSummary?.active?.Critical || 0; + const nsPipelinesHealthyCount = + (ns.pipelineSummary?.active?.Healthy || 0) + + (ns.monoVertexSummary?.active?.Healthy || 0); + const nsPipelinesWarningCount = + (ns.pipelineSummary?.active?.Warning || 0) + + 
(ns.monoVertexSummary?.active?.Warning || 0); + const nsPipelinesCriticalCount = + (ns.pipelineSummary?.active?.Critical || 0) + + (ns.monoVertexSummary?.active?.Critical || 0); const nsPipelinesActiveCount = nsPipelinesHealthyCount + nsPipelinesWarningCount + nsPipelinesCriticalCount; - const nsPipelinesInactiveCount = ns.pipelineSummary?.inactive || 0; + const nsPipelinesInactiveCount = + (ns.pipelineSummary?.inactive || 0) + + (ns.monoVertexSummary?.inactive || 0); const nsPipelinesCount = nsPipelinesActiveCount + nsPipelinesInactiveCount; // ISB counts const nsIsbsHealthyCount = ns.isbServiceSummary?.active?.Healthy || 0; diff --git a/ui/src/utils/fetchWrappers/monoVertexFetch.ts b/ui/src/utils/fetchWrappers/monoVertexFetch.ts new file mode 100644 index 0000000000..41c0972c4a --- /dev/null +++ b/ui/src/utils/fetchWrappers/monoVertexFetch.ts @@ -0,0 +1,119 @@ +import { useEffect, useState, useCallback, useContext } from "react"; +import { Options, useFetch } from "./fetch"; +import { MonoVertexSummaryFetchResult } from "../../types/declarations/pipeline"; +import { getBaseHref } from "../index"; +import { AppContextProps } from "../../types/declarations/app"; +import { AppContext } from "../../App"; + +const DATA_REFRESH_INTERVAL = 15000; // ms + +// fetch monoVertex summary +export const useMonoVertexSummaryFetch = ({ + namespaceId, + pipelineId, + addError, +}: any) => { + const [options, setOptions] = useState({ + skip: false, + requestKey: "", + }); + + const refresh = useCallback(() => { + setOptions({ + skip: false, + requestKey: "id" + Math.random().toString(16).slice(2), + }); + }, []); + + const [results, setResults] = useState({ + data: undefined, + loading: true, + error: undefined, + refresh, + }); + + const { host } = useContext(AppContext); + + const { + data: monoVertexData, + loading: monoVertexLoading, + error: monoVertexError, + } = useFetch( + `${host}${getBaseHref()}/api/v1/namespaces/${namespaceId}/mono-vertices/${pipelineId}`, + undefined, 
+ options + ); + + useEffect(() => { + setInterval(() => { + setOptions({ + skip: false, + requestKey: "id" + Math.random().toString(16).slice(2), + }); + }, DATA_REFRESH_INTERVAL); + }, []); + + useEffect(() => { + if (monoVertexLoading) { + if (options?.requestKey === "") { + setResults({ + data: undefined, + loading: true, + error: undefined, + refresh, + }); + } + return; + } + if (monoVertexError) { + if (options?.requestKey === "") { + // Failed on first load, return error + setResults({ + data: undefined, + loading: false, + error: monoVertexError, + refresh, + }); + } else { + // Failed on refresh, add error to app context + addError(monoVertexError); + } + return; + } + if (monoVertexData?.errMsg) { + if (options?.requestKey === "") { + // Failed on first load, return error + setResults({ + data: undefined, + loading: false, + error: monoVertexData?.errMsg, + refresh, + }); + } else { + // Failed on refresh, add error to app context + addError(monoVertexData?.errMsg); + } + return; + } + if (monoVertexData) { + const monoVertexSummary = { + monoVertexData: monoVertexData?.data, + }; + setResults({ + data: monoVertexSummary, + loading: false, + error: undefined, + refresh, + }); + return; + } + }, [ + monoVertexData, + monoVertexLoading, + monoVertexError, + options, + refresh, + addError, + ]); + return results; +}; diff --git a/ui/src/utils/fetchWrappers/monoVertexUpdateFetch.ts b/ui/src/utils/fetchWrappers/monoVertexUpdateFetch.ts new file mode 100644 index 0000000000..640d718dd3 --- /dev/null +++ b/ui/src/utils/fetchWrappers/monoVertexUpdateFetch.ts @@ -0,0 +1,92 @@ +import { useContext, useEffect, useState } from "react"; +import { Options, useFetch } from "./fetch"; +import { getBaseHref } from "../index"; +import { AppContextProps } from "../../types/declarations/app"; +import { AppContext } from "../../App"; +import { PipelineUpdateFetchResult } from "../../types/declarations/pipeline"; + +const DATA_REFRESH_INTERVAL = 1000; // ms + +// fetch 
monoVertex to check for existence +export const useMonoVertexUpdateFetch = ({ + namespaceId, + pipelineId, + active, + refreshInterval = DATA_REFRESH_INTERVAL, +}: any) => { + const [options, setOptions] = useState({ + skip: !active, + requestKey: "", + }); + + const [results, setResults] = useState({ + pipelineAvailable: false, + }); + // eslint-disable-next-line @typescript-eslint/no-unused-vars + const [intervalId, setIntervalId] = useState(); + + const { host } = useContext(AppContext); + + const { data, loading, error } = useFetch( + `${host}${getBaseHref()}/api/v1/namespaces/${namespaceId}/mono-vertices/${pipelineId}`, + undefined, + options + ); + + useEffect(() => { + if (!active) { + // Clear any existing interval running + setIntervalId((prev: any) => { + if (prev) { + clearInterval(prev); + } + return undefined; + }); + return; + } + // Set periodic interval to refresh data + const id = setInterval(() => { + setOptions({ + skip: false, + requestKey: "id" + Math.random().toString(16).slice(2), + }); + }, refreshInterval); + // Clear any existing interval running and store new one + setIntervalId((prev: any) => { + if (prev) { + clearInterval(prev); + } + return id; + }); + return () => { + // Clear interval on unmount + clearInterval(id); + }; + }, [active, refreshInterval]); + + useEffect(() => { + if (loading) { + if (options?.requestKey === "") { + // Only set false when it's the first load. Keep existing result otherwise. 
+ setResults({ + pipelineAvailable: false, + }); + } + return; + } + if (error || data?.errMsg) { + setResults({ + pipelineAvailable: false, + }); + return; + } + if (data?.data) { + setResults({ + pipelineAvailable: true, + }); + return; + } + }, [data, loading, error, options]); + + return results; +}; diff --git a/ui/src/utils/fetchWrappers/namespaceK8sEventsFetch.ts b/ui/src/utils/fetchWrappers/namespaceK8sEventsFetch.ts index e3df856db9..99b0ee434b 100644 --- a/ui/src/utils/fetchWrappers/namespaceK8sEventsFetch.ts +++ b/ui/src/utils/fetchWrappers/namespaceK8sEventsFetch.ts @@ -70,7 +70,13 @@ export const useNamespaceK8sEventsFetch = ({ if (vertex) { return `${BASE_URL}?objectType=vertex&objectName=${pipeline}-${vertex}`; } else if (pipeline) { - return `${BASE_URL}?objectType=pipeline&objectName=${pipeline}`; + const isMonoVertex = pipeline.endsWith("(MonoVertex)"); + const pipelineName = isMonoVertex + ? pipeline.replace(/\s*\(.*?\)\s*/g, "").trim() + : pipeline; + return `${BASE_URL}?objectType=${ + isMonoVertex ? "monovertex" : "pipeline" + }&objectName=${pipelineName}`; } return `${BASE_URL}`; }, [namespace, pipeline, vertex]); diff --git a/ui/src/utils/fetchWrappers/namespaceSummaryFetch.ts b/ui/src/utils/fetchWrappers/namespaceSummaryFetch.ts index 7a051f95a7..5f4a625695 100644 --- a/ui/src/utils/fetchWrappers/namespaceSummaryFetch.ts +++ b/ui/src/utils/fetchWrappers/namespaceSummaryFetch.ts @@ -12,11 +12,12 @@ import { const rawDataToNamespaceSummary = ( rawPipelineData: any[], - rawIsbData: any[] + rawIsbData: any[], + rawMonoVertexData: any[] ): NamespaceSummaryData | undefined => { - const pipelinesCount = Array.isArray(rawPipelineData) - ? rawPipelineData.length - : 0; + const pipelinesCount = + (Array.isArray(rawPipelineData) ? rawPipelineData.length : 0) + + (Array.isArray(rawMonoVertexData) ? 
rawMonoVertexData.length : 0); let pipelinesActiveCount = 0; let pipelinesInactiveCount = 0; let pipelinesHealthyCount = 0; @@ -56,6 +57,34 @@ const rawDataToNamespaceSummary = ( status: pipeline.status, }); }); + // adding MonoVertex count to pipeline count + Array.isArray(rawMonoVertexData) && + rawMonoVertexData?.forEach((monoVertex: any) => { + switch (monoVertex.status) { + case "healthy": + pipelinesActiveCount++; + pipelinesHealthyCount++; + break; + case "warning": + pipelinesActiveCount++; + pipelinesWarningCount++; + break; + case "critical": + pipelinesActiveCount++; + pipelinesCriticalCount++; + break; + case "inactive": + pipelinesInactiveCount++; + break; + default: + break; + } + // Add pipeline summary to array + pipelineSummaries.push({ + name: monoVertex.name, + status: monoVertex.status, + }); + }); Array.isArray(rawIsbData) && rawIsbData?.forEach((isb: any) => { switch (isb.status) { @@ -142,6 +171,15 @@ export const useNamespaceSummaryFetch = ({ undefined, options ); + const { + data: monoVertexData, + loading: monoVertexLoading, + error: monoVertexError, + } = useFetch( + `${host}${getBaseHref()}/api/v1/namespaces/${namespace}/mono-vertices`, + undefined, + options + ); useEffect(() => { setInterval(() => { @@ -153,7 +191,7 @@ export const useNamespaceSummaryFetch = ({ }, []); useEffect(() => { - if (pipelineLoading || isbLoading) { + if (pipelineLoading || isbLoading || monoVertexLoading) { if (options?.requestKey === "" || loadOnRefresh) { // Only set loading true when first load or when loadOnRefresh is true setResults({ @@ -165,38 +203,41 @@ export const useNamespaceSummaryFetch = ({ } return; } - if (pipelineError || isbError) { + if (pipelineError || isbError || monoVertexError) { if (options?.requestKey === "") { // Failed on first load, return error setResults({ data: undefined, loading: false, - error: pipelineError || isbError, + error: pipelineError || isbError || monoVertexError, refresh, }); } else { // Failed on refresh, add 
error to app context - addError(pipelineError || isbError); + addError(pipelineError || isbError || monoVertexError); } return; } - if (pipelineData?.errMsg || isbData?.errMsg) { + if (pipelineData?.errMsg || isbData?.errMsg || monoVertexData?.errMsg) { if (options?.requestKey === "") { // Failed on first load, return error setResults({ data: undefined, loading: false, - error: pipelineData?.errMsg || isbData?.errMsg, + error: + pipelineData?.errMsg || isbData?.errMsg || monoVertexData?.errMsg, refresh, }); } else { // Failed on refresh, add error to app context - addError(pipelineData?.errMsg || isbData?.errMsg); + addError( + pipelineData?.errMsg || isbData?.errMsg || monoVertexData?.errMsg + ); } return; } - if (pipelineData && isbData) { - const pipeLineMap = pipelineData?.data?.reduce((map: any, obj: any) => { + if (pipelineData && isbData && monoVertexData) { + const pipelineMap = pipelineData?.data?.reduce((map: any, obj: any) => { map[obj.name] = obj; return map; }, {}); @@ -204,14 +245,23 @@ export const useNamespaceSummaryFetch = ({ map[obj.name] = obj; return map; }, {}); + const monoVertexMap = monoVertexData?.data?.reduce( + (map: any, obj: any) => { + map[obj.name] = obj; + return map; + }, + {} + ); const nsSummary = rawDataToNamespaceSummary( pipelineData?.data, - isbData?.data + isbData?.data, + monoVertexData?.data ); setResults({ data: nsSummary, - pipelineRawData: pipeLineMap, + pipelineRawData: pipelineMap, isbRawData: isbMap, + monoVertexRawData: monoVertexMap, loading: false, error: undefined, refresh, @@ -221,10 +271,13 @@ export const useNamespaceSummaryFetch = ({ }, [ pipelineData, isbData, + monoVertexData, pipelineLoading, isbLoading, + monoVertexLoading, pipelineError, isbError, + monoVertexError, loadOnRefresh, options, refresh, diff --git a/ui/src/utils/fetcherHooks/monoVertexViewFetch.ts b/ui/src/utils/fetcherHooks/monoVertexViewFetch.ts new file mode 100644 index 0000000000..9c7cd459d3 --- /dev/null +++ 
b/ui/src/utils/fetcherHooks/monoVertexViewFetch.ts @@ -0,0 +1,278 @@ +import { useCallback, useContext, useEffect, useMemo, useState } from "react"; +import { Node } from "reactflow"; +import { isEqual } from "lodash"; +import { getBaseHref } from "../index"; +import { AppContextProps } from "../../types/declarations/app"; +import { AppContext } from "../../App"; +import { + MonoVertex, + MonoVertexSpec, + MonoVertexMetrics, +} from "../../types/declarations/pipeline"; + +export const useMonoVertexViewFetch = ( + namespaceId: string | undefined, + pipelineId: string | undefined, + addError: (error: string) => void +) => { + const [requestKey, setRequestKey] = useState(""); + const [pipeline, setPipeline] = useState(undefined); + const [spec, setSpec] = useState(undefined); + const [monoVertexPods, setMonoVertexPods] = useState>( + new Map() + ); + const [monoVertexMetrics, setMonoVertexMetrics] = useState< + Map + >(new Map()); + const [pipelineErr, setPipelineErr] = useState(undefined); + const [loading, setLoading] = useState(true); + const { host } = useContext(AppContext); + + const BASE_API = `${host}${getBaseHref()}/api/v1/namespaces/${namespaceId}/mono-vertices/${pipelineId}`; + + const refresh = useCallback(() => { + setRequestKey(`${Date.now()}`); + }, []); + + // Call to get pipeline + useEffect(() => { + const fetchPipeline = async () => { + try { + const response = await fetch(`${BASE_API}?refreshKey=${requestKey}`); + if (response.ok) { + const json = await response.json(); + if (json?.data) { + // Update pipeline state with data from the response + setPipeline(json.data?.monoVertex); + // Update spec state if it is not equal to the spec from the response + if (!isEqual(spec, json.data)) setSpec(json.data?.monoVertex?.spec); + setPipelineErr(undefined); + } else if (json?.errMsg) { + // pipeline API call returns an error message + if (requestKey === "") { + setPipelineErr(json.errMsg); + } else { + addError(json.errMsg); + } + } + } else { + // Handle 
the case when the response is not OK + if (requestKey === "") { + if (response.status === 403) { + // Unauthorized user, display given or default error message + const data = await response.json(); + if (data.errMsg) { + setPipelineErr(`Error: ${data.errMsg}`); + } else { + setPipelineErr( + `Error: user is not authorized to execute the requested action.` + ); + } + } else { + setPipelineErr(`Response code: ${response.status}`); + } + } else { + addError(`Failed with code: ${response.status}`); + } + } + } catch (e: any) { + // Handle any errors that occur during the fetch request + if (requestKey === "") { + setPipelineErr(e.message); + } else { + addError(e.message); + } + } + }; + + fetchPipeline(); + }, [requestKey, addError]); + + // Refresh pipeline every 30 sec + useEffect(() => { + const interval = setInterval(() => { + setRequestKey(`${Date.now()}`); + }, 30000); + return () => clearInterval(interval); + }, []); + + // This useEffect is used to obtain all the pods for a given monoVertex. 
+ useEffect(() => { + const vertexToPodsMap = new Map(); + if (spec?.source && spec?.sink) { + // Fetch pods count for each vertex in parallel + Promise.allSettled([ + fetch(`${BASE_API}/pods`) + .then((response) => { + if (response.ok) { + return response.json(); + } else { + return Promise.reject({ response, vertex: pipelineId }); + } + }) + .then((json) => { + if (json?.data) { + const mvtxPods = json.data.filter( + (mvtx: any) => !mvtx?.metadata?.name.includes("-daemon-") + ); + // Update vertexToPodsMap with the number of pods for the current vertex + vertexToPodsMap.set(pipelineId, mvtxPods?.length); + } else if (json?.errMsg) { + // Pods API call returns an error message + addError(json.errMsg); + } + }), + ]) + .then((results) => { + results.forEach((result) => { + if (result && result?.status === "rejected") { + // Handle rejected promises and add error messages to podsErr + addError(`Failed to get pods: ${result.reason.response.status}`); + } + }); + }) + .then(() => { + if (!isEqual(monoVertexPods, vertexToPodsMap)) { + // Update vertexPods state if it is not equal to vertexToPodsMap + setMonoVertexPods(vertexToPodsMap); + } + }) + .catch((e: any) => { + addError(`Error: ${e.message}`); + }); + } + }, [spec, requestKey, addError]); + + const getVertexMetrics = useCallback(() => { + const vertexToMetricsMap = new Map(); + + if (spec?.source && spec?.sink && monoVertexPods.size > 0) { + // Fetch metrics for monoVertex + Promise.allSettled([ + fetch(`${BASE_API}/metrics`) + .then((response) => { + if (response.ok) { + return response.json(); + } else { + return Promise.reject(response); + } + }) + .then((json) => { + if (json?.data) { + const mvtx = json.data; + const monoVertexName = mvtx.monoVertex; + const monoVertexMetrics: MonoVertexMetrics = { + ratePerMin: "0.00", + ratePerFiveMin: "0.00", + ratePerFifteenMin: "0.00", + podMetrics: [], + error: false, + }; + let ratePerMin = 0.0, + ratePerFiveMin = 0.0, + ratePerFifteenMin = 0.0; + // Calculate 
processing rates as summation of pod values + if ("processingRates" in mvtx) { + if ("1m" in mvtx["processingRates"]) { + ratePerMin += mvtx["processingRates"]["1m"]; + } + if ("5m" in mvtx["processingRates"]) { + ratePerFiveMin += mvtx["processingRates"]["5m"]; + } + if ("15m" in mvtx["processingRates"]) { + ratePerFifteenMin += mvtx["processingRates"]["15m"]; + } + } else { + if ( + monoVertexPods.has(monoVertexName) && + monoVertexPods.get(monoVertexName) !== 0 + ) { + // Handle case when processingRates are not available for a vertex + monoVertexMetrics.error = true; + addError( + `Failed to get metrics for ${monoVertexName} monoVertex` + ); + } + } + monoVertexMetrics.ratePerMin = ratePerMin.toFixed(2); + monoVertexMetrics.ratePerFiveMin = ratePerFiveMin.toFixed(2); + monoVertexMetrics.ratePerFifteenMin = + ratePerFifteenMin.toFixed(2); + if ( + monoVertexPods.has(monoVertexName) && + monoVertexPods.get(monoVertexName) !== 0 + ) { + monoVertexMetrics.podMetrics = json; + } + vertexToMetricsMap.set(monoVertexName, monoVertexMetrics); + } else if (json?.errMsg) { + // Metrics API call returns an error message + addError(json.errMsg); + } + }), + ]) + .then((results) => { + results.forEach((result) => { + if (result && result?.status === "rejected") { + // Handle rejected promises and add error messages to metricsErr + addError( + `Failed to get metrics: ${result.reason.response.status}` + ); + } + }); + }) + .then(() => setMonoVertexMetrics(vertexToMetricsMap)) + .catch((e: any) => { + addError(`Error: ${e.message}`); + }); + } + }, [spec, monoVertexPods, addError]); + + // This useEffect is used to obtain metrics for a given monoVertex and refreshes every 1 minute + useEffect(() => { + getVertexMetrics(); + const interval = setInterval(() => { + getVertexMetrics(); + }, 60000); + return () => clearInterval(interval); + }, [getVertexMetrics]); + + const vertices = useMemo(() => { + const newVertices: Node[] = []; + // if (spec?.vertices && vertexPods && 
vertexMetrics) { + if (spec?.source && spec?.sink && monoVertexMetrics) { + const newNode = {} as Node; + const name = pipelineId ?? ""; + newNode.id = name; + newNode.data = { name: name }; + newNode.data.podnum = spec?.replicas ? spec.replicas : 0; + newNode.position = { x: 0, y: 0 }; + // change this in the future if you would like to make it draggable + newNode.draggable = false; + newNode.type = "custom"; + newNode.data.nodeInfo = spec; + newNode.data.type = "monoVertex"; + newNode.data.vertexMetrics = null; + newNode.data.vertexMetrics = monoVertexMetrics.has(name) + ? monoVertexMetrics.get(name) + : null; + newVertices.push(newNode); + } + return newVertices; + }, [spec, monoVertexMetrics]); + + //sets loading variable + useEffect(() => { + if (pipeline && vertices?.length > 0) { + setLoading(false); + } + }, [pipeline, vertices]); + + return { + pipeline, + vertices, + pipelineErr, + loading, + refresh, + }; +}; diff --git a/ui/src/utils/fetcherHooks/podsViewFetch.test.ts b/ui/src/utils/fetcherHooks/podsViewFetch.test.ts index 0948c1fbc8..b113a7bf93 100644 --- a/ui/src/utils/fetcherHooks/podsViewFetch.test.ts +++ b/ui/src/utils/fetcherHooks/podsViewFetch.test.ts @@ -65,6 +65,7 @@ describe("Custom Pods hook", () => { "simple-pipeline", "cat", undefined, + "udf", jest.fn() as Dispatch>, jest.fn() as Dispatch> ) @@ -88,6 +89,7 @@ describe("Custom Pods hook", () => { "simple-pipeline", "cat", undefined, + "udf", jest.fn() as Dispatch>, jest.fn() as Dispatch> ) @@ -109,6 +111,7 @@ describe("Custom Pods hook", () => { "simple-pipeline", "cat", undefined, + "udf", jest.fn() as Dispatch>, jest.fn() as Dispatch> ) diff --git a/ui/src/utils/fetcherHooks/podsViewFetch.ts b/ui/src/utils/fetcherHooks/podsViewFetch.ts index 9c57592428..e893381cf6 100644 --- a/ui/src/utils/fetcherHooks/podsViewFetch.ts +++ b/ui/src/utils/fetcherHooks/podsViewFetch.ts @@ -19,6 +19,7 @@ export const usePodsViewFetch = ( pipelineId: string | undefined, vertexId: string | undefined, 
selectedPod: Pod | undefined, + type: string, setSelectedPod: Dispatch>, setSelectedContainer: Dispatch> ) => { @@ -39,12 +40,19 @@ export const usePodsViewFetch = ( const fetchPods = async () => { try { const response = await fetch( - `${host}${getBaseHref()}/api/v1/namespaces/${namespaceId}/pipelines/${pipelineId}/vertices/${vertexId}/pods?refreshKey=${requestKey}` + `${host}${getBaseHref()}/api/v1/namespaces/${namespaceId}${ + type === "monoVertex" + ? `/mono-vertices` + : `/pipelines/${pipelineId}/vertices` + }/${vertexId}/pods?refreshKey=${requestKey}` ); if (response.ok) { const json = await response.json(); if (json?.data) { - const data = json?.data; + let data = json?.data; + data = data.filter( + (pod: any) => !pod?.metadata?.name.includes("-daemon-") + ); const pList = data?.map((pod: any) => { const containers: string[] = []; const containerSpecMap = new Map(); From 0489eae34a5ee998c26429c2664d14a59d71c169 Mon Sep 17 00:00:00 2001 From: Keran Yang Date: Fri, 16 Aug 2024 14:21:46 -0400 Subject: [PATCH 005/188] chore: add missed tests for java (#1953) Signed-off-by: Keran Yang --- test/sdks-e2e/sdks_test.go | 12 +++--- test/sdks-e2e/testdata/flatmap-stream.yaml | 49 +++++++++++----------- 2 files changed, 29 insertions(+), 32 deletions(-) diff --git a/test/sdks-e2e/sdks_test.go b/test/sdks-e2e/sdks_test.go index a37785ba68..a1bf013f41 100644 --- a/test/sdks-e2e/sdks_test.go +++ b/test/sdks-e2e/sdks_test.go @@ -77,9 +77,9 @@ func (s *SDKsSuite) TestMapStreamUDFunctionAndSink() { VertexPodLogContains("go-split", LogUDFVertexStarted, PodLogCheckOptionWithContainer("numa")). VertexPodLogContains("go-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")). VertexPodLogContains("python-split", LogUDFVertexStarted, PodLogCheckOptionWithContainer("numa")). 
- VertexPodLogContains("python-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")) - //VertexPodLogContains("java-split", LogUDFVertexStarted, PodLogCheckOptionWithContainer("numa")). - // VertexPodLogContains("java-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")) + VertexPodLogContains("python-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")). + VertexPodLogContains("java-split", LogUDFVertexStarted, PodLogCheckOptionWithContainer("numa")). + VertexPodLogContains("java-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")) w.SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("hello,hello,hello"))). SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("hello"))) @@ -90,10 +90,8 @@ func (s *SDKsSuite) TestMapStreamUDFunctionAndSink() { VertexPodLogContains("go-udsink-2", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(4)) w.Expect(). VertexPodLogContains("python-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(4)) - - // FIXME(map-batch): enable Java - //w.Expect(). - // VertexPodLogContains("java-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(4)) + w.Expect(). 
+ VertexPodLogContains("java-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(4)) } func (s *SDKsSuite) TestBatchMapUDFunctionAndSink() { diff --git a/test/sdks-e2e/testdata/flatmap-stream.yaml b/test/sdks-e2e/testdata/flatmap-stream.yaml index c8ba177696..503ffcaf3a 100644 --- a/test/sdks-e2e/testdata/flatmap-stream.yaml +++ b/test/sdks-e2e/testdata/flatmap-stream.yaml @@ -62,27 +62,26 @@ spec: # https://github.com/numaproj/numaflow-python/tree/main/examples/sink/log image: quay.io/numaio/numaflow-python/sink-log:stable imagePullPolicy: Always -## FIXME(map-batch): enable Java -# - name: java-split -# partitions: 3 -# limits: -# readBatchSize: 1 -# scale: -# min: 1 -# udf: -# container: -# # Split input message into an array with comma, see https://github.com/numaproj/numaflow-java/tree/main/examples/src/main/java/io/numaproj/numaflow/examples/mapstream/flatmapstream -# image: quay.io/numaio/numaflow-java/flat-map-stream:stable -# imagePullPolicy: Always -# - name: java-udsink -# scale: -# min: 1 -# sink: -# udsink: -# container: -# # https://github.com/numaproj/numaflow-java/tree/main/examples/src/main/java/io/numaproj/numaflow/examples/sink/simple -# image: quay.io/numaio/numaflow-java/simple-sink:stable -# imagePullPolicy: Always + - name: java-split + partitions: 3 + limits: + readBatchSize: 1 + scale: + min: 1 + udf: + container: + # Split input message into an array with comma, see https://github.com/numaproj/numaflow-java/tree/main/examples/src/main/java/io/numaproj/numaflow/examples/mapstream/flatmapstream + image: quay.io/numaio/numaflow-java/flat-map-stream:stable + imagePullPolicy: Always + - name: java-udsink + scale: + min: 1 + sink: + udsink: + container: + # https://github.com/numaproj/numaflow-java/tree/main/examples/src/main/java/io/numaproj/numaflow/examples/sink/simple + image: quay.io/numaio/numaflow-java/simple-sink:stable + imagePullPolicy: Always edges: - from: in to: go-split @@ -94,7 +93,7 @@ spec: to: 
python-split - from: python-split to: python-udsink -# - from: in -# to: java-split -# - from: java-split -# to: java-udsink + - from: in + to: java-split + - from: java-split + to: java-udsink From a52102d1367215fd202c82791891d50d7ae52227 Mon Sep 17 00:00:00 2001 From: Sreekanth Date: Sat, 17 Aug 2024 23:58:16 +0530 Subject: [PATCH 006/188] chore: switch to scratch base image (#1955) --- .dockerignore | 1 + Dockerfile | 50 ++-- Makefile | 2 +- rust/Cargo.lock | 273 ++++++------------ rust/numaflow-models/Cargo.toml | 3 +- rust/numaflow-models/templates/Cargo.mustache | 6 +- rust/servesink/Cargo.toml | 6 +- 7 files changed, 127 insertions(+), 214 deletions(-) create mode 100644 .dockerignore diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000000..7a8cf80175 --- /dev/null +++ b/.dockerignore @@ -0,0 +1 @@ +rust/target \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 7796a35b81..c5ac53cbb1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,9 +1,13 @@ -ARG BASE_IMAGE=gcr.io/distroless/cc-debian12 +ARG BASE_IMAGE=scratch ARG ARCH=$TARGETARCH #################################################################################################### # base #################################################################################################### -FROM debian:bullseye as base +FROM alpine:3.17 AS base +ARG ARCH +RUN apk update && apk upgrade && \ + apk add ca-certificates && \ + apk --no-cache add tzdata ARG ARCH COPY dist/numaflow-linux-${ARCH} /bin/numaflow @@ -13,12 +17,10 @@ RUN chmod +x /bin/numaflow #################################################################################################### # extension base #################################################################################################### -FROM rust:1.79-bookworm as extension-base - -RUN curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash +FROM 
rust:1.80-bookworm AS extension-base +ARG TARGETPLATFORM -RUN apt-get update -RUN apt-get install protobuf-compiler -y +RUN apt-get update && apt-get install protobuf-compiler -y RUN cargo new numaflow # Create a new empty shell project @@ -43,8 +45,15 @@ COPY ./rust/serving/Cargo.toml ./serving/Cargo.toml COPY ./rust/Cargo.toml ./rust/Cargo.lock ./ # Build to cache dependencies -RUN mkdir -p src/bin && echo "fn main() {}" > src/bin/main.rs && \ - cargo build --workspace --all --release +RUN --mount=type=cache,target=/usr/local/cargo/registry \ + --mount=type=cache,target=/usr/local/cargo/git \ + case ${TARGETPLATFORM} in \ + "linux/amd64") TARGET="x86_64-unknown-linux-gnu" ;; \ + "linux/arm64") TARGET="aarch64-unknown-linux-gnu" ;; \ + *) echo "Unsupported platform: ${TARGETPLATFORM}" && exit 1 ;; \ + esac && \ + mkdir -p src/bin && echo "fn main() {}" > src/bin/main.rs && \ + RUSTFLAGS='-C target-feature=+crt-static' cargo build --workspace --all --release --target ${TARGET} # Copy the actual source code files of the main project and the subprojects COPY ./rust/src ./src @@ -57,22 +66,29 @@ COPY ./rust/monovertex/build.rs ./monovertex/build.rs COPY ./rust/monovertex/proto ./monovertex/proto # Build the real binaries -RUN touch src/bin/main.rs && \ - cargo build --workspace --all --release +RUN --mount=type=cache,target=/usr/local/cargo/registry \ + --mount=type=cache,target=/usr/local/cargo/git \ + case ${TARGETPLATFORM} in \ + "linux/amd64") TARGET="x86_64-unknown-linux-gnu" ;; \ + "linux/arm64") TARGET="aarch64-unknown-linux-gnu" ;; \ + *) echo "Unsupported platform: ${TARGETPLATFORM}" && exit 1 ;; \ + esac && \ + touch src/bin/main.rs && \ + RUSTFLAGS='-C target-feature=+crt-static' cargo build --workspace --all --release --target ${TARGET} && \ + cp -pv target/${TARGET}/release/numaflow /root/numaflow #################################################################################################### # numaflow 
#################################################################################################### ARG BASE_IMAGE -FROM debian:bookworm as numaflow - -# Install necessary libraries -RUN apt-get update && apt-get install -y libssl3 +FROM ${BASE_IMAGE} AS numaflow +COPY --from=base /usr/share/zoneinfo /usr/share/zoneinfo +COPY --from=base /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt COPY --from=base /bin/numaflow /bin/numaflow COPY ui/build /ui/build -COPY --from=extension-base /numaflow/target/release/numaflow /bin/numaflow-rs +COPY --from=extension-base /root/numaflow /bin/numaflow-rs COPY ./rust/serving/config config ENTRYPOINT [ "/bin/numaflow" ] @@ -80,7 +96,7 @@ ENTRYPOINT [ "/bin/numaflow" ] #################################################################################################### # testbase #################################################################################################### -FROM alpine:3.17 as testbase +FROM alpine:3.17 AS testbase RUN apk update && apk upgrade && \ apk add ca-certificates && \ apk --no-cache add tzdata diff --git a/Makefile b/Makefile index 2c6e30d68c..f437836f48 100644 --- a/Makefile +++ b/Makefile @@ -6,7 +6,7 @@ DIST_DIR=${CURRENT_DIR}/dist BINARY_NAME:=numaflow DOCKERFILE:=Dockerfile DEV_BASE_IMAGE:=debian:bookworm -RELEASE_BASE_IMAGE:=debian:bookworm +RELEASE_BASE_IMAGE:=scratch BUILD_DATE=$(shell date -u +'%Y-%m-%dT%H:%M:%SZ') GIT_COMMIT=$(shell git rev-parse HEAD) diff --git a/rust/Cargo.lock b/rust/Cargo.lock index 3b4bfaa19b..94bce16b9a 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -84,7 +84,7 @@ dependencies = [ "ring", "rustls-native-certs", "rustls-pemfile 2.1.3", - "rustls-webpki", + "rustls-webpki 0.102.6", "serde", "serde_json", "serde_nanos", @@ -92,7 +92,7 @@ dependencies = [ "thiserror", "time", "tokio", - "tokio-rustls", + "tokio-rustls 0.26.0", "tracing", "tryhard", "url", @@ -252,11 +252,11 @@ dependencies = [ "hyper 1.4.1", "hyper-util", "pin-project-lite", - 
"rustls", + "rustls 0.23.12", "rustls-pemfile 2.1.3", "rustls-pki-types", "tokio", - "tokio-rustls", + "tokio-rustls 0.26.0", "tower", "tower-service", ] @@ -716,21 +716,6 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" -[[package]] -name = "foreign-types" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -dependencies = [ - "foreign-types-shared", -] - -[[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" - [[package]] name = "form_urlencoded" version = "1.2.1" @@ -1102,15 +1087,29 @@ dependencies = [ "headers", "http 1.1.0", "hyper 1.4.1", - "hyper-rustls", + "hyper-rustls 0.27.2", "hyper-util", "pin-project-lite", "rustls-native-certs", "tokio", - "tokio-rustls", + "tokio-rustls 0.26.0", "tower-service", ] +[[package]] +name = "hyper-rustls" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" +dependencies = [ + "futures-util", + "http 0.2.12", + "hyper 0.14.30", + "rustls 0.21.12", + "tokio", + "tokio-rustls 0.24.1", +] + [[package]] name = "hyper-rustls" version = "0.27.2" @@ -1122,11 +1121,11 @@ dependencies = [ "hyper 1.4.1", "hyper-util", "log", - "rustls", + "rustls 0.23.12", "rustls-native-certs", "rustls-pki-types", "tokio", - "tokio-rustls", + "tokio-rustls 0.26.0", "tower-service", ] @@ -1143,35 +1142,6 @@ dependencies = [ "tower-service", ] -[[package]] -name = "hyper-tls" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" -dependencies = [ - "bytes", - 
"hyper 0.14.30", - "native-tls", - "tokio", - "tokio-native-tls", -] - -[[package]] -name = "hyper-tls" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" -dependencies = [ - "bytes", - "http-body-util", - "hyper 1.4.1", - "hyper-util", - "native-tls", - "tokio", - "tokio-native-tls", - "tower-service", -] - [[package]] name = "hyper-util" version = "0.1.6" @@ -1360,14 +1330,14 @@ dependencies = [ "http-body-util", "hyper 1.4.1", "hyper-http-proxy", - "hyper-rustls", + "hyper-rustls 0.27.2", "hyper-timeout", "hyper-util", "jsonpath-rust", "k8s-openapi", "kube-core", "pem", - "rustls", + "rustls 0.23.12", "rustls-pemfile 2.1.3", "secrecy", "serde", @@ -1581,7 +1551,7 @@ dependencies = [ "prost", "prost-types", "rcgen", - "rustls", + "rustls 0.23.12", "semver", "serde", "serde_json", @@ -1605,23 +1575,6 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" -[[package]] -name = "native-tls" -version = "0.2.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466" -dependencies = [ - "libc", - "log", - "openssl", - "openssl-probe", - "openssl-sys", - "schannel", - "security-framework", - "security-framework-sys", - "tempfile", -] - [[package]] name = "nkeys" version = "0.4.3" @@ -1750,7 +1703,7 @@ version = "0.0.0-pre" dependencies = [ "k8s-openapi", "kube", - "reqwest 0.11.27", + "reqwest", "serde", "serde_derive", "serde_json", @@ -1773,50 +1726,12 @@ version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" -[[package]] -name = "openssl" -version = "0.10.66" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" -dependencies = [ - "bitflags 2.6.0", - "cfg-if", - "foreign-types", - "libc", - "once_cell", - "openssl-macros", - "openssl-sys", -] - -[[package]] -name = "openssl-macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "openssl-probe" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" -[[package]] -name = "openssl-sys" -version = "0.9.103" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6" -dependencies = [ - "cc", - "libc", - "pkg-config", - "vcpkg", -] - [[package]] name = "ordered-float" version = "2.10.1" @@ -2010,12 +1925,6 @@ dependencies = [ "spki", ] -[[package]] -name = "pkg-config" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" - [[package]] name = "portable-atomic" version = "1.7.0" @@ -2301,16 +2210,16 @@ dependencies = [ "http 0.2.12", "http-body 0.4.6", "hyper 0.14.30", - "hyper-tls 0.5.0", + "hyper-rustls 0.24.2", "ipnet", "js-sys", "log", "mime", "mime_guess", - "native-tls", "once_cell", "percent-encoding", "pin-project-lite", + "rustls 0.21.12", "rustls-pemfile 1.0.4", "serde", "serde_json", @@ -2318,56 +2227,14 @@ dependencies = [ "sync_wrapper 0.1.2", "system-configuration", "tokio", - "tokio-native-tls", + "tokio-rustls 0.24.1", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "winreg 0.50.0", -] - -[[package]] -name = "reqwest" -version = "0.12.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c7d6d2a27d57148378eb5e111173f4276ad26340ecc5c49a4a2152167a2d6a37" -dependencies = [ - "base64 0.22.1", - "bytes", - "encoding_rs", - "futures-core", - "futures-util", - "h2 0.4.5", - "http 1.1.0", - "http-body 1.0.1", - "http-body-util", - "hyper 1.4.1", - "hyper-rustls", - "hyper-tls 0.6.0", - "hyper-util", - "ipnet", - "js-sys", - "log", - "mime", - "native-tls", - "once_cell", - "percent-encoding", - "pin-project-lite", - "rustls-pemfile 2.1.3", - "serde", - "serde_json", - "serde_urlencoded", - "sync_wrapper 1.0.1", - "system-configuration", - "tokio", - "tokio-native-tls", - "tower-service", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "winreg 0.52.0", + "webpki-roots", + "winreg", ] [[package]] @@ -2441,6 +2308,18 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "rustls" +version = "0.21.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" +dependencies = [ + "log", + "ring", + "rustls-webpki 0.101.7", + "sct", +] + [[package]] name = "rustls" version = "0.23.12" @@ -2452,7 +2331,7 @@ dependencies = [ "once_cell", "ring", "rustls-pki-types", - "rustls-webpki", + "rustls-webpki 0.102.6", "subtle", "zeroize", ] @@ -2495,6 +2374,16 @@ version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" +[[package]] +name = "rustls-webpki" +version = "0.101.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "rustls-webpki" version = "0.102.6" @@ -2534,6 +2423,16 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +[[package]] +name = "sct" +version = "0.7.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "secrecy" version = "0.8.0" @@ -2684,7 +2583,7 @@ name = "servesink" version = "0.1.0" dependencies = [ "numaflow 0.1.0 (git+https://github.com/numaproj/numaflow-rs.git?branch=main)", - "reqwest 0.12.5", + "reqwest", "tokio", "tonic", "tracing", @@ -3017,23 +2916,23 @@ dependencies = [ ] [[package]] -name = "tokio-native-tls" -version = "0.3.1" +name = "tokio-retry" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +checksum = "7f57eb36ecbe0fc510036adff84824dd3c24bb781e21bfa67b69d556aa85214f" dependencies = [ - "native-tls", + "pin-project", + "rand", "tokio", ] [[package]] -name = "tokio-retry" -version = "0.3.0" +name = "tokio-rustls" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f57eb36ecbe0fc510036adff84824dd3c24bb781e21bfa67b69d556aa85214f" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "pin-project", - "rand", + "rustls 0.21.12", "tokio", ] @@ -3043,7 +2942,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls", + "rustls 0.23.12", "rustls-pki-types", "tokio", ] @@ -3390,12 +3289,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" -[[package]] -name = "vcpkg" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" - [[package]] name = "version_check" version = "0.9.5" @@ -3493,6 +3386,12 @@ 
dependencies = [ "wasm-bindgen", ] +[[package]] +name = "webpki-roots" +version = "0.25.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" + [[package]] name = "which" version = "4.4.2" @@ -3694,16 +3593,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "winreg" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a277a57398d4bfa075df44f501a17cfdf8542d224f0d36095a2adc7aee4ef0a5" -dependencies = [ - "cfg-if", - "windows-sys 0.48.0", -] - [[package]] name = "yaml-rust" version = "0.4.5" diff --git a/rust/numaflow-models/Cargo.toml b/rust/numaflow-models/Cargo.toml index 6a38e0cc01..1e133e3b08 100644 --- a/rust/numaflow-models/Cargo.toml +++ b/rust/numaflow-models/Cargo.toml @@ -16,4 +16,5 @@ url = "^2.2" uuid = { version = "^1.0", features = ["serde", "v4"] } [dependencies.reqwest] version = "^0.11" -features = ["json", "multipart"] +default-features = false +features = ["json", "multipart", "rustls-tls"] diff --git a/rust/numaflow-models/templates/Cargo.mustache b/rust/numaflow-models/templates/Cargo.mustache index a4bbdfeae2..f7d1cdeb7c 100644 --- a/rust/numaflow-models/templates/Cargo.mustache +++ b/rust/numaflow-models/templates/Cargo.mustache @@ -50,7 +50,8 @@ secrecy = "0.8.0" {{^supportAsync}} [dependencies.reqwest] version = "^0.11" -features = ["json", "blocking", "multipart"] +default-features = false +features = ["json", "blocking", "multipart", "rustls-tls"] {{/supportAsync}} {{#supportAsync}} {{#supportMiddleware}} @@ -58,6 +59,7 @@ reqwest-middleware = "0.2.0" {{/supportMiddleware}} [dependencies.reqwest] version = "^0.11" -features = ["json", "multipart"] +default-features = false +features = ["json", "multipart", "rustls-tls"] {{/supportAsync}} {{/reqwest}} diff --git a/rust/servesink/Cargo.toml b/rust/servesink/Cargo.toml index 70fa8e55f5..e820030494 100644 --- a/rust/servesink/Cargo.toml +++ 
b/rust/servesink/Cargo.toml @@ -7,6 +7,10 @@ edition = "2021" tonic = "0.12.0" tokio = { version = "1.0", features = ["macros", "rt-multi-thread"] } numaflow = { git = "https://github.com/numaproj/numaflow-rs.git", branch = "main" } -reqwest = "0.12.4" tracing = "0.1.40" tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } + +[dependencies.reqwest] +version = "^0.11" +default-features = false +features = ["rustls-tls"] \ No newline at end of file From cbad6996f063acf1f4a3d2d8fc2ec1acff6ee912 Mon Sep 17 00:00:00 2001 From: Yashash H L Date: Mon, 19 Aug 2024 23:02:23 +0530 Subject: [PATCH 007/188] feat: enable fallback sink for mvtx (#1957) Signed-off-by: Yashash H L Signed-off-by: Vigith Maurice Co-authored-by: Vigith Maurice --- .codecov.yml | 3 +- .../numaflow/v1alpha1/mono_vertex_types.go | 3 + rust/monovertex/src/config.rs | 9 + rust/monovertex/src/forwarder.rs | 614 ++++++++++++++---- rust/monovertex/src/lib.rs | 71 +- rust/monovertex/src/metrics.rs | 22 +- rust/monovertex/src/sink.rs | 10 +- 7 files changed, 579 insertions(+), 153 deletions(-) diff --git a/.codecov.yml b/.codecov.yml index 15eaa610e7..dd3d8fd073 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -7,7 +7,8 @@ ignore: - "pkg/client/.*" - "vendor/.*" - "test/.*" -- "serving/src/error.rs" +- "rust/**/error.rs" +- "rust/numaflow-models/**" # ignore generated files coverage: status: patch: off diff --git a/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go index ac64c7e2a6..6f02509563 100644 --- a/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go +++ b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go @@ -425,6 +425,9 @@ func (mvspec MonoVertexSpec) buildContainers(req getContainerReq) []corev1.Conta if mvspec.Sink.UDSink != nil { // Only support UDSink for now. 
containers = append(containers, mvspec.Sink.getUDSinkContainer(req)) } + if mvspec.Sink.Fallback != nil { + containers = append(containers, mvspec.Sink.getFallbackUDSinkContainer(req)) + } // Fallback sink is not supported. containers = append(containers, mvspec.Sidecars...) return containers diff --git a/rust/monovertex/src/config.rs b/rust/monovertex/src/config.rs index 7e102e06b8..5a3121e862 100644 --- a/rust/monovertex/src/config.rs +++ b/rust/monovertex/src/config.rs @@ -41,6 +41,7 @@ pub struct Settings { pub log_level: String, pub grpc_max_message_size: usize, pub is_transformer_enabled: bool, + pub is_fallback_enabled: bool, pub lag_check_interval_in_secs: u16, pub lag_refresh_interval_in_secs: u16, pub sink_max_retry_attempts: u16, @@ -58,6 +59,7 @@ impl Default for Settings { log_level: LevelFilter::INFO.to_string(), grpc_max_message_size: DEFAULT_GRPC_MAX_MESSAGE_SIZE, is_transformer_enabled: false, + is_fallback_enabled: false, lag_check_interval_in_secs: DEFAULT_LAG_CHECK_INTERVAL_IN_SECS, lag_refresh_interval_in_secs: DEFAULT_LAG_REFRESH_INTERVAL_IN_SECS, sink_max_retry_attempts: DEFAULT_MAX_SINK_RETRY_ATTEMPTS, @@ -111,6 +113,13 @@ impl Settings { .ok_or(Error::ConfigError("Source not found".to_string()))? .transformer .is_some(); + + settings.is_fallback_enabled = mono_vertex_obj + .spec + .sink + .ok_or(Error::ConfigError("Sink not found".to_string()))? 
+ .fallback + .is_some(); } settings.log_level = diff --git a/rust/monovertex/src/forwarder.rs b/rust/monovertex/src/forwarder.rs index f774cc80b0..5862e753e9 100644 --- a/rust/monovertex/src/forwarder.rs +++ b/rust/monovertex/src/forwarder.rs @@ -1,6 +1,6 @@ use crate::config::config; use crate::error::{Error, Result}; -use crate::message::Offset; +use crate::message::{Message, Offset}; use crate::metrics; use crate::metrics::forward_metrics; use crate::sink::{proto, SinkClient}; @@ -11,8 +11,8 @@ use std::collections::HashMap; use tokio::task::JoinSet; use tokio::time::sleep; use tokio_util::sync::CancellationToken; -use tracing::info; use tracing::log::warn; +use tracing::{debug, info}; /// Forwarder is responsible for reading messages from the source, applying transformation if /// transformer is present, writing the messages to the sink, and then acknowledging the messages @@ -21,147 +21,89 @@ pub(crate) struct Forwarder { source_client: SourceClient, sink_client: SinkClient, transformer_client: Option, + fallback_client: Option, cln_token: CancellationToken, common_labels: Vec<(String, String)>, } -impl Forwarder { - #[allow(clippy::too_many_arguments)] - pub(crate) async fn new( +/// ForwarderBuilder is used to build a Forwarder instance with optional fields. 
+pub(crate) struct ForwarderBuilder { + source_client: SourceClient, + sink_client: SinkClient, + cln_token: CancellationToken, + transformer_client: Option, + fb_sink_client: Option, +} + +impl ForwarderBuilder { + /// Create a new builder with mandatory fields + pub(crate) fn new( source_client: SourceClient, sink_client: SinkClient, - transformer_client: Option, cln_token: CancellationToken, - ) -> Result { - let common_labels = metrics::forward_metrics_labels().clone(); - - Ok(Self { + ) -> Self { + Self { source_client, sink_client, - transformer_client, - common_labels, cln_token, - }) + transformer_client: None, + fb_sink_client: None, + } + } + + /// Set the optional transformer client + pub(crate) fn transformer_client(mut self, transformer_client: TransformerClient) -> Self { + self.transformer_client = Some(transformer_client); + self + } + + /// Set the optional fallback client + pub(crate) fn fb_sink_client(mut self, fallback_client: SinkClient) -> Self { + self.fb_sink_client = Some(fallback_client); + self } - /// run starts the forward-a-chunk loop and exits only after a chunk has been forwarded and ack'ed. + /// Build the Forwarder instance + #[must_use] + pub(crate) fn build(self) -> Forwarder { + let common_labels = metrics::forward_metrics_labels().clone(); + Forwarder { + source_client: self.source_client, + sink_client: self.sink_client, + transformer_client: self.transformer_client, + fallback_client: self.fb_sink_client, + cln_token: self.cln_token, + common_labels, + } + } +} + +impl Forwarder { + /// start starts the forward-a-chunk loop and exits only after a chunk has been forwarded and ack'ed. /// this means that, in the happy path scenario a block is always completely processed. /// this function will return on any error and will cause end up in a non-0 exit code. 
- pub(crate) async fn run(&mut self) -> Result<()> { - let mut messages_count: u64 = 0; + pub(crate) async fn start(&mut self) -> Result<()> { + let mut processed_msgs_count: usize = 0; let mut last_forwarded_at = std::time::Instant::now(); loop { - // TODO: emit latency metrics, metrics-rs histograms has memory leak issues. let start_time = tokio::time::Instant::now(); - // two arms, either shutdown or forward-a-chunk - tokio::select! { - _ = self.cln_token.cancelled() => { - info!("Shutdown signal received, stopping forwarder..."); - break; - } - result = self.source_client.read_fn(config().batch_size, config().timeout_in_ms) => { - // Read messages from the source - let messages = result?; - info!("Read batch size: {} and latency - {}ms", messages.len(), start_time.elapsed().as_millis()); - // emit metrics - let msg_count = messages.len() as u64; - // collect all the offsets as the transformer can drop (via filter) messages - let offsets = messages.iter().map(|msg| msg.offset.clone()).collect::>(); - messages_count += messages.len() as u64; - let bytes_count = messages.iter().map(|msg| msg.value.len() as u64).sum::(); - forward_metrics().monovtx_read_total.get_or_create(&self.common_labels).inc_by(msg_count); - forward_metrics().monovtx_read_bytes_total.get_or_create(&self.common_labels).inc_by(bytes_count); - - // Apply transformation if transformer is present - let transformed_messages = if let Some(transformer_client) = &self.transformer_client { - let start_time = tokio::time::Instant::now(); - let mut jh = JoinSet::new(); - for message in messages { - let mut transformer_client = transformer_client.clone(); - jh.spawn(async move { transformer_client.transform_fn(message).await }); - } + if self.cln_token.is_cancelled() { + break; + } - let mut results = Vec::new(); - while let Some(task) = jh.join_next().await { - let result = task.map_err(|e| Error::TransformerError(format!("{:?}", e)))?; - if let Some(result) = result? 
{ - results.extend(result); - } - } - info!("Transformer latency - {}ms", start_time.elapsed().as_millis()); - results - } else { - messages - }; - - let transformed_msg_count = transformed_messages.len() as u64; - forward_metrics().monovtx_sink_write_total.get_or_create(&self.common_labels).inc_by(transformed_msg_count); - - // Write messages to the sink - // TODO: should we retry writing? what if the error is transient? - // we could rely on gRPC retries and say that any error that is bubbled up is worthy of non-0 exit. - // we need to confirm this via FMEA tests. - - let mut retry_messages = transformed_messages; - let mut attempts = 0; - let mut error_map = HashMap::new(); - - while attempts <= config().sink_max_retry_attempts { - let start_time = tokio::time::Instant::now(); - match self.sink_client.sink_fn(retry_messages.clone()).await { - Ok(response) => { - info!("Sink latency - {}ms", start_time.elapsed().as_millis()); - - let failed_ids: Vec = response.results.iter() - .filter(|result| result.status != proto::Status::Success as i32) - .map(|result| result.id.clone()) - .collect(); - attempts += 1; - - if failed_ids.is_empty() { - break; - } else { - // Collect error messages and their counts - retry_messages.retain(|msg| failed_ids.contains(&msg.id)); - error_map.clear(); - for result in response.results { - if result.status != proto::Status::Success as i32 { - *error_map.entry(result.err_msg).or_insert(0) += 1; - } - } - - warn!("Retry attempt {} due to retryable error. Errors: {:?}", attempts, error_map); - sleep(tokio::time::Duration::from_millis(config().sink_retry_interval_in_ms as u64)).await; - } - } - Err(e) => return Err(e), - } - } + processed_msgs_count += self.read_and_process_messages().await?; - if !error_map.is_empty() { - return Err(Error::SinkError(format!( - "Failed to sink messages after {} attempts. 
Errors: {:?}", - attempts, error_map - ))); - } - // Acknowledge the messages back to the source - let start_time = tokio::time::Instant::now(); - self.source_client.ack_fn(offsets).await?; - info!("Ack latency - {}ms", start_time.elapsed().as_millis()); - // increment the acked messages count metric - forward_metrics().monovtx_ack_total.get_or_create(&self.common_labels).inc_by(msg_count); - } - } // if the last forward was more than 1 second ago, forward a chunk print the number of messages forwarded if last_forwarded_at.elapsed().as_millis() >= 1000 { info!( "Forwarded {} messages at time {}", - messages_count, + processed_msgs_count, Utc::now() ); - messages_count = 0; + processed_msgs_count = 0; last_forwarded_at = std::time::Instant::now(); } + forward_metrics() .monovtx_processing_time .get_or_create(&self.common_labels) @@ -169,14 +111,281 @@ impl Forwarder { } Ok(()) } + + /* + Read messages from the source, apply transformation if transformer is present, + write the messages to the sink, if fallback messages are present write them to the fallback sink, + and then acknowledge the messages back to the source. + */ + async fn read_and_process_messages(&mut self) -> Result { + let start_time = tokio::time::Instant::now(); + let messages = self + .source_client + .read_fn(config().batch_size, config().timeout_in_ms) + .await?; + debug!( + "Read batch size: {} and latency - {}ms", + messages.len(), + start_time.elapsed().as_millis() + ); + + // nothing more to be done. 
+ if messages.is_empty() { + return Ok(0); + } + + let msg_count = messages.len() as u64; + let bytes_count = messages + .iter() + .map(|msg| msg.value.len() as u64) + .sum::(); + forward_metrics() + .monovtx_read_total + .get_or_create(&self.common_labels) + .inc_by(msg_count); + forward_metrics() + .monovtx_read_bytes_total + .get_or_create(&self.common_labels) + .inc_by(bytes_count); + + // collect all the offsets as the transformer can drop (via filter) messages + let offsets = messages + .iter() + .map(|msg| msg.offset.clone()) + .collect::>(); + + // Apply transformation if transformer is present + let transformed_messages = self.apply_transformer(messages).await?; + + // Write the messages to the sink + self.write_to_sink(transformed_messages).await?; + + // Acknowledge the messages back to the source + self.acknowledge_messages(offsets).await?; + + Ok(msg_count as usize) + } + + // Applies transformation to the messages if transformer is present + // we concurrently apply transformation to all the messages. + async fn apply_transformer(&self, messages: Vec) -> Result> { + if let Some(transformer_client) = &self.transformer_client { + let start_time = tokio::time::Instant::now(); + let mut jh = JoinSet::new(); + for message in messages { + let mut transformer_client = transformer_client.clone(); + jh.spawn(async move { transformer_client.transform_fn(message).await }); + } + + let mut results = Vec::new(); + while let Some(task) = jh.join_next().await { + let result = task.map_err(|e| Error::TransformerError(format!("{:?}", e)))?; + if let Some(result) = result? 
{ + results.extend(result); + } + } + debug!( + "Transformer latency - {}ms", + start_time.elapsed().as_millis() + ); + Ok(results) + } else { + Ok(messages) + } + } + + // Writes the messages to the sink and handles fallback messages if present + async fn write_to_sink(&mut self, mut messages: Vec) -> Result<()> { + let msg_count = messages.len() as u64; + + if messages.is_empty() { + return Ok(()); + } + + let mut attempts = 0; + let mut error_map = HashMap::new(); + let mut fallback_msgs = Vec::new(); + + while attempts <= config().sink_max_retry_attempts { + let start_time = tokio::time::Instant::now(); + match self.sink_client.sink_fn(messages.clone()).await { + Ok(response) => { + debug!("Sink latency - {}ms", start_time.elapsed().as_millis()); + attempts += 1; + + fallback_msgs.extend( + response + .results + .iter() + .filter(|result| result.status == proto::Status::Fallback as i32) + .map(|result| { + messages + .iter() + .find(|msg| msg.id == result.id) + .unwrap() + .clone() + }) + .collect::>(), + ); + + messages = response + .results + .iter() + .filter(|result| result.status == proto::Status::Failure as i32) + .map(|result| { + messages + .iter() + .find(|msg| msg.id == result.id) + .unwrap() + .clone() + }) + .collect::>(); + + if messages.is_empty() { + break; + } else { + error_map.clear(); + for result in response.results { + if result.status == proto::Status::Failure as i32 { + *error_map.entry(result.err_msg).or_insert(0) += 1; + } + } + + warn!( + "Retry attempt {} due to retryable error. Errors: {:?}", + attempts, error_map + ); + sleep(tokio::time::Duration::from_millis( + config().sink_retry_interval_in_ms as u64, + )) + .await; + } + } + Err(e) => return Err(e), + } + } + + if !messages.is_empty() { + return Err(Error::SinkError(format!( + "Failed to sink messages after {} attempts. 
Errors: {:?}", + attempts, error_map + ))); + } + + // If there are fallback messages, write them to the fallback sink + if !fallback_msgs.is_empty() { + self.handle_fallback_messages(fallback_msgs).await?; + } + + forward_metrics() + .monovtx_sink_write_total + .get_or_create(&self.common_labels) + .inc_by(msg_count); + Ok(()) + } + + // Writes the fallback messages to the fallback sink + async fn handle_fallback_messages(&mut self, mut fallback_msgs: Vec) -> Result<()> { + if self.fallback_client.is_none() { + return Err(Error::SinkError( + "Response contains fallback messages but no fallback sink is configured" + .to_string(), + )); + } + + let fallback_client = self.fallback_client.as_mut().unwrap(); + let mut attempts = 0; + let mut fallback_error_map = HashMap::new(); + + while attempts <= config().sink_max_retry_attempts { + let start_time = tokio::time::Instant::now(); + match fallback_client.sink_fn(fallback_msgs.clone()).await { + Ok(fb_response) => { + debug!( + "Fallback sink latency - {}ms", + start_time.elapsed().as_millis() + ); + + fallback_msgs = fb_response + .results + .iter() + .filter(|result| result.status == proto::Status::Failure as i32) + .map(|result| { + fallback_msgs + .iter() + .find(|msg| msg.id == result.id) + .unwrap() + .clone() + }) + .collect::>(); + + // we can't specify fallback response inside fallback sink + if fb_response + .results + .iter() + .any(|result| result.status == proto::Status::Fallback as i32) + { + return Err(Error::SinkError( + "Fallback sink can't specify status fallback".to_string(), + )); + } + + attempts += 1; + + if fallback_msgs.is_empty() { + break; + } else { + fallback_error_map.clear(); + for result in fb_response.results { + if result.status != proto::Status::Success as i32 { + *fallback_error_map.entry(result.err_msg).or_insert(0) += 1; + } + } + + warn!( + "Fallback sink retry attempt {} due to retryable error. 
Errors: {:?}", + attempts, fallback_error_map + ); + sleep(tokio::time::Duration::from_millis( + config().sink_retry_interval_in_ms as u64, + )) + .await; + } + } + Err(e) => return Err(e), + } + } + + if !fallback_msgs.is_empty() { + return Err(Error::SinkError(format!( + "Failed to write messages to fallback sink after {} attempts. Errors: {:?}", + attempts, fallback_error_map + ))); + } + + Ok(()) + } + + // Acknowledge the messages back to the source + async fn acknowledge_messages(&mut self, offsets: Vec) -> Result<()> { + let n = offsets.len(); + let start_time = tokio::time::Instant::now(); + self.source_client.ack_fn(offsets).await?; + debug!("Ack latency - {}ms", start_time.elapsed().as_millis()); + forward_metrics() + .monovtx_ack_total + .get_or_create(&self.common_labels) + .inc_by(n as u64); + Ok(()) + } } #[cfg(test)] mod tests { use std::collections::HashSet; - use crate::error::Error; - use crate::forwarder::Forwarder; + use crate::error::Result; + use crate::forwarder::ForwarderBuilder; use crate::sink::{SinkClient, SinkConfig}; use crate::source::{SourceClient, SourceConfig}; use crate::transformer::{TransformerClient, TransformerConfig}; @@ -310,7 +519,6 @@ mod tests { #[tokio::test] async fn test_forwarder_source_sink() { - // Create channels for communication let (sink_tx, mut sink_rx) = tokio::sync::mpsc::channel(10); // Start the source server @@ -396,17 +604,12 @@ mod tests { .await .expect("failed to connect to transformer server"); - let mut forwarder = Forwarder::new( - source_client, - sink_client, - Some(transformer_client), - cln_token.clone(), - ) - .await - .expect("failed to create forwarder"); + let mut forwarder = ForwarderBuilder::new(source_client, sink_client, cln_token.clone()) + .transformer_client(transformer_client) + .build(); let forwarder_handle = tokio::spawn(async move { - forwarder.run().await.unwrap(); + forwarder.start().await.unwrap(); }); // Receive messages from the sink @@ -524,19 +727,18 @@ mod tests { .await 
.expect("failed to connect to sink server"); - let mut forwarder = Forwarder::new(source_client, sink_client, None, cln_token.clone()) - .await - .expect("failed to create forwarder"); + let mut forwarder = + ForwarderBuilder::new(source_client, sink_client, cln_token.clone()).build(); let forwarder_handle = tokio::spawn(async move { - forwarder.run().await?; + forwarder.start().await?; Ok(()) }); // Set a timeout for the forwarder let timeout_duration = tokio::time::Duration::from_secs(1); let result = tokio::time::timeout(timeout_duration, forwarder_handle).await; - let result: Result<(), Error> = result.expect("forwarder_handle timed out").unwrap(); + let result: Result<()> = result.expect("forwarder_handle timed out").unwrap(); assert!(result.is_err()); // stop the servers @@ -554,4 +756,150 @@ mod tests { .await .expect("failed to join sink server task"); } + + // Sink that returns status fallback + struct FallbackSender {} + + #[tonic::async_trait] + impl sink::Sinker for FallbackSender { + async fn sink( + &self, + mut input: tokio::sync::mpsc::Receiver, + ) -> Vec { + let mut responses = vec![]; + while let Some(datum) = input.recv().await { + responses.append(&mut vec![sink::Response::fallback(datum.id)]); + } + responses + } + } + + #[tokio::test] + async fn test_fb_sink() { + let (sink_tx, mut sink_rx) = tokio::sync::mpsc::channel(10); + + // Start the source server + let (source_shutdown_tx, source_shutdown_rx) = tokio::sync::oneshot::channel(); + let tmp_dir = tempfile::TempDir::new().unwrap(); + let source_sock_file = tmp_dir.path().join("source.sock"); + let server_info_file = tmp_dir.path().join("source-server-info"); + + let server_info = server_info_file.clone(); + let source_socket = source_sock_file.clone(); + let source_server_handle = tokio::spawn(async move { + source::Server::new(SimpleSource::new()) + .with_socket_file(source_socket) + .with_server_info_file(server_info) + .start_with_shutdown(source_shutdown_rx) + .await + .unwrap(); + }); 
+ let source_config = SourceConfig { + socket_path: source_sock_file.to_str().unwrap().to_string(), + server_info_file: server_info_file.to_str().unwrap().to_string(), + max_message_size: 4 * 1024 * 1024, + }; + + // Start the primary sink server (which returns status fallback) + let (sink_shutdown_tx, sink_shutdown_rx) = tokio::sync::oneshot::channel(); + let sink_tmp_dir = tempfile::TempDir::new().unwrap(); + let sink_sock_file = sink_tmp_dir.path().join("sink.sock"); + let server_info_file = sink_tmp_dir.path().join("sink-server-info"); + + let server_info = server_info_file.clone(); + let sink_socket = sink_sock_file.clone(); + let sink_server_handle = tokio::spawn(async move { + sink::Server::new(FallbackSender {}) + .with_socket_file(sink_socket) + .with_server_info_file(server_info) + .start_with_shutdown(sink_shutdown_rx) + .await + .unwrap(); + }); + let sink_config = SinkConfig { + socket_path: sink_sock_file.to_str().unwrap().to_string(), + server_info_file: server_info_file.to_str().unwrap().to_string(), + max_message_size: 4 * 1024 * 1024, + }; + + // Start the fb sink server + let (fb_sink_shutdown_tx, fb_sink_shutdown_rx) = tokio::sync::oneshot::channel(); + let fb_sink_tmp_dir = tempfile::TempDir::new().unwrap(); + let fb_sink_sock_file = fb_sink_tmp_dir.path().join("fb-sink.sock"); + let server_info_file = fb_sink_tmp_dir.path().join("fb-sinker-server-info"); + + let server_info = server_info_file.clone(); + let fb_sink_socket = fb_sink_sock_file.clone(); + let fb_sink_server_handle = tokio::spawn(async move { + sink::Server::new(InMemorySink::new(sink_tx)) + .with_socket_file(fb_sink_socket) + .with_server_info_file(server_info) + .start_with_shutdown(fb_sink_shutdown_rx) + .await + .unwrap(); + }); + let fb_sink_config = SinkConfig { + socket_path: fb_sink_sock_file.to_str().unwrap().to_string(), + server_info_file: server_info_file.to_str().unwrap().to_string(), + max_message_size: 4 * 1024 * 1024, + }; + + // Wait for the servers to start + 
tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + + let cln_token = CancellationToken::new(); + + let source_client = SourceClient::connect(source_config) + .await + .expect("failed to connect to source server"); + + let sink_client = SinkClient::connect(sink_config) + .await + .expect("failed to connect to sink server"); + + let fb_sink_client = SinkClient::connect(fb_sink_config) + .await + .expect("failed to connect to fb sink server"); + + let mut forwarder = ForwarderBuilder::new(source_client, sink_client, cln_token.clone()) + .fb_sink_client(fb_sink_client) + .build(); + + let forwarder_handle = tokio::spawn(async move { + forwarder.start().await.unwrap(); + }); + + // We should receive the message in the fallback sink, since the primary sink returns status fallback + let received_message = sink_rx.recv().await.unwrap(); + assert_eq!(received_message.value, "test-message".as_bytes()); + assert_eq!(received_message.keys, vec!["test-key".to_string()]); + + // stop the forwarder + cln_token.cancel(); + forwarder_handle + .await + .expect("failed to join forwarder task"); + + // stop the servers + source_shutdown_tx + .send(()) + .expect("failed to send shutdown signal"); + source_server_handle + .await + .expect("failed to join source server task"); + + sink_shutdown_tx + .send(()) + .expect("failed to send shutdown signal"); + sink_server_handle + .await + .expect("failed to join sink server task"); + + fb_sink_shutdown_tx + .send(()) + .expect("failed to send shutdown signal"); + fb_sink_server_handle + .await + .expect("failed to join fb sink server task"); + } } diff --git a/rust/monovertex/src/lib.rs b/rust/monovertex/src/lib.rs index f3864f7531..614e7279aa 100644 --- a/rust/monovertex/src/lib.rs +++ b/rust/monovertex/src/lib.rs @@ -1,9 +1,9 @@ pub(crate) use self::error::Result; use crate::config::config; pub(crate) use crate::error::Error; -use crate::forwarder::Forwarder; +use crate::forwarder::ForwarderBuilder; use 
crate::metrics::{start_metrics_https_server, LagReaderBuilder, MetricsState}; -use crate::sink::{SinkClient, SinkConfig}; +use crate::sink::{SinkClient, SinkConfig, FB_SINK_SERVER_INFO_FILE, FB_SINK_SOCKET}; use crate::source::{SourceClient, SourceConfig}; use crate::transformer::{TransformerClient, TransformerConfig}; use std::net::SocketAddr; @@ -72,6 +72,16 @@ pub async fn mono_vertex() { None }; + let fb_sink_config = if config().is_fallback_enabled { + Some(SinkConfig { + max_message_size: config().grpc_max_message_size, + socket_path: FB_SINK_SOCKET.to_string(), + server_info_file: FB_SINK_SERVER_INFO_FILE.to_string(), + }) + } else { + None + }; + let cln_token = CancellationToken::new(); let shutdown_cln_token = cln_token.clone(); // wait for SIG{INT,TERM} and invoke cancellation token. @@ -82,7 +92,15 @@ pub async fn mono_vertex() { }); // Run the forwarder with cancellation token. - if let Err(e) = init(source_config, sink_config, transformer_config, cln_token).await { + if let Err(e) = init( + source_config, + sink_config, + transformer_config, + fb_sink_config, + cln_token, + ) + .await + { error!("Application error: {:?}", e); // abort the task since we have an error @@ -122,6 +140,7 @@ pub async fn init( source_config: SourceConfig, sink_config: SinkConfig, transformer_config: Option, + fb_sink_config: Option, cln_token: CancellationToken, ) -> Result<()> { server_info::check_for_server_compatibility(&source_config.server_info_file, cln_token.clone()) @@ -153,11 +172,24 @@ pub async fn init( None }; + let mut fb_sink_client = if let Some(config) = fb_sink_config { + server_info::check_for_server_compatibility(&config.server_info_file, cln_token.clone()) + .await + .map_err(|e| { + warn!("Error waiting for fallback sink server info file: {:?}", e); + Error::ForwarderError("Error waiting for server info file".to_string()) + })?; + Some(SinkClient::connect(config).await?) 
+ } else { + None + }; + // readiness check for all the ud containers wait_until_ready( &mut source_client, &mut sink_client, &mut transformer_client, + &mut fb_sink_client, ) .await?; @@ -173,6 +205,7 @@ pub async fn init( source_client: source_client.clone(), sink_client: sink_client.clone(), transformer_client: transformer_client.clone(), + fb_sink_client: fb_sink_client.clone(), }; tokio::spawn(async move { if let Err(e) = start_metrics_https_server(metrics_addr, metrics_state).await { @@ -191,10 +224,20 @@ pub async fn init( .build(); lag_reader.start().await; - let mut forwarder = - Forwarder::new(source_client, sink_client, transformer_client, cln_token).await?; + // build the forwarder + let mut forwarder_builder = ForwarderBuilder::new(source_client, sink_client, cln_token); + if let Some(transformer_client) = transformer_client { + forwarder_builder = forwarder_builder.transformer_client(transformer_client); + } - forwarder.run().await?; + if let Some(fb_sink_client) = fb_sink_client { + forwarder_builder = forwarder_builder.fb_sink_client(fb_sink_client); + } + + let mut forwarder = forwarder_builder.build(); + + // start the forwarder + forwarder.start().await?; info!("Forwarder stopped gracefully"); Ok(()) @@ -204,6 +247,7 @@ async fn wait_until_ready( source_client: &mut SourceClient, sink_client: &mut SinkClient, transformer_client: &mut Option, + fb_sink_client: &mut Option, ) -> Result<()> { loop { let source_ready = source_client.is_ready().await; @@ -226,7 +270,17 @@ async fn wait_until_ready( true }; - if source_ready && sink_ready && transformer_ready { + let fb_sink_ready = if let Some(client) = fb_sink_client { + let ready = client.is_ready().await; + if !ready { + info!("Fallback Sink is not ready, waiting..."); + } + ready + } else { + true + }; + + if source_ready && sink_ready && transformer_ready && fb_sink_ready { break; } @@ -330,7 +384,8 @@ mod tests { let forwarder_cln_token = cln_token.clone(); let forwarder_handle = 
tokio::spawn(async move { - let result = super::init(source_config, sink_config, None, forwarder_cln_token).await; + let result = + super::init(source_config, sink_config, None, None, forwarder_cln_token).await; assert!(result.is_ok()); }); diff --git a/rust/monovertex/src/metrics.rs b/rust/monovertex/src/metrics.rs index cd792d9693..789ba47a3b 100644 --- a/rust/monovertex/src/metrics.rs +++ b/rust/monovertex/src/metrics.rs @@ -51,6 +51,7 @@ pub(crate) struct MetricsState { pub source_client: SourceClient, pub sink_client: SinkClient, pub transformer_client: Option, + pub fb_sink_client: Option, } /// The global register of all metrics. @@ -190,7 +191,7 @@ pub(crate) fn forward_metrics_labels() -> &'static Vec<(String, String)> { pub async fn metrics_handler() -> impl IntoResponse { let state = global_registry().registry.lock(); let mut buffer = String::new(); - encode(&mut buffer, &*state).unwrap(); + encode(&mut buffer, &state).unwrap(); debug!("Exposing Metrics: {:?}", buffer); Response::builder() .status(StatusCode::OK) @@ -204,18 +205,12 @@ pub async fn metrics_handler() -> impl IntoResponse { #[allow(dead_code)] pub(crate) async fn start_metrics_http_server( addr: A, - source_client: SourceClient, - sink_client: SinkClient, - transformer_client: Option, + metrics_state: MetricsState, ) -> crate::Result<()> where A: ToSocketAddrs + std::fmt::Debug, { - let metrics_app = metrics_router(MetricsState { - source_client, - sink_client, - transformer_client, - }); + let metrics_app = metrics_router(metrics_state); let listener = TcpListener::bind(&addr) .await @@ -273,13 +268,22 @@ async fn readyz() -> impl IntoResponse { async fn sidecar_livez(State(mut state): State) -> impl IntoResponse { if !state.source_client.is_ready().await { + error!("Source client is not available"); return StatusCode::SERVICE_UNAVAILABLE; } if !state.sink_client.is_ready().await { + error!("Sink client is not available"); return StatusCode::SERVICE_UNAVAILABLE; } if let Some(mut 
transformer_client) = state.transformer_client { if !transformer_client.is_ready().await { + error!("Transformer client is not available"); + return StatusCode::SERVICE_UNAVAILABLE; + } + } + if let Some(mut fb_sink_client) = state.fb_sink_client { + if !fb_sink_client.is_ready().await { + error!("Fallback sink client is not available"); return StatusCode::SERVICE_UNAVAILABLE; } } diff --git a/rust/monovertex/src/sink.rs b/rust/monovertex/src/sink.rs index 5f2b8a74be..4cedaef49e 100644 --- a/rust/monovertex/src/sink.rs +++ b/rust/monovertex/src/sink.rs @@ -13,7 +13,9 @@ pub mod proto { const RECONNECT_INTERVAL: u64 = 1000; const MAX_RECONNECT_ATTEMPTS: usize = 5; const SINK_SOCKET: &str = "/var/run/numaflow/sink.sock"; +pub(crate) const FB_SINK_SOCKET: &str = "/var/run/numaflow/fb-sink.sock"; const SINK_SERVER_INFO_FILE: &str = "/var/run/numaflow/sinker-server-info"; +pub(crate) const FB_SINK_SERVER_INFO_FILE: &str = "/var/run/numaflow/fb-sinker-server-info"; /// SinkConfig is the configuration for the sink server. 
#[derive(Debug, Clone)] @@ -58,11 +60,15 @@ impl SinkClient { } pub(crate) async fn sink_fn(&mut self, messages: Vec) -> Result { + let (tx, rx) = tokio::sync::mpsc::channel(if messages.is_empty() { + 1 + } else { + messages.len() + }); + let requests: Vec = messages.into_iter().map(|message| message.into()).collect(); - let (tx, rx) = tokio::sync::mpsc::channel(1); - tokio::spawn(async move { for request in requests { if tx.send(request).await.is_err() { From b54a4cd3e555ee3e29c603f7f2ea1c15ccd88f7a Mon Sep 17 00:00:00 2001 From: Sidhant Kohli Date: Mon, 19 Aug 2024 14:23:31 -0400 Subject: [PATCH 008/188] feat: add health for monovertex (#1954) Signed-off-by: Sidhant Kohli --- pkg/apis/numaflow/v1alpha1/const.go | 6 +- .../numaflow/v1alpha1/mono_vertex_types.go | 7 + .../v1alpha1/mono_vertex_types_test.go | 85 +++++++ pkg/apis/proto/mvtxdaemon/mvtxdaemon.pb.go | 222 ++++++++++++++--- pkg/apis/proto/mvtxdaemon/mvtxdaemon.pb.gw.go | 69 ++++++ pkg/apis/proto/mvtxdaemon/mvtxdaemon.proto | 15 ++ .../proto/mvtxdaemon/mvtxdaemon_grpc.pb.go | 37 +++ pkg/mvtxdaemon/client/grpc_client.go | 8 + pkg/mvtxdaemon/client/interface.go | 1 + pkg/mvtxdaemon/client/restful_client.go | 14 ++ pkg/mvtxdaemon/server/daemon_server.go | 12 +- .../server/service/health_status.go | 226 ++++++++++++++++++ .../server/service/health_status_test.go | 145 +++++++++++ pkg/mvtxdaemon/server/service/mvtx_service.go | 83 ++++++- .../health-status-code/mono_vtx_code_map.go | 71 ++++++ .../mono_vtx_code_map_test.go | 50 ++++ .../{code_map.go => pipeline_code_map.go} | 14 -- ..._map_test.go => pipeline_code_map_test.go} | 0 pkg/shared/health-status-code/utils.go | 31 +++ server/apis/v1/handler.go | 37 ++- server/apis/v1/health.go | 95 +++++++- server/cmd/server/start.go | 1 + server/cmd/server/start_test.go | 4 +- server/routes/routes.go | 2 + 24 files changed, 1161 insertions(+), 74 deletions(-) create mode 100644 pkg/apis/numaflow/v1alpha1/mono_vertex_types_test.go create mode 100644 
pkg/mvtxdaemon/server/service/health_status.go create mode 100644 pkg/mvtxdaemon/server/service/health_status_test.go create mode 100644 pkg/shared/health-status-code/mono_vtx_code_map.go create mode 100644 pkg/shared/health-status-code/mono_vtx_code_map_test.go rename pkg/shared/health-status-code/{code_map.go => pipeline_code_map.go} (89%) rename pkg/shared/health-status-code/{code_map_test.go => pipeline_code_map_test.go} (100%) create mode 100644 pkg/shared/health-status-code/utils.go diff --git a/pkg/apis/numaflow/v1alpha1/const.go b/pkg/apis/numaflow/v1alpha1/const.go index a1a8e518fa..177e6f73a1 100644 --- a/pkg/apis/numaflow/v1alpha1/const.go +++ b/pkg/apis/numaflow/v1alpha1/const.go @@ -230,7 +230,11 @@ const ( // MonoVertex health status // TODO - more statuses to be added - MonoVertexStatusHealthy = "healthy" + MonoVertexStatusHealthy = "healthy" + MonoVertexStatusUnhealthy = "unhealthy" + MonoVertexStatusUnknown = "unknown" + MonoVertexStatusCritical = "critical" + MonoVertexStatusWarning = "warning" // Callback annotation keys CallbackEnabledKey = "numaflow.numaproj.io/callback" diff --git a/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go index 6f02509563..4d835e9c2c 100644 --- a/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go +++ b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go @@ -532,10 +532,17 @@ func (mvs *MonoVertexStatus) MarkPhaseRunning() { } // IsHealthy indicates whether the MonoVertex is in healthy status +// It returns false if any issues exists +// True indicates that the MonoVertex is healthy +// TODO: Add support for paused whenever added in MonoVtx? 
func (mvs *MonoVertexStatus) IsHealthy() bool { + // check for the phase field first switch mvs.Phase { + // Directly return an error if the phase is failed case MonoVertexPhaseFailed: return false + // Check if the MonoVertex is ready if the phase is running, + // We check if all the required conditions are true for it to be healthy case MonoVertexPhaseRunning: return mvs.IsReady() default: diff --git a/pkg/apis/numaflow/v1alpha1/mono_vertex_types_test.go b/pkg/apis/numaflow/v1alpha1/mono_vertex_types_test.go new file mode 100644 index 0000000000..2f2fee9a39 --- /dev/null +++ b/pkg/apis/numaflow/v1alpha1/mono_vertex_types_test.go @@ -0,0 +1,85 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestMonoVertex_GetDaemonServiceObj(t *testing.T) { + mv := MonoVertex{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + }, + } + + svc := mv.GetDaemonServiceObj() + if svc.Name != "test-mv-daemon-svc" { + t.Error("GetDaemonServiceObj generated incorrect service name") + } + if svc.Namespace != "default" { + t.Error("GetDaemonServiceObj generated incorrect namespace") + } +} + +func TestMonoVertex_MarkPhaseRunning(t *testing.T) { + mvs := MonoVertexStatus{} + mvs.MarkPhaseRunning() + + if mvs.Phase != MonoVertexPhaseRunning { + t.Errorf("MarkPhaseRunning did not set the Phase to Running, got %v", mvs.Phase) + } +} + +func TestMonoVertex_IsHealthy(t *testing.T) { + mvs := MonoVertexStatus{} + + mvs.InitConditions() + mvs.MarkPhaseRunning() + mvs.MarkDeployed() + mvs.MarkDaemonHealthy() + mvs.MarkPodHealthy("AllGood", "All pod are up and running") + + isHealthy := mvs.IsHealthy() + if !isHealthy { + t.Error("IsHealthy should return true when everything is healthy") + } + + mvs.MarkPodNotHealthy("PodIssue", "One of the pods is down") + isHealthy = mvs.IsHealthy() + if isHealthy { + t.Error("IsHealthy should return false when pod condition is not healthy") + } +} + +func TestMonoVertexStatus_MarkDeployFailed(t *testing.T) { + mvs := MonoVertexStatus{} + mvs.MarkDeployFailed("DeployError", "Deployment failed due to resource constraints") + + if mvs.Phase != MonoVertexPhaseFailed { + t.Errorf("MarkDeployFailed should set the Phase to Failed, got %v", mvs.Phase) + } + if mvs.Reason != "DeployError" { + t.Errorf("MarkDeployFailed should set the Reason to 'DeployError', got %s", mvs.Reason) + } + if mvs.Message != "Deployment failed due to resource constraints" { + t.Errorf("MarkDeployFailed should set the Message correctly, got %s", mvs.Message) + } +} diff --git a/pkg/apis/proto/mvtxdaemon/mvtxdaemon.pb.go 
b/pkg/apis/proto/mvtxdaemon/mvtxdaemon.pb.go index 10d3e2350c..e33c93aa7a 100644 --- a/pkg/apis/proto/mvtxdaemon/mvtxdaemon.pb.go +++ b/pkg/apis/proto/mvtxdaemon/mvtxdaemon.pb.go @@ -151,6 +151,117 @@ func (x *GetMonoVertexMetricsResponse) GetMetrics() *MonoVertexMetrics { return nil } +// MonoVertexStatus is used to provide information about the mono vertex status. +type MonoVertexStatus struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status string `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + Code string `protobuf:"bytes,3,opt,name=code,proto3" json:"code,omitempty"` +} + +func (x *MonoVertexStatus) Reset() { + *x = MonoVertexStatus{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MonoVertexStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MonoVertexStatus) ProtoMessage() {} + +func (x *MonoVertexStatus) ProtoReflect() protoreflect.Message { + mi := &file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MonoVertexStatus.ProtoReflect.Descriptor instead. 
+func (*MonoVertexStatus) Descriptor() ([]byte, []int) { + return file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_rawDescGZIP(), []int{2} +} + +func (x *MonoVertexStatus) GetStatus() string { + if x != nil { + return x.Status + } + return "" +} + +func (x *MonoVertexStatus) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *MonoVertexStatus) GetCode() string { + if x != nil { + return x.Code + } + return "" +} + +type GetMonoVertexStatusResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status *MonoVertexStatus `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` +} + +func (x *GetMonoVertexStatusResponse) Reset() { + *x = GetMonoVertexStatusResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMonoVertexStatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMonoVertexStatusResponse) ProtoMessage() {} + +func (x *GetMonoVertexStatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMonoVertexStatusResponse.ProtoReflect.Descriptor instead. 
+func (*GetMonoVertexStatusResponse) Descriptor() ([]byte, []int) { + return file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_rawDescGZIP(), []int{3} +} + +func (x *GetMonoVertexStatusResponse) GetStatus() *MonoVertexStatus { + if x != nil { + return x.Status + } + return nil +} + var File_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto protoreflect.FileDescriptor var file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_rawDesc = []byte{ @@ -194,20 +305,38 @@ var file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_rawDesc = []byte{ 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6d, 0x76, 0x74, 0x78, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x4d, 0x6f, 0x6e, 0x6f, 0x56, 0x65, 0x72, 0x74, 0x65, 0x78, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x07, 0x6d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x73, 0x32, 0x8c, 0x01, 0x0a, 0x17, 0x4d, 0x6f, 0x6e, 0x6f, 0x56, 0x65, - 0x72, 0x74, 0x65, 0x78, 0x44, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x12, 0x71, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x4d, 0x6f, 0x6e, 0x6f, 0x56, 0x65, 0x72, 0x74, - 0x65, 0x78, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x1a, 0x28, 0x2e, 0x6d, 0x76, 0x74, 0x78, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x47, - 0x65, 0x74, 0x4d, 0x6f, 0x6e, 0x6f, 0x56, 0x65, 0x72, 0x74, 0x65, 0x78, 0x4d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x17, 0x82, 0xd3, 0xe4, - 0x93, 0x02, 0x11, 0x12, 0x0f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x73, 0x42, 0x38, 0x5a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x6e, 0x75, 0x6d, 0x61, 0x70, 0x72, 0x6f, 0x6a, 0x2f, 0x6e, 0x75, 0x6d, 0x61, - 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x70, 0x72, - 0x6f, 
0x74, 0x6f, 0x2f, 0x6d, 0x76, 0x74, 0x78, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x22, 0x58, 0x0a, 0x10, 0x4d, 0x6f, 0x6e, 0x6f, 0x56, 0x65, 0x72, + 0x74, 0x65, 0x78, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, + 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x22, + 0x53, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x4d, 0x6f, 0x6e, 0x6f, 0x56, 0x65, 0x72, 0x74, 0x65, 0x78, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, + 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, + 0x2e, 0x6d, 0x76, 0x74, 0x78, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x4d, 0x6f, 0x6e, 0x6f, + 0x56, 0x65, 0x72, 0x74, 0x65, 0x78, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x32, 0xfc, 0x01, 0x0a, 0x17, 0x4d, 0x6f, 0x6e, 0x6f, 0x56, 0x65, 0x72, + 0x74, 0x65, 0x78, 0x44, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x12, 0x71, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x4d, 0x6f, 0x6e, 0x6f, 0x56, 0x65, 0x72, 0x74, 0x65, + 0x78, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x1a, 0x28, 0x2e, 0x6d, 0x76, 0x74, 0x78, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x47, 0x65, + 0x74, 0x4d, 0x6f, 0x6e, 0x6f, 0x56, 0x65, 0x72, 0x74, 0x65, 0x78, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x17, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 
0x11, 0x12, 0x0f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x12, 0x6e, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4d, 0x6f, 0x6e, 0x6f, 0x56, 0x65, + 0x72, 0x74, 0x65, 0x78, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x1a, 0x27, 0x2e, 0x6d, 0x76, 0x74, 0x78, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, + 0x47, 0x65, 0x74, 0x4d, 0x6f, 0x6e, 0x6f, 0x56, 0x65, 0x72, 0x74, 0x65, 0x78, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x10, 0x12, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x42, 0x38, 0x5a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x6e, 0x75, 0x6d, 0x61, 0x70, 0x72, 0x6f, 0x6a, 0x2f, 0x6e, 0x75, 0x6d, 0x61, 0x66, + 0x6c, 0x6f, 0x77, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x6d, 0x76, 0x74, 0x78, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -222,29 +351,34 @@ func file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_rawDescGZIP() []byte { return file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_rawDescData } -var file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_msgTypes = make([]protoimpl.MessageInfo, 6) var file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_goTypes = []any{ (*MonoVertexMetrics)(nil), // 0: mvtxdaemon.MonoVertexMetrics (*GetMonoVertexMetricsResponse)(nil), // 1: mvtxdaemon.GetMonoVertexMetricsResponse - nil, // 2: mvtxdaemon.MonoVertexMetrics.ProcessingRatesEntry - nil, // 3: mvtxdaemon.MonoVertexMetrics.PendingsEntry - (*wrapperspb.DoubleValue)(nil), // 4: google.protobuf.DoubleValue - 
(*wrapperspb.Int64Value)(nil), // 5: google.protobuf.Int64Value - (*emptypb.Empty)(nil), // 6: google.protobuf.Empty + (*MonoVertexStatus)(nil), // 2: mvtxdaemon.MonoVertexStatus + (*GetMonoVertexStatusResponse)(nil), // 3: mvtxdaemon.GetMonoVertexStatusResponse + nil, // 4: mvtxdaemon.MonoVertexMetrics.ProcessingRatesEntry + nil, // 5: mvtxdaemon.MonoVertexMetrics.PendingsEntry + (*wrapperspb.DoubleValue)(nil), // 6: google.protobuf.DoubleValue + (*wrapperspb.Int64Value)(nil), // 7: google.protobuf.Int64Value + (*emptypb.Empty)(nil), // 8: google.protobuf.Empty } var file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_depIdxs = []int32{ - 2, // 0: mvtxdaemon.MonoVertexMetrics.processingRates:type_name -> mvtxdaemon.MonoVertexMetrics.ProcessingRatesEntry - 3, // 1: mvtxdaemon.MonoVertexMetrics.pendings:type_name -> mvtxdaemon.MonoVertexMetrics.PendingsEntry + 4, // 0: mvtxdaemon.MonoVertexMetrics.processingRates:type_name -> mvtxdaemon.MonoVertexMetrics.ProcessingRatesEntry + 5, // 1: mvtxdaemon.MonoVertexMetrics.pendings:type_name -> mvtxdaemon.MonoVertexMetrics.PendingsEntry 0, // 2: mvtxdaemon.GetMonoVertexMetricsResponse.metrics:type_name -> mvtxdaemon.MonoVertexMetrics - 4, // 3: mvtxdaemon.MonoVertexMetrics.ProcessingRatesEntry.value:type_name -> google.protobuf.DoubleValue - 5, // 4: mvtxdaemon.MonoVertexMetrics.PendingsEntry.value:type_name -> google.protobuf.Int64Value - 6, // 5: mvtxdaemon.MonoVertexDaemonService.GetMonoVertexMetrics:input_type -> google.protobuf.Empty - 1, // 6: mvtxdaemon.MonoVertexDaemonService.GetMonoVertexMetrics:output_type -> mvtxdaemon.GetMonoVertexMetricsResponse - 6, // [6:7] is the sub-list for method output_type - 5, // [5:6] is the sub-list for method input_type - 5, // [5:5] is the sub-list for extension type_name - 5, // [5:5] is the sub-list for extension extendee - 0, // [0:5] is the sub-list for field type_name + 2, // 3: mvtxdaemon.GetMonoVertexStatusResponse.status:type_name -> mvtxdaemon.MonoVertexStatus + 6, // 4: 
mvtxdaemon.MonoVertexMetrics.ProcessingRatesEntry.value:type_name -> google.protobuf.DoubleValue + 7, // 5: mvtxdaemon.MonoVertexMetrics.PendingsEntry.value:type_name -> google.protobuf.Int64Value + 8, // 6: mvtxdaemon.MonoVertexDaemonService.GetMonoVertexMetrics:input_type -> google.protobuf.Empty + 8, // 7: mvtxdaemon.MonoVertexDaemonService.GetMonoVertexStatus:input_type -> google.protobuf.Empty + 1, // 8: mvtxdaemon.MonoVertexDaemonService.GetMonoVertexMetrics:output_type -> mvtxdaemon.GetMonoVertexMetricsResponse + 3, // 9: mvtxdaemon.MonoVertexDaemonService.GetMonoVertexStatus:output_type -> mvtxdaemon.GetMonoVertexStatusResponse + 8, // [8:10] is the sub-list for method output_type + 6, // [6:8] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name } func init() { file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_init() } @@ -277,6 +411,30 @@ func file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_init() { return nil } } + file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*MonoVertexStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*GetMonoVertexStatusResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -284,7 +442,7 @@ func file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_pkg_apis_proto_mvtxdaemon_mvtxdaemon_proto_rawDesc, NumEnums: 0, - NumMessages: 4, + NumMessages: 6, NumExtensions: 0, NumServices: 1, }, diff --git 
a/pkg/apis/proto/mvtxdaemon/mvtxdaemon.pb.gw.go b/pkg/apis/proto/mvtxdaemon/mvtxdaemon.pb.gw.go index 97c8075676..80741d34f6 100644 --- a/pkg/apis/proto/mvtxdaemon/mvtxdaemon.pb.gw.go +++ b/pkg/apis/proto/mvtxdaemon/mvtxdaemon.pb.gw.go @@ -50,6 +50,24 @@ func local_request_MonoVertexDaemonService_GetMonoVertexMetrics_0(ctx context.Co } +func request_MonoVertexDaemonService_GetMonoVertexStatus_0(ctx context.Context, marshaler runtime.Marshaler, client MonoVertexDaemonServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq emptypb.Empty + var metadata runtime.ServerMetadata + + msg, err := client.GetMonoVertexStatus(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_MonoVertexDaemonService_GetMonoVertexStatus_0(ctx context.Context, marshaler runtime.Marshaler, server MonoVertexDaemonServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq emptypb.Empty + var metadata runtime.ServerMetadata + + msg, err := server.GetMonoVertexStatus(ctx, &protoReq) + return msg, metadata, err + +} + // RegisterMonoVertexDaemonServiceHandlerServer registers the http handlers for service MonoVertexDaemonService to "mux". // UnaryRPC :call MonoVertexDaemonServiceServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. 
@@ -81,6 +99,31 @@ func RegisterMonoVertexDaemonServiceHandlerServer(ctx context.Context, mux *runt }) + mux.Handle("GET", pattern_MonoVertexDaemonService_GetMonoVertexStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/mvtxdaemon.MonoVertexDaemonService/GetMonoVertexStatus", runtime.WithHTTPPathPattern("/api/v1/status")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_MonoVertexDaemonService_GetMonoVertexStatus_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_MonoVertexDaemonService_GetMonoVertexStatus_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + return nil } @@ -144,13 +187,39 @@ func RegisterMonoVertexDaemonServiceHandlerClient(ctx context.Context, mux *runt }) + mux.Handle("GET", pattern_MonoVertexDaemonService_GetMonoVertexStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/mvtxdaemon.MonoVertexDaemonService/GetMonoVertexStatus", runtime.WithHTTPPathPattern("/api/v1/status")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_MonoVertexDaemonService_GetMonoVertexStatus_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_MonoVertexDaemonService_GetMonoVertexStatus_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + return nil } var ( pattern_MonoVertexDaemonService_GetMonoVertexMetrics_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "metrics"}, "")) + + pattern_MonoVertexDaemonService_GetMonoVertexStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "status"}, "")) ) var ( forward_MonoVertexDaemonService_GetMonoVertexMetrics_0 = runtime.ForwardResponseMessage + + forward_MonoVertexDaemonService_GetMonoVertexStatus_0 = runtime.ForwardResponseMessage ) diff --git a/pkg/apis/proto/mvtxdaemon/mvtxdaemon.proto b/pkg/apis/proto/mvtxdaemon/mvtxdaemon.proto index 512b5bf515..21bc215c50 100644 --- a/pkg/apis/proto/mvtxdaemon/mvtxdaemon.proto +++ b/pkg/apis/proto/mvtxdaemon/mvtxdaemon.proto @@ -37,6 +37,17 @@ message GetMonoVertexMetricsResponse { MonoVertexMetrics metrics = 1; } +// MonoVertexStatus is used to provide information about the mono vertex status. +message MonoVertexStatus { + string status = 1; + string message = 2; + string code = 3; +} + +message GetMonoVertexStatusResponse { + MonoVertexStatus status = 1; +} + // MonoVertexDaemonService is a grpc service that is used to provide APIs for giving any MonoVertex information. 
service MonoVertexDaemonService { @@ -44,4 +55,8 @@ service MonoVertexDaemonService { option (google.api.http).get = "/api/v1/metrics"; }; + rpc GetMonoVertexStatus (google.protobuf.Empty) returns (GetMonoVertexStatusResponse) { + option (google.api.http).get = "/api/v1/status"; + }; + } \ No newline at end of file diff --git a/pkg/apis/proto/mvtxdaemon/mvtxdaemon_grpc.pb.go b/pkg/apis/proto/mvtxdaemon/mvtxdaemon_grpc.pb.go index 1dd50188d7..33f0b26d6b 100644 --- a/pkg/apis/proto/mvtxdaemon/mvtxdaemon_grpc.pb.go +++ b/pkg/apis/proto/mvtxdaemon/mvtxdaemon_grpc.pb.go @@ -36,6 +36,7 @@ const _ = grpc.SupportPackageIsVersion7 const ( MonoVertexDaemonService_GetMonoVertexMetrics_FullMethodName = "/mvtxdaemon.MonoVertexDaemonService/GetMonoVertexMetrics" + MonoVertexDaemonService_GetMonoVertexStatus_FullMethodName = "/mvtxdaemon.MonoVertexDaemonService/GetMonoVertexStatus" ) // MonoVertexDaemonServiceClient is the client API for MonoVertexDaemonService service. @@ -43,6 +44,7 @@ const ( // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type MonoVertexDaemonServiceClient interface { GetMonoVertexMetrics(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*GetMonoVertexMetricsResponse, error) + GetMonoVertexStatus(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*GetMonoVertexStatusResponse, error) } type monoVertexDaemonServiceClient struct { @@ -62,11 +64,21 @@ func (c *monoVertexDaemonServiceClient) GetMonoVertexMetrics(ctx context.Context return out, nil } +func (c *monoVertexDaemonServiceClient) GetMonoVertexStatus(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*GetMonoVertexStatusResponse, error) { + out := new(GetMonoVertexStatusResponse) + err := c.cc.Invoke(ctx, MonoVertexDaemonService_GetMonoVertexStatus_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + // MonoVertexDaemonServiceServer is the server API for MonoVertexDaemonService service. // All implementations must embed UnimplementedMonoVertexDaemonServiceServer // for forward compatibility type MonoVertexDaemonServiceServer interface { GetMonoVertexMetrics(context.Context, *emptypb.Empty) (*GetMonoVertexMetricsResponse, error) + GetMonoVertexStatus(context.Context, *emptypb.Empty) (*GetMonoVertexStatusResponse, error) mustEmbedUnimplementedMonoVertexDaemonServiceServer() } @@ -77,6 +89,9 @@ type UnimplementedMonoVertexDaemonServiceServer struct { func (UnimplementedMonoVertexDaemonServiceServer) GetMonoVertexMetrics(context.Context, *emptypb.Empty) (*GetMonoVertexMetricsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetMonoVertexMetrics not implemented") } +func (UnimplementedMonoVertexDaemonServiceServer) GetMonoVertexStatus(context.Context, *emptypb.Empty) (*GetMonoVertexStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetMonoVertexStatus not implemented") +} func (UnimplementedMonoVertexDaemonServiceServer) mustEmbedUnimplementedMonoVertexDaemonServiceServer() { } @@ -109,6 +124,24 @@ func _MonoVertexDaemonService_GetMonoVertexMetrics_Handler(srv interface{}, ctx return interceptor(ctx, in, info, handler) } +func _MonoVertexDaemonService_GetMonoVertexStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(emptypb.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MonoVertexDaemonServiceServer).GetMonoVertexStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: MonoVertexDaemonService_GetMonoVertexStatus_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(MonoVertexDaemonServiceServer).GetMonoVertexStatus(ctx, req.(*emptypb.Empty)) + } + return interceptor(ctx, in, info, handler) +} + // MonoVertexDaemonService_ServiceDesc is the grpc.ServiceDesc for MonoVertexDaemonService service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -120,6 +153,10 @@ var MonoVertexDaemonService_ServiceDesc = grpc.ServiceDesc{ MethodName: "GetMonoVertexMetrics", Handler: _MonoVertexDaemonService_GetMonoVertexMetrics_Handler, }, + { + MethodName: "GetMonoVertexStatus", + Handler: _MonoVertexDaemonService_GetMonoVertexStatus_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "pkg/apis/proto/mvtxdaemon/mvtxdaemon.proto", diff --git a/pkg/mvtxdaemon/client/grpc_client.go b/pkg/mvtxdaemon/client/grpc_client.go index c7fb80d6fc..cd6fdbb455 100644 --- a/pkg/mvtxdaemon/client/grpc_client.go +++ b/pkg/mvtxdaemon/client/grpc_client.go @@ -54,6 +54,14 @@ func (dc *grpcClient) GetMonoVertexMetrics(ctx context.Context) (*mvtxdaemon.Mon } } +func (dc *grpcClient) GetMonoVertexStatus(ctx context.Context) (*mvtxdaemon.MonoVertexStatus, error) { + if rspn, err := dc.client.GetMonoVertexStatus(ctx, &emptypb.Empty{}); err != nil { + return nil, err + } else { + return rspn.Status, nil + } +} + // Close function closes the gRPC connection, it has to be called after a daemon client has finished all its jobs. 
func (dc *grpcClient) Close() error { if dc.conn != nil { diff --git a/pkg/mvtxdaemon/client/interface.go b/pkg/mvtxdaemon/client/interface.go index 71a1a4aeaf..25a7ca1d05 100644 --- a/pkg/mvtxdaemon/client/interface.go +++ b/pkg/mvtxdaemon/client/interface.go @@ -26,4 +26,5 @@ import ( type MonoVertexDaemonClient interface { io.Closer GetMonoVertexMetrics(ctx context.Context) (*mvtxdaemon.MonoVertexMetrics, error) + GetMonoVertexStatus(ctx context.Context) (*mvtxdaemon.MonoVertexStatus, error) } diff --git a/pkg/mvtxdaemon/client/restful_client.go b/pkg/mvtxdaemon/client/restful_client.go index 7409e19734..0e08e6f8a8 100644 --- a/pkg/mvtxdaemon/client/restful_client.go +++ b/pkg/mvtxdaemon/client/restful_client.go @@ -89,3 +89,17 @@ func (rc *restfulClient) GetMonoVertexMetrics(ctx context.Context) (*mvtxdaemon. return res.Metrics, nil } } + +func (rc *restfulClient) GetMonoVertexStatus(ctx context.Context) (*mvtxdaemon.MonoVertexStatus, error) { + resp, err := rc.httpClient.Get(fmt.Sprintf("%s/api/v1/status", rc.hostURL)) + if err != nil { + return nil, fmt.Errorf("failed to call get mono vertex status RESTful API, %w", err) + } + defer func() { _ = resp.Body.Close() }() + if res, err := unmarshalResponse[mvtxdaemon.GetMonoVertexStatusResponse](resp); err != nil { + return nil, err + } else { + return res.Status, nil + } + +} diff --git a/pkg/mvtxdaemon/server/daemon_server.go b/pkg/mvtxdaemon/server/daemon_server.go index f6ce1c9ec7..a8f05f64a7 100644 --- a/pkg/mvtxdaemon/server/daemon_server.go +++ b/pkg/mvtxdaemon/server/daemon_server.go @@ -45,12 +45,14 @@ import ( ) type daemonServer struct { - monoVtx *v1alpha1.MonoVertex + monoVtx *v1alpha1.MonoVertex + mvtxService *service.MonoVertexService } func NewDaemonServer(monoVtx *v1alpha1.MonoVertex) *daemonServer { return &daemonServer{ - monoVtx: monoVtx, + monoVtx: monoVtx, + mvtxService: nil, } } @@ -93,6 +95,11 @@ func (ds *daemonServer) Run(ctx context.Context) error { go func() { _ = 
httpServer.Serve(httpL) }() go func() { _ = tcpm.Serve() }() + // Start the Data flow health status updater + go func() { + ds.mvtxService.StartHealthCheck(ctx) + }() + // Start the rater go func() { if err := rater.Start(ctx); err != nil { @@ -127,6 +134,7 @@ func (ds *daemonServer) newGRPCServer(rater rateServer.MonoVtxRatable) (*grpc.Se return nil, err } mvtxdaemon.RegisterMonoVertexDaemonServiceServer(grpcServer, mvtxService) + ds.mvtxService = mvtxService return grpcServer, nil } diff --git a/pkg/mvtxdaemon/server/service/health_status.go b/pkg/mvtxdaemon/server/service/health_status.go new file mode 100644 index 0000000000..9942f7885c --- /dev/null +++ b/pkg/mvtxdaemon/server/service/health_status.go @@ -0,0 +1,226 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package service + +import ( + "context" + "fmt" + "math" + "sync" + "time" + + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" + "github.com/numaproj/numaflow/pkg/apis/proto/mvtxdaemon" + "github.com/numaproj/numaflow/pkg/isb" +) + +const ( + // healthTimeStep is the frequency at which the health of a MonoVertex is computed + healthTimeStep = 30 * time.Second +) + +// dataHealthResponse is the response returned by the data health check API +type dataHealthResponse struct { + // Status is the overall data status of the pipeline + Status string `json:"status"` + // Message is the error message if any + Message string `json:"message"` + // Code is the status code for the data health + Code string `json:"code"` +} + +// newDataHealthResponse is used to create a new dataHealthResponse object +func newDataHealthResponse(status string, message string, code string) *dataHealthResponse { + return &dataHealthResponse{ + Status: status, + Message: message, + Code: code, + } +} + +// defaultDataHealthResponse is the default response returned by the data health check API +var defaultDataHealthResponse = newDataHealthResponse( + v1alpha1.MonoVertexStatusUnknown, + "MonoVertex data flow is in an unknown state", + "D4") + +// HealthChecker is the struct type for health checker. +type HealthChecker struct { + // field for the health status. + currentDataStatus *dataHealthResponse + // Spec of the monoVertex getting processed + monoVertex *v1alpha1.MonoVertex + // Mutex for state information + statusLock *sync.RWMutex +} + +// NewHealthChecker creates a new object HealthChecker struct type. +func NewHealthChecker(monoVertex *v1alpha1.MonoVertex) *HealthChecker { + // Return a new HealthChecker struct instance. 
+ return &HealthChecker{ + currentDataStatus: defaultDataHealthResponse, + monoVertex: monoVertex, + statusLock: &sync.RWMutex{}, + } +} + +// monoVtxState is a struct which contains the name and data health state +// of a MonoVertex +type monoVtxState struct { + // Name is the name of the vertex + Name string `json:"name"` + // State is the state of the vertex + State string `json:"state"` +} + +// newMonoVtxState is used to create a new monoVtxState object +func newMonoVtxState(name string, state string) *monoVtxState { + return &monoVtxState{ + Name: name, + State: state, + } +} + +// getCurrentHealth returns the current health status of the MonoVertex. +// It is thread safe to ensure concurrent access. +func (hc *HealthChecker) getCurrentHealth() *dataHealthResponse { + // Lock the statusLock to ensure thread safety. + hc.statusLock.RLock() + defer hc.statusLock.RUnlock() + // Return the health status. + return hc.currentDataStatus +} + +// setCurrentHealth sets the current health status of the MonoVertex. +// It is thread safe to ensure concurrent access. +func (hc *HealthChecker) setCurrentHealth(status *dataHealthResponse) { + // Lock the statusLock to ensure thread safety. + hc.statusLock.Lock() + defer hc.statusLock.Unlock() + // Set the health status. + hc.currentDataStatus = status +} + +// getMonoVertexDataCriticality is used to provide the data criticality of the MonoVertex +// They can be of the following types: +// 1. Healthy: The MonoVertex is working as expected +// 2. Warning: The MonoVertex is working but there could be a lag in the data movement +// 3. Critical: The MonoVertex is not working as expected +// We need to check the following things to determine the data criticality of the MonoVertex: +// At any given instant of time what is the desired number of replicas required by the MonoVertex +// to clear out the backlog in the target state time. +// This logic is similar to our scaling logic. 
+// Based on the desired replicas, we decide the data criticality.
+//
+// - If the current replicas are equal to the max replicas, and the desired replicas are more than the max replicas,
+// the data criticality is Critical. This means that the MonoVertex is not able to process the data at the rate
+// it is coming in, and due to the specified scale we cannot add more replicas as well.
+// Else we consider the data criticality as healthy.
+//
+// TODO(MonoVertex): Add the logic to determine the warning state based on more conditions.
+func (hc *HealthChecker) getMonoVertexDataCriticality(ctx context.Context, mvtxMetrics *mvtxdaemon.MonoVertexMetrics) (*monoVtxState, error) {
+	// Get the desired replicas for the MonoVertex based on the metrics
+	desiredReplicas, err := hc.getDesiredReplica(mvtxMetrics)
+	if err != nil {
+		return nil, err
+	}
+	// Get the current state of the MonoVertex replicas
+	currentReplicas := hc.monoVertex.GetReplicas()
+	maxReplicas := int(hc.monoVertex.Spec.Scale.GetMaxReplicas())
+	// default status is healthy
+	status := v1alpha1.MonoVertexStatusHealthy
+	// If the current replicas are equal to the max replicas, and the desired replicas are more than the max replicas,
+	// the data criticality is Critical.
+	if currentReplicas == maxReplicas && desiredReplicas > maxReplicas {
+		status = v1alpha1.MonoVertexStatusCritical
+	}
+	return newMonoVtxState(mvtxMetrics.MonoVertex, status), nil
+}
+
+// getDesiredReplica calculates the desired replicas based on the processing rate and pending information
+// of the MonoVertex. This logic is similar to our scaling logic.
+// But unlike the scaling where we change the desired replicas based on the provided scale,
+// here we just calculate the desired replicas and return it.
+func (hc *HealthChecker) getDesiredReplica(mvtxMetrics *mvtxdaemon.MonoVertexMetrics) (int, error) {
+	totalRate := float64(0)
+	totalPending := int64(0)
+	// Extract the processing rate from the metrics for the default lookback period
+	rate, existing := mvtxMetrics.ProcessingRates["default"]
+	// Rate not available
+	// send back an error that we cannot calculate the health status right now
+	if !existing || rate.GetValue() < 0 {
+		return 0, fmt.Errorf("cannot check data health, MonoVertex %s has no rate information", mvtxMetrics.MonoVertex)
+	} else {
+		totalRate = rate.GetValue()
+	}
+
+	// Extract the pending information from the metrics for the default lookback period
+	pending, existing := mvtxMetrics.Pendings["default"]
+	if !existing || pending.GetValue() < 0 || pending.GetValue() == isb.PendingNotAvailable {
+		// Pending not available, we don't do anything
+		// send back an error that we cannot calculate the health status right now
+		return 0, fmt.Errorf("cannot check data health, MonoVertex %s has no pending information", mvtxMetrics.MonoVertex)
+	} else {
+		totalPending = pending.GetValue()
+	}
+
+	// If both totalRate and totalPending are 0, we don't need any processing replicas
+	if totalPending == 0 && totalRate == 0 {
+		return 0, nil
+	}
+
+	//TODO(MonoVertex): Something is wrong
+	// MonoVertex is not processing any data even though the pending is still around.
+	// It could be a slow processor, but zero rate isn't ideal
+	// we should mark this up as warning maybe?
+	if totalRate == 0 {
+		return int(hc.monoVertex.Status.Replicas), nil
+	}
+
+	// We calculate the time of finishing processing the pending messages,
+	// and then we know how many replicas are needed to get them done in target seconds.
+ desired := int32(math.Round(((float64(totalPending) / totalRate) / float64(hc.monoVertex.Spec.Scale.GetTargetProcessingSeconds())) * float64(hc.monoVertex.Status.Replicas))) + return int(desired), nil +} + +// convertMonoVtxStateToHealthResp is used to generate the data health response from a MonoVtx State +func (hc *HealthChecker) convertMonoVtxStateToHealthResp(vertexState *monoVtxState) *dataHealthResponse { + switch vertexState.State { + case v1alpha1.MonoVertexStatusHealthy: + return newDataHealthResponse( + v1alpha1.MonoVertexStatusHealthy, + "MonoVertex data flow is healthy", + "D1") + case v1alpha1.MonoVertexStatusWarning: + return newDataHealthResponse( + v1alpha1.MonoVertexStatusWarning, + fmt.Sprintf("MonoVertex data flow is in a warning state for %s", vertexState.Name), + "D2") + case v1alpha1.MonoVertexStatusCritical: + return newDataHealthResponse( + v1alpha1.MonoVertexStatusCritical, + fmt.Sprintf("MonoVertex data flow is in a critical state for %s", vertexState.Name), + "D3") + case v1alpha1.MonoVertexStatusUnknown: + return newDataHealthResponse( + v1alpha1.MonoVertexStatusUnknown, + fmt.Sprintf("MonoVertex data flow is in an unknown state due to %s", vertexState.Name), + "D4") + default: + return defaultDataHealthResponse + } +} diff --git a/pkg/mvtxdaemon/server/service/health_status_test.go b/pkg/mvtxdaemon/server/service/health_status_test.go new file mode 100644 index 0000000000..ecd3dedc44 --- /dev/null +++ b/pkg/mvtxdaemon/server/service/health_status_test.go @@ -0,0 +1,145 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package service + +import ( + "testing" + + "google.golang.org/protobuf/types/known/wrapperspb" + + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" + "github.com/numaproj/numaflow/pkg/apis/proto/mvtxdaemon" +) + +func TestGetCurrentHealth(t *testing.T) { + monoVertex := &v1alpha1.MonoVertex{} // Simplified for testing + hc := NewHealthChecker(monoVertex) + expected := defaultDataHealthResponse + + if result := hc.getCurrentHealth(); result != expected { + t.Errorf("Expected %v, got %v", expected, result) + } +} + +func TestSetCurrentHealth(t *testing.T) { + monoVertex := &v1alpha1.MonoVertex{} // Simplified + hc := NewHealthChecker(monoVertex) + newStatus := newDataHealthResponse("Healthy", "All systems green", "D1") + + hc.setCurrentHealth(newStatus) + + if result := hc.getCurrentHealth(); result != newStatus { + t.Errorf("Expected %v, got %v", newStatus, result) + } +} + +func TestConvertMonoVtxStateToHealthResp(t *testing.T) { + monoVertex := &v1alpha1.MonoVertex{} // Simplified + hc := NewHealthChecker(monoVertex) + + tests := []struct { + name string + state *monoVtxState + expected *dataHealthResponse + }{ + { + name: "Healthy State", + state: newMonoVtxState("vertex1", v1alpha1.MonoVertexStatusHealthy), + expected: newDataHealthResponse(v1alpha1.MonoVertexStatusHealthy, "MonoVertex data flow is healthy", "D1"), + }, + { + name: "Critical State", + state: newMonoVtxState("vertex1", v1alpha1.MonoVertexStatusCritical), + expected: newDataHealthResponse(v1alpha1.MonoVertexStatusCritical, "MonoVertex data flow is in a critical state for vertex1", "D3"), 
+ }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + result := hc.convertMonoVtxStateToHealthResp(test.state) + if result.Status != test.expected.Status || result.Message != test.expected.Message || result.Code != test.expected.Code { + t.Errorf("Expected %+v, got %+v", test.expected, result) + } + }) + } +} + +func TestGetDesiredReplica(t *testing.T) { + targetProcessingSeconds := uint32(5) + monoVertex := &v1alpha1.MonoVertex{ + Spec: v1alpha1.MonoVertexSpec{ + Scale: v1alpha1.Scale{TargetProcessingSeconds: &targetProcessingSeconds}, + }, + Status: v1alpha1.MonoVertexStatus{Replicas: 4}, + } + hc := NewHealthChecker(monoVertex) + + metrics := &mvtxdaemon.MonoVertexMetrics{ + MonoVertex: "vertex", + ProcessingRates: map[string]*wrapperspb.DoubleValue{ + "default": {Value: 100}, + }, + Pendings: map[string]*wrapperspb.Int64Value{ + "default": {Value: 500}, + }, + } + + expected := int(4) + result, err := hc.getDesiredReplica(metrics) + if err != nil { + t.Fatal(err) + } + if result != expected { + t.Errorf("Expected %d, got %d", expected, result) + } +} + +func TestGetDesiredReplicaNoRateAvailable(t *testing.T) { + monoVertex := &v1alpha1.MonoVertex{ + Status: v1alpha1.MonoVertexStatus{Replicas: 4}, + } + hc := NewHealthChecker(monoVertex) + + metrics := &mvtxdaemon.MonoVertexMetrics{ + MonoVertex: "vertex", + Pendings: map[string]*wrapperspb.Int64Value{ + "default": {Value: 100}, + }, + } + + _, err := hc.getDesiredReplica(metrics) + if err == nil { + t.Errorf("Expected error for no rate information, got nil") + } +} + +func TestGetDesiredReplicaPendingNotAvailable(t *testing.T) { + monoVertex := &v1alpha1.MonoVertex{} + hc := NewHealthChecker(monoVertex) + + metrics := &mvtxdaemon.MonoVertexMetrics{ + MonoVertex: "vertex", + ProcessingRates: map[string]*wrapperspb.DoubleValue{ + "default": {Value: 100}, + }, + } + + _, err := hc.getDesiredReplica(metrics) + if err == nil { + t.Errorf("Expected error for no pending information, got 
nil") + } +} diff --git a/pkg/mvtxdaemon/server/service/mvtx_service.go b/pkg/mvtxdaemon/server/service/mvtx_service.go index 40a2b2972c..c52c590ac6 100644 --- a/pkg/mvtxdaemon/server/service/mvtx_service.go +++ b/pkg/mvtxdaemon/server/service/mvtx_service.go @@ -40,21 +40,22 @@ import ( // Note: Please keep consistent with the definitions in rust/monovertex/sc/metrics.rs const MonoVtxPendingMetric = "monovtx_pending" -type MoveVertexService struct { +type MonoVertexService struct { mvtxdaemon.UnimplementedMonoVertexDaemonServiceServer - monoVtx *v1alpha1.MonoVertex - httpClient *http.Client - rater raterPkg.MonoVtxRatable + monoVtx *v1alpha1.MonoVertex + httpClient *http.Client + rater raterPkg.MonoVtxRatable + healthChecker *HealthChecker } -var _ mvtxdaemon.MonoVertexDaemonServiceServer = (*MoveVertexService)(nil) +var _ mvtxdaemon.MonoVertexDaemonServiceServer = (*MonoVertexService)(nil) -// NewMoveVertexService returns a new instance of MoveVertexService +// NewMoveVertexService returns a new instance of MonoVertexService func NewMoveVertexService( monoVtx *v1alpha1.MonoVertex, rater raterPkg.MonoVtxRatable, -) (*MoveVertexService, error) { - mv := MoveVertexService{ +) (*MonoVertexService, error) { + mv := MonoVertexService{ monoVtx: monoVtx, httpClient: &http.Client{ Transport: &http.Transport{ @@ -62,12 +63,18 @@ func NewMoveVertexService( }, Timeout: time.Second * 3, }, - rater: rater, + rater: rater, + healthChecker: NewHealthChecker(monoVtx), } return &mv, nil } -func (mvs *MoveVertexService) GetMonoVertexMetrics(ctx context.Context, empty *emptypb.Empty) (*mvtxdaemon.GetMonoVertexMetricsResponse, error) { +func (mvs *MonoVertexService) GetMonoVertexMetrics(ctx context.Context, empty *emptypb.Empty) (*mvtxdaemon.GetMonoVertexMetricsResponse, error) { + return mvs.fetchMonoVertexMetrics(ctx) +} + +// fetchMonoVertexMetrics is a helper function to derive the MonoVertex metrics +func (mvs *MonoVertexService) fetchMonoVertexMetrics(ctx context.Context) 
(*mvtxdaemon.GetMonoVertexMetricsResponse, error) { resp := new(mvtxdaemon.GetMonoVertexMetricsResponse) collectedMetrics := new(mvtxdaemon.MonoVertexMetrics) collectedMetrics.MonoVertex = mvs.monoVtx.Name @@ -77,8 +84,19 @@ func (mvs *MoveVertexService) GetMonoVertexMetrics(ctx context.Context, empty *e return resp, nil } +func (mvs *MonoVertexService) GetMonoVertexStatus(ctx context.Context, empty *emptypb.Empty) (*mvtxdaemon.GetMonoVertexStatusResponse, error) { + resp := new(mvtxdaemon.GetMonoVertexStatusResponse) + collectedStatus := new(mvtxdaemon.MonoVertexStatus) + dataHealth := mvs.healthChecker.getCurrentHealth() + collectedStatus.Status = dataHealth.Status + collectedStatus.Message = dataHealth.Message + collectedStatus.Code = dataHealth.Code + resp.Status = collectedStatus + return resp, nil +} + // getPending returns the pending count for the mono vertex -func (mvs *MoveVertexService) getPending(ctx context.Context) map[string]*wrapperspb.Int64Value { +func (mvs *MonoVertexService) getPending(ctx context.Context) map[string]*wrapperspb.Int64Value { log := logging.FromContext(ctx) headlessServiceName := mvs.monoVtx.GetHeadlessServiceName() pendingMap := make(map[string]*wrapperspb.Int64Value) @@ -117,3 +135,46 @@ func (mvs *MoveVertexService) getPending(ctx context.Context) map[string]*wrappe } return pendingMap } + +// StartHealthCheck starts the health check for the MonoVertex using the health checker +func (mvs *MonoVertexService) StartHealthCheck(ctx context.Context) { + mvs.startHealthCheck(ctx) +} + +// startHealthCheck starts the health check for the pipeline. +// The ticks are generated at the interval of healthTimeStep. +func (mvs *MonoVertexService) startHealthCheck(ctx context.Context) { + logger := logging.FromContext(ctx) + // Goroutine to listen for ticks + // At every tick, check and update the health status of the MonoVertex. + // If the context is done, return. + // Create a ticker to generate ticks at the interval of healthTimeStep. 
+ ticker := time.NewTicker(healthTimeStep) + defer ticker.Stop() + for { + select { + // Get the current health status of the MonoVertex. + case <-ticker.C: + // Fetch the MonoVertex metrics, these are required for deriving the + // health status + mvtxMetrics, _ := mvs.fetchMonoVertexMetrics(ctx) + // Calculate the data criticality + criticality, err := mvs.healthChecker.getMonoVertexDataCriticality(ctx, mvtxMetrics.Metrics) + logger.Debugw("MonoVertex Health check", zap.Any("criticality", criticality)) + if err != nil { + // If there is an error, set the current health status to unknown. + // as we are not able to determine the health of the pipeline. + logger.Errorw("Failed to MonoVertex data criticality", zap.Error(err)) + mvs.healthChecker.setCurrentHealth(defaultDataHealthResponse) + } else { + // convert the MonoVertex health state to API response + monoVertexState := mvs.healthChecker.convertMonoVtxStateToHealthResp(criticality) + // update the current health status of the MonoVertex to cache + mvs.healthChecker.setCurrentHealth(monoVertexState) + } + // If the context is done, return. + case <-ctx.Done(): + return + } + } +} diff --git a/pkg/shared/health-status-code/mono_vtx_code_map.go b/pkg/shared/health-status-code/mono_vtx_code_map.go new file mode 100644 index 0000000000..ef2e3c5ab4 --- /dev/null +++ b/pkg/shared/health-status-code/mono_vtx_code_map.go @@ -0,0 +1,71 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package health_status_code + +var monoVtxResourceMap = map[string]*HealthCodeInfo{ + "M1": newHealthCodeInfo( + "Mono Vertex is healthy", + "Healthy", + ), + "M2": newHealthCodeInfo( + "Mono Vertex is in a critical state", + "Critical", + ), + "M3": newHealthCodeInfo( + "Mono Vertex is in a warning state", + "Warning", + ), + "M4": newHealthCodeInfo( + "Mono Vertex is in an unknown state", + "Critical", + ), + "M5": newHealthCodeInfo( + "Mono Vertex is in a paused state", + "Warning", + ), +} + +// monoVtxDataflowHealthMap is used to maintain status codes for dataflow level health +// Each map entry is a map of status code as the key to the status message and the criticality of the status. +// Status codes are in incremental like +// 1. D1 +// 2. D2 +// 3. D3 +// The criticality is used to determine the overall status of the MonoVertex +// Criticality can be one of the following: +// 1. Critical: The MonoVertex is in a critical state +// 2. Warning: The MonoVertex is in a warning state +// 3. Healthy: The MonoVertex is healthy + +var _ = map[string]*HealthCodeInfo{ + "D1": newHealthCodeInfo( + "Dataflow is healthy", + "Healthy", + ), + "D2": newHealthCodeInfo( + "Dataflow in warning state", + "Warning", + ), + "D3": newHealthCodeInfo( + "Dataflow in critical state", + "Critical", + ), + "D4": newHealthCodeInfo( + "Dataflow in unknown state", + "Critical", + ), +} diff --git a/pkg/shared/health-status-code/mono_vtx_code_map_test.go b/pkg/shared/health-status-code/mono_vtx_code_map_test.go new file mode 100644 index 0000000000..6bb643b0b9 --- /dev/null +++ b/pkg/shared/health-status-code/mono_vtx_code_map_test.go @@ -0,0 +1,50 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package health_status_code + +import "testing" + +func TestHealthCodeInformation(t *testing.T) { + tests := []struct { + code string + expectedCriticality string + expectedStatus string + }{ + {"M1", "Healthy", "Mono Vertex is healthy"}, + {"M2", "Critical", "Mono Vertex is in a critical state"}, + {"M3", "Warning", "Mono Vertex is in a warning state"}, + {"M4", "Critical", "Mono Vertex is in an unknown state"}, + {"M5", "Warning", "Mono Vertex is in a paused state"}, + } + + for _, test := range tests { + t.Run(test.code, func(t *testing.T) { + info, exists := monoVtxResourceMap[test.code] + if !exists { + t.Errorf("Health code %s does not exist in map", test.code) + } + + if info.Status != test.expectedStatus { + t.Errorf("Wrong state for %s: got %s, want %s", test.code, info.Status, test.expectedStatus) + } + + if info.Criticality != test.expectedCriticality { + t.Errorf("Wrong description for %s: got %s, want %s", test.code, info.Criticality, test.expectedCriticality) + } + }) + } +} diff --git a/pkg/shared/health-status-code/code_map.go b/pkg/shared/health-status-code/pipeline_code_map.go similarity index 89% rename from pkg/shared/health-status-code/code_map.go rename to pkg/shared/health-status-code/pipeline_code_map.go index 8d20618304..08da32c0cd 100644 --- a/pkg/shared/health-status-code/code_map.go +++ b/pkg/shared/health-status-code/pipeline_code_map.go @@ -16,20 +16,6 @@ limitations under the License. 
package health_status_code -// HealthCodeInfo is used to maintain status codes for vertex level health -type HealthCodeInfo struct { - Status string - Criticality string -} - -// newHealthCodeInfo is used to create a new HealthCodeInfo object -func newHealthCodeInfo(status string, criticality string) *HealthCodeInfo { - return &HealthCodeInfo{ - Status: status, - Criticality: criticality, - } -} - // VertexHealthMap is used to maintain status codes for vertex level health // Each map entry is a map of status code as the key to the status message and the criticality of the status. // Status codes are in incremental like diff --git a/pkg/shared/health-status-code/code_map_test.go b/pkg/shared/health-status-code/pipeline_code_map_test.go similarity index 100% rename from pkg/shared/health-status-code/code_map_test.go rename to pkg/shared/health-status-code/pipeline_code_map_test.go diff --git a/pkg/shared/health-status-code/utils.go b/pkg/shared/health-status-code/utils.go new file mode 100644 index 0000000000..e83ca20de0 --- /dev/null +++ b/pkg/shared/health-status-code/utils.go @@ -0,0 +1,31 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package health_status_code + +// HealthCodeInfo is used to maintain status codes for vertex level health +type HealthCodeInfo struct { + Status string + Criticality string +} + +// newHealthCodeInfo is used to create a new HealthCodeInfo object +func newHealthCodeInfo(status string, criticality string) *HealthCodeInfo { + return &HealthCodeInfo{ + Status: status, + Criticality: criticality, + } +} diff --git a/server/apis/v1/handler.go b/server/apis/v1/handler.go index fb53cfbe42..460d863435 100644 --- a/server/apis/v1/handler.go +++ b/server/apis/v1/handler.go @@ -1016,7 +1016,7 @@ func (h *handler) GetPipelineStatus(c *gin.Context) { // Get the vertex level health of the pipeline resourceHealth, err := h.healthChecker.getPipelineResourceHealth(h, ns, pipeline) if err != nil { - h.respondWithError(c, fmt.Sprintf("Failed to get the dataStatus for pipeline %q: %s", pipeline, err.Error())) + h.respondWithError(c, fmt.Sprintf("Failed to get the resourceHealth for pipeline %q: %s", pipeline, err.Error())) return } @@ -1145,6 +1145,41 @@ func (h *handler) GetMonoVertexMetrics(c *gin.Context) { c.JSON(http.StatusOK, NewNumaflowAPIResponse(nil, metrics)) } +// GetMonoVertexHealth is used to get the health information about a mono vertex +// We use two checks to determine the health of the mono vertex: +// 1. Resource Health: It is based on the health of the mono vertex deployment and pods. +// 2. 
Data Criticality: It is based on the data movement of the mono vertex +func (h *handler) GetMonoVertexHealth(c *gin.Context) { + ns, monoVertex := c.Param("namespace"), c.Param("mono-vertex") + + // Resource level health + resourceHealth, err := h.healthChecker.getMonoVtxResourceHealth(h, ns, monoVertex) + if err != nil { + h.respondWithError(c, fmt.Sprintf("Failed to get the resourceHealth for MonoVertex %q: %s", monoVertex, err.Error())) + return + } + + // Create a new daemon client to get the data status + client, err := h.getMonoVertexDaemonClient(ns, monoVertex) + if err != nil || client == nil { + h.respondWithError(c, fmt.Sprintf("failed to get daemon service client for mono vertex %q, %s", monoVertex, err.Error())) + return + } + // Data level health status + dataHealth, err := client.GetMonoVertexStatus(c) + if err != nil { + h.respondWithError(c, fmt.Sprintf("Failed to get the mono vertex dataStatus: namespace %q mono vertex %q: %s", ns, monoVertex, err.Error())) + return + } + + // Create a response string based on the vertex health and data criticality + // We combine both the states to get the final dataStatus of the pipeline + response := NewHealthResponse(resourceHealth.Status, dataHealth.GetStatus(), + resourceHealth.Message, dataHealth.GetMessage(), resourceHealth.Code, dataHealth.GetCode()) + + c.JSON(http.StatusOK, NewNumaflowAPIResponse(nil, response)) +} + // getAllNamespaces is a utility used to fetch all the namespaces in the cluster // except the kube system namespaces func getAllNamespaces(h *handler) ([]string, error) { diff --git a/server/apis/v1/health.go b/server/apis/v1/health.go index 690631b80f..5973a8865d 100644 --- a/server/apis/v1/health.go +++ b/server/apis/v1/health.go @@ -14,32 +14,38 @@ import ( ) const ( - // resourceCacheRefreshDuration is the duration after which the vertex status cache is refreshed + // resourceCacheRefreshDuration is the duration after which the resource status cache is refreshed 
resourceCacheRefreshDuration = 30 * time.Second ) -// resourceHealthResponse is the response returned by the vertex health check API +// resourceHealthResponse is the response returned by the health check API type resourceHealthResponse struct { - // Status is the overall vertex status of the pipeline + // Status is the overall resource status of the corresponding resource Status string `json:"status"` // Message is the error message if any Message string `json:"message"` - // Code is the status code for the vertex health + // Code is the status code for the resource health Code string `json:"code"` } -// HealthChecker is the struct to hold the resource status cache for the pipeline +// HealthChecker is the struct to hold the resource status cache for the +// pipeline and Mono Vertex type HealthChecker struct { - resourceStatusCache *evictCache.LRU[string, *resourceHealthResponse] - log *zap.SugaredLogger + // pipelineResourceStatusCache is a cache to store the pipeline resource status + pipelineResourceStatusCache *evictCache.LRU[string, *resourceHealthResponse] + // monoVtxResourceStatusCache is a cache to store the Mono Vertex resource status + monoVtxResourceStatusCache *evictCache.LRU[string, *resourceHealthResponse] + log *zap.SugaredLogger } // NewHealthChecker is used to create a new health checker func NewHealthChecker(ctx context.Context) *HealthChecker { c := evictCache.NewLRU[string, *resourceHealthResponse](500, nil, resourceCacheRefreshDuration) + mvCache := evictCache.NewLRU[string, *resourceHealthResponse](500, nil, resourceCacheRefreshDuration) return &HealthChecker{ - resourceStatusCache: c, - log: logging.FromContext(ctx), + pipelineResourceStatusCache: c, + monoVtxResourceStatusCache: mvCache, + log: logging.FromContext(ctx), } } @@ -55,7 +61,7 @@ func (hc *HealthChecker) getPipelineResourceHealth(h *handler, ns string, cacheKey := fmt.Sprintf("%s-%s", ns, pipeline) // check if the pipeline status is cached - if status, ok := 
hc.resourceStatusCache.Get(cacheKey); ok { + if status, ok := hc.pipelineResourceStatusCache.Get(cacheKey); ok { hc.log.Info("Pipeline status from cache: ", status) return status, nil } @@ -65,7 +71,7 @@ func (hc *HealthChecker) getPipelineResourceHealth(h *handler, ns string, return status, err } // update cache with the new pipeline status - hc.resourceStatusCache.Add(cacheKey, status) + hc.pipelineResourceStatusCache.Add(cacheKey, status) return status, nil } @@ -207,3 +213,70 @@ func isVertexHealthy(h *handler, ns string, pipeline string, vertex *dfv1.Vertex Code: "V1", }, nil } + +// getMonoVtxResourceHealth is used to provide the overall resource health and status of the Mono Vertex +// This first checks if the Mono Vertex status is cached, if not, +// it checks for the current Mono Vertex status +func (hc *HealthChecker) getMonoVtxResourceHealth(h *handler, ns string, + monoVtx string) (*resourceHealthResponse, error) { + + // create a cache key for the Mono Vertex + // It is a combination of namespace and Mono Vertex name + // In the form of namespace-monoVertex + cacheKey := fmt.Sprintf("%s-%s", ns, monoVtx) + + // check if the Mono Vertex status is cached + if status, ok := hc.monoVtxResourceStatusCache.Get(cacheKey); ok { + hc.log.Info("Mono Vertex status from cache: ", status) + return status, nil + } + // if not present in cache, check for the current Mono Vertex status + status, err := checkMonoVtxHealth(h, ns, monoVtx) + if err != nil { + return nil, err + } + // update cache with the new Mono Vertex status + hc.monoVtxResourceStatusCache.Add(cacheKey, status) + + return status, nil +} + +// checkMonoVtxHealth is used to provide the overall Mono Vertex health and status of the Mono Vertex +// They can be of the following types: +// 1. Healthy: The Mono Vertex is healthy +// 2. Unhealthy: The Mono Vertex is unhealthy +// 3. Paused: The Mono Vertex is paused (Not supported right now) +// 4. 
Unknown: The Mono Vertex is in an unknown state +// We use the kubernetes client to get the spec of the MonoVertex and +// then check its status to derive the resource health status + +// We perform the following checks: +// 1) Check the `phase` in the Status field of the spec, it should be `Running` +// 2) Check the `conditions` field of the spec, all of them should be true +func checkMonoVtxHealth(h *handler, ns string, monoVtx string) (*resourceHealthResponse, error) { + // fetch the current spec of the Mono Vertex + v, err := h.numaflowClient.MonoVertices(ns).Get(context.Background(), monoVtx, metav1.GetOptions{}) + // if there is an error fetching the spec, return an error + // with status unknown + if err != nil { + return &resourceHealthResponse{ + Status: dfv1.MonoVertexStatusUnknown, + Message: fmt.Sprintf("error in getting Mono Vertex %q status: %v", monoVtx, err), + Code: "M4", + }, err + } + // check if the Mono vertex is healthy or not through the status field of the spec + isMvtxHealthy := v.Status.IsHealthy() + if !isMvtxHealthy { + return &resourceHealthResponse{ + Status: dfv1.MonoVertexStatusUnhealthy, + Message: fmt.Sprintf("mono vertex %q is unhealthy: %s:%s", monoVtx, v.Status.Message, v.Status.Reason), + Code: "M2", + }, nil + } + return &resourceHealthResponse{ + Status: dfv1.MonoVertexStatusHealthy, + Message: fmt.Sprintf("mono vertex %q is healthy", monoVtx), + Code: "M1", + }, nil +} diff --git a/server/cmd/server/start.go b/server/cmd/server/start.go index 16e52eaf62..aa4e3403b8 100644 --- a/server/cmd/server/start.go +++ b/server/cmd/server/start.go @@ -205,5 +205,6 @@ func CreateAuthRouteMap(baseHref string) authz.RouteMap { "GET:" + baseHref + "api/v1/namespaces/:namespace/mono-vertices/:mono-vertex/pods": authz.NewRouteInfo(authz.ObjectMonoVertex, true), "GET:" + baseHref + "api/v1/namespaces/:namespace/mono-vertices/:mono-vertex/metrics": authz.NewRouteInfo(authz.ObjectMonoVertex, true), "POST:" + baseHref + 
"api/v1/namespaces/:namespace/mono-vertices": authz.NewRouteInfo(authz.ObjectMonoVertex, true), + "GET:" + baseHref + "api/v1/namespaces/:namespace/mono-vertices/:mono-vertex/health": authz.NewRouteInfo(authz.ObjectMonoVertex, true), } } diff --git a/server/cmd/server/start_test.go b/server/cmd/server/start_test.go index b8abd6291e..13518ee163 100644 --- a/server/cmd/server/start_test.go +++ b/server/cmd/server/start_test.go @@ -25,12 +25,12 @@ import ( func TestCreateAuthRouteMap(t *testing.T) { t.Run("empty base", func(t *testing.T) { got := CreateAuthRouteMap("") - assert.Equal(t, 29, len(got)) + assert.Equal(t, 30, len(got)) }) t.Run("customize base", func(t *testing.T) { got := CreateAuthRouteMap("abcdefg") - assert.Equal(t, 29, len(got)) + assert.Equal(t, 30, len(got)) for k := range got { assert.Contains(t, k, "abcdefg") } diff --git a/server/routes/routes.go b/server/routes/routes.go index 68064e8c52..c80181d789 100644 --- a/server/routes/routes.go +++ b/server/routes/routes.go @@ -163,6 +163,8 @@ func v1Routes(ctx context.Context, r gin.IRouter, dexObj *v1.DexObject, localUse r.POST("/namespaces/:namespace/mono-vertices", handler.CreateMonoVertex) // Get the metrics of a mono vertex. r.GET("/namespaces/:namespace/mono-vertices/:mono-vertex/metrics", handler.GetMonoVertexMetrics) + // Get the health information of a mono vertex. + r.GET("/namespaces/:namespace/mono-vertices/:mono-vertex/health", handler.GetMonoVertexHealth) } // authMiddleware is the middleware for AuthN/AuthZ. 
From 42671138250d67f6eacddf33d4b5d5e069e5674f Mon Sep 17 00:00:00 2001 From: Vedant Gupta <49195734+veds-g@users.noreply.github.com> Date: Tue, 20 Aug 2024 00:40:17 +0530 Subject: [PATCH 009/188] fix: replicas derived in UI from mvtx status instead of spec (#1965) Signed-off-by: veds-g --- ui/src/types/declarations/pipeline.d.ts | 1 - ui/src/utils/fetcherHooks/monoVertexViewFetch.ts | 12 +++++++++--- ui/src/utils/fetcherHooks/pipelineViewFetch.ts | 4 +++- 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/ui/src/types/declarations/pipeline.d.ts b/ui/src/types/declarations/pipeline.d.ts index 09b8757387..dbc2811f3f 100644 --- a/ui/src/types/declarations/pipeline.d.ts +++ b/ui/src/types/declarations/pipeline.d.ts @@ -116,7 +116,6 @@ export interface MonoVertex { } export interface MonoVertexSpec { - replicas: number; source: any; sink: any; scale: any; diff --git a/ui/src/utils/fetcherHooks/monoVertexViewFetch.ts b/ui/src/utils/fetcherHooks/monoVertexViewFetch.ts index 9c7cd459d3..68d7524950 100644 --- a/ui/src/utils/fetcherHooks/monoVertexViewFetch.ts +++ b/ui/src/utils/fetcherHooks/monoVertexViewFetch.ts @@ -18,6 +18,7 @@ export const useMonoVertexViewFetch = ( const [requestKey, setRequestKey] = useState(""); const [pipeline, setPipeline] = useState(undefined); const [spec, setSpec] = useState(undefined); + const [replicas, setReplicas] = useState(undefined); const [monoVertexPods, setMonoVertexPods] = useState>( new Map() ); @@ -45,7 +46,12 @@ export const useMonoVertexViewFetch = ( // Update pipeline state with data from the response setPipeline(json.data?.monoVertex); // Update spec state if it is not equal to the spec from the response - if (!isEqual(spec, json.data)) setSpec(json.data?.monoVertex?.spec); + if (!isEqual(spec, json.data?.monoVertex?.spec)) { + setSpec(json.data.monoVertex.spec); + } + if (replicas !== json.data?.monoVertex?.status?.replicas) { + setReplicas(json.data.monoVertex.status.replicas); + } setPipelineErr(undefined); } else 
if (json?.errMsg) { // pipeline API call returns an error message @@ -245,7 +251,7 @@ export const useMonoVertexViewFetch = ( const name = pipelineId ?? ""; newNode.id = name; newNode.data = { name: name }; - newNode.data.podnum = spec?.replicas ? spec.replicas : 0; + newNode.data.podnum = replicas ? replicas : 0; newNode.position = { x: 0, y: 0 }; // change this in the future if you would like to make it draggable newNode.draggable = false; @@ -266,7 +272,7 @@ export const useMonoVertexViewFetch = ( if (pipeline && vertices?.length > 0) { setLoading(false); } - }, [pipeline, vertices]); + }, [pipeline, vertices, replicas]); return { pipeline, diff --git a/ui/src/utils/fetcherHooks/pipelineViewFetch.ts b/ui/src/utils/fetcherHooks/pipelineViewFetch.ts index 583d0859fc..abfab42443 100644 --- a/ui/src/utils/fetcherHooks/pipelineViewFetch.ts +++ b/ui/src/utils/fetcherHooks/pipelineViewFetch.ts @@ -70,7 +70,9 @@ export const usePipelineViewFetch = ( `${json.data?.pipeline?.metadata?.namespace}-${json.data.pipeline?.metadata?.name}-` ); // Update spec state if it is not equal to the spec from the response - if (!isEqual(spec, json.data)) setSpec(json.data?.pipeline?.spec); + if (!isEqual(spec, json.data?.pipeline?.spec)) { + setSpec(json.data.pipeline.spec); + } setPipelineErr(undefined); } else if (json?.errMsg) { // pipeline API call returns an error message From d2fc8d7e95f8f85a12ef338e84859e2b5d87d191 Mon Sep 17 00:00:00 2001 From: Sidhant Kohli Date: Mon, 19 Aug 2024 17:34:57 -0400 Subject: [PATCH 010/188] chore: send negative value rateNotAvailable (#1966) Signed-off-by: Sidhant Kohli --- pkg/daemon/server/service/rater/helper.go | 10 +++- .../server/service/rater/helper_test.go | 60 +++++++++---------- pkg/mvtxdaemon/server/service/rater/helper.go | 10 +++- .../server/service/rater/helper_test.go | 36 +++++------ 4 files changed, 62 insertions(+), 54 deletions(-) diff --git a/pkg/daemon/server/service/rater/helper.go b/pkg/daemon/server/service/rater/helper.go 
index 929ac99d50..73683c06af 100644 --- a/pkg/daemon/server/service/rater/helper.go +++ b/pkg/daemon/server/service/rater/helper.go @@ -17,6 +17,7 @@ limitations under the License. package rater import ( + "math" "time" sharedqueue "github.com/numaproj/numaflow/pkg/shared/queue" @@ -24,6 +25,9 @@ import ( const ( indexNotFound = -1 + // rateNotAvailable is returned when the processing rate cannot be derived from the currently + // available pod data, a negative min is returned to indicate this. + rateNotAvailable = float64(math.MinInt) ) // UpdateCount updates the count of processed messages for a pod at a given time @@ -48,14 +52,14 @@ func UpdateCount(q *sharedqueue.OverflowQueue[*TimestampedCounts], time int64, p func CalculateRate(q *sharedqueue.OverflowQueue[*TimestampedCounts], lookbackSeconds int64, partitionName string) float64 { counts := q.Items() if len(counts) <= 1 { - return 0 + return rateNotAvailable } startIndex := findStartIndex(lookbackSeconds, counts) // we consider the last but one element as the end index because the last element might be incomplete // we can be sure that the last but one element in the queue is complete. endIndex := len(counts) - 2 if startIndex == indexNotFound { - return 0 + return rateNotAvailable } // time diff in seconds. 
@@ -63,7 +67,7 @@ func CalculateRate(q *sharedqueue.OverflowQueue[*TimestampedCounts], lookbackSec if timeDiff == 0 { // if the time difference is 0, we return 0 to avoid division by 0 // this should not happen in practice because we are using a 10s interval - return 0 + return rateNotAvailable } delta := float64(0) diff --git a/pkg/daemon/server/service/rater/helper_test.go b/pkg/daemon/server/service/rater/helper_test.go index e420bcc0dc..8873f99802 100644 --- a/pkg/daemon/server/service/rater/helper_test.go +++ b/pkg/daemon/server/service/rater/helper_test.go @@ -119,17 +119,17 @@ func TestUpdateCount(t *testing.T) { } func TestCalculateRate(t *testing.T) { - t.Run("givenCollectedTimeLessThanTwo_whenCalculateRate_thenReturnZero", func(t *testing.T) { + t.Run("givenCollectedTimeLessThanTwo_whenCalculateRate_thenReturnRateNotAvailable", func(t *testing.T) { q := sharedqueue.New[*TimestampedCounts](1800) // no data - assert.Equal(t, 0.0, CalculateRate(q, 10, "partition1")) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 10, "partition1")) // only one data now := time.Now() tc1 := NewTimestampedCounts(now.Truncate(CountWindow).Unix() - 20) tc1.Update(&PodReadCount{"pod1", map[string]float64{"partition1": 5.0}}) q.Append(tc1) - assert.Equal(t, 0.0, CalculateRate(q, 10, "partition1")) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 10, "partition1")) }) t.Run("singlePod_givenCountIncreases_whenCalculateRate_thenReturnRate", func(t *testing.T) { @@ -147,9 +147,9 @@ func TestCalculateRate(t *testing.T) { q.Append(tc3) // no enough data collected within lookback seconds, expect rate 0 - assert.Equal(t, 0.0, CalculateRate(q, 5, "partition1")) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 5, "partition1")) // no enough data collected within lookback seconds, expect rate 0 - assert.Equal(t, 0.0, CalculateRate(q, 15, "partition1")) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 15, "partition1")) // tc1 and tc2 are used to calculate the rate 
assert.Equal(t, 0.5, CalculateRate(q, 25, "partition1")) // tc1 and tc2 are used to calculate the rate @@ -174,9 +174,9 @@ func TestCalculateRate(t *testing.T) { q.Append(tc4) // no enough data collected within lookback seconds, expect rate 0 - assert.Equal(t, 0.0, CalculateRate(q, 5, "partition1")) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 5, "partition1")) // no enough data collected within lookback seconds, expect rate 0 - assert.Equal(t, 0.0, CalculateRate(q, 15, "partition1")) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 15, "partition1")) // tc2 and tc3 are used to calculate the rate assert.Equal(t, 5.0, CalculateRate(q, 25, "partition1")) // tc1, 2 and 3 are used to calculate the rate @@ -203,11 +203,11 @@ func TestCalculateRate(t *testing.T) { q.Append(tc3) // no enough data collected within lookback seconds, expect rate 0 - assert.Equal(t, 0.0, CalculateRate(q, 5, "partition1")) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 5, "partition1")) // no enough data collected within lookback seconds, expect rate 0 - assert.Equal(t, 0.0, CalculateRate(q, 15, "partition1")) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 15, "partition1")) // no enough data collected within lookback seconds, expect rate 0 - assert.Equal(t, 0.0, CalculateRate(q, 25, "partition1")) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 25, "partition1")) // tc1 and tc2 are used to calculate the rate assert.Equal(t, 15.0, CalculateRate(q, 35, "partition1")) }) @@ -230,11 +230,11 @@ func TestCalculateRate(t *testing.T) { q.Append(tc3) // no enough data collected within lookback seconds, expect rate 0 - assert.Equal(t, 0.0, CalculateRate(q, 5, "partition1")) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 5, "partition1")) // no enough data collected within lookback seconds, expect rate 0 - assert.Equal(t, 0.0, CalculateRate(q, 15, "partition1")) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 15, "partition1")) // no enough data collected within 
lookback seconds, expect rate 0 - assert.Equal(t, 0.0, CalculateRate(q, 25, "partition1")) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 25, "partition1")) // tc1 and tc2 are used to calculate the rate assert.Equal(t, 30.0, CalculateRate(q, 35, "partition1")) }) @@ -257,11 +257,11 @@ func TestCalculateRate(t *testing.T) { q.Append(tc3) // no enough data collected within lookback seconds, expect rate 0 - assert.Equal(t, 0.0, CalculateRate(q, 5, "partition1")) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 5, "partition1")) // no enough data collected within lookback seconds, expect rate 0 - assert.Equal(t, 0.0, CalculateRate(q, 15, "partition1")) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 15, "partition1")) // no enough data collected within lookback seconds, expect rate 0 - assert.Equal(t, 0.0, CalculateRate(q, 25, "partition1")) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 25, "partition1")) // tc1 and tc2 are used to calculate the rate assert.Equal(t, 25.0, CalculateRate(q, 35, "partition1")) }) @@ -292,9 +292,9 @@ func TestCalculateRate(t *testing.T) { // partition1 rate // no enough data collected within lookback seconds, expect rate 0 - assert.Equal(t, 0.0, CalculateRate(q, 5, "partition1")) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 5, "partition1")) // no enough data collected within lookback seconds, expect rate 0 - assert.Equal(t, 0.0, CalculateRate(q, 15, "partition1")) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 15, "partition1")) // tc2 and tc3 are used to calculate the rate assert.Equal(t, 5.0, CalculateRate(q, 25, "partition1")) // tc1, 2 and 3 are used to calculate the rate @@ -303,29 +303,29 @@ func TestCalculateRate(t *testing.T) { assert.Equal(t, 7.5, CalculateRate(q, 100, "partition1")) // partition2 rate - assert.Equal(t, 0.0, CalculateRate(q, 5, "partition2")) - assert.Equal(t, 0.0, CalculateRate(q, 15, "partition2")) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 5, "partition2")) + 
assert.Equal(t, rateNotAvailable, CalculateRate(q, 15, "partition2")) assert.Equal(t, 10.0, CalculateRate(q, 25, "partition2")) assert.Equal(t, 10.5, CalculateRate(q, 35, "partition2")) assert.Equal(t, 10.5, CalculateRate(q, 100, "partition2")) // partition3 rate - assert.Equal(t, 0.0, CalculateRate(q, 5, "partition3")) - assert.Equal(t, 0.0, CalculateRate(q, 15, "partition3")) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 5, "partition3")) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 15, "partition3")) assert.Equal(t, 20.0, CalculateRate(q, 25, "partition3")) assert.Equal(t, 10.0, CalculateRate(q, 35, "partition3")) assert.Equal(t, 10.0, CalculateRate(q, 100, "partition3")) // partition4 rate - assert.Equal(t, 0.0, CalculateRate(q, 5, "partition4")) - assert.Equal(t, 0.0, CalculateRate(q, 15, "partition4")) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 5, "partition4")) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 15, "partition4")) assert.Equal(t, 10.0, CalculateRate(q, 25, "partition4")) assert.Equal(t, 5.0, CalculateRate(q, 35, "partition4")) assert.Equal(t, 5.0, CalculateRate(q, 100, "partition4")) // partition100 rate - assert.Equal(t, 0.0, CalculateRate(q, 5, "partition100")) - assert.Equal(t, 0.0, CalculateRate(q, 15, "partition100")) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 5, "partition100")) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 15, "partition100")) assert.Equal(t, 0.0, CalculateRate(q, 25, "partition100")) assert.Equal(t, 0.0, CalculateRate(q, 35, "partition100")) assert.Equal(t, 0.0, CalculateRate(q, 100, "partition100")) @@ -359,9 +359,9 @@ func TestCalculateRate(t *testing.T) { // partition1 rate // no enough data collected within lookback seconds, expect rate 0 - assert.Equal(t, 0.0, CalculateRate(q, 5, "partition1")) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 5, "partition1")) // no enough data collected within lookback seconds, expect rate 0 - assert.Equal(t, 0.0, CalculateRate(q, 
15, "partition1")) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 15, "partition1")) // tc2 and tc3 are used to calculate the rate assert.Equal(t, 111.0, CalculateRate(q, 25, "partition1")) // tc1, 2 and 3 are used to calculate the rate @@ -370,8 +370,8 @@ func TestCalculateRate(t *testing.T) { assert.Equal(t, 111.0, CalculateRate(q, 100, "partition1")) // partition2 rate - assert.Equal(t, 0.0, CalculateRate(q, 5, "partition2")) - assert.Equal(t, 0.0, CalculateRate(q, 15, "partition2")) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 5, "partition2")) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 15, "partition2")) assert.Equal(t, 111.0, CalculateRate(q, 25, "partition2")) assert.Equal(t, 111.0, CalculateRate(q, 35, "partition2")) assert.Equal(t, 111.0, CalculateRate(q, 100, "partition2")) diff --git a/pkg/mvtxdaemon/server/service/rater/helper.go b/pkg/mvtxdaemon/server/service/rater/helper.go index 0973b3c5cb..ce9aac1ed4 100644 --- a/pkg/mvtxdaemon/server/service/rater/helper.go +++ b/pkg/mvtxdaemon/server/service/rater/helper.go @@ -17,6 +17,7 @@ limitations under the License. package rater import ( + "math" "time" sharedqueue "github.com/numaproj/numaflow/pkg/shared/queue" @@ -25,6 +26,9 @@ import ( const ( // indexNotFound is returned when the start index cannot be found in the queue. indexNotFound = -1 + // rateNotAvailable is returned when the processing rate cannot be derived from the currently + // available pod data, a negative min is returned to indicate this. + rateNotAvailable = float64(math.MinInt) ) // UpdateCount updates the count for a given timestamp in the queue. 
@@ -49,14 +53,14 @@ func UpdateCount(q *sharedqueue.OverflowQueue[*TimestampedCounts], time int64, p func CalculateRate(q *sharedqueue.OverflowQueue[*TimestampedCounts], lookbackSeconds int64) float64 { counts := q.Items() if len(counts) <= 1 { - return 0 + return rateNotAvailable } startIndex := findStartIndex(lookbackSeconds, counts) // we consider the last but one element as the end index because the last element might be incomplete // we can be sure that the last but one element in the queue is complete. endIndex := len(counts) - 2 if startIndex == indexNotFound { - return 0 + return rateNotAvailable } // time diff in seconds. @@ -64,7 +68,7 @@ func CalculateRate(q *sharedqueue.OverflowQueue[*TimestampedCounts], lookbackSec if timeDiff == 0 { // if the time difference is 0, we return 0 to avoid division by 0 // this should not happen in practice because we are using a 10s interval - return 0 + return rateNotAvailable } delta := float64(0) diff --git a/pkg/mvtxdaemon/server/service/rater/helper_test.go b/pkg/mvtxdaemon/server/service/rater/helper_test.go index 6ac878244c..f3e61e3390 100644 --- a/pkg/mvtxdaemon/server/service/rater/helper_test.go +++ b/pkg/mvtxdaemon/server/service/rater/helper_test.go @@ -106,17 +106,17 @@ func TestUpdateCount(t *testing.T) { } func TestCalculateRate(t *testing.T) { - t.Run("givenCollectedTimeLessThanTwo_whenCalculateRate_thenReturnZero", func(t *testing.T) { + t.Run("givenCollectedTimeLessThanTwo_whenCalculateRate_thenReturnRateNotAvailable", func(t *testing.T) { q := sharedqueue.New[*TimestampedCounts](1800) // no data - assert.Equal(t, 0.0, CalculateRate(q, 10)) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 10)) // only one data now := time.Now() tc1 := NewTimestampedCounts(now.Truncate(CountWindow).Unix() - 20) tc1.Update(&PodReadCount{"pod1", 5.0}) q.Append(tc1) - assert.Equal(t, 0.0, CalculateRate(q, 10)) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 10)) }) 
t.Run("singlePod_givenCountIncreases_whenCalculateRate_thenReturnRate", func(t *testing.T) { @@ -134,9 +134,9 @@ func TestCalculateRate(t *testing.T) { q.Append(tc3) // no enough data collected within lookback seconds, expect rate 0 - assert.Equal(t, 0.0, CalculateRate(q, 5)) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 5)) // no enough data collected within lookback seconds, expect rate 0 - assert.Equal(t, 0.0, CalculateRate(q, 15)) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 15)) // tc1 and tc2 are used to calculate the rate assert.Equal(t, 0.5, CalculateRate(q, 25)) // tc1 and tc2 are used to calculate the rate @@ -161,9 +161,9 @@ func TestCalculateRate(t *testing.T) { q.Append(tc4) // no enough data collected within lookback seconds, expect rate 0 - assert.Equal(t, 0.0, CalculateRate(q, 5)) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 5)) // no enough data collected within lookback seconds, expect rate 0 - assert.Equal(t, 0.0, CalculateRate(q, 15)) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 15)) // tc2 and tc3 are used to calculate the rate assert.Equal(t, 5.0, CalculateRate(q, 25)) // tc1, 2 and 3 are used to calculate the rate @@ -190,11 +190,11 @@ func TestCalculateRate(t *testing.T) { q.Append(tc3) // no enough data collected within lookback seconds, expect rate 0 - assert.Equal(t, 0.0, CalculateRate(q, 5)) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 5)) // no enough data collected within lookback seconds, expect rate 0 - assert.Equal(t, 0.0, CalculateRate(q, 15)) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 15)) // no enough data collected within lookback seconds, expect rate 0 - assert.Equal(t, 0.0, CalculateRate(q, 25)) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 25)) // tc1 and tc2 are used to calculate the rate assert.Equal(t, 15.0, CalculateRate(q, 35)) }) @@ -217,11 +217,11 @@ func TestCalculateRate(t *testing.T) { q.Append(tc3) // no enough data collected within lookback seconds, expect 
rate 0 - assert.Equal(t, 0.0, CalculateRate(q, 5)) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 5)) // no enough data collected within lookback seconds, expect rate 0 - assert.Equal(t, 0.0, CalculateRate(q, 15)) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 15)) // no enough data collected within lookback seconds, expect rate 0 - assert.Equal(t, 0.0, CalculateRate(q, 25)) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 25)) // tc1 and tc2 are used to calculate the rate assert.Equal(t, 30.0, CalculateRate(q, 35)) }) @@ -244,11 +244,11 @@ func TestCalculateRate(t *testing.T) { q.Append(tc3) // no enough data collected within lookback seconds, expect rate 0 - assert.Equal(t, 0.0, CalculateRate(q, 5)) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 5)) // no enough data collected within lookback seconds, expect rate 0 - assert.Equal(t, 0.0, CalculateRate(q, 15)) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 15)) // no enough data collected within lookback seconds, expect rate 0 - assert.Equal(t, 0.0, CalculateRate(q, 25)) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 25)) // tc1 and tc2 are used to calculate the rate assert.Equal(t, 25.0, CalculateRate(q, 35)) }) @@ -279,8 +279,8 @@ func TestCalculateRate(t *testing.T) { q.Append(tc4) // vertex rate - assert.Equal(t, 0.0, CalculateRate(q, 5)) - assert.Equal(t, 0.0, CalculateRate(q, 15)) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 5)) + assert.Equal(t, rateNotAvailable, CalculateRate(q, 15)) assert.Equal(t, 25.0, CalculateRate(q, 25)) assert.Equal(t, 23.0, CalculateRate(q, 35)) assert.Equal(t, 23.0, CalculateRate(q, 100)) From e1bfd1b2d016d64bfb9d6ac546cc3489c96b806d Mon Sep 17 00:00:00 2001 From: Keran Yang Date: Mon, 19 Aug 2024 18:18:24 -0400 Subject: [PATCH 011/188] refactor: re-arrange e2e tests (#1961) Signed-off-by: Keran Yang --- .github/workflows/ci.yaml | 2 +- Makefile | 8 +- test/api-e2e/api_test.go | 15 ++ test/api-e2e/testdata.go | 15 ++ 
.../builtin_source_test.go} | 54 +++++- .../testdata/http-auth-fake-secret.yaml | 0 .../testdata/http-source-with-auth.yaml | 0 .../testdata/http-source.yaml | 0 .../testdata/jetstream-source-pipeline.yaml | 0 .../testdata/nats-source-pipeline.yaml | 0 test/e2e/functional_test.go | 35 +--- test/e2e/testdata/even-odd.yaml | 75 -------- test/idle-source-e2e/idle_source_test.go | 3 - test/jetstream-e2e/jetstream_test.go | 56 ------ test/kafka-e2e/kafka_test.go | 63 +------ .../sdks_test.go => map-e2e/map_test.go} | 176 +++--------------- .../testdata/flatmap-batch.yaml | 0 .../testdata/flatmap-stream.yaml | 0 .../testdata/flatmap.yaml | 0 test/monovertex-e2e/monovertex_test.go | 18 ++ test/nats-e2e/nats_test.go | 53 ------ test/reduce-one-e2e/reduce_one_test.go | 9 +- .../simple-keyed-reduce-pipeline.yaml | 5 +- test/reduce-two-e2e/reduce_two_test.go | 55 ------ .../simple-session-sum-pipeline.yaml | 47 ----- test/sdks-e2e/README.md | 7 - .../simple-keyed-reduce-pipeline.yaml | 47 ----- ..._test.go => sideinput_sink_source_test.go} | 2 - test/sideinputs-e2e/sideinput_test.go | 2 - .../testdata}/event-time-filter-go.yaml | 0 .../testdata}/event-time-filter-java.yaml | 0 .../testdata}/event-time-filter-python.yaml | 0 .../testdata}/event-time-filter-rust.yaml | 0 test/transformer-e2e/transformer_test.go | 70 ++++++- 34 files changed, 205 insertions(+), 612 deletions(-) rename test/{http-e2e/http_test.go => builtin-source-e2e/builtin_source_test.go} (62%) rename test/{http-e2e => builtin-source-e2e}/testdata/http-auth-fake-secret.yaml (100%) rename test/{http-e2e => builtin-source-e2e}/testdata/http-source-with-auth.yaml (100%) rename test/{http-e2e => builtin-source-e2e}/testdata/http-source.yaml (100%) rename test/{jetstream-e2e => builtin-source-e2e}/testdata/jetstream-source-pipeline.yaml (100%) rename test/{nats-e2e => builtin-source-e2e}/testdata/nats-source-pipeline.yaml (100%) delete mode 100644 test/e2e/testdata/even-odd.yaml delete mode 100644 
test/jetstream-e2e/jetstream_test.go rename test/{sdks-e2e/sdks_test.go => map-e2e/map_test.go} (50%) rename test/{sdks-e2e => map-e2e}/testdata/flatmap-batch.yaml (100%) rename test/{sdks-e2e => map-e2e}/testdata/flatmap-stream.yaml (100%) rename test/{sdks-e2e => map-e2e}/testdata/flatmap.yaml (100%) delete mode 100644 test/nats-e2e/nats_test.go delete mode 100644 test/reduce-two-e2e/testdata/session-reduce/simple-session-sum-pipeline.yaml delete mode 100644 test/sdks-e2e/README.md delete mode 100644 test/sdks-e2e/testdata/simple-keyed-reduce-pipeline.yaml rename test/sideinputs-e2e/{sideinput-e2e_sink_source_test.go => sideinput_sink_source_test.go} (99%) rename test/{sdks-e2e/testdata/transformer => transformer-e2e/testdata}/event-time-filter-go.yaml (100%) rename test/{sdks-e2e/testdata/transformer => transformer-e2e/testdata}/event-time-filter-java.yaml (100%) rename test/{sdks-e2e/testdata/transformer => transformer-e2e/testdata}/event-time-filter-python.yaml (100%) rename test/{sdks-e2e/testdata/transformer => transformer-e2e/testdata}/event-time-filter-rust.yaml (100%) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index ba128ef042..e27feac027 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -151,7 +151,7 @@ jobs: fail-fast: false matrix: driver: [jetstream] - case: [e2e, diamond-e2e, transformer-e2e, kafka-e2e, http-e2e, nats-e2e, jetstream-e2e, sdks-e2e, reduce-one-e2e, reduce-two-e2e, udsource-e2e, api-e2e, sideinputs-e2e, idle-source-e2e, monovertex-e2e] + case: [e2e, diamond-e2e, transformer-e2e, kafka-e2e, map-e2e, reduce-one-e2e, reduce-two-e2e, udsource-e2e, api-e2e, sideinputs-e2e, idle-source-e2e, monovertex-e2e, builtin-source-e2e] include: - driver: redis case: e2e diff --git a/Makefile b/Makefile index f437836f48..696b63f87b 100644 --- a/Makefile +++ b/Makefile @@ -104,10 +104,7 @@ test-code: test-e2e: test-kafka-e2e: -test-http-e2e: -test-nats-e2e: -test-jetstream-e2e: -test-sdks-e2e: 
+test-map-e2e: test-reduce-one-e2e: test-reduce-two-e2e: test-api-e2e: @@ -115,6 +112,9 @@ test-udsource-e2e: test-transformer-e2e: test-diamond-e2e: test-sideinputs-e2e: +test-monovertex-e2e: +test-idle-source-e2e: +test-builtin-source-e2e: test-%: $(MAKE) cleanup-e2e $(MAKE) image e2eapi-image diff --git a/test/api-e2e/api_test.go b/test/api-e2e/api_test.go index ae927e51d4..b23735e699 100644 --- a/test/api-e2e/api_test.go +++ b/test/api-e2e/api_test.go @@ -1,3 +1,18 @@ +/* +Copyright 2022 The Numaproj Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package api_e2e import ( diff --git a/test/api-e2e/testdata.go b/test/api-e2e/testdata.go index 5322b38ce8..411a7b586a 100644 --- a/test/api-e2e/testdata.go +++ b/test/api-e2e/testdata.go @@ -1,3 +1,18 @@ +/* +Copyright 2022 The Numaproj Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package api_e2e var ( diff --git a/test/http-e2e/http_test.go b/test/builtin-source-e2e/builtin_source_test.go similarity index 62% rename from test/http-e2e/http_test.go rename to test/builtin-source-e2e/builtin_source_test.go index a5e7058dee..da37c4c3ad 100644 --- a/test/http-e2e/http_test.go +++ b/test/builtin-source-e2e/builtin_source_test.go @@ -1,3 +1,5 @@ +//go:build test + /* Copyright 2022 The Numaproj Authors. @@ -14,24 +16,41 @@ See the License for the specific language governing permissions and limitations under the License. */ -package http_e2e +package builtin_source_e2e import ( "fmt" "testing" + "time" "github.com/stretchr/testify/suite" . "github.com/numaproj/numaflow/test/fixtures" ) +//go:generate kubectl -n numaflow-system delete statefulset nats --ignore-not-found=true //go:generate kubectl apply -f testdata/http-auth-fake-secret.yaml -n numaflow-system -type HTTPSuite struct { +//go:generate kubectl apply -k ../../config/apps/nats -n numaflow-system +type BuiltinSourceSuite struct { E2ESuite } -func (s *HTTPSuite) TestHTTPSourcePipeline() { - w := s.Given().Pipeline("@testdata/http-source.yaml"). +func (bss *BuiltinSourceSuite) TestNatsSource() { + subject := "test-subject" + w := bss.Given().Pipeline("@testdata/nats-source-pipeline.yaml"). + When(). + CreatePipelineAndWait() + defer w.DeletePipelineAndWait() + + // wait for all the pods to come up + w.Expect().VertexPodsRunning() + + PumpNatsSubject(subject, 100, 20*time.Millisecond, 10, "test-message") + w.Expect().RedisSinkContains("nats-source-e2e-out", "test-message", SinkCheckWithContainCount(100)) +} + +func (bss *BuiltinSourceSuite) TestHTTPSourcePipeline() { + w := bss.Given().Pipeline("@testdata/http-source.yaml"). When(). 
CreatePipelineAndWait() defer w.DeletePipelineAndWait() @@ -59,8 +78,8 @@ func (s *HTTPSuite) TestHTTPSourcePipeline() { w.Expect().RedisSinkContains("http-source-out", "with-id", SinkCheckWithContainCount(2)) } -func (s *HTTPSuite) TestHTTPSourceAuthPipeline() { - w := s.Given().Pipeline("@testdata/http-source-with-auth.yaml"). +func (bss *BuiltinSourceSuite) TestHTTPSourceAuthPipeline() { + w := bss.Given().Pipeline("@testdata/http-source-with-auth.yaml"). When(). CreatePipelineAndWait() defer w.DeletePipelineAndWait() @@ -75,6 +94,25 @@ func (s *HTTPSuite) TestHTTPSourceAuthPipeline() { w.Expect().RedisSinkNotContains("http-auth-source-out", "no-auth") } -func TestHTTPSuite(t *testing.T) { - suite.Run(t, new(HTTPSuite)) +func (bss *BuiltinSourceSuite) TestJetstreamSource() { + const streamName = "test-stream" + const msgPayload = "jetstream-test-message" + const msgCount = 100 + + // The source pods expect stream to exist + PumpJetstream(streamName, msgPayload, msgCount) + + w := bss.Given().Pipeline("@testdata/jetstream-source-pipeline.yaml"). + When(). 
+ CreatePipelineAndWait() + defer w.DeletePipelineAndWait() + + // wait for all the pods to come up + w.Expect().VertexPodsRunning() + + w.Expect().RedisSinkContains("jetstream-source-e2e-out", msgPayload, SinkCheckWithContainCount(msgCount)) +} + +func TestBuiltinSourceSuite(t *testing.T) { + suite.Run(t, new(BuiltinSourceSuite)) } diff --git a/test/http-e2e/testdata/http-auth-fake-secret.yaml b/test/builtin-source-e2e/testdata/http-auth-fake-secret.yaml similarity index 100% rename from test/http-e2e/testdata/http-auth-fake-secret.yaml rename to test/builtin-source-e2e/testdata/http-auth-fake-secret.yaml diff --git a/test/http-e2e/testdata/http-source-with-auth.yaml b/test/builtin-source-e2e/testdata/http-source-with-auth.yaml similarity index 100% rename from test/http-e2e/testdata/http-source-with-auth.yaml rename to test/builtin-source-e2e/testdata/http-source-with-auth.yaml diff --git a/test/http-e2e/testdata/http-source.yaml b/test/builtin-source-e2e/testdata/http-source.yaml similarity index 100% rename from test/http-e2e/testdata/http-source.yaml rename to test/builtin-source-e2e/testdata/http-source.yaml diff --git a/test/jetstream-e2e/testdata/jetstream-source-pipeline.yaml b/test/builtin-source-e2e/testdata/jetstream-source-pipeline.yaml similarity index 100% rename from test/jetstream-e2e/testdata/jetstream-source-pipeline.yaml rename to test/builtin-source-e2e/testdata/jetstream-source-pipeline.yaml diff --git a/test/nats-e2e/testdata/nats-source-pipeline.yaml b/test/builtin-source-e2e/testdata/nats-source-pipeline.yaml similarity index 100% rename from test/nats-e2e/testdata/nats-source-pipeline.yaml rename to test/builtin-source-e2e/testdata/nats-source-pipeline.yaml diff --git a/test/e2e/functional_test.go b/test/e2e/functional_test.go index 3e9c1d70dd..2d0989ac7e 100644 --- a/test/e2e/functional_test.go +++ b/test/e2e/functional_test.go @@ -1,6 +1,7 @@ +//go:build test + /* Copyright 2022 The Numaproj Authors. 
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -176,38 +177,6 @@ func (s *FunctionalSuite) TestUDFFiltering() { w.Expect().RedisSinkNotContains("udf-filtering-out", expect2) } -func (s *FunctionalSuite) TestConditionalForwarding() { - // FIXME: flaky when redis is used as isb - if strings.ToUpper(os.Getenv("ISBSVC")) == "REDIS" { - s.T().SkipNow() - } - - w := s.Given().Pipeline("@testdata/even-odd.yaml"). - When(). - CreatePipelineAndWait() - defer w.DeletePipelineAndWait() - pipelineName := "even-odd" - - // wait for all the pods to come up - w.Expect().VertexPodsRunning() - - w.SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("888888"))). - SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("888889"))). - SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("not an integer"))) - - w.Expect().RedisSinkContains("even-odd-even-sink", "888888") - w.Expect().RedisSinkNotContains("even-odd-even-sink", "888889") - w.Expect().RedisSinkNotContains("even-odd-even-sink", "not an integer") - - w.Expect().RedisSinkContains("even-odd-odd-sink", "888889") - w.Expect().RedisSinkNotContains("even-odd-odd-sink", "888888") - w.Expect().RedisSinkNotContains("even-odd-odd-sink", "not an integer") - - w.Expect().RedisSinkContains("even-odd-number-sink", "888888") - w.Expect().RedisSinkContains("even-odd-number-sink", "888889") - w.Expect().RedisSinkNotContains("even-odd-number-sink", "not an integer") -} - func (s *FunctionalSuite) TestDropOnFull() { // the drop on full feature is not supported with redis ISBSVC diff --git a/test/e2e/testdata/even-odd.yaml b/test/e2e/testdata/even-odd.yaml deleted file mode 100644 index 7cc6822243..0000000000 --- a/test/e2e/testdata/even-odd.yaml +++ /dev/null @@ -1,75 +0,0 @@ -apiVersion: numaflow.numaproj.io/v1alpha1 -kind: Pipeline -metadata: - name: even-odd 
-spec: - vertices: - - name: in - source: - http: {} - - name: even-or-odd - partitions: 2 - udf: - container: - # Tell the input number is even or odd, see https://github.com/numaproj/numaflow-go/tree/main/pkg/mapper/examples/even_odd - image: quay.io/numaio/numaflow-go/map-even-odd:stable - imagePullPolicy: Always - - name: even-sink - partitions: 2 - sink: - udsink: - container: - # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink - image: quay.io/numaio/numaflow-go/redis-sink:stable - imagePullPolicy: Always - env: - - name: SINK_HASH_KEY - # The key is set in the format of "pipeline_name-vertex_name" - value: "even-odd-even-sink" - - name: odd-sink - sink: - udsink: - container: - # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink - image: quay.io/numaio/numaflow-go/redis-sink:stable - imagePullPolicy: Always - env: - - name: SINK_HASH_KEY - # The key is set in the format of "pipeline_name-vertex_name" - value: "even-odd-odd-sink" - - name: number-sink - sink: - udsink: - container: - # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink - image: quay.io/numaio/numaflow-go/redis-sink:stable - imagePullPolicy: Always - env: - - name: SINK_HASH_KEY - # The key is set in the format of "pipeline_name-vertex_name" - value: "even-odd-number-sink" - edges: - - from: in - to: even-or-odd - - from: even-or-odd - to: even-sink - conditions: - tags: - operator: or - values: - - even-tag - - from: even-or-odd - to: odd-sink - conditions: - tags: - operator: or - values: - - odd-tag - - from: even-or-odd - to: number-sink - conditions: - tags: - operator: or - values: - - odd-tag - - even-tag diff --git a/test/idle-source-e2e/idle_source_test.go b/test/idle-source-e2e/idle_source_test.go index ef5e3eff98..cf9f6d8929 100644 --- a/test/idle-source-e2e/idle_source_test.go +++ 
b/test/idle-source-e2e/idle_source_test.go @@ -62,8 +62,6 @@ func (is *IdleSourceSuite) TestIdleKeyedReducePipelineWithHttpSource() { // wait for all the pods to come up w.Expect().VertexPodsRunning() - defer w.StreamVertexPodlogs("sink", "udsink").TerminateAllPodLogs() - done := make(chan struct{}) go func() { // publish messages to source vertex, with event time starting from 0 @@ -111,7 +109,6 @@ func (is *IdleSourceSuite) TestIdleKeyedReducePipelineWithKafkaSource() { // wait for all the pods to come up w.Expect().VertexPodsRunning() - defer w.StreamVertexPodlogs("sink", "udsink").TerminateAllPodLogs() defer DeleteKafkaTopic(topic) diff --git a/test/jetstream-e2e/jetstream_test.go b/test/jetstream-e2e/jetstream_test.go deleted file mode 100644 index 06aa24d05a..0000000000 --- a/test/jetstream-e2e/jetstream_test.go +++ /dev/null @@ -1,56 +0,0 @@ -//go:build test - -/* -Copyright 2022 The Numaproj Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package jetstream_e2e - -import ( - "testing" - - "github.com/stretchr/testify/suite" - - "github.com/numaproj/numaflow/test/fixtures" -) - -//go:generate kubectl -n numaflow-system delete statefulset nats --ignore-not-found=true -//go:generate kubectl apply -k ../../config/apps/nats -n numaflow-system - -type JetstreamSuite struct { - fixtures.E2ESuite -} - -func (ns *JetstreamSuite) TestJetstreamSource() { - const streamName = "test-stream" - const msgPayload = "jetstream-test-message" - - // The source pods expects stream to exist - fixtures.PumpJetstream(streamName, msgPayload, 100) - - w := ns.Given().Pipeline("@testdata/jetstream-source-pipeline.yaml"). - When(). - CreatePipelineAndWait() - defer w.DeletePipelineAndWait() - - // wait for all the pods to come up - w.Expect().VertexPodsRunning() - - w.Expect().RedisSinkContains("jetstream-source-e2e-out", msgPayload, fixtures.SinkCheckWithContainCount(100)) -} - -func TestJetstreamSuite(t *testing.T) { - suite.Run(t, new(JetstreamSuite)) -} diff --git a/test/kafka-e2e/kafka_test.go b/test/kafka-e2e/kafka_test.go index c7c10a4df9..74d7fbd53c 100644 --- a/test/kafka-e2e/kafka_test.go +++ b/test/kafka-e2e/kafka_test.go @@ -24,7 +24,6 @@ import ( "github.com/stretchr/testify/suite" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/ptr" dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" "github.com/numaproj/numaflow/test/fixtures" @@ -39,64 +38,6 @@ type KafkaSuite struct { fixtures.E2ESuite } -func (ks *KafkaSuite) TestKafkaSink() { - topicName := fixtures.GenerateKafkaTopicName() - fixtures.CreateKafkaTopic(topicName, 1) - defer fixtures.DeleteKafkaTopic(topicName) - - pipeline := &dfv1.Pipeline{ - ObjectMeta: metav1.ObjectMeta{ - Name: "kafka-sink-e2e", - }, - Spec: dfv1.PipelineSpec{ - Vertices: []dfv1.AbstractVertex{ - { - Name: "input", - Source: &dfv1.Source{ - Generator: &dfv1.GeneratorSource{ - RPU: ptr.To[int64](5), - Duration: &metav1.Duration{Duration: 2 * time.Second}, - 
}, - }, - }, - { - Name: "p1", - UDF: &dfv1.UDF{ - Builtin: &dfv1.Function{Name: "cat"}, - }, - }, - - { - Name: "output", - Sink: &dfv1.Sink{ - AbstractSink: dfv1.AbstractSink{ - Kafka: &dfv1.KafkaSink{ - Brokers: []string{"kafka-broker:9092"}, - Topic: topicName, - }, - }, - }, - }, - }, - Edges: []dfv1.Edge{ - { - From: "input", - To: "p1", - }, - { - From: "p1", - To: "output", - }, - }, - }, - } - w := ks.Given().WithPipeline(pipeline). - When(). - CreatePipelineAndWait() - defer w.DeletePipelineAndWait() - fixtures.ExpectKafkaTopicCount(topicName, 15, 3*time.Second) -} - func (ks *KafkaSuite) TestKafkaSourceSink() { inputTopic := fixtures.GenerateKafkaTopicName() fixtures.CreateKafkaTopic(inputTopic, 1) @@ -105,7 +46,7 @@ func (ks *KafkaSuite) TestKafkaSourceSink() { fixtures.CreateKafkaTopic(outputTopic, 1) pipeline := &dfv1.Pipeline{ ObjectMeta: metav1.ObjectMeta{ - Name: "kafka-sink-e2e", + Name: "kafka-source-sink-e2e", }, Spec: dfv1.PipelineSpec{ Vertices: []dfv1.AbstractVertex{ @@ -125,7 +66,6 @@ func (ks *KafkaSuite) TestKafkaSourceSink() { Builtin: &dfv1.Function{Name: "cat"}, }, }, - { Name: "output", Sink: &dfv1.Sink{ @@ -134,7 +74,6 @@ func (ks *KafkaSuite) TestKafkaSourceSink() { Brokers: []string{"kafka-broker:9092"}, Topic: outputTopic, }, - //Log: &dfv1.Log{}, }, }, }, diff --git a/test/sdks-e2e/sdks_test.go b/test/map-e2e/map_test.go similarity index 50% rename from test/sdks-e2e/sdks_test.go rename to test/map-e2e/map_test.go index a1bf013f41..a1d171a253 100644 --- a/test/sdks-e2e/sdks_test.go +++ b/test/map-e2e/map_test.go @@ -19,30 +19,23 @@ limitations under the License. package sdks_e2e import ( - "context" - "fmt" - "os" - "strconv" - "strings" - "sync" "testing" - "time" "github.com/stretchr/testify/suite" . "github.com/numaproj/numaflow/test/fixtures" ) -type SDKsSuite struct { +type MapSuite struct { E2ESuite } -func (s *SDKsSuite) TestUDFunctionAndSink() { - w := s.Given().Pipeline("@testdata/flatmap.yaml"). 
+func (s *MapSuite) TestBatchMapUDFunctionAndSink() { + w := s.Given().Pipeline("@testdata/flatmap-batch.yaml"). When(). CreatePipelineAndWait() defer w.DeletePipelineAndWait() - pipelineName := "flatmap" + pipelineName := "flatmap-batch" w.Expect(). VertexPodsRunning(). @@ -51,6 +44,8 @@ func (s *SDKsSuite) TestUDFunctionAndSink() { VertexPodLogContains("go-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")). VertexPodLogContains("python-split", LogUDFVertexStarted, PodLogCheckOptionWithContainer("numa")). VertexPodLogContains("python-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")). + VertexPodLogContains("rust-split", LogUDFVertexStarted, PodLogCheckOptionWithContainer("numa")). + VertexPodLogContains("rust-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")). VertexPodLogContains("java-split", LogUDFVertexStarted, PodLogCheckOptionWithContainer("numa")). VertexPodLogContains("java-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")) @@ -59,17 +54,17 @@ func (s *SDKsSuite) TestUDFunctionAndSink() { w.Expect(). VertexPodLogContains("go-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(3)). - VertexPodLogContains("java-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(3)). - VertexPodLogContains("python-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(3)) + VertexPodLogContains("python-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(3)). + VertexPodLogContains("rust-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(3)). + VertexPodLogContains("java-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(3)) } -func (s *SDKsSuite) TestMapStreamUDFunctionAndSink() { - w := s.Given().Pipeline("@testdata/flatmap-stream.yaml"). 
+func (s *MapSuite) TestUDFunctionAndSink() { + w := s.Given().Pipeline("@testdata/flatmap.yaml"). When(). CreatePipelineAndWait() defer w.DeletePipelineAndWait() - - pipelineName := "flatmap-stream" + pipelineName := "flatmap" w.Expect(). VertexPodsRunning(). @@ -81,25 +76,22 @@ func (s *SDKsSuite) TestMapStreamUDFunctionAndSink() { VertexPodLogContains("java-split", LogUDFVertexStarted, PodLogCheckOptionWithContainer("numa")). VertexPodLogContains("java-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")) - w.SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("hello,hello,hello"))). + w.SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("hello,hello"))). SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("hello"))) w.Expect(). - VertexPodLogContains("go-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(4)) - w.Expect(). - VertexPodLogContains("go-udsink-2", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(4)) - w.Expect(). - VertexPodLogContains("python-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(4)) - w.Expect(). - VertexPodLogContains("java-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(4)) + VertexPodLogContains("go-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(3)). + VertexPodLogContains("java-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(3)). + VertexPodLogContains("python-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(3)) } -func (s *SDKsSuite) TestBatchMapUDFunctionAndSink() { - w := s.Given().Pipeline("@testdata/flatmap-batch.yaml"). +func (s *MapSuite) TestMapStreamUDFunctionAndSink() { + w := s.Given().Pipeline("@testdata/flatmap-stream.yaml"). When(). 
CreatePipelineAndWait() defer w.DeletePipelineAndWait() - pipelineName := "flatmap-batch" + + pipelineName := "flatmap-stream" w.Expect(). VertexPodsRunning(). @@ -108,132 +100,22 @@ func (s *SDKsSuite) TestBatchMapUDFunctionAndSink() { VertexPodLogContains("go-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")). VertexPodLogContains("python-split", LogUDFVertexStarted, PodLogCheckOptionWithContainer("numa")). VertexPodLogContains("python-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")). - VertexPodLogContains("rust-split", LogUDFVertexStarted, PodLogCheckOptionWithContainer("numa")). - VertexPodLogContains("rust-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")). VertexPodLogContains("java-split", LogUDFVertexStarted, PodLogCheckOptionWithContainer("numa")). VertexPodLogContains("java-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")) - w.SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("hello,hello"))). + w.SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("hello,hello,hello"))). SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("hello"))) w.Expect(). - VertexPodLogContains("go-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(3)). - VertexPodLogContains("python-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(3)). - VertexPodLogContains("rust-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(3)). 
- VertexPodLogContains("java-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(3)) -} - -func (s *SDKsSuite) TestReduceSDK() { - - // the reduce feature is not supported with redis ISBSVC - if strings.ToUpper(os.Getenv("ISBSVC")) == "REDIS" { - s.T().SkipNow() - } - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) - defer cancel() - w := s.Given().Pipeline("@testdata/simple-keyed-reduce-pipeline.yaml"). - When(). - CreatePipelineAndWait() - defer w.DeletePipelineAndWait() - pipelineName := "even-odd-sum" - - // wait for all the pods to come up - w.Expect().VertexPodsRunning() - - done := make(chan struct{}) - go func() { - // publish messages to source vertex, with event time starting from 60000 - startTime := 60000 - for i := 0; true; i++ { - select { - case <-ctx.Done(): - return - case <-done: - return - default: - eventTime := strconv.Itoa(startTime + i*1000) - w.SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("1")).WithHeader("X-Numaflow-Event-Time", eventTime)). - SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("2")).WithHeader("X-Numaflow-Event-Time", eventTime)). - SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("3")).WithHeader("X-Numaflow-Event-Time", eventTime)) - } - } - }() - + VertexPodLogContains("go-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(4)) w.Expect(). - VertexPodLogContains("java-udsink", "120", PodLogCheckOptionWithContainer("udsink")). 
- VertexPodLogContains("java-udsink", "240", PodLogCheckOptionWithContainer("udsink")) - done <- struct{}{} -} - -func (s *SDKsSuite) TestSourceTransformer() { - - // the transformer feature is not supported with redis ISBSVC - if strings.ToUpper(os.Getenv("ISBSVC")) == "REDIS" { - s.T().SkipNow() - } - - var wg sync.WaitGroup - wg.Add(4) - go func() { - defer wg.Done() - s.testSourceTransformer("python") - }() - go func() { - defer wg.Done() - s.testSourceTransformer("java") - }() - go func() { - defer wg.Done() - s.testSourceTransformer("go") - }() - go func() { - defer wg.Done() - s.testSourceTransformer("rust") - }() - wg.Wait() -} - -func (s *SDKsSuite) testSourceTransformer(lang string) { - w := s.Given().Pipeline(fmt.Sprintf("@testdata/transformer/event-time-filter-%s.yaml", lang)). - When(). - CreatePipelineAndWait() - defer w.DeletePipelineAndWait() - pipelineName := fmt.Sprintf("event-time-filter-%s", lang) - - // wait for all the pods to come up - w.Expect().VertexPodsRunning() - - eventTimeBefore2022_1 := strconv.FormatInt(time.Date(2021, 4, 2, 7, 4, 5, 2, time.UTC).UnixMilli(), 10) - eventTimeBefore2022_2 := strconv.FormatInt(time.Date(1998, 4, 2, 8, 4, 5, 2, time.UTC).UnixMilli(), 10) - eventTimeBefore2022_3 := strconv.FormatInt(time.Date(2013, 4, 4, 7, 4, 5, 2, time.UTC).UnixMilli(), 10) - - eventTimeAfter2022_1 := strconv.FormatInt(time.Date(2023, 4, 2, 7, 4, 5, 2, time.UTC).UnixMilli(), 10) - eventTimeAfter2022_2 := strconv.FormatInt(time.Date(2026, 4, 2, 3, 4, 5, 2, time.UTC).UnixMilli(), 10) - - eventTimeWithin2022_1 := strconv.FormatInt(time.Date(2022, 4, 2, 3, 4, 5, 2, time.UTC).UnixMilli(), 10) - - w.SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("Before2022")).WithHeader("X-Numaflow-Event-Time", eventTimeBefore2022_1)). - SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("Before2022")).WithHeader("X-Numaflow-Event-Time", eventTimeBefore2022_2)). 
- SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("Before2022")).WithHeader("X-Numaflow-Event-Time", eventTimeBefore2022_3)). - SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("After2022")).WithHeader("X-Numaflow-Event-Time", eventTimeAfter2022_1)). - SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("After2022")).WithHeader("X-Numaflow-Event-Time", eventTimeAfter2022_2)). - SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("Within2022")).WithHeader("X-Numaflow-Event-Time", eventTimeWithin2022_1)) - - janFirst2022 := time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC) - janFirst2023 := time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC) - - w.Expect().VertexPodLogContains("sink-within-2022", fmt.Sprintf("EventTime - %d", janFirst2022.UnixMilli()), PodLogCheckOptionWithCount(1)). - VertexPodLogContains("sink-after-2022", fmt.Sprintf("EventTime - %d", janFirst2023.UnixMilli()), PodLogCheckOptionWithCount(2)). - VertexPodLogContains("sink-all", fmt.Sprintf("EventTime - %d", janFirst2022.UnixMilli()), PodLogCheckOptionWithCount(1)). - VertexPodLogContains("sink-all", fmt.Sprintf("EventTime - %d", janFirst2023.UnixMilli()), PodLogCheckOptionWithCount(2)). - VertexPodLogNotContains("sink-within-2022", fmt.Sprintf("EventTime - %d", janFirst2023.UnixMilli()), PodLogCheckOptionWithTimeout(1*time.Second)). - VertexPodLogNotContains("sink-after-2022", fmt.Sprintf("EventTime - %d", janFirst2022.UnixMilli()), PodLogCheckOptionWithTimeout(1*time.Second)). - VertexPodLogNotContains("sink-all", "Before2022", PodLogCheckOptionWithTimeout(1*time.Second)). - VertexPodLogNotContains("sink-within-2022", "Before2022", PodLogCheckOptionWithTimeout(1*time.Second)). - VertexPodLogNotContains("sink-after-2022", "Before2022", PodLogCheckOptionWithTimeout(1*time.Second)) + VertexPodLogContains("go-udsink-2", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(4)) + w.Expect(). 
+ VertexPodLogContains("python-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(4)) + w.Expect(). + VertexPodLogContains("java-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(4)) } -func TestHTTPSuite(t *testing.T) { - suite.Run(t, new(SDKsSuite)) +func TestMapSuite(t *testing.T) { + suite.Run(t, new(MapSuite)) } diff --git a/test/sdks-e2e/testdata/flatmap-batch.yaml b/test/map-e2e/testdata/flatmap-batch.yaml similarity index 100% rename from test/sdks-e2e/testdata/flatmap-batch.yaml rename to test/map-e2e/testdata/flatmap-batch.yaml diff --git a/test/sdks-e2e/testdata/flatmap-stream.yaml b/test/map-e2e/testdata/flatmap-stream.yaml similarity index 100% rename from test/sdks-e2e/testdata/flatmap-stream.yaml rename to test/map-e2e/testdata/flatmap-stream.yaml diff --git a/test/sdks-e2e/testdata/flatmap.yaml b/test/map-e2e/testdata/flatmap.yaml similarity index 100% rename from test/sdks-e2e/testdata/flatmap.yaml rename to test/map-e2e/testdata/flatmap.yaml diff --git a/test/monovertex-e2e/monovertex_test.go b/test/monovertex-e2e/monovertex_test.go index 51d9135c56..3fd72d6554 100644 --- a/test/monovertex-e2e/monovertex_test.go +++ b/test/monovertex-e2e/monovertex_test.go @@ -1,3 +1,21 @@ +//go:build test + +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package monovertex_e2e import ( diff --git a/test/nats-e2e/nats_test.go b/test/nats-e2e/nats_test.go deleted file mode 100644 index 72d2c70e99..0000000000 --- a/test/nats-e2e/nats_test.go +++ /dev/null @@ -1,53 +0,0 @@ -//go:build test - -/* -Copyright 2022 The Numaproj Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nats_e2e - -import ( - "testing" - "time" - - "github.com/stretchr/testify/suite" - - . "github.com/numaproj/numaflow/test/fixtures" -) - -//go:generate kubectl -n numaflow-system delete statefulset nats --ignore-not-found=true -//go:generate kubectl apply -k ../../config/apps/nats -n numaflow-system - -type NatsSuite struct { - E2ESuite -} - -func (ns *NatsSuite) TestNatsSource() { - subject := "test-subject" - w := ns.Given().Pipeline("@testdata/nats-source-pipeline.yaml"). - When(). 
- CreatePipelineAndWait() - defer w.DeletePipelineAndWait() - - // wait for all the pods to come up - w.Expect().VertexPodsRunning() - - PumpNatsSubject(subject, 100, 20*time.Millisecond, 10, "test-message") - w.Expect().RedisSinkContains("nats-source-e2e-out", "test-message", SinkCheckWithContainCount(100)) -} - -func TestNatsSuite(t *testing.T) { - suite.Run(t, new(NatsSuite)) -} diff --git a/test/reduce-one-e2e/reduce_one_test.go b/test/reduce-one-e2e/reduce_one_test.go index 4cf592e1cf..618913a829 100644 --- a/test/reduce-one-e2e/reduce_one_test.go +++ b/test/reduce-one-e2e/reduce_one_test.go @@ -1,3 +1,5 @@ +//go:build test + /* Copyright 2022 The Numaproj Authors. Licensed under the Apache License, Version 2.0 (the "License"); @@ -34,7 +36,6 @@ type ReduceSuite struct { // one reduce vertex (keyed) func (r *ReduceSuite) TestSimpleKeyedReducePipeline() { - // the reduce feature is not supported with redis ISBSVC if strings.ToUpper(os.Getenv("ISBSVC")) == "REDIS" { r.T().SkipNow() @@ -79,7 +80,6 @@ func (r *ReduceSuite) TestSimpleKeyedReducePipeline() { // one reduce vertex(non keyed) func (r *ReduceSuite) TestSimpleNonKeyedReducePipeline() { - // the reduce feature is not supported with redis ISBSVC if strings.ToUpper(os.Getenv("ISBSVC")) == "REDIS" { r.T().SkipNow() @@ -123,7 +123,6 @@ func (r *ReduceSuite) TestSimpleNonKeyedReducePipeline() { // two reduce vertex(keyed and non keyed) func (r *ReduceSuite) TestComplexReducePipelineKeyedNonKeyed() { - // the reduce feature is not supported with redis ISBSVC if strings.ToUpper(os.Getenv("ISBSVC")) == "REDIS" { r.T().SkipNow() @@ -166,7 +165,6 @@ func (r *ReduceSuite) TestComplexReducePipelineKeyedNonKeyed() { } func (r *ReduceSuite) TestSimpleReducePipelineFailOverUsingWAL() { - // the reduce feature is not supported with redis ISBSVC if strings.ToUpper(os.Getenv("ISBSVC")) == "REDIS" { r.T().SkipNow() @@ -193,7 +191,6 @@ func (r *ReduceSuite) TestSimpleReducePipelineFailOverUsingWAL() { 
w.Expect().VertexPodsRunning() - defer w.StreamVertexPodlogs("compute-sum", "numa").TerminateAllPodLogs() go func() { startTime := int(time.Unix(1000, 0).UnixMilli()) for i := 1; true; i++ { @@ -209,7 +206,6 @@ func (r *ReduceSuite) TestSimpleReducePipelineFailOverUsingWAL() { w.Expect().VertexPodsRunning() w.Exec("/bin/sh", []string{"-c", args}, CheckPodKillSucceeded) w.Expect().VertexPodsRunning() - w.StreamVertexPodlogs("compute-sum", "numa") } w.SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("1")).WithHeader("X-Numaflow-Event-Time", eventTime)). SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("2")).WithHeader("X-Numaflow-Event-Time", eventTime)). @@ -229,7 +225,6 @@ func (r *ReduceSuite) TestSimpleReducePipelineFailOverUsingWAL() { // two reduce vertices (keyed and non-keyed) followed by a sliding window vertex func (r *ReduceSuite) TestComplexSlidingWindowPipeline() { - // the reduce feature is not supported with redis ISBSVC if strings.ToUpper(os.Getenv("ISBSVC")) == "REDIS" { r.T().SkipNow() diff --git a/test/reduce-one-e2e/testdata/simple-keyed-reduce-pipeline.yaml b/test/reduce-one-e2e/testdata/simple-keyed-reduce-pipeline.yaml index 25e1976331..c770092617 100644 --- a/test/reduce-one-e2e/testdata/simple-keyed-reduce-pipeline.yaml +++ b/test/reduce-one-e2e/testdata/simple-keyed-reduce-pipeline.yaml @@ -25,8 +25,9 @@ spec: partitions: 1 udf: container: - # Compute the sum, see https://github.com/numaproj/numaflow-go/tree/main/pkg/reducer/examples/sum - image: quay.io/numaio/numaflow-go/reduce-sum:stable + # compute the sum, see https://github.com/numaproj/numaflow-java/tree/main/examples/src/main/java/io/numaproj/numaflow/examples/reduce/sum + # this also serves as a Java reduce sdk test case + image: quay.io/numaio/numaflow-java/reduce-sum:stable imagePullPolicy: Always groupBy: window: diff --git a/test/reduce-two-e2e/reduce_two_test.go b/test/reduce-two-e2e/reduce_two_test.go index 7a1815f2c2..c4749569a0 
100644 --- a/test/reduce-two-e2e/reduce_two_test.go +++ b/test/reduce-two-e2e/reduce_two_test.go @@ -61,8 +61,6 @@ func (r *ReduceSuite) testReduceStream(lang string) { // wait for all the pods to come up w.Expect().VertexPodsRunning() - defer w.StreamVertexPodlogs("sink", "udsink").TerminateAllPodLogs() - done := make(chan struct{}) go func() { // publish messages to source vertex, with event time starting from 60000 @@ -89,53 +87,6 @@ func (r *ReduceSuite) testReduceStream(lang string) { done <- struct{}{} } -func (r *ReduceSuite) TestSimpleSessionPipeline() { - - // the reduce feature is not supported with redis ISBSVC - if strings.ToUpper(os.Getenv("ISBSVC")) == "REDIS" { - r.T().SkipNow() - } - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) - defer cancel() - w := r.Given().Pipeline("@testdata/session-reduce/simple-session-sum-pipeline.yaml"). - When(). - CreatePipelineAndWait() - defer w.DeletePipelineAndWait() - pipelineName := "simple-session-sum" - - // wait for all the pods to come up - w.Expect().VertexPodsRunning() - - count := 0 - done := make(chan struct{}) - go func() { - // publish messages to source vertex, with event time starting from 60000 - startTime := 60000 - for i := 0; true; i++ { - select { - case <-ctx.Done(): - return - case <-done: - return - default: - if count == 10 { - startTime = startTime + (10 * 1000) - count = 0 - } else { - startTime = startTime + 1000 - } - eventTime := strconv.Itoa(startTime) - w.SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("100")).WithHeader("X-Numaflow-Event-Time", eventTime)) - count += 1 - } - } - }() - - w.Expect().RedisSinkContains("simple-session-sum-sink", "1000") - done <- struct{}{} -} - func (r *ReduceSuite) TestSimpleSessionKeyedPipelineGo() { r.testSimpleSessionKeyedPipeline("go") } @@ -145,7 +96,6 @@ func (r *ReduceSuite) TestSimpleSessionKeyedPipelineJava() { } func (r *ReduceSuite) testSimpleSessionKeyedPipeline(lang string) { - // the reduce 
feature is not supported with redis ISBSVC if strings.ToUpper(os.Getenv("ISBSVC")) == "REDIS" { r.T().SkipNow() @@ -162,8 +112,6 @@ func (r *ReduceSuite) testSimpleSessionKeyedPipeline(lang string) { // wait for all the pods to come up w.Expect().VertexPodsRunning() - defer w.StreamVertexPodlogs("sink", "udsink").TerminateAllPodLogs() - count := 0 done := make(chan struct{}) go func() { @@ -199,7 +147,6 @@ func (r *ReduceSuite) testSimpleSessionKeyedPipeline(lang string) { } func (r *ReduceSuite) TestSimpleSessionPipelineFailOverUsingWAL() { - // the reduce feature is not supported with redis ISBSVC if strings.ToUpper(os.Getenv("ISBSVC")) == "REDIS" { r.T().SkipNow() @@ -219,7 +166,6 @@ func (r *ReduceSuite) TestSimpleSessionPipelineFailOverUsingWAL() { args := "kubectl delete po -n numaflow-system -l " + "numaflow.numaproj.io/pipeline-name=simple-session-counter-go,numaflow.numaproj.io/vertex-name=compute-count" - defer w.StreamVertexPodlogs("compute-count", "numa").TerminateAllPodLogs() // Kill the reducer pods before processing to trigger failover. w.Exec("/bin/sh", []string{"-c", args}, CheckPodKillSucceeded) done := make(chan struct{}) @@ -245,7 +191,6 @@ func (r *ReduceSuite) TestSimpleSessionPipelineFailOverUsingWAL() { w.Expect().VertexPodsRunning() w.Exec("/bin/sh", []string{"-c", args}, CheckPodKillSucceeded) w.Expect().VertexPodsRunning() - w.StreamVertexPodlogs("compute-sum", "numa") } w.SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("1")).WithHeader("X-Numaflow-Event-Time", eventTime)). 
SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("2")).WithHeader("X-Numaflow-Event-Time", eventTime)) diff --git a/test/reduce-two-e2e/testdata/session-reduce/simple-session-sum-pipeline.yaml b/test/reduce-two-e2e/testdata/session-reduce/simple-session-sum-pipeline.yaml deleted file mode 100644 index 6f1259e085..0000000000 --- a/test/reduce-two-e2e/testdata/session-reduce/simple-session-sum-pipeline.yaml +++ /dev/null @@ -1,47 +0,0 @@ -apiVersion: numaflow.numaproj.io/v1alpha1 -kind: Pipeline -metadata: - name: simple-session-sum -spec: - watermark: - maxDelay: 30s - vertices: - - name: in - scale: - min: 1 - source: - http: {} - - name: compute-sum - partitions: 1 - udf: - container: - # https://github.com/numaproj/numaflow-go/tree/main/pkg/sessionreducer/examples/sum - image: quay.io/numaio/numaflow-go/session-sum:stable - imagePullPolicy: Always - groupBy: - window: - session: - timeout: 10s - keyed: true - storage: - persistentVolumeClaim: - volumeSize: 2Gi - accessMode: ReadWriteOnce - - name: sink - scale: - min: 1 - sink: - udsink: - container: - # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink - image: quay.io/numaio/numaflow-go/redis-sink:stable - imagePullPolicy: Always - env: - - name: SINK_HASH_KEY - # The key is set in the format of "pipeline_name-vertex_name" - value: "simple-session-sum-sink" - edges: - - from: in - to: compute-sum - - from: compute-sum - to: sink diff --git a/test/sdks-e2e/README.md b/test/sdks-e2e/README.md deleted file mode 100644 index 24451e9c61..0000000000 --- a/test/sdks-e2e/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# SDKs Test - -Each SDK is supposed to implement: - -1. A `flatmap` UDF example, which splits the input message with `,`, and return a list. Build a docker image `quay.io/numaio/numaflow-${language}/map-flatmap:latest` and push to `quay.io`; - -2. A `log` UDSink example, which prints out original message in pod logs. 
Build a docker image `quay.io/numaio/numaflow-${language}/sink-log:latest` and push to `quay.io`. diff --git a/test/sdks-e2e/testdata/simple-keyed-reduce-pipeline.yaml b/test/sdks-e2e/testdata/simple-keyed-reduce-pipeline.yaml deleted file mode 100644 index 24f536d75d..0000000000 --- a/test/sdks-e2e/testdata/simple-keyed-reduce-pipeline.yaml +++ /dev/null @@ -1,47 +0,0 @@ -apiVersion: numaflow.numaproj.io/v1alpha1 -kind: Pipeline -metadata: - name: even-odd-sum -spec: - vertices: - - name: in - source: - http: {} - - name: atoi - scale: - min: 1 - udf: - container: - # Tell the input number is even or odd, see https://github.com/numaproj/numaflow-java/tree/main/examples/src/main/java/io/numaproj/numaflow/examples/map/evenodd - image: quay.io/numaio/numaflow-java/even-odd:stable - imagePullPolicy: Always - - name: compute-sum - partitions: 2 - udf: - container: - # compute the sum, see https://github.com/numaproj/numaflow-java/tree/main/examples/src/main/java/io/numaproj/numaflow/examples/reduce/sum - image: quay.io/numaio/numaflow-java/reduce-sum:stable - imagePullPolicy: Always - groupBy: - window: - fixed: - length: 60s - keyed: true - storage: - emptyDir: {} - - name: java-udsink - scale: - min: 1 - sink: - udsink: - container: - # https://github.com/numaproj/numaflow-java/tree/main/examples/src/main/java/io/numaproj/numaflow/examples/sink/simple - image: quay.io/numaio/numaflow-java/simple-sink:stable - imagePullPolicy: Always - edges: - - from: in - to: atoi - - from: atoi - to: compute-sum - - from: compute-sum - to: java-udsink diff --git a/test/sideinputs-e2e/sideinput-e2e_sink_source_test.go b/test/sideinputs-e2e/sideinput_sink_source_test.go similarity index 99% rename from test/sideinputs-e2e/sideinput-e2e_sink_source_test.go rename to test/sideinputs-e2e/sideinput_sink_source_test.go index be95e4c3f5..9278214ef8 100644 --- a/test/sideinputs-e2e/sideinput-e2e_sink_source_test.go +++ b/test/sideinputs-e2e/sideinput_sink_source_test.go @@ -51,7 +51,6 @@ 
func (s *SideInputUDSSuite) TestSinkWithSideInput() { } func (s *SideInputUDSSuite) TestSourceWithSideInput() { - // the side inputs feature is not supported with redis ISBSVC if strings.ToUpper(os.Getenv("ISBSVC")) == "REDIS" { s.T().SkipNow() @@ -60,7 +59,6 @@ func (s *SideInputUDSSuite) TestSourceWithSideInput() { w := s.setUpTests("@testdata/sideinput-source.yaml") defer w.DeletePipelineAndWait() w.Expect().RedisSinkContains("sideinput-source-test-redis-uds", "e2e-even", SinkCheckWithTimeout(2*time.Minute)) - } func TestSideInputUDSSuite(t *testing.T) { diff --git a/test/sideinputs-e2e/sideinput_test.go b/test/sideinputs-e2e/sideinput_test.go index 8f1e156926..c67e1b2cbe 100644 --- a/test/sideinputs-e2e/sideinput_test.go +++ b/test/sideinputs-e2e/sideinput_test.go @@ -35,7 +35,6 @@ type SideInputSuite struct { } func (s *SideInputSuite) TestSimpleMapSideInputPipeline() { - // the side inputs feature is not supported with redis ISBSVC if strings.ToUpper(os.Getenv("ISBSVC")) == "REDIS" { s.T().SkipNow() @@ -77,7 +76,6 @@ func (s *SideInputSuite) TestSimpleMapSideInputPipeline() { } func (s *SideInputSuite) TestSimpleReduceSideInputPipeline() { - // the side inputs feature is not supported with redis ISBSVC if strings.ToUpper(os.Getenv("ISBSVC")) == "REDIS" { s.T().SkipNow() diff --git a/test/sdks-e2e/testdata/transformer/event-time-filter-go.yaml b/test/transformer-e2e/testdata/event-time-filter-go.yaml similarity index 100% rename from test/sdks-e2e/testdata/transformer/event-time-filter-go.yaml rename to test/transformer-e2e/testdata/event-time-filter-go.yaml diff --git a/test/sdks-e2e/testdata/transformer/event-time-filter-java.yaml b/test/transformer-e2e/testdata/event-time-filter-java.yaml similarity index 100% rename from test/sdks-e2e/testdata/transformer/event-time-filter-java.yaml rename to test/transformer-e2e/testdata/event-time-filter-java.yaml diff --git a/test/sdks-e2e/testdata/transformer/event-time-filter-python.yaml 
b/test/transformer-e2e/testdata/event-time-filter-python.yaml similarity index 100% rename from test/sdks-e2e/testdata/transformer/event-time-filter-python.yaml rename to test/transformer-e2e/testdata/event-time-filter-python.yaml diff --git a/test/sdks-e2e/testdata/transformer/event-time-filter-rust.yaml b/test/transformer-e2e/testdata/event-time-filter-rust.yaml similarity index 100% rename from test/sdks-e2e/testdata/transformer/event-time-filter-rust.yaml rename to test/transformer-e2e/testdata/event-time-filter-rust.yaml diff --git a/test/transformer-e2e/transformer_test.go b/test/transformer-e2e/transformer_test.go index 888b3ee3fb..55b88f3683 100644 --- a/test/transformer-e2e/transformer_test.go +++ b/test/transformer-e2e/transformer_test.go @@ -23,7 +23,9 @@ import ( "encoding/json" "fmt" "os" + "strconv" "strings" + "sync" "testing" "time" @@ -88,7 +90,6 @@ func (s *TransformerSuite) TestTimeExtractionFilter() { } func (s *TransformerSuite) TestBuiltinEventTimeExtractor() { - // this test is skipped for redis as watermark is not supported with this ISBSVC if strings.ToUpper(os.Getenv("ISBSVC")) == "REDIS" { s.T().SkipNow() @@ -165,6 +166,73 @@ wmLoop: done <- struct{}{} } +func (s *TransformerSuite) TestSourceTransformer() { + // the transformer feature is not supported with redis ISBSVC + if strings.ToUpper(os.Getenv("ISBSVC")) == "REDIS" { + s.T().SkipNow() + } + + var wg sync.WaitGroup + wg.Add(4) + go func() { + defer wg.Done() + s.testSourceTransformer("python") + }() + go func() { + defer wg.Done() + s.testSourceTransformer("java") + }() + go func() { + defer wg.Done() + s.testSourceTransformer("go") + }() + go func() { + defer wg.Done() + s.testSourceTransformer("rust") + }() + wg.Wait() +} + +func (s *TransformerSuite) testSourceTransformer(lang string) { + w := s.Given().Pipeline(fmt.Sprintf("@testdata/event-time-filter-%s.yaml", lang)). + When(). 
+ CreatePipelineAndWait() + defer w.DeletePipelineAndWait() + pipelineName := fmt.Sprintf("event-time-filter-%s", lang) + + // wait for all the pods to come up + w.Expect().VertexPodsRunning() + + eventTimeBefore2022_1 := strconv.FormatInt(time.Date(2021, 4, 2, 7, 4, 5, 2, time.UTC).UnixMilli(), 10) + eventTimeBefore2022_2 := strconv.FormatInt(time.Date(1998, 4, 2, 8, 4, 5, 2, time.UTC).UnixMilli(), 10) + eventTimeBefore2022_3 := strconv.FormatInt(time.Date(2013, 4, 4, 7, 4, 5, 2, time.UTC).UnixMilli(), 10) + + eventTimeAfter2022_1 := strconv.FormatInt(time.Date(2023, 4, 2, 7, 4, 5, 2, time.UTC).UnixMilli(), 10) + eventTimeAfter2022_2 := strconv.FormatInt(time.Date(2026, 4, 2, 3, 4, 5, 2, time.UTC).UnixMilli(), 10) + + eventTimeWithin2022_1 := strconv.FormatInt(time.Date(2022, 4, 2, 3, 4, 5, 2, time.UTC).UnixMilli(), 10) + + w.SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("Before2022")).WithHeader("X-Numaflow-Event-Time", eventTimeBefore2022_1)). + SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("Before2022")).WithHeader("X-Numaflow-Event-Time", eventTimeBefore2022_2)). + SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("Before2022")).WithHeader("X-Numaflow-Event-Time", eventTimeBefore2022_3)). + SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("After2022")).WithHeader("X-Numaflow-Event-Time", eventTimeAfter2022_1)). + SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("After2022")).WithHeader("X-Numaflow-Event-Time", eventTimeAfter2022_2)). + SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("Within2022")).WithHeader("X-Numaflow-Event-Time", eventTimeWithin2022_1)) + + janFirst2022 := time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC) + janFirst2023 := time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC) + + w.Expect().VertexPodLogContains("sink-within-2022", fmt.Sprintf("EventTime - %d", janFirst2022.UnixMilli()), PodLogCheckOptionWithCount(1)). 
+ VertexPodLogContains("sink-after-2022", fmt.Sprintf("EventTime - %d", janFirst2023.UnixMilli()), PodLogCheckOptionWithCount(2)). + VertexPodLogContains("sink-all", fmt.Sprintf("EventTime - %d", janFirst2022.UnixMilli()), PodLogCheckOptionWithCount(1)). + VertexPodLogContains("sink-all", fmt.Sprintf("EventTime - %d", janFirst2023.UnixMilli()), PodLogCheckOptionWithCount(2)). + VertexPodLogNotContains("sink-within-2022", fmt.Sprintf("EventTime - %d", janFirst2023.UnixMilli()), PodLogCheckOptionWithTimeout(1*time.Second)). + VertexPodLogNotContains("sink-after-2022", fmt.Sprintf("EventTime - %d", janFirst2022.UnixMilli()), PodLogCheckOptionWithTimeout(1*time.Second)). + VertexPodLogNotContains("sink-all", "Before2022", PodLogCheckOptionWithTimeout(1*time.Second)). + VertexPodLogNotContains("sink-within-2022", "Before2022", PodLogCheckOptionWithTimeout(1*time.Second)). + VertexPodLogNotContains("sink-after-2022", "Before2022", PodLogCheckOptionWithTimeout(1*time.Second)) +} + func TestTransformerSuite(t *testing.T) { suite.Run(t, new(TransformerSuite)) } From ef18e092b6bec602080a4c27301e8c24e998691c Mon Sep 17 00:00:00 2001 From: Keran Yang Date: Mon, 19 Aug 2024 18:19:36 -0400 Subject: [PATCH 012/188] chore: version update for releasing (#1944) Signed-off-by: Keran Yang --- go.mod | 2 +- go.sum | 4 ++-- pkg/sdkclient/serverinfo/versions.go | 6 +++--- test/sideinputs-e2e/testdata/sideinput-sink.yaml | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index 2560e8dc81..f0dd236bd7 100644 --- a/go.mod +++ b/go.mod @@ -32,7 +32,7 @@ require ( github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe github.com/nats-io/nats-server/v2 v2.10.17 github.com/nats-io/nats.go v1.36.0 - github.com/numaproj/numaflow-go v0.7.1-0.20240711051731-15e45210b784 + github.com/numaproj/numaflow-go v0.8.0 github.com/prometheus/client_golang v1.18.0 github.com/prometheus/client_model v0.5.0 github.com/prometheus/common v0.45.0 diff --git a/go.sum 
b/go.sum index eb4d23b5cf..1e46c54600 100644 --- a/go.sum +++ b/go.sum @@ -485,8 +485,8 @@ github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDm github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/numaproj/numaflow-go v0.7.1-0.20240711051731-15e45210b784 h1:JnpaG557KqDrzIi1c5YeffeLXGmLd8F2lzQEBC+wFWQ= -github.com/numaproj/numaflow-go v0.7.1-0.20240711051731-15e45210b784/go.mod h1:WoMt31+h3up202zTRI8c/qe42B8UbvwLe2mJH0MAlhI= +github.com/numaproj/numaflow-go v0.8.0 h1:1Pp0AMLXkmUPlvFjKeY3a9X+OLU8oN1OQWxD9jLg8Uo= +github.com/numaproj/numaflow-go v0.8.0/go.mod h1:WoMt31+h3up202zTRI8c/qe42B8UbvwLe2mJH0MAlhI= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= diff --git a/pkg/sdkclient/serverinfo/versions.go b/pkg/sdkclient/serverinfo/versions.go index 9a9c1cb02f..aded1c9bd3 100644 --- a/pkg/sdkclient/serverinfo/versions.go +++ b/pkg/sdkclient/serverinfo/versions.go @@ -23,7 +23,7 @@ import ( type sdkConstraints map[info.Language]string var minimumSupportedSDKVersions = sdkConstraints{ - info.Go: "0.7.0-rc2", - info.Python: "0.7.0a1", - info.Java: "0.7.2-0", + info.Go: "0.8.0", + info.Python: "0.8.0", + info.Java: "0.8.0", } diff --git a/test/sideinputs-e2e/testdata/sideinput-sink.yaml b/test/sideinputs-e2e/testdata/sideinput-sink.yaml index f172b4ec31..086254a3e7 100644 --- a/test/sideinputs-e2e/testdata/sideinput-sink.yaml +++ b/test/sideinputs-e2e/testdata/sideinput-sink.yaml @@ -26,7 +26,7 @@ spec: udsink: container: # see https://github.com/numaproj/numaflow-go/tree/main/pkg/sideinput/examples/sink_sideinput - 
image: quay.io/numaio/numaflow-go/redis-sink-with-sideinput:hash + image: quay.io/numaio/numaflow-go/redis-sink-with-sideinput:stable imagePullPolicy: Always env: - name: SINK_HASH_KEY From 216cbd20d5917ecdb655a93295b44c6181944eb0 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 19 Aug 2024 16:43:42 -0700 Subject: [PATCH 013/188] docs: updated CHANGELOG.md (#1968) Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- CHANGELOG.md | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3b0b83f0f4..0b2f607fbc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,30 @@ # Changelog +## v1.3.0 (2024-08-19) + + * [4de121c2](https://github.com/numaproj/numaflow/commit/4de121c2c3b436ac51fba97c8ce5153afc5364c9) Update manifests to v1.3.0 + * [a566548b](https://github.com/numaproj/numaflow/commit/a566548b8d1be71367dc01049086b5a685b610eb) refactor: re-arrange e2e tests (#1961) + * [a30649de](https://github.com/numaproj/numaflow/commit/a30649decdf381950e54187c1633b0e27fe85cff) fix: replicas derived in UI from mvtx status instead of spec (#1965) + * [5a22c321](https://github.com/numaproj/numaflow/commit/5a22c321912df0a5e4d59d8027a1173acfe0079c) feat: add health for monovertex (#1954) + * [1087e860](https://github.com/numaproj/numaflow/commit/1087e860c2896a861fb5068f9416dac39a948b30) feat: enable fallback sink for mvtx (#1957) + * [b5aa6ffb](https://github.com/numaproj/numaflow/commit/b5aa6ffba0466f87727354d6a069e8d6fb8e07ba) feat: Mono vertex UI (#1941) + * [447cd3f4](https://github.com/numaproj/numaflow/commit/447cd3f47cfaf7856c191ee27815fc338cea1cf3) fix: default resources mutated when applying templates (#1948) + * [5a531620](https://github.com/numaproj/numaflow/commit/5a53162077305cea43e6bf9d23dd19805c8c8bb4) feat: autoscaling for MonoVertex (#1927) + * 
[78468019](https://github.com/numaproj/numaflow/commit/78468019b0a292749ab688b4a74af1149b43d540) fix: retry failed messages for MonoVertex sink (#1933) + * [206a535f](https://github.com/numaproj/numaflow/commit/206a535f7e86f860ed5597616b0e3b1d9ab93ec0) Add Lockheed to Users.md (#1934) + * [c1d25acd](https://github.com/numaproj/numaflow/commit/c1d25acd0d1722c6011ada6ccff06cd5dc8812be) feat: add server-info support and versioning to MonoVertex (#1918) + * [292e3eae](https://github.com/numaproj/numaflow/commit/292e3eae4537c6d497f1eb2de5f72d3f657b4360) feat: source to sink with an optional transformer without ISB (#1904) + +### Contributors + + * Derek Wang + * Keran Yang + * Sidhant Kohli + * Vedant Gupta + * Vigith Maurice + * Yashash H L + * mdwarne1 + ## v1.3.0-rc1 (2024-08-08) * [179f5967](https://github.com/numaproj/numaflow/commit/179f59674a0a61eb7ae7cd7a83612f0eb7b3be7f) Update manifests to v1.3.0-rc1 From a7074aa80345e41c39770e7d069e14c29eaff9e0 Mon Sep 17 00:00:00 2001 From: Vigith Maurice Date: Mon, 19 Aug 2024 20:29:19 -0700 Subject: [PATCH 014/188] doc: update roadmap (#1970) Signed-off-by: Vigith Maurice --- README.md | 2 +- docs/README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 96f7fb5cb1..be45ad6409 100644 --- a/README.md +++ b/README.md @@ -36,7 +36,7 @@ Numaflow, created by the Intuit Argo team to address community needs for continu ## Roadmap -- Map Streaming (1.3) +- Mono Vertex to bypass ISB for simple use cases (1.4) ## Demo diff --git a/docs/README.md b/docs/README.md index 577a2afc5e..6388ec5326 100644 --- a/docs/README.md +++ b/docs/README.md @@ -26,7 +26,7 @@ Welcome to Numaflow! 
A Kubernetes-native, serverless platform for running scalab ## Roadmap -- Map Streaming (1.3) +- Mono Vertex to bypass ISB for simple use cases (1.4) ## Demo From b36e7d3df8453cbedf2080f435f09d8133410147 Mon Sep 17 00:00:00 2001 From: Abdullah Yildirim Date: Tue, 20 Aug 2024 18:27:41 +0000 Subject: [PATCH 015/188] chore: fix broken links (#1972) Signed-off-by: a3hadi --- test/builtin-source-e2e/testdata/http-source-with-auth.yaml | 2 +- test/builtin-source-e2e/testdata/http-source.yaml | 2 +- test/builtin-source-e2e/testdata/jetstream-source-pipeline.yaml | 2 +- test/builtin-source-e2e/testdata/nats-source-pipeline.yaml | 2 +- test/diamond-e2e/testdata/cycle-backward.yaml | 2 +- test/diamond-e2e/testdata/cycle-to-self.yaml | 2 +- test/diamond-e2e/testdata/join-on-map.yaml | 2 +- test/diamond-e2e/testdata/join-on-reduce.yaml | 2 +- test/diamond-e2e/testdata/join-on-sink.yaml | 2 +- test/e2e/testdata/simple-fallback.yaml | 2 +- test/e2e/testdata/udf-filtering.yaml | 2 +- test/idle-source-e2e/testdata/idle-source-reduce-pipeline.yaml | 2 +- test/idle-source-e2e/testdata/kafka-pipeline.yaml | 2 +- test/monovertex-e2e/testdata/mono-vertex-with-transformer.yaml | 2 +- test/reduce-one-e2e/testdata/complex-reduce-pipeline.yaml | 2 +- .../testdata/complex-sliding-window-pipeline.yaml | 2 +- test/reduce-one-e2e/testdata/simple-keyed-reduce-pipeline.yaml | 2 +- .../testdata/simple-non-keyed-reduce-pipeline.yaml | 2 +- test/reduce-one-e2e/testdata/simple-reduce-pipeline-wal.yaml | 2 +- .../reduce-two-e2e/testdata/reduce-stream/reduce-stream-go.yaml | 2 +- .../testdata/reduce-stream/reduce-stream-java.yaml | 2 +- .../simple-session-keyed-counter-pipeline-go.yaml | 2 +- .../simple-session-keyed-counter-pipeline-java.yaml | 2 +- test/sideinputs-e2e/testdata/map-sideinput-pipeline.yaml | 2 +- test/sideinputs-e2e/testdata/reduce-sideinput-pipeline.yaml | 2 +- test/sideinputs-e2e/testdata/sideinput-source.yaml | 2 +- test/transformer-e2e/testdata/source-filtering.yaml | 2 +- 27 
files changed, 27 insertions(+), 27 deletions(-) diff --git a/test/builtin-source-e2e/testdata/http-source-with-auth.yaml b/test/builtin-source-e2e/testdata/http-source-with-auth.yaml index 4b27a58cee..08fb2bdf25 100644 --- a/test/builtin-source-e2e/testdata/http-source-with-auth.yaml +++ b/test/builtin-source-e2e/testdata/http-source-with-auth.yaml @@ -15,7 +15,7 @@ spec: sink: udsink: container: - # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink + # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis_sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always env: diff --git a/test/builtin-source-e2e/testdata/http-source.yaml b/test/builtin-source-e2e/testdata/http-source.yaml index c67d366ddb..53f32afb07 100644 --- a/test/builtin-source-e2e/testdata/http-source.yaml +++ b/test/builtin-source-e2e/testdata/http-source.yaml @@ -14,7 +14,7 @@ spec: sink: udsink: container: - # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink + # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis_sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always env: diff --git a/test/builtin-source-e2e/testdata/jetstream-source-pipeline.yaml b/test/builtin-source-e2e/testdata/jetstream-source-pipeline.yaml index c1a3766530..0ef6065413 100644 --- a/test/builtin-source-e2e/testdata/jetstream-source-pipeline.yaml +++ b/test/builtin-source-e2e/testdata/jetstream-source-pipeline.yaml @@ -23,7 +23,7 @@ spec: sink: udsink: container: - # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink + # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis_sink image: 
quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always env: diff --git a/test/builtin-source-e2e/testdata/nats-source-pipeline.yaml b/test/builtin-source-e2e/testdata/nats-source-pipeline.yaml index 3dd0d965df..88630fd11d 100644 --- a/test/builtin-source-e2e/testdata/nats-source-pipeline.yaml +++ b/test/builtin-source-e2e/testdata/nats-source-pipeline.yaml @@ -24,7 +24,7 @@ spec: sink: udsink: container: - # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink + # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis_sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always env: diff --git a/test/diamond-e2e/testdata/cycle-backward.yaml b/test/diamond-e2e/testdata/cycle-backward.yaml index 7312fc6cce..5ede83e3b0 100644 --- a/test/diamond-e2e/testdata/cycle-backward.yaml +++ b/test/diamond-e2e/testdata/cycle-backward.yaml @@ -23,7 +23,7 @@ spec: sink: udsink: container: - # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink + # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis_sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always env: diff --git a/test/diamond-e2e/testdata/cycle-to-self.yaml b/test/diamond-e2e/testdata/cycle-to-self.yaml index eaa7bf9cfa..036d2a4ecb 100644 --- a/test/diamond-e2e/testdata/cycle-to-self.yaml +++ b/test/diamond-e2e/testdata/cycle-to-self.yaml @@ -19,7 +19,7 @@ spec: sink: udsink: container: - # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink + # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis_sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always env: diff --git 
a/test/diamond-e2e/testdata/join-on-map.yaml b/test/diamond-e2e/testdata/join-on-map.yaml index 6ca69cda24..b2bd667da7 100644 --- a/test/diamond-e2e/testdata/join-on-map.yaml +++ b/test/diamond-e2e/testdata/join-on-map.yaml @@ -29,7 +29,7 @@ spec: sink: udsink: container: - # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink + # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis_sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always env: diff --git a/test/diamond-e2e/testdata/join-on-reduce.yaml b/test/diamond-e2e/testdata/join-on-reduce.yaml index f0ec43e19f..e1562dd133 100644 --- a/test/diamond-e2e/testdata/join-on-reduce.yaml +++ b/test/diamond-e2e/testdata/join-on-reduce.yaml @@ -50,7 +50,7 @@ spec: sink: udsink: container: - # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink + # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis_sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always env: diff --git a/test/diamond-e2e/testdata/join-on-sink.yaml b/test/diamond-e2e/testdata/join-on-sink.yaml index 1f1a442b24..2cf19c9f31 100644 --- a/test/diamond-e2e/testdata/join-on-sink.yaml +++ b/test/diamond-e2e/testdata/join-on-sink.yaml @@ -25,7 +25,7 @@ spec: sink: udsink: container: - # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink + # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis_sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always env: diff --git a/test/e2e/testdata/simple-fallback.yaml b/test/e2e/testdata/simple-fallback.yaml index 0e39df2ddf..8637a7b100 100644 --- a/test/e2e/testdata/simple-fallback.yaml +++ 
b/test/e2e/testdata/simple-fallback.yaml @@ -24,7 +24,7 @@ spec: fallback: udsink: container: - # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink + # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis_sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always env: diff --git a/test/e2e/testdata/udf-filtering.yaml b/test/e2e/testdata/udf-filtering.yaml index 1c8c79057c..507c7a9b59 100644 --- a/test/e2e/testdata/udf-filtering.yaml +++ b/test/e2e/testdata/udf-filtering.yaml @@ -17,7 +17,7 @@ spec: sink: udsink: container: - # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink + # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis_sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always env: diff --git a/test/idle-source-e2e/testdata/idle-source-reduce-pipeline.yaml b/test/idle-source-e2e/testdata/idle-source-reduce-pipeline.yaml index 2fc2a0a89e..28964b3fda 100644 --- a/test/idle-source-e2e/testdata/idle-source-reduce-pipeline.yaml +++ b/test/idle-source-e2e/testdata/idle-source-reduce-pipeline.yaml @@ -47,7 +47,7 @@ spec: sink: udsink: container: - # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink + # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis_sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always env: diff --git a/test/idle-source-e2e/testdata/kafka-pipeline.yaml b/test/idle-source-e2e/testdata/kafka-pipeline.yaml index 3af968f58d..8b397f5738 100644 --- a/test/idle-source-e2e/testdata/kafka-pipeline.yaml +++ b/test/idle-source-e2e/testdata/kafka-pipeline.yaml @@ -50,7 +50,7 @@ spec: sink: udsink: container: - # A redis 
sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink + # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis_sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always env: diff --git a/test/monovertex-e2e/testdata/mono-vertex-with-transformer.yaml b/test/monovertex-e2e/testdata/mono-vertex-with-transformer.yaml index 66f1bb34bd..acdb0b29f6 100644 --- a/test/monovertex-e2e/testdata/mono-vertex-with-transformer.yaml +++ b/test/monovertex-e2e/testdata/mono-vertex-with-transformer.yaml @@ -15,7 +15,7 @@ spec: sink: udsink: container: - # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink + # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis_sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always env: diff --git a/test/reduce-one-e2e/testdata/complex-reduce-pipeline.yaml b/test/reduce-one-e2e/testdata/complex-reduce-pipeline.yaml index d7a56c5f0e..c57c9f580c 100644 --- a/test/reduce-one-e2e/testdata/complex-reduce-pipeline.yaml +++ b/test/reduce-one-e2e/testdata/complex-reduce-pipeline.yaml @@ -54,7 +54,7 @@ spec: sink: udsink: container: - # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink + # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis_sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always env: diff --git a/test/reduce-one-e2e/testdata/complex-sliding-window-pipeline.yaml b/test/reduce-one-e2e/testdata/complex-sliding-window-pipeline.yaml index 8c19e22d72..3ad9d1b552 100644 --- a/test/reduce-one-e2e/testdata/complex-sliding-window-pipeline.yaml +++ b/test/reduce-one-e2e/testdata/complex-sliding-window-pipeline.yaml @@ -86,7 +86,7 @@ 
spec: sink: udsink: container: - # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink + # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis_sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always env: diff --git a/test/reduce-one-e2e/testdata/simple-keyed-reduce-pipeline.yaml b/test/reduce-one-e2e/testdata/simple-keyed-reduce-pipeline.yaml index c770092617..1b0d7f713c 100644 --- a/test/reduce-one-e2e/testdata/simple-keyed-reduce-pipeline.yaml +++ b/test/reduce-one-e2e/testdata/simple-keyed-reduce-pipeline.yaml @@ -44,7 +44,7 @@ spec: sink: udsink: container: - # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink + # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis_sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always env: diff --git a/test/reduce-one-e2e/testdata/simple-non-keyed-reduce-pipeline.yaml b/test/reduce-one-e2e/testdata/simple-non-keyed-reduce-pipeline.yaml index d5a932a7c5..918b936650 100644 --- a/test/reduce-one-e2e/testdata/simple-non-keyed-reduce-pipeline.yaml +++ b/test/reduce-one-e2e/testdata/simple-non-keyed-reduce-pipeline.yaml @@ -42,7 +42,7 @@ spec: sink: udsink: container: - # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink + # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis_sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always env: diff --git a/test/reduce-one-e2e/testdata/simple-reduce-pipeline-wal.yaml b/test/reduce-one-e2e/testdata/simple-reduce-pipeline-wal.yaml index 25a6770a5d..15440d9c39 100644 --- a/test/reduce-one-e2e/testdata/simple-reduce-pipeline-wal.yaml +++ 
b/test/reduce-one-e2e/testdata/simple-reduce-pipeline-wal.yaml @@ -41,7 +41,7 @@ spec: sink: udsink: container: - # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink + # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis_sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always env: diff --git a/test/reduce-two-e2e/testdata/reduce-stream/reduce-stream-go.yaml b/test/reduce-two-e2e/testdata/reduce-stream/reduce-stream-go.yaml index d80138366e..d859384b89 100644 --- a/test/reduce-two-e2e/testdata/reduce-stream/reduce-stream-go.yaml +++ b/test/reduce-two-e2e/testdata/reduce-stream/reduce-stream-go.yaml @@ -39,7 +39,7 @@ spec: sink: udsink: container: - # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink + # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis_sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always env: diff --git a/test/reduce-two-e2e/testdata/reduce-stream/reduce-stream-java.yaml b/test/reduce-two-e2e/testdata/reduce-stream/reduce-stream-java.yaml index b94e7fd649..9b8253d13c 100644 --- a/test/reduce-two-e2e/testdata/reduce-stream/reduce-stream-java.yaml +++ b/test/reduce-two-e2e/testdata/reduce-stream/reduce-stream-java.yaml @@ -39,7 +39,7 @@ spec: sink: udsink: container: - # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink + # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis_sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always env: diff --git a/test/reduce-two-e2e/testdata/session-reduce/simple-session-keyed-counter-pipeline-go.yaml 
b/test/reduce-two-e2e/testdata/session-reduce/simple-session-keyed-counter-pipeline-go.yaml index a2d88efeb6..1e8e12ebc5 100644 --- a/test/reduce-two-e2e/testdata/session-reduce/simple-session-keyed-counter-pipeline-go.yaml +++ b/test/reduce-two-e2e/testdata/session-reduce/simple-session-keyed-counter-pipeline-go.yaml @@ -40,7 +40,7 @@ spec: sink: udsink: container: - # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink + # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis_sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always env: diff --git a/test/reduce-two-e2e/testdata/session-reduce/simple-session-keyed-counter-pipeline-java.yaml b/test/reduce-two-e2e/testdata/session-reduce/simple-session-keyed-counter-pipeline-java.yaml index f468710f03..64c9ee2376 100644 --- a/test/reduce-two-e2e/testdata/session-reduce/simple-session-keyed-counter-pipeline-java.yaml +++ b/test/reduce-two-e2e/testdata/session-reduce/simple-session-keyed-counter-pipeline-java.yaml @@ -45,7 +45,7 @@ spec: sink: udsink: container: - # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink + # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis_sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always env: diff --git a/test/sideinputs-e2e/testdata/map-sideinput-pipeline.yaml b/test/sideinputs-e2e/testdata/map-sideinput-pipeline.yaml index cb3ea152ed..7ecff1e3e6 100644 --- a/test/sideinputs-e2e/testdata/map-sideinput-pipeline.yaml +++ b/test/sideinputs-e2e/testdata/map-sideinput-pipeline.yaml @@ -29,7 +29,7 @@ spec: sink: udsink: container: - # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink + # A redis sink for e2e testing, see 
https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis_sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always env: diff --git a/test/sideinputs-e2e/testdata/reduce-sideinput-pipeline.yaml b/test/sideinputs-e2e/testdata/reduce-sideinput-pipeline.yaml index 028a20125e..4497278f68 100644 --- a/test/sideinputs-e2e/testdata/reduce-sideinput-pipeline.yaml +++ b/test/sideinputs-e2e/testdata/reduce-sideinput-pipeline.yaml @@ -46,7 +46,7 @@ spec: sink: udsink: container: - # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink + # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis_sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always env: diff --git a/test/sideinputs-e2e/testdata/sideinput-source.yaml b/test/sideinputs-e2e/testdata/sideinput-source.yaml index 6c21edd717..60a0b74611 100644 --- a/test/sideinputs-e2e/testdata/sideinput-source.yaml +++ b/test/sideinputs-e2e/testdata/sideinput-source.yaml @@ -28,7 +28,7 @@ spec: sink: udsink: container: - # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink + # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis_sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always env: diff --git a/test/transformer-e2e/testdata/source-filtering.yaml b/test/transformer-e2e/testdata/source-filtering.yaml index 549fddec63..e517ca756b 100644 --- a/test/transformer-e2e/testdata/source-filtering.yaml +++ b/test/transformer-e2e/testdata/source-filtering.yaml @@ -16,7 +16,7 @@ spec: sink: udsink: container: - # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis-sink + # A redis sink for e2e testing, see 
https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis_sink image: quay.io/numaio/numaflow-go/redis-sink:stable imagePullPolicy: Always env: From 5ff8ac1d3fea62b2046482bb445b60240f20ef21 Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Tue, 20 Aug 2024 16:36:41 -0700 Subject: [PATCH 016/188] chore: kustomize transformation config for MonoVertex (#1977) Signed-off-by: Derek Wang --- Makefile | 4 +- .../numaflow-transformer-config.yaml | 711 ++++++++++-------- 2 files changed, 395 insertions(+), 320 deletions(-) diff --git a/Makefile b/Makefile index 696b63f87b..b616d50621 100644 --- a/Makefile +++ b/Makefile @@ -288,12 +288,12 @@ ifneq ($(findstring release,$(GIT_BRANCH)),) .PHONY: prepare-release prepare-release: check-version-warning clean update-manifests-version codegen git status - @git diff --quiet || echo "\n\nPlease run 'git diff' to confirm the file changes are correct.\n" + @git diff --quiet || printf "\n\nPlease run 'git diff' to confirm the file changes are correct.\n\n" .PHONY: release release: check-version-warning @echo - @echo "1. Make sure you have run 'VERSION=$(VERSION) make prepare-release', and confirmed all the changes are expected." + @echo "1. Make sure you have run 'make prepare-release VERSION=$(VERSION)', and confirmed all the changes are expected." @echo @echo "2. Run following commands to commit the changes to the release branch, add give a tag." 
@echo diff --git a/docs/user-guide/reference/kustomize/numaflow-transformer-config.yaml b/docs/user-guide/reference/kustomize/numaflow-transformer-config.yaml index 6602151708..39439f10da 100644 --- a/docs/user-guide/reference/kustomize/numaflow-transformer-config.yaml +++ b/docs/user-guide/reference/kustomize/numaflow-transformer-config.yaml @@ -1,383 +1,458 @@ # https://github.com/kubernetes-sigs/kustomize/blob/master/api/internal/konfig/builtinpluginconsts/images.go images: -- path: spec/vertices/sidecars/image - kind: Pipeline -- path: spec/vertices/udf/container/image - kind: Pipeline -- path: spec/vertices/sink/udsink/container/image - kind: Pipeline -- path: spec/vertices/sink/fallback/udsink/container/image - kind: Pipeline -- path: spec/vertices/source/transformer/container/image - kind: Pipeline -- path: spec/vertices/source/udsource/container/image - kind: Pipeline -- path: spec/sideInputs/container/image - kind: Pipeline - -# https://github.com/kubernetes-sigs/kustomize/blob/master/api/internal/konfig/builtinpluginconsts/namereference.go -nameReference: -- kind: ConfigMap - version: v1 - fieldSpecs: - - path: spec/vertices/volumes/configMap/name - kind: Pipeline - - path: spec/vertices/volumes/projected/sources/configMap/name - kind: Pipeline - - path: spec/vertices/containerTemplate/env/valueFrom/configMapKeyRef/name - kind: Pipeline - - path: spec/vertices/containerTemplate/envFrom/configMapRef/name - kind: Pipeline - - path: spec/vertices/initContainerTemplate/env/valueFrom/configMapKeyRef/name - kind: Pipeline - - path: spec/vertices/initContainerTemplate/envFrom/configMapRef/name - kind: Pipeline - - path: spec/vertices/sideInputsContainerTemplate/env/valueFrom/configMapKeyRef/name - kind: Pipeline - - path: spec/vertices/sideInputsContainerTemplate/envFrom/configMapRef/name - kind: Pipeline - - path: spec/vertices/initContainers/env/valueFrom/configMapKeyRef/name - kind: Pipeline - - path: spec/vertices/initContainers/envFrom/configMapRef/name - 
kind: Pipeline - - path: spec/vertices/sidecars/env/valueFrom/configMapKeyRef/name - kind: Pipeline - - path: spec/vertices/sidecars/envFrom/configMapRef/name - kind: Pipeline - - path: spec/vertices/udf/container/env/valueFrom/configMapKeyRef/name - kind: Pipeline - - path: spec/vertices/udf/container/envFrom/configMapRef/name - kind: Pipeline - - path: spec/vertices/sink/udsink/container/env/valueFrom/configMapKeyRef/name - kind: Pipeline - - path: spec/vertices/sink/udsink/container/envFrom/configMapRef/name - kind: Pipeline - - path: spec/vertices/sink/fallback/udsink/container/env/valueFrom/configMapKeyRef/name + - path: spec/vertices/sidecars/image kind: Pipeline - - path: spec/vertices/sink/fallback/udsink/container/envFrom/configMapRef/name + - path: spec/vertices/udf/container/image kind: Pipeline - - path: spec/vertices/source/transformer/container/env/valueFrom/configMapKeyRef/name + - path: spec/vertices/sink/udsink/container/image kind: Pipeline - - path: spec/vertices/source/transformer/container/envFrom/configMapRef/name + - path: spec/vertices/sink/fallback/udsink/container/image kind: Pipeline - - path: spec/vertices/source/udsource/container/env/valueFrom/configMapKeyRef/name + - path: spec/vertices/source/transformer/container/image kind: Pipeline - - path: spec/vertices/source/udsource/container/envFrom/configMapRef/name + - path: spec/vertices/source/udsource/container/image kind: Pipeline - - path: spec/sideInputs/container/env/valueFrom/configMapKeyRef/name + - path: spec/sideInputs/container/image kind: Pipeline - - path: spec/sideInputs/container/envFrom/configMapRef/name - kind: Pipeline - - path: spec/templates/daemon/containerTemplate/env/valueFrom/configMapKeyRef/name - kind: Pipeline - - path: spec/templates/daemon/containerTemplate/envFrom/configMapRef/name - kind: Pipeline - - path: spec/templates/daemon/initContainerTemplate/env/valueFrom/configMapKeyRef/name - kind: Pipeline - - path: 
spec/templates/daemon/initContainerTemplate/envFrom/configMapRef/name - kind: Pipeline - - path: spec/templates/job/containerTemplate/env/valueFrom/configMapKeyRef/name + - path: spec/source/udsource/container/image + kind: MonoVertex + - path: spec/source/transformer/container/image + kind: MonoVertex + - path: spec/sink/udsink/container/image + kind: MonoVertex + - path: spec/sink/fallback/udsink/container/image + kind: MonoVertex + +# https://github.com/kubernetes-sigs/kustomize/blob/master/api/internal/konfig/builtinpluginconsts/namereference.go +nameReference: + - kind: ConfigMap + version: v1 + fieldSpecs: + - path: spec/vertices/volumes/configMap/name + kind: Pipeline + - path: spec/vertices/volumes/projected/sources/configMap/name + kind: Pipeline + - path: spec/vertices/containerTemplate/env/valueFrom/configMapKeyRef/name + kind: Pipeline + - path: spec/vertices/containerTemplate/envFrom/configMapRef/name + kind: Pipeline + - path: spec/vertices/initContainerTemplate/env/valueFrom/configMapKeyRef/name + kind: Pipeline + - path: spec/vertices/initContainerTemplate/envFrom/configMapRef/name + kind: Pipeline + - path: spec/vertices/sideInputsContainerTemplate/env/valueFrom/configMapKeyRef/name + kind: Pipeline + - path: spec/vertices/sideInputsContainerTemplate/envFrom/configMapRef/name + kind: Pipeline + - path: spec/vertices/initContainers/env/valueFrom/configMapKeyRef/name + kind: Pipeline + - path: spec/vertices/initContainers/envFrom/configMapRef/name + kind: Pipeline + - path: spec/vertices/sidecars/env/valueFrom/configMapKeyRef/name + kind: Pipeline + - path: spec/vertices/sidecars/envFrom/configMapRef/name + kind: Pipeline + - path: spec/vertices/udf/container/env/valueFrom/configMapKeyRef/name + kind: Pipeline + - path: spec/vertices/udf/container/envFrom/configMapRef/name + kind: Pipeline + - path: spec/vertices/sink/udsink/container/env/valueFrom/configMapKeyRef/name + kind: Pipeline + - path: 
spec/vertices/sink/udsink/container/envFrom/configMapRef/name + kind: Pipeline + - path: spec/vertices/sink/fallback/udsink/container/env/valueFrom/configMapKeyRef/name + kind: Pipeline + - path: spec/vertices/sink/fallback/udsink/container/envFrom/configMapRef/name + kind: Pipeline + - path: spec/vertices/source/transformer/container/env/valueFrom/configMapKeyRef/name + kind: Pipeline + - path: spec/vertices/source/transformer/container/envFrom/configMapRef/name + kind: Pipeline + - path: spec/vertices/source/udsource/container/env/valueFrom/configMapKeyRef/name + kind: Pipeline + - path: spec/vertices/source/udsource/container/envFrom/configMapRef/name + kind: Pipeline + - path: spec/sideInputs/container/env/valueFrom/configMapKeyRef/name + kind: Pipeline + - path: spec/sideInputs/container/envFrom/configMapRef/name + kind: Pipeline + - path: spec/templates/daemon/containerTemplate/env/valueFrom/configMapKeyRef/name + kind: Pipeline + - path: spec/templates/daemon/containerTemplate/envFrom/configMapRef/name + kind: Pipeline + - path: spec/templates/daemon/initContainerTemplate/env/valueFrom/configMapKeyRef/name + kind: Pipeline + - path: spec/templates/daemon/initContainerTemplate/envFrom/configMapRef/name + kind: Pipeline + - path: spec/templates/job/containerTemplate/env/valueFrom/configMapKeyRef/name + kind: Pipeline + - path: spec/templates/job/containerTemplate/envFrom/configMapRef/name + kind: Pipeline + - path: spec/templates/sideInputsManager/containerTemplate/env/valueFrom/configMapKeyRef/name + kind: Pipeline + - path: spec/templates/sideInputsManager/containerTemplate/envFrom/configMapRef/name + kind: Pipeline + - path: spec/templates/sideInputsManager/initContainerTemplate/env/valueFrom/configMapKeyRef/name + kind: Pipeline + - path: spec/templates/sideInputsManager/initContainerTemplate/envFrom/configMapRef/name + kind: Pipeline + - path: spec/templates/vertex/containerTemplate/env/valueFrom/configMapKeyRef/name + kind: Pipeline + - path: 
spec/templates/vertex/containerTemplate/envFrom/configMapRef/name + kind: Pipeline + - path: spec/templates/vertex/initContainerTemplate/env/valueFrom/configMapKeyRef/name + kind: Pipeline + - path: spec/templates/vertex/initContainerTemplate/envFrom/configMapRef/name + kind: Pipeline + - path: spec/jetstream/containerTemplate/env/valueFrom/configMapKeyRef/name + kind: InterStepBufferService + - path: spec/jetstream/containerTemplate/envFrom/configMapRef/name + kind: InterStepBufferService + - path: spec/jetstream/reloaderContainerTemplate/env/valueFrom/configMapKeyRef/name + kind: InterStepBufferService + - path: spec/jetstream/reloaderContainerTemplate/envFrom/configMapRef/name + kind: InterStepBufferService + - path: spec/jetstream/metricsContainerTemplate/env/valueFrom/configMapKeyRef/name + kind: InterStepBufferService + - path: spec/jetstream/metricsContainerTemplate/envFrom/configMapRef/name + kind: InterStepBufferService + - path: spec/source/transformer/container/env/valueFrom/configMapKeyRef/name + kind: MonoVertex + - path: spec/source/transformer/container/envFrom/configMapRef/name + kind: MonoVertex + - path: spec/source/udsource/container/env/valueFrom/configMapKeyRef/name + kind: MonoVertex + - path: spec/source/udsource/container/envFrom/configMapRef/name + kind: MonoVertex + - path: spec/sink/udsink/container/env/valueFrom/configMapKeyRef/name + kind: MonoVertex + - path: spec/sink/udsink/container/envFrom/configMapRef/name + kind: MonoVertex + - path: spec/sink/fallback/udsink/container/env/valueFrom/configMapKeyRef/name + kind: MonoVertex + - path: spec/sink/fallback/udsink/container/envFrom/configMapRef/name + kind: MonoVertex + - kind: Secret + version: v1 + fieldSpecs: + - path: spec/vertices/volumes/secret/secretName + kind: Pipeline + - path: spec/vertices/volumes/projected/sources/secret/name + kind: Pipeline + - path: spec/vertices/containerTemplate/env/valueFrom/secretKeyRef/name + kind: Pipeline + - path: 
spec/vertices/containerTemplate/envFrom/secretRef/name + kind: Pipeline + - path: spec/vertices/initContainerTemplate/env/valueFrom/secretKeyRef/name + kind: Pipeline + - path: spec/vertices/initContainerTemplate/envFrom/secretRef/name + kind: Pipeline + - path: spec/vertices/sideInputsContainerTemplate/env/valueFrom/secretKeyRef/name + kind: Pipeline + - path: spec/vertices/sideInputsContainerTemplate/envFrom/secretRef/name + kind: Pipeline + - path: spec/vertices/initContainers/env/valueFrom/secretKeyRef/name + kind: Pipeline + - path: spec/vertices/initContainers/envFrom/secretRef/name + kind: Pipeline + - path: spec/vertices/sidecars/env/valueFrom/secretKeyRef/name + kind: Pipeline + - path: spec/vertices/sidecars/envFrom/secretRef/name + kind: Pipeline + - path: spec/vertices/udf/container/env/valueFrom/secretKeyRef/name + kind: Pipeline + - path: spec/vertices/udf/container/envFrom/secretRef/name + kind: Pipeline + - path: spec/vertices/sink/udsink/container/env/valueFrom/secretKeyRef/name + kind: Pipeline + - path: spec/vertices/sink/udsink/container/envFrom/secretRef/name + kind: Pipeline + - path: spec/vertices/sink/fallback/udsink/container/env/valueFrom/secretKeyRef/name + kind: Pipeline + - path: spec/vertices/sink/fallback/udsink/container/envFrom/secretRef/name + kind: Pipeline + - path: spec/vertices/source/transformer/container/env/valueFrom/secretKeyRef/name + kind: Pipeline + - path: spec/vertices/source/transformer/container/envFrom/secretRef/name + kind: Pipeline + - path: spec/vertices/source/udsource/container/env/valueFrom/secretKeyRef/name + kind: Pipeline + - path: spec/vertices/source/udousrce/container/envFrom/secretRef/name + kind: Pipeline + - path: spec/sideInputs/container/env/valueFrom/secretKeyRef/name + kind: Pipeline + - path: spec/sideInputs/container/envFrom/secretRef/name + kind: Pipeline + - path: spec/templates/daemon/containerTemplate/env/valueFrom/secretKeyRef/name + kind: Pipeline + - path: 
spec/templates/daemon/containerTemplate/envFrom/secretRef/name + kind: Pipeline + - path: spec/templates/daemon/initContainerTemplate/env/valueFrom/secretKeyRef/name + kind: Pipeline + - path: spec/templates/daemon/initContainerTemplate/envFrom/secretRef/name + kind: Pipeline + - path: spec/templates/job/containerTemplate/env/valueFrom/secretKeyRef/name + kind: Pipeline + - path: spec/templates/job/containerTemplate/envFrom/secretRef/name + kind: Pipeline + - path: spec/templates/sideInputsManager/containerTemplate/env/valueFrom/secretKeyRef/name + kind: Pipeline + - path: spec/templates/sideInputsManager/containerTemplate/envFrom/secretRef/name + kind: Pipeline + - path: spec/templates/sideInputsManager/initContainerTemplate/env/valueFrom/secretKeyRef/name + kind: Pipeline + - path: spec/templates/sideInputsManager/initContainerTemplate/envFrom/secretRef/name + kind: Pipeline + - path: spec/templates/vertex/containerTemplate/env/valueFrom/secretKeyRef/name + kind: Pipeline + - path: spec/templates/vertex/containerTemplate/envFrom/secretRef/name + kind: Pipeline + - path: spec/templates/vertex/initContainerTemplate/env/valueFrom/secretKeyRef/name + kind: Pipeline + - path: spec/templates/vertex/initContainerTemplate/envFrom/secretRef/name + kind: Pipeline + - path: spec/vertices/imagePullSecrets/name + kind: Pipeline + - path: spec/templates/daemon/imagePullSecrets/name + kind: Pipeline + - path: spec/templates/job/imagePullSecrets/name + kind: Pipeline + - path: spec/templates/sideInputsManager/imagePullSecrets/name + kind: Pipeline + - path: spec/templates/vertex/imagePullSecrets/name + kind: Pipeline + - path: spec/jetstream/containerTemplate/env/valueFrom/secretKeyRef/name + kind: InterStepBufferService + - path: spec/jetstream/containerTemplate/envFrom/secretRef/name + kind: InterStepBufferService + - path: spec/jetstream/reloaderContainerTemplate/env/valueFrom/secretKeyRef/name + kind: InterStepBufferService + - path: 
spec/jetstream/reloaderContainerTemplate/envFrom/secretRef/name + kind: InterStepBufferService + - path: spec/jetstream/metricsContainerTemplate/env/valueFrom/secretKeyRef/name + kind: InterStepBufferService + - path: spec/jetstream/metricsContainerTemplate/envFrom/secretRef/name + kind: InterStepBufferService + - path: spec/source/transformer/container/env/valueFrom/secretKeyRef/name + kind: MonoVertex + - path: spec/source/transformer/container/envFrom/secretRef/name + kind: MonoVertex + - path: spec/source/udsource/container/env/valueFrom/secretKeyRef/name + kind: MonoVertex + - path: spec/source/udousrce/container/envFrom/secretRef/name + kind: MonoVertex + - path: spec/sink/udsink/container/env/valueFrom/secretKeyRef/name + kind: MonoVertex + - path: spec/sink/udsink/container/envFrom/secretRef/name + kind: MonoVertex + - path: spec/sink/fallback/udsink/container/env/valueFrom/secretKeyRef/name + kind: MonoVertex + - path: spec/sink/fallback/udsink/container/envFrom/secretRef/name + kind: MonoVertex + - kind: ServiceAccount + version: v1 + fieldSpecs: + - path: spec/vertices/serviceAccountName + kind: Pipeline + - path: spec/templates/daemon/serviceAccountName + kind: Pipeline + - path: spec/templates/job/serviceAccountName + kind: Pipeline + - path: spec/templates/sideInputsManager/serviceAccountName + kind: Pipeline + - path: spec/templates/vertex/serviceAccountName + kind: Pipeline + - path: spec/jetstream/serviceAccountName + kind: InterStepBufferService + - path: spec/serviceAccountName + kind: MonoVertex + - kind: PersistentVolumeClaim + version: v1 + fieldSpecs: + - path: spec/vertices/volumes/persistentVolumeClaim/claimName + kind: Pipeline + - kind: PriorityClass + version: v1 + group: scheduling.k8s.io + fieldSpecs: + - path: spec/vertices/priorityClassName + kind: Pipeline + - path: spec/templates/daemon/priorityClassName + kind: Pipeline + - path: spec/templates/job/priorityClassName + kind: Pipeline + - path: 
spec/templates/sideInputsManager/priorityClassName + kind: Pipeline + - path: spec/templates/vertex/priorityClassName + kind: Pipeline + - path: spec/jetstream/priorityClassName + kind: InterStepBufferService + - path: spec/priorityClassName + kind: MonoVertex + +# https://github.com/kubernetes-sigs/kustomize/blob/master/api/internal/konfig/builtinpluginconsts/commonlabels.go +commonLabels: + - path: spec/vertices/metadata/labels + create: true kind: Pipeline - - path: spec/templates/job/containerTemplate/envFrom/configMapRef/name + - path: spec/templates/daemon/metadata/labels + create: true kind: Pipeline - - path: spec/templates/sideInputsManager/containerTemplate/env/valueFrom/configMapKeyRef/name + - path: spec/templates/job/metadata/labels + create: true kind: Pipeline - - path: spec/templates/sideInputsManager/containerTemplate/envFrom/configMapRef/name + - path: spec/templates/sideInputsManager/metadata/labels + create: true kind: Pipeline - - path: spec/templates/sideInputsManager/initContainerTemplate/env/valueFrom/configMapKeyRef/name + - path: spec/templates/vertex/metadata/labels + create: true kind: Pipeline - - path: spec/templates/sideInputsManager/initContainerTemplate/envFrom/configMapRef/name + - path: spec/jetstream/metadata/labels + create: true + kind: InterStepBufferService + - path: spec/metadata/labels + create: true + kind: MonoVertex + +# https://github.com/kubernetes-sigs/kustomize/blob/master/api/internal/konfig/builtinpluginconsts/commonannotations.go +commonAnnotations: + - path: spec/vertices/metadata/annotations + create: true kind: Pipeline - - path: spec/templates/vertex/containerTemplate/env/valueFrom/configMapKeyRef/name + - path: spec/templates/daemon/metadata/annotations + create: true kind: Pipeline - - path: spec/templates/vertex/containerTemplate/envFrom/configMapRef/name + - path: spec/templates/job/metadata/annotations + create: true kind: Pipeline - - path: 
spec/templates/vertex/initContainerTemplate/env/valueFrom/configMapKeyRef/name + - path: spec/templates/sideInputsManager/metadata/annotations + create: true kind: Pipeline - - path: spec/templates/vertex/initContainerTemplate/envFrom/configMapRef/name + - path: spec/templates/vertex/metadata/annotations + create: true kind: Pipeline - - path: spec/jetstream/containerTemplate/env/valueFrom/configMapKeyRef/name - kind: InterStepBufferService - - path: spec/jetstream/containerTemplate/envFrom/configMapRef/name - kind: InterStepBufferService - - path: spec/jetstream/reloaderContainerTemplate/env/valueFrom/configMapKeyRef/name - kind: InterStepBufferService - - path: spec/jetstream/reloaderContainerTemplate/envFrom/configMapRef/name + - path: spec/jetstream/metadata/annotations + create: true kind: InterStepBufferService - - path: spec/jetstream/metricsContainerTemplate/env/valueFrom/configMapKeyRef/name - kind: InterStepBufferService - - path: spec/jetstream/metricsContainerTemplate/envFrom/configMapRef/name - kind: InterStepBufferService -- kind: Secret - version: v1 - fieldSpecs: - - path: spec/vertices/volumes/secret/secretName - kind: Pipeline - - path: spec/vertices/volumes/projected/sources/secret/name - kind: Pipeline - - path: spec/vertices/containerTemplate/env/valueFrom/secretKeyRef/name - kind: Pipeline - - path: spec/vertices/containerTemplate/envFrom/secretRef/name - kind: Pipeline - - path: spec/vertices/initContainerTemplate/env/valueFrom/secretKeyRef/name + - path: spec/metadata/annotations + create: true + kind: MonoVertex + +# https://github.com/kubernetes-sigs/kustomize/blob/master/api/internal/konfig/builtinpluginconsts/varreference.go +varReference: + - path: spec/vertices/sidecars/args kind: Pipeline - - path: spec/vertices/initContainerTemplate/envFrom/secretRef/name + - path: spec/vertices/sidecars/command kind: Pipeline - - path: spec/vertices/sideInputsContainerTemplate/env/valueFrom/secretKeyRef/name + - path: 
spec/vertices/sidecars/env/value kind: Pipeline - - path: spec/vertices/sideInputsContainerTemplate/envFrom/secretRef/name + - path: spec/vertices/udf/container/args kind: Pipeline - - path: spec/vertices/initContainers/env/valueFrom/secretKeyRef/name + - path: spec/vertices/udf/container/command kind: Pipeline - - path: spec/vertices/initContainers/envFrom/secretRef/name + - path: spec/vertices/udf/container/env/value kind: Pipeline - - path: spec/vertices/sidecars/env/valueFrom/secretKeyRef/name + - path: spec/vertices/sink/udsink/container/args kind: Pipeline - - path: spec/vertices/sidecars/envFrom/secretRef/name + - path: spec/vertices/sink/udsink/container/command kind: Pipeline - - path: spec/vertices/udf/container/env/valueFrom/secretKeyRef/name + - path: spec/vertices/sink/udsink/container/env/value kind: Pipeline - - path: spec/vertices/udf/container/envFrom/secretRef/name + - path: spec/vertices/sink/fallback/udsink/container/args kind: Pipeline - - path: spec/vertices/sink/udsink/container/env/valueFrom/secretKeyRef/name + - path: spec/vertices/sink/fallback/udsink/container/command kind: Pipeline - - path: spec/vertices/sink/udsink/container/envFrom/secretRef/name + - path: spec/vertices/sink/fallback/udsink/container/env/value kind: Pipeline - - path: spec/vertices/sink/fallback/udsink/container/env/valueFrom/secretKeyRef/name + - path: spec/vertices/source/transformer/container/args kind: Pipeline - - path: spec/vertices/sink/fallback/udsink/container/envFrom/secretRef/name + - path: spec/vertices/source/transformer/container/command kind: Pipeline - - path: spec/vertices/source/transformer/container/env/valueFrom/secretKeyRef/name + - path: spec/vertices/source/transformer/container/env/value kind: Pipeline - - path: spec/vertices/source/transformer/container/envFrom/secretRef/name + - path: spec/vertices/source/udsource/container/args kind: Pipeline - - path: spec/vertices/source/udsource/container/env/valueFrom/secretKeyRef/name + - path: 
spec/vertices/source/udsource/container/command kind: Pipeline - - path: spec/vertices/source/udousrce/container/envFrom/secretRef/name + - path: spec/vertices/source/udsource/container/env/value kind: Pipeline - - path: spec/sideInputs/container/env/valueFrom/secretKeyRef/name + - path: spec/sideInputs/container/args kind: Pipeline - - path: spec/sideInputs/container/envFrom/secretRef/name + - path: spec/sideInputs/container/command kind: Pipeline - - path: spec/templates/daemon/containerTemplate/env/valueFrom/secretKeyRef/name + - path: spec/sideInputs/container/env/value kind: Pipeline - - path: spec/templates/daemon/containerTemplate/envFrom/secretRef/name + - path: spec/vertices/containerTemplate/env/value kind: Pipeline - - path: spec/templates/daemon/initContainerTemplate/env/valueFrom/secretKeyRef/name + - path: spec/vertices/initContainerTemplate/env/value kind: Pipeline - - path: spec/templates/daemon/initContainerTemplate/envFrom/secretRef/name + - path: spec/vertices/sideInputsContainerTemplate/env/value kind: Pipeline - - path: spec/templates/job/containerTemplate/env/valueFrom/secretKeyRef/name + - path: spec/vertices/initContainers/env/value kind: Pipeline - - path: spec/templates/job/containerTemplate/envFrom/secretRef/name + - path: spec/templates/daemon/containerTemplate/env/value kind: Pipeline - - path: spec/templates/sideInputsManager/containerTemplate/env/valueFrom/secretKeyRef/name + - path: spec/templates/daemon/initContainerTemplate/env/value kind: Pipeline - - path: spec/templates/sideInputsManager/containerTemplate/envFrom/secretRef/name + - path: spec/templates/job/containerTemplate/env/value kind: Pipeline - - path: spec/templates/sideInputsManager/initContainerTemplate/env/valueFrom/secretKeyRef/name + - path: spec/templates/sideInputsManager/containerTemplate/env/value kind: Pipeline - - path: spec/templates/sideInputsManager/initContainerTemplate/envFrom/secretRef/name + - path: 
spec/templates/sideInputsManager/initContainerTemplate/env/value kind: Pipeline - - path: spec/templates/vertex/containerTemplate/env/valueFrom/secretKeyRef/name + - path: spec/templates/vertex/containerTemplate/env/value kind: Pipeline - - path: spec/templates/vertex/containerTemplate/envFrom/secretRef/name + - path: spec/templates/vertex/initContainerTemplate/env/value kind: Pipeline - - path: spec/templates/vertex/initContainerTemplate/env/valueFrom/secretKeyRef/name + - path: spec/vertices/sidecars/volumeMounts/mountPath kind: Pipeline - - path: spec/templates/vertex/initContainerTemplate/envFrom/secretRef/name + - path: spec/vertices/udf/container/volumeMounts/mountPath kind: Pipeline - - path: spec/vertices/imagePullSecrets/name + - path: spec/vertices/sink/udsink/container/volumeMounts/mountPath kind: Pipeline - - path: spec/templates/daemon/imagePullSecrets/name + - path: spec/vertices/sink/fallback/udsink/container/volumeMounts/mountPath kind: Pipeline - - path: spec/templates/job/imagePullSecrets/name + - path: spec/vertices/source/transformer/container/volumeMounts/mountPath kind: Pipeline - - path: spec/templates/sideInputsManager/imagePullSecrets/name + - path: spec/vertices/source/udsource/container/volumeMounts/mountPath kind: Pipeline - - path: spec/templates/vertex/imagePullSecrets/name + - path: spec/sideInputs/container/volumeMounts/mountPath kind: Pipeline - - path: spec/jetstream/containerTemplate/env/valueFrom/secretKeyRef/name + - path: spec/jetstream/containerTemplate/env/value kind: InterStepBufferService - - path: spec/jetstream/containerTemplate/envFrom/secretRef/name - kind: InterStepBufferService - - path: spec/jetstream/reloaderContainerTemplate/env/valueFrom/secretKeyRef/name - kind: InterStepBufferService - - path: spec/jetstream/reloaderContainerTemplate/envFrom/secretRef/name - kind: InterStepBufferService - - path: spec/jetstream/metricsContainerTemplate/env/valueFrom/secretKeyRef/name - kind: InterStepBufferService - - path: 
spec/jetstream/metricsContainerTemplate/envFrom/secretRef/name - kind: InterStepBufferService -- kind: ServiceAccount - version: v1 - fieldSpecs: - - path: spec/vertices/serviceAccountName - kind: Pipeline - - path: spec/templates/daemon/serviceAccountName - kind: Pipeline - - path: spec/templates/job/serviceAccountName - kind: Pipeline - - path: spec/templates/sideInputsManager/serviceAccountName - kind: Pipeline - - path: spec/templates/vertex/serviceAccountName - kind: Pipeline - - path: spec/jetstream/serviceAccountName + - path: spec/jetstream/reloaderContainerTemplate/env/value kind: InterStepBufferService -- kind: PersistentVolumeClaim - version: v1 - fieldSpecs: - - path: spec/vertices/volumes/persistentVolumeClaim/claimName - kind: Pipeline -- kind: PriorityClass - version: v1 - group: scheduling.k8s.io - fieldSpecs: - - path: spec/vertices/priorityClassName - kind: Pipeline - - path: spec/templates/daemon/priorityClassName - kind: Pipeline - - path: spec/templates/job/priorityClassName - kind: Pipeline - - path: spec/templates/sideInputsManager/priorityClassName - kind: Pipeline - - path: spec/templates/vertex/priorityClassName - kind: Pipeline - - path: spec/jetstream/priorityClassName + - path: spec/jetstream/metricsContainerTemplate/env/value kind: InterStepBufferService - -# https://github.com/kubernetes-sigs/kustomize/blob/master/api/internal/konfig/builtinpluginconsts/commonlabels.go -commonLabels: -- path: spec/vertices/metadata/labels - create: true - kind: Pipeline -- path: spec/templates/daemon/metadata/labels - create: true - kind: Pipeline -- path: spec/templates/job/metadata/labels - create: true - kind: Pipeline -- path: spec/templates/sideInputsManager/metadata/labels - create: true - kind: Pipeline -- path: spec/templates/vertex/metadata/labels - create: true - kind: Pipeline -- path: spec/jetstream/metadata/labels - create: true - kind: InterStepBufferService - -# 
https://github.com/kubernetes-sigs/kustomize/blob/master/api/internal/konfig/builtinpluginconsts/commonannotations.go -commonAnnotations: -- path: spec/vertices/metadata/annotations - create: true - kind: Pipeline -- path: spec/templates/daemon/metadata/annotations - create: true - kind: Pipeline -- path: spec/templates/job/metadata/annotations - create: true - kind: Pipeline -- path: spec/templates/sideInputsManager/metadata/annotations - create: true - kind: Pipeline -- path: spec/templates/vertex/metadata/annotations - create: true - kind: Pipeline -- path: spec/jetstream/metadata/annotations - create: true - kind: InterStepBufferService - -# https://github.com/kubernetes-sigs/kustomize/blob/master/api/internal/konfig/builtinpluginconsts/varreference.go -varReference: -- path: spec/vertices/sidecars/args - kind: Pipeline -- path: spec/vertices/sidecars/command - kind: Pipeline -- path: spec/vertices/sidecars/env/value - kind: Pipeline -- path: spec/vertices/udf/container/args - kind: Pipeline -- path: spec/vertices/udf/container/command - kind: Pipeline -- path: spec/vertices/udf/container/env/value - kind: Pipeline -- path: spec/vertices/sink/udsink/container/args - kind: Pipeline -- path: spec/vertices/sink/udsink/container/command - kind: Pipeline -- path: spec/vertices/sink/udsink/container/env/value - kind: Pipeline -- path: spec/vertices/sink/fallback/udsink/container/args - kind: Pipeline -- path: spec/vertices/sink/fallback/udsink/container/command - kind: Pipeline -- path: spec/vertices/sink/fallback/udsink/container/env/value - kind: Pipeline -- path: spec/vertices/source/transformer/container/args - kind: Pipeline -- path: spec/vertices/source/transformer/container/command - kind: Pipeline -- path: spec/vertices/source/transformer/container/env/value - kind: Pipeline -- path: spec/vertices/source/udsource/container/args - kind: Pipeline -- path: spec/vertices/source/udsource/container/command - kind: Pipeline -- path: 
spec/vertices/source/udsource/container/env/value - kind: Pipeline -- path: spec/sideInputs/container/args - kind: Pipeline -- path: spec/sideInputs/container/command - kind: Pipeline -- path: spec/sideInputs/container/env/value - kind: Pipeline -- path: spec/vertices/containerTemplate/env/value - kind: Pipeline -- path: spec/vertices/initContainerTemplate/env/value - kind: Pipeline -- path: spec/vertices/sideInputsContainerTemplate/env/value - kind: Pipeline -- path: spec/vertices/initContainers/env/value - kind: Pipeline -- path: spec/templates/daemon/containerTemplate/env/value - kind: Pipeline -- path: spec/templates/daemon/initContainerTemplate/env/value - kind: Pipeline -- path: spec/templates/job/containerTemplate/env/value - kind: Pipeline -- path: spec/templates/sideInputsManager/containerTemplate/env/value - kind: Pipeline -- path: spec/templates/sideInputsManager/initContainerTemplate/env/value - kind: Pipeline -- path: spec/templates/vertex/containerTemplate/env/value - kind: Pipeline -- path: spec/templates/vertex/initContainerTemplate/env/value - kind: Pipeline -- path: spec/vertices/sidecars/volumeMounts/mountPath - kind: Pipeline -- path: spec/vertices/udf/container/volumeMounts/mountPath - kind: Pipeline -- path: spec/vertices/sink/udsink/container/volumeMounts/mountPath - kind: Pipeline -- path: spec/vertices/sink/fallback/udsink/container/volumeMounts/mountPath - kind: Pipeline -- path: spec/vertices/source/transformer/container/volumeMounts/mountPath - kind: Pipeline -- path: spec/vertices/source/udsource/container/volumeMounts/mountPath - kind: Pipeline -- path: spec/sideInputs/container/volumeMounts/mountPath - kind: Pipeline -- path: spec/jetstream/containerTemplate/env/value - kind: InterStepBufferService -- path: spec/jetstream/reloaderContainerTemplate/env/value - kind: InterStepBufferService -- path: spec/jetstream/metricsContainerTemplate/env/value - kind: InterStepBufferService + - path: spec/source/transformer/container/args + kind: 
MonoVertex + - path: spec/source/transformer/container/command + kind: MonoVertex + - path: spec/source/transformer/container/env/value + kind: MonoVertex + - path: spec/source/udsource/container/args + kind: MonoVertex + - path: spec/source/udsource/container/command + kind: MonoVertex + - path: spec/source/udsource/container/env/value + - path: spec/sink/udsink/container/args + kind: MonoVertex + - path: spec/sink/udsink/container/command + kind: MonoVertex + - path: spec/sink/udsink/container/env/value + kind: MonoVertex + - path: spec/sink/fallback/udsink/container/args + kind: MonoVertex + - path: spec/sink/fallback/udsink/container/command + kind: MonoVertex + - path: spec/sink/fallback/udsink/container/env/value + kind: MonoVertex # https://github.com/kubernetes-sigs/kustomize/blob/master/api/internal/konfig/builtinpluginconsts/replicas.go replicas: -- path: spec/jetstream/replicas - create: true - kind: InterStepBufferService - + - path: spec/jetstream/replicas + create: true + kind: InterStepBufferService + - path: spec/replicas + create: true + kind: MonoVertex From af2f65220afa80fc8f4bf684cc9ce58234c2bb80 Mon Sep 17 00:00:00 2001 From: Vigith Maurice Date: Tue, 20 Aug 2024 17:46:39 -0700 Subject: [PATCH 017/188] fix: remove coloring in logs (#1975) --- rust/monovertex/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/rust/monovertex/src/lib.rs b/rust/monovertex/src/lib.rs index 614e7279aa..515975c16f 100644 --- a/rust/monovertex/src/lib.rs +++ b/rust/monovertex/src/lib.rs @@ -49,6 +49,7 @@ pub async fn mono_vertex() { .parse_lossy(&config().log_level), ) .with_target(false) + .with_ansi(false) .init(); // Initialize the source, sink and transformer configurations From 33bbbad4d7b16f9494d4164993b1cb9d32acc18b Mon Sep 17 00:00:00 2001 From: Vigith Maurice Date: Tue, 20 Aug 2024 21:48:56 -0700 Subject: [PATCH 018/188] fix: minor perf improvements of mvtx fallback sink (#1967) Signed-off-by: Vigith Maurice Signed-off-by: Yashash H L 
Co-authored-by: Yashash H L --- rust/monovertex/src/forwarder.rs | 250 ++++++++++++++++--------------- rust/monovertex/src/lib.rs | 41 ++--- rust/monovertex/src/sink.rs | 20 ++- 3 files changed, 169 insertions(+), 142 deletions(-) diff --git a/rust/monovertex/src/forwarder.rs b/rust/monovertex/src/forwarder.rs index 5862e753e9..7be4824b6e 100644 --- a/rust/monovertex/src/forwarder.rs +++ b/rust/monovertex/src/forwarder.rs @@ -94,6 +94,7 @@ impl Forwarder { processed_msgs_count += self.read_and_process_messages().await?; // if the last forward was more than 1 second ago, forward a chunk print the number of messages forwarded + // TODO: add histogram details (p99, etc.) if last_forwarded_at.elapsed().as_millis() >= 1000 { info!( "Forwarded {} messages at time {}", @@ -112,11 +113,9 @@ impl Forwarder { Ok(()) } - /* - Read messages from the source, apply transformation if transformer is present, - write the messages to the sink, if fallback messages are present write them to the fallback sink, - and then acknowledge the messages back to the source. - */ + /// Read messages from the source, apply transformation if transformer is present, + /// write the messages to the sink, if fallback messages are present write them to the fallback sink, + /// and then acknowledge the messages back to the source. async fn read_and_process_messages(&mut self) -> Result { let start_time = tokio::time::Instant::now(); let messages = self @@ -129,31 +128,31 @@ impl Forwarder { start_time.elapsed().as_millis() ); - // nothing more to be done. + // read returned 0 messages, nothing more to be done. 
if messages.is_empty() { return Ok(0); } let msg_count = messages.len() as u64; - let bytes_count = messages - .iter() - .map(|msg| msg.value.len() as u64) - .sum::(); forward_metrics() .monovtx_read_total .get_or_create(&self.common_labels) .inc_by(msg_count); + + let (offsets, bytes_count): (Vec, u64) = messages.iter().fold( + (Vec::with_capacity(messages.len()), 0), + |(mut offsets, mut bytes_count), msg| { + offsets.push(msg.offset.clone()); + bytes_count += msg.value.len() as u64; + (offsets, bytes_count) + }, + ); + forward_metrics() .monovtx_read_bytes_total .get_or_create(&self.common_labels) .inc_by(bytes_count); - // collect all the offsets as the transformer can drop (via filter) messages - let offsets = messages - .iter() - .map(|msg| msg.offset.clone()) - .collect::>(); - // Apply transformation if transformer is present let transformed_messages = self.apply_transformer(messages).await?; @@ -169,33 +168,39 @@ impl Forwarder { // Applies transformation to the messages if transformer is present // we concurrently apply transformation to all the messages. async fn apply_transformer(&self, messages: Vec) -> Result> { - if let Some(transformer_client) = &self.transformer_client { - let start_time = tokio::time::Instant::now(); - let mut jh = JoinSet::new(); - for message in messages { - let mut transformer_client = transformer_client.clone(); - jh.spawn(async move { transformer_client.transform_fn(message).await }); - } + let transformer_client; + if let Some(trf_client) = &self.transformer_client { + transformer_client = trf_client; + } else { + // return early if there is no transformer + return Ok(messages); + } - let mut results = Vec::new(); - while let Some(task) = jh.join_next().await { - let result = task.map_err(|e| Error::TransformerError(format!("{:?}", e)))?; - if let Some(result) = result? 
{ - results.extend(result); - } + let start_time = tokio::time::Instant::now(); + let mut jh = JoinSet::new(); + for message in messages { + let mut transformer_client = transformer_client.clone(); + jh.spawn(async move { transformer_client.transform_fn(message).await }); + } + + let mut results = Vec::new(); + while let Some(task) = jh.join_next().await { + let result = task.map_err(|e| Error::TransformerError(format!("{:?}", e)))?; + if let Some(result) = result? { + results.extend(result); } - debug!( - "Transformer latency - {}ms", - start_time.elapsed().as_millis() - ); - Ok(results) - } else { - Ok(messages) } + + debug!( + "Transformer latency - {}ms", + start_time.elapsed().as_millis() + ); + + Ok(results) } // Writes the messages to the sink and handles fallback messages if present - async fn write_to_sink(&mut self, mut messages: Vec) -> Result<()> { + async fn write_to_sink(&mut self, messages: Vec) -> Result<()> { let msg_count = messages.len() as u64; if messages.is_empty() { @@ -205,67 +210,63 @@ impl Forwarder { let mut attempts = 0; let mut error_map = HashMap::new(); let mut fallback_msgs = Vec::new(); + // start with the original set of message to be sent. + // we will overwrite this vec with failed messages and will keep retrying. 
+ let mut messages_to_send = messages; while attempts <= config().sink_max_retry_attempts { let start_time = tokio::time::Instant::now(); - match self.sink_client.sink_fn(messages.clone()).await { + match self.sink_client.sink_fn(messages_to_send.clone()).await { Ok(response) => { debug!("Sink latency - {}ms", start_time.elapsed().as_millis()); attempts += 1; - fallback_msgs.extend( - response - .results - .iter() - .filter(|result| result.status == proto::Status::Fallback as i32) - .map(|result| { - messages - .iter() - .find(|msg| msg.id == result.id) - .unwrap() - .clone() - }) - .collect::>(), - ); - - messages = response + // create a map of id to result, since there is no strict requirement + // for the udsink to return the results in the same order as the requests + let result_map: HashMap<_, _> = response .results .iter() - .filter(|result| result.status == proto::Status::Failure as i32) - .map(|result| { - messages - .iter() - .find(|msg| msg.id == result.id) - .unwrap() - .clone() - }) - .collect::>(); - - if messages.is_empty() { - break; - } else { - error_map.clear(); - for result in response.results { - if result.status == proto::Status::Failure as i32 { - *error_map.entry(result.err_msg).or_insert(0) += 1; - } + .map(|result| (result.id.clone(), result)) + .collect(); + + error_map.clear(); + // drain all the messages that were successfully written + // and keep only the failed messages to send again + // construct the error map for the failed messages + messages_to_send.retain(|msg| { + if let Some(result) = result_map.get(&msg.id) { + return if result.status == proto::Status::Success as i32 { + false + } else if result.status == proto::Status::Fallback as i32 { + fallback_msgs.push(msg.clone()); // add to fallback messages + false + } else { + *error_map.entry(result.err_msg.clone()).or_insert(0) += 1; + true + }; } + false + }); - warn!( - "Retry attempt {} due to retryable error. 
Errors: {:?}", - attempts, error_map - ); - sleep(tokio::time::Duration::from_millis( - config().sink_retry_interval_in_ms as u64, - )) - .await; + // if all messages are successfully written, break the loop + if messages_to_send.is_empty() { + break; } + + warn!( + "Retry attempt {} due to retryable error. Errors: {:?}", + attempts, error_map + ); + sleep(tokio::time::Duration::from_millis( + config().sink_retry_interval_in_ms as u64, + )) + .await; } Err(e) => return Err(e), } } - if !messages.is_empty() { + if !messages_to_send.is_empty() { return Err(Error::SinkError(format!( "Failed to sink messages after {} attempts. Errors: {:?}", attempts, error_map @@ -285,7 +286,7 @@ impl Forwarder { } // Writes the fallback messages to the fallback sink - async fn handle_fallback_messages(&mut self, mut fallback_msgs: Vec) -> Result<()> { + async fn handle_fallback_messages(&mut self, fallback_msgs: Vec) -> Result<()> { if self.fallback_client.is_none() { return Err(Error::SinkError( "Response contains fallback messages but no fallback sink is configured" @@ -296,67 +297,78 @@ impl Forwarder { let fallback_client = self.fallback_client.as_mut().unwrap(); let mut attempts = 0; let mut fallback_error_map = HashMap::new(); + // start with the original set of message to be sent. + // we will overwrite this vec with failed messages and will keep retrying. 
+ let mut messages_to_send = fallback_msgs; while attempts <= config().sink_max_retry_attempts { let start_time = tokio::time::Instant::now(); - match fallback_client.sink_fn(fallback_msgs.clone()).await { + match fallback_client.sink_fn(messages_to_send.clone()).await { Ok(fb_response) => { debug!( "Fallback sink latency - {}ms", start_time.elapsed().as_millis() ); - fallback_msgs = fb_response - .results - .iter() - .filter(|result| result.status == proto::Status::Failure as i32) - .map(|result| { - fallback_msgs - .iter() - .find(|msg| msg.id == result.id) - .unwrap() - .clone() - }) - .collect::>(); - - // we can't specify fallback response inside fallback sink - if fb_response + // create a map of id to result, since there is no strict requirement + // for the udsink to return the results in the same order as the requests + let result_map: HashMap<_, _> = fb_response .results .iter() - .any(|result| result.status == proto::Status::Fallback as i32) - { + .map(|result| (result.id.clone(), result)) + .collect(); + + let mut contains_fallback_status = false; + + fallback_error_map.clear(); + // drain all the messages that were successfully written + // and keep only the failed messages to send again + // construct the error map for the failed messages + messages_to_send.retain(|msg| { + if let Some(result) = result_map.get(&msg.id) { + if result.status == proto::Status::Failure as i32 { + *fallback_error_map + .entry(result.err_msg.clone()) + .or_insert(0) += 1; + true + } else if result.status == proto::Status::Fallback as i32 { + contains_fallback_status = true; + false + } else { + false + } + } else { + false + } + }); + + // specifying fallback status in fallback response is not allowed + if contains_fallback_status { return Err(Error::SinkError( - "Fallback sink can't specify status fallback".to_string(), + "Fallback response contains fallback status".to_string(), )); } attempts += 1; - if fallback_msgs.is_empty() { + if messages_to_send.is_empty() { break; 
- } else { - fallback_error_map.clear(); - for result in fb_response.results { - if result.status != proto::Status::Success as i32 { - *fallback_error_map.entry(result.err_msg).or_insert(0) += 1; - } - } - - warn!( - "Fallback sink retry attempt {} due to retryable error. Errors: {:?}", - attempts, fallback_error_map - ); - sleep(tokio::time::Duration::from_millis( - config().sink_retry_interval_in_ms as u64, - )) - .await; } + + warn!( + "Retry attempt {} due to retryable error. Errors: {:?}", + attempts, fallback_error_map + ); + sleep(tokio::time::Duration::from_millis( + config().sink_retry_interval_in_ms as u64, + )) + .await; } Err(e) => return Err(e), } } - if !fallback_msgs.is_empty() { + if !messages_to_send.is_empty() { return Err(Error::SinkError(format!( "Failed to write messages to fallback sink after {} attempts. Errors: {:?}", attempts, fallback_error_map diff --git a/rust/monovertex/src/lib.rs b/rust/monovertex/src/lib.rs index 515975c16f..9acabc95ea 100644 --- a/rust/monovertex/src/lib.rs +++ b/rust/monovertex/src/lib.rs @@ -3,7 +3,7 @@ use crate::config::config; pub(crate) use crate::error::Error; use crate::forwarder::ForwarderBuilder; use crate::metrics::{start_metrics_https_server, LagReaderBuilder, MetricsState}; -use crate::sink::{SinkClient, SinkConfig, FB_SINK_SERVER_INFO_FILE, FB_SINK_SOCKET}; +use crate::sink::{SinkClient, SinkConfig}; use crate::source::{SourceClient, SourceConfig}; use crate::transformer::{TransformerClient, TransformerConfig}; use std::net::SocketAddr; @@ -24,22 +24,24 @@ use tracing_subscriber::EnvFilter; /// - Send Acknowledgement back to the Source pub mod error; -mod metrics; +pub(crate) mod source; -pub mod source; +pub(crate) mod sink; -pub mod sink; +pub(crate) mod transformer; -pub mod transformer; +pub(crate) mod forwarder; -pub mod forwarder; +pub(crate) mod config; -pub mod config; +pub(crate) mod message; -pub mod message; -mod server_info; pub(crate) mod shared; +mod server_info; + +mod metrics; + pub 
async fn mono_vertex() { // Initialize the logger tracing_subscriber::fmt() @@ -74,11 +76,7 @@ pub async fn mono_vertex() { }; let fb_sink_config = if config().is_fallback_enabled { - Some(SinkConfig { - max_message_size: config().grpc_max_message_size, - socket_path: FB_SINK_SOCKET.to_string(), - server_info_file: FB_SINK_SERVER_INFO_FILE.to_string(), - }) + Some(SinkConfig::fallback_default()) } else { None }; @@ -104,7 +102,7 @@ pub async fn mono_vertex() { { error!("Application error: {:?}", e); - // abort the task since we have an error + // abort the signal handler task since we have an error and we are shutting down if !shutdown_handle.is_finished() { shutdown_handle.abort(); } @@ -227,17 +225,18 @@ pub async fn init( // build the forwarder let mut forwarder_builder = ForwarderBuilder::new(source_client, sink_client, cln_token); + // add transformer if exists if let Some(transformer_client) = transformer_client { forwarder_builder = forwarder_builder.transformer_client(transformer_client); } - + // add fallback sink if exists if let Some(fb_sink_client) = fb_sink_client { forwarder_builder = forwarder_builder.fb_sink_client(fb_sink_client); } - + // build the final forwarder let mut forwarder = forwarder_builder.build(); - // start the forwarder + // start the forwarder, it will return only on Signal forwarder.start().await?; info!("Forwarder stopped gracefully"); @@ -378,8 +377,10 @@ mod tests { // FIXME: we need to have a better way, this is flaky tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; - env::set_var("SOURCE_SOCKET", src_sock_file.to_str().unwrap()); - env::set_var("SINK_SOCKET", sink_sock_file.to_str().unwrap()); + unsafe { + env::set_var("SOURCE_SOCKET", src_sock_file.to_str().unwrap()); + env::set_var("SINK_SOCKET", sink_sock_file.to_str().unwrap()); + } let cln_token = CancellationToken::new(); diff --git a/rust/monovertex/src/sink.rs b/rust/monovertex/src/sink.rs index 4cedaef49e..fb82273fb6 100644 --- 
a/rust/monovertex/src/sink.rs +++ b/rust/monovertex/src/sink.rs @@ -1,3 +1,4 @@ +use crate::config::config; use crate::error::{Error, Result}; use crate::message::Message; use crate::shared::connect_with_uds; @@ -13,9 +14,10 @@ pub mod proto { const RECONNECT_INTERVAL: u64 = 1000; const MAX_RECONNECT_ATTEMPTS: usize = 5; const SINK_SOCKET: &str = "/var/run/numaflow/sink.sock"; -pub(crate) const FB_SINK_SOCKET: &str = "/var/run/numaflow/fb-sink.sock"; +const FB_SINK_SOCKET: &str = "/var/run/numaflow/fb-sink.sock"; + const SINK_SERVER_INFO_FILE: &str = "/var/run/numaflow/sinker-server-info"; -pub(crate) const FB_SINK_SERVER_INFO_FILE: &str = "/var/run/numaflow/fb-sinker-server-info"; +const FB_SINK_SERVER_INFO_FILE: &str = "/var/run/numaflow/fb-sinker-server-info"; /// SinkConfig is the configuration for the sink server. #[derive(Debug, Clone)] @@ -30,7 +32,18 @@ impl Default for SinkConfig { SinkConfig { socket_path: SINK_SOCKET.to_string(), server_info_file: SINK_SERVER_INFO_FILE.to_string(), - max_message_size: 64 * 1024 * 1024, // 64 MB + max_message_size: config().grpc_max_message_size, + } + } +} + +impl SinkConfig { + /// default config for fallback sink + pub(crate) fn fallback_default() -> Self { + SinkConfig { + max_message_size: config().grpc_max_message_size, + socket_path: FB_SINK_SOCKET.to_string(), + server_info_file: FB_SINK_SERVER_INFO_FILE.to_string(), } } } @@ -60,6 +73,7 @@ impl SinkClient { } pub(crate) async fn sink_fn(&mut self, messages: Vec) -> Result { + // create a channel with at least size let (tx, rx) = tokio::sync::mpsc::channel(if messages.is_empty() { 1 } else { From b21e0bf5b2c288a76ad12fc323721f5d99bc14d4 Mon Sep 17 00:00:00 2001 From: Sreekanth Date: Wed, 21 Aug 2024 21:59:38 +0530 Subject: [PATCH 019/188] chore: make nightly build pipeline faster (#1958) Signed-off-by: Sreekanth Signed-off-by: Derek Wang Signed-off-by: Vigith Maurice Co-authored-by: Vigith Maurice Co-authored-by: Derek Wang --- .github/workflows/ci.yaml | 37 
++++++++++++++ .github/workflows/nightly-build.yml | 79 +++++++++++++++++++++++++++-- .github/workflows/release.yml | 67 ++++++++++++++++++++++-- Dockerfile | 51 ++++++------------- Makefile | 32 ++++++++++-- rust/rust-toolchain.toml | 3 ++ 6 files changed, 224 insertions(+), 45 deletions(-) create mode 100644 rust/rust-toolchain.toml diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index e27feac027..eb48e60d61 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -143,9 +143,42 @@ jobs: - run: make lint - run: git diff --exit-code + build-rust-amd64: + runs-on: ubuntu-22.04 + defaults: + run: + working-directory: ./rust + steps: + - uses: actions/checkout@v4 + - name: Setup Rust toolchain + uses: actions-rust-lang/setup-rust-toolchain@v1.9.0 + with: + rustflags: '' + - name: Configure sccache + run: | + echo "RUSTC_WRAPPER=sccache" >> $GITHUB_ENV + echo "SCCACHE_GHA_ENABLED=true" >> $GITHUB_ENV + - name: Run sccache-cache + uses: mozilla-actions/sccache-action@v0.0.5 + - name: Install dependencies + run: sudo apt-get install -y protobuf-compiler + - name: Build binary + run: RUSTFLAGS='-C target-feature=+crt-static' cargo build --release --target x86_64-unknown-linux-gnu + - name: Rename binary + run: cp -pv target/x86_64-unknown-linux-gnu/release/numaflow ./numaflow-rs-linux-amd64 + - name: List files + run: pwd && ls -al && file ./numaflow-rs-linux-amd64 + - name: Upload numaflow binary + uses: actions/upload-artifact@v4 + with: + name: numaflow-rs-linux-amd64 + path: rust/numaflow-rs-linux-amd64 + if-no-files-found: error + e2e-tests: name: E2E Tests runs-on: ubuntu-latest + needs: [ build-rust-amd64 ] timeout-minutes: 20 strategy: fail-fast: false @@ -185,6 +218,10 @@ jobs: with: path: ui/node_modules key: ${{ runner.os }}-node-dep-v1-${{ hashFiles('**/yarn.lock') }} + - name: Download Rust amd64 binaries + uses: actions/download-artifact@v4 + with: + name: numaflow-rs-linux-amd64 - name: Install k3d run: curl -sfL 
https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash & - name: Create a cluster diff --git a/.github/workflows/nightly-build.yml b/.github/workflows/nightly-build.yml index 0b1a6353d1..3a671b2275 100644 --- a/.github/workflows/nightly-build.yml +++ b/.github/workflows/nightly-build.yml @@ -15,7 +15,7 @@ defaults: shell: bash jobs: - build-binaries: + build-go-binaries: runs-on: ubuntu-20.04 if: github.repository == 'numaproj/numaflow' name: Build binaries @@ -40,9 +40,70 @@ jobs: name: binaries path: dist + build-rust-amd64: + runs-on: ubuntu-22.04 + defaults: + run: + working-directory: ./rust + steps: + - uses: actions/checkout@v4 + - name: Setup Rust toolchain + uses: actions-rust-lang/setup-rust-toolchain@v1.9.0 + with: + rustflags: '' + - name: Configure sccache + run: | + echo "RUSTC_WRAPPER=sccache" >> $GITHUB_ENV + echo "SCCACHE_GHA_ENABLED=true" >> $GITHUB_ENV + - name: Run sccache-cache + uses: mozilla-actions/sccache-action@v0.0.5 + - name: Install dependencies + run: sudo apt-get install -y protobuf-compiler + - name: Build binary + run: RUSTFLAGS='-C target-feature=+crt-static' cargo build --release --target x86_64-unknown-linux-gnu + - name: Rename binary + run: cp -pv target/x86_64-unknown-linux-gnu/release/numaflow numaflow-rs-linux-amd64 + - name: Upload numaflow binary + uses: actions/upload-artifact@v3 + with: + name: numaflow-rs-linux-amd64 + path: rust/numaflow-rs-linux-amd64 + + build-rust-arm64: + runs-on: ubuntu-22.04 + defaults: + run: + working-directory: ./rust + steps: + - uses: actions/checkout@v4 + - name: Update Rust Toolchain Target + run: | + echo "targets = ['aarch64-unknown-linux-gnu']" >> rust-toolchain.toml + - name: Setup Rust toolchain + uses: actions-rust-lang/setup-rust-toolchain@v1.9.0 + with: + rustflags: '' + - name: Configure sccache + run: | + echo "RUSTC_WRAPPER=sccache" >> $GITHUB_ENV + echo "SCCACHE_GHA_ENABLED=true" >> $GITHUB_ENV + - name: Run sccache-cache + uses: 
mozilla-actions/sccache-action@v0.0.5 + - name: Install dependenices + run: sudo apt-get install -y gcc-aarch64-linux-gnu protobuf-compiler + - name: Build binary + run: RUSTFLAGS='-C target-feature=+crt-static -C linker=aarch64-linux-gnu-gcc' cargo build --release --target aarch64-unknown-linux-gnu + - name: Rename binary + run: cp -pv target/aarch64-unknown-linux-gnu/release/numaflow numaflow-rs-linux-arm64 + - name: Upload numaflow binary + uses: actions/upload-artifact@v3 + with: + name: numaflow-rs-linux-arm64 + path: rust/numaflow-rs-linux-arm64 + build-push-linux-multi: name: Build & push linux/amd64 and linux/arm64 - needs: [ build-binaries ] + needs: [ build-go-binaries, build-rust-amd64, build-rust-arm64] runs-on: ubuntu-20.04 if: github.repository == 'numaproj/numaflow' strategy: @@ -63,12 +124,24 @@ jobs: - name: Set up Docker Buildx uses: docker/setup-buildx-action@v2 - - name: Download binaries + - name: Download Go binaries uses: actions/download-artifact@v3 with: name: binaries path: dist/ + - name: Download Rust amd64 binaries + uses: actions/download-artifact@v3 + with: + name: numaflow-rs-linux-amd64 + path: dist/numaflow-rs-linux-amd64 + + - name: Download Rust arm64 binaries + uses: actions/download-artifact@v3 + with: + name: numaflow-rs-linux-arm64 + path: dist/numaflow-rs-linux-arm64 + - name: Registry Login uses: docker/login-action@v2 with: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index e5b597014d..5c17591fe8 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -13,7 +13,7 @@ defaults: shell: bash jobs: - build-binaries: + build-go-binaries: runs-on: ubuntu-20.04 if: github.repository == 'numaproj/numaflow' name: Build binaries @@ -38,9 +38,58 @@ jobs: name: binaries path: dist + build-rust-amd64: + runs-on: ubuntu-22.04 + defaults: + run: + working-directory: ./rust + steps: + - uses: actions/checkout@v4 + - name: Setup Rust toolchain + uses: 
actions-rust-lang/setup-rust-toolchain@v1.9.0 + with: + rustflags: '' + - name: Install dependencies + run: sudo apt-get install -y protobuf-compiler + - name: Build binary + run: RUSTFLAGS='-C target-feature=+crt-static' cargo build --release --target x86_64-unknown-linux-gnu + - name: Rename binary + run: cp -pv target/x86_64-unknown-linux-gnu/release/numaflow numaflow-rs-linux-amd64 + - name: Upload numaflow binary + uses: actions/upload-artifact@v3 + with: + name: numaflow-rs-linux-amd64 + path: rust/numaflow-rs-linux-amd64 + + build-rust-arm64: + runs-on: ubuntu-22.04 + defaults: + run: + working-directory: ./rust + steps: + - uses: actions/checkout@v4 + - name: Update Rust Toolchain Target + run: | + echo "targets = ['aarch64-unknown-linux-gnu']" >> rust-toolchain.toml + - name: Setup Rust toolchain + uses: actions-rust-lang/setup-rust-toolchain@v1.9.0 + with: + rustflags: '' + - name: Install dependenices + run: sudo apt-get install -y gcc-aarch64-linux-gnu protobuf-compiler + - name: Build binary + run: RUSTFLAGS='-C target-feature=+crt-static -C linker=aarch64-linux-gnu-gcc' cargo build --release --target aarch64-unknown-linux-gnu + - name: Rename binary + run: cp -pv target/aarch64-unknown-linux-gnu/release/numaflow numaflow-rs-linux-arm64 + - name: Upload numaflow binary + uses: actions/upload-artifact@v3 + with: + name: numaflow-rs-linux-arm64 + path: rust/numaflow-rs-linux-arm64 + build-push-linux-multi: name: Build & push linux/amd64 and linux/arm64 - needs: [ build-binaries ] + needs: [ build-go-binaries, build-rust-amd64, build-rust-arm64] runs-on: ubuntu-20.04 if: github.repository == 'numaproj/numaflow' strategy: @@ -61,12 +110,24 @@ jobs: - name: Set up Docker Buildx uses: docker/setup-buildx-action@v2 - - name: Download binaries + - name: Download Go binaries uses: actions/download-artifact@v3 with: name: binaries path: dist/ + - name: Download Rust amd64 binaries + uses: actions/download-artifact@v3 + with: + name: numaflow-rs-linux-amd64 + 
path: dist/numaflow-rs-linux-amd64 + + - name: Download Rust arm64 binaries + uses: actions/download-artifact@v3 + with: + name: numaflow-rs-linux-arm64 + path: dist/numaflow-rs-linux-arm64 + - name: Registry Login uses: docker/login-action@v2 with: diff --git a/Dockerfile b/Dockerfile index c5ac53cbb1..efee5a8fd2 100644 --- a/Dockerfile +++ b/Dockerfile @@ -8,41 +8,30 @@ ARG ARCH RUN apk update && apk upgrade && \ apk add ca-certificates && \ apk --no-cache add tzdata -ARG ARCH COPY dist/numaflow-linux-${ARCH} /bin/numaflow +COPY dist/numaflow-rs-linux-${ARCH} /bin/numaflow-rs RUN chmod +x /bin/numaflow +RUN chmod +x /bin/numaflow-rs #################################################################################################### -# extension base +# Rust binary #################################################################################################### -FROM rust:1.80-bookworm AS extension-base +FROM lukemathwalker/cargo-chef:latest-rust-1.80 AS chef ARG TARGETPLATFORM - -RUN apt-get update && apt-get install protobuf-compiler -y - -RUN cargo new numaflow -# Create a new empty shell project WORKDIR /numaflow +RUN apt-get update && apt-get install -y protobuf-compiler -RUN cargo new servesink -COPY ./rust/servesink/Cargo.toml ./servesink/ - -RUN cargo new backoff -COPY ./rust/backoff/Cargo.toml ./backoff/ - -RUN cargo new numaflow-models -COPY ./rust/numaflow-models/Cargo.toml ./numaflow-models/ -RUN cargo new monovertex -COPY ./rust/monovertex/Cargo.toml ./monovertex/ +FROM chef AS planner +COPY ./rust/ . 
+RUN cargo chef prepare --recipe-path recipe.json -RUN cargo new serving -COPY ./rust/serving/Cargo.toml ./serving/Cargo.toml - -# Copy all Cargo.toml and Cargo.lock files for caching dependencies -COPY ./rust/Cargo.toml ./rust/Cargo.lock ./ +FROM chef AS rust-builder +ARG TARGETPLATFORM +ARG ARCH +COPY --from=planner /numaflow/recipe.json recipe.json # Build to cache dependencies RUN --mount=type=cache,target=/usr/local/cargo/registry \ @@ -52,18 +41,10 @@ RUN --mount=type=cache,target=/usr/local/cargo/registry \ "linux/arm64") TARGET="aarch64-unknown-linux-gnu" ;; \ *) echo "Unsupported platform: ${TARGETPLATFORM}" && exit 1 ;; \ esac && \ - mkdir -p src/bin && echo "fn main() {}" > src/bin/main.rs && \ - RUSTFLAGS='-C target-feature=+crt-static' cargo build --workspace --all --release --target ${TARGET} + RUSTFLAGS='-C target-feature=+crt-static' cargo chef cook --workspace --release --target ${TARGET} --recipe-path recipe.json # Copy the actual source code files of the main project and the subprojects -COPY ./rust/src ./src -COPY ./rust/servesink/src ./servesink/src -COPY ./rust/backoff/src ./backoff/src -COPY ./rust/numaflow-models/src ./numaflow-models/src -COPY ./rust/serving/src ./serving/src -COPY ./rust/monovertex/src ./monovertex/src -COPY ./rust/monovertex/build.rs ./monovertex/build.rs -COPY ./rust/monovertex/proto ./monovertex/proto +COPY ./rust/ . 
# Build the real binaries RUN --mount=type=cache,target=/usr/local/cargo/registry \ @@ -73,7 +54,6 @@ RUN --mount=type=cache,target=/usr/local/cargo/registry \ "linux/arm64") TARGET="aarch64-unknown-linux-gnu" ;; \ *) echo "Unsupported platform: ${TARGETPLATFORM}" && exit 1 ;; \ esac && \ - touch src/bin/main.rs && \ RUSTFLAGS='-C target-feature=+crt-static' cargo build --workspace --all --release --target ${TARGET} && \ cp -pv target/${TARGET}/release/numaflow /root/numaflow @@ -82,13 +62,14 @@ RUN --mount=type=cache,target=/usr/local/cargo/registry \ #################################################################################################### ARG BASE_IMAGE FROM ${BASE_IMAGE} AS numaflow +ARG ARCH COPY --from=base /usr/share/zoneinfo /usr/share/zoneinfo COPY --from=base /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt COPY --from=base /bin/numaflow /bin/numaflow +COPY --from=base /bin/numaflow-rs /bin/numaflow-rs COPY ui/build /ui/build -COPY --from=extension-base /root/numaflow /bin/numaflow-rs COPY ./rust/serving/config config ENTRYPOINT [ "/bin/numaflow" ] diff --git a/Makefile b/Makefile index b616d50621..d31c9095e6 100644 --- a/Makefile +++ b/Makefile @@ -2,6 +2,16 @@ SHELL:=/bin/bash PACKAGE=github.com/numaproj/numaflow CURRENT_DIR=$(shell pwd) + +HOST_ARCH=$(shell uname -m) +# Github actions instances are x86_64 +ifeq ($(HOST_ARCH),x86_64) + HOST_ARCH=amd64 +endif +ifeq ($(HOST_ARCH),aarch64) + HOST_ARCH=arm64 +endif + DIST_DIR=${CURRENT_DIR}/dist BINARY_NAME:=numaflow DOCKERFILE:=Dockerfile @@ -78,7 +88,7 @@ dist/$(BINARY_NAME): go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/$(BINARY_NAME) ./cmd dist/e2eapi: - CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/e2eapi ./test/e2e-api + CGO_ENABLED=0 GOOS=linux go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/e2eapi ./test/e2e-api dist/$(BINARY_NAME)-%: CGO_ENABLED=0 $(GOARGS) go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/$(BINARY_NAME)-$* 
./cmd @@ -162,13 +172,27 @@ ui-test: ui-build ./hack/test-ui.sh .PHONY: image -image: clean ui-build dist/$(BINARY_NAME)-linux-amd64 - DOCKER_BUILDKIT=1 $(DOCKER) build --build-arg "ARCH=amd64" --build-arg "BASE_IMAGE=$(DEV_BASE_IMAGE)" $(DOCKER_BUILD_ARGS) -t $(IMAGE_NAMESPACE)/$(BINARY_NAME):$(VERSION) --target $(BINARY_NAME) -f $(DOCKERFILE) . +image: clean ui-build dist/$(BINARY_NAME)-linux-$(HOST_ARCH) +ifdef GITHUB_ACTIONS + # The binary will be built in a separate Github Actions job + cp -pv numaflow-rs-linux-amd64 dist/numaflow-rs-linux-amd64 +else + $(MAKE) build-rust-in-docker +endif + DOCKER_BUILDKIT=1 $(DOCKER) build --build-arg "BASE_IMAGE=$(DEV_BASE_IMAGE)" $(DOCKER_BUILD_ARGS) -t $(IMAGE_NAMESPACE)/$(BINARY_NAME):$(VERSION) --target $(BINARY_NAME) -f $(DOCKERFILE) . @if [[ "$(DOCKER_PUSH)" = "true" ]]; then $(DOCKER) push $(IMAGE_NAMESPACE)/$(BINARY_NAME):$(VERSION); fi ifdef IMAGE_IMPORT_CMD $(IMAGE_IMPORT_CMD) $(IMAGE_NAMESPACE)/$(BINARY_NAME):$(VERSION) endif +.PHONY: build-rust-in-docker +build-rust-in-docker: + mkdir -p dist + -$(DOCKER) container ls --all --filter=ancestor='$(IMAGE_NAMESPACE)/$(BINARY_NAME)-rust-builder:$(VERSION)' --format "{{.ID}}" | xargs docker rm + -$(DOCKER) image rm $(IMAGE_NAMESPACE)/$(BINARY_NAME)-rust-builder:$(VERSION) + DOCKER_BUILDKIT=1 $(DOCKER) build --build-arg "BASE_IMAGE=$(DEV_BASE_IMAGE)" $(DOCKER_BUILD_ARGS) -t $(IMAGE_NAMESPACE)/$(BINARY_NAME)-rust-builder:$(VERSION) --target rust-builder -f $(DOCKERFILE) . 
+ export CTR=$$(docker create $(IMAGE_NAMESPACE)/$(BINARY_NAME)-rust-builder:$(VERSION)) && $(DOCKER) cp $$CTR:/root/numaflow dist/numaflow-rs-linux-$(HOST_ARCH) && $(DOCKER) rm $$CTR && $(DOCKER) image rm $(IMAGE_NAMESPACE)/$(BINARY_NAME)-rust-builder:$(VERSION) + image-multi: ui-build set-qemu dist/$(BINARY_NAME)-linux-arm64.gz dist/$(BINARY_NAME)-linux-amd64.gz $(DOCKER) buildx build --sbom=false --provenance=false --build-arg "BASE_IMAGE=$(RELEASE_BASE_IMAGE)" $(DOCKER_BUILD_ARGS) -t $(IMAGE_NAMESPACE)/$(BINARY_NAME):$(VERSION) --target $(BINARY_NAME) --platform linux/amd64,linux/arm64 --file $(DOCKERFILE) ${PUSH_OPTION} . @@ -232,7 +256,7 @@ start: image .PHONY: e2eapi-image e2eapi-image: clean dist/e2eapi - DOCKER_BUILDKIT=1 $(DOCKER) build . --build-arg "ARCH=amd64" --target e2eapi --tag $(IMAGE_NAMESPACE)/e2eapi:$(VERSION) --build-arg VERSION="$(VERSION)" + DOCKER_BUILDKIT=1 $(DOCKER) build . --target e2eapi --tag $(IMAGE_NAMESPACE)/e2eapi:$(VERSION) --build-arg VERSION="$(VERSION)" @if [[ "$(DOCKER_PUSH)" = "true" ]]; then $(DOCKER) push $(IMAGE_NAMESPACE)/e2eapi:$(VERSION); fi ifdef IMAGE_IMPORT_CMD $(IMAGE_IMPORT_CMD) $(IMAGE_NAMESPACE)/e2eapi:$(VERSION) diff --git a/rust/rust-toolchain.toml b/rust/rust-toolchain.toml new file mode 100644 index 0000000000..a5b1f06904 --- /dev/null +++ b/rust/rust-toolchain.toml @@ -0,0 +1,3 @@ +[toolchain] +profile = "default" +channel = "1.80" From 3f735f76425a15d8670f145e69e3caa044037a2c Mon Sep 17 00:00:00 2001 From: Vedant Gupta <49195734+veds-g@users.noreply.github.com> Date: Thu, 22 Aug 2024 00:22:53 +0530 Subject: [PATCH 020/188] fix: adding not available for negative processing rates (#1983) Signed-off-by: veds-g --- .../partials/ProcessingRates/index.tsx | 10 +++++++--- .../partials/Graph/partials/CustomNode/index.tsx | 16 ++++++++++++---- .../partials/Graph/partials/CustomNode/style.css | 3 +-- 3 files changed, 20 insertions(+), 9 deletions(-) diff --git 
a/ui/src/components/common/SlidingSidebar/partials/VertexDetails/partials/ProcessingRates/index.tsx b/ui/src/components/common/SlidingSidebar/partials/VertexDetails/partials/ProcessingRates/index.tsx index 87b7c0574f..dd8f4bfd84 100644 --- a/ui/src/components/common/SlidingSidebar/partials/VertexDetails/partials/ProcessingRates/index.tsx +++ b/ui/src/components/common/SlidingSidebar/partials/VertexDetails/partials/ProcessingRates/index.tsx @@ -56,6 +56,10 @@ export function ProcessingRates({ setFoundRates(rates); }, [vertexMetrics, pipelineId, vertexId]); + const formatRate = (rate?: number): string => { + return rate !== undefined && rate >= 0 ? `${rate}/sec` : "Not Available"; + }; + return ( {metric.partition} )} - {metric.oneM}/sec - {metric.fiveM}/sec - {metric.fifteenM}/sec + {formatRate(metric.oneM)} + {formatRate(metric.fiveM)} + {formatRate(metric.fifteenM)}
))} diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/CustomNode/index.tsx b/ui/src/components/pages/Pipeline/partials/Graph/partials/CustomNode/index.tsx index 64718e4f12..a604a36e14 100644 --- a/ui/src/components/pages/Pipeline/partials/Graph/partials/CustomNode/index.tsx +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/CustomNode/index.tsx @@ -298,6 +298,10 @@ const CustomNode: FC = ({ ); }, []); + const formatRate = (rate?: number): string => { + return rate !== undefined && rate >= 0 ? `${rate}/sec` : "Not Available"; + }; + return ( = ({ title={ Processing Rates - 1 min: {data?.vertexMetrics?.ratePerMin}/sec - 5 min: {data?.vertexMetrics?.ratePerFiveMin}/sec - 15 min: {data?.vertexMetrics?.ratePerFifteenMin}/sec + 1 min: {formatRate(data?.vertexMetrics?.ratePerMin)} + + 5 min: {formatRate(data?.vertexMetrics?.ratePerFiveMin)} + + + 15 min: {formatRate(data?.vertexMetrics?.ratePerFifteenMin)} + } arrow placement={"bottom-end"} > - {data?.vertexMetrics?.ratePerMin}/sec + {formatRate(data?.vertexMetrics?.ratePerMin)} diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/CustomNode/style.css b/ui/src/components/pages/Pipeline/partials/Graph/partials/CustomNode/style.css index 9e9b094718..649d647f8e 100644 --- a/ui/src/components/pages/Pipeline/partials/Graph/partials/CustomNode/style.css +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/CustomNode/style.css @@ -1,6 +1,6 @@ .node-rate { display: flex; - width: 9rem; + min-width: 9rem; height: 2.2rem; border-radius: 2rem; background: #d1dee9; @@ -15,7 +15,6 @@ position: absolute; bottom: -12.5%; right: 8%; - text-transform: lowercase; } .node-pods { From b72be93b72e7d5aaacc3b5445f0c0df03d984fd4 Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Wed, 21 Aug 2024 14:47:23 -0700 Subject: [PATCH 021/188] chore: enable current/desired replicas (#1987) Signed-off-by: Derek Wang --- config/advanced-install/minimal-crds.yaml | 6 ++++++ 
.../base/crds/full/numaflow.numaproj.io_monovertices.yaml | 6 ++++++ .../crds/minimal/numaflow.numaproj.io_monovertices.yaml | 6 ++++++ config/install.yaml | 6 ++++++ config/namespace-install.yaml | 6 ++++++ pkg/apis/numaflow/v1alpha1/generated.proto | 2 ++ pkg/apis/numaflow/v1alpha1/mono_vertex_types.go | 2 ++ 7 files changed, 34 insertions(+) diff --git a/config/advanced-install/minimal-crds.yaml b/config/advanced-install/minimal-crds.yaml index f06a75b3f4..9d27719000 100644 --- a/config/advanced-install/minimal-crds.yaml +++ b/config/advanced-install/minimal-crds.yaml @@ -69,6 +69,12 @@ spec: - jsonPath: .status.phase name: Phase type: string + - jsonPath: .spec.replicas + name: Desired + type: string + - jsonPath: .status.replicas + name: Current + type: string - jsonPath: .metadata.creationTimestamp name: Age type: date diff --git a/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml b/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml index 15d016ba47..ece346f242 100644 --- a/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml @@ -21,6 +21,12 @@ spec: - jsonPath: .status.phase name: Phase type: string + - jsonPath: .spec.replicas + name: Desired + type: string + - jsonPath: .status.replicas + name: Current + type: string - jsonPath: .metadata.creationTimestamp name: Age type: date diff --git a/config/base/crds/minimal/numaflow.numaproj.io_monovertices.yaml b/config/base/crds/minimal/numaflow.numaproj.io_monovertices.yaml index 10c877b13b..ac33f527a9 100644 --- a/config/base/crds/minimal/numaflow.numaproj.io_monovertices.yaml +++ b/config/base/crds/minimal/numaflow.numaproj.io_monovertices.yaml @@ -17,6 +17,12 @@ spec: - jsonPath: .status.phase name: Phase type: string + - jsonPath: .spec.replicas + name: Desired + type: string + - jsonPath: .status.replicas + name: Current + type: string - jsonPath: .metadata.creationTimestamp name: Age type: date diff --git 
a/config/install.yaml b/config/install.yaml index 7905eb6e56..b8778fdb89 100644 --- a/config/install.yaml +++ b/config/install.yaml @@ -2633,6 +2633,12 @@ spec: - jsonPath: .status.phase name: Phase type: string + - jsonPath: .spec.replicas + name: Desired + type: string + - jsonPath: .status.replicas + name: Current + type: string - jsonPath: .metadata.creationTimestamp name: Age type: date diff --git a/config/namespace-install.yaml b/config/namespace-install.yaml index 5e5b823e35..c265de29f4 100644 --- a/config/namespace-install.yaml +++ b/config/namespace-install.yaml @@ -2633,6 +2633,12 @@ spec: - jsonPath: .status.phase name: Phase type: string + - jsonPath: .spec.replicas + name: Desired + type: string + - jsonPath: .status.replicas + name: Current + type: string - jsonPath: .metadata.creationTimestamp name: Age type: date diff --git a/pkg/apis/numaflow/v1alpha1/generated.proto b/pkg/apis/numaflow/v1alpha1/generated.proto index 09b1af32c1..8f12a27eca 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.proto +++ b/pkg/apis/numaflow/v1alpha1/generated.proto @@ -844,6 +844,8 @@ message Metadata { // +kubebuilder:subresource:status // +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector // +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` +// +kubebuilder:printcolumn:name="Desired",type=string,JSONPath=`.spec.replicas` +// +kubebuilder:printcolumn:name="Current",type=string,JSONPath=`.status.replicas` // +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` // +kubebuilder:printcolumn:name="Reason",type=string,JSONPath=`.status.reason` // +kubebuilder:printcolumn:name="Message",type=string,JSONPath=`.status.message` diff --git a/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go index 4d835e9c2c..5ba6a78c12 100644 --- a/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go +++ 
b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go @@ -54,6 +54,8 @@ const ( // +kubebuilder:subresource:status // +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector // +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` +// +kubebuilder:printcolumn:name="Desired",type=string,JSONPath=`.spec.replicas` +// +kubebuilder:printcolumn:name="Current",type=string,JSONPath=`.status.replicas` // +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` // +kubebuilder:printcolumn:name="Reason",type=string,JSONPath=`.status.reason` // +kubebuilder:printcolumn:name="Message",type=string,JSONPath=`.status.message` From 6918e6f47e9309173dd67e6fc0c105d2cd9814f2 Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Wed, 21 Aug 2024 19:26:48 -0700 Subject: [PATCH 022/188] fix: do not pass scale info to MonoVertex (#1990) --- pkg/apis/numaflow/v1alpha1/generated.pb.go | 957 +++++++++--------- .../numaflow/v1alpha1/mono_vertex_types.go | 4 +- .../v1alpha1/mono_vertex_types_test.go | 88 ++ .../numaflow/v1alpha1/openapi_generated.go | 3 +- pkg/apis/numaflow/v1alpha1/sink_test.go | 6 +- .../numaflow/v1alpha1/user_defined_sink.go | 2 +- .../numaflow/v1alpha1/vertex_types_test.go | 2 +- .../v1alpha1/zz_generated.deepcopy.go | 6 +- pkg/reconciler/vertex/controller_test.go | 2 +- 9 files changed, 585 insertions(+), 485 deletions(-) diff --git a/pkg/apis/numaflow/v1alpha1/generated.pb.go b/pkg/apis/numaflow/v1alpha1/generated.pb.go index bd85ab3af0..0b8823d5d0 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.pb.go +++ b/pkg/apis/numaflow/v1alpha1/generated.pb.go @@ -2704,471 +2704,471 @@ func init() { } var fileDescriptor_9d0d1b17d3865563 = []byte{ - // 7423 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x5d, 0x6c, 0x24, 0xc7, - 0x75, 0xae, 0xe6, 0x8f, 0x33, 0x73, 0x86, 0xe4, 0xee, 0xd6, 0x4a, 0x2b, 0xee, 0x6a, 0xb5, 
0x5c, - 0xb7, 0xae, 0x74, 0xd7, 0xd7, 0x36, 0x79, 0xc5, 0xab, 0x3f, 0xfb, 0xda, 0x96, 0x38, 0xe4, 0x92, - 0x4b, 0x2d, 0xb9, 0x4b, 0x9f, 0x21, 0x57, 0xb2, 0x75, 0x6d, 0xdd, 0x66, 0x77, 0x71, 0xd8, 0x62, - 0x4f, 0xf7, 0xa8, 0xbb, 0x87, 0xbb, 0x94, 0xaf, 0xe1, 0xbf, 0x07, 0xe9, 0x22, 0x09, 0x12, 0xf8, - 0xc9, 0x40, 0xe0, 0x04, 0x09, 0x02, 0xf8, 0xc1, 0x70, 0x1e, 0x02, 0x28, 0x0f, 0x01, 0xf2, 0x07, - 0x04, 0x89, 0x13, 0xe4, 0xc7, 0x0f, 0x01, 0xa2, 0x20, 0x00, 0x11, 0x33, 0xc8, 0x43, 0x12, 0xc4, - 0x30, 0x62, 0x20, 0xb6, 0x17, 0x06, 0x1c, 0xd4, 0x5f, 0xff, 0x4d, 0xcf, 0x2e, 0x39, 0x4d, 0xae, - 0x56, 0x89, 0xde, 0xba, 0xab, 0x4e, 0x7d, 0xa7, 0xfa, 0xd4, 0xcf, 0x39, 0x75, 0xea, 0x54, 0x35, - 0x2c, 0xb6, 0xad, 0x60, 0xab, 0xb7, 0x31, 0x65, 0xb8, 0x9d, 0x69, 0xa7, 0xd7, 0xd1, 0xbb, 0x9e, - 0xfb, 0x1a, 0x7f, 0xd8, 0xb4, 0xdd, 0x9b, 0xd3, 0xdd, 0xed, 0xf6, 0xb4, 0xde, 0xb5, 0xfc, 0x28, - 0x65, 0xe7, 0x49, 0xdd, 0xee, 0x6e, 0xe9, 0x4f, 0x4e, 0xb7, 0xa9, 0x43, 0x3d, 0x3d, 0xa0, 0xe6, - 0x54, 0xd7, 0x73, 0x03, 0x97, 0x3c, 0x1b, 0x01, 0x4d, 0x29, 0xa0, 0x29, 0x55, 0x6c, 0xaa, 0xbb, - 0xdd, 0x9e, 0x62, 0x40, 0x51, 0x8a, 0x02, 0x3a, 0xf7, 0x91, 0x58, 0x0d, 0xda, 0x6e, 0xdb, 0x9d, - 0xe6, 0x78, 0x1b, 0xbd, 0x4d, 0xfe, 0xc6, 0x5f, 0xf8, 0x93, 0xe0, 0x73, 0x4e, 0xdb, 0x7e, 0xce, - 0x9f, 0xb2, 0x5c, 0x56, 0xad, 0x69, 0xc3, 0xf5, 0xe8, 0xf4, 0x4e, 0x5f, 0x5d, 0xce, 0x3d, 0x15, - 0xd1, 0x74, 0x74, 0x63, 0xcb, 0x72, 0xa8, 0xb7, 0xab, 0xbe, 0x65, 0xda, 0xa3, 0xbe, 0xdb, 0xf3, - 0x0c, 0x7a, 0xa8, 0x52, 0xfe, 0x74, 0x87, 0x06, 0x7a, 0x16, 0xaf, 0xe9, 0x41, 0xa5, 0xbc, 0x9e, - 0x13, 0x58, 0x9d, 0x7e, 0x36, 0xcf, 0xdc, 0xad, 0x80, 0x6f, 0x6c, 0xd1, 0x8e, 0x9e, 0x2e, 0xa7, - 0xfd, 0x5d, 0x1d, 0x4e, 0xcf, 0x6e, 0xf8, 0x81, 0xa7, 0x1b, 0xc1, 0xaa, 0x6b, 0xae, 0xd1, 0x4e, - 0xd7, 0xd6, 0x03, 0x4a, 0xb6, 0xa1, 0xc6, 0xea, 0x66, 0xea, 0x81, 0x3e, 0x51, 0xb8, 0x58, 0xb8, - 0xd4, 0x98, 0x99, 0x9d, 0x1a, 0xb2, 0x2d, 0xa6, 0x56, 0x24, 0x50, 0x73, 0x74, 0x7f, 0x6f, 0xb2, - 0xa6, 0xde, 0x30, 0x64, 0x40, 
0xbe, 0x5e, 0x80, 0x51, 0xc7, 0x35, 0x69, 0x8b, 0xda, 0xd4, 0x08, - 0x5c, 0x6f, 0xa2, 0x78, 0xb1, 0x74, 0xa9, 0x31, 0xf3, 0xb9, 0xa1, 0x39, 0x66, 0x7c, 0xd1, 0xd4, - 0xb5, 0x18, 0x83, 0xcb, 0x4e, 0xe0, 0xed, 0x36, 0x1f, 0xfc, 0xce, 0xde, 0xe4, 0x03, 0xfb, 0x7b, - 0x93, 0xa3, 0xf1, 0x2c, 0x4c, 0xd4, 0x84, 0xac, 0x43, 0x23, 0x70, 0x6d, 0x26, 0x32, 0xcb, 0x75, - 0xfc, 0x89, 0x12, 0xaf, 0xd8, 0x85, 0x29, 0x21, 0x6d, 0xc6, 0x7e, 0x8a, 0x75, 0x97, 0xa9, 0x9d, - 0x27, 0xa7, 0xd6, 0x42, 0xb2, 0xe6, 0x69, 0x09, 0xdc, 0x88, 0xd2, 0x7c, 0x8c, 0xe3, 0x10, 0x0a, - 0x27, 0x7c, 0x6a, 0xf4, 0x3c, 0x2b, 0xd8, 0x9d, 0x73, 0x9d, 0x80, 0xde, 0x0a, 0x26, 0xca, 0x5c, - 0xca, 0x4f, 0x64, 0x41, 0xaf, 0xba, 0x66, 0x2b, 0x49, 0xdd, 0x3c, 0xbd, 0xbf, 0x37, 0x79, 0x22, - 0x95, 0x88, 0x69, 0x4c, 0xe2, 0xc0, 0x49, 0xab, 0xa3, 0xb7, 0xe9, 0x6a, 0xcf, 0xb6, 0x5b, 0xd4, - 0xf0, 0x68, 0xe0, 0x4f, 0x54, 0xf8, 0x27, 0x5c, 0xca, 0xe2, 0xb3, 0xec, 0x1a, 0xba, 0x7d, 0x7d, - 0xe3, 0x35, 0x6a, 0x04, 0x48, 0x37, 0xa9, 0x47, 0x1d, 0x83, 0x36, 0x27, 0xe4, 0xc7, 0x9c, 0x5c, - 0x4a, 0x21, 0x61, 0x1f, 0x36, 0x59, 0x84, 0x53, 0x5d, 0xcf, 0x72, 0x79, 0x15, 0x6c, 0xdd, 0xf7, - 0xaf, 0xe9, 0x1d, 0x3a, 0x31, 0x72, 0xb1, 0x70, 0xa9, 0xde, 0x3c, 0x2b, 0x61, 0x4e, 0xad, 0xa6, - 0x09, 0xb0, 0xbf, 0x0c, 0xb9, 0x04, 0x35, 0x95, 0x38, 0x51, 0xbd, 0x58, 0xb8, 0x54, 0x11, 0x7d, - 0x47, 0x95, 0xc5, 0x30, 0x97, 0x2c, 0x40, 0x4d, 0xdf, 0xdc, 0xb4, 0x1c, 0x46, 0x59, 0xe3, 0x22, - 0x3c, 0x9f, 0xf5, 0x69, 0xb3, 0x92, 0x46, 0xe0, 0xa8, 0x37, 0x0c, 0xcb, 0x92, 0x17, 0x81, 0xf8, - 0xd4, 0xdb, 0xb1, 0x0c, 0x3a, 0x6b, 0x18, 0x6e, 0xcf, 0x09, 0x78, 0xdd, 0xeb, 0xbc, 0xee, 0xe7, - 0x64, 0xdd, 0x49, 0xab, 0x8f, 0x02, 0x33, 0x4a, 0x91, 0x17, 0xe0, 0xa4, 0x1c, 0x76, 0x91, 0x14, - 0x80, 0x23, 0x3d, 0xc8, 0x04, 0x89, 0xa9, 0x3c, 0xec, 0xa3, 0x26, 0x26, 0x9c, 0xd7, 0x7b, 0x81, - 0xdb, 0x61, 0x90, 0x49, 0xa6, 0x6b, 0xee, 0x36, 0x75, 0x26, 0x1a, 0x17, 0x0b, 0x97, 0x6a, 0xcd, - 0x8b, 0xfb, 0x7b, 0x93, 0xe7, 0x67, 0xef, 0x40, 0x87, 0x77, 0x44, 
0x21, 0xd7, 0xa1, 0x6e, 0x3a, - 0xfe, 0xaa, 0x6b, 0x5b, 0xc6, 0xee, 0xc4, 0x28, 0xaf, 0xe0, 0x93, 0xf2, 0x53, 0xeb, 0xf3, 0xd7, - 0x5a, 0x22, 0xe3, 0xf6, 0xde, 0xe4, 0xf9, 0xfe, 0xd9, 0x71, 0x2a, 0xcc, 0xc7, 0x08, 0x83, 0xac, - 0x70, 0xc0, 0x39, 0xd7, 0xd9, 0xb4, 0xda, 0x13, 0x63, 0xbc, 0x35, 0x2e, 0x0e, 0xe8, 0xd0, 0xf3, - 0xd7, 0x5a, 0x82, 0xae, 0x39, 0x26, 0xd9, 0x89, 0x57, 0x8c, 0x10, 0xce, 0x3d, 0x0f, 0xa7, 0xfa, - 0x46, 0x2d, 0x39, 0x09, 0xa5, 0x6d, 0xba, 0xcb, 0x27, 0xa5, 0x3a, 0xb2, 0x47, 0xf2, 0x20, 0x54, - 0x76, 0x74, 0xbb, 0x47, 0x27, 0x8a, 0x3c, 0x4d, 0xbc, 0x7c, 0xac, 0xf8, 0x5c, 0x41, 0xfb, 0xf5, - 0x12, 0x8c, 0xaa, 0xb9, 0xa0, 0x65, 0x39, 0xdb, 0xe4, 0x25, 0x28, 0xd9, 0x6e, 0x5b, 0xce, 0x68, - 0x1f, 0x1f, 0x7a, 0x7e, 0x59, 0x76, 0xdb, 0xcd, 0xea, 0xfe, 0xde, 0x64, 0x69, 0xd9, 0x6d, 0x23, - 0x43, 0x24, 0x06, 0x54, 0xb6, 0xf5, 0xcd, 0x6d, 0x9d, 0xd7, 0xa1, 0x31, 0xd3, 0x1c, 0x1a, 0xfa, - 0x2a, 0x43, 0x61, 0x75, 0x6d, 0xd6, 0xf7, 0xf7, 0x26, 0x2b, 0xfc, 0x15, 0x05, 0x36, 0x71, 0xa1, - 0xbe, 0x61, 0xeb, 0xc6, 0xf6, 0x96, 0x6b, 0xd3, 0x89, 0x52, 0x4e, 0x46, 0x4d, 0x85, 0x24, 0x1a, - 0x20, 0x7c, 0xc5, 0x88, 0x07, 0x31, 0x60, 0xa4, 0x67, 0xfa, 0x96, 0xb3, 0x2d, 0x67, 0xa7, 0xe7, - 0x87, 0xe6, 0xb6, 0x3e, 0xcf, 0xbf, 0x09, 0xf6, 0xf7, 0x26, 0x47, 0xc4, 0x33, 0x4a, 0x68, 0xed, - 0xfb, 0x0d, 0x18, 0x57, 0x8d, 0x74, 0x83, 0x7a, 0x01, 0xbd, 0x45, 0x2e, 0x42, 0xd9, 0x61, 0x83, - 0x86, 0x37, 0x72, 0x73, 0x54, 0xf6, 0xc9, 0x32, 0x1f, 0x2c, 0x3c, 0x87, 0xd5, 0x4c, 0x28, 0x5c, - 0x29, 0xf0, 0xe1, 0x6b, 0xd6, 0xe2, 0x30, 0xa2, 0x66, 0xe2, 0x19, 0x25, 0x34, 0x79, 0x05, 0xca, - 0xfc, 0xe3, 0x85, 0xa8, 0x3f, 0x31, 0x3c, 0x0b, 0xf6, 0xe9, 0x35, 0xf6, 0x05, 0xfc, 0xc3, 0x39, - 0x28, 0xeb, 0x8a, 0x3d, 0x73, 0x53, 0x0a, 0xf6, 0xe3, 0x39, 0x04, 0xbb, 0x20, 0xba, 0xe2, 0xfa, - 0xfc, 0x02, 0x32, 0x44, 0xf2, 0x8b, 0x05, 0x38, 0x65, 0xb8, 0x4e, 0xa0, 0x33, 0x23, 0x40, 0xa9, - 0xbf, 0x89, 0x0a, 0xe7, 0xf3, 0xe2, 0xd0, 0x7c, 0xe6, 0xd2, 0x88, 0xcd, 0x87, 0xd8, 0x6c, 0xde, - 0x97, 
0x8c, 0xfd, 0xbc, 0xc9, 0x2f, 0x17, 0xe0, 0x21, 0x36, 0xcb, 0xf6, 0x11, 0x73, 0xdd, 0x70, - 0xb4, 0xb5, 0x3a, 0xbb, 0xbf, 0x37, 0xf9, 0xd0, 0x52, 0x16, 0x33, 0xcc, 0xae, 0x03, 0xab, 0xdd, - 0x69, 0xbd, 0xdf, 0x60, 0xe0, 0x7a, 0xa7, 0x31, 0xb3, 0x7c, 0x94, 0x46, 0x48, 0xf3, 0x11, 0xd9, - 0x95, 0xb3, 0x6c, 0x2e, 0xcc, 0xaa, 0x05, 0xb9, 0x0c, 0xd5, 0x1d, 0xd7, 0xee, 0x75, 0xa8, 0x3f, - 0x51, 0xe3, 0x9a, 0xfb, 0x5c, 0xd6, 0x84, 0x7a, 0x83, 0x93, 0x34, 0x4f, 0x48, 0xf8, 0xaa, 0x78, - 0xf7, 0x51, 0x95, 0x25, 0x16, 0x8c, 0xd8, 0x56, 0xc7, 0x0a, 0x7c, 0xae, 0xd2, 0x1a, 0x33, 0x97, - 0x87, 0xfe, 0x2c, 0x31, 0x44, 0x97, 0x39, 0x98, 0x18, 0x35, 0xe2, 0x19, 0x25, 0x03, 0x36, 0x15, - 0xfa, 0x86, 0x6e, 0x0b, 0x95, 0xd7, 0x98, 0xf9, 0xe4, 0xf0, 0xc3, 0x86, 0xa1, 0x34, 0xc7, 0xe4, - 0x37, 0x55, 0xf8, 0x2b, 0x0a, 0x6c, 0xf2, 0x59, 0x18, 0x4f, 0xb4, 0xa6, 0x3f, 0xd1, 0xe0, 0xd2, - 0x79, 0x34, 0x4b, 0x3a, 0x21, 0x55, 0xf3, 0x8c, 0x04, 0x1b, 0x4f, 0xf4, 0x10, 0x1f, 0x53, 0x60, - 0xe4, 0x2a, 0xd4, 0x7c, 0xcb, 0xa4, 0x86, 0xee, 0xf9, 0x13, 0xa3, 0x07, 0x01, 0x3e, 0x29, 0x81, - 0x6b, 0x2d, 0x59, 0x0c, 0x43, 0x00, 0x32, 0x05, 0xd0, 0xd5, 0xbd, 0xc0, 0x12, 0x26, 0xe4, 0x18, - 0x37, 0x67, 0xc6, 0xf7, 0xf7, 0x26, 0x61, 0x35, 0x4c, 0xc5, 0x18, 0x05, 0xa3, 0x67, 0x65, 0x97, - 0x9c, 0x6e, 0x2f, 0xf0, 0x27, 0xc6, 0x2f, 0x96, 0x2e, 0xd5, 0x05, 0x7d, 0x2b, 0x4c, 0xc5, 0x18, - 0x05, 0xf9, 0x76, 0x01, 0x1e, 0x89, 0x5e, 0xfb, 0x07, 0xd9, 0x89, 0x23, 0x1f, 0x64, 0x93, 0xfb, - 0x7b, 0x93, 0x8f, 0xb4, 0x06, 0xb3, 0xc4, 0x3b, 0xd5, 0x47, 0x7b, 0x09, 0xc6, 0x66, 0x7b, 0xc1, - 0x96, 0xeb, 0x59, 0x6f, 0x70, 0x73, 0x98, 0x2c, 0x40, 0x25, 0xe0, 0x66, 0x8d, 0xd0, 0xcb, 0x8f, - 0x67, 0x89, 0x5a, 0x98, 0x98, 0x57, 0xe9, 0xae, 0xb2, 0x06, 0x84, 0x7e, 0x14, 0x66, 0x8e, 0x28, - 0xae, 0xfd, 0x5a, 0x01, 0xea, 0x4d, 0xdd, 0xb7, 0x0c, 0x06, 0x4f, 0xe6, 0xa0, 0xdc, 0xf3, 0xa9, - 0x77, 0x38, 0x50, 0x3e, 0x4b, 0xaf, 0xfb, 0xd4, 0x43, 0x5e, 0x98, 0x5c, 0x87, 0x5a, 0x57, 0xf7, - 0xfd, 0x9b, 0xae, 0x67, 0x4a, 0x4d, 0x73, 
0x40, 0x20, 0x61, 0xaf, 0xca, 0xa2, 0x18, 0x82, 0x68, - 0x0d, 0x88, 0x54, 0xad, 0xf6, 0xc3, 0x02, 0x9c, 0x6e, 0xf6, 0x36, 0x37, 0xa9, 0x27, 0xcd, 0x33, - 0x61, 0xf8, 0x10, 0x0a, 0x15, 0x8f, 0x9a, 0x96, 0x2f, 0xeb, 0x3e, 0x3f, 0x74, 0xd3, 0x21, 0x43, - 0x91, 0x76, 0x16, 0x97, 0x17, 0x4f, 0x40, 0x81, 0x4e, 0x7a, 0x50, 0x7f, 0x8d, 0x06, 0x7e, 0xe0, - 0x51, 0xbd, 0x23, 0xbf, 0xee, 0xca, 0xd0, 0xac, 0x5e, 0xa4, 0x41, 0x8b, 0x23, 0xc5, 0xcd, 0xba, - 0x30, 0x11, 0x23, 0x4e, 0xda, 0x1f, 0x56, 0x60, 0x74, 0xce, 0xed, 0x6c, 0x58, 0x0e, 0x35, 0x2f, - 0x9b, 0x6d, 0x4a, 0x5e, 0x85, 0x32, 0x35, 0xdb, 0x54, 0x7e, 0xed, 0xf0, 0x7a, 0x96, 0x81, 0x45, - 0xd6, 0x02, 0x7b, 0x43, 0x0e, 0x4c, 0x96, 0x61, 0x7c, 0xd3, 0x73, 0x3b, 0x62, 0xea, 0x5a, 0xdb, - 0xed, 0x4a, 0x53, 0xb1, 0xf9, 0xdf, 0xd4, 0x74, 0xb0, 0x90, 0xc8, 0xbd, 0xbd, 0x37, 0x09, 0xd1, - 0x1b, 0xa6, 0xca, 0x92, 0x97, 0x61, 0x22, 0x4a, 0x09, 0xc7, 0xf0, 0x1c, 0xb3, 0xab, 0xb9, 0xa9, - 0x50, 0x69, 0x9e, 0xdf, 0xdf, 0x9b, 0x9c, 0x58, 0x18, 0x40, 0x83, 0x03, 0x4b, 0x93, 0x37, 0x0b, - 0x70, 0x32, 0xca, 0x14, 0xf3, 0xaa, 0xb4, 0x10, 0x8e, 0x68, 0xc2, 0xe6, 0x0b, 0x90, 0x85, 0x14, - 0x0b, 0xec, 0x63, 0x4a, 0x16, 0x60, 0x34, 0x70, 0x63, 0xf2, 0xaa, 0x70, 0x79, 0x69, 0x6a, 0xc5, - 0xbc, 0xe6, 0x0e, 0x94, 0x56, 0xa2, 0x1c, 0x41, 0x38, 0xa3, 0xde, 0x53, 0x92, 0x1a, 0xe1, 0x92, - 0x3a, 0xb7, 0xbf, 0x37, 0x79, 0x66, 0x2d, 0x93, 0x02, 0x07, 0x94, 0x24, 0x5f, 0x2e, 0xc0, 0xb8, - 0xca, 0x92, 0x32, 0xaa, 0x1e, 0xa5, 0x8c, 0x08, 0xeb, 0x11, 0x6b, 0x09, 0x06, 0x98, 0x62, 0xa8, - 0xfd, 0xb8, 0x0c, 0xf5, 0x70, 0x66, 0x23, 0x8f, 0x41, 0x85, 0xaf, 0x85, 0xa5, 0xc1, 0x1a, 0xaa, - 0x2c, 0xbe, 0x64, 0x46, 0x91, 0x47, 0x1e, 0x87, 0xaa, 0xe1, 0x76, 0x3a, 0xba, 0x63, 0x72, 0xff, - 0x46, 0xbd, 0xd9, 0x60, 0x9a, 0x7a, 0x4e, 0x24, 0xa1, 0xca, 0x23, 0xe7, 0xa1, 0xac, 0x7b, 0x6d, - 0xe1, 0x6a, 0xa8, 0x8b, 0xf9, 0x68, 0xd6, 0x6b, 0xfb, 0xc8, 0x53, 0xc9, 0x47, 0xa1, 0x44, 0x9d, - 0x9d, 0x89, 0xf2, 0x60, 0x53, 0xe0, 0xb2, 0xb3, 0x73, 0x43, 0xf7, 0x9a, 0x0d, 
0x59, 0x87, 0xd2, - 0x65, 0x67, 0x07, 0x59, 0x19, 0xb2, 0x0c, 0x55, 0xea, 0xec, 0xb0, 0xb6, 0x97, 0x3e, 0x80, 0x0f, - 0x0c, 0x28, 0xce, 0x48, 0xa4, 0x55, 0x1c, 0x1a, 0x14, 0x32, 0x19, 0x15, 0x04, 0xf9, 0x34, 0x8c, - 0x0a, 0xdb, 0x62, 0x85, 0xb5, 0x89, 0x3f, 0x31, 0xc2, 0x21, 0x27, 0x07, 0x1b, 0x27, 0x9c, 0x2e, - 0xf2, 0xb9, 0xc4, 0x12, 0x7d, 0x4c, 0x40, 0x91, 0x4f, 0x43, 0x5d, 0xb9, 0xd3, 0x54, 0xcb, 0x66, - 0xba, 0x2b, 0x50, 0x12, 0x21, 0x7d, 0xbd, 0x67, 0x79, 0xb4, 0x43, 0x9d, 0xc0, 0x6f, 0x9e, 0x52, - 0x0b, 0x58, 0x95, 0xeb, 0x63, 0x84, 0x46, 0x36, 0xfa, 0xfd, 0x2e, 0xc2, 0x69, 0xf0, 0xd8, 0x80, - 0x59, 0x7d, 0x08, 0xa7, 0xcb, 0xe7, 0xe0, 0x44, 0xe8, 0x18, 0x91, 0x6b, 0x6b, 0xe1, 0x46, 0x78, - 0x8a, 0x15, 0x5f, 0x4a, 0x66, 0xdd, 0xde, 0x9b, 0x7c, 0x34, 0x63, 0x75, 0x1d, 0x11, 0x60, 0x1a, - 0x4c, 0xfb, 0xfd, 0x12, 0xf4, 0x9b, 0xdd, 0x49, 0xa1, 0x15, 0x8e, 0x5a, 0x68, 0xe9, 0x0f, 0x12, - 0xd3, 0xe7, 0x73, 0xb2, 0x58, 0xfe, 0x8f, 0xca, 0x6a, 0x98, 0xd2, 0x51, 0x37, 0xcc, 0xfd, 0x32, - 0x76, 0xb4, 0xb7, 0xca, 0x30, 0x3e, 0xaf, 0xd3, 0x8e, 0xeb, 0xdc, 0x75, 0x11, 0x52, 0xb8, 0x2f, - 0x16, 0x21, 0x97, 0xa0, 0xe6, 0xd1, 0xae, 0x6d, 0x19, 0xba, 0xcf, 0x9b, 0x5e, 0xba, 0xe3, 0x50, - 0xa6, 0x61, 0x98, 0x3b, 0x60, 0xf1, 0x59, 0xba, 0x2f, 0x17, 0x9f, 0xe5, 0x77, 0x7f, 0xf1, 0xa9, - 0x7d, 0xb9, 0x08, 0xdc, 0x50, 0x21, 0x17, 0xa1, 0xcc, 0x94, 0x70, 0xda, 0xe5, 0xc1, 0x3b, 0x0e, - 0xcf, 0x21, 0xe7, 0xa0, 0x18, 0xb8, 0x72, 0xe4, 0x81, 0xcc, 0x2f, 0xae, 0xb9, 0x58, 0x0c, 0x5c, - 0xf2, 0x06, 0x80, 0xe1, 0x3a, 0xa6, 0xa5, 0xbc, 0xd4, 0xf9, 0x3e, 0x6c, 0xc1, 0xf5, 0x6e, 0xea, - 0x9e, 0x39, 0x17, 0x22, 0x8a, 0xe5, 0x47, 0xf4, 0x8e, 0x31, 0x6e, 0xe4, 0x79, 0x18, 0x71, 0x9d, - 0x85, 0x9e, 0x6d, 0x73, 0x81, 0xd6, 0x9b, 0xff, 0x9d, 0xad, 0x09, 0xaf, 0xf3, 0x94, 0xdb, 0x7b, - 0x93, 0x67, 0x85, 0x7d, 0xcb, 0xde, 0x5e, 0xf2, 0xac, 0xc0, 0x72, 0xda, 0xad, 0xc0, 0xd3, 0x03, - 0xda, 0xde, 0x45, 0x59, 0x4c, 0xfb, 0x5a, 0x01, 0x1a, 0x0b, 0xd6, 0x2d, 0x6a, 0xbe, 0x64, 0x39, - 0xa6, 0x7b, 0x93, 
0x20, 0x8c, 0xd8, 0xd4, 0x69, 0x07, 0x5b, 0xb2, 0xf7, 0x4f, 0xc5, 0xc6, 0x5a, - 0xb8, 0xb9, 0x11, 0xd5, 0xbf, 0x43, 0x03, 0x9d, 0x8d, 0xbe, 0xf9, 0x9e, 0x74, 0xbf, 0x8b, 0x45, - 0x29, 0x47, 0x40, 0x89, 0x44, 0xa6, 0xa1, 0x2e, 0xac, 0x4f, 0xcb, 0x69, 0x73, 0x19, 0xd6, 0xa2, - 0x49, 0xaf, 0xa5, 0x32, 0x30, 0xa2, 0xd1, 0x76, 0xe1, 0x54, 0x9f, 0x18, 0x88, 0x09, 0xe5, 0x40, - 0x6f, 0xab, 0xf9, 0x75, 0x61, 0x68, 0x01, 0xaf, 0xe9, 0xed, 0x98, 0x70, 0xb9, 0x8e, 0x5f, 0xd3, - 0x99, 0x8e, 0x67, 0xe8, 0xda, 0x4f, 0x0b, 0x50, 0x5b, 0xe8, 0x39, 0x06, 0x5f, 0x1b, 0xdd, 0xdd, - 0x15, 0xa6, 0x0c, 0x86, 0x62, 0xa6, 0xc1, 0xd0, 0x83, 0x91, 0xed, 0x9b, 0xa1, 0x41, 0xd1, 0x98, - 0x59, 0x19, 0xbe, 0x57, 0xc8, 0x2a, 0x4d, 0x5d, 0xe5, 0x78, 0x62, 0x0f, 0x65, 0x5c, 0x56, 0x68, - 0xe4, 0xea, 0x4b, 0x9c, 0xa9, 0x64, 0x76, 0xee, 0xa3, 0xd0, 0x88, 0x91, 0x1d, 0xca, 0x69, 0xfb, - 0xdb, 0x65, 0x18, 0x59, 0x6c, 0xb5, 0x66, 0x57, 0x97, 0xc8, 0xd3, 0xd0, 0x90, 0xee, 0xf5, 0x6b, - 0x91, 0x0c, 0xc2, 0xdd, 0x95, 0x56, 0x94, 0x85, 0x71, 0x3a, 0x66, 0x8e, 0x79, 0x54, 0xb7, 0x3b, - 0x72, 0xb0, 0x84, 0xe6, 0x18, 0xb2, 0x44, 0x14, 0x79, 0x44, 0x87, 0x71, 0xb6, 0xc2, 0x63, 0x22, - 0x14, 0xab, 0x37, 0x39, 0x6c, 0x0e, 0xb8, 0xbe, 0xe3, 0x46, 0xe2, 0x7a, 0x02, 0x00, 0x53, 0x80, - 0xe4, 0x39, 0xa8, 0xe9, 0xbd, 0x60, 0x8b, 0x1b, 0xd0, 0x62, 0x6c, 0x9c, 0xe7, 0xbb, 0x0f, 0x32, - 0xed, 0xf6, 0xde, 0xe4, 0xe8, 0x55, 0x6c, 0x3e, 0xad, 0xde, 0x31, 0xa4, 0x66, 0x95, 0x53, 0x2b, - 0x46, 0x59, 0xb9, 0xca, 0xa1, 0x2b, 0xb7, 0x9a, 0x00, 0xc0, 0x14, 0x20, 0x79, 0x05, 0x46, 0xb7, - 0xe9, 0x6e, 0xa0, 0x6f, 0x48, 0x06, 0x23, 0x87, 0x61, 0x70, 0x92, 0x99, 0x70, 0x57, 0x63, 0xc5, - 0x31, 0x01, 0x46, 0x7c, 0x78, 0x70, 0x9b, 0x7a, 0x1b, 0xd4, 0x73, 0xe5, 0xea, 0x53, 0x32, 0xa9, - 0x1e, 0x86, 0xc9, 0xc4, 0xfe, 0xde, 0xe4, 0x83, 0x57, 0x33, 0x60, 0x30, 0x13, 0x5c, 0xfb, 0x49, - 0x11, 0x4e, 0x2c, 0x8a, 0xfd, 0x4d, 0xd7, 0x13, 0x4a, 0x98, 0x9c, 0x85, 0x92, 0xd7, 0xed, 0xf1, - 0x9e, 0x53, 0x12, 0x7e, 0x52, 0x5c, 0x5d, 0x47, 0x96, 
0x46, 0x5e, 0x86, 0x9a, 0x29, 0xa7, 0x0c, - 0xb9, 0xf8, 0x3d, 0xec, 0x44, 0xc3, 0x95, 0xa0, 0x7a, 0xc3, 0x10, 0x8d, 0x59, 0xfa, 0x1d, 0xbf, - 0xdd, 0xb2, 0xde, 0xa0, 0x72, 0x3d, 0xc8, 0x2d, 0xfd, 0x15, 0x91, 0x84, 0x2a, 0x8f, 0x69, 0xd5, - 0x6d, 0xba, 0x2b, 0x56, 0x43, 0xe5, 0x48, 0xab, 0x5e, 0x95, 0x69, 0x18, 0xe6, 0x92, 0x49, 0x35, - 0x58, 0x58, 0x2f, 0x28, 0x8b, 0x95, 0xfc, 0x0d, 0x96, 0x20, 0xc7, 0x0d, 0x9b, 0x32, 0x5f, 0xb3, - 0x82, 0x80, 0x7a, 0xb2, 0x19, 0x87, 0x9a, 0x32, 0x5f, 0xe4, 0x08, 0x28, 0x91, 0xc8, 0x87, 0xa0, - 0xce, 0xc1, 0x9b, 0xb6, 0xbb, 0xc1, 0x1b, 0xae, 0x2e, 0xd6, 0xf4, 0x37, 0x54, 0x22, 0x46, 0xf9, - 0xda, 0xcf, 0x8a, 0x70, 0x66, 0x91, 0x06, 0xc2, 0xaa, 0x99, 0xa7, 0x5d, 0xdb, 0xdd, 0x65, 0xa6, - 0x25, 0xd2, 0xd7, 0xc9, 0x0b, 0x00, 0x96, 0xbf, 0xd1, 0xda, 0x31, 0xf8, 0x38, 0x10, 0x63, 0xf8, - 0xa2, 0x1c, 0x92, 0xb0, 0xd4, 0x6a, 0xca, 0x9c, 0xdb, 0x89, 0x37, 0x8c, 0x95, 0x89, 0x96, 0x57, - 0xc5, 0x3b, 0x2c, 0xaf, 0x5a, 0x00, 0xdd, 0xc8, 0x40, 0x2d, 0x71, 0xca, 0xff, 0xa5, 0xd8, 0x1c, - 0xc6, 0x36, 0x8d, 0xc1, 0xe4, 0x31, 0x19, 0x1d, 0x38, 0x69, 0xd2, 0x4d, 0xbd, 0x67, 0x07, 0xa1, - 0x51, 0x2d, 0x07, 0xf1, 0xc1, 0xed, 0xf2, 0x70, 0xef, 0x75, 0x3e, 0x85, 0x84, 0x7d, 0xd8, 0xda, - 0xef, 0x94, 0xe0, 0xdc, 0x22, 0x0d, 0x42, 0x8f, 0x8b, 0x9c, 0x1d, 0x5b, 0x5d, 0x6a, 0xb0, 0x56, - 0x78, 0xb3, 0x00, 0x23, 0xb6, 0xbe, 0x41, 0x6d, 0xa6, 0xbd, 0xd8, 0xd7, 0xbc, 0x3a, 0xb4, 0x22, - 0x18, 0xcc, 0x65, 0x6a, 0x99, 0x73, 0x48, 0xa9, 0x06, 0x91, 0x88, 0x92, 0x3d, 0x9b, 0xd4, 0x0d, - 0xbb, 0xe7, 0x07, 0xd4, 0x5b, 0x75, 0xbd, 0x40, 0xda, 0x93, 0xe1, 0xa4, 0x3e, 0x17, 0x65, 0x61, - 0x9c, 0x8e, 0xcc, 0x00, 0x18, 0xb6, 0x45, 0x9d, 0x80, 0x97, 0x12, 0xe3, 0x8a, 0xa8, 0xf6, 0x9d, - 0x0b, 0x73, 0x30, 0x46, 0xc5, 0x58, 0x75, 0x5c, 0xc7, 0x0a, 0x5c, 0xc1, 0xaa, 0x9c, 0x64, 0xb5, - 0x12, 0x65, 0x61, 0x9c, 0x8e, 0x17, 0xa3, 0x81, 0x67, 0x19, 0x3e, 0x2f, 0x56, 0x49, 0x15, 0x8b, - 0xb2, 0x30, 0x4e, 0xc7, 0x74, 0x5e, 0xec, 0xfb, 0x0f, 0xa5, 0xf3, 0xbe, 0x55, 0x87, 0x0b, 
0x09, - 0xb1, 0x06, 0x7a, 0x40, 0x37, 0x7b, 0x76, 0x8b, 0x06, 0xaa, 0x01, 0x87, 0xd4, 0x85, 0x3f, 0x17, - 0xb5, 0xbb, 0x88, 0xaa, 0x30, 0x8e, 0xa6, 0xdd, 0xfb, 0x2a, 0x78, 0xa0, 0xb6, 0x9f, 0x86, 0xba, - 0xa3, 0x07, 0x3e, 0x1f, 0xb8, 0x72, 0x8c, 0x86, 0x66, 0xd8, 0x35, 0x95, 0x81, 0x11, 0x0d, 0x59, - 0x85, 0x07, 0xa5, 0x88, 0x2f, 0xdf, 0xea, 0xba, 0x5e, 0x40, 0x3d, 0x51, 0x56, 0xaa, 0x53, 0x59, - 0xf6, 0xc1, 0x95, 0x0c, 0x1a, 0xcc, 0x2c, 0x49, 0x56, 0xe0, 0xb4, 0x21, 0x76, 0x9a, 0xa9, 0xed, - 0xea, 0xa6, 0x02, 0x14, 0x0e, 0xae, 0x70, 0x69, 0x34, 0xd7, 0x4f, 0x82, 0x59, 0xe5, 0xd2, 0xbd, - 0x79, 0x64, 0xa8, 0xde, 0x5c, 0x1d, 0xa6, 0x37, 0xd7, 0x86, 0xeb, 0xcd, 0xf5, 0x83, 0xf5, 0x66, - 0x26, 0x79, 0xd6, 0x8f, 0xa8, 0xc7, 0xcc, 0x13, 0xa1, 0x61, 0x63, 0x81, 0x0c, 0xa1, 0xe4, 0x5b, - 0x19, 0x34, 0x98, 0x59, 0x92, 0x6c, 0xc0, 0x39, 0x91, 0x7e, 0xd9, 0x31, 0xbc, 0xdd, 0x2e, 0x53, - 0x3c, 0x31, 0xdc, 0x46, 0xc2, 0xc3, 0x78, 0xae, 0x35, 0x90, 0x12, 0xef, 0x80, 0x42, 0xfe, 0x37, - 0x8c, 0x89, 0x56, 0x5a, 0xd1, 0xbb, 0x1c, 0x56, 0x84, 0x35, 0x3c, 0x24, 0x61, 0xc7, 0xe6, 0xe2, - 0x99, 0x98, 0xa4, 0x25, 0xb3, 0x70, 0xa2, 0xbb, 0x63, 0xb0, 0xc7, 0xa5, 0xcd, 0x6b, 0x94, 0x9a, - 0xd4, 0xe4, 0xbb, 0x35, 0xf5, 0xe6, 0xc3, 0xca, 0xd1, 0xb1, 0x9a, 0xcc, 0xc6, 0x34, 0x3d, 0x79, - 0x0e, 0x46, 0xfd, 0x40, 0xf7, 0x02, 0xe9, 0xd6, 0x9b, 0x18, 0x17, 0x61, 0x1f, 0xca, 0xeb, 0xd5, - 0x8a, 0xe5, 0x61, 0x82, 0x32, 0x53, 0x5f, 0x9c, 0x38, 0x3e, 0x7d, 0x91, 0x67, 0xb6, 0xfa, 0x93, - 0x22, 0x5c, 0x5c, 0xa4, 0xc1, 0x8a, 0xeb, 0x48, 0xa7, 0x68, 0x96, 0xda, 0x3f, 0x90, 0x4f, 0x34, - 0xa9, 0xb4, 0x8b, 0x47, 0xaa, 0xb4, 0x4b, 0x47, 0xa4, 0xb4, 0xcb, 0xc7, 0xa8, 0xb4, 0x7f, 0xaf, - 0x08, 0x0f, 0x27, 0x24, 0xb9, 0xea, 0x9a, 0x6a, 0xc2, 0x7f, 0x5f, 0x80, 0x07, 0x10, 0xe0, 0x6d, - 0x61, 0x77, 0xf2, 0x6d, 0xad, 0x94, 0xc5, 0xf3, 0xd5, 0xb4, 0xc5, 0xf3, 0x4a, 0x1e, 0xcd, 0x97, - 0xc1, 0xe1, 0x40, 0x1a, 0xef, 0x45, 0x20, 0x9e, 0xdc, 0x84, 0x13, 0xae, 0x9f, 0x98, 0xd1, 0x13, - 0xc6, 0x95, 0x61, 0x1f, 0x05, 
0x66, 0x94, 0x22, 0x2d, 0x78, 0xc8, 0xa7, 0x4e, 0x60, 0x39, 0xd4, - 0x4e, 0xc2, 0x09, 0x6b, 0xe8, 0x51, 0x09, 0xf7, 0x50, 0x2b, 0x8b, 0x08, 0xb3, 0xcb, 0xe6, 0x99, - 0x07, 0xfe, 0x1c, 0xb8, 0xc9, 0x29, 0x44, 0x73, 0x64, 0x16, 0xcb, 0x9b, 0x69, 0x8b, 0xe5, 0xd5, - 0xfc, 0xed, 0x36, 0x9c, 0xb5, 0x32, 0x03, 0xc0, 0x5b, 0x21, 0x6e, 0xae, 0x84, 0x4a, 0x1a, 0xc3, - 0x1c, 0x8c, 0x51, 0x31, 0x05, 0xa4, 0xe4, 0x1c, 0xb7, 0x54, 0x42, 0x05, 0xd4, 0x8a, 0x67, 0x62, - 0x92, 0x76, 0xa0, 0xb5, 0x53, 0x19, 0xda, 0xda, 0x79, 0x11, 0x48, 0xc2, 0xf1, 0x28, 0xf0, 0x46, - 0x92, 0x61, 0x8d, 0x4b, 0x7d, 0x14, 0x98, 0x51, 0x6a, 0x40, 0x57, 0xae, 0x1e, 0x6d, 0x57, 0xae, - 0x0d, 0xdf, 0x95, 0xc9, 0xab, 0x70, 0x96, 0xb3, 0x92, 0xf2, 0x49, 0x02, 0x0b, 0xbb, 0xe7, 0x03, - 0x12, 0xf8, 0x2c, 0x0e, 0x22, 0xc4, 0xc1, 0x18, 0xac, 0x7d, 0x0c, 0x8f, 0x9a, 0x8c, 0xb9, 0x6e, - 0x0f, 0xb6, 0x89, 0xe6, 0x32, 0x68, 0x30, 0xb3, 0x24, 0xeb, 0x62, 0x01, 0xeb, 0x86, 0xfa, 0x86, - 0x4d, 0x4d, 0x19, 0xd6, 0x19, 0x76, 0xb1, 0xb5, 0xe5, 0x96, 0xcc, 0xc1, 0x18, 0x55, 0x96, 0x99, - 0x32, 0x7a, 0x48, 0x33, 0x65, 0x91, 0x7b, 0xe9, 0x37, 0x13, 0xd6, 0x90, 0xb4, 0x75, 0xc2, 0x40, - 0xdd, 0xb9, 0x34, 0x01, 0xf6, 0x97, 0xe1, 0x56, 0xa2, 0xe1, 0x59, 0xdd, 0xc0, 0x4f, 0x62, 0x8d, - 0xa7, 0xac, 0xc4, 0x0c, 0x1a, 0xcc, 0x2c, 0xc9, 0xec, 0xf3, 0x2d, 0xaa, 0xdb, 0xc1, 0x56, 0x12, - 0xf0, 0x44, 0xd2, 0x3e, 0xbf, 0xd2, 0x4f, 0x82, 0x59, 0xe5, 0x32, 0x15, 0xd2, 0xc9, 0xfb, 0xd3, - 0xac, 0xfa, 0x4a, 0x09, 0xce, 0x2e, 0xd2, 0x20, 0x8c, 0xab, 0x79, 0xdf, 0x8d, 0xf2, 0x2e, 0xb8, - 0x51, 0xbe, 0x59, 0x81, 0xd3, 0x8b, 0x34, 0xe8, 0xb3, 0xc6, 0xfe, 0x8b, 0x8a, 0x7f, 0x05, 0x4e, - 0x47, 0xa1, 0x5c, 0xad, 0xc0, 0xf5, 0x84, 0x2e, 0x4f, 0xad, 0x96, 0x5b, 0xfd, 0x24, 0x98, 0x55, - 0x8e, 0x7c, 0x1a, 0x1e, 0xe6, 0xaa, 0xde, 0x69, 0x0b, 0xff, 0xac, 0x70, 0x26, 0xc4, 0x8e, 0x09, - 0x4c, 0x4a, 0xc8, 0x87, 0x5b, 0xd9, 0x64, 0x38, 0xa8, 0x3c, 0xf9, 0x22, 0x8c, 0x76, 0xad, 0x2e, - 0xb5, 0x2d, 0x87, 0xdb, 0x67, 0xb9, 0x43, 0x42, 0x56, 0x63, 0x60, 
0xd1, 0x02, 0x2e, 0x9e, 0x8a, - 0x09, 0x86, 0x99, 0x3d, 0xb5, 0x76, 0x8c, 0x3d, 0xf5, 0xdf, 0x8a, 0x50, 0x5d, 0xf4, 0xdc, 0x5e, - 0xb7, 0xb9, 0x4b, 0xda, 0x30, 0x72, 0x93, 0x6f, 0x9e, 0xc9, 0xad, 0xa9, 0xe1, 0xc3, 0xa1, 0xc5, - 0x1e, 0x5c, 0x64, 0x12, 0x89, 0x77, 0x94, 0xf0, 0xac, 0x13, 0x6f, 0xd3, 0x5d, 0x6a, 0xca, 0x3d, - 0xb4, 0xb0, 0x13, 0x5f, 0x65, 0x89, 0x28, 0xf2, 0x48, 0x07, 0x4e, 0xe8, 0xb6, 0xed, 0xde, 0xa4, - 0xe6, 0xb2, 0x1e, 0x50, 0x87, 0xfa, 0x6a, 0x4b, 0xf2, 0xb0, 0x6e, 0x69, 0xbe, 0xaf, 0x3f, 0x9b, - 0x84, 0xc2, 0x34, 0x36, 0x79, 0x0d, 0xaa, 0x7e, 0xe0, 0x7a, 0xca, 0xd8, 0x6a, 0xcc, 0xcc, 0x0d, - 0xdf, 0xe8, 0xcd, 0x4f, 0xb5, 0x04, 0x94, 0xf0, 0xd9, 0xcb, 0x17, 0x54, 0x0c, 0xb4, 0x6f, 0x14, - 0x00, 0xae, 0xac, 0xad, 0xad, 0xca, 0xed, 0x05, 0x13, 0xca, 0x7a, 0x2f, 0xdc, 0xa8, 0x1c, 0x7e, - 0x43, 0x30, 0x11, 0x0f, 0x29, 0xf7, 0xf0, 0x7a, 0xc1, 0x16, 0x72, 0x74, 0xf2, 0x41, 0xa8, 0x4a, - 0x03, 0x59, 0x8a, 0x3d, 0x0c, 0x2d, 0x90, 0x46, 0x34, 0xaa, 0x7c, 0xed, 0xb7, 0x8a, 0x00, 0x4b, - 0xa6, 0x4d, 0x5b, 0x2a, 0x82, 0xbd, 0x1e, 0x6c, 0x79, 0xd4, 0xdf, 0x72, 0x6d, 0x73, 0xc8, 0xdd, - 0x54, 0xee, 0xf3, 0x5f, 0x53, 0x20, 0x18, 0xe1, 0x11, 0x13, 0x46, 0xfd, 0x80, 0x76, 0x97, 0x9c, - 0x80, 0x7a, 0x3b, 0xba, 0x3d, 0xe4, 0x26, 0xca, 0x49, 0xe1, 0x17, 0x89, 0x70, 0x30, 0x81, 0x4a, - 0x74, 0x68, 0x58, 0x8e, 0x21, 0x06, 0x48, 0x73, 0x77, 0xc8, 0x8e, 0x74, 0x82, 0xad, 0x38, 0x96, - 0x22, 0x18, 0x8c, 0x63, 0x6a, 0x3f, 0x28, 0xc2, 0x19, 0xce, 0x8f, 0x55, 0x23, 0x11, 0x8f, 0x49, - 0xfe, 0x6f, 0xdf, 0x39, 0xb8, 0xff, 0x79, 0x30, 0xd6, 0xe2, 0x18, 0xd5, 0x0a, 0x0d, 0xf4, 0xc8, - 0x9e, 0x8b, 0xd2, 0x62, 0x87, 0xdf, 0x7a, 0x50, 0xf6, 0xd9, 0x7c, 0x25, 0xa4, 0xd7, 0x1a, 0xba, - 0x0b, 0x65, 0x7f, 0x00, 0x9f, 0xbd, 0xc2, 0x5d, 0x63, 0x3e, 0x6b, 0x71, 0x76, 0xe4, 0x0b, 0x30, - 0xe2, 0x07, 0x7a, 0xd0, 0x53, 0x43, 0x73, 0xfd, 0xa8, 0x19, 0x73, 0xf0, 0x68, 0x1e, 0x11, 0xef, - 0x28, 0x99, 0x6a, 0x3f, 0x28, 0xc0, 0xb9, 0xec, 0x82, 0xcb, 0x96, 0x1f, 0x90, 0xff, 0xd3, 0x27, - 0xf6, 
0x03, 0xb6, 0x38, 0x2b, 0xcd, 0x85, 0x1e, 0x06, 0x64, 0xab, 0x94, 0x98, 0xc8, 0x03, 0xa8, - 0x58, 0x01, 0xed, 0xa8, 0xf5, 0xe5, 0xf5, 0x23, 0xfe, 0xf4, 0x98, 0x6a, 0x67, 0x5c, 0x50, 0x30, - 0xd3, 0xde, 0x2a, 0x0e, 0xfa, 0x64, 0xae, 0x3e, 0xec, 0x64, 0xcc, 0xef, 0xd5, 0x7c, 0x31, 0xbf, - 0xc9, 0x0a, 0xf5, 0x87, 0xfe, 0xfe, 0xbf, 0xfe, 0xd0, 0xdf, 0xeb, 0xf9, 0x43, 0x7f, 0x53, 0x62, - 0x18, 0x18, 0x01, 0xfc, 0x4e, 0x09, 0xce, 0xdf, 0xa9, 0xdb, 0x30, 0x7d, 0x26, 0x7b, 0x67, 0x5e, - 0x7d, 0x76, 0xe7, 0x7e, 0x48, 0x66, 0xa0, 0xd2, 0xdd, 0xd2, 0x7d, 0x65, 0x94, 0xa9, 0x05, 0x4b, - 0x65, 0x95, 0x25, 0xde, 0x66, 0x93, 0x06, 0x37, 0xe6, 0xf8, 0x2b, 0x0a, 0x52, 0x36, 0x1d, 0x77, - 0xa8, 0xef, 0x47, 0x3e, 0x81, 0x70, 0x3a, 0x5e, 0x11, 0xc9, 0xa8, 0xf2, 0x49, 0x00, 0x23, 0xc2, - 0xc5, 0x2c, 0x35, 0xd3, 0xf0, 0x81, 0x5c, 0x19, 0x61, 0xe2, 0xd1, 0x47, 0xc9, 0xdd, 0x0a, 0xc9, - 0x8b, 0x4c, 0x41, 0x39, 0x88, 0x82, 0x76, 0xd5, 0xd2, 0xbc, 0x9c, 0x61, 0x9f, 0x72, 0x3a, 0xb6, - 0xb0, 0x77, 0x37, 0xb8, 0x53, 0xdd, 0x94, 0xfb, 0xe7, 0x96, 0xeb, 0x70, 0x83, 0xac, 0x14, 0x2d, - 0xec, 0xaf, 0xf7, 0x51, 0x60, 0x46, 0x29, 0xed, 0xaf, 0x6a, 0x70, 0x26, 0xbb, 0x3f, 0x30, 0xb9, - 0xed, 0x50, 0xcf, 0x67, 0xd8, 0x85, 0xa4, 0xdc, 0x6e, 0x88, 0x64, 0x54, 0xf9, 0xef, 0xe9, 0x80, - 0xb3, 0x6f, 0x16, 0xe0, 0xac, 0x27, 0xf7, 0x88, 0xee, 0x45, 0xd0, 0xd9, 0xa3, 0xc2, 0x9d, 0x31, - 0x80, 0x21, 0x0e, 0xae, 0x0b, 0xf9, 0x8d, 0x02, 0x4c, 0x74, 0x52, 0x7e, 0x8e, 0x63, 0x3c, 0x30, - 0xc6, 0xa3, 0xe2, 0x57, 0x06, 0xf0, 0xc3, 0x81, 0x35, 0x21, 0x5f, 0x84, 0x46, 0x97, 0xf5, 0x0b, - 0x3f, 0xa0, 0x8e, 0xa1, 0xce, 0x8c, 0x0d, 0x3f, 0x92, 0x56, 0x23, 0x2c, 0x15, 0x8a, 0x26, 0xec, - 0x83, 0x58, 0x06, 0xc6, 0x39, 0xde, 0xe7, 0x27, 0xc4, 0x2e, 0x41, 0xcd, 0xa7, 0x41, 0x60, 0x39, - 0x6d, 0xb1, 0xde, 0xa8, 0x8b, 0xb1, 0xd2, 0x92, 0x69, 0x18, 0xe6, 0x92, 0x0f, 0x41, 0x9d, 0x6f, - 0x39, 0xcd, 0x7a, 0x6d, 0x7f, 0xa2, 0xce, 0xc3, 0xc5, 0xc6, 0x44, 0x00, 0x9c, 0x4c, 0xc4, 0x28, - 0x9f, 0x3c, 0x05, 0xa3, 0x1b, 0x7c, 0xf8, 
0xca, 0xe3, 0xbc, 0xc2, 0xc7, 0xc5, 0xad, 0xb5, 0x66, - 0x2c, 0x1d, 0x13, 0x54, 0x64, 0x06, 0x80, 0x86, 0xfb, 0x72, 0x69, 0x7f, 0x56, 0xb4, 0x63, 0x87, - 0x31, 0x2a, 0xf2, 0x28, 0x94, 0x02, 0xdb, 0xe7, 0x3e, 0xac, 0x5a, 0xb4, 0x04, 0x5d, 0x5b, 0x6e, - 0x21, 0x4b, 0xd7, 0x7e, 0x56, 0x80, 0x13, 0xa9, 0xc3, 0x25, 0xac, 0x48, 0xcf, 0xb3, 0xe5, 0x34, - 0x12, 0x16, 0x59, 0xc7, 0x65, 0x64, 0xe9, 0xe4, 0x55, 0x69, 0x96, 0x17, 0x73, 0xde, 0x5c, 0x70, - 0x4d, 0x0f, 0x7c, 0x66, 0x87, 0xf7, 0x59, 0xe4, 0x7c, 0x9b, 0x2f, 0xaa, 0x8f, 0xd4, 0x03, 0xb1, - 0x6d, 0xbe, 0x28, 0x0f, 0x13, 0x94, 0x29, 0x87, 0x5f, 0xf9, 0x20, 0x0e, 0x3f, 0xed, 0x6b, 0xc5, - 0x98, 0x04, 0xa4, 0x65, 0x7f, 0x17, 0x09, 0x3c, 0xc1, 0x14, 0x68, 0xa8, 0xdc, 0xeb, 0x71, 0xfd, - 0xc7, 0x95, 0xb1, 0xcc, 0x25, 0x2f, 0x09, 0xd9, 0x97, 0x72, 0x9e, 0x42, 0x5d, 0x5b, 0x6e, 0x89, - 0xe8, 0x2a, 0xd5, 0x6a, 0x61, 0x13, 0x94, 0x8f, 0xa9, 0x09, 0xb4, 0x3f, 0x2b, 0x41, 0xe3, 0x45, - 0x77, 0xe3, 0x3d, 0x12, 0x41, 0x9d, 0xad, 0xa6, 0x8a, 0xef, 0xa2, 0x9a, 0x5a, 0x87, 0x87, 0x83, - 0xc0, 0x6e, 0x51, 0xc3, 0x75, 0x4c, 0x7f, 0x76, 0x33, 0xa0, 0xde, 0x82, 0xe5, 0x58, 0xfe, 0x16, - 0x35, 0xe5, 0x76, 0xd2, 0x23, 0xfb, 0x7b, 0x93, 0x0f, 0xaf, 0xad, 0x2d, 0x67, 0x91, 0xe0, 0xa0, - 0xb2, 0x7c, 0xda, 0xd0, 0x8d, 0x6d, 0x77, 0x73, 0x93, 0x9f, 0x94, 0x91, 0x31, 0x37, 0x62, 0xda, - 0x88, 0xa5, 0x63, 0x82, 0x4a, 0x7b, 0xbb, 0x08, 0xf5, 0xf0, 0xe4, 0x3b, 0x79, 0x1c, 0xaa, 0x1b, - 0x9e, 0xbb, 0x4d, 0x3d, 0xb1, 0x73, 0x27, 0x4f, 0xca, 0x34, 0x45, 0x12, 0xaa, 0x3c, 0xf2, 0x18, - 0x54, 0x02, 0xb7, 0x6b, 0x19, 0x69, 0x87, 0xda, 0x1a, 0x4b, 0x44, 0x91, 0x77, 0x7c, 0x1d, 0xfc, - 0x89, 0x84, 0x69, 0x57, 0x1f, 0x68, 0x8c, 0xbd, 0x02, 0x65, 0x5f, 0xf7, 0x6d, 0xa9, 0x4f, 0x73, - 0x1c, 0x22, 0x9f, 0x6d, 0x2d, 0xcb, 0x43, 0xe4, 0xb3, 0xad, 0x65, 0xe4, 0xa0, 0xda, 0x8f, 0x8b, - 0xd0, 0x10, 0x72, 0x13, 0xb3, 0xc2, 0x51, 0x4a, 0xee, 0x79, 0x1e, 0x4a, 0xe1, 0xf7, 0x3a, 0xd4, - 0xe3, 0x6e, 0x26, 0x39, 0xc9, 0xc5, 0xf7, 0x07, 0xa2, 0xcc, 0x30, 0x9c, 0x22, 
0x4a, 0x52, 0xa2, - 0x2f, 0x1f, 0xa3, 0xe8, 0x2b, 0x07, 0x12, 0xfd, 0xc8, 0x71, 0x88, 0xfe, 0xcd, 0x22, 0xd4, 0x97, - 0xad, 0x4d, 0x6a, 0xec, 0x1a, 0x36, 0x3f, 0x13, 0x68, 0x52, 0x9b, 0x06, 0x74, 0xd1, 0xd3, 0x0d, - 0xba, 0x4a, 0x3d, 0x8b, 0xdf, 0xd9, 0xc2, 0xc6, 0x07, 0x9f, 0x81, 0xe4, 0x99, 0xc0, 0xf9, 0x01, - 0x34, 0x38, 0xb0, 0x34, 0x59, 0x82, 0x51, 0x93, 0xfa, 0x96, 0x47, 0xcd, 0xd5, 0xd8, 0x42, 0xe5, - 0x71, 0xa5, 0x6a, 0xe6, 0x63, 0x79, 0xb7, 0xf7, 0x26, 0xc7, 0x94, 0x83, 0x52, 0xac, 0x58, 0x12, - 0x45, 0xd9, 0x90, 0xef, 0xea, 0x3d, 0x3f, 0xab, 0x8e, 0xb1, 0x21, 0xbf, 0x9a, 0x4d, 0x82, 0x83, - 0xca, 0x6a, 0x15, 0x28, 0x2d, 0xbb, 0x6d, 0xed, 0xad, 0x12, 0x84, 0x97, 0xfb, 0x90, 0xff, 0x5f, - 0x80, 0x86, 0xee, 0x38, 0x6e, 0x20, 0x2f, 0xce, 0x11, 0x3b, 0xf0, 0x98, 0xfb, 0x0e, 0xa1, 0xa9, - 0xd9, 0x08, 0x54, 0x6c, 0xde, 0x86, 0x1b, 0xca, 0xb1, 0x1c, 0x8c, 0xf3, 0x26, 0xbd, 0xd4, 0x7e, - 0xf2, 0x4a, 0xfe, 0x5a, 0x1c, 0x60, 0xf7, 0xf8, 0xdc, 0x27, 0xe1, 0x64, 0xba, 0xb2, 0x87, 0xd9, - 0x0e, 0xca, 0xb5, 0x31, 0x5f, 0x04, 0x88, 0x62, 0x4a, 0xee, 0x81, 0x13, 0xcb, 0x4a, 0x38, 0xb1, - 0x16, 0x87, 0x17, 0x70, 0x58, 0xe9, 0x81, 0x8e, 0xab, 0xd7, 0x53, 0x8e, 0xab, 0xa5, 0xa3, 0x60, - 0x76, 0x67, 0x67, 0xd5, 0x6f, 0x16, 0xe0, 0x64, 0x44, 0x2c, 0x4f, 0xc8, 0x3e, 0x0b, 0x63, 0x1e, - 0xd5, 0xcd, 0xa6, 0x1e, 0x18, 0x5b, 0x3c, 0xd4, 0xbb, 0xc0, 0x63, 0xb3, 0x4f, 0xed, 0xef, 0x4d, - 0x8e, 0x61, 0x3c, 0x03, 0x93, 0x74, 0x44, 0x87, 0x06, 0x4b, 0x58, 0xb3, 0x3a, 0xd4, 0xed, 0x05, - 0x43, 0x7a, 0x4d, 0xf9, 0x82, 0x05, 0x23, 0x18, 0x8c, 0x63, 0x6a, 0xef, 0x14, 0x60, 0x3c, 0x5e, - 0xe1, 0x63, 0xf7, 0xa8, 0x6d, 0x25, 0x3d, 0x6a, 0x73, 0x47, 0xd0, 0x26, 0x03, 0xbc, 0x68, 0x3f, - 0xa9, 0xc5, 0x3f, 0x8d, 0x7b, 0xce, 0xe2, 0xce, 0x82, 0xc2, 0x1d, 0x9d, 0x05, 0xef, 0xfd, 0x5b, - 0x63, 0x06, 0x59, 0xb9, 0xe5, 0xfb, 0xd8, 0xca, 0x7d, 0x37, 0xaf, 0x9e, 0x89, 0x5d, 0x9f, 0x32, - 0x92, 0xe3, 0xfa, 0x94, 0x4e, 0x78, 0x7d, 0x4a, 0xf5, 0xc8, 0x26, 0x9d, 0x83, 0x5c, 0xa1, 0x52, - 0xbb, 0xa7, 0x57, 
0xa8, 0xd4, 0x8f, 0xeb, 0x0a, 0x15, 0xc8, 0x7b, 0x85, 0xca, 0x57, 0x0b, 0x30, - 0x6e, 0x26, 0x4e, 0xcc, 0x72, 0xdf, 0x42, 0x1e, 0x55, 0x93, 0x3c, 0x80, 0x2b, 0x8e, 0x4c, 0x25, - 0xd3, 0x30, 0xc5, 0x52, 0xfb, 0x51, 0x39, 0xae, 0x07, 0xee, 0xb5, 0xab, 0xfa, 0x99, 0xa4, 0xab, - 0xfa, 0x62, 0xda, 0x55, 0x7d, 0x22, 0x16, 0x45, 0x1a, 0x77, 0x57, 0x7f, 0x38, 0x36, 0x3d, 0xb2, - 0x39, 0x69, 0x2c, 0x92, 0x74, 0xc6, 0x14, 0xf9, 0x61, 0xa8, 0xf9, 0xea, 0x1a, 0x46, 0xb1, 0xb0, - 0x89, 0xda, 0x45, 0x5d, 0x91, 0x18, 0x52, 0x30, 0x4b, 0xdc, 0xa3, 0xba, 0xef, 0x3a, 0x69, 0x4b, - 0x1c, 0x79, 0x2a, 0xca, 0xdc, 0xb8, 0xcb, 0x7c, 0xe4, 0x2e, 0x2e, 0x73, 0x1d, 0x1a, 0xb6, 0xee, - 0x07, 0xeb, 0x5d, 0x53, 0x0f, 0xa8, 0x29, 0xc7, 0xdb, 0xff, 0x38, 0x98, 0xae, 0x62, 0xfa, 0x2f, - 0x32, 0x08, 0x97, 0x23, 0x18, 0x8c, 0x63, 0x12, 0x13, 0x46, 0xd9, 0x2b, 0x1f, 0x0d, 0xe6, 0xac, - 0xba, 0x02, 0xe0, 0x30, 0x3c, 0x42, 0x4f, 0xcf, 0x72, 0x0c, 0x07, 0x13, 0xa8, 0x03, 0xbc, 0xea, - 0xf5, 0xa1, 0xbc, 0xea, 0x5f, 0xad, 0x43, 0xe3, 0x9a, 0x1e, 0x58, 0x3b, 0x94, 0xef, 0xe2, 0x1c, - 0x8f, 0x2b, 0xfd, 0x57, 0x0a, 0x70, 0x26, 0x19, 0xaa, 0x77, 0x8c, 0xfe, 0x74, 0x7e, 0xf1, 0x07, - 0x66, 0x72, 0xc3, 0x01, 0xb5, 0xe0, 0x9e, 0xf5, 0xbe, 0xc8, 0xbf, 0xe3, 0xf6, 0xac, 0xb7, 0x06, - 0x31, 0xc4, 0xc1, 0x75, 0x79, 0xaf, 0x78, 0xd6, 0xef, 0xef, 0x8b, 0xd9, 0x52, 0x7e, 0xff, 0xea, - 0x7d, 0xe3, 0xf7, 0xaf, 0xdd, 0x17, 0xc6, 0x56, 0x37, 0xe6, 0xf7, 0xaf, 0xe7, 0x8c, 0x3f, 0x91, - 0xd1, 0xed, 0x02, 0x6d, 0xd0, 0xfe, 0x01, 0x3f, 0x98, 0xae, 0xfc, 0xb1, 0xcc, 0x46, 0xd9, 0xd0, - 0x7d, 0xcb, 0x90, 0x6a, 0x2f, 0xc7, 0x45, 0x94, 0xea, 0xc6, 0x2e, 0xb1, 0x4d, 0xcd, 0x5f, 0x51, - 0x60, 0x47, 0x37, 0x83, 0x15, 0x73, 0xdd, 0x0c, 0x46, 0xe6, 0xa0, 0xec, 0xb0, 0xd5, 0x73, 0xe9, - 0xd0, 0x77, 0x81, 0x5d, 0xbb, 0x4a, 0x77, 0x91, 0x17, 0xd6, 0xde, 0x2e, 0x02, 0xb0, 0xcf, 0x3f, - 0x98, 0x07, 0xfe, 0x83, 0x50, 0xf5, 0x7b, 0x7c, 0xad, 0x2c, 0x15, 0x76, 0x14, 0xb4, 0x23, 0x92, - 0x51, 0xe5, 0x93, 0xc7, 0xa0, 0xf2, 0x7a, 0x8f, 0xf6, 
0xd4, 0x76, 0x72, 0x68, 0xae, 0x7d, 0x8a, - 0x25, 0xa2, 0xc8, 0x3b, 0x3e, 0x6f, 0x9a, 0xf2, 0xd4, 0x57, 0x8e, 0xcb, 0x53, 0x5f, 0x87, 0xea, - 0x35, 0x97, 0xc7, 0x00, 0x6a, 0xff, 0x5c, 0x04, 0x88, 0x62, 0xac, 0xc8, 0x37, 0x0a, 0xf0, 0x50, - 0x38, 0xe0, 0x02, 0x61, 0x75, 0xcf, 0xd9, 0xba, 0xd5, 0xc9, 0xed, 0xb5, 0xcf, 0x1a, 0xec, 0x7c, - 0x06, 0x5a, 0xcd, 0x62, 0x87, 0xd9, 0xb5, 0x20, 0x08, 0x35, 0xda, 0xe9, 0x06, 0xbb, 0xf3, 0x96, - 0x27, 0x7b, 0x60, 0x66, 0x28, 0xdf, 0x65, 0x49, 0x23, 0x8a, 0xca, 0xa5, 0x21, 0x1f, 0x44, 0x2a, - 0x07, 0x43, 0x1c, 0xb2, 0x05, 0x35, 0xc7, 0x7d, 0xd5, 0x67, 0xe2, 0x90, 0xdd, 0xf1, 0x85, 0xe1, - 0x45, 0x2e, 0xc4, 0x2a, 0xbc, 0xbc, 0xf2, 0x05, 0xab, 0x8e, 0x14, 0xf6, 0xd7, 0x8b, 0x70, 0x3a, - 0x43, 0x0e, 0xe4, 0x05, 0x38, 0x29, 0xc3, 0xd9, 0xa2, 0xeb, 0x89, 0x0b, 0xd1, 0xf5, 0xc4, 0xad, - 0x54, 0x1e, 0xf6, 0x51, 0x93, 0x57, 0x01, 0x74, 0xc3, 0xa0, 0xbe, 0xbf, 0xe2, 0x9a, 0xca, 0x1e, - 0x7d, 0x7e, 0x7f, 0x6f, 0x12, 0x66, 0xc3, 0xd4, 0xdb, 0x7b, 0x93, 0x1f, 0xc9, 0x8a, 0x50, 0x4d, - 0xc9, 0x39, 0x2a, 0x80, 0x31, 0x48, 0xf2, 0x39, 0x00, 0xb1, 0xf4, 0x0a, 0x0f, 0xd1, 0xdf, 0xc5, - 0x5f, 0x31, 0xa5, 0xae, 0x2b, 0x9a, 0xfa, 0x54, 0x4f, 0x77, 0x02, 0x2b, 0xd8, 0x15, 0x77, 0x96, - 0xdc, 0x08, 0x51, 0x30, 0x86, 0xa8, 0xfd, 0x71, 0x11, 0x6a, 0xca, 0x53, 0x7a, 0x0f, 0xdc, 0x63, - 0xed, 0x84, 0x7b, 0xec, 0x88, 0x62, 0x52, 0xb3, 0x9c, 0x63, 0x6e, 0xca, 0x39, 0xb6, 0x98, 0x9f, - 0xd5, 0x9d, 0x5d, 0x63, 0xdf, 0x2e, 0xc2, 0xb8, 0x22, 0xcd, 0xeb, 0x18, 0xfb, 0x04, 0x9c, 0x10, - 0x7b, 0xc9, 0x2b, 0xfa, 0x2d, 0x71, 0x7d, 0x0b, 0x17, 0x58, 0x59, 0x84, 0x81, 0x36, 0x93, 0x59, - 0x98, 0xa6, 0x65, 0xdd, 0x5a, 0x24, 0xad, 0xb3, 0x75, 0x84, 0xd8, 0x7d, 0x12, 0xeb, 0x1d, 0xde, - 0xad, 0x9b, 0xa9, 0x3c, 0xec, 0xa3, 0x4e, 0x7b, 0xe6, 0xca, 0xc7, 0xe0, 0x99, 0xfb, 0xeb, 0x02, - 0x8c, 0x46, 0xf2, 0x3a, 0x76, 0xbf, 0xdc, 0x66, 0xd2, 0x2f, 0x37, 0x9b, 0xbb, 0x3b, 0x0c, 0xf0, - 0xca, 0xfd, 0x42, 0x15, 0x12, 0xa1, 0xd1, 0x64, 0x03, 0xce, 0x59, 0x99, 0x01, 0x5e, 0xb1, 
0xd9, - 0x26, 0x3c, 0xeb, 0xbb, 0x34, 0x90, 0x12, 0xef, 0x80, 0x42, 0x7a, 0x50, 0xdb, 0xa1, 0x5e, 0x60, - 0x19, 0x54, 0x7d, 0xdf, 0x62, 0x6e, 0x93, 0x4c, 0xfa, 0x1e, 0x43, 0x99, 0xde, 0x90, 0x0c, 0x30, - 0x64, 0x45, 0x36, 0xa0, 0x42, 0xcd, 0x36, 0x55, 0x17, 0xea, 0xe4, 0xbc, 0xae, 0x32, 0x94, 0x27, - 0x7b, 0xf3, 0x51, 0x40, 0x13, 0x1f, 0xea, 0xb6, 0xda, 0x5b, 0x92, 0xfd, 0x70, 0x78, 0x03, 0x2b, - 0xdc, 0xa5, 0x8a, 0xce, 0xda, 0x87, 0x49, 0x18, 0xf1, 0x21, 0xdb, 0xa1, 0x93, 0xab, 0x72, 0x44, - 0x93, 0xc7, 0x1d, 0x5c, 0x5c, 0x3e, 0xd4, 0x6f, 0xea, 0x01, 0xf5, 0x3a, 0xba, 0xb7, 0x2d, 0x57, - 0x1b, 0xc3, 0x7f, 0xe1, 0x4b, 0x0a, 0x29, 0xfa, 0xc2, 0x30, 0x09, 0x23, 0x3e, 0xc4, 0x85, 0x7a, - 0x20, 0xcd, 0x67, 0xe5, 0xc9, 0x1b, 0x9e, 0xa9, 0x32, 0xc4, 0x7d, 0x19, 0x22, 0xad, 0x5e, 0x31, - 0xe2, 0x41, 0x76, 0x12, 0x57, 0xf9, 0x8a, 0x0b, 0x9c, 0x9b, 0x39, 0x3c, 0xc2, 0x12, 0x2a, 0x52, - 0x37, 0xd9, 0x57, 0x02, 0x6b, 0x6f, 0x57, 0xa2, 0x69, 0xf9, 0x5e, 0xfb, 0xa9, 0x9e, 0x4a, 0xfa, - 0xa9, 0x2e, 0xa4, 0xfd, 0x54, 0xa9, 0x2d, 0xca, 0xc3, 0x07, 0x55, 0xa6, 0x3c, 0x44, 0xe5, 0x63, - 0xf0, 0x10, 0x3d, 0x09, 0x8d, 0x1d, 0x3e, 0x13, 0x88, 0xdb, 0x79, 0x2a, 0x5c, 0x8d, 0xf0, 0x99, - 0xfd, 0x46, 0x94, 0x8c, 0x71, 0x1a, 0x56, 0x44, 0x58, 0x20, 0xd1, 0xf5, 0xa6, 0xb2, 0x48, 0x2b, - 0x4a, 0xc6, 0x38, 0x0d, 0x8f, 0xc7, 0xb2, 0x9c, 0x6d, 0x51, 0xa0, 0xca, 0x0b, 0x88, 0x78, 0x2c, - 0x95, 0x88, 0x51, 0x3e, 0xb9, 0x04, 0xb5, 0x9e, 0xb9, 0x29, 0x68, 0x6b, 0x9c, 0x96, 0x5b, 0x98, - 0xeb, 0xf3, 0x0b, 0xf2, 0xb6, 0x20, 0x95, 0xcb, 0x6a, 0xd2, 0xd1, 0xbb, 0x2a, 0x83, 0xaf, 0x0d, - 0x65, 0x4d, 0x56, 0xa2, 0x64, 0x8c, 0xd3, 0x90, 0x8f, 0xc1, 0xb8, 0x47, 0xcd, 0x9e, 0x41, 0xc3, - 0x52, 0xc0, 0x4b, 0x71, 0xaf, 0x28, 0x26, 0x72, 0x30, 0x45, 0x39, 0xc0, 0xcf, 0xd5, 0x18, 0xca, - 0xcf, 0xf5, 0xfd, 0x02, 0x90, 0xfe, 0xf8, 0x65, 0xb2, 0x05, 0x23, 0x0e, 0xf7, 0x7e, 0xe5, 0xbe, - 0x10, 0x39, 0xe6, 0x44, 0x13, 0xd3, 0x92, 0x4c, 0x90, 0xf8, 0xc4, 0x81, 0x1a, 0xbd, 0x15, 0x50, - 0xcf, 0x09, 0xcf, 0x33, 0x1c, 
0xcd, 0xe5, 0xcb, 0x62, 0x35, 0x20, 0x91, 0x31, 0xe4, 0xa1, 0xfd, - 0xb0, 0x08, 0x8d, 0x18, 0xdd, 0xdd, 0x16, 0x95, 0xfc, 0x48, 0xb5, 0x70, 0x3a, 0xad, 0x7b, 0xb6, - 0x1c, 0x61, 0xb1, 0x23, 0xd5, 0x32, 0x0b, 0x97, 0x31, 0x4e, 0x47, 0x66, 0x00, 0x3a, 0xba, 0x1f, - 0x50, 0x8f, 0x6b, 0xdf, 0xd4, 0x41, 0xe6, 0x95, 0x30, 0x07, 0x63, 0x54, 0xe4, 0xa2, 0xbc, 0x3e, - 0xbb, 0x9c, 0xbc, 0x78, 0x6e, 0xc0, 0xdd, 0xd8, 0x95, 0x23, 0xb8, 0x1b, 0x9b, 0xb4, 0xe1, 0xa4, - 0xaa, 0xb5, 0xca, 0x3d, 0xdc, 0xb5, 0x64, 0x62, 0xfd, 0x92, 0x82, 0xc0, 0x3e, 0x50, 0xed, 0xed, - 0x02, 0x8c, 0x25, 0x5c, 0x1e, 0xe2, 0xca, 0x38, 0x15, 0x7d, 0x9f, 0xb8, 0x32, 0x2e, 0x16, 0x34, - 0xff, 0x04, 0x8c, 0x08, 0x01, 0xa5, 0x83, 0xea, 0x84, 0x08, 0x51, 0xe6, 0xb2, 0xb9, 0x4c, 0x3a, - 0x55, 0xd3, 0x73, 0x99, 0xf4, 0xba, 0xa2, 0xca, 0x17, 0xee, 0x76, 0x51, 0xbb, 0x7e, 0x77, 0xbb, - 0x48, 0xc7, 0x90, 0x42, 0xfb, 0x51, 0x09, 0x78, 0x08, 0x0a, 0x79, 0x16, 0xea, 0x1d, 0x6a, 0x6c, - 0xe9, 0x8e, 0xe5, 0xab, 0x2b, 0x23, 0xd9, 0xea, 0xb6, 0xbe, 0xa2, 0x12, 0x6f, 0x33, 0x80, 0xd9, - 0xd6, 0x32, 0x8f, 0xf2, 0x8e, 0x68, 0x89, 0x01, 0x23, 0x6d, 0xdf, 0xd7, 0xbb, 0x56, 0xee, 0x1d, - 0x50, 0x71, 0x45, 0x9f, 0x18, 0x44, 0xe2, 0x19, 0x25, 0x34, 0x31, 0xa0, 0xd2, 0xb5, 0x75, 0xcb, - 0xc9, 0xfd, 0x8f, 0x12, 0xf6, 0x05, 0xab, 0x0c, 0x49, 0xb8, 0x74, 0xf8, 0x23, 0x0a, 0x6c, 0xd2, - 0x83, 0x86, 0x6f, 0x78, 0x7a, 0xc7, 0xdf, 0xd2, 0x67, 0x9e, 0x7e, 0x26, 0xb7, 0x91, 0x14, 0xb1, - 0x12, 0x73, 0xf6, 0x1c, 0xce, 0xae, 0xb4, 0xae, 0xcc, 0xce, 0x3c, 0xfd, 0x0c, 0xc6, 0xf9, 0xc4, - 0xd9, 0x3e, 0xfd, 0xe4, 0x8c, 0xec, 0xf7, 0x47, 0xce, 0xf6, 0xe9, 0x27, 0x67, 0x30, 0xce, 0x47, - 0xfb, 0xf7, 0x02, 0xd4, 0x43, 0x5a, 0xb2, 0x0e, 0xc0, 0x46, 0xa0, 0xbc, 0x54, 0xef, 0x50, 0x17, - 0xdc, 0xf3, 0x55, 0xf1, 0x7a, 0x58, 0x18, 0x63, 0x40, 0x19, 0xb7, 0x0e, 0x16, 0x8f, 0xfa, 0xd6, - 0xc1, 0x69, 0xa8, 0x6f, 0xe9, 0x8e, 0xe9, 0x6f, 0xe9, 0xdb, 0x62, 0x22, 0x8a, 0xdd, 0xc3, 0x79, - 0x45, 0x65, 0x60, 0x44, 0xa3, 0xfd, 0x4b, 0x05, 0xc4, 0xb6, 0x25, 
0x1b, 0x2a, 0xa6, 0xe5, 0x8b, - 0xb8, 0xd9, 0x02, 0x2f, 0x19, 0x0e, 0x95, 0x79, 0x99, 0x8e, 0x21, 0x05, 0x39, 0x0b, 0xa5, 0x8e, - 0xe5, 0xc8, 0x1d, 0x0f, 0xee, 0xf0, 0x5a, 0xb1, 0x1c, 0x64, 0x69, 0x3c, 0x4b, 0xbf, 0x25, 0x43, - 0x9e, 0x44, 0x96, 0x7e, 0x0b, 0x59, 0x1a, 0x5b, 0x82, 0xda, 0xae, 0xbb, 0xbd, 0xa1, 0x1b, 0xdb, - 0x2a, 0x32, 0xaa, 0xcc, 0x15, 0x21, 0x5f, 0x82, 0x2e, 0x27, 0xb3, 0x30, 0x4d, 0x4b, 0x16, 0xe1, - 0x84, 0xe1, 0xba, 0xb6, 0xe9, 0xde, 0x74, 0x54, 0x71, 0x61, 0x3a, 0xf0, 0x9d, 0x84, 0x79, 0xda, - 0xf5, 0xa8, 0xc1, 0xec, 0x8b, 0xb9, 0x24, 0x11, 0xa6, 0x4b, 0x91, 0x75, 0x78, 0xf8, 0x0d, 0xea, - 0xb9, 0x72, 0xba, 0x68, 0xd9, 0x94, 0x76, 0x15, 0xa0, 0x30, 0x2c, 0x78, 0xa4, 0xd6, 0x67, 0xb2, - 0x49, 0x70, 0x50, 0x59, 0x1e, 0xf3, 0xa9, 0x7b, 0x6d, 0x1a, 0xac, 0x7a, 0xae, 0x41, 0x7d, 0xdf, - 0x72, 0xda, 0x0a, 0xb6, 0x1a, 0xc1, 0xae, 0x65, 0x93, 0xe0, 0xa0, 0xb2, 0xe4, 0x65, 0x98, 0x10, - 0x59, 0x42, 0x6b, 0xcf, 0xee, 0xe8, 0x96, 0xad, 0x6f, 0x58, 0xb6, 0xfa, 0x27, 0xd7, 0x98, 0xd8, - 0xa0, 0x58, 0x1b, 0x40, 0x83, 0x03, 0x4b, 0xf3, 0x3f, 0x69, 0xc9, 0xed, 0xa9, 0x55, 0xea, 0xf1, - 0x7e, 0x20, 0xed, 0x19, 0xf1, 0x27, 0xad, 0x54, 0x1e, 0xf6, 0x51, 0x13, 0x84, 0x33, 0x7c, 0xbb, - 0x7b, 0xbd, 0x9b, 0x12, 0xba, 0xb4, 0x70, 0xf8, 0x3e, 0x54, 0x2b, 0x93, 0x02, 0x07, 0x94, 0x64, - 0xdf, 0xcb, 0x73, 0xe6, 0xdd, 0x9b, 0x4e, 0x1a, 0xb5, 0x11, 0x7d, 0x6f, 0x6b, 0x00, 0x0d, 0x0e, - 0x2c, 0xad, 0xfd, 0x51, 0x11, 0xc6, 0x12, 0x27, 0x9f, 0xef, 0xbb, 0x13, 0xa6, 0xcc, 0x54, 0xec, - 0xf8, 0xed, 0xa5, 0xf9, 0x2b, 0x54, 0x37, 0xa9, 0x77, 0x95, 0xaa, 0x53, 0xea, 0x7c, 0xf4, 0xaf, - 0x24, 0x72, 0x30, 0x45, 0x49, 0x36, 0xa1, 0x22, 0x1c, 0x9f, 0x79, 0xff, 0x69, 0xa0, 0x64, 0xc4, - 0xbd, 0x9f, 0x5c, 0x37, 0x08, 0xdf, 0xa7, 0x80, 0xd7, 0x02, 0x18, 0x8d, 0x53, 0xb0, 0x11, 0x1f, - 0x59, 0x55, 0xd5, 0x84, 0x45, 0xb5, 0x04, 0xa5, 0x20, 0x18, 0xf6, 0xec, 0xaa, 0x70, 0xa4, 0xaf, - 0x2d, 0x23, 0xc3, 0xd0, 0x36, 0x59, 0xdb, 0xf9, 0xbe, 0xe5, 0x3a, 0xf2, 0x22, 0xe3, 0x75, 0xa8, - 0x06, 
0xd2, 0x97, 0x34, 0xdc, 0xd9, 0x5b, 0xee, 0xd7, 0x55, 0x7e, 0x24, 0x85, 0xa5, 0xfd, 0x4d, - 0x11, 0xea, 0xe1, 0xba, 0xef, 0x00, 0x17, 0x04, 0xbb, 0x50, 0x0f, 0x03, 0x63, 0x72, 0xff, 0x9f, - 0x2c, 0x8a, 0xd7, 0xe0, 0x4b, 0x95, 0xf0, 0x15, 0x23, 0x1e, 0xf1, 0xa0, 0x9b, 0x52, 0x8e, 0xa0, - 0x9b, 0x2e, 0x54, 0x03, 0xcf, 0x6a, 0xb7, 0xa5, 0x11, 0x9a, 0x27, 0xea, 0x26, 0x14, 0xd7, 0x9a, - 0x00, 0x94, 0x92, 0x15, 0x2f, 0xa8, 0xd8, 0x68, 0xaf, 0xc1, 0xc9, 0x34, 0x25, 0xb7, 0xd0, 0x8c, - 0x2d, 0x6a, 0xf6, 0x6c, 0x25, 0xe3, 0xc8, 0x42, 0x93, 0xe9, 0x18, 0x52, 0xb0, 0x55, 0x1a, 0x6b, - 0xa6, 0x37, 0x5c, 0x47, 0xad, 0x7f, 0xb9, 0xb1, 0xbb, 0x26, 0xd3, 0x30, 0xcc, 0xd5, 0xfe, 0xa9, - 0x04, 0x67, 0xa3, 0xd5, 0xfb, 0x8a, 0xee, 0xe8, 0xed, 0x03, 0xfc, 0x94, 0xea, 0xfd, 0xd3, 0x0c, - 0x87, 0xbd, 0xe5, 0xbd, 0x74, 0x1f, 0xdc, 0xf2, 0xfe, 0xe3, 0x02, 0xf0, 0x20, 0x3e, 0xf2, 0x45, - 0x18, 0xd5, 0x63, 0xff, 0x23, 0x94, 0xcd, 0x79, 0x39, 0x77, 0x73, 0xf2, 0x58, 0xc1, 0x30, 0x28, - 0x25, 0x9e, 0x8a, 0x09, 0x86, 0xc4, 0x85, 0xda, 0xa6, 0x6e, 0xdb, 0xcc, 0x68, 0xc9, 0xbd, 0x1b, - 0x91, 0x60, 0xce, 0xbb, 0xf9, 0x82, 0x84, 0xc6, 0x90, 0x89, 0xf6, 0x8f, 0x05, 0x18, 0x6b, 0xd9, - 0x96, 0x69, 0x39, 0xed, 0x63, 0xbc, 0xde, 0xfd, 0x3a, 0x54, 0x7c, 0xdb, 0x32, 0xe9, 0x90, 0xf3, - 0xb8, 0xd0, 0x20, 0x0c, 0x00, 0x05, 0x4e, 0xf2, 0xbe, 0xf8, 0xd2, 0x01, 0xee, 0x8b, 0xff, 0xe9, - 0x08, 0xc8, 0x40, 0x50, 0xd2, 0x83, 0x7a, 0x5b, 0x5d, 0x43, 0x2d, 0xbf, 0xf1, 0x4a, 0x8e, 0x2b, - 0xcc, 0x12, 0x17, 0x5a, 0x8b, 0x59, 0x37, 0x4c, 0xc4, 0x88, 0x13, 0xa1, 0xc9, 0x5f, 0x50, 0xce, + // 7415 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x5b, 0x6c, 0x24, 0xc7, + 0x75, 0xb6, 0xe6, 0xc6, 0x99, 0x39, 0x43, 0x72, 0x77, 0x6b, 0xa5, 0x15, 0x77, 0xb5, 0x5a, 0xae, + 0x5b, 0xbf, 0xf4, 0xaf, 0x7f, 0xdb, 0xe4, 0x2f, 0xfe, 0xba, 0xd9, 0xbf, 0x6d, 0x89, 0x43, 0x2e, + 0xb9, 0xd4, 0x92, 0xbb, 0xf4, 0x19, 0x72, 0x25, 0x5b, 0xbf, 0xad, 0xbf, 0xd9, 0x5d, 0x1c, 
0xb6, + 0xd8, 0xd3, 0x3d, 0xea, 0xee, 0xe1, 0x2e, 0xe5, 0x04, 0xbe, 0x3d, 0x48, 0x41, 0x12, 0x24, 0xf0, + 0x93, 0x81, 0xc0, 0x09, 0x12, 0x04, 0xf0, 0x83, 0xe1, 0x3c, 0x04, 0x50, 0x1e, 0x02, 0xe4, 0x06, + 0x04, 0x89, 0x13, 0xe4, 0xe2, 0x87, 0x00, 0x51, 0x10, 0x80, 0x88, 0x19, 0xe4, 0x21, 0x09, 0x62, + 0x18, 0x31, 0x10, 0xdb, 0x0b, 0x03, 0x0e, 0xea, 0xd6, 0xb7, 0xe9, 0xd9, 0x25, 0xa7, 0xc9, 0xd5, + 0x2a, 0xd1, 0x5b, 0x77, 0xd5, 0xa9, 0xef, 0x54, 0x9f, 0xba, 0x9d, 0x3a, 0x75, 0xea, 0x34, 0x2c, + 0xb6, 0xad, 0x60, 0xab, 0xb7, 0x31, 0x65, 0xb8, 0x9d, 0x69, 0xa7, 0xd7, 0xd1, 0xbb, 0x9e, 0xfb, + 0x1a, 0x7f, 0xd8, 0xb4, 0xdd, 0x9b, 0xd3, 0xdd, 0xed, 0xf6, 0xb4, 0xde, 0xb5, 0xfc, 0x28, 0x65, + 0xe7, 0x49, 0xdd, 0xee, 0x6e, 0xe9, 0x4f, 0x4e, 0xb7, 0xa9, 0x43, 0x3d, 0x3d, 0xa0, 0xe6, 0x54, + 0xd7, 0x73, 0x03, 0x97, 0x3c, 0x1b, 0x01, 0x4d, 0x29, 0xa0, 0x29, 0x55, 0x6c, 0xaa, 0xbb, 0xdd, + 0x9e, 0x62, 0x40, 0x51, 0x8a, 0x02, 0x3a, 0xf7, 0x91, 0x58, 0x0d, 0xda, 0x6e, 0xdb, 0x9d, 0xe6, + 0x78, 0x1b, 0xbd, 0x4d, 0xfe, 0xc6, 0x5f, 0xf8, 0x93, 0xe0, 0x73, 0x4e, 0xdb, 0x7e, 0xce, 0x9f, + 0xb2, 0x5c, 0x56, 0xad, 0x69, 0xc3, 0xf5, 0xe8, 0xf4, 0x4e, 0x5f, 0x5d, 0xce, 0x3d, 0x15, 0xd1, + 0x74, 0x74, 0x63, 0xcb, 0x72, 0xa8, 0xb7, 0xab, 0xbe, 0x65, 0xda, 0xa3, 0xbe, 0xdb, 0xf3, 0x0c, + 0x7a, 0xa8, 0x52, 0xfe, 0x74, 0x87, 0x06, 0x7a, 0x16, 0xaf, 0xe9, 0x41, 0xa5, 0xbc, 0x9e, 0x13, + 0x58, 0x9d, 0x7e, 0x36, 0xcf, 0xdc, 0xad, 0x80, 0x6f, 0x6c, 0xd1, 0x8e, 0x9e, 0x2e, 0xa7, 0xfd, + 0x7d, 0x1d, 0x4e, 0xcf, 0x6e, 0xf8, 0x81, 0xa7, 0x1b, 0xc1, 0xaa, 0x6b, 0xae, 0xd1, 0x4e, 0xd7, + 0xd6, 0x03, 0x4a, 0xb6, 0xa1, 0xc6, 0xea, 0x66, 0xea, 0x81, 0x3e, 0x51, 0xb8, 0x58, 0xb8, 0xd4, + 0x98, 0x99, 0x9d, 0x1a, 0xb2, 0x2d, 0xa6, 0x56, 0x24, 0x50, 0x73, 0x74, 0x7f, 0x6f, 0xb2, 0xa6, + 0xde, 0x30, 0x64, 0x40, 0xbe, 0x56, 0x80, 0x51, 0xc7, 0x35, 0x69, 0x8b, 0xda, 0xd4, 0x08, 0x5c, + 0x6f, 0xa2, 0x78, 0xb1, 0x74, 0xa9, 0x31, 0xf3, 0xb9, 0xa1, 0x39, 0x66, 0x7c, 0xd1, 0xd4, 0xb5, + 0x18, 0x83, 0xcb, 0x4e, 0xe0, 
0xed, 0x36, 0x1f, 0xfc, 0xf6, 0xde, 0xe4, 0x03, 0xfb, 0x7b, 0x93, + 0xa3, 0xf1, 0x2c, 0x4c, 0xd4, 0x84, 0xac, 0x43, 0x23, 0x70, 0x6d, 0x26, 0x32, 0xcb, 0x75, 0xfc, + 0x89, 0x12, 0xaf, 0xd8, 0x85, 0x29, 0x21, 0x6d, 0xc6, 0x7e, 0x8a, 0x75, 0x97, 0xa9, 0x9d, 0x27, + 0xa7, 0xd6, 0x42, 0xb2, 0xe6, 0x69, 0x09, 0xdc, 0x88, 0xd2, 0x7c, 0x8c, 0xe3, 0x10, 0x0a, 0x27, + 0x7c, 0x6a, 0xf4, 0x3c, 0x2b, 0xd8, 0x9d, 0x73, 0x9d, 0x80, 0xde, 0x0a, 0x26, 0xca, 0x5c, 0xca, + 0x4f, 0x64, 0x41, 0xaf, 0xba, 0x66, 0x2b, 0x49, 0xdd, 0x3c, 0xbd, 0xbf, 0x37, 0x79, 0x22, 0x95, + 0x88, 0x69, 0x4c, 0xe2, 0xc0, 0x49, 0xab, 0xa3, 0xb7, 0xe9, 0x6a, 0xcf, 0xb6, 0x5b, 0xd4, 0xf0, + 0x68, 0xe0, 0x4f, 0x54, 0xf8, 0x27, 0x5c, 0xca, 0xe2, 0xb3, 0xec, 0x1a, 0xba, 0x7d, 0x7d, 0xe3, + 0x35, 0x6a, 0x04, 0x48, 0x37, 0xa9, 0x47, 0x1d, 0x83, 0x36, 0x27, 0xe4, 0xc7, 0x9c, 0x5c, 0x4a, + 0x21, 0x61, 0x1f, 0x36, 0x59, 0x84, 0x53, 0x5d, 0xcf, 0x72, 0x79, 0x15, 0x6c, 0xdd, 0xf7, 0xaf, + 0xe9, 0x1d, 0x3a, 0x31, 0x72, 0xb1, 0x70, 0xa9, 0xde, 0x3c, 0x2b, 0x61, 0x4e, 0xad, 0xa6, 0x09, + 0xb0, 0xbf, 0x0c, 0xb9, 0x04, 0x35, 0x95, 0x38, 0x51, 0xbd, 0x58, 0xb8, 0x54, 0x11, 0x7d, 0x47, + 0x95, 0xc5, 0x30, 0x97, 0x2c, 0x40, 0x4d, 0xdf, 0xdc, 0xb4, 0x1c, 0x46, 0x59, 0xe3, 0x22, 0x3c, + 0x9f, 0xf5, 0x69, 0xb3, 0x92, 0x46, 0xe0, 0xa8, 0x37, 0x0c, 0xcb, 0x92, 0x17, 0x81, 0xf8, 0xd4, + 0xdb, 0xb1, 0x0c, 0x3a, 0x6b, 0x18, 0x6e, 0xcf, 0x09, 0x78, 0xdd, 0xeb, 0xbc, 0xee, 0xe7, 0x64, + 0xdd, 0x49, 0xab, 0x8f, 0x02, 0x33, 0x4a, 0x91, 0x17, 0xe0, 0xa4, 0x1c, 0x76, 0x91, 0x14, 0x80, + 0x23, 0x3d, 0xc8, 0x04, 0x89, 0xa9, 0x3c, 0xec, 0xa3, 0x26, 0x26, 0x9c, 0xd7, 0x7b, 0x81, 0xdb, + 0x61, 0x90, 0x49, 0xa6, 0x6b, 0xee, 0x36, 0x75, 0x26, 0x1a, 0x17, 0x0b, 0x97, 0x6a, 0xcd, 0x8b, + 0xfb, 0x7b, 0x93, 0xe7, 0x67, 0xef, 0x40, 0x87, 0x77, 0x44, 0x21, 0xd7, 0xa1, 0x6e, 0x3a, 0xfe, + 0xaa, 0x6b, 0x5b, 0xc6, 0xee, 0xc4, 0x28, 0xaf, 0xe0, 0x93, 0xf2, 0x53, 0xeb, 0xf3, 0xd7, 0x5a, + 0x22, 0xe3, 0xf6, 0xde, 0xe4, 0xf9, 0xfe, 0xd9, 0x71, 0x2a, 0xcc, 
0xc7, 0x08, 0x83, 0xac, 0x70, + 0xc0, 0x39, 0xd7, 0xd9, 0xb4, 0xda, 0x13, 0x63, 0xbc, 0x35, 0x2e, 0x0e, 0xe8, 0xd0, 0xf3, 0xd7, + 0x5a, 0x82, 0xae, 0x39, 0x26, 0xd9, 0x89, 0x57, 0x8c, 0x10, 0xce, 0x3d, 0x0f, 0xa7, 0xfa, 0x46, + 0x2d, 0x39, 0x09, 0xa5, 0x6d, 0xba, 0xcb, 0x27, 0xa5, 0x3a, 0xb2, 0x47, 0xf2, 0x20, 0x54, 0x76, + 0x74, 0xbb, 0x47, 0x27, 0x8a, 0x3c, 0x4d, 0xbc, 0x7c, 0xac, 0xf8, 0x5c, 0x41, 0xfb, 0x8d, 0x12, + 0x8c, 0xaa, 0xb9, 0xa0, 0x65, 0x39, 0xdb, 0xe4, 0x25, 0x28, 0xd9, 0x6e, 0x5b, 0xce, 0x68, 0x1f, + 0x1f, 0x7a, 0x7e, 0x59, 0x76, 0xdb, 0xcd, 0xea, 0xfe, 0xde, 0x64, 0x69, 0xd9, 0x6d, 0x23, 0x43, + 0x24, 0x06, 0x54, 0xb6, 0xf5, 0xcd, 0x6d, 0x9d, 0xd7, 0xa1, 0x31, 0xd3, 0x1c, 0x1a, 0xfa, 0x2a, + 0x43, 0x61, 0x75, 0x6d, 0xd6, 0xf7, 0xf7, 0x26, 0x2b, 0xfc, 0x15, 0x05, 0x36, 0x71, 0xa1, 0xbe, + 0x61, 0xeb, 0xc6, 0xf6, 0x96, 0x6b, 0xd3, 0x89, 0x52, 0x4e, 0x46, 0x4d, 0x85, 0x24, 0x1a, 0x20, + 0x7c, 0xc5, 0x88, 0x07, 0x31, 0x60, 0xa4, 0x67, 0xfa, 0x96, 0xb3, 0x2d, 0x67, 0xa7, 0xe7, 0x87, + 0xe6, 0xb6, 0x3e, 0xcf, 0xbf, 0x09, 0xf6, 0xf7, 0x26, 0x47, 0xc4, 0x33, 0x4a, 0x68, 0xed, 0x7b, + 0x0d, 0x18, 0x57, 0x8d, 0x74, 0x83, 0x7a, 0x01, 0xbd, 0x45, 0x2e, 0x42, 0xd9, 0x61, 0x83, 0x86, + 0x37, 0x72, 0x73, 0x54, 0xf6, 0xc9, 0x32, 0x1f, 0x2c, 0x3c, 0x87, 0xd5, 0x4c, 0x2c, 0xb8, 0x52, + 0xe0, 0xc3, 0xd7, 0xac, 0xc5, 0x61, 0x44, 0xcd, 0xc4, 0x33, 0x4a, 0x68, 0xf2, 0x0a, 0x94, 0xf9, + 0xc7, 0x0b, 0x51, 0x7f, 0x62, 0x78, 0x16, 0xec, 0xd3, 0x6b, 0xec, 0x0b, 0xf8, 0x87, 0x73, 0x50, + 0xd6, 0x15, 0x7b, 0xe6, 0xa6, 0x14, 0xec, 0xc7, 0x73, 0x08, 0x76, 0x41, 0x74, 0xc5, 0xf5, 0xf9, + 0x05, 0x64, 0x88, 0xe4, 0x97, 0x0a, 0x70, 0xca, 0x70, 0x9d, 0x40, 0x67, 0x4a, 0x80, 0x5a, 0xfe, + 0x26, 0x2a, 0x9c, 0xcf, 0x8b, 0x43, 0xf3, 0x99, 0x4b, 0x23, 0x36, 0x1f, 0x62, 0xb3, 0x79, 0x5f, + 0x32, 0xf6, 0xf3, 0x26, 0xbf, 0x52, 0x80, 0x87, 0xd8, 0x2c, 0xdb, 0x47, 0xcc, 0xd7, 0x86, 0xa3, + 0xad, 0xd5, 0xd9, 0xfd, 0xbd, 0xc9, 0x87, 0x96, 0xb2, 0x98, 0x61, 0x76, 0x1d, 0x58, 0xed, 0x4e, + 0xeb, 
0xfd, 0x0a, 0x03, 0x5f, 0x77, 0x1a, 0x33, 0xcb, 0x47, 0xa9, 0x84, 0x34, 0x1f, 0x91, 0x5d, + 0x39, 0x4b, 0xe7, 0xc2, 0xac, 0x5a, 0x90, 0xcb, 0x50, 0xdd, 0x71, 0xed, 0x5e, 0x87, 0xfa, 0x13, + 0x35, 0xbe, 0x72, 0x9f, 0xcb, 0x9a, 0x50, 0x6f, 0x70, 0x92, 0xe6, 0x09, 0x09, 0x5f, 0x15, 0xef, + 0x3e, 0xaa, 0xb2, 0xc4, 0x82, 0x11, 0xdb, 0xea, 0x58, 0x81, 0xcf, 0x97, 0xb4, 0xc6, 0xcc, 0xe5, + 0xa1, 0x3f, 0x4b, 0x0c, 0xd1, 0x65, 0x0e, 0x26, 0x46, 0x8d, 0x78, 0x46, 0xc9, 0x80, 0x4d, 0x85, + 0xbe, 0xa1, 0xdb, 0x62, 0xc9, 0x6b, 0xcc, 0x7c, 0x72, 0xf8, 0x61, 0xc3, 0x50, 0x9a, 0x63, 0xf2, + 0x9b, 0x2a, 0xfc, 0x15, 0x05, 0x36, 0xf9, 0x2c, 0x8c, 0x27, 0x5a, 0xd3, 0x9f, 0x68, 0x70, 0xe9, + 0x3c, 0x9a, 0x25, 0x9d, 0x90, 0xaa, 0x79, 0x46, 0x82, 0x8d, 0x27, 0x7a, 0x88, 0x8f, 0x29, 0x30, + 0x72, 0x15, 0x6a, 0xbe, 0x65, 0x52, 0x43, 0xf7, 0xfc, 0x89, 0xd1, 0x83, 0x00, 0x9f, 0x94, 0xc0, + 0xb5, 0x96, 0x2c, 0x86, 0x21, 0x00, 0x99, 0x02, 0xe8, 0xea, 0x5e, 0x60, 0x09, 0x15, 0x72, 0x8c, + 0xab, 0x33, 0xe3, 0xfb, 0x7b, 0x93, 0xb0, 0x1a, 0xa6, 0x62, 0x8c, 0x82, 0xd1, 0xb3, 0xb2, 0x4b, + 0x4e, 0xb7, 0x17, 0xf8, 0x13, 0xe3, 0x17, 0x4b, 0x97, 0xea, 0x82, 0xbe, 0x15, 0xa6, 0x62, 0x8c, + 0x82, 0x7c, 0xab, 0x00, 0x8f, 0x44, 0xaf, 0xfd, 0x83, 0xec, 0xc4, 0x91, 0x0f, 0xb2, 0xc9, 0xfd, + 0xbd, 0xc9, 0x47, 0x5a, 0x83, 0x59, 0xe2, 0x9d, 0xea, 0xa3, 0xbd, 0x04, 0x63, 0xb3, 0xbd, 0x60, + 0xcb, 0xf5, 0xac, 0x37, 0xb8, 0x3a, 0x4c, 0x16, 0xa0, 0x12, 0x70, 0xb5, 0x46, 0xac, 0xcb, 0x8f, + 0x67, 0x89, 0x5a, 0xa8, 0x98, 0x57, 0xe9, 0xae, 0xd2, 0x06, 0xc4, 0xfa, 0x28, 0xd4, 0x1c, 0x51, + 0x5c, 0xfb, 0xf5, 0x02, 0xd4, 0x9b, 0xba, 0x6f, 0x19, 0x0c, 0x9e, 0xcc, 0x41, 0xb9, 0xe7, 0x53, + 0xef, 0x70, 0xa0, 0x7c, 0x96, 0x5e, 0xf7, 0xa9, 0x87, 0xbc, 0x30, 0xb9, 0x0e, 0xb5, 0xae, 0xee, + 0xfb, 0x37, 0x5d, 0xcf, 0x94, 0x2b, 0xcd, 0x01, 0x81, 0x84, 0xbe, 0x2a, 0x8b, 0x62, 0x08, 0xa2, + 0x35, 0x20, 0x5a, 0x6a, 0xb5, 0x1f, 0x14, 0xe0, 0x74, 0xb3, 0xb7, 0xb9, 0x49, 0x3d, 0xa9, 0x9e, + 0x09, 0xc5, 0x87, 0x50, 0xa8, 0x78, 0xd4, 
0xb4, 0x7c, 0x59, 0xf7, 0xf9, 0xa1, 0x9b, 0x0e, 0x19, + 0x8a, 0xd4, 0xb3, 0xb8, 0xbc, 0x78, 0x02, 0x0a, 0x74, 0xd2, 0x83, 0xfa, 0x6b, 0x34, 0xf0, 0x03, + 0x8f, 0xea, 0x1d, 0xf9, 0x75, 0x57, 0x86, 0x66, 0xf5, 0x22, 0x0d, 0x5a, 0x1c, 0x29, 0xae, 0xd6, + 0x85, 0x89, 0x18, 0x71, 0xd2, 0xfe, 0xa8, 0x02, 0xa3, 0x73, 0x6e, 0x67, 0xc3, 0x72, 0xa8, 0x79, + 0xd9, 0x6c, 0x53, 0xf2, 0x2a, 0x94, 0xa9, 0xd9, 0xa6, 0xf2, 0x6b, 0x87, 0x5f, 0x67, 0x19, 0x58, + 0xa4, 0x2d, 0xb0, 0x37, 0xe4, 0xc0, 0x64, 0x19, 0xc6, 0x37, 0x3d, 0xb7, 0x23, 0xa6, 0xae, 0xb5, + 0xdd, 0xae, 0x54, 0x15, 0x9b, 0xff, 0x43, 0x4d, 0x07, 0x0b, 0x89, 0xdc, 0xdb, 0x7b, 0x93, 0x10, + 0xbd, 0x61, 0xaa, 0x2c, 0x79, 0x19, 0x26, 0xa2, 0x94, 0x70, 0x0c, 0xcf, 0x31, 0xbd, 0x9a, 0xab, + 0x0a, 0x95, 0xe6, 0xf9, 0xfd, 0xbd, 0xc9, 0x89, 0x85, 0x01, 0x34, 0x38, 0xb0, 0x34, 0x79, 0xb3, + 0x00, 0x27, 0xa3, 0x4c, 0x31, 0xaf, 0x4a, 0x0d, 0xe1, 0x88, 0x26, 0x6c, 0xbe, 0x01, 0x59, 0x48, + 0xb1, 0xc0, 0x3e, 0xa6, 0x64, 0x01, 0x46, 0x03, 0x37, 0x26, 0xaf, 0x0a, 0x97, 0x97, 0xa6, 0x76, + 0xcc, 0x6b, 0xee, 0x40, 0x69, 0x25, 0xca, 0x11, 0x84, 0x33, 0xea, 0x3d, 0x25, 0xa9, 0x11, 0x2e, + 0xa9, 0x73, 0xfb, 0x7b, 0x93, 0x67, 0xd6, 0x32, 0x29, 0x70, 0x40, 0x49, 0xf2, 0xa5, 0x02, 0x8c, + 0xab, 0x2c, 0x29, 0xa3, 0xea, 0x51, 0xca, 0x88, 0xb0, 0x1e, 0xb1, 0x96, 0x60, 0x80, 0x29, 0x86, + 0xda, 0x8f, 0xca, 0x50, 0x0f, 0x67, 0x36, 0xf2, 0x18, 0x54, 0xf8, 0x5e, 0x58, 0x2a, 0xac, 0xe1, + 0x92, 0xc5, 0xb7, 0xcc, 0x28, 0xf2, 0xc8, 0xe3, 0x50, 0x35, 0xdc, 0x4e, 0x47, 0x77, 0x4c, 0x6e, + 0xdf, 0xa8, 0x37, 0x1b, 0x6c, 0xa5, 0x9e, 0x13, 0x49, 0xa8, 0xf2, 0xc8, 0x79, 0x28, 0xeb, 0x5e, + 0x5b, 0x98, 0x1a, 0xea, 0x62, 0x3e, 0x9a, 0xf5, 0xda, 0x3e, 0xf2, 0x54, 0xf2, 0x51, 0x28, 0x51, + 0x67, 0x67, 0xa2, 0x3c, 0x58, 0x15, 0xb8, 0xec, 0xec, 0xdc, 0xd0, 0xbd, 0x66, 0x43, 0xd6, 0xa1, + 0x74, 0xd9, 0xd9, 0x41, 0x56, 0x86, 0x2c, 0x43, 0x95, 0x3a, 0x3b, 0xac, 0xed, 0xa5, 0x0d, 0xe0, + 0x03, 0x03, 0x8a, 0x33, 0x12, 0xa9, 0x15, 0x87, 0x0a, 0x85, 0x4c, 0x46, 0x05, 
0x41, 0x3e, 0x0d, + 0xa3, 0x42, 0xb7, 0x58, 0x61, 0x6d, 0xe2, 0x4f, 0x8c, 0x70, 0xc8, 0xc9, 0xc1, 0xca, 0x09, 0xa7, + 0x8b, 0x6c, 0x2e, 0xb1, 0x44, 0x1f, 0x13, 0x50, 0xe4, 0xd3, 0x50, 0x57, 0xe6, 0x34, 0xd5, 0xb2, + 0x99, 0xe6, 0x0a, 0x94, 0x44, 0x48, 0x5f, 0xef, 0x59, 0x1e, 0xed, 0x50, 0x27, 0xf0, 0x9b, 0xa7, + 0xd4, 0x06, 0x56, 0xe5, 0xfa, 0x18, 0xa1, 0x91, 0x8d, 0x7e, 0xbb, 0x8b, 0x30, 0x1a, 0x3c, 0x36, + 0x60, 0x56, 0x1f, 0xc2, 0xe8, 0xf2, 0x39, 0x38, 0x11, 0x1a, 0x46, 0xe4, 0xde, 0x5a, 0x98, 0x11, + 0x9e, 0x62, 0xc5, 0x97, 0x92, 0x59, 0xb7, 0xf7, 0x26, 0x1f, 0xcd, 0xd8, 0x5d, 0x47, 0x04, 0x98, + 0x06, 0xd3, 0xfe, 0xa0, 0x04, 0xfd, 0x6a, 0x77, 0x52, 0x68, 0x85, 0xa3, 0x16, 0x5a, 0xfa, 0x83, + 0xc4, 0xf4, 0xf9, 0x9c, 0x2c, 0x96, 0xff, 0xa3, 0xb2, 0x1a, 0xa6, 0x74, 0xd4, 0x0d, 0x73, 0xbf, + 0x8c, 0x1d, 0xed, 0xad, 0x32, 0x8c, 0xcf, 0xeb, 0xb4, 0xe3, 0x3a, 0x77, 0xdd, 0x84, 0x14, 0xee, + 0x8b, 0x4d, 0xc8, 0x25, 0xa8, 0x79, 0xb4, 0x6b, 0x5b, 0x86, 0xee, 0xf3, 0xa6, 0x97, 0xe6, 0x38, + 0x94, 0x69, 0x18, 0xe6, 0x0e, 0xd8, 0x7c, 0x96, 0xee, 0xcb, 0xcd, 0x67, 0xf9, 0xdd, 0xdf, 0x7c, + 0x6a, 0x5f, 0x2a, 0x02, 0x57, 0x54, 0xc8, 0x45, 0x28, 0xb3, 0x45, 0x38, 0x6d, 0xf2, 0xe0, 0x1d, + 0x87, 0xe7, 0x90, 0x73, 0x50, 0x0c, 0x5c, 0x39, 0xf2, 0x40, 0xe6, 0x17, 0xd7, 0x5c, 0x2c, 0x06, + 0x2e, 0x79, 0x03, 0xc0, 0x70, 0x1d, 0xd3, 0x52, 0x56, 0xea, 0x7c, 0x1f, 0xb6, 0xe0, 0x7a, 0x37, + 0x75, 0xcf, 0x9c, 0x0b, 0x11, 0xc5, 0xf6, 0x23, 0x7a, 0xc7, 0x18, 0x37, 0xf2, 0x3c, 0x8c, 0xb8, + 0xce, 0x42, 0xcf, 0xb6, 0xb9, 0x40, 0xeb, 0xcd, 0xff, 0xc9, 0xf6, 0x84, 0xd7, 0x79, 0xca, 0xed, + 0xbd, 0xc9, 0xb3, 0x42, 0xbf, 0x65, 0x6f, 0x2f, 0x79, 0x56, 0x60, 0x39, 0xed, 0x56, 0xe0, 0xe9, + 0x01, 0x6d, 0xef, 0xa2, 0x2c, 0xa6, 0x7d, 0xb5, 0x00, 0x8d, 0x05, 0xeb, 0x16, 0x35, 0x5f, 0xb2, + 0x1c, 0xd3, 0xbd, 0x49, 0x10, 0x46, 0x6c, 0xea, 0xb4, 0x83, 0x2d, 0xd9, 0xfb, 0xa7, 0x62, 0x63, + 0x2d, 0x3c, 0xdc, 0x88, 0xea, 0xdf, 0xa1, 0x81, 0xce, 0x46, 0xdf, 0x7c, 0x4f, 0x9a, 0xdf, 0xc5, + 0xa6, 0x94, 0x23, 
0xa0, 0x44, 0x22, 0xd3, 0x50, 0x17, 0xda, 0xa7, 0xe5, 0xb4, 0xb9, 0x0c, 0x6b, + 0xd1, 0xa4, 0xd7, 0x52, 0x19, 0x18, 0xd1, 0x68, 0xbb, 0x70, 0xaa, 0x4f, 0x0c, 0xc4, 0x84, 0x72, + 0xa0, 0xb7, 0xd5, 0xfc, 0xba, 0x30, 0xb4, 0x80, 0xd7, 0xf4, 0x76, 0x4c, 0xb8, 0x7c, 0x8d, 0x5f, + 0xd3, 0xd9, 0x1a, 0xcf, 0xd0, 0xb5, 0x9f, 0x14, 0xa0, 0xb6, 0xd0, 0x73, 0x0c, 0xbe, 0x37, 0xba, + 0xbb, 0x29, 0x4c, 0x29, 0x0c, 0xc5, 0x4c, 0x85, 0xa1, 0x07, 0x23, 0xdb, 0x37, 0x43, 0x85, 0xa2, + 0x31, 0xb3, 0x32, 0x7c, 0xaf, 0x90, 0x55, 0x9a, 0xba, 0xca, 0xf1, 0xc4, 0x19, 0xca, 0xb8, 0xac, + 0xd0, 0xc8, 0xd5, 0x97, 0x38, 0x53, 0xc9, 0xec, 0xdc, 0x47, 0xa1, 0x11, 0x23, 0x3b, 0x94, 0xd1, + 0xf6, 0x77, 0xca, 0x30, 0xb2, 0xd8, 0x6a, 0xcd, 0xae, 0x2e, 0x91, 0xa7, 0xa1, 0x21, 0xcd, 0xeb, + 0xd7, 0x22, 0x19, 0x84, 0xa7, 0x2b, 0xad, 0x28, 0x0b, 0xe3, 0x74, 0x4c, 0x1d, 0xf3, 0xa8, 0x6e, + 0x77, 0xe4, 0x60, 0x09, 0xd5, 0x31, 0x64, 0x89, 0x28, 0xf2, 0x88, 0x0e, 0xe3, 0x6c, 0x87, 0xc7, + 0x44, 0x28, 0x76, 0x6f, 0x72, 0xd8, 0x1c, 0x70, 0x7f, 0xc7, 0x95, 0xc4, 0xf5, 0x04, 0x00, 0xa6, + 0x00, 0xc9, 0x73, 0x50, 0xd3, 0x7b, 0xc1, 0x16, 0x57, 0xa0, 0xc5, 0xd8, 0x38, 0xcf, 0x4f, 0x1f, + 0x64, 0xda, 0xed, 0xbd, 0xc9, 0xd1, 0xab, 0xd8, 0x7c, 0x5a, 0xbd, 0x63, 0x48, 0xcd, 0x2a, 0xa7, + 0x76, 0x8c, 0xb2, 0x72, 0x95, 0x43, 0x57, 0x6e, 0x35, 0x01, 0x80, 0x29, 0x40, 0xf2, 0x0a, 0x8c, + 0x6e, 0xd3, 0xdd, 0x40, 0xdf, 0x90, 0x0c, 0x46, 0x0e, 0xc3, 0xe0, 0x24, 0x53, 0xe1, 0xae, 0xc6, + 0x8a, 0x63, 0x02, 0x8c, 0xf8, 0xf0, 0xe0, 0x36, 0xf5, 0x36, 0xa8, 0xe7, 0xca, 0xdd, 0xa7, 0x64, + 0x52, 0x3d, 0x0c, 0x93, 0x89, 0xfd, 0xbd, 0xc9, 0x07, 0xaf, 0x66, 0xc0, 0x60, 0x26, 0xb8, 0xf6, + 0xe3, 0x22, 0x9c, 0x58, 0x14, 0xe7, 0x9b, 0xae, 0x27, 0x16, 0x61, 0x72, 0x16, 0x4a, 0x5e, 0xb7, + 0xc7, 0x7b, 0x4e, 0x49, 0xd8, 0x49, 0x71, 0x75, 0x1d, 0x59, 0x1a, 0x79, 0x19, 0x6a, 0xa6, 0x9c, + 0x32, 0xe4, 0xe6, 0xf7, 0xb0, 0x13, 0x0d, 0x5f, 0x04, 0xd5, 0x1b, 0x86, 0x68, 0x4c, 0xd3, 0xef, + 0xf8, 0xed, 0x96, 0xf5, 0x06, 0x95, 0xfb, 0x41, 0xae, 
0xe9, 0xaf, 0x88, 0x24, 0x54, 0x79, 0x6c, + 0x55, 0xdd, 0xa6, 0xbb, 0x62, 0x37, 0x54, 0x8e, 0x56, 0xd5, 0xab, 0x32, 0x0d, 0xc3, 0x5c, 0x32, + 0xa9, 0x06, 0x0b, 0xeb, 0x05, 0x65, 0xb1, 0x93, 0xbf, 0xc1, 0x12, 0xe4, 0xb8, 0x61, 0x53, 0xe6, + 0x6b, 0x56, 0x10, 0x50, 0x4f, 0x36, 0xe3, 0x50, 0x53, 0xe6, 0x8b, 0x1c, 0x01, 0x25, 0x12, 0xf9, + 0x10, 0xd4, 0x39, 0x78, 0xd3, 0x76, 0x37, 0x78, 0xc3, 0xd5, 0xc5, 0x9e, 0xfe, 0x86, 0x4a, 0xc4, + 0x28, 0x5f, 0xfb, 0x69, 0x11, 0xce, 0x2c, 0xd2, 0x40, 0x68, 0x35, 0xf3, 0xb4, 0x6b, 0xbb, 0xbb, + 0x4c, 0xb5, 0x44, 0xfa, 0x3a, 0x79, 0x01, 0xc0, 0xf2, 0x37, 0x5a, 0x3b, 0x06, 0x1f, 0x07, 0x62, + 0x0c, 0x5f, 0x94, 0x43, 0x12, 0x96, 0x5a, 0x4d, 0x99, 0x73, 0x3b, 0xf1, 0x86, 0xb1, 0x32, 0xd1, + 0xf6, 0xaa, 0x78, 0x87, 0xed, 0x55, 0x0b, 0xa0, 0x1b, 0x29, 0xa8, 0x25, 0x4e, 0xf9, 0x7f, 0x14, + 0x9b, 0xc3, 0xe8, 0xa6, 0x31, 0x98, 0x3c, 0x2a, 0xa3, 0x03, 0x27, 0x4d, 0xba, 0xa9, 0xf7, 0xec, + 0x20, 0x54, 0xaa, 0xe5, 0x20, 0x3e, 0xb8, 0x5e, 0x1e, 0x9e, 0xbd, 0xce, 0xa7, 0x90, 0xb0, 0x0f, + 0x5b, 0xfb, 0xdd, 0x12, 0x9c, 0x5b, 0xa4, 0x41, 0x68, 0x71, 0x91, 0xb3, 0x63, 0xab, 0x4b, 0x0d, + 0xd6, 0x0a, 0x6f, 0x16, 0x60, 0xc4, 0xd6, 0x37, 0xa8, 0xcd, 0x56, 0x2f, 0xf6, 0x35, 0xaf, 0x0e, + 0xbd, 0x10, 0x0c, 0xe6, 0x32, 0xb5, 0xcc, 0x39, 0xa4, 0x96, 0x06, 0x91, 0x88, 0x92, 0x3d, 0x9b, + 0xd4, 0x0d, 0xbb, 0xe7, 0x07, 0xd4, 0x5b, 0x75, 0xbd, 0x40, 0xea, 0x93, 0xe1, 0xa4, 0x3e, 0x17, + 0x65, 0x61, 0x9c, 0x8e, 0xcc, 0x00, 0x18, 0xb6, 0x45, 0x9d, 0x80, 0x97, 0x12, 0xe3, 0x8a, 0xa8, + 0xf6, 0x9d, 0x0b, 0x73, 0x30, 0x46, 0xc5, 0x58, 0x75, 0x5c, 0xc7, 0x0a, 0x5c, 0xc1, 0xaa, 0x9c, + 0x64, 0xb5, 0x12, 0x65, 0x61, 0x9c, 0x8e, 0x17, 0xa3, 0x81, 0x67, 0x19, 0x3e, 0x2f, 0x56, 0x49, + 0x15, 0x8b, 0xb2, 0x30, 0x4e, 0xc7, 0xd6, 0xbc, 0xd8, 0xf7, 0x1f, 0x6a, 0xcd, 0xfb, 0x66, 0x1d, + 0x2e, 0x24, 0xc4, 0x1a, 0xe8, 0x01, 0xdd, 0xec, 0xd9, 0x2d, 0x1a, 0xa8, 0x06, 0x1c, 0x72, 0x2d, + 0xfc, 0xf9, 0xa8, 0xdd, 0x85, 0x57, 0x85, 0x71, 0x34, 0xed, 0xde, 0x57, 0xc1, 0x03, 0xb5, 
0xfd, + 0x34, 0xd4, 0x1d, 0x3d, 0xf0, 0xf9, 0xc0, 0x95, 0x63, 0x34, 0x54, 0xc3, 0xae, 0xa9, 0x0c, 0x8c, + 0x68, 0xc8, 0x2a, 0x3c, 0x28, 0x45, 0x7c, 0xf9, 0x56, 0xd7, 0xf5, 0x02, 0xea, 0x89, 0xb2, 0x72, + 0x39, 0x95, 0x65, 0x1f, 0x5c, 0xc9, 0xa0, 0xc1, 0xcc, 0x92, 0x64, 0x05, 0x4e, 0x1b, 0xe2, 0xa4, + 0x99, 0xda, 0xae, 0x6e, 0x2a, 0x40, 0x61, 0xe0, 0x0a, 0xb7, 0x46, 0x73, 0xfd, 0x24, 0x98, 0x55, + 0x2e, 0xdd, 0x9b, 0x47, 0x86, 0xea, 0xcd, 0xd5, 0x61, 0x7a, 0x73, 0x6d, 0xb8, 0xde, 0x5c, 0x3f, + 0x58, 0x6f, 0x66, 0x92, 0x67, 0xfd, 0x88, 0x7a, 0x4c, 0x3d, 0x11, 0x2b, 0x6c, 0xcc, 0x91, 0x21, + 0x94, 0x7c, 0x2b, 0x83, 0x06, 0x33, 0x4b, 0x92, 0x0d, 0x38, 0x27, 0xd2, 0x2f, 0x3b, 0x86, 0xb7, + 0xdb, 0x65, 0x0b, 0x4f, 0x0c, 0xb7, 0x91, 0xb0, 0x30, 0x9e, 0x6b, 0x0d, 0xa4, 0xc4, 0x3b, 0xa0, + 0x90, 0xff, 0x0b, 0x63, 0xa2, 0x95, 0x56, 0xf4, 0x2e, 0x87, 0x15, 0x6e, 0x0d, 0x0f, 0x49, 0xd8, + 0xb1, 0xb9, 0x78, 0x26, 0x26, 0x69, 0xc9, 0x2c, 0x9c, 0xe8, 0xee, 0x18, 0xec, 0x71, 0x69, 0xf3, + 0x1a, 0xa5, 0x26, 0x35, 0xf9, 0x69, 0x4d, 0xbd, 0xf9, 0xb0, 0x32, 0x74, 0xac, 0x26, 0xb3, 0x31, + 0x4d, 0x4f, 0x9e, 0x83, 0x51, 0x3f, 0xd0, 0xbd, 0x40, 0x9a, 0xf5, 0x26, 0xc6, 0x85, 0xdb, 0x87, + 0xb2, 0x7a, 0xb5, 0x62, 0x79, 0x98, 0xa0, 0xcc, 0x5c, 0x2f, 0x4e, 0x1c, 0xdf, 0x7a, 0x91, 0x67, + 0xb6, 0xfa, 0xd3, 0x22, 0x5c, 0x5c, 0xa4, 0xc1, 0x8a, 0xeb, 0x48, 0xa3, 0x68, 0xd6, 0xb2, 0x7f, + 0x20, 0x9b, 0x68, 0x72, 0xd1, 0x2e, 0x1e, 0xe9, 0xa2, 0x5d, 0x3a, 0xa2, 0x45, 0xbb, 0x7c, 0x8c, + 0x8b, 0xf6, 0xef, 0x17, 0xe1, 0xe1, 0x84, 0x24, 0x57, 0x5d, 0x53, 0x4d, 0xf8, 0xef, 0x0b, 0xf0, + 0x00, 0x02, 0xbc, 0x2d, 0xf4, 0x4e, 0x7e, 0xac, 0x95, 0xd2, 0x78, 0xbe, 0x92, 0xd6, 0x78, 0x5e, + 0xc9, 0xb3, 0xf2, 0x65, 0x70, 0x38, 0xd0, 0x8a, 0xf7, 0x22, 0x10, 0x4f, 0x1e, 0xc2, 0x09, 0xd3, + 0x4f, 0x4c, 0xe9, 0x09, 0xfd, 0xca, 0xb0, 0x8f, 0x02, 0x33, 0x4a, 0x91, 0x16, 0x3c, 0xe4, 0x53, + 0x27, 0xb0, 0x1c, 0x6a, 0x27, 0xe1, 0x84, 0x36, 0xf4, 0xa8, 0x84, 0x7b, 0xa8, 0x95, 0x45, 0x84, + 0xd9, 0x65, 0xf3, 0xcc, 0x03, 
0x7f, 0x01, 0x5c, 0xe5, 0x14, 0xa2, 0x39, 0x32, 0x8d, 0xe5, 0xcd, + 0xb4, 0xc6, 0xf2, 0x6a, 0xfe, 0x76, 0x1b, 0x4e, 0x5b, 0x99, 0x01, 0xe0, 0xad, 0x10, 0x57, 0x57, + 0xc2, 0x45, 0x1a, 0xc3, 0x1c, 0x8c, 0x51, 0xb1, 0x05, 0x48, 0xc9, 0x39, 0xae, 0xa9, 0x84, 0x0b, + 0x50, 0x2b, 0x9e, 0x89, 0x49, 0xda, 0x81, 0xda, 0x4e, 0x65, 0x68, 0x6d, 0xe7, 0x45, 0x20, 0x09, + 0xc3, 0xa3, 0xc0, 0x1b, 0x49, 0xba, 0x35, 0x2e, 0xf5, 0x51, 0x60, 0x46, 0xa9, 0x01, 0x5d, 0xb9, + 0x7a, 0xb4, 0x5d, 0xb9, 0x36, 0x7c, 0x57, 0x26, 0xaf, 0xc2, 0x59, 0xce, 0x4a, 0xca, 0x27, 0x09, + 0x2c, 0xf4, 0x9e, 0x0f, 0x48, 0xe0, 0xb3, 0x38, 0x88, 0x10, 0x07, 0x63, 0xb0, 0xf6, 0x31, 0x3c, + 0x6a, 0x32, 0xe6, 0xba, 0x3d, 0x58, 0x27, 0x9a, 0xcb, 0xa0, 0xc1, 0xcc, 0x92, 0xac, 0x8b, 0x05, + 0xac, 0x1b, 0xea, 0x1b, 0x36, 0x35, 0xa5, 0x5b, 0x67, 0xd8, 0xc5, 0xd6, 0x96, 0x5b, 0x32, 0x07, + 0x63, 0x54, 0x59, 0x6a, 0xca, 0xe8, 0x21, 0xd5, 0x94, 0x45, 0x6e, 0xa5, 0xdf, 0x4c, 0x68, 0x43, + 0x52, 0xd7, 0x09, 0x1d, 0x75, 0xe7, 0xd2, 0x04, 0xd8, 0x5f, 0x86, 0x6b, 0x89, 0x86, 0x67, 0x75, + 0x03, 0x3f, 0x89, 0x35, 0x9e, 0xd2, 0x12, 0x33, 0x68, 0x30, 0xb3, 0x24, 0xd3, 0xcf, 0xb7, 0xa8, + 0x6e, 0x07, 0x5b, 0x49, 0xc0, 0x13, 0x49, 0xfd, 0xfc, 0x4a, 0x3f, 0x09, 0x66, 0x95, 0xcb, 0x5c, + 0x90, 0x4e, 0xde, 0x9f, 0x6a, 0xd5, 0x97, 0x4b, 0x70, 0x76, 0x91, 0x06, 0xa1, 0x5f, 0xcd, 0xfb, + 0x66, 0x94, 0x77, 0xc1, 0x8c, 0xf2, 0x8d, 0x0a, 0x9c, 0x5e, 0xa4, 0x41, 0x9f, 0x36, 0xf6, 0xdf, + 0x54, 0xfc, 0x2b, 0x70, 0x3a, 0x72, 0xe5, 0x6a, 0x05, 0xae, 0x27, 0xd6, 0xf2, 0xd4, 0x6e, 0xb9, + 0xd5, 0x4f, 0x82, 0x59, 0xe5, 0xc8, 0xa7, 0xe1, 0x61, 0xbe, 0xd4, 0x3b, 0x6d, 0x61, 0x9f, 0x15, + 0xc6, 0x84, 0xd8, 0x35, 0x81, 0x49, 0x09, 0xf9, 0x70, 0x2b, 0x9b, 0x0c, 0x07, 0x95, 0x27, 0x5f, + 0x80, 0xd1, 0xae, 0xd5, 0xa5, 0xb6, 0xe5, 0x70, 0xfd, 0x2c, 0xb7, 0x4b, 0xc8, 0x6a, 0x0c, 0x2c, + 0xda, 0xc0, 0xc5, 0x53, 0x31, 0xc1, 0x30, 0xb3, 0xa7, 0xd6, 0x8e, 0xb1, 0xa7, 0xfe, 0x7b, 0x11, + 0xaa, 0x8b, 0x9e, 0xdb, 0xeb, 0x36, 0x77, 0x49, 0x1b, 0x46, 0x6e, 
0xf2, 0xc3, 0x33, 0x79, 0x34, + 0x35, 0xbc, 0x3b, 0xb4, 0x38, 0x83, 0x8b, 0x54, 0x22, 0xf1, 0x8e, 0x12, 0x9e, 0x75, 0xe2, 0x6d, + 0xba, 0x4b, 0x4d, 0x79, 0x86, 0x16, 0x76, 0xe2, 0xab, 0x2c, 0x11, 0x45, 0x1e, 0xe9, 0xc0, 0x09, + 0xdd, 0xb6, 0xdd, 0x9b, 0xd4, 0x5c, 0xd6, 0x03, 0xea, 0x50, 0x5f, 0x1d, 0x49, 0x1e, 0xd6, 0x2c, + 0xcd, 0xcf, 0xf5, 0x67, 0x93, 0x50, 0x98, 0xc6, 0x26, 0xaf, 0x41, 0xd5, 0x0f, 0x5c, 0x4f, 0x29, + 0x5b, 0x8d, 0x99, 0xb9, 0xe1, 0x1b, 0xbd, 0xf9, 0xa9, 0x96, 0x80, 0x12, 0x36, 0x7b, 0xf9, 0x82, + 0x8a, 0x81, 0xf6, 0xf5, 0x02, 0xc0, 0x95, 0xb5, 0xb5, 0x55, 0x79, 0xbc, 0x60, 0x42, 0x59, 0xef, + 0x85, 0x07, 0x95, 0xc3, 0x1f, 0x08, 0x26, 0xfc, 0x21, 0xe5, 0x19, 0x5e, 0x2f, 0xd8, 0x42, 0x8e, + 0x4e, 0x3e, 0x08, 0x55, 0xa9, 0x20, 0x4b, 0xb1, 0x87, 0xae, 0x05, 0x52, 0x89, 0x46, 0x95, 0xaf, + 0xfd, 0x76, 0x11, 0x60, 0xc9, 0xb4, 0x69, 0x4b, 0x79, 0xb0, 0xd7, 0x83, 0x2d, 0x8f, 0xfa, 0x5b, + 0xae, 0x6d, 0x0e, 0x79, 0x9a, 0xca, 0x6d, 0xfe, 0x6b, 0x0a, 0x04, 0x23, 0x3c, 0x62, 0xc2, 0xa8, + 0x1f, 0xd0, 0xee, 0x92, 0x13, 0x50, 0x6f, 0x47, 0xb7, 0x87, 0x3c, 0x44, 0x39, 0x29, 0xec, 0x22, + 0x11, 0x0e, 0x26, 0x50, 0x89, 0x0e, 0x0d, 0xcb, 0x31, 0xc4, 0x00, 0x69, 0xee, 0x0e, 0xd9, 0x91, + 0x4e, 0xb0, 0x1d, 0xc7, 0x52, 0x04, 0x83, 0x71, 0x4c, 0xed, 0xfb, 0x45, 0x38, 0xc3, 0xf9, 0xb1, + 0x6a, 0x24, 0xfc, 0x31, 0xc9, 0xff, 0xef, 0xbb, 0x07, 0xf7, 0xbf, 0x0f, 0xc6, 0x5a, 0x5c, 0xa3, + 0x5a, 0xa1, 0x81, 0x1e, 0xe9, 0x73, 0x51, 0x5a, 0xec, 0xf2, 0x5b, 0x0f, 0xca, 0x3e, 0x9b, 0xaf, + 0x84, 0xf4, 0x5a, 0x43, 0x77, 0xa1, 0xec, 0x0f, 0xe0, 0xb3, 0x57, 0x78, 0x6a, 0xcc, 0x67, 0x2d, + 0xce, 0x8e, 0xfc, 0x2c, 0x8c, 0xf8, 0x81, 0x1e, 0xf4, 0xd4, 0xd0, 0x5c, 0x3f, 0x6a, 0xc6, 0x1c, + 0x3c, 0x9a, 0x47, 0xc4, 0x3b, 0x4a, 0xa6, 0xda, 0xf7, 0x0b, 0x70, 0x2e, 0xbb, 0xe0, 0xb2, 0xe5, + 0x07, 0xe4, 0xff, 0xf5, 0x89, 0xfd, 0x80, 0x2d, 0xce, 0x4a, 0x73, 0xa1, 0x87, 0x0e, 0xd9, 0x2a, + 0x25, 0x26, 0xf2, 0x00, 0x2a, 0x56, 0x40, 0x3b, 0x6a, 0x7f, 0x79, 0xfd, 0x88, 0x3f, 0x3d, 0xb6, + 0xb4, 
0x33, 0x2e, 0x28, 0x98, 0x69, 0x6f, 0x15, 0x07, 0x7d, 0x32, 0x5f, 0x3e, 0xec, 0xa4, 0xcf, + 0xef, 0xd5, 0x7c, 0x3e, 0xbf, 0xc9, 0x0a, 0xf5, 0xbb, 0xfe, 0xfe, 0x4c, 0xbf, 0xeb, 0xef, 0xf5, + 0xfc, 0xae, 0xbf, 0x29, 0x31, 0x0c, 0xf4, 0x00, 0x7e, 0xa7, 0x04, 0xe7, 0xef, 0xd4, 0x6d, 0xd8, + 0x7a, 0x26, 0x7b, 0x67, 0xde, 0xf5, 0xec, 0xce, 0xfd, 0x90, 0xcc, 0x40, 0xa5, 0xbb, 0xa5, 0xfb, + 0x4a, 0x29, 0x53, 0x1b, 0x96, 0xca, 0x2a, 0x4b, 0xbc, 0xcd, 0x26, 0x0d, 0xae, 0xcc, 0xf1, 0x57, + 0x14, 0xa4, 0x6c, 0x3a, 0xee, 0x50, 0xdf, 0x8f, 0x6c, 0x02, 0xe1, 0x74, 0xbc, 0x22, 0x92, 0x51, + 0xe5, 0x93, 0x00, 0x46, 0x84, 0x89, 0x59, 0xae, 0x4c, 0xc3, 0x3b, 0x72, 0x65, 0xb8, 0x89, 0x47, + 0x1f, 0x25, 0x4f, 0x2b, 0x24, 0x2f, 0x32, 0x05, 0xe5, 0x20, 0x72, 0xda, 0x55, 0x5b, 0xf3, 0x72, + 0x86, 0x7e, 0xca, 0xe9, 0xd8, 0xc6, 0xde, 0xdd, 0xe0, 0x46, 0x75, 0x53, 0x9e, 0x9f, 0x5b, 0xae, + 0xc3, 0x15, 0xb2, 0x52, 0xb4, 0xb1, 0xbf, 0xde, 0x47, 0x81, 0x19, 0xa5, 0xb4, 0xbf, 0xae, 0xc1, + 0x99, 0xec, 0xfe, 0xc0, 0xe4, 0xb6, 0x43, 0x3d, 0x9f, 0x61, 0x17, 0x92, 0x72, 0xbb, 0x21, 0x92, + 0x51, 0xe5, 0xbf, 0xa7, 0x1d, 0xce, 0xbe, 0x51, 0x80, 0xb3, 0x9e, 0x3c, 0x23, 0xba, 0x17, 0x4e, + 0x67, 0x8f, 0x0a, 0x73, 0xc6, 0x00, 0x86, 0x38, 0xb8, 0x2e, 0xe4, 0x37, 0x0b, 0x30, 0xd1, 0x49, + 0xd9, 0x39, 0x8e, 0xf1, 0xc2, 0x18, 0xf7, 0x8a, 0x5f, 0x19, 0xc0, 0x0f, 0x07, 0xd6, 0x84, 0x7c, + 0x01, 0x1a, 0x5d, 0xd6, 0x2f, 0xfc, 0x80, 0x3a, 0x86, 0xba, 0x33, 0x36, 0xfc, 0x48, 0x5a, 0x8d, + 0xb0, 0x94, 0x2b, 0x9a, 0xd0, 0x0f, 0x62, 0x19, 0x18, 0xe7, 0x78, 0x9f, 0xdf, 0x10, 0xbb, 0x04, + 0x35, 0x9f, 0x06, 0x81, 0xe5, 0xb4, 0xc5, 0x7e, 0xa3, 0x2e, 0xc6, 0x4a, 0x4b, 0xa6, 0x61, 0x98, + 0x4b, 0x3e, 0x04, 0x75, 0x7e, 0xe4, 0x34, 0xeb, 0xb5, 0xfd, 0x89, 0x3a, 0x77, 0x17, 0x1b, 0x13, + 0x0e, 0x70, 0x32, 0x11, 0xa3, 0x7c, 0xf2, 0x14, 0x8c, 0x6e, 0xf0, 0xe1, 0x2b, 0xaf, 0xf3, 0x0a, + 0x1b, 0x17, 0xd7, 0xd6, 0x9a, 0xb1, 0x74, 0x4c, 0x50, 0x91, 0x19, 0x00, 0x1a, 0x9e, 0xcb, 0xa5, + 0xed, 0x59, 0xd1, 0x89, 0x1d, 0xc6, 0xa8, 
0xc8, 0xa3, 0x50, 0x0a, 0x6c, 0x9f, 0xdb, 0xb0, 0x6a, + 0xd1, 0x16, 0x74, 0x6d, 0xb9, 0x85, 0x2c, 0x5d, 0xfb, 0x69, 0x01, 0x4e, 0xa4, 0x2e, 0x97, 0xb0, + 0x22, 0x3d, 0xcf, 0x96, 0xd3, 0x48, 0x58, 0x64, 0x1d, 0x97, 0x91, 0xa5, 0x93, 0x57, 0xa5, 0x5a, + 0x5e, 0xcc, 0x19, 0xb9, 0xe0, 0x9a, 0x1e, 0xf8, 0x4c, 0x0f, 0xef, 0xd3, 0xc8, 0xf9, 0x31, 0x5f, + 0x54, 0x1f, 0xb9, 0x0e, 0xc4, 0x8e, 0xf9, 0xa2, 0x3c, 0x4c, 0x50, 0xa6, 0x0c, 0x7e, 0xe5, 0x83, + 0x18, 0xfc, 0xb4, 0xaf, 0x16, 0x63, 0x12, 0x90, 0x9a, 0xfd, 0x5d, 0x24, 0xf0, 0x04, 0x5b, 0x40, + 0xc3, 0xc5, 0xbd, 0x1e, 0x5f, 0xff, 0xf8, 0x62, 0x2c, 0x73, 0xc9, 0x4b, 0x42, 0xf6, 0xa5, 0x9c, + 0xb7, 0x50, 0xd7, 0x96, 0x5b, 0xc2, 0xbb, 0x4a, 0xb5, 0x5a, 0xd8, 0x04, 0xe5, 0x63, 0x6a, 0x02, + 0xed, 0xcf, 0x4b, 0xd0, 0x78, 0xd1, 0xdd, 0x78, 0x8f, 0x78, 0x50, 0x67, 0x2f, 0x53, 0xc5, 0x77, + 0x71, 0x99, 0x5a, 0x87, 0x87, 0x83, 0xc0, 0x6e, 0x51, 0xc3, 0x75, 0x4c, 0x7f, 0x76, 0x33, 0xa0, + 0xde, 0x82, 0xe5, 0x58, 0xfe, 0x16, 0x35, 0xe5, 0x71, 0xd2, 0x23, 0xfb, 0x7b, 0x93, 0x0f, 0xaf, + 0xad, 0x2d, 0x67, 0x91, 0xe0, 0xa0, 0xb2, 0x7c, 0xda, 0xd0, 0x8d, 0x6d, 0x77, 0x73, 0x93, 0xdf, + 0x94, 0x91, 0x3e, 0x37, 0x62, 0xda, 0x88, 0xa5, 0x63, 0x82, 0x4a, 0x7b, 0xbb, 0x08, 0xf5, 0xf0, + 0xe6, 0x3b, 0x79, 0x1c, 0xaa, 0x1b, 0x9e, 0xbb, 0x4d, 0x3d, 0x71, 0x72, 0x27, 0x6f, 0xca, 0x34, + 0x45, 0x12, 0xaa, 0x3c, 0xf2, 0x18, 0x54, 0x02, 0xb7, 0x6b, 0x19, 0x69, 0x83, 0xda, 0x1a, 0x4b, + 0x44, 0x91, 0x77, 0x7c, 0x1d, 0xfc, 0x89, 0x84, 0x6a, 0x57, 0x1f, 0xa8, 0x8c, 0xbd, 0x02, 0x65, + 0x5f, 0xf7, 0x6d, 0xb9, 0x9e, 0xe6, 0xb8, 0x44, 0x3e, 0xdb, 0x5a, 0x96, 0x97, 0xc8, 0x67, 0x5b, + 0xcb, 0xc8, 0x41, 0xb5, 0x1f, 0x15, 0xa1, 0x21, 0xe4, 0x26, 0x66, 0x85, 0xa3, 0x94, 0xdc, 0xf3, + 0xdc, 0x95, 0xc2, 0xef, 0x75, 0xa8, 0xc7, 0xcd, 0x4c, 0x72, 0x92, 0x8b, 0x9f, 0x0f, 0x44, 0x99, + 0xa1, 0x3b, 0x45, 0x94, 0xa4, 0x44, 0x5f, 0x3e, 0x46, 0xd1, 0x57, 0x0e, 0x24, 0xfa, 0x91, 0xe3, + 0x10, 0xfd, 0x9b, 0x45, 0xa8, 0x2f, 0x5b, 0x9b, 0xd4, 0xd8, 0x35, 0x6c, 0x7e, 
0x27, 0xd0, 0xa4, + 0x36, 0x0d, 0xe8, 0xa2, 0xa7, 0x1b, 0x74, 0x95, 0x7a, 0x16, 0x8f, 0xd9, 0xc2, 0xc6, 0x07, 0x9f, + 0x81, 0xe4, 0x9d, 0xc0, 0xf9, 0x01, 0x34, 0x38, 0xb0, 0x34, 0x59, 0x82, 0x51, 0x93, 0xfa, 0x96, + 0x47, 0xcd, 0xd5, 0xd8, 0x46, 0xe5, 0x71, 0xb5, 0xd4, 0xcc, 0xc7, 0xf2, 0x6e, 0xef, 0x4d, 0x8e, + 0x29, 0x03, 0xa5, 0xd8, 0xb1, 0x24, 0x8a, 0xb2, 0x21, 0xdf, 0xd5, 0x7b, 0x7e, 0x56, 0x1d, 0x63, + 0x43, 0x7e, 0x35, 0x9b, 0x04, 0x07, 0x95, 0xd5, 0x2a, 0x50, 0x5a, 0x76, 0xdb, 0xda, 0x5b, 0x25, + 0x08, 0x83, 0xfb, 0x90, 0x9f, 0x2b, 0x40, 0x43, 0x77, 0x1c, 0x37, 0x90, 0x81, 0x73, 0xc4, 0x09, + 0x3c, 0xe6, 0x8e, 0x21, 0x34, 0x35, 0x1b, 0x81, 0x8a, 0xc3, 0xdb, 0xf0, 0x40, 0x39, 0x96, 0x83, + 0x71, 0xde, 0xa4, 0x97, 0x3a, 0x4f, 0x5e, 0xc9, 0x5f, 0x8b, 0x03, 0x9c, 0x1e, 0x9f, 0xfb, 0x24, + 0x9c, 0x4c, 0x57, 0xf6, 0x30, 0xc7, 0x41, 0xb9, 0x0e, 0xe6, 0x8b, 0x00, 0x91, 0x4f, 0xc9, 0x3d, + 0x30, 0x62, 0x59, 0x09, 0x23, 0xd6, 0xe2, 0xf0, 0x02, 0x0e, 0x2b, 0x3d, 0xd0, 0x70, 0xf5, 0x7a, + 0xca, 0x70, 0xb5, 0x74, 0x14, 0xcc, 0xee, 0x6c, 0xac, 0xfa, 0xad, 0x02, 0x9c, 0x8c, 0x88, 0xe5, + 0x0d, 0xd9, 0x67, 0x61, 0xcc, 0xa3, 0xba, 0xd9, 0xd4, 0x03, 0x63, 0x8b, 0xbb, 0x7a, 0x17, 0xb8, + 0x6f, 0xf6, 0xa9, 0xfd, 0xbd, 0xc9, 0x31, 0x8c, 0x67, 0x60, 0x92, 0x8e, 0xe8, 0xd0, 0x60, 0x09, + 0x6b, 0x56, 0x87, 0xba, 0xbd, 0x60, 0x48, 0xab, 0x29, 0xdf, 0xb0, 0x60, 0x04, 0x83, 0x71, 0x4c, + 0xed, 0x9d, 0x02, 0x8c, 0xc7, 0x2b, 0x7c, 0xec, 0x16, 0xb5, 0xad, 0xa4, 0x45, 0x6d, 0xee, 0x08, + 0xda, 0x64, 0x80, 0x15, 0xed, 0xc7, 0xb5, 0xf8, 0xa7, 0x71, 0xcb, 0x59, 0xdc, 0x58, 0x50, 0xb8, + 0xa3, 0xb1, 0xe0, 0xbd, 0x1f, 0x35, 0x66, 0x90, 0x96, 0x5b, 0xbe, 0x8f, 0xb5, 0xdc, 0x77, 0x33, + 0xf4, 0x4c, 0x2c, 0x7c, 0xca, 0x48, 0x8e, 0xf0, 0x29, 0x9d, 0x30, 0x7c, 0x4a, 0xf5, 0xc8, 0x26, + 0x9d, 0x83, 0x84, 0x50, 0xa9, 0xdd, 0xd3, 0x10, 0x2a, 0xf5, 0xe3, 0x0a, 0xa1, 0x02, 0x79, 0x43, + 0xa8, 0x7c, 0xa5, 0x00, 0xe3, 0x66, 0xe2, 0xc6, 0x2c, 0xb7, 0x2d, 0xe4, 0x59, 0x6a, 0x92, 0x17, + 0x70, 0xc5, 0x95, 
0xa9, 0x64, 0x1a, 0xa6, 0x58, 0x6a, 0x3f, 0x2c, 0xc7, 0xd7, 0x81, 0x7b, 0x6d, + 0xaa, 0x7e, 0x26, 0x69, 0xaa, 0xbe, 0x98, 0x36, 0x55, 0x9f, 0x88, 0x79, 0x91, 0xc6, 0xcd, 0xd5, + 0x1f, 0x8e, 0x4d, 0x8f, 0x6c, 0x4e, 0x1a, 0x8b, 0x24, 0x9d, 0x31, 0x45, 0x7e, 0x18, 0x6a, 0xbe, + 0x0a, 0xc3, 0x28, 0x36, 0x36, 0x51, 0xbb, 0xa8, 0x10, 0x89, 0x21, 0x05, 0xd3, 0xc4, 0x3d, 0xaa, + 0xfb, 0xae, 0x93, 0xd6, 0xc4, 0x91, 0xa7, 0xa2, 0xcc, 0x8d, 0x9b, 0xcc, 0x47, 0xee, 0x62, 0x32, + 0xd7, 0xa1, 0x61, 0xeb, 0x7e, 0xb0, 0xde, 0x35, 0xf5, 0x80, 0x9a, 0x72, 0xbc, 0xfd, 0xaf, 0x83, + 0xad, 0x55, 0x6c, 0xfd, 0x8b, 0x14, 0xc2, 0xe5, 0x08, 0x06, 0xe3, 0x98, 0xc4, 0x84, 0x51, 0xf6, + 0xca, 0x47, 0x83, 0x39, 0xab, 0x42, 0x00, 0x1c, 0x86, 0x47, 0x68, 0xe9, 0x59, 0x8e, 0xe1, 0x60, + 0x02, 0x75, 0x80, 0x55, 0xbd, 0x3e, 0x94, 0x55, 0xfd, 0x2b, 0x75, 0x68, 0x5c, 0xd3, 0x03, 0x6b, + 0x87, 0xf2, 0x53, 0x9c, 0xe3, 0x31, 0xa5, 0xff, 0x6a, 0x01, 0xce, 0x24, 0x5d, 0xf5, 0x8e, 0xd1, + 0x9e, 0xce, 0x03, 0x7f, 0x60, 0x26, 0x37, 0x1c, 0x50, 0x0b, 0x6e, 0x59, 0xef, 0xf3, 0xfc, 0x3b, + 0x6e, 0xcb, 0x7a, 0x6b, 0x10, 0x43, 0x1c, 0x5c, 0x97, 0xf7, 0x8a, 0x65, 0xfd, 0xfe, 0x0e, 0xcc, + 0x96, 0xb2, 0xfb, 0x57, 0xef, 0x1b, 0xbb, 0x7f, 0xed, 0xbe, 0x50, 0xb6, 0xba, 0x31, 0xbb, 0x7f, + 0x3d, 0xa7, 0xff, 0x89, 0xf4, 0x6e, 0x17, 0x68, 0x83, 0xce, 0x0f, 0xf8, 0xc5, 0x74, 0x65, 0x8f, + 0x65, 0x3a, 0xca, 0x86, 0xee, 0x5b, 0x86, 0x5c, 0xf6, 0x72, 0x04, 0xa2, 0x54, 0x11, 0xbb, 0xc4, + 0x31, 0x35, 0x7f, 0x45, 0x81, 0x1d, 0x45, 0x06, 0x2b, 0xe6, 0x8a, 0x0c, 0x46, 0xe6, 0xa0, 0xec, + 0xb0, 0xdd, 0x73, 0xe9, 0xd0, 0xb1, 0xc0, 0xae, 0x5d, 0xa5, 0xbb, 0xc8, 0x0b, 0x6b, 0x6f, 0x17, + 0x01, 0xd8, 0xe7, 0x1f, 0xcc, 0x02, 0xff, 0x41, 0xa8, 0xfa, 0x3d, 0xbe, 0x57, 0x96, 0x0b, 0x76, + 0xe4, 0xb4, 0x23, 0x92, 0x51, 0xe5, 0x93, 0xc7, 0xa0, 0xf2, 0x7a, 0x8f, 0xf6, 0xd4, 0x71, 0x72, + 0xa8, 0xae, 0x7d, 0x8a, 0x25, 0xa2, 0xc8, 0x3b, 0x3e, 0x6b, 0x9a, 0xb2, 0xd4, 0x57, 0x8e, 0xcb, + 0x52, 0x5f, 0x87, 0xea, 0x35, 0x97, 0xfb, 0x00, 0x6a, 
0xff, 0x52, 0x04, 0x88, 0x7c, 0xac, 0xc8, + 0xd7, 0x0b, 0xf0, 0x50, 0x38, 0xe0, 0x02, 0xa1, 0x75, 0xcf, 0xd9, 0xba, 0xd5, 0xc9, 0x6d, 0xb5, + 0xcf, 0x1a, 0xec, 0x7c, 0x06, 0x5a, 0xcd, 0x62, 0x87, 0xd9, 0xb5, 0x20, 0x08, 0x35, 0xda, 0xe9, + 0x06, 0xbb, 0xf3, 0x96, 0x27, 0x7b, 0x60, 0xa6, 0x2b, 0xdf, 0x65, 0x49, 0x23, 0x8a, 0xca, 0xad, + 0x21, 0x1f, 0x44, 0x2a, 0x07, 0x43, 0x1c, 0xb2, 0x05, 0x35, 0xc7, 0x7d, 0xd5, 0x67, 0xe2, 0x90, + 0xdd, 0xf1, 0x85, 0xe1, 0x45, 0x2e, 0xc4, 0x2a, 0xac, 0xbc, 0xf2, 0x05, 0xab, 0x8e, 0x14, 0xf6, + 0xd7, 0x8a, 0x70, 0x3a, 0x43, 0x0e, 0xe4, 0x05, 0x38, 0x29, 0xdd, 0xd9, 0xa2, 0xf0, 0xc4, 0x85, + 0x28, 0x3c, 0x71, 0x2b, 0x95, 0x87, 0x7d, 0xd4, 0xe4, 0x55, 0x00, 0xdd, 0x30, 0xa8, 0xef, 0xaf, + 0xb8, 0xa6, 0xd2, 0x47, 0x9f, 0xdf, 0xdf, 0x9b, 0x84, 0xd9, 0x30, 0xf5, 0xf6, 0xde, 0xe4, 0x47, + 0xb2, 0x3c, 0x54, 0x53, 0x72, 0x8e, 0x0a, 0x60, 0x0c, 0x92, 0x7c, 0x0e, 0x40, 0x6c, 0xbd, 0xc2, + 0x4b, 0xf4, 0x77, 0xb1, 0x57, 0x4c, 0xa9, 0x70, 0x45, 0x53, 0x9f, 0xea, 0xe9, 0x4e, 0x60, 0x05, + 0xbb, 0x22, 0x66, 0xc9, 0x8d, 0x10, 0x05, 0x63, 0x88, 0xda, 0x9f, 0x14, 0xa1, 0xa6, 0x2c, 0xa5, + 0xf7, 0xc0, 0x3c, 0xd6, 0x4e, 0x98, 0xc7, 0x8e, 0xc8, 0x27, 0x35, 0xcb, 0x38, 0xe6, 0xa6, 0x8c, + 0x63, 0x8b, 0xf9, 0x59, 0xdd, 0xd9, 0x34, 0xf6, 0xad, 0x22, 0x8c, 0x2b, 0xd2, 0xbc, 0x86, 0xb1, + 0x4f, 0xc0, 0x09, 0x71, 0x96, 0xbc, 0xa2, 0xdf, 0x12, 0xe1, 0x5b, 0xb8, 0xc0, 0xca, 0xc2, 0x0d, + 0xb4, 0x99, 0xcc, 0xc2, 0x34, 0x2d, 0xeb, 0xd6, 0x22, 0x69, 0x9d, 0xed, 0x23, 0xc4, 0xe9, 0x93, + 0xd8, 0xef, 0xf0, 0x6e, 0xdd, 0x4c, 0xe5, 0x61, 0x1f, 0x75, 0xda, 0x32, 0x57, 0x3e, 0x06, 0xcb, + 0xdc, 0xdf, 0x14, 0x60, 0x34, 0x92, 0xd7, 0xb1, 0xdb, 0xe5, 0x36, 0x93, 0x76, 0xb9, 0xd9, 0xdc, + 0xdd, 0x61, 0x80, 0x55, 0xee, 0x17, 0xab, 0x90, 0x70, 0x8d, 0x26, 0x1b, 0x70, 0xce, 0xca, 0x74, + 0xf0, 0x8a, 0xcd, 0x36, 0xe1, 0x5d, 0xdf, 0xa5, 0x81, 0x94, 0x78, 0x07, 0x14, 0xd2, 0x83, 0xda, + 0x0e, 0xf5, 0x02, 0xcb, 0xa0, 0xea, 0xfb, 0x16, 0x73, 0xab, 0x64, 0xd2, 0xf6, 0x18, 0xca, 
0xf4, + 0x86, 0x64, 0x80, 0x21, 0x2b, 0xb2, 0x01, 0x15, 0x6a, 0xb6, 0xa9, 0x0a, 0xa8, 0x93, 0x33, 0x5c, + 0x65, 0x28, 0x4f, 0xf6, 0xe6, 0xa3, 0x80, 0x26, 0x3e, 0xd4, 0x6d, 0x75, 0xb6, 0x24, 0xfb, 0xe1, + 0xf0, 0x0a, 0x56, 0x78, 0x4a, 0x15, 0xdd, 0xb5, 0x0f, 0x93, 0x30, 0xe2, 0x43, 0xb6, 0x43, 0x23, + 0x57, 0xe5, 0x88, 0x26, 0x8f, 0x3b, 0x98, 0xb8, 0x7c, 0xa8, 0xdf, 0xd4, 0x03, 0xea, 0x75, 0x74, + 0x6f, 0x5b, 0xee, 0x36, 0x86, 0xff, 0xc2, 0x97, 0x14, 0x52, 0xf4, 0x85, 0x61, 0x12, 0x46, 0x7c, + 0x88, 0x0b, 0xf5, 0x40, 0xaa, 0xcf, 0xca, 0x92, 0x37, 0x3c, 0x53, 0xa5, 0x88, 0xfb, 0xd2, 0x45, + 0x5a, 0xbd, 0x62, 0xc4, 0x83, 0xec, 0x24, 0x42, 0xf9, 0x8a, 0x00, 0xce, 0xcd, 0x1c, 0x16, 0x61, + 0x09, 0x15, 0x2d, 0x37, 0xd9, 0x21, 0x81, 0xb5, 0xb7, 0x2b, 0xd1, 0xb4, 0x7c, 0xaf, 0xed, 0x54, + 0x4f, 0x25, 0xed, 0x54, 0x17, 0xd2, 0x76, 0xaa, 0xd4, 0x11, 0xe5, 0xe1, 0x9d, 0x2a, 0x53, 0x16, + 0xa2, 0xf2, 0x31, 0x58, 0x88, 0x9e, 0x84, 0xc6, 0x0e, 0x9f, 0x09, 0x44, 0x74, 0x9e, 0x0a, 0x5f, + 0x46, 0xf8, 0xcc, 0x7e, 0x23, 0x4a, 0xc6, 0x38, 0x0d, 0x2b, 0x22, 0x34, 0x90, 0x28, 0xbc, 0xa9, + 0x2c, 0xd2, 0x8a, 0x92, 0x31, 0x4e, 0xc3, 0xfd, 0xb1, 0x2c, 0x67, 0x5b, 0x14, 0xa8, 0xf2, 0x02, + 0xc2, 0x1f, 0x4b, 0x25, 0x62, 0x94, 0x4f, 0x2e, 0x41, 0xad, 0x67, 0x6e, 0x0a, 0xda, 0x1a, 0xa7, + 0xe5, 0x1a, 0xe6, 0xfa, 0xfc, 0x82, 0x8c, 0x16, 0xa4, 0x72, 0x59, 0x4d, 0x3a, 0x7a, 0x57, 0x65, + 0xf0, 0xbd, 0xa1, 0xac, 0xc9, 0x4a, 0x94, 0x8c, 0x71, 0x1a, 0xf2, 0x31, 0x18, 0xf7, 0xa8, 0xd9, + 0x33, 0x68, 0x58, 0x0a, 0x78, 0x29, 0x6e, 0x15, 0xc5, 0x44, 0x0e, 0xa6, 0x28, 0x07, 0xd8, 0xb9, + 0x1a, 0x43, 0xd9, 0xb9, 0xbe, 0x57, 0x00, 0xd2, 0xef, 0xbf, 0x4c, 0xb6, 0x60, 0xc4, 0xe1, 0xd6, + 0xaf, 0xdc, 0x01, 0x91, 0x63, 0x46, 0x34, 0x31, 0x2d, 0xc9, 0x04, 0x89, 0x4f, 0x1c, 0xa8, 0xd1, + 0x5b, 0x01, 0xf5, 0x9c, 0xf0, 0x3e, 0xc3, 0xd1, 0x04, 0x5f, 0x16, 0xbb, 0x01, 0x89, 0x8c, 0x21, + 0x0f, 0xed, 0x07, 0x45, 0x68, 0xc4, 0xe8, 0xee, 0xb6, 0xa9, 0xe4, 0x57, 0xaa, 0x85, 0xd1, 0x69, + 0xdd, 0xb3, 0xe5, 0x08, 0x8b, 
0x5d, 0xa9, 0x96, 0x59, 0xb8, 0x8c, 0x71, 0x3a, 0x32, 0x03, 0xd0, + 0xd1, 0xfd, 0x80, 0x7a, 0x7c, 0xf5, 0x4d, 0x5d, 0x64, 0x5e, 0x09, 0x73, 0x30, 0x46, 0x45, 0x2e, + 0xca, 0xf0, 0xd9, 0xe5, 0x64, 0xe0, 0xb9, 0x01, 0xb1, 0xb1, 0x2b, 0x47, 0x10, 0x1b, 0x9b, 0xb4, + 0xe1, 0xa4, 0xaa, 0xb5, 0xca, 0x3d, 0x5c, 0x58, 0x32, 0xb1, 0x7f, 0x49, 0x41, 0x60, 0x1f, 0xa8, + 0xf6, 0x76, 0x01, 0xc6, 0x12, 0x26, 0x0f, 0x11, 0x32, 0x4e, 0x79, 0xdf, 0x27, 0x42, 0xc6, 0xc5, + 0x9c, 0xe6, 0x9f, 0x80, 0x11, 0x21, 0xa0, 0xb4, 0x53, 0x9d, 0x10, 0x21, 0xca, 0x5c, 0x36, 0x97, + 0x49, 0xa3, 0x6a, 0x7a, 0x2e, 0x93, 0x56, 0x57, 0x54, 0xf9, 0xc2, 0xdc, 0x2e, 0x6a, 0xd7, 0x6f, + 0x6e, 0x17, 0xe9, 0x18, 0x52, 0x68, 0x3f, 0x2c, 0x01, 0x77, 0x41, 0x21, 0xcf, 0x42, 0xbd, 0x43, + 0x8d, 0x2d, 0xdd, 0xb1, 0x7c, 0x15, 0x32, 0x92, 0xed, 0x6e, 0xeb, 0x2b, 0x2a, 0xf1, 0x36, 0x03, + 0x98, 0x6d, 0x2d, 0x73, 0x2f, 0xef, 0x88, 0x96, 0x18, 0x30, 0xd2, 0xf6, 0x7d, 0xbd, 0x6b, 0xe5, + 0x3e, 0x01, 0x15, 0x21, 0xfa, 0xc4, 0x20, 0x12, 0xcf, 0x28, 0xa1, 0x89, 0x01, 0x95, 0xae, 0xad, + 0x5b, 0x4e, 0xee, 0x7f, 0x94, 0xb0, 0x2f, 0x58, 0x65, 0x48, 0xc2, 0xa4, 0xc3, 0x1f, 0x51, 0x60, + 0x93, 0x1e, 0x34, 0x7c, 0xc3, 0xd3, 0x3b, 0xfe, 0x96, 0x3e, 0xf3, 0xf4, 0x33, 0xb9, 0x95, 0xa4, + 0x88, 0x95, 0x98, 0xb3, 0xe7, 0x70, 0x76, 0xa5, 0x75, 0x65, 0x76, 0xe6, 0xe9, 0x67, 0x30, 0xce, + 0x27, 0xce, 0xf6, 0xe9, 0x27, 0x67, 0x64, 0xbf, 0x3f, 0x72, 0xb6, 0x4f, 0x3f, 0x39, 0x83, 0x71, + 0x3e, 0xda, 0x7f, 0x14, 0xa0, 0x1e, 0xd2, 0x92, 0x75, 0x00, 0x36, 0x02, 0x65, 0x50, 0xbd, 0x43, + 0x05, 0xb8, 0xe7, 0xbb, 0xe2, 0xf5, 0xb0, 0x30, 0xc6, 0x80, 0x32, 0xa2, 0x0e, 0x16, 0x8f, 0x3a, + 0xea, 0xe0, 0x34, 0xd4, 0xb7, 0x74, 0xc7, 0xf4, 0xb7, 0xf4, 0x6d, 0x31, 0x11, 0xc5, 0xe2, 0x70, + 0x5e, 0x51, 0x19, 0x18, 0xd1, 0x68, 0xff, 0x5a, 0x01, 0x71, 0x6c, 0xc9, 0x86, 0x8a, 0x69, 0xf9, + 0xc2, 0x6f, 0xb6, 0xc0, 0x4b, 0x86, 0x43, 0x65, 0x5e, 0xa6, 0x63, 0x48, 0x41, 0xce, 0x42, 0xa9, + 0x63, 0x39, 0xf2, 0xc4, 0x83, 0x1b, 0xbc, 0x56, 0x2c, 0x07, 0x59, 
0x1a, 0xcf, 0xd2, 0x6f, 0x49, + 0x97, 0x27, 0x91, 0xa5, 0xdf, 0x42, 0x96, 0xc6, 0xb6, 0xa0, 0xb6, 0xeb, 0x6e, 0x6f, 0xe8, 0xc6, + 0xb6, 0xf2, 0x8c, 0x2a, 0xf3, 0x85, 0x90, 0x6f, 0x41, 0x97, 0x93, 0x59, 0x98, 0xa6, 0x25, 0x8b, + 0x70, 0xc2, 0x70, 0x5d, 0xdb, 0x74, 0x6f, 0x3a, 0xaa, 0xb8, 0x50, 0x1d, 0xf8, 0x49, 0xc2, 0x3c, + 0xed, 0x7a, 0xd4, 0x60, 0xfa, 0xc5, 0x5c, 0x92, 0x08, 0xd3, 0xa5, 0xc8, 0x3a, 0x3c, 0xfc, 0x06, + 0xf5, 0x5c, 0x39, 0x5d, 0xb4, 0x6c, 0x4a, 0xbb, 0x0a, 0x50, 0x28, 0x16, 0xdc, 0x53, 0xeb, 0x33, + 0xd9, 0x24, 0x38, 0xa8, 0x2c, 0xf7, 0xf9, 0xd4, 0xbd, 0x36, 0x0d, 0x56, 0x3d, 0xd7, 0xa0, 0xbe, + 0x6f, 0x39, 0x6d, 0x05, 0x5b, 0x8d, 0x60, 0xd7, 0xb2, 0x49, 0x70, 0x50, 0x59, 0xf2, 0x32, 0x4c, + 0x88, 0x2c, 0xb1, 0x6a, 0xcf, 0xee, 0xe8, 0x96, 0xad, 0x6f, 0x58, 0xb6, 0xfa, 0x27, 0xd7, 0x98, + 0x38, 0xa0, 0x58, 0x1b, 0x40, 0x83, 0x03, 0x4b, 0xf3, 0x3f, 0x69, 0xc9, 0xe3, 0xa9, 0x55, 0xea, + 0xf1, 0x7e, 0x20, 0xf5, 0x19, 0xf1, 0x27, 0xad, 0x54, 0x1e, 0xf6, 0x51, 0x13, 0x84, 0x33, 0xfc, + 0xb8, 0x7b, 0xbd, 0x9b, 0x12, 0xba, 0xd4, 0x70, 0xf8, 0x39, 0x54, 0x2b, 0x93, 0x02, 0x07, 0x94, + 0x64, 0xdf, 0xcb, 0x73, 0xe6, 0xdd, 0x9b, 0x4e, 0x1a, 0xb5, 0x11, 0x7d, 0x6f, 0x6b, 0x00, 0x0d, + 0x0e, 0x2c, 0xad, 0xfd, 0x71, 0x11, 0xc6, 0x12, 0x37, 0x9f, 0xef, 0xbb, 0x1b, 0xa6, 0x4c, 0x55, + 0xec, 0xf8, 0xed, 0xa5, 0xf9, 0x2b, 0x54, 0x37, 0xa9, 0x77, 0x95, 0xaa, 0x5b, 0xea, 0x7c, 0xf4, + 0xaf, 0x24, 0x72, 0x30, 0x45, 0x49, 0x36, 0xa1, 0x22, 0x0c, 0x9f, 0x79, 0xff, 0x69, 0xa0, 0x64, + 0xc4, 0xad, 0x9f, 0x7c, 0x6d, 0x10, 0xb6, 0x4f, 0x01, 0xaf, 0x05, 0x30, 0x1a, 0xa7, 0x60, 0x23, + 0x3e, 0xd2, 0xaa, 0xaa, 0x09, 0x8d, 0x6a, 0x09, 0x4a, 0x41, 0x30, 0xec, 0xdd, 0x55, 0x61, 0x48, + 0x5f, 0x5b, 0x46, 0x86, 0xa1, 0x6d, 0xb2, 0xb6, 0xf3, 0x7d, 0xcb, 0x75, 0x64, 0x20, 0xe3, 0x75, + 0xa8, 0x06, 0xd2, 0x96, 0x34, 0xdc, 0xdd, 0x5b, 0x6e, 0xd7, 0x55, 0x76, 0x24, 0x85, 0xa5, 0xfd, + 0x6d, 0x11, 0xea, 0xe1, 0xbe, 0xef, 0x00, 0x01, 0x82, 0x5d, 0xa8, 0x87, 0x8e, 0x31, 0xb9, 0xff, + 0x4f, 
0x16, 0xf9, 0x6b, 0xf0, 0xad, 0x4a, 0xf8, 0x8a, 0x11, 0x8f, 0xb8, 0xd3, 0x4d, 0x29, 0x87, + 0xd3, 0x4d, 0x17, 0xaa, 0x81, 0x67, 0xb5, 0xdb, 0x52, 0x09, 0xcd, 0xe3, 0x75, 0x13, 0x8a, 0x6b, + 0x4d, 0x00, 0x4a, 0xc9, 0x8a, 0x17, 0x54, 0x6c, 0xb4, 0xd7, 0xe0, 0x64, 0x9a, 0x92, 0x6b, 0x68, + 0xc6, 0x16, 0x35, 0x7b, 0xb6, 0x92, 0x71, 0xa4, 0xa1, 0xc9, 0x74, 0x0c, 0x29, 0xd8, 0x2e, 0x8d, + 0x35, 0xd3, 0x1b, 0xae, 0xa3, 0xf6, 0xbf, 0x5c, 0xd9, 0x5d, 0x93, 0x69, 0x18, 0xe6, 0x6a, 0xff, + 0x5c, 0x82, 0xb3, 0xd1, 0xee, 0x7d, 0x45, 0x77, 0xf4, 0xf6, 0x01, 0x7e, 0x4a, 0xf5, 0xfe, 0x6d, + 0x86, 0xc3, 0x46, 0x79, 0x2f, 0xdd, 0x07, 0x51, 0xde, 0x7f, 0x54, 0x00, 0xee, 0xc4, 0x47, 0xbe, + 0x00, 0xa3, 0x7a, 0xec, 0x7f, 0x84, 0xb2, 0x39, 0x2f, 0xe7, 0x6e, 0x4e, 0xee, 0x2b, 0x18, 0x3a, + 0xa5, 0xc4, 0x53, 0x31, 0xc1, 0x90, 0xb8, 0x50, 0xdb, 0xd4, 0x6d, 0x9b, 0x29, 0x2d, 0xb9, 0x4f, + 0x23, 0x12, 0xcc, 0x79, 0x37, 0x5f, 0x90, 0xd0, 0x18, 0x32, 0xd1, 0xfe, 0xa9, 0x00, 0x63, 0x2d, + 0xdb, 0x32, 0x2d, 0xa7, 0x7d, 0x8c, 0xe1, 0xdd, 0xaf, 0x43, 0xc5, 0xb7, 0x2d, 0x93, 0x0e, 0x39, + 0x8f, 0x8b, 0x15, 0x84, 0x01, 0xa0, 0xc0, 0x49, 0xc6, 0x8b, 0x2f, 0x1d, 0x20, 0x5e, 0xfc, 0x4f, + 0x46, 0x40, 0x3a, 0x82, 0x92, 0x1e, 0xd4, 0xdb, 0x2a, 0x0c, 0xb5, 0xfc, 0xc6, 0x2b, 0x39, 0x42, + 0x98, 0x25, 0x02, 0x5a, 0x8b, 0x59, 0x37, 0x4c, 0xc4, 0x88, 0x13, 0xa1, 0xc9, 0x5f, 0x50, 0xce, 0xe7, 0xfc, 0x05, 0xa5, 0x60, 0xd7, 0xff, 0x13, 0x4a, 0x1d, 0xca, 0x5b, 0x41, 0xd0, 0x95, 0xe3, - 0x6a, 0xf8, 0x48, 0xdf, 0xe8, 0x16, 0x0d, 0x61, 0x8d, 0xb0, 0x77, 0xe4, 0xd0, 0x8c, 0x85, 0xa3, - 0x87, 0x7f, 0x3e, 0x9a, 0xcb, 0xb5, 0x23, 0x1d, 0x67, 0xc1, 0xde, 0x91, 0x43, 0x93, 0xcf, 0x43, - 0x23, 0xf0, 0x74, 0xc7, 0xdf, 0x74, 0xbd, 0x0e, 0xf5, 0xe4, 0x32, 0x6e, 0x21, 0xc7, 0x5f, 0x18, - 0xd7, 0x22, 0x34, 0xb1, 0xd5, 0x95, 0x48, 0xc2, 0x38, 0x37, 0xb2, 0x0d, 0xb5, 0x9e, 0x29, 0x2a, - 0x26, 0xfd, 0x1b, 0xb3, 0x79, 0x7e, 0xac, 0x19, 0xdb, 0x6f, 0x56, 0x6f, 0x18, 0x32, 0x48, 0xfe, - 0xe4, 0xab, 0x7a, 0x54, 0x3f, 0xf9, 0x8a, 0xf7, 
0xc6, 0xac, 0x23, 0xfe, 0xa4, 0x23, 0x2d, 0x4a, - 0xa7, 0x2d, 0xc3, 0x65, 0x16, 0x72, 0x1b, 0x7b, 0x82, 0x65, 0x23, 0xb4, 0x4a, 0x9d, 0x36, 0x2a, - 0x1e, 0x5a, 0x07, 0xa4, 0x1b, 0x9a, 0x18, 0x89, 0x5f, 0x61, 0x88, 0x73, 0x27, 0xd3, 0x07, 0x9b, - 0x0f, 0xc2, 0x7f, 0x32, 0xc4, 0xae, 0xe2, 0xcd, 0xfc, 0xe7, 0x85, 0xf6, 0xb7, 0x45, 0x28, 0xad, - 0x2d, 0xb7, 0xc4, 0xf5, 0x7a, 0xfc, 0x3f, 0x33, 0xb4, 0xb5, 0x6d, 0x75, 0x6f, 0x50, 0xcf, 0xda, - 0xdc, 0x95, 0xab, 0xd3, 0xd8, 0xf5, 0x7a, 0x69, 0x0a, 0xcc, 0x28, 0x45, 0x5e, 0x81, 0x51, 0x43, - 0x9f, 0xa3, 0x5e, 0x30, 0xcc, 0xda, 0x9b, 0x1f, 0xb0, 0x9b, 0x9b, 0x8d, 0x8a, 0x63, 0x02, 0x8c, - 0xac, 0x03, 0x18, 0x11, 0x74, 0xe9, 0xd0, 0x1e, 0x83, 0x18, 0x70, 0x0c, 0x88, 0x20, 0xd4, 0xb7, - 0x19, 0x29, 0x47, 0x2d, 0x1f, 0x06, 0x95, 0xf7, 0x9c, 0xab, 0xaa, 0x2c, 0x46, 0x30, 0x9a, 0x03, - 0x63, 0x89, 0xff, 0x63, 0x90, 0x8f, 0x42, 0xcd, 0xed, 0xc6, 0xa6, 0xd3, 0x3a, 0x5f, 0x4e, 0xd7, - 0xae, 0xcb, 0xb4, 0xdb, 0x7b, 0x93, 0x63, 0xcb, 0x6e, 0xdb, 0x32, 0x54, 0x02, 0x86, 0xe4, 0x44, - 0x83, 0x11, 0x7e, 0x2a, 0x46, 0xfd, 0x1d, 0x83, 0xeb, 0x0e, 0x7e, 0x81, 0xbd, 0x8f, 0x32, 0x47, - 0xfb, 0x52, 0x19, 0xa2, 0xcd, 0x1b, 0xe2, 0xc3, 0x88, 0x88, 0xfa, 0x95, 0x33, 0xf7, 0xb1, 0x06, - 0x18, 0x4b, 0x56, 0xa4, 0x0d, 0xa5, 0xd7, 0xdc, 0x8d, 0xdc, 0x13, 0x77, 0xec, 0x38, 0xac, 0x70, - 0x27, 0xc5, 0x12, 0x90, 0x71, 0x20, 0xbf, 0x5a, 0x80, 0x53, 0x7e, 0xda, 0xe8, 0x94, 0xdd, 0x01, - 0xf3, 0x5b, 0xd7, 0x69, 0x33, 0x56, 0x46, 0x50, 0x0e, 0xca, 0xc6, 0xfe, 0xba, 0x30, 0xf9, 0x8b, - 0x5d, 0x15, 0xd9, 0x9d, 0x16, 0x73, 0xfe, 0xd3, 0x2d, 0x29, 0xff, 0x64, 0x1a, 0x4a, 0x56, 0xda, - 0x57, 0x8a, 0xd0, 0x88, 0xcd, 0xd6, 0xb9, 0x7f, 0xba, 0x72, 0x2b, 0xf5, 0xd3, 0x95, 0xd5, 0xe1, - 0x37, 0x19, 0xa3, 0x5a, 0x1d, 0xf7, 0x7f, 0x57, 0xfe, 0xb4, 0x08, 0xa5, 0xf5, 0xf9, 0x85, 0xe4, - 0x72, 0xb1, 0x70, 0x0f, 0x96, 0x8b, 0x5b, 0x50, 0xdd, 0xe8, 0x59, 0x76, 0x60, 0x39, 0xb9, 0x0f, - 0xec, 0xab, 0x7f, 0xd4, 0xc8, 0x73, 0xaf, 0x02, 0x15, 0x15, 0x3c, 0x69, 0x43, 0xb5, 
0x2d, 0x6e, - 0x4c, 0xcb, 0x1d, 0x7a, 0x25, 0x6f, 0x5e, 0x13, 0x8c, 0xe4, 0x0b, 0x2a, 0x74, 0xed, 0x0b, 0x20, - 0xff, 0x72, 0x4d, 0xfc, 0xe3, 0x91, 0x66, 0x68, 0x8c, 0x66, 0x49, 0x54, 0xfb, 0x3c, 0x84, 0x96, - 0xc0, 0x3d, 0x6f, 0x4e, 0xed, 0x5f, 0x0b, 0x90, 0x34, 0x7e, 0xee, 0x7d, 0x8f, 0xda, 0x4e, 0xf7, - 0xa8, 0xf9, 0xa3, 0x18, 0x80, 0xd9, 0x9d, 0x4a, 0xfb, 0x83, 0x22, 0x8c, 0xdc, 0xb3, 0x83, 0x96, - 0x34, 0x11, 0x49, 0x36, 0x97, 0x73, 0x72, 0x1c, 0x18, 0x47, 0xd6, 0x49, 0xc5, 0x91, 0xe5, 0xfd, - 0xb3, 0xe6, 0x5d, 0xa2, 0xc8, 0xfe, 0xb2, 0x00, 0x72, 0x6a, 0x5e, 0x72, 0xfc, 0x40, 0x77, 0x0c, - 0xfe, 0x83, 0x77, 0xa9, 0x07, 0xf2, 0x86, 0x2b, 0xc8, 0x90, 0x1e, 0xa1, 0xfa, 0xf9, 0xb3, 0x9a, - 0xf7, 0xc9, 0x87, 0xa1, 0xb6, 0xe5, 0xfa, 0x01, 0x9f, 0xeb, 0x8b, 0x49, 0xdf, 0xce, 0x15, 0x99, - 0x8e, 0x21, 0x45, 0x7a, 0x5b, 0xaf, 0x32, 0x78, 0x5b, 0x4f, 0xfb, 0x56, 0x11, 0x46, 0xdf, 0x2b, - 0xa7, 0x45, 0xb3, 0xe2, 0xee, 0x4a, 0x39, 0xe3, 0xee, 0xca, 0x87, 0x89, 0xbb, 0xd3, 0xbe, 0x5b, - 0x00, 0xb8, 0x67, 0x47, 0x55, 0xcd, 0x64, 0x48, 0x5c, 0xee, 0x7e, 0x95, 0x1d, 0x10, 0xf7, 0xbb, - 0x15, 0xf5, 0x49, 0x3c, 0x1c, 0xee, 0xcd, 0x02, 0x8c, 0xeb, 0x89, 0x10, 0xb3, 0xdc, 0xe6, 0x65, - 0x2a, 0x62, 0x2d, 0x3c, 0x96, 0x97, 0x4c, 0xc7, 0x14, 0x5b, 0xf2, 0x5c, 0x74, 0x4d, 0xea, 0xb5, - 0xa8, 0xdb, 0xf7, 0xdd, 0x6f, 0xca, 0x4d, 0x9d, 0x04, 0xe5, 0x5d, 0x42, 0xfa, 0x4a, 0x47, 0x12, - 0xd2, 0x17, 0x3f, 0xac, 0x54, 0xbe, 0xe3, 0x61, 0xa5, 0x1d, 0xa8, 0x6f, 0x7a, 0x6e, 0x87, 0x47, - 0xcd, 0xc9, 0x7f, 0x72, 0x5e, 0xce, 0xa1, 0x53, 0xa2, 0xbf, 0x51, 0x47, 0xaa, 0x75, 0x41, 0xe1, - 0x63, 0xc4, 0x8a, 0x3b, 0xa5, 0x5d, 0xc1, 0x75, 0xe4, 0x28, 0xb9, 0x86, 0x73, 0xc9, 0x9a, 0x40, - 0x47, 0xc5, 0x26, 0x19, 0x29, 0x57, 0xbd, 0x37, 0x91, 0x72, 0xda, 0xcf, 0x97, 0xd5, 0x04, 0x76, - 0xdf, 0xdd, 0xc8, 0xf7, 0xde, 0x3f, 0xe2, 0x98, 0x3e, 0x7f, 0x58, 0xbd, 0x87, 0xe7, 0x0f, 0x6b, - 0x43, 0xc5, 0x65, 0xed, 0x95, 0x20, 0xb5, 0x76, 0x7a, 0x7f, 0x87, 0xe2, 0x3f, 0xd5, 0x0e, 0xc5, - 0x5b, 0x45, 0x88, 0x26, 
0x82, 0x43, 0x86, 0x5a, 0xbc, 0x0c, 0xb5, 0x8e, 0x7e, 0x6b, 0x9e, 0xda, - 0xfa, 0x6e, 0x9e, 0x1f, 0x29, 0xae, 0x48, 0x0c, 0x0c, 0xd1, 0x88, 0x0f, 0x60, 0x85, 0x97, 0x19, - 0xe7, 0xf6, 0x38, 0x47, 0xf7, 0x22, 0x0b, 0x9f, 0x56, 0xf4, 0x8e, 0x31, 0x36, 0xda, 0x5f, 0x14, - 0x41, 0xde, 0x7a, 0x4d, 0x28, 0x54, 0x36, 0xad, 0x5b, 0xd4, 0xcc, 0x1d, 0x76, 0x18, 0xfb, 0xbd, - 0xad, 0x70, 0xa9, 0xf3, 0x04, 0x14, 0xe8, 0xdc, 0x57, 0x2a, 0xb6, 0x48, 0xa4, 0xfc, 0x72, 0xf8, - 0x4a, 0xe3, 0x5b, 0x2d, 0xd2, 0x57, 0x2a, 0x92, 0x50, 0xf1, 0x10, 0xae, 0x59, 0xbe, 0x4f, 0x2d, - 0x45, 0x9a, 0xc7, 0x35, 0x1b, 0xdb, 0xef, 0x56, 0xae, 0x59, 0x5f, 0x1c, 0x40, 0x96, 0x3c, 0x9a, - 0x9f, 0xfd, 0xce, 0xf7, 0x2e, 0x3c, 0xf0, 0xdd, 0xef, 0x5d, 0x78, 0xe0, 0x9d, 0xef, 0x5d, 0x78, - 0xe0, 0x4b, 0xfb, 0x17, 0x0a, 0xdf, 0xd9, 0xbf, 0x50, 0xf8, 0xee, 0xfe, 0x85, 0xc2, 0x3b, 0xfb, - 0x17, 0x0a, 0x7f, 0xbf, 0x7f, 0xa1, 0xf0, 0x4b, 0xff, 0x70, 0xe1, 0x81, 0xcf, 0x3c, 0x1b, 0x55, - 0x61, 0x5a, 0x55, 0x61, 0x5a, 0x31, 0x9c, 0xee, 0x6e, 0xb7, 0xa7, 0x59, 0x15, 0xa2, 0x14, 0x55, - 0x85, 0xff, 0x08, 0x00, 0x00, 0xff, 0xff, 0xfe, 0x6f, 0x91, 0x67, 0x6c, 0x92, 0x00, 0x00, + 0x6a, 0x78, 0x4f, 0xdf, 0x28, 0x8a, 0x86, 0xd0, 0x46, 0xd8, 0x3b, 0x72, 0x68, 0xc6, 0xc2, 0xd1, + 0xc3, 0x3f, 0x1f, 0xcd, 0xe5, 0x3a, 0x91, 0x8e, 0xb3, 0x60, 0xef, 0xc8, 0xa1, 0xc9, 0xe7, 0xa1, + 0x11, 0x78, 0xba, 0xe3, 0x6f, 0xba, 0x5e, 0x87, 0x7a, 0x72, 0x1b, 0xb7, 0x90, 0xe3, 0x2f, 0x8c, + 0x6b, 0x11, 0x9a, 0x38, 0xea, 0x4a, 0x24, 0x61, 0x9c, 0x1b, 0xd9, 0x86, 0x5a, 0xcf, 0x14, 0x15, + 0x93, 0xf6, 0x8d, 0xd9, 0x3c, 0x3f, 0xd6, 0x8c, 0x9d, 0x37, 0xab, 0x37, 0x0c, 0x19, 0x24, 0x7f, + 0xf2, 0x55, 0x3d, 0xaa, 0x9f, 0x7c, 0xc5, 0x7b, 0x63, 0xd6, 0x15, 0x7f, 0xd2, 0x91, 0x1a, 0xa5, + 0xd3, 0x96, 0xee, 0x32, 0x0b, 0xb9, 0x95, 0x3d, 0xc1, 0xb2, 0x11, 0x6a, 0xa5, 0x4e, 0x1b, 0x15, + 0x0f, 0xad, 0x03, 0xd2, 0x0c, 0x4d, 0x8c, 0xc4, 0xaf, 0x30, 0xc4, 0xbd, 0x93, 0xe9, 0x83, 0xcd, + 0x07, 0xe1, 0x3f, 0x19, 0x62, 0xa1, 0x78, 0x33, 0xff, 0x79, 0xa1, 
0xfd, 0x5d, 0x11, 0x4a, 0x6b, + 0xcb, 0x2d, 0x11, 0x5e, 0x8f, 0xff, 0x67, 0x86, 0xb6, 0xb6, 0xad, 0xee, 0x0d, 0xea, 0x59, 0x9b, + 0xbb, 0x72, 0x77, 0x1a, 0x0b, 0xaf, 0x97, 0xa6, 0xc0, 0x8c, 0x52, 0xe4, 0x15, 0x18, 0x35, 0xf4, + 0x39, 0xea, 0x05, 0xc3, 0xec, 0xbd, 0xf9, 0x05, 0xbb, 0xb9, 0xd9, 0xa8, 0x38, 0x26, 0xc0, 0xc8, + 0x3a, 0x80, 0x11, 0x41, 0x97, 0x0e, 0x6d, 0x31, 0x88, 0x01, 0xc7, 0x80, 0x08, 0x42, 0x7d, 0x9b, + 0x91, 0x72, 0xd4, 0xf2, 0x61, 0x50, 0x79, 0xcf, 0xb9, 0xaa, 0xca, 0x62, 0x04, 0xa3, 0x39, 0x30, + 0x96, 0xf8, 0x3f, 0x06, 0xf9, 0x28, 0xd4, 0xdc, 0x6e, 0x6c, 0x3a, 0xad, 0xf3, 0xed, 0x74, 0xed, + 0xba, 0x4c, 0xbb, 0xbd, 0x37, 0x39, 0xb6, 0xec, 0xb6, 0x2d, 0x43, 0x25, 0x60, 0x48, 0x4e, 0x34, + 0x18, 0xe1, 0xb7, 0x62, 0xd4, 0xdf, 0x31, 0xf8, 0xda, 0xc1, 0x03, 0xd8, 0xfb, 0x28, 0x73, 0xb4, + 0x2f, 0x96, 0x21, 0x3a, 0xbc, 0x21, 0x3e, 0x8c, 0x08, 0xaf, 0x5f, 0x39, 0x73, 0x1f, 0xab, 0x83, + 0xb1, 0x64, 0x45, 0xda, 0x50, 0x7a, 0xcd, 0xdd, 0xc8, 0x3d, 0x71, 0xc7, 0xae, 0xc3, 0x0a, 0x73, + 0x52, 0x2c, 0x01, 0x19, 0x07, 0xf2, 0x6b, 0x05, 0x38, 0xe5, 0xa7, 0x95, 0x4e, 0xd9, 0x1d, 0x30, + 0xbf, 0x76, 0x9d, 0x56, 0x63, 0xa5, 0x07, 0xe5, 0xa0, 0x6c, 0xec, 0xaf, 0x0b, 0x93, 0xbf, 0x38, + 0x55, 0x91, 0xdd, 0x69, 0x31, 0xe7, 0x3f, 0xdd, 0x92, 0xf2, 0x4f, 0xa6, 0xa1, 0x64, 0xa5, 0x7d, + 0xb9, 0x08, 0x8d, 0xd8, 0x6c, 0x9d, 0xfb, 0xa7, 0x2b, 0xb7, 0x52, 0x3f, 0x5d, 0x59, 0x1d, 0xfe, + 0x90, 0x31, 0xaa, 0xd5, 0x71, 0xff, 0x77, 0xe5, 0xcf, 0x8a, 0x50, 0x5a, 0x9f, 0x5f, 0x48, 0x6e, + 0x17, 0x0b, 0xf7, 0x60, 0xbb, 0xb8, 0x05, 0xd5, 0x8d, 0x9e, 0x65, 0x07, 0x96, 0x93, 0xfb, 0xc2, + 0xbe, 0xfa, 0x47, 0x8d, 0xbc, 0xf7, 0x2a, 0x50, 0x51, 0xc1, 0x93, 0x36, 0x54, 0xdb, 0x22, 0x62, + 0x5a, 0x6e, 0xd7, 0x2b, 0x19, 0x79, 0x4d, 0x30, 0x92, 0x2f, 0xa8, 0xd0, 0xb5, 0x5d, 0x90, 0x7f, + 0xb9, 0xbe, 0xe7, 0xd2, 0xd4, 0x3e, 0x0f, 0xa1, 0x16, 0x70, 0xef, 0x99, 0xff, 0x5b, 0x01, 0x92, + 0x8a, 0xcf, 0xbd, 0xef, 0x4d, 0xdb, 0xe9, 0xde, 0x34, 0x7f, 0x14, 0x83, 0x2f, 0xbb, 0x43, 0x69, + 0x7f, 
0x58, 0x84, 0x91, 0x7b, 0x76, 0xc9, 0x92, 0x26, 0xbc, 0xc8, 0xe6, 0x72, 0x4e, 0x8c, 0x03, + 0x7d, 0xc8, 0x3a, 0x29, 0x1f, 0xb2, 0xbc, 0x7f, 0xd5, 0xbc, 0x8b, 0x07, 0xd9, 0x5f, 0x15, 0x40, + 0x4e, 0xcb, 0x4b, 0x8e, 0x1f, 0xe8, 0x8e, 0xc1, 0x7f, 0xee, 0x2e, 0xd7, 0x80, 0xbc, 0xae, 0x0a, + 0xd2, 0x9d, 0x47, 0x2c, 0xfb, 0xfc, 0x59, 0xcd, 0xf9, 0xe4, 0xc3, 0x50, 0xdb, 0x72, 0xfd, 0x80, + 0xcf, 0xf3, 0xc5, 0xa4, 0x5d, 0xe7, 0x8a, 0x4c, 0xc7, 0x90, 0x22, 0x7d, 0xa4, 0x57, 0x19, 0x7c, + 0xa4, 0xa7, 0x7d, 0xb3, 0x08, 0xa3, 0xef, 0x95, 0x9b, 0xa2, 0x59, 0x3e, 0x77, 0xa5, 0x9c, 0x3e, + 0x77, 0xe5, 0xc3, 0xf8, 0xdc, 0x69, 0xdf, 0x29, 0x00, 0xdc, 0xb3, 0x6b, 0xaa, 0x66, 0xd2, 0x1d, + 0x2e, 0x77, 0xbf, 0xca, 0x76, 0x86, 0xfb, 0xbd, 0x8a, 0xfa, 0x24, 0xee, 0x0a, 0xf7, 0x66, 0x01, + 0xc6, 0xf5, 0x84, 0x7b, 0x59, 0x6e, 0xd5, 0x32, 0xe5, 0xad, 0x16, 0x5e, 0xc9, 0x4b, 0xa6, 0x63, + 0x8a, 0x2d, 0x79, 0x2e, 0x0a, 0x91, 0x7a, 0x2d, 0xea, 0xf6, 0x7d, 0xb1, 0x4d, 0xb9, 0x9a, 0x93, + 0xa0, 0xbc, 0x8b, 0x3b, 0x5f, 0xe9, 0x48, 0xdc, 0xf9, 0xe2, 0x17, 0x95, 0xca, 0x77, 0xbc, 0xa8, + 0xb4, 0x03, 0xf5, 0x4d, 0xcf, 0xed, 0x70, 0x8f, 0x39, 0xf9, 0x3f, 0xce, 0xcb, 0x39, 0xd6, 0x94, + 0xe8, 0x4f, 0xd4, 0x91, 0x8d, 0x67, 0x41, 0xe1, 0x63, 0xc4, 0x8a, 0x1b, 0xa4, 0x5d, 0xc1, 0x75, + 0xe4, 0x28, 0xb9, 0x86, 0x73, 0xc9, 0x9a, 0x40, 0x47, 0xc5, 0x26, 0xe9, 0x25, 0x57, 0xbd, 0x37, + 0x5e, 0x72, 0xda, 0x2f, 0x94, 0xd5, 0x04, 0x76, 0xdf, 0x45, 0xe3, 0x7b, 0xef, 0x5f, 0x6f, 0x4c, + 0xdf, 0x3d, 0xac, 0xde, 0xc3, 0xbb, 0x87, 0xb5, 0xa1, 0x7c, 0xb2, 0xf6, 0x4a, 0x90, 0xda, 0x37, + 0xbd, 0x7f, 0x3a, 0xf1, 0x5f, 0xea, 0x74, 0xe2, 0xad, 0x22, 0x44, 0x13, 0xc1, 0x21, 0xdd, 0x2c, + 0x5e, 0x86, 0x5a, 0x47, 0xbf, 0x35, 0x4f, 0x6d, 0x7d, 0x37, 0xcf, 0x4f, 0x14, 0x57, 0x24, 0x06, + 0x86, 0x68, 0xc4, 0x07, 0xb0, 0xc2, 0x40, 0xc6, 0xb9, 0xad, 0xcd, 0x51, 0x4c, 0x64, 0x61, 0xcf, + 0x8a, 0xde, 0x31, 0xc6, 0x46, 0xfb, 0xcb, 0x22, 0xc8, 0x88, 0xd7, 0x84, 0x42, 0x65, 0xd3, 0xba, + 0x45, 0xcd, 0xdc, 0x2e, 0x87, 0xb1, 0x5f, 
0xdb, 0x0a, 0x73, 0x3a, 0x4f, 0x40, 0x81, 0xce, 0xed, + 0xa4, 0xe2, 0x78, 0x44, 0xca, 0x2f, 0x87, 0x9d, 0x34, 0x7e, 0xcc, 0x22, 0xed, 0xa4, 0x22, 0x09, + 0x15, 0x0f, 0x61, 0x96, 0xe5, 0x67, 0xd4, 0x52, 0xa4, 0x79, 0xcc, 0xb2, 0xb1, 0xb3, 0x6e, 0x65, + 0x96, 0xf5, 0xc5, 0xe5, 0x63, 0xc9, 0xa3, 0xf9, 0xd9, 0x6f, 0x7f, 0xf7, 0xc2, 0x03, 0xdf, 0xf9, + 0xee, 0x85, 0x07, 0xde, 0xf9, 0xee, 0x85, 0x07, 0xbe, 0xb8, 0x7f, 0xa1, 0xf0, 0xed, 0xfd, 0x0b, + 0x85, 0xef, 0xec, 0x5f, 0x28, 0xbc, 0xb3, 0x7f, 0xa1, 0xf0, 0x0f, 0xfb, 0x17, 0x0a, 0xbf, 0xfc, + 0x8f, 0x17, 0x1e, 0xf8, 0xcc, 0xb3, 0x51, 0x15, 0xa6, 0x55, 0x15, 0xa6, 0x15, 0xc3, 0xe9, 0xee, + 0x76, 0x7b, 0x9a, 0x55, 0x21, 0x4a, 0x51, 0x55, 0xf8, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x12, + 0x35, 0xa0, 0x09, 0x68, 0x92, 0x00, 0x00, } func (m *AbstractPodTemplate) Marshal() (dAtA []byte, err error) { @@ -8367,16 +8367,18 @@ func (m *UDSink) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - { - size, err := m.Container.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if m.Container != nil { + { + size, err := m.Container.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - i-- - dAtA[i] = 0xa return len(dAtA) - i, nil } @@ -10911,8 +10913,10 @@ func (m *UDSink) Size() (n int) { } var l int _ = l - l = m.Container.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Container != nil { + l = m.Container.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -12439,7 +12443,7 @@ func (this *UDSink) String() string { return "nil" } s := strings.Join([]string{`&UDSink{`, - `Container:` + strings.Replace(strings.Replace(this.Container.String(), "Container", "Container", 1), `&`, ``, 1) + `,`, + `Container:` + strings.Replace(this.Container.String(), "Container", "Container", 1) + `,`, `}`, }, "") return s @@ 
-29017,6 +29021,9 @@ func (m *UDSink) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } + if m.Container == nil { + m.Container = &Container{} + } if err := m.Container.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } diff --git a/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go index 5ba6a78c12..81c9cf56e6 100644 --- a/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go +++ b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go @@ -296,7 +296,9 @@ func (mv MonoVertex) simpleCopy() MonoVertex { } func (mv MonoVertex) GetPodSpec(req GetMonoVertexPodSpecReq) (*corev1.PodSpec, error) { - monoVtxBytes, err := json.Marshal(mv.simpleCopy()) + copiedSpec := mv.simpleCopy() + copiedSpec.Spec.Scale = Scale{} + monoVtxBytes, err := json.Marshal(copiedSpec) if err != nil { return nil, errors.New("failed to marshal mono vertex spec") } diff --git a/pkg/apis/numaflow/v1alpha1/mono_vertex_types_test.go b/pkg/apis/numaflow/v1alpha1/mono_vertex_types_test.go index 2f2fee9a39..aeb809db05 100644 --- a/pkg/apis/numaflow/v1alpha1/mono_vertex_types_test.go +++ b/pkg/apis/numaflow/v1alpha1/mono_vertex_types_test.go @@ -19,7 +19,47 @@ package v1alpha1 import ( "testing" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + resource "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" +) + +var ( + testMvtx = MonoVertex{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + }, + Spec: MonoVertexSpec{ + Scale: Scale{ + Min: ptr.To[int32](2), + Max: ptr.To[int32](4), + }, + Source: &Source{ + UDSource: &UDSource{ + Container: &Container{ + Image: "test-image1", + }, + }, + UDTransformer: &UDTransformer{ + Container: &Container{ + Image: "test-image2", + }, + }, + }, + Sink: &Sink{ + AbstractSink: AbstractSink{ + UDSink: &UDSink{ + Container: &Container{ + Image: "test-image3", + }, + }, + }, + }, + }, + } ) func 
TestMonoVertex_GetDaemonServiceObj(t *testing.T) { @@ -83,3 +123,51 @@ func TestMonoVertexStatus_MarkDeployFailed(t *testing.T) { t.Errorf("MarkDeployFailed should set the Message correctly, got %s", mvs.Message) } } + +func TestMonoVertexGetPodSpec(t *testing.T) { + + t.Run("test get pod spec - okay", func(t *testing.T) { + req := GetMonoVertexPodSpecReq{ + Image: "my-image", + PullPolicy: corev1.PullIfNotPresent, + Env: []corev1.EnvVar{ + { + Name: "ENV_VAR_NAME", + Value: "ENV_VAR_VALUE", + }, + }, + DefaultResources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("200m"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + }, + } + podSpec, err := testMvtx.GetPodSpec(req) + assert.NoError(t, err) + assert.Equal(t, 4, len(podSpec.Containers)) + assert.Equal(t, 1, len(podSpec.Volumes)) + assert.Equal(t, "my-image", podSpec.Containers[0].Image) + assert.Equal(t, corev1.PullIfNotPresent, podSpec.Containers[0].ImagePullPolicy) + assert.Equal(t, "100m", podSpec.Containers[0].Resources.Requests.Cpu().String()) + assert.Equal(t, "200m", podSpec.Containers[0].Resources.Limits.Cpu().String()) + assert.Equal(t, "100Mi", podSpec.Containers[0].Resources.Requests.Memory().String()) + assert.Equal(t, "200Mi", podSpec.Containers[0].Resources.Limits.Memory().String()) + assert.Equal(t, "test-image1", podSpec.Containers[1].Image) + assert.Equal(t, "test-image2", podSpec.Containers[2].Image) + assert.Equal(t, "test-image3", podSpec.Containers[3].Image) + for _, c := range podSpec.Containers { + assert.Equal(t, 1, len(c.VolumeMounts)) + } + envNames := []string{} + for _, env := range podSpec.Containers[0].Env { + envNames = append(envNames, env.Name) + } + assert.Contains(t, envNames, "ENV_VAR_NAME") + assert.Contains(t, envNames, EnvMonoVertexObject) + }) +} diff --git 
a/pkg/apis/numaflow/v1alpha1/openapi_generated.go b/pkg/apis/numaflow/v1alpha1/openapi_generated.go index ac9d1140ef..79d63b30c4 100644 --- a/pkg/apis/numaflow/v1alpha1/openapi_generated.go +++ b/pkg/apis/numaflow/v1alpha1/openapi_generated.go @@ -4899,8 +4899,7 @@ func schema_pkg_apis_numaflow_v1alpha1_UDSink(ref common.ReferenceCallback) comm Properties: map[string]spec.Schema{ "container": { SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Container"), + Ref: ref("github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Container"), }, }, }, diff --git a/pkg/apis/numaflow/v1alpha1/sink_test.go b/pkg/apis/numaflow/v1alpha1/sink_test.go index 32860fc1dd..0fe7f002af 100644 --- a/pkg/apis/numaflow/v1alpha1/sink_test.go +++ b/pkg/apis/numaflow/v1alpha1/sink_test.go @@ -45,7 +45,7 @@ func Test_Sink_getUDSinkContainer(t *testing.T) { x := Sink{ AbstractSink: AbstractSink{ UDSink: &UDSink{ - Container: Container{ + Container: &Container{ Image: "my-image", Args: []string{"my-arg"}, SecurityContext: &corev1.SecurityContext{}, @@ -84,7 +84,7 @@ func Test_Sink_getFallbackUDSinkContainer(t *testing.T) { x := Sink{ AbstractSink: AbstractSink{ UDSink: &UDSink{ - Container: Container{ + Container: &Container{ Image: "my-image", Args: []string{"my-arg"}, SecurityContext: &corev1.SecurityContext{}, @@ -96,7 +96,7 @@ func Test_Sink_getFallbackUDSinkContainer(t *testing.T) { }, Fallback: &AbstractSink{ UDSink: &UDSink{ - Container: Container{ + Container: &Container{ Image: "my-image", Args: []string{"my-arg"}, SecurityContext: &corev1.SecurityContext{}, diff --git a/pkg/apis/numaflow/v1alpha1/user_defined_sink.go b/pkg/apis/numaflow/v1alpha1/user_defined_sink.go index 4edc970e59..2f10670ae6 100644 --- a/pkg/apis/numaflow/v1alpha1/user_defined_sink.go +++ b/pkg/apis/numaflow/v1alpha1/user_defined_sink.go @@ -17,5 +17,5 @@ limitations under the License. 
package v1alpha1 type UDSink struct { - Container Container `json:"container" protobuf:"bytes,1,opt,name=container"` + Container *Container `json:"container" protobuf:"bytes,1,opt,name=container"` } diff --git a/pkg/apis/numaflow/v1alpha1/vertex_types_test.go b/pkg/apis/numaflow/v1alpha1/vertex_types_test.go index 4336e15e31..1a4534c3ec 100644 --- a/pkg/apis/numaflow/v1alpha1/vertex_types_test.go +++ b/pkg/apis/numaflow/v1alpha1/vertex_types_test.go @@ -324,7 +324,7 @@ func TestGetPodSpec(t *testing.T) { testObj.Spec.Sink = &Sink{ AbstractSink: AbstractSink{ UDSink: &UDSink{ - Container: Container{ + Container: &Container{ Image: "image", Command: []string{"cmd"}, Args: []string{"arg0"}, diff --git a/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go index 475ef0ea31..b450852954 100644 --- a/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go @@ -2524,7 +2524,11 @@ func (in *UDF) DeepCopy() *UDF { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *UDSink) DeepCopyInto(out *UDSink) { *out = *in - in.Container.DeepCopyInto(&out.Container) + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = new(Container) + (*in).DeepCopyInto(*out) + } return } diff --git a/pkg/reconciler/vertex/controller_test.go b/pkg/reconciler/vertex/controller_test.go index f0f9128a8f..dedc25898e 100644 --- a/pkg/reconciler/vertex/controller_test.go +++ b/pkg/reconciler/vertex/controller_test.go @@ -317,7 +317,7 @@ func Test_BuildPodSpec(t *testing.T) { testObj.Spec.Sink = &dfv1.Sink{ AbstractSink: dfv1.AbstractSink{ UDSink: &dfv1.UDSink{ - Container: dfv1.Container{ + Container: &dfv1.Container{ Image: "image", Command: []string{"cmd"}, Args: []string{"arg0"}, From 6f19b17e94ab812b1fbda501127896535cef2ab4 Mon Sep 17 00:00:00 2001 From: Yashash H L Date: Thu, 22 Aug 2024 08:56:48 +0530 Subject: [PATCH 023/188] chore: metrics and https support for serving (#1985) Signed-off-by: Yashash H L Signed-off-by: Vigith Maurice Co-authored-by: Vigith Maurice --- rust/Cargo.lock | 124 +------------ rust/monovertex/Cargo.toml | 4 +- rust/monovertex/src/metrics.rs | 2 +- rust/serving/Cargo.toml | 9 +- rust/serving/src/app.rs | 101 +++++++---- .../src/app/callback/store/redisstore.rs | 10 +- rust/serving/src/app/tracker.rs | 2 +- rust/serving/src/config.rs | 37 ++-- rust/serving/src/error.rs | 44 ++--- rust/serving/src/lib.rs | 39 +++-- rust/serving/src/metrics.rs | 165 +++++++++++++----- rust/serving/src/pipeline.rs | 9 +- rust/src/bin/main.rs | 4 +- 13 files changed, 290 insertions(+), 260 deletions(-) diff --git a/rust/Cargo.lock b/rust/Cargo.lock index 94bce16b9a..13b368b4ab 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -17,18 +17,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" -[[package]] -name = "ahash" -version = "0.8.11" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" -dependencies = [ - "cfg-if", - "once_cell", - "version_check", - "zerocopy", -] - [[package]] name = "aho-corasick" version = "1.1.3" @@ -524,21 +512,6 @@ dependencies = [ "libc", ] -[[package]] -name = "crossbeam-epoch" -version = "0.9.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" -dependencies = [ - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-utils" -version = "0.8.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" - [[package]] name = "crunchy" version = "0.2.2" @@ -908,9 +881,6 @@ name = "hashbrown" version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" -dependencies = [ - "ahash", -] [[package]] name = "headers" @@ -1443,45 +1413,6 @@ version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" -[[package]] -name = "metrics" -version = "0.23.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "884adb57038347dfbaf2d5065887b6cf4312330dc8e94bc30a1a839bd79d3261" -dependencies = [ - "ahash", - "portable-atomic", -] - -[[package]] -name = "metrics-exporter-prometheus" -version = "0.15.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4f0c8427b39666bf970460908b213ec09b3b350f20c0c2eabcbba51704a08e6" -dependencies = [ - "base64 0.22.1", - "indexmap 2.3.0", - "metrics", - "metrics-util", - "quanta", - "thiserror", -] - -[[package]] -name = "metrics-util" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4259040465c955f9f2f1a4a8a16dc46726169bca0f88e8fb2dbeced487c3e828" -dependencies = [ - "crossbeam-epoch", - "crossbeam-utils", - "hashbrown 0.14.5", - "metrics", - "num_cpus", - "quanta", - "sketches-ddsketch", -] - [[package]] name = "mime" version = "0.3.17" @@ -1653,16 +1584,6 @@ dependencies = [ "autocfg", ] -[[package]] -name = "num_cpus" -version = "1.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" -dependencies = [ - "hermit-abi", - "libc", -] - [[package]] name = "numaflow" version = "0.1.0" @@ -2041,21 +1962,6 @@ dependencies = [ "prost", ] -[[package]] -name = "quanta" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e5167a477619228a0b284fac2674e3c388cba90631d7b7de620e6f1fcd08da5" -dependencies = [ - "crossbeam-utils", - "libc", - "once_cell", - "raw-cpuid", - "wasi", - "web-sys", - "winapi", -] - [[package]] name = "quote" version = "1.0.36" @@ -2095,15 +2001,6 @@ dependencies = [ "getrandom", ] -[[package]] -name = "raw-cpuid" -version = "11.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb9ee317cfe3fbd54b36a511efc1edd42e216903c9cd575e686dd68a2ba90d8d" -dependencies = [ - "bitflags 2.6.0", -] - [[package]] name = "rcgen" version = "0.13.1" @@ -2597,17 +2494,20 @@ dependencies = [ "async-nats", "axum", "axum-macros", + "axum-server", "backoff", "base64 0.22.1", "chrono", "config", "hyper-util", - "metrics", - "metrics-exporter-prometheus", + "parking_lot", + "prometheus-client", + "rcgen", "redis", "serde", "serde_json", "tempfile", + "thiserror", "tokio", "tower", "tower-http", @@ -2691,12 +2591,6 @@ dependencies = [ "rand_core", ] -[[package]] -name = "sketches-ddsketch" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85636c14b73d81f541e525f585c0a2109e6744e1565b5c1668e31c70c10ed65c" - [[package]] name = "slab" 
version = "0.4.9" @@ -2888,9 +2782,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.39.2" +version = "1.39.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daa4fb1bc778bd6f04cbfc4bb2d06a7396a8f299dc33ea1900cedaa316f467b1" +checksum = "9babc99b9923bfa4804bd74722ff02c0381021eafa4db9949217e3be8e84fff5" dependencies = [ "backtrace", "bytes", @@ -3090,9 +2984,9 @@ dependencies = [ [[package]] name = "tower-layer" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" [[package]] name = "tower-service" diff --git a/rust/monovertex/Cargo.toml b/rust/monovertex/Cargo.toml index 4efb9658b2..d56a502541 100644 --- a/rust/monovertex/Cargo.toml +++ b/rust/monovertex/Cargo.toml @@ -6,10 +6,10 @@ edition = "2021" [dependencies] axum = "0.7.5" axum-server = { version = "0.7.1", features = ["tls-rustls"] } -tonic = "0.12.0" +tonic = "0.12.1" bytes = "1.7.1" thiserror = "1.0.63" -tokio = { version = "1.39.2", features = ["full"] } +tokio = { version = "1.39.3", features = ["full"] } tracing = "0.1.40" tokio-util = "0.7.11" tokio-stream = "0.1.15" diff --git a/rust/monovertex/src/metrics.rs b/rust/monovertex/src/metrics.rs index 789ba47a3b..7a87b508a4 100644 --- a/rust/monovertex/src/metrics.rs +++ b/rust/monovertex/src/metrics.rs @@ -174,7 +174,7 @@ static MONOVTX_METRICS_LABELS: OnceLock> = OnceLock::new() // forward_metrics_labels is a helper function used to fetch the // MONOVTX_METRICS_LABELS object pub(crate) fn forward_metrics_labels() -> &'static Vec<(String, String)> { - crate::metrics::MONOVTX_METRICS_LABELS.get_or_init(|| { + MONOVTX_METRICS_LABELS.get_or_init(|| { let common_labels = vec![ ( MONO_VERTEX_NAME_LABEL.to_string(), diff --git a/rust/serving/Cargo.toml 
b/rust/serving/Cargo.toml index 635bb4f208..2dfb9b9c33 100644 --- a/rust/serving/Cargo.toml +++ b/rust/serving/Cargo.toml @@ -11,13 +11,12 @@ all-tests = ["redis-tests", "nats-tests"] [dependencies] async-nats = "0.35.1" axum = "0.7.5" +axum-server = { version = "0.7.1", features = ["tls-rustls"] } axum-macros = "0.4.1" hyper-util = { version = "0.1.6", features = ["client-legacy"] } -metrics = { version = "0.23.0", default-features = false } -metrics-exporter-prometheus = { version = "0.15.3", default-features = false } serde = { version = "1.0.204", features = ["derive"] } serde_json = "1.0.120" -tokio = { version = "1.36.0", features = ["full"] } +tokio = { version = "1.39.3", features = ["full"] } tower = "0.4.13" tower-http = { version = "0.5.2", features = ["trace", "timeout"] } tracing = "0.1.40" @@ -30,4 +29,8 @@ trait-variant = "0.1.2" chrono = { version = "0.4", features = ["serde"] } backoff = { path = "../backoff" } base64 = "0.22.1" +rcgen = "0.13.1" +parking_lot = "0.12.3" +prometheus-client = "0.22.3" +thiserror = "1.0.63" diff --git a/rust/serving/src/app.rs b/rust/serving/src/app.rs index 87a789159b..3043f3e34b 100644 --- a/rust/serving/src/app.rs +++ b/rust/serving/src/app.rs @@ -1,4 +1,5 @@ use std::env; +use std::net::SocketAddr; use std::time::Duration; use async_nats::jetstream; @@ -8,14 +9,15 @@ use axum::http::StatusCode; use axum::middleware::Next; use axum::response::Response; use axum::{body::Body, http::Request, middleware, response::IntoResponse, routing::get, Router}; +use axum_server::tls_rustls::RustlsConfig; +use axum_server::Handle; use hyper_util::client::legacy::connect::HttpConnector; use hyper_util::rt::TokioExecutor; -use tokio::net::TcpListener; use tokio::signal; use tower::ServiceBuilder; use tower_http::timeout::TimeoutLayer; use tower_http::trace::{DefaultOnResponse, TraceLayer}; -use tracing::{debug, info_span, Level}; +use tracing::{debug, info, info_span, Level}; use uuid::Uuid; use self::{ @@ -25,6 +27,7 @@ use 
self::{ use crate::app::callback::store::Store; use crate::app::tracker::MessageGraph; use crate::pipeline::pipeline_spec; +use crate::Error::{InitError, MetricsServer}; use crate::{app::callback::state::State as CallbackState, config, metrics::capture_metrics}; /// manage callbacks @@ -48,14 +51,10 @@ const ENV_NUMAFLOW_SERVING_AUTH_TOKEN: &str = "NUMAFLOW_SERVING_AUTH_TOKEN"; // - [ ] outer fallback for /v1/direct /// Start the main application Router and the axum server. -pub(crate) async fn start_main_server(addr: A) -> crate::Result<()> -where - A: tokio::net::ToSocketAddrs + std::fmt::Debug, -{ - let listener = TcpListener::bind(&addr) - .await - .map_err(|e| format!("Creating listener on {:?}: {}", addr, e))?; - +pub(crate) async fn start_main_server( + addr: SocketAddr, + tls_config: RustlsConfig, +) -> crate::Result<()> { debug!(?addr, "App server started"); let layers = ServiceBuilder::new() @@ -90,8 +89,14 @@ where .layer(middleware::from_fn(auth_middleware)); // Create the message graph from the pipeline spec and the redis store - let msg_graph = MessageGraph::from_pipeline(pipeline_spec()) - .map_err(|e| format!("Creating message graph from pipeline spec: {:?}", e))?; + let msg_graph = MessageGraph::from_pipeline(pipeline_spec()).map_err(|e| { + InitError(format!( + "Creating message graph from pipeline spec: {:?}", + e + )) + })?; + + // Create a redis store to store the callbacks and the custom responses let redis_store = callback::store::redisstore::RedisConnection::new( &config().redis.addr, config().redis.max_tasks, @@ -99,18 +104,51 @@ where .await?; let state = CallbackState::new(msg_graph, redis_store).await?; + let handle = Handle::new(); + // Spawn a task to gracefully shutdown server. 
+ tokio::spawn(graceful_shutdown(handle.clone())); + // Create a Jetstream context let js_context = create_js_context().await?; let router = setup_app(js_context, state).await?.layer(layers); - axum::serve(listener, router) - .with_graceful_shutdown(shutdown_signal()) + axum_server::bind_rustls(addr, tls_config) + .handle(handle) + .serve(router.into_make_service()) .await - .map_err(|e| format!("Starting web server: {}", e))?; + .map_err(|e| MetricsServer(format!("Starting web server for metrics: {}", e)))?; Ok(()) } +// Gracefully shutdown the server on receiving SIGINT or SIGTERM +// by sending a shutdown signal to the server using the handle. +async fn graceful_shutdown(handle: Handle) { + let ctrl_c = async { + signal::ctrl_c() + .await + .expect("failed to install Ctrl+C handler"); + }; + + let terminate = async { + signal::unix::signal(signal::unix::SignalKind::terminate()) + .expect("failed to install signal handler") + .recv() + .await; + }; + + tokio::select! { + _ = ctrl_c => {}, + _ = terminate => {}, + } + + info!("sending graceful shutdown signal"); + + // Signal the server to shutdown using Handle. 
+ // TODO: make the duration configurable + handle.graceful_shutdown(Some(Duration::from_secs(30))); +} + async fn create_js_context() -> crate::Result { // Check for user and password in the Jetstream configuration let js_config = &config().jetstream; @@ -130,11 +168,11 @@ async fn create_js_context() -> crate::Result { _ => async_nats::connect(&js_config.url).await, } .map_err(|e| { - format!( + InitError(format!( "Connecting to jetstream server {}: {}", &config().jetstream.url, e - ) + )) })?; Ok(jetstream::new(js_client)) } @@ -240,30 +278,11 @@ async fn routes( .merge(message_path_handler)) } -async fn shutdown_signal() { - let ctrl_c = async { - signal::ctrl_c() - .await - .expect("failed to install Ctrl+C handler"); - }; - - let terminate = async { - signal::unix::signal(signal::unix::SignalKind::terminate()) - .expect("failed to install signal handler") - .recv() - .await; - }; - - tokio::select! { - _ = ctrl_c => {}, - _ = terminate => {}, - } -} - #[cfg(test)] mod tests { use super::*; use crate::app::callback::store::memstore::InMemoryStore; + use crate::config::cert_key_pair; use async_nats::jetstream::stream; use axum::http::StatusCode; use std::net::SocketAddr; @@ -272,9 +291,15 @@ mod tests { #[tokio::test] async fn test_start_main_server() { + let (cert, key) = cert_key_pair(); + + let tls_config = RustlsConfig::from_pem(cert.pem().into(), key.serialize_pem().into()) + .await + .unwrap(); + let addr = SocketAddr::from(([127, 0, 0, 1], 0)); let server = tokio::spawn(async move { - let result = start_main_server(addr).await; + let result = start_main_server(addr, tls_config).await; assert!(result.is_ok()) }); diff --git a/rust/serving/src/app/callback/store/redisstore.rs b/rust/serving/src/app/callback/store/redisstore.rs index dea8f0b41d..002f68b6c1 100644 --- a/rust/serving/src/app/callback/store/redisstore.rs +++ b/rust/serving/src/app/callback/store/redisstore.rs @@ -7,12 +7,12 @@ use tokio::sync::Semaphore; use backoff::retry::Retry; use 
backoff::strategy::fixed; +use super::PayloadToSave; use crate::app::callback::CallbackRequest; use crate::consts::SAVED; +use crate::Error::Connection; use crate::{config, Error}; -use super::PayloadToSave; - const LPUSH: &str = "LPUSH"; const LRANGE: &str = "LRANGE"; const EXPIRE: &str = "EXPIRE"; @@ -27,12 +27,12 @@ pub(crate) struct RedisConnection { impl RedisConnection { /// Creates a new RedisConnection with concurrent operations on Redis set by max_tasks. pub(crate) async fn new(addr: &str, max_tasks: usize) -> crate::Result { - let client = - redis::Client::open(addr).map_err(|e| format!("Creating Redis client: {e:?}"))?; + let client = redis::Client::open(addr) + .map_err(|e| Connection(format!("Creating Redis client: {e:?}")))?; let conn = client .get_connection_manager() .await - .map_err(|e| format!("Connecting to Redis server: {e:?}"))?; + .map_err(|e| Connection(format!("Connecting to Redis server: {e:?}")))?; Ok(Self { conn_manager: conn, max_tasks, diff --git a/rust/serving/src/app/tracker.rs b/rust/serving/src/app/tracker.rs index 8487ac5696..cdd31a1c99 100644 --- a/rust/serving/src/app/tracker.rs +++ b/rust/serving/src/app/tracker.rs @@ -108,7 +108,7 @@ impl MessageGraph { if result { match serde_json::to_string(&subgraph) { Ok(json) => Ok(Some(json)), - Err(e) => Err(Error::SubGraphGeneratorError(e.to_string())), + Err(e) => Err(Error::SubGraphGenerator(e.to_string())), } } else { Ok(None) diff --git a/rust/serving/src/config.rs b/rust/serving/src/config.rs index 0f9e8f6238..ab40485876 100644 --- a/rust/serving/src/config.rs +++ b/rust/serving/src/config.rs @@ -1,13 +1,15 @@ -use std::fmt::Debug; -use std::path::Path; -use std::{env, sync::OnceLock}; - +use async_nats::rustls; use base64::prelude::BASE64_STANDARD; use base64::Engine; use config::Config; +use rcgen::{generate_simple_self_signed, Certificate, CertifiedKey, KeyPair}; use serde::{Deserialize, Serialize}; +use std::fmt::Debug; +use std::path::Path; +use std::{env, sync::OnceLock}; 
use tracing::info; +use crate::Error::ParseConfig; use crate::{Error, Result}; const ENV_PREFIX: &str = "NUMAFLOW_SERVING"; @@ -33,12 +35,23 @@ pub fn config() -> &'static Settings { }) } +static GLOBAL_TLS_CONFIG: OnceLock<(Certificate, KeyPair)> = OnceLock::new(); + +fn init_cert_key_pair() -> std::result::Result<(Certificate, KeyPair), String> { + let _ = rustls::crypto::aws_lc_rs::default_provider().install_default(); + let CertifiedKey { cert, key_pair } = generate_simple_self_signed(vec!["localhost".into()]) + .map_err(|e| format!("Failed to generate cert {:?}", e))?; + Ok((cert, key_pair)) +} + +pub fn cert_key_pair() -> &'static (Certificate, KeyPair) { + GLOBAL_TLS_CONFIG.get_or_init(|| init_cert_key_pair().expect("Failed to initialize TLS config")) +} + #[derive(Debug, Deserialize)] pub struct JetStreamConfig { pub stream: String, pub url: String, - pub user: Option, - pub password: Option, } #[derive(Debug, Deserialize)] @@ -94,11 +107,11 @@ impl Settings { .separator("."), ) .build() - .map_err(|e| format!("generating runtime configuration: {e:?}"))?; + .map_err(|e| ParseConfig(format!("generating runtime configuration: {e:?}")))?; let mut settings = settings .try_deserialize::() - .map_err(|e| format!("parsing runtime configuration: {e:?}"))?; + .map_err(|e| ParseConfig(format!("parsing runtime configuration: {e:?}")))?; // Update JetStreamConfig from environment variables if let Ok(url) = env::var(ENV_NUMAFLOW_SERVING_JETSTREAM_URL) { @@ -114,10 +127,10 @@ impl Settings { Ok(source_spec_encoded) => { let source_spec_decoded = BASE64_STANDARD .decode(source_spec_encoded.as_bytes()) - .map_err(|e| format!("decoding NUMAFLOW_SERVING_SOURCE: {e:?}"))?; + .map_err(|e| ParseConfig(format!("decoding NUMAFLOW_SERVING_SOURCE: {e:?}")))?; let source_spec = serde_json::from_slice::(&source_spec_decoded) - .map_err(|e| format!("parsing NUMAFLOW_SERVING_SOURCE: {e:?}"))?; + .map_err(|e| ParseConfig(format!("parsing NUMAFLOW_SERVING_SOURCE: {e:?}")))?; // Update 
tid_header from source_spec if let Some(msg_id_header_key) = source_spec.msg_id_header_key { @@ -130,10 +143,10 @@ impl Settings { // Update redis.ttl_secs from environment variable settings.redis.ttl_secs = match env::var(ENV_NUMAFLOW_SERVING_STORE_TTL) { Ok(ttl_secs) => Some(ttl_secs.parse().map_err(|e| { - format!( + ParseConfig(format!( "parsing NUMAFLOW_SERVING_STORE_TTL: expected u32, got {:?}", e - ) + )) })?), Err(_) => None, }; diff --git a/rust/serving/src/error.rs b/rust/serving/src/error.rs index 83d4546b2e..d53509c939 100644 --- a/rust/serving/src/error.rs +++ b/rust/serving/src/error.rs @@ -1,46 +1,50 @@ +use thiserror::Error; + +// TODO: introduce module level error handling + pub type Result = std::result::Result; -#[derive(Debug)] +#[derive(Error, Debug, Clone)] pub enum Error { - ConfigMissingEnv(&'static str), + #[error("ParseConfig Error - {0}")] + ParseConfig(String), // callback errors // TODO: store the ID too? + #[error("IDNotFound Error - {0}")] IDNotFound(&'static str), + #[error("SubGraphGenerator Error - {0}")] // subgraph generator errors - SubGraphGeneratorError(String), + SubGraphGenerator(String), + #[error("StoreWrite Error - {0}")] // Store write errors StoreWrite(String), + #[error("SubGraphNotFound Error - {0}")] // Sub Graph Not Found Error SubGraphNotFound(&'static str), + #[error("SubGraphInvalidInput Error - {0}")] // Sub Graph Invalid Input Error SubGraphInvalidInput(String), + #[error("StoreRead Error - {0}")] // Store read errors StoreRead(String), - // catch-all variant for now - Other(String), -} + #[error("Metrics Error - {0}")] + // Metrics errors + MetricsServer(String), -impl std::fmt::Display for Error { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - // reuse the debug implementation for now - write!(f, "{self:?}") - } -} + #[error("Connection Error - {0}")] + Connection(String), -impl std::error::Error for Error {} + #[error("Init Error - {0}")] + InitError(String), -impl From for Error 
-where - T: Into, -{ - fn from(value: T) -> Self { - Error::Other(value.into()) - } + #[error("Other Error - {0}")] + // catch-all variant for now + Other(String), } diff --git a/rust/serving/src/lib.rs b/rust/serving/src/lib.rs index 4a86d0a0bf..72de843d8d 100644 --- a/rust/serving/src/lib.rs +++ b/rust/serving/src/lib.rs @@ -1,9 +1,11 @@ pub use self::error::{Error, Result}; use crate::app::start_main_server; -use crate::config::config; -use crate::metrics::start_metrics_server; +use crate::config::{cert_key_pair, config}; +use crate::metrics::start_https_metrics_server; use crate::pipeline::pipeline_spec; -use tracing::{error, info}; +use axum_server::tls_rustls::RustlsConfig; +use std::net::SocketAddr; +use tracing::info; use tracing_subscriber::layer::SubscriberExt; use tracing_subscriber::util::SubscriberInitExt; @@ -14,7 +16,13 @@ mod error; mod metrics; mod pipeline; -pub async fn serve() { +pub async fn serve() -> std::result::Result<(), Box> { + let (cert, key) = cert_key_pair(); + + let tls_config = RustlsConfig::from_pem(cert.pem().into(), key.serialize_pem().into()) + .await + .map_err(|e| format!("Failed to create tls config {:?}", e))?; + tracing_subscriber::registry() .with( tracing_subscriber::EnvFilter::try_from_default_env() @@ -27,19 +35,22 @@ pub async fn serve() { info!(config = ?config(), pipeline_spec = ? pipeline_spec(), "Starting server with config and pipeline spec"); - let metrics_server_handle = tokio::spawn(start_metrics_server(( - "0.0.0.0", - config().metrics_server_listen_port, - ))); - let app_server_handle = tokio::spawn(start_main_server(("0.0.0.0", config().app_listen_port))); + // Start the metrics server, which serves the prometheus metrics. 
+ let metrics_addr: SocketAddr = + format!("0.0.0.0:{}", &config().metrics_server_listen_port).parse()?; + + let metrics_server_handle = + tokio::spawn(start_https_metrics_server(metrics_addr, tls_config.clone())); + + let app_addr: SocketAddr = format!("0.0.0.0:{}", &config().app_listen_port).parse()?; + + // Start the main server, which serves the application. + let app_server_handle = tokio::spawn(start_main_server(app_addr, tls_config)); // TODO: is try_join the best? we need to short-circuit at the first failure - let servers = tokio::try_join!(flatten(app_server_handle), flatten(metrics_server_handle)); + tokio::try_join!(flatten(app_server_handle), flatten(metrics_server_handle))?; - if let Err(e) = servers { - error!(error=?e, "Failed to run the servers"); - std::process::exit(1) - } + Ok(()) } async fn flatten(handle: tokio::task::JoinHandle>) -> Result { diff --git a/rust/serving/src/metrics.rs b/rust/serving/src/metrics.rs index caddadb377..cd4277efa3 100644 --- a/rust/serving/src/metrics.rs +++ b/rust/serving/src/metrics.rs @@ -1,5 +1,7 @@ -use std::{future::ready, time::Instant}; - +use crate::Error::MetricsServer; +use axum::body::Body; +use axum::http::StatusCode; +use axum::response::IntoResponse; use axum::{ extract::{MatchedPath, Request}, middleware::Next, @@ -7,52 +9,111 @@ use axum::{ routing::get, Router, }; -use metrics::{counter, histogram}; -use metrics_exporter_prometheus::{Matcher, PrometheusBuilder, PrometheusHandle}; -use tokio::net::{TcpListener, ToSocketAddrs}; +use axum_server::tls_rustls::RustlsConfig; +use prometheus_client::encoding::text::encode; +use prometheus_client::metrics::counter::Counter; +use prometheus_client::metrics::family::Family; +use prometheus_client::metrics::histogram::{exponential_buckets, Histogram}; +use prometheus_client::registry::Registry; +use std::net::SocketAddr; +use std::sync::OnceLock; +use std::time::Instant; use tracing::debug; -/// Collect and emit prometheus metrics. 
+// Define the labels for the metrics +pub const SERVING_METHOD_LABEL: &str = "method"; +pub const SERVING_PATH_LABEL: &str = "path"; +const SERVING_STATUS_LABEL: &str = "status"; -/// Metrics router and server -pub(crate) async fn start_metrics_server(addr: A) -> crate::Result<()> -where - A: ToSocketAddrs + std::fmt::Debug, -{ - // setup_metrics_recorder should only be invoked once - let recorder_handle = setup_metrics_recorder()?; - let metrics_app = Router::new().route("/metrics", get(move || ready(recorder_handle.render()))); +// Define the metrics +const HTTP_REQUESTS_TOTAL: &str = "http_requests_total"; +const HTTP_REQUESTS_DURATION: &str = "http_requests_duration"; - let listener = TcpListener::bind(&addr) - .await - .map_err(|e| format!("Creating listener on {:?}: {}", addr, e))?; +#[derive(Default)] +pub struct GlobalRegistry { + pub registry: parking_lot::Mutex, +} + +impl GlobalRegistry { + fn new() -> Self { + GlobalRegistry { + registry: parking_lot::Mutex::new(Registry::default()), + } + } +} + +static GLOBAL_REGISTER: OnceLock = OnceLock::new(); + +fn global_registry() -> &'static GlobalRegistry { + GLOBAL_REGISTER.get_or_init(GlobalRegistry::new) +} + +pub struct ServingMetrics { + pub http_requests_total: Family, Counter>, + pub http_requests_duration: Family, Histogram>, +} + +impl ServingMetrics { + fn new() -> Self { + let http_requests_total = Family::, Counter>::default(); + let http_requests_duration = + Family::, Histogram>::new_with_constructor(|| { + Histogram::new(exponential_buckets(0.001, 2.0, 20)) + }); + + let metrics = Self { + http_requests_total, + http_requests_duration, + }; + + let mut registry = global_registry().registry.lock(); + + registry.register( + HTTP_REQUESTS_TOTAL, + "A Counter to keep track of the total number of HTTP requests", + metrics.http_requests_total.clone(), + ); + registry.register( + HTTP_REQUESTS_DURATION, + "A Histogram to keep track of the duration of HTTP requests", + 
metrics.http_requests_duration.clone(), + ); + + metrics + } +} - debug!("metrics server started at addr: {:?}", addr); +static SERVING_METRICS: OnceLock = OnceLock::new(); + +pub(crate) fn serving_metrics() -> &'static ServingMetrics { + SERVING_METRICS.get_or_init(ServingMetrics::new) +} - axum::serve(listener, metrics_app) +pub(crate) async fn start_https_metrics_server( + addr: SocketAddr, + tls_config: RustlsConfig, +) -> crate::Result<()> { + let metrics_app = Router::new().route("/metrics", get(metrics_handler)); + + axum_server::bind_rustls(addr, tls_config) + .serve(metrics_app.into_make_service()) .await - .map_err(|e| format!("Starting web server for metrics: {}", e))?; + .map_err(|e| MetricsServer(format!("Starting web server for metrics: {}", e)))?; + Ok(()) } -/// setup the Prometheus metrics recorder. -fn setup_metrics_recorder() -> crate::Result { - // 1 micro-sec < t < 1000 seconds - let log_to_power_of_sqrt2_bins: [f64; 62] = (0..62) - .map(|i| 2_f64.sqrt().powf(i as f64)) - .collect::>() - .try_into() - .unwrap(); - - let prometheus_handle = PrometheusBuilder::new() - .set_buckets_for_metric( - Matcher::Full("http_requests_duration_micros".to_string()), - &log_to_power_of_sqrt2_bins, - ) - .map_err(|e| format!("Prometheus set_buckets_for_metric: {}", e))? - .install_recorder() - .map_err(|e| format!("Prometheus install_recorder: {}", e))?; - Ok(prometheus_handle) +// metrics_handler is used to generate and return a snapshot of the +// current state of the metrics in the global registry +pub async fn metrics_handler() -> impl IntoResponse { + let state = global_registry().registry.lock(); + let mut buffer = String::new(); + encode(&mut buffer, &state).unwrap(); + debug!("Exposing Metrics: {:?}", buffer); + axum::http::Response::builder() + .status(StatusCode::OK) + .body(Body::from(buffer)) + .unwrap() } /// Emit request metrics and also latency metrics. 
@@ -73,11 +134,21 @@ pub(crate) async fn capture_metrics(request: Request, next: Next) -> Response { let latency = start.elapsed().as_micros() as f64; let status = response.status().as_u16().to_string(); - let labels = [("method", method), ("path", path), ("status", status)]; + let labels = vec![ + (SERVING_METHOD_LABEL.to_string(), method), + (SERVING_PATH_LABEL.to_string(), path), + (SERVING_STATUS_LABEL.to_string(), status), + ]; - histogram!("http_requests_duration_micros", &labels).record(latency); + serving_metrics() + .http_requests_duration + .get_or_create(&labels) + .observe(latency); - counter!("http_requests_total", &labels).increment(1); + serving_metrics() + .http_requests_total + .get_or_create(&labels) + .inc(); response } @@ -87,19 +158,25 @@ mod tests { use std::net::SocketAddr; use std::time::Duration; + use super::*; + use crate::config::cert_key_pair; use axum::body::Body; use axum::http::{HeaderMap, StatusCode}; use axum::middleware; use tokio::time::sleep; use tower::ServiceExt; - use super::*; - #[tokio::test] async fn test_start_metrics_server() { + let (cert, key) = cert_key_pair(); + + let tls_config = RustlsConfig::from_pem(cert.pem().into(), key.serialize_pem().into()) + .await + .unwrap(); + let addr = SocketAddr::from(([127, 0, 0, 1], 0)); let server = tokio::spawn(async move { - let result = start_metrics_server(addr).await; + let result = start_https_metrics_server(addr, tls_config).await; assert!(result.is_ok()) }); diff --git a/rust/serving/src/pipeline.rs b/rust/serving/src/pipeline.rs index 6a51d65627..7f98c30611 100644 --- a/rust/serving/src/pipeline.rs +++ b/rust/serving/src/pipeline.rs @@ -1,6 +1,7 @@ use std::env; use std::sync::OnceLock; +use crate::Error::ParseConfig; use base64::prelude::BASE64_STANDARD; use base64::Engine; use serde::{Deserialize, Serialize}; @@ -79,18 +80,18 @@ impl Pipeline { // If the environment variable is set, decode and parse the pipeline let decoded = BASE64_STANDARD .decode(env_value.as_bytes()) 
- .map_err(|e| format!("decoding pipeline from env: {e:?}"))?; + .map_err(|e| ParseConfig(format!("decoding pipeline from env: {e:?}")))?; serde_json::from_slice::(&decoded) - .map_err(|e| format!("parsing pipeline from env: {e:?}"))? + .map_err(|e| ParseConfig(format!("parsing pipeline from env: {e:?}")))? } Err(_) => { // If the environment variable is not set, read the pipeline from a file let file_path = "./config/pipeline_spec.json"; let file_contents = std::fs::read_to_string(file_path) - .map_err(|e| format!("reading pipeline file: {e:?}"))?; + .map_err(|e| ParseConfig(format!("reading pipeline file: {e:?}")))?; serde_json::from_str::(&file_contents) - .map_err(|e| format!("parsing pipeline file: {e:?}"))? + .map_err(|e| ParseConfig(format!("parsing pipeline file: {e:?}")))? } }; Ok(pipeline) diff --git a/rust/src/bin/main.rs b/rust/src/bin/main.rs index 8c29f33f73..9d102c019b 100644 --- a/rust/src/bin/main.rs +++ b/rust/src/bin/main.rs @@ -7,7 +7,9 @@ async fn main() { // Based on the argument, run the appropriate component. 
if args.contains(&"--serving".to_string()) { - serving::serve().await; + if let Err(e) = serving::serve().await { + error!("Error running serving: {}", e); + } } else if args.contains(&"--servesink".to_string()) { if let Err(e) = servesink::servesink().await { info!("Error running servesink: {}", e); From deb1626ece55579d30e6d9003abe854980cc2923 Mon Sep 17 00:00:00 2001 From: Sreekanth Date: Thu, 22 Aug 2024 17:44:49 +0530 Subject: [PATCH 024/188] fix: test coverage generation for Rust code (#1993) Signed-off-by: Sreekanth --- .github/workflows/ci.yaml | 11 +++++++++-- .github/workflows/nightly-build.yml | 2 ++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index eb48e60d61..02dd5feeb5 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -102,10 +102,16 @@ jobs: - name: Install Rust uses: actions-rust-lang/setup-rust-toolchain@v1 with: - components: llvm-tools-preview + cache-workspaces: rust -> target + + - name: Install llvm-tools-preview + working-directory: ./rust + run: rustup component add llvm-tools-preview - name: Install grcov - run: cargo install grcov + uses: taiki-e/install-action@v2 + with: + tool: grcov - name: Install Protobuf Compiler run: sudo apt-get install -y protobuf-compiler @@ -153,6 +159,7 @@ jobs: - name: Setup Rust toolchain uses: actions-rust-lang/setup-rust-toolchain@v1.9.0 with: + cache-workspaces: rust -> target rustflags: '' - name: Configure sccache run: | diff --git a/.github/workflows/nightly-build.yml b/.github/workflows/nightly-build.yml index 3a671b2275..641bc2d593 100644 --- a/.github/workflows/nightly-build.yml +++ b/.github/workflows/nightly-build.yml @@ -50,6 +50,7 @@ jobs: - name: Setup Rust toolchain uses: actions-rust-lang/setup-rust-toolchain@v1.9.0 with: + cache-workspaces: rust -> target rustflags: '' - name: Configure sccache run: | @@ -82,6 +83,7 @@ jobs: - name: Setup Rust toolchain uses: 
actions-rust-lang/setup-rust-toolchain@v1.9.0 with: + cache-workspaces: rust -> target rustflags: '' - name: Configure sccache run: | From 04b259ac53d993b0d04f3c82bd2815da607b112e Mon Sep 17 00:00:00 2001 From: Yashash H L Date: Fri, 23 Aug 2024 13:58:09 +0530 Subject: [PATCH 025/188] chore: use numaflow models to create minimum pipeline spec for serving (#1995) Signed-off-by: Yashash H L Signed-off-by: Vigith Maurice Co-authored-by: Vigith Maurice --- pkg/apis/numaflow/v1alpha1/vertex_types.go | 4 +- rust/Cargo.lock | 1 + rust/serving/Cargo.toml | 1 + rust/serving/src/app.rs | 12 ++-- rust/serving/src/app/callback.rs | 10 ++-- rust/serving/src/app/callback/state.rs | 10 ++-- rust/serving/src/app/jetstream_proxy.rs | 10 ++-- rust/serving/src/app/message_path.rs | 4 +- rust/serving/src/app/tracker.rs | 12 ++-- rust/serving/src/config.rs | 5 +- rust/serving/src/lib.rs | 4 +- rust/serving/src/pipeline.rs | 68 +++++++++++++++++----- 12 files changed, 92 insertions(+), 49 deletions(-) diff --git a/pkg/apis/numaflow/v1alpha1/vertex_types.go b/pkg/apis/numaflow/v1alpha1/vertex_types.go index b4a26f4815..5b97700944 100644 --- a/pkg/apis/numaflow/v1alpha1/vertex_types.go +++ b/pkg/apis/numaflow/v1alpha1/vertex_types.go @@ -424,7 +424,7 @@ func (v Vertex) getServingContainer(req GetVertexPodSpecReq) (corev1.Container, HTTPGet: &corev1.HTTPGetAction{ Path: "/readyz", Port: intstr.FromInt32(VertexHTTPSPort), - Scheme: corev1.URISchemeHTTP, + Scheme: corev1.URISchemeHTTPS, }, }, InitialDelaySeconds: 3, @@ -437,7 +437,7 @@ func (v Vertex) getServingContainer(req GetVertexPodSpecReq) (corev1.Container, HTTPGet: &corev1.HTTPGetAction{ Path: "/livez", Port: intstr.FromInt32(VertexHTTPSPort), - Scheme: corev1.URISchemeHTTP, + Scheme: corev1.URISchemeHTTPS, }, }, InitialDelaySeconds: 20, diff --git a/rust/Cargo.lock b/rust/Cargo.lock index 13b368b4ab..c269c5e60d 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -2500,6 +2500,7 @@ dependencies = [ "chrono", "config", "hyper-util", 
+ "numaflow-models", "parking_lot", "prometheus-client", "rcgen", diff --git a/rust/serving/Cargo.toml b/rust/serving/Cargo.toml index 2dfb9b9c33..0af3c74e95 100644 --- a/rust/serving/Cargo.toml +++ b/rust/serving/Cargo.toml @@ -33,4 +33,5 @@ rcgen = "0.13.1" parking_lot = "0.12.3" prometheus-client = "0.22.3" thiserror = "1.0.63" +numaflow-models = { path = "../numaflow-models" } diff --git a/rust/serving/src/app.rs b/rust/serving/src/app.rs index 3043f3e34b..ad8403e022 100644 --- a/rust/serving/src/app.rs +++ b/rust/serving/src/app.rs @@ -26,7 +26,7 @@ use self::{ }; use crate::app::callback::store::Store; use crate::app::tracker::MessageGraph; -use crate::pipeline::pipeline_spec; +use crate::pipeline::min_pipeline_spec; use crate::Error::{InitError, MetricsServer}; use crate::{app::callback::state::State as CallbackState, config, metrics::capture_metrics}; @@ -89,7 +89,7 @@ pub(crate) async fn start_main_server( .layer(middleware::from_fn(auth_middleware)); // Create the message graph from the pipeline spec and the redis store - let msg_graph = MessageGraph::from_pipeline(pipeline_spec()).map_err(|e| { + let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec()).map_err(|e| { InitError(format!( "Creating message graph from pipeline spec: {:?}", e @@ -328,7 +328,7 @@ mod tests { assert!(stream.is_ok()); let mem_store = InMemoryStore::new(); - let msg_graph = MessageGraph::from_pipeline(pipeline_spec()).unwrap(); + let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec()).unwrap(); let callback_state = CallbackState::new(msg_graph, mem_store).await.unwrap(); @@ -354,7 +354,7 @@ mod tests { assert!(stream.is_ok()); let mem_store = InMemoryStore::new(); - let msg_graph = MessageGraph::from_pipeline(pipeline_spec()).unwrap(); + let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec()).unwrap(); let callback_state = CallbackState::new(msg_graph, mem_store).await.unwrap(); @@ -387,7 +387,7 @@ mod tests { assert!(stream.is_ok()); let mem_store = 
InMemoryStore::new(); - let msg_graph = MessageGraph::from_pipeline(pipeline_spec()).unwrap(); + let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec()).unwrap(); let callback_state = CallbackState::new(msg_graph, mem_store).await.unwrap(); @@ -427,7 +427,7 @@ mod tests { assert!(stream.is_ok()); let mem_store = InMemoryStore::new(); - let msg_graph = MessageGraph::from_pipeline(pipeline_spec()).unwrap(); + let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec()).unwrap(); let callback_state = CallbackState::new(msg_graph, mem_store).await.unwrap(); let app = Router::new() diff --git a/rust/serving/src/app/callback.rs b/rust/serving/src/app/callback.rs index 0734535a8c..6ecbc87ccc 100644 --- a/rust/serving/src/app/callback.rs +++ b/rust/serving/src/app/callback.rs @@ -75,14 +75,14 @@ mod tests { use crate::app::callback::store::memstore::InMemoryStore; use crate::app::tracker::MessageGraph; - use crate::pipeline::pipeline_spec; + use crate::pipeline::min_pipeline_spec; use super::*; #[tokio::test] async fn test_callback_failure() { let store = InMemoryStore::new(); - let msg_graph = MessageGraph::from_pipeline(pipeline_spec()).unwrap(); + let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec()).unwrap(); let state = CallbackState::new(msg_graph, store).await.unwrap(); let app = callback_handler(state); @@ -109,7 +109,7 @@ mod tests { #[tokio::test] async fn test_callback_success() { let store = InMemoryStore::new(); - let msg_graph = MessageGraph::from_pipeline(pipeline_spec()).unwrap(); + let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec()).unwrap(); let mut state = CallbackState::new(msg_graph, store).await.unwrap(); let x = state.register("test_id".to_string()); @@ -160,7 +160,7 @@ mod tests { #[tokio::test] async fn test_callback_save() { let store = InMemoryStore::new(); - let msg_graph = MessageGraph::from_pipeline(pipeline_spec()).unwrap(); + let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec()).unwrap(); let 
state = CallbackState::new(msg_graph, store).await.unwrap(); let app = callback_handler(state); @@ -179,7 +179,7 @@ mod tests { #[tokio::test] async fn test_without_id_header() { let store = InMemoryStore::new(); - let msg_graph = MessageGraph::from_pipeline(pipeline_spec()).unwrap(); + let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec()).unwrap(); let state = CallbackState::new(msg_graph, store).await.unwrap(); let app = callback_handler(state); diff --git a/rust/serving/src/app/callback/state.rs b/rust/serving/src/app/callback/state.rs index da971c86d0..aebf68d3a5 100644 --- a/rust/serving/src/app/callback/state.rs +++ b/rust/serving/src/app/callback/state.rs @@ -236,13 +236,13 @@ mod tests { use axum::body::Bytes; use crate::app::callback::store::memstore::InMemoryStore; - use crate::pipeline::pipeline_spec; + use crate::pipeline::min_pipeline_spec; use super::*; #[tokio::test] async fn test_state() { - let msg_graph = MessageGraph::from_pipeline(pipeline_spec()).unwrap(); + let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec()).unwrap(); let store = InMemoryStore::new(); let mut state = State::new(msg_graph, store).await.unwrap(); @@ -302,7 +302,7 @@ mod tests { #[tokio::test] async fn test_retrieve_saved_no_entry() { - let msg_graph = MessageGraph::from_pipeline(pipeline_spec()).unwrap(); + let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec()).unwrap(); let store = InMemoryStore::new(); let mut state = State::new(msg_graph, store).await.unwrap(); @@ -317,7 +317,7 @@ mod tests { #[tokio::test] async fn test_insert_callback_requests_invalid_id() { - let msg_graph = MessageGraph::from_pipeline(pipeline_spec()).unwrap(); + let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec()).unwrap(); let store = InMemoryStore::new(); let mut state = State::new(msg_graph, store).await.unwrap(); @@ -338,7 +338,7 @@ mod tests { #[tokio::test] async fn test_retrieve_subgraph_from_storage_no_entry() { - let msg_graph = 
MessageGraph::from_pipeline(pipeline_spec()).unwrap(); + let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec()).unwrap(); let store = InMemoryStore::new(); let mut state = State::new(msg_graph, store).await.unwrap(); diff --git a/rust/serving/src/app/jetstream_proxy.rs b/rust/serving/src/app/jetstream_proxy.rs index 6a56266f72..dd80f40eda 100644 --- a/rust/serving/src/app/jetstream_proxy.rs +++ b/rust/serving/src/app/jetstream_proxy.rs @@ -57,7 +57,7 @@ pub(crate) async fn jetstream_proxy( callback: callback_store, stream: &config().jetstream.stream, callback_url: format!( - "http://{}:{}/v1/process/callback", + "https://{}:{}/v1/process/callback", config().host_ip, config().app_listen_port ), @@ -277,7 +277,7 @@ mod tests { use crate::app::callback::store::PayloadToSave; use crate::app::callback::CallbackRequest; use crate::app::tracker::MessageGraph; - use crate::pipeline::pipeline_spec; + use crate::pipeline::min_pipeline_spec; use crate::Error; use super::*; @@ -323,7 +323,7 @@ mod tests { .map_err(|e| format!("creating stream {}: {}", &config().jetstream.url, e))?; let mock_store = MockStore {}; - let msg_graph = MessageGraph::from_pipeline(pipeline_spec()) + let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec()) .map_err(|e| format!("Failed to create message graph from pipeline spec: {:?}", e))?; let callback_state = CallbackState::new(msg_graph, mock_store).await?; @@ -401,7 +401,7 @@ mod tests { .map_err(|e| format!("creating stream {}: {}", &config().jetstream.url, e)); let mem_store = InMemoryStore::new(); - let msg_graph = MessageGraph::from_pipeline(pipeline_spec()).unwrap(); + let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec()).unwrap(); let mut callback_state = CallbackState::new(msg_graph, mem_store).await.unwrap(); @@ -465,7 +465,7 @@ mod tests { .map_err(|e| format!("creating stream {}: {}", &config().jetstream.url, e)); let mem_store = InMemoryStore::new(); - let msg_graph = 
MessageGraph::from_pipeline(pipeline_spec()).unwrap(); + let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec()).unwrap(); let mut callback_state = CallbackState::new(msg_graph, mem_store).await.unwrap(); diff --git a/rust/serving/src/app/message_path.rs b/rust/serving/src/app/message_path.rs index 4b362b68ea..54139566f8 100644 --- a/rust/serving/src/app/message_path.rs +++ b/rust/serving/src/app/message_path.rs @@ -46,14 +46,14 @@ mod tests { use crate::app::callback::store::memstore::InMemoryStore; use crate::app::tracker::MessageGraph; - use crate::pipeline::pipeline_spec; + use crate::pipeline::min_pipeline_spec; use super::*; #[tokio::test] async fn test_message_path_not_present() { let store = InMemoryStore::new(); - let msg_graph = MessageGraph::from_pipeline(pipeline_spec()).unwrap(); + let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec()).unwrap(); let state = CallbackState::new(msg_graph, store).await.unwrap(); let app = get_message_path(state); diff --git a/rust/serving/src/app/tracker.rs b/rust/serving/src/app/tracker.rs index cdd31a1c99..85d3c2b76d 100644 --- a/rust/serving/src/app/tracker.rs +++ b/rust/serving/src/app/tracker.rs @@ -5,7 +5,7 @@ use std::sync::Arc; use serde::{Deserialize, Serialize}; use crate::app::callback::CallbackRequest; -use crate::pipeline::{Edge, OperatorType, Pipeline}; +use crate::pipeline::{Edge, PipelineDCG, OperatorType}; use crate::Error; fn compare_slice(operator: &OperatorType, a: &[String], b: &[String]) -> bool { @@ -225,7 +225,7 @@ impl MessageGraph { } // from_env reads the pipeline stored in the environment variable and creates a MessageGraph from it. 
- pub(crate) fn from_pipeline(pipeline_spec: &Pipeline) -> Result { + pub(crate) fn from_pipeline(pipeline_spec: &PipelineDCG) -> Result { let mut dag = Graph::with_capacity(pipeline_spec.edges.len()); for edge in &pipeline_spec.edges { dag.entry(edge.from.clone()).or_default().push(edge.clone()); @@ -369,7 +369,7 @@ mod tests { #[test] fn test_generate_subgraph_complex() { - let pipeline = Pipeline { + let pipeline = PipelineDCG { vertices: vec![ Vertex { name: "a".to_string(), @@ -561,7 +561,7 @@ mod tests { #[test] fn test_simple_dropped_message() { - let pipeline = Pipeline { + let pipeline = PipelineDCG { vertices: vec![ Vertex { name: "a".to_string(), @@ -636,7 +636,7 @@ mod tests { #[test] fn test_complex_dropped_message() { - let pipeline = Pipeline { + let pipeline = PipelineDCG { vertices: vec![ Vertex { name: "a".to_string(), @@ -805,7 +805,7 @@ mod tests { #[test] fn test_simple_cycle_pipeline() { - let pipeline = Pipeline { + let pipeline = PipelineDCG { vertices: vec![ Vertex { name: "a".to_string(), diff --git a/rust/serving/src/config.rs b/rust/serving/src/config.rs index ab40485876..eb3b9ac361 100644 --- a/rust/serving/src/config.rs +++ b/rust/serving/src/config.rs @@ -1,3 +1,5 @@ +use crate::Error::ParseConfig; +use crate::{Error, Result}; use async_nats::rustls; use base64::prelude::BASE64_STANDARD; use base64::Engine; @@ -9,9 +11,6 @@ use std::path::Path; use std::{env, sync::OnceLock}; use tracing::info; -use crate::Error::ParseConfig; -use crate::{Error, Result}; - const ENV_PREFIX: &str = "NUMAFLOW_SERVING"; const ENV_NUMAFLOW_SERVING_SOURCE_OBJECT: &str = "NUMAFLOW_SERVING_SOURCE_OBJECT"; const ENV_NUMAFLOW_SERVING_JETSTREAM_URL: &str = "NUMAFLOW_ISBSVC_JETSTREAM_URL"; diff --git a/rust/serving/src/lib.rs b/rust/serving/src/lib.rs index 72de843d8d..86265a5ed0 100644 --- a/rust/serving/src/lib.rs +++ b/rust/serving/src/lib.rs @@ -2,7 +2,7 @@ pub use self::error::{Error, Result}; use crate::app::start_main_server; use 
crate::config::{cert_key_pair, config}; use crate::metrics::start_https_metrics_server; -use crate::pipeline::pipeline_spec; +use crate::pipeline::min_pipeline_spec; use axum_server::tls_rustls::RustlsConfig; use std::net::SocketAddr; use tracing::info; @@ -33,7 +33,7 @@ pub async fn serve() -> std::result::Result<(), Box &'static Pipeline { - static PIPELINE: OnceLock = OnceLock::new(); - PIPELINE.get_or_init(|| match Pipeline::load() { +pub fn min_pipeline_spec() -> &'static PipelineDCG { + static PIPELINE: OnceLock = OnceLock::new(); + PIPELINE.get_or_init(|| match PipelineDCG::load() { Ok(pipeline) => pipeline, - Err(e) => panic!("Failed to load pipeline: {:?}", e), + Err(e) => panic!("Failed to load minimum pipeline spec: {:?}", e), }) } @@ -39,6 +40,17 @@ impl OperatorType { } } +impl From for OperatorType { + fn from(s: String) -> Self { + match s.as_str() { + "and" => OperatorType::And, + "or" => OperatorType::Or, + "not" => OperatorType::Not, + _ => panic!("Invalid operator type: {}", s), + } + } +} + // Tag is a struct that contains the information about the tags for the edge #[derive(Serialize, Deserialize, Debug, Clone)] pub struct Tag { @@ -60,10 +72,11 @@ pub struct Edge { pub conditions: Option, } -// Pipeline is a struct that contains the information about the pipeline. 
+/// DCG (directed compute graph) of the pipeline with minimal information build using vertices and edges +/// from the pipeline spec #[derive(Serialize, Deserialize, Debug, Clone)] #[serde()] -pub struct Pipeline { +pub struct PipelineDCG { pub vertices: Vec, pub edges: Vec, } @@ -73,16 +86,16 @@ pub struct Vertex { pub name: String, } -impl Pipeline { +impl PipelineDCG { pub fn load() -> crate::Result { - let pipeline = match env::var(ENV_MIN_PIPELINE_SPEC) { + let full_pipeline_spec = match env::var(ENV_MIN_PIPELINE_SPEC) { Ok(env_value) => { // If the environment variable is set, decode and parse the pipeline let decoded = BASE64_STANDARD .decode(env_value.as_bytes()) .map_err(|e| ParseConfig(format!("decoding pipeline from env: {e:?}")))?; - serde_json::from_slice::(&decoded) + serde_json::from_slice::(&decoded) .map_err(|e| ParseConfig(format!("parsing pipeline from env: {e:?}")))? } Err(_) => { @@ -90,11 +103,41 @@ impl Pipeline { let file_path = "./config/pipeline_spec.json"; let file_contents = std::fs::read_to_string(file_path) .map_err(|e| ParseConfig(format!("reading pipeline file: {e:?}")))?; - serde_json::from_str::(&file_contents) + serde_json::from_str::(&file_contents) .map_err(|e| ParseConfig(format!("parsing pipeline file: {e:?}")))? } }; - Ok(pipeline) + + let vertices: Vec = full_pipeline_spec + .vertices + .ok_or(ParseConfig("missing vertices in pipeline spec".to_string()))? + .iter() + .map(|v| Vertex { + name: v.name.clone(), + }) + .collect(); + + let edges: Vec = full_pipeline_spec + .edges + .ok_or(ParseConfig("missing edges in pipeline spec".to_string()))? 
+ .iter() + .map(|e| { + let conditions = e.conditions.clone().map(|c| Conditions { + tags: Some(Tag { + operator: c.tags.operator.map(|o| o.into()), + values: c.tags.values.clone(), + }), + }); + + Edge { + from: e.from.clone(), + to: e.to.clone(), + conditions, + } + }) + .collect(); + + Ok(PipelineDCG { vertices, edges }) } } @@ -104,8 +147,7 @@ mod tests { #[test] fn test_pipeline_load() { - let pipeline = pipeline_spec(); - + let pipeline = min_pipeline_spec(); assert_eq!(pipeline.vertices.len(), 3); assert_eq!(pipeline.edges.len(), 2); assert_eq!(pipeline.vertices[0].name, "in"); From ae02243b3f30de8da407b148bbac7cb2e48a68c4 Mon Sep 17 00:00:00 2001 From: Keran Yang Date: Fri, 23 Aug 2024 19:14:20 -0400 Subject: [PATCH 026/188] fix: e2e testing isbsvc deletion timeout issue (#1997) Signed-off-by: Keran Yang --- pkg/apis/numaflow/v1alpha1/const.go | 17 +++++---- pkg/apis/numaflow/v1alpha1/pipeline_types.go | 2 +- pkg/reconciler/isbsvc/installer/jetstream.go | 2 +- test/fixtures/e2e_suite.go | 24 ++++++++---- test/fixtures/expect.go | 2 +- test/fixtures/given.go | 6 +-- test/fixtures/when.go | 40 +++++++++++++++++++- 7 files changed, 70 insertions(+), 23 deletions(-) diff --git a/pkg/apis/numaflow/v1alpha1/const.go b/pkg/apis/numaflow/v1alpha1/const.go index 177e6f73a1..8677101378 100644 --- a/pkg/apis/numaflow/v1alpha1/const.go +++ b/pkg/apis/numaflow/v1alpha1/const.go @@ -87,14 +87,15 @@ const ( ServingSourceContainer = "serving" // components - ComponentISBSvc = "isbsvc" - ComponentDaemon = "daemon" - ComponentVertex = "vertex" - ComponentMonoVertex = "mono-vertex" - ComponentMonoVertexDaemon = "mono-vertex-daemon" - ComponentJob = "job" - ComponentSideInputManager = "side-inputs-manager" - ComponentUXServer = "numaflow-ux" + ComponentISBSvc = "isbsvc" + ComponentDaemon = "daemon" + ComponentVertex = "vertex" + ComponentMonoVertex = "mono-vertex" + ComponentMonoVertexDaemon = "mono-vertex-daemon" + ComponentJob = "job" + ComponentSideInputManager = 
"side-inputs-manager" + ComponentUXServer = "numaflow-ux" + ComponentControllerManager = "controller-manager" // controllers ControllerISBSvc = "isbsvc-controller" diff --git a/pkg/apis/numaflow/v1alpha1/pipeline_types.go b/pkg/apis/numaflow/v1alpha1/pipeline_types.go index 010b53bf20..07d62f673d 100644 --- a/pkg/apis/numaflow/v1alpha1/pipeline_types.go +++ b/pkg/apis/numaflow/v1alpha1/pipeline_types.go @@ -690,7 +690,7 @@ func (pls *PipelineStatus) MarkDeployFailed(reason, message string) { pls.SetPhase(PipelinePhaseFailed, message) } -// MarkVerticesHealthy set the daemon service of the pipeline is healthy. +// MarkDaemonServiceHealthy set the daemon service of the pipeline is healthy. func (pls *PipelineStatus) MarkDaemonServiceHealthy() { pls.MarkTrue(PipelineConditionDaemonServiceHealthy) } diff --git a/pkg/reconciler/isbsvc/installer/jetstream.go b/pkg/reconciler/isbsvc/installer/jetstream.go index d887f79e38..f0faa09caa 100644 --- a/pkg/reconciler/isbsvc/installer/jetstream.go +++ b/pkg/reconciler/isbsvc/installer/jetstream.go @@ -411,7 +411,7 @@ func (r *jetStreamInstaller) createConfigMap(ctx context.Context) error { svcName := generateJetStreamServiceName(r.isbSvc) ssName := generateJetStreamStatefulSetName(r.isbSvc) replicas := r.isbSvc.Spec.JetStream.GetReplicas() - routes := []string{} + var routes []string for j := 0; j < replicas; j++ { routes = append(routes, fmt.Sprintf("nats://%s-%s.%s.%s.svc:%s", ssName, strconv.Itoa(j), svcName, r.isbSvc.Namespace, strconv.Itoa(int(clusterPort)))) } diff --git a/test/fixtures/e2e_suite.go b/test/fixtures/e2e_suite.go index 64b27e3eb9..be1255a037 100644 --- a/test/fixtures/e2e_suite.go +++ b/test/fixtures/e2e_suite.go @@ -38,10 +38,13 @@ import ( ) const ( - Namespace = "numaflow-system" - Label = "numaflow-e2e" - LabelValue = "true" - ISBSvcName = "numaflow-e2e" + Namespace = "numaflow-system" + Label = "numaflow-e2e" + LabelValue = "true" + ISBSvcName = "numaflow-e2e" + // the number 90 is carefully chosen to 
ensure the test suite can finish within a reasonable time without timing out. + // please exercise caution when updating this value, as it may cause e2e tests to be flaky. + // if updated, consider running the entire e2e test suite multiple times to ensure stability. defaultTimeout = 90 * time.Second LogSourceVertexStarted = "Start processing source messages" @@ -139,13 +142,18 @@ func (s *E2ESuite) TearDownSuite() { When(). Wait(5 * time.Second). DeleteISBSvc(). - Wait(3 * time.Second). + Wait(3 * time.Second) + // force deleting the ISB svc pods because we have seen pods stuck in terminating state after CRD deletion, + // which causes e2e tests to timeout, this is a workaround to avoid the issue. + deleteISBPodsCMD := fmt.Sprintf("kubectl delete pods -n %s -l %s=%s,%s=%s --ignore-not-found=true --grace-period=0 --force", Namespace, dfv1.KeyComponent, dfv1.ComponentISBSvc, dfv1.KeyISBSvcName, ISBSvcName) + s.Given().When().Exec("sh", []string{"-c", deleteISBPodsCMD}, OutputRegexp("")) + s.Given().ISBSvc(getISBSvcSpec()). + When(). Expect(). 
ISBSvcDeleted(defaultTimeout) - s.T().Log("ISB svc is deleted") - deleteCMD := fmt.Sprintf("kubectl delete -k ../../config/apps/redis -n %s --ignore-not-found=true", Namespace) - s.Given().When().Exec("sh", []string{"-c", deleteCMD}, OutputRegexp(`service "redis" deleted`)) + deleteRedisCMD := fmt.Sprintf("kubectl delete -k ../../config/apps/redis -n %s --ignore-not-found=true", Namespace) + s.Given().When().Exec("sh", []string{"-c", deleteRedisCMD}, OutputRegexp(`service "redis" deleted`)) s.T().Log("Redis resources are deleted") close(s.stopch) } diff --git a/test/fixtures/expect.go b/test/fixtures/expect.go index 2e550653fe..afa30447d9 100644 --- a/test/fixtures/expect.go +++ b/test/fixtures/expect.go @@ -76,7 +76,7 @@ func (t *Expect) ISBSvcDeleted(timeout time.Duration) *Expect { t.t.Fatalf("Expected ISB svc to be deleted: %v", err) } - labelSelector := fmt.Sprintf("%s=isbsvc-controller,%s=%s", dfv1.KeyManagedBy, dfv1.KeyISBSvcName, ISBSvcName) + labelSelector := fmt.Sprintf("%s=%s,%s=%s", dfv1.KeyManagedBy, dfv1.ControllerISBSvc, dfv1.KeyISBSvcName, ISBSvcName) opts := metav1.ListOptions{LabelSelector: labelSelector} timeoutCh := make(chan bool, 1) go func() { diff --git a/test/fixtures/given.go b/test/fixtures/given.go index 30a4ab01a7..e6f2d0dbef 100644 --- a/test/fixtures/given.go +++ b/test/fixtures/given.go @@ -43,7 +43,7 @@ type Given struct { kubeClient kubernetes.Interface } -// creates an ISBSvc based on the parameter, this may be: +// ISBSvc creates an ISBSvc based on the parameter, this may be: // // 1. A file name if it starts with "@" // 2. Raw YAML. @@ -61,7 +61,7 @@ func (g *Given) ISBSvc(text string) *Given { return g } -// creates a Pipeline based on the parameter, this may be: +// Pipeline creates a Pipeline based on the parameter, this may be: // // 1. A file name if it starts with "@" // 2. Raw YAML. 
@@ -79,7 +79,7 @@ func (g *Given) Pipeline(text string) *Given { return g } -// / creates a MonoVertex based on the parameter, this may be: +// MonoVertex creates a MonoVertex based on the parameter, this may be: // // 1. A file name if it starts with "@" // 2. Raw YAML. diff --git a/test/fixtures/when.go b/test/fixtures/when.go index 7ab85ae772..986085bdd6 100644 --- a/test/fixtures/when.go +++ b/test/fixtures/when.go @@ -276,7 +276,7 @@ func (w *When) TerminateAllPodPortForwards() *When { return w } -func (w *When) StreamVertexPodlogs(vertexName, containerName string) *When { +func (w *When) StreamVertexPodLogs(vertexName, containerName string) *When { w.t.Helper() ctx := context.Background() labelSelector := fmt.Sprintf("%s=%s,%s=%s", dfv1.KeyPipelineName, w.pipeline.Name, dfv1.KeyVertexName, vertexName) @@ -295,6 +295,44 @@ func (w *When) StreamVertexPodlogs(vertexName, containerName string) *When { return w } +func (w *When) StreamISBLogs() *When { + w.t.Helper() + ctx := context.Background() + labelSelector := fmt.Sprintf("%s=%s,%s=%s", dfv1.KeyComponent, dfv1.ComponentISBSvc, dfv1.KeyManagedBy, dfv1.ControllerISBSvc) + podList, err := w.kubeClient.CoreV1().Pods(Namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector, FieldSelector: "status.phase=Running"}) + if err != nil { + w.t.Fatalf("Error getting ISB service pods: %v", err) + } + for _, pod := range podList.Items { + stopCh := make(chan struct{}, 1) + streamPodLogs(ctx, w.kubeClient, Namespace, pod.Name, "main", stopCh) + if w.streamLogsStopChannels == nil { + w.streamLogsStopChannels = make(map[string]chan struct{}) + } + w.streamLogsStopChannels[pod.Name+":main"] = stopCh + } + return w +} + +func (w *When) StreamControllerLogs() *When { + w.t.Helper() + ctx := context.Background() + labelSelector := fmt.Sprintf("%s=%s", dfv1.KeyComponent, dfv1.ComponentControllerManager) + podList, err := w.kubeClient.CoreV1().Pods(Namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector, 
FieldSelector: "status.phase=Running"}) + if err != nil { + w.t.Fatalf("Error getting the controller pods: %v", err) + } + for _, pod := range podList.Items { + stopCh := make(chan struct{}, 1) + streamPodLogs(ctx, w.kubeClient, Namespace, pod.Name, "controller-manager", stopCh) + if w.streamLogsStopChannels == nil { + w.streamLogsStopChannels = make(map[string]chan struct{}) + } + w.streamLogsStopChannels[pod.Name+":controller-manager"] = stopCh + } + return w +} + func (w *When) TerminateAllPodLogs() *When { w.t.Helper() if len(w.streamLogsStopChannels) > 0 { From 86c381f7efd16cbd29277e518668619815b2c2d8 Mon Sep 17 00:00:00 2001 From: Yashash H L Date: Sat, 24 Aug 2024 10:34:51 +0530 Subject: [PATCH 027/188] chore: fix servesink (#1999) Signed-off-by: Yashash H L Signed-off-by: Vigith Maurice Co-authored-by: Vigith Maurice --- rust/monovertex/src/forwarder.rs | 7 +- rust/monovertex/src/source.rs | 4 +- rust/servesink/src/lib.rs | 200 +++++++++++++++++++++++++++---- 3 files changed, 178 insertions(+), 33 deletions(-) diff --git a/rust/monovertex/src/forwarder.rs b/rust/monovertex/src/forwarder.rs index 7be4824b6e..d9f68fc608 100644 --- a/rust/monovertex/src/forwarder.rs +++ b/rust/monovertex/src/forwarder.rs @@ -168,13 +168,10 @@ impl Forwarder { // Applies transformation to the messages if transformer is present // we concurrently apply transformation to all the messages. 
async fn apply_transformer(&self, messages: Vec) -> Result> { - let transformer_client; - if let Some(trf_client) = &self.transformer_client { - transformer_client = trf_client; - } else { + let Some(transformer_client) = &self.transformer_client else { // return early if there is no transformer return Ok(messages); - } + }; let start_time = tokio::time::Instant::now(); let mut jh = JoinSet::new(); diff --git a/rust/monovertex/src/source.rs b/rust/monovertex/src/source.rs index a58922d5c7..681b0beb58 100644 --- a/rust/monovertex/src/source.rs +++ b/rust/monovertex/src/source.rs @@ -104,8 +104,6 @@ impl SourceClient { Ok(self.client.ack_fn(request).await?.into_inner()) } - #[allow(dead_code)] - // TODO: remove dead_code pub(crate) async fn pending_fn(&mut self) -> Result { let request = Request::new(()); let response = self @@ -114,7 +112,7 @@ impl SourceClient { .await? .into_inner() .result - .map_or(0, |r| r.count); + .map_or(-1, |r| r.count); // default to -1(unavailable) Ok(response) } diff --git a/rust/servesink/src/lib.rs b/rust/servesink/src/lib.rs index 5663b61b07..b54ed8b580 100644 --- a/rust/servesink/src/lib.rs +++ b/rust/servesink/src/lib.rs @@ -5,6 +5,10 @@ use reqwest::Client; use tracing::{error, warn}; use tracing_subscriber::prelude::*; +const NUMAFLOW_CALLBACK_URL_HEADER: &str = "X-Numaflow-Callback-Url"; +const NUMAFLOW_ID_HEADER: &str = "X-Numaflow-Id"; + +/// servesink is a Numaflow Sink which forwards the payload to the Numaflow serving URL. 
pub async fn servesink() -> Result<(), Box> { tracing_subscriber::registry() .with( @@ -14,14 +18,14 @@ pub async fn servesink() -> Result<(), Box> { .with(tracing_subscriber::fmt::layer().with_ansi(false)) .init(); - sink::Server::new(Logger::new()).start().await + sink::Server::new(ServeSink::new()).start().await } -struct Logger { +struct ServeSink { client: Client, } -impl Logger { +impl ServeSink { fn new() -> Self { Self { client: Client::new(), @@ -30,43 +34,189 @@ impl Logger { } #[tonic::async_trait] -impl sink::Sinker for Logger { +impl sink::Sinker for ServeSink { async fn sink(&self, mut input: tokio::sync::mpsc::Receiver) -> Vec { let mut responses: Vec = Vec::new(); while let Some(datum) = input.recv().await { - // do something better, but for now let's just log it. - // please note that `from_utf8` is working because the input in this - // example uses utf-8 data. - let response = match std::str::from_utf8(&datum.value) { - Ok(_v) => { - // record the response - Response::ok(datum.id) + // if the callback url is absent, ignore the request + let url = match datum.headers.get(NUMAFLOW_CALLBACK_URL_HEADER) { + Some(url) => url, + None => { + warn!( + "Missing {} header, Ignoring the request", + NUMAFLOW_CALLBACK_URL_HEADER + ); + responses.push(Response::ok(datum.id)); + continue; } - Err(e) => Response::failure(datum.id, format!("Invalid UTF-8 sequence: {}", e)), - }; - // return the responses - responses.push(response); - let Some(url) = datum.headers.get("X-Numaflow-Callback-Url") else { - warn!("X-Numaflow-Callback-Url header is not found in the payload"); - continue; }; - let Some(numaflow_id) = datum.headers.get("X-Numaflow-Id") else { - warn!("X-Numaflow-Id header is not found in the payload"); - continue; + + // if the numaflow id is absent, ignore the request + let numaflow_id = match datum.headers.get(NUMAFLOW_ID_HEADER) { + Some(id) => id, + None => { + warn!( + "Missing {} header, Ignoring the request", + NUMAFLOW_ID_HEADER + ); + 
responses.push(Response::ok(datum.id)); + continue; + } }; + let resp = self .client .post(format!("{}_{}", url, "save")) - .header("X-Numaflow-Id", numaflow_id) + .header(NUMAFLOW_ID_HEADER, numaflow_id) .header("id", numaflow_id) .body(datum.value) .send() .await; - if let Err(e) = resp { - error!(error=?e, url=url, "Sending result to numaserve") - } + + let response = match resp { + Ok(_) => Response::ok(datum.id), + Err(e) => { + error!("Sending result to serving URL {:?}", e); + Response::failure(datum.id, format!("Failed to send: {}", e)) + } + }; + + responses.push(response); } responses } } + +#[cfg(test)] +mod tests { + use super::*; + use numaflow::sink::{SinkRequest, Sinker}; + use std::collections::HashMap; + use tokio::io::{AsyncReadExt, AsyncWriteExt}; + use tokio::net::TcpListener; + use tokio::sync::mpsc; + + #[tokio::test] + async fn test_serve_sink_without_url_header() { + let serve_sink = ServeSink::new(); + let (tx, rx) = mpsc::channel(1); + + let mut headers = HashMap::new(); + headers.insert(NUMAFLOW_ID_HEADER.to_string(), "12345".to_string()); + + let request = SinkRequest { + keys: vec![], + id: "1".to_string(), + value: b"test".to_vec(), + watermark: Default::default(), + headers, + event_time: Default::default(), + }; + + tx.send(request).await.unwrap(); + drop(tx); // Close the sender to end the stream + + let responses = serve_sink.sink(rx).await; + assert_eq!(responses.len(), 1); + assert!(responses[0].success); + } + + #[tokio::test] + async fn test_serve_sink_without_id_header() { + let serve_sink = ServeSink::new(); + let (tx, rx) = mpsc::channel(1); + + let mut headers = HashMap::new(); + headers.insert( + NUMAFLOW_CALLBACK_URL_HEADER.to_string(), + "http://localhost:8080".to_string(), + ); + + let request = SinkRequest { + keys: vec![], + id: "1".to_string(), + value: b"test".to_vec(), + watermark: Default::default(), + headers, + event_time: Default::default(), + }; + + tx.send(request).await.unwrap(); + drop(tx); // Close the 
sender to end the stream + + let responses = serve_sink.sink(rx).await; + assert_eq!(responses.len(), 1); + assert!(responses[0].success); + } + + async fn start_server() -> (String, mpsc::Sender<()>) { + let (shutdown_tx, mut shutdown_rx) = mpsc::channel(1); + let listener = TcpListener::bind("0.0.0.0:0").await.unwrap(); + let addr = listener.local_addr().unwrap(); + let addr_str = format!("{}", addr); + tokio::spawn(async move { + loop { + tokio::select! { + _ = shutdown_rx.recv() => { + break; + } + Ok((mut socket, _)) = listener.accept() => { + tokio::spawn(async move { + let mut buffer = [0; 1024]; + let _ = socket.read(&mut buffer).await.unwrap(); + let request = String::from_utf8_lossy(&buffer[..]); + let response = if request.contains("/error") { + "HTTP/1.1 500 INTERNAL SERVER ERROR\r\n\ + content-length: 0\r\n\ + \r\n" + } else { + "HTTP/1.1 200 OK\r\n\ + content-length: 0\r\n\ + \r\n" + }; + socket.write_all(response.as_bytes()).await.unwrap(); + }); + } + } + } + }); + (addr_str, shutdown_tx) + } + + #[tokio::test] + async fn test_serve_sink() { + let serve_sink = ServeSink::new(); + + let (addr, shutdown_tx) = start_server().await; + + let (tx, rx) = mpsc::channel(1); + + let mut headers = HashMap::new(); + headers.insert(NUMAFLOW_ID_HEADER.to_string(), "12345".to_string()); + + headers.insert( + NUMAFLOW_CALLBACK_URL_HEADER.to_string(), + format!("http://{}/sync", addr), + ); + + let request = SinkRequest { + keys: vec![], + id: "1".to_string(), + value: b"test".to_vec(), + watermark: Default::default(), + headers, + event_time: Default::default(), + }; + + tx.send(request).await.unwrap(); + drop(tx); // Close the sender to end the stream + + let responses = serve_sink.sink(rx).await; + assert_eq!(responses.len(), 1); + assert!(responses[0].success); + + // Stop the server + shutdown_tx.send(()).await.unwrap(); + } +} From 1f2f482a0f50a2527630e00262f473f1483cefce Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Fri, 23 Aug 2024 23:51:41 -0700 Subject: 
[PATCH 028/188] chore: disallow MonoVertex replicas less than min or greater than max (#1994) --- pkg/reconciler/monovertex/controller.go | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/pkg/reconciler/monovertex/controller.go b/pkg/reconciler/monovertex/controller.go index b345369c66..3bec61394e 100644 --- a/pkg/reconciler/monovertex/controller.go +++ b/pkg/reconciler/monovertex/controller.go @@ -110,7 +110,9 @@ func (mr *monoVertexReconciler) reconcile(ctx context.Context, monoVtx *dfv1.Mon }() monoVtx.Status.SetObservedGeneration(monoVtx.Generation) - mr.scaler.StartWatching(mVtxKey) + if monoVtx.Scalable() { + mr.scaler.StartWatching(mVtxKey) + } // TODO: handle lifecycle changes // Regular mono vertex change @@ -155,6 +157,16 @@ func (mr *monoVertexReconciler) reconcileNonLifecycleChanges(ctx context.Context func (mr *monoVertexReconciler) reconcilePods(ctx context.Context, monoVtx *dfv1.MonoVertex) error { desiredReplicas := monoVtx.GetReplicas() + // Don't allow replicas to be out of the range of min and max when auto scaling is enabled + if s := monoVtx.Spec.Scale; !s.Disabled { + max := int(s.GetMaxReplicas()) + min := int(s.GetMinReplicas()) + if desiredReplicas < min { + desiredReplicas = min + } else if desiredReplicas > max { + desiredReplicas = max + } + } // Set metrics defer func() { reconciler.MonoVertexDesiredReplicas.WithLabelValues(monoVtx.Namespace, monoVtx.Name).Set(float64(desiredReplicas)) From 11aa5b65977736dae34ad3cf029cb55751bd41c3 Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Sun, 25 Aug 2024 09:57:33 -0700 Subject: [PATCH 029/188] test: avoid building image twice in e2e testing ci (#2001) Signed-off-by: Derek Wang --- .github/workflows/ci.yaml | 2 +- Makefile | 6 ++++- config/apps/kafka/kustomization.yaml | 7 ++--- config/apps/nats/kustomization.yaml | 7 ++--- config/apps/redis/kustomization.yaml | 7 ++--- .../examples/transformer/kustomization.yaml | 26 ++++++++++--------- 
test/manifests/kustomization.yaml | 6 +++-- 7 files changed, 36 insertions(+), 25 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 02dd5feeb5..9eaa4de89b 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -243,4 +243,4 @@ jobs: - name: Run tests env: GOPATH: /home/runner/go - run: KUBECONFIG=~/.kube/numaflow-e2e-config VERSION=${{ github.sha }} ISBSVC=${{matrix.driver}} make test-${{matrix.case}} + run: KUBECONFIG=~/.kube/numaflow-e2e-config VERSION=${{ github.sha }} ISBSVC=${{matrix.driver}} SKIP_IMAGE_BUILD=true make test-${{matrix.case}} diff --git a/Makefile b/Makefile index d31c9095e6..a4bc2012bc 100644 --- a/Makefile +++ b/Makefile @@ -127,7 +127,11 @@ test-idle-source-e2e: test-builtin-source-e2e: test-%: $(MAKE) cleanup-e2e - $(MAKE) image e2eapi-image +ifndef SKIP_IMAGE_BUILD + # Skip building image in CI since the image would have been built during "make start" + $(MAKE) image +endif + $(MAKE) e2eapi-image $(MAKE) restart-control-plane-components cat test/manifests/e2e-api-pod.yaml | sed 's@quay.io/numaproj/@$(IMAGE_NAMESPACE)/@' | sed 's/:latest/:$(VERSION)/' | kubectl -n numaflow-system apply -f - go generate $(shell find ./test/$* -name '*.go') diff --git a/config/apps/kafka/kustomization.yaml b/config/apps/kafka/kustomization.yaml index b59416a95f..965156ee4a 100644 --- a/config/apps/kafka/kustomization.yaml +++ b/config/apps/kafka/kustomization.yaml @@ -4,6 +4,7 @@ kind: Kustomization resources: - kafka-minimal.yaml -commonLabels: - "numaflow-e2e": "true" - +labels: + - includeSelectors: true + pairs: + numaflow-e2e: "true" diff --git a/config/apps/nats/kustomization.yaml b/config/apps/nats/kustomization.yaml index 9d42b1a927..374d50a960 100644 --- a/config/apps/nats/kustomization.yaml +++ b/config/apps/nats/kustomization.yaml @@ -5,6 +5,7 @@ resources: - nats.yaml - nats-auth-fake-token.yaml -commonLabels: - "numaflow-e2e": "true" - +labels: + - includeSelectors: true + pairs: + 
numaflow-e2e: "true" diff --git a/config/apps/redis/kustomization.yaml b/config/apps/redis/kustomization.yaml index 488495a419..453d88b4b7 100644 --- a/config/apps/redis/kustomization.yaml +++ b/config/apps/redis/kustomization.yaml @@ -5,6 +5,7 @@ kind: Kustomization resources: - redis-minimal.yaml -commonLabels: - "numaflow-e2e": "true" - +labels: + - includeSelectors: true + pairs: + numaflow-e2e: "true" diff --git a/docs/user-guide/reference/kustomize/examples/transformer/kustomization.yaml b/docs/user-guide/reference/kustomize/examples/transformer/kustomization.yaml index afa859a44b..92d6bd308d 100644 --- a/docs/user-guide/reference/kustomize/examples/transformer/kustomization.yaml +++ b/docs/user-guide/reference/kustomize/examples/transformer/kustomization.yaml @@ -2,29 +2,31 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization configurations: -- https://raw.githubusercontent.com/numaproj/numaflow/main/docs/user-guide/kustomize/numaflow-transformer-config.yaml + - https://raw.githubusercontent.com/numaproj/numaflow/main/docs/user-guide/kustomize/numaflow-transformer-config.yaml namePrefix: my- resources: -- my-pipeline.yaml + - my-pipeline.yaml configMapGenerator: -- name: my-cm - literals: - - FOO=BAR + - literals: + - FOO=BAR + name: my-cm secretGenerator: -- name: my-secret - literals: - - password=Pa5SW0rD + - literals: + - password=Pa5SW0rD + name: my-secret -commonLabels: - foo: bar +labels: + - includeSelectors: true + pairs: + foo: bar commonAnnotations: foo: bar images: -- name: my-pipeline/my-udf - newTag: my-version + - name: my-pipeline/my-udf + newTag: my-version diff --git a/test/manifests/kustomization.yaml b/test/manifests/kustomization.yaml index 87a4407f00..86071ab881 100644 --- a/test/manifests/kustomization.yaml +++ b/test/manifests/kustomization.yaml @@ -60,5 +60,7 @@ patches: namespace: numaflow-system -commonLabels: - "app.kubernetes.io/part-of": "numaflow" +labels: + - includeSelectors: true + pairs: + 
app.kubernetes.io/part-of: numaflow From 102d1de1230a5a9baf29128757b12e6af4413bf3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 25 Aug 2024 15:33:17 -0400 Subject: [PATCH 030/188] chore(deps): bump micromatch from 4.0.7 to 4.0.8 in /ui (#2002) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- ui/yarn.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ui/yarn.lock b/ui/yarn.lock index c6b1eb6c88..eda56fac2e 100644 --- a/ui/yarn.lock +++ b/ui/yarn.lock @@ -8091,9 +8091,9 @@ methods@~1.1.2: integrity sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w== micromatch@^4.0.2, micromatch@^4.0.4, micromatch@^4.0.5: - version "4.0.7" - resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.7.tgz#33e8190d9fe474a9895525f5618eee136d46c2e5" - integrity sha512-LPP/3KorzCwBxfeUuZmaR6bG2kdeHSbe0P2tY3FLRU4vYrjYz5hI4QZwV0njUx3jeuKe67YukQ1LSPZBKDqO/Q== + version "4.0.8" + resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.8.tgz#d66fa18f3a47076789320b9b1af32bd86d9fa202" + integrity sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA== dependencies: braces "^3.0.3" picomatch "^2.3.1" From 91f372ca9ea413041ad157746530481d78114fcf Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Mon, 26 Aug 2024 11:23:22 -0700 Subject: [PATCH 031/188] feat: more flexible scaling with `replicasPerScaleUp` and `replicasPerScaleDown` (#2003) Signed-off-by: Derek Wang --- api/json-schema/schema.json | 17 +- api/openapi-spec/swagger.json | 17 +- .../numaflow.numaproj.io_monovertices.yaml | 9 +- .../full/numaflow.numaproj.io_pipelines.yaml | 9 +- .../full/numaflow.numaproj.io_vertices.yaml | 9 +- config/install.yaml | 27 +- config/namespace-install.yaml | 27 +- docs/APIs.md | 68 +- docs/user-guide/reference/autoscaling.md | 8 +- 
pkg/apis/numaflow/v1alpha1/generated.pb.go | 1026 +++++++++-------- pkg/apis/numaflow/v1alpha1/generated.proto | 32 +- .../numaflow/v1alpha1/openapi_generated.go | 23 +- pkg/apis/numaflow/v1alpha1/scale.go | 54 +- pkg/apis/numaflow/v1alpha1/scale_test.go | 12 +- .../v1alpha1/zz_generated.deepcopy.go | 19 +- pkg/reconciler/monovertex/scaling/scaling.go | 11 +- pkg/reconciler/vertex/scaling/scaling.go | 13 +- rust/numaflow-models/src/models/scale.rs | 17 +- 18 files changed, 768 insertions(+), 630 deletions(-) diff --git a/api/json-schema/schema.json b/api/json-schema/schema.json index 9a119f6a1d..e3601b73a4 100644 --- a/api/json-schema/schema.json +++ b/api/json-schema/schema.json @@ -19734,11 +19734,6 @@ "io.numaproj.numaflow.v1alpha1.Scale": { "description": "Scale defines the parameters for autoscaling.", "properties": { - "cooldownSeconds": { - "description": "Deprecated: Use scaleUpCooldownSeconds and scaleDownCooldownSeconds instead. Cooldown seconds after a scaling operation before another one.", - "format": "int64", - "type": "integer" - }, "disabled": { "description": "Whether to disable autoscaling. Set to \"true\" when using Kubernetes HPA or any other 3rd party autoscaling strategies.", "type": "boolean" @@ -19759,7 +19754,17 @@ "type": "integer" }, "replicasPerScale": { - "description": "ReplicasPerScale defines maximum replicas can be scaled up or down at once. The is use to prevent too aggressive scaling operations", + "description": "DeprecatedReplicasPerScale defines the number of maximum replicas that can be changed in a single scale up or down operation. The is use to prevent from too aggressive scaling operations Deprecated: Use ReplicasPerScaleUp and ReplicasPerScaleDown instead", + "format": "int64", + "type": "integer" + }, + "replicasPerScaleDown": { + "description": "ReplicasPerScaleDown defines the number of maximum replicas that can be changed in a single scaled down operation. 
The is use to prevent from too aggressive scaling down operations", + "format": "int64", + "type": "integer" + }, + "replicasPerScaleUp": { + "description": "ReplicasPerScaleUp defines the number of maximum replicas that can be changed in a single scaled up operation. The is use to prevent from too aggressive scaling up operations", "format": "int64", "type": "integer" }, diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index e0be1c1a7f..55540a4b09 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -19721,11 +19721,6 @@ "description": "Scale defines the parameters for autoscaling.", "type": "object", "properties": { - "cooldownSeconds": { - "description": "Deprecated: Use scaleUpCooldownSeconds and scaleDownCooldownSeconds instead. Cooldown seconds after a scaling operation before another one.", - "type": "integer", - "format": "int64" - }, "disabled": { "description": "Whether to disable autoscaling. Set to \"true\" when using Kubernetes HPA or any other 3rd party autoscaling strategies.", "type": "boolean" @@ -19746,7 +19741,17 @@ "format": "int32" }, "replicasPerScale": { - "description": "ReplicasPerScale defines maximum replicas can be scaled up or down at once. The is use to prevent too aggressive scaling operations", + "description": "DeprecatedReplicasPerScale defines the number of maximum replicas that can be changed in a single scale up or down operation. The is use to prevent from too aggressive scaling operations Deprecated: Use ReplicasPerScaleUp and ReplicasPerScaleDown instead", + "type": "integer", + "format": "int64" + }, + "replicasPerScaleDown": { + "description": "ReplicasPerScaleDown defines the number of maximum replicas that can be changed in a single scaled down operation. 
The is use to prevent from too aggressive scaling down operations", + "type": "integer", + "format": "int64" + }, + "replicasPerScaleUp": { + "description": "ReplicasPerScaleUp defines the number of maximum replicas that can be changed in a single scaled up operation. The is use to prevent from too aggressive scaling up operations", "type": "integer", "format": "int64" }, diff --git a/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml b/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml index ece346f242..7967f0362d 100644 --- a/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml @@ -2210,9 +2210,6 @@ spec: type: string scale: properties: - cooldownSeconds: - format: int32 - type: integer disabled: type: boolean lookbackSeconds: @@ -2227,6 +2224,12 @@ spec: replicasPerScale: format: int32 type: integer + replicasPerScaleDown: + format: int32 + type: integer + replicasPerScaleUp: + format: int32 + type: integer scaleDownCooldownSeconds: format: int32 type: integer diff --git a/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml b/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml index 0f8f7d3c77..00c4a0b008 100644 --- a/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml @@ -5939,9 +5939,6 @@ spec: type: string scale: properties: - cooldownSeconds: - format: int32 - type: integer disabled: type: boolean lookbackSeconds: @@ -5956,6 +5953,12 @@ spec: replicasPerScale: format: int32 type: integer + replicasPerScaleDown: + format: int32 + type: integer + replicasPerScaleUp: + format: int32 + type: integer scaleDownCooldownSeconds: format: int32 type: integer diff --git a/config/base/crds/full/numaflow.numaproj.io_vertices.yaml b/config/base/crds/full/numaflow.numaproj.io_vertices.yaml index e832cec26a..cd7e10a99f 100644 --- a/config/base/crds/full/numaflow.numaproj.io_vertices.yaml +++ 
b/config/base/crds/full/numaflow.numaproj.io_vertices.yaml @@ -1587,9 +1587,6 @@ spec: type: string scale: properties: - cooldownSeconds: - format: int32 - type: integer disabled: type: boolean lookbackSeconds: @@ -1604,6 +1601,12 @@ spec: replicasPerScale: format: int32 type: integer + replicasPerScaleDown: + format: int32 + type: integer + replicasPerScaleUp: + format: int32 + type: integer scaleDownCooldownSeconds: format: int32 type: integer diff --git a/config/install.yaml b/config/install.yaml index b8778fdb89..cf347b8bcf 100644 --- a/config/install.yaml +++ b/config/install.yaml @@ -4822,9 +4822,6 @@ spec: type: string scale: properties: - cooldownSeconds: - format: int32 - type: integer disabled: type: boolean lookbackSeconds: @@ -4839,6 +4836,12 @@ spec: replicasPerScale: format: int32 type: integer + replicasPerScaleDown: + format: int32 + type: integer + replicasPerScaleUp: + format: int32 + type: integer scaleDownCooldownSeconds: format: int32 type: integer @@ -14096,9 +14099,6 @@ spec: type: string scale: properties: - cooldownSeconds: - format: int32 - type: integer disabled: type: boolean lookbackSeconds: @@ -14113,6 +14113,12 @@ spec: replicasPerScale: format: int32 type: integer + replicasPerScaleDown: + format: int32 + type: integer + replicasPerScaleUp: + format: int32 + type: integer scaleDownCooldownSeconds: format: int32 type: integer @@ -19518,9 +19524,6 @@ spec: type: string scale: properties: - cooldownSeconds: - format: int32 - type: integer disabled: type: boolean lookbackSeconds: @@ -19535,6 +19538,12 @@ spec: replicasPerScale: format: int32 type: integer + replicasPerScaleDown: + format: int32 + type: integer + replicasPerScaleUp: + format: int32 + type: integer scaleDownCooldownSeconds: format: int32 type: integer diff --git a/config/namespace-install.yaml b/config/namespace-install.yaml index c265de29f4..be25fca8be 100644 --- a/config/namespace-install.yaml +++ b/config/namespace-install.yaml @@ -4822,9 +4822,6 @@ spec: type: string 
scale: properties: - cooldownSeconds: - format: int32 - type: integer disabled: type: boolean lookbackSeconds: @@ -4839,6 +4836,12 @@ spec: replicasPerScale: format: int32 type: integer + replicasPerScaleDown: + format: int32 + type: integer + replicasPerScaleUp: + format: int32 + type: integer scaleDownCooldownSeconds: format: int32 type: integer @@ -14096,9 +14099,6 @@ spec: type: string scale: properties: - cooldownSeconds: - format: int32 - type: integer disabled: type: boolean lookbackSeconds: @@ -14113,6 +14113,12 @@ spec: replicasPerScale: format: int32 type: integer + replicasPerScaleDown: + format: int32 + type: integer + replicasPerScaleUp: + format: int32 + type: integer scaleDownCooldownSeconds: format: int32 type: integer @@ -19518,9 +19524,6 @@ spec: type: string scale: properties: - cooldownSeconds: - format: int32 - type: integer disabled: type: boolean lookbackSeconds: @@ -19535,6 +19538,12 @@ spec: replicasPerScale: format: int32 type: integer + replicasPerScaleDown: + format: int32 + type: integer + replicasPerScaleUp: + format: int32 + type: integer scaleDownCooldownSeconds: format: int32 type: integer diff --git a/docs/APIs.md b/docs/APIs.md index 2fcab4c372..a5c42c14f7 100644 --- a/docs/APIs.md +++ b/docs/APIs.md @@ -8428,26 +8428,6 @@ processing rate.
- - - - - - - - @@ -8522,8 +8502,10 @@ have buffers to read. (Optional)

-ReplicasPerScale defines maximum replicas can be scaled up or down at -once. The is use to prevent too aggressive scaling operations +DeprecatedReplicasPerScale defines the number of maximum replicas that +can be changed in a single scale up or down operation. The is use to +prevent from too aggressive scaling operations Deprecated: Use +ReplicasPerScaleUp and ReplicasPerScaleDown instead

@@ -8572,6 +8554,48 @@ CooldownSeconds if not set.
+ + + + + + + + + + + + + + + +
-cooldownSeconds
uint32 -
- -(Optional) -

- -Deprecated: Use scaleUpCooldownSeconds and scaleDownCooldownSeconds -instead. Cooldown seconds after a scaling operation before another one. -

- -
- zeroReplicaSleepSeconds
uint32
+ +replicasPerScaleUp
uint32 +
+ +(Optional) +

+ +ReplicasPerScaleUp defines the number of maximum replicas that can be +changed in a single scaled up operation. The is use to prevent from too +aggressive scaling up operations +

+ +
+ +replicasPerScaleDown
uint32 +
+ +(Optional) +

+ +ReplicasPerScaleDown defines the number of maximum replicas that can be +changed in a single scaled down operation. The is use to prevent from +too aggressive scaling down operations +

+ +
diff --git a/docs/user-guide/reference/autoscaling.md b/docs/user-guide/reference/autoscaling.md index 6226e7a9b6..e78cc0ec59 100644 --- a/docs/user-guide/reference/autoscaling.md +++ b/docs/user-guide/reference/autoscaling.md @@ -35,7 +35,8 @@ spec: zeroReplicaSleepSeconds: 120 # Optional, defaults to 120. targetProcessingSeconds: 20 # Optional, defaults to 20. targetBufferAvailability: 50 # Optional, defaults to 50. - replicasPerScale: 2 # Optional, defaults to 2. + replicasPerScaleUp: 2 # Optional, defaults to 2. + replicasPerScaleDown: 2 # Optional, defaults to 2. ``` - `disabled` - Whether to disable Numaflow autoscaling, defaults to `false`. @@ -60,8 +61,11 @@ spec: support autoscaling, typically increasing the value leads to lower processing rate, thus less replicas. - `targetBufferAvailability` - Targeted buffer availability in percentage, defaults to `50`. It is only effective for `UDF` and `Sink` vertices, it determines how aggressive you want to do for autoscaling, increasing the value will bring more replicas. -- `replicasPerScale` - Maximum number of replicas change happens in one scale up or down operation, defaults to `2`. For +- `replicasPerScaleUp` - Maximum number of replica change happens in one scale up operation, defaults to `2`. For example, if current replica number is 3, the calculated desired replica number is 8; instead of scaling up the vertex to 8, it only does 5. +- `replicasPerScaleDown` - Maximum number of replica change happens in one scale down operation, defaults to `2`. For + example, if current replica number is 9, the calculated desired replica number is 4; instead of scaling down the vertex to 4, it only does 7. +- `replicasPerScale` - (Deprecated: Use `replicasPerScaleUp` and `replicasPerScaleDown` instead, will be removed in v1.5) Maximum number of replica change happens in one scale up or down operation, defaults to `2`. To disable Numaflow autoscaling, set `disabled: true` as following. 
diff --git a/pkg/apis/numaflow/v1alpha1/generated.pb.go b/pkg/apis/numaflow/v1alpha1/generated.pb.go index 0b8823d5d0..b152ea965c 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.pb.go +++ b/pkg/apis/numaflow/v1alpha1/generated.pb.go @@ -2704,471 +2704,472 @@ func init() { } var fileDescriptor_9d0d1b17d3865563 = []byte{ - // 7415 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x5b, 0x6c, 0x24, 0xc7, - 0x75, 0xb6, 0xe6, 0xc6, 0x99, 0x39, 0x43, 0x72, 0x77, 0x6b, 0xa5, 0x15, 0x77, 0xb5, 0x5a, 0xae, - 0x5b, 0xbf, 0xf4, 0xaf, 0x7f, 0xdb, 0xe4, 0x2f, 0xfe, 0xba, 0xd9, 0xbf, 0x6d, 0x89, 0x43, 0x2e, - 0xb9, 0xd4, 0x92, 0xbb, 0xf4, 0x19, 0x72, 0x25, 0x5b, 0xbf, 0xad, 0xbf, 0xd9, 0x5d, 0x1c, 0xb6, - 0xd8, 0xd3, 0x3d, 0xea, 0xee, 0xe1, 0x2e, 0xe5, 0x04, 0xbe, 0x3d, 0x48, 0x41, 0x12, 0x24, 0xf0, - 0x93, 0x81, 0xc0, 0x09, 0x12, 0x04, 0xf0, 0x83, 0xe1, 0x3c, 0x04, 0x50, 0x1e, 0x02, 0xe4, 0x06, - 0x04, 0x89, 0x13, 0xe4, 0xe2, 0x87, 0x00, 0x51, 0x10, 0x80, 0x88, 0x19, 0xe4, 0x21, 0x09, 0x62, - 0x18, 0x31, 0x10, 0xdb, 0x0b, 0x03, 0x0e, 0xea, 0xd6, 0xb7, 0xe9, 0xd9, 0x25, 0xa7, 0xc9, 0xd5, - 0x2a, 0xd1, 0x5b, 0x77, 0xd5, 0xa9, 0xef, 0x54, 0x9f, 0xba, 0x9d, 0x3a, 0x75, 0xea, 0x34, 0x2c, - 0xb6, 0xad, 0x60, 0xab, 0xb7, 0x31, 0x65, 0xb8, 0x9d, 0x69, 0xa7, 0xd7, 0xd1, 0xbb, 0x9e, 0xfb, - 0x1a, 0x7f, 0xd8, 0xb4, 0xdd, 0x9b, 0xd3, 0xdd, 0xed, 0xf6, 0xb4, 0xde, 0xb5, 0xfc, 0x28, 0x65, - 0xe7, 0x49, 0xdd, 0xee, 0x6e, 0xe9, 0x4f, 0x4e, 0xb7, 0xa9, 0x43, 0x3d, 0x3d, 0xa0, 0xe6, 0x54, - 0xd7, 0x73, 0x03, 0x97, 0x3c, 0x1b, 0x01, 0x4d, 0x29, 0xa0, 0x29, 0x55, 0x6c, 0xaa, 0xbb, 0xdd, - 0x9e, 0x62, 0x40, 0x51, 0x8a, 0x02, 0x3a, 0xf7, 0x91, 0x58, 0x0d, 0xda, 0x6e, 0xdb, 0x9d, 0xe6, - 0x78, 0x1b, 0xbd, 0x4d, 0xfe, 0xc6, 0x5f, 0xf8, 0x93, 0xe0, 0x73, 0x4e, 0xdb, 0x7e, 0xce, 0x9f, - 0xb2, 0x5c, 0x56, 0xad, 0x69, 0xc3, 0xf5, 0xe8, 0xf4, 0x4e, 0x5f, 0x5d, 0xce, 0x3d, 0x15, 0xd1, - 0x74, 0x74, 0x63, 0xcb, 0x72, 0xa8, 0xb7, 0xab, 0xbe, 
0x65, 0xda, 0xa3, 0xbe, 0xdb, 0xf3, 0x0c, - 0x7a, 0xa8, 0x52, 0xfe, 0x74, 0x87, 0x06, 0x7a, 0x16, 0xaf, 0xe9, 0x41, 0xa5, 0xbc, 0x9e, 0x13, - 0x58, 0x9d, 0x7e, 0x36, 0xcf, 0xdc, 0xad, 0x80, 0x6f, 0x6c, 0xd1, 0x8e, 0x9e, 0x2e, 0xa7, 0xfd, - 0x7d, 0x1d, 0x4e, 0xcf, 0x6e, 0xf8, 0x81, 0xa7, 0x1b, 0xc1, 0xaa, 0x6b, 0xae, 0xd1, 0x4e, 0xd7, - 0xd6, 0x03, 0x4a, 0xb6, 0xa1, 0xc6, 0xea, 0x66, 0xea, 0x81, 0x3e, 0x51, 0xb8, 0x58, 0xb8, 0xd4, - 0x98, 0x99, 0x9d, 0x1a, 0xb2, 0x2d, 0xa6, 0x56, 0x24, 0x50, 0x73, 0x74, 0x7f, 0x6f, 0xb2, 0xa6, - 0xde, 0x30, 0x64, 0x40, 0xbe, 0x56, 0x80, 0x51, 0xc7, 0x35, 0x69, 0x8b, 0xda, 0xd4, 0x08, 0x5c, - 0x6f, 0xa2, 0x78, 0xb1, 0x74, 0xa9, 0x31, 0xf3, 0xb9, 0xa1, 0x39, 0x66, 0x7c, 0xd1, 0xd4, 0xb5, - 0x18, 0x83, 0xcb, 0x4e, 0xe0, 0xed, 0x36, 0x1f, 0xfc, 0xf6, 0xde, 0xe4, 0x03, 0xfb, 0x7b, 0x93, - 0xa3, 0xf1, 0x2c, 0x4c, 0xd4, 0x84, 0xac, 0x43, 0x23, 0x70, 0x6d, 0x26, 0x32, 0xcb, 0x75, 0xfc, - 0x89, 0x12, 0xaf, 0xd8, 0x85, 0x29, 0x21, 0x6d, 0xc6, 0x7e, 0x8a, 0x75, 0x97, 0xa9, 0x9d, 0x27, - 0xa7, 0xd6, 0x42, 0xb2, 0xe6, 0x69, 0x09, 0xdc, 0x88, 0xd2, 0x7c, 0x8c, 0xe3, 0x10, 0x0a, 0x27, - 0x7c, 0x6a, 0xf4, 0x3c, 0x2b, 0xd8, 0x9d, 0x73, 0x9d, 0x80, 0xde, 0x0a, 0x26, 0xca, 0x5c, 0xca, - 0x4f, 0x64, 0x41, 0xaf, 0xba, 0x66, 0x2b, 0x49, 0xdd, 0x3c, 0xbd, 0xbf, 0x37, 0x79, 0x22, 0x95, - 0x88, 0x69, 0x4c, 0xe2, 0xc0, 0x49, 0xab, 0xa3, 0xb7, 0xe9, 0x6a, 0xcf, 0xb6, 0x5b, 0xd4, 0xf0, - 0x68, 0xe0, 0x4f, 0x54, 0xf8, 0x27, 0x5c, 0xca, 0xe2, 0xb3, 0xec, 0x1a, 0xba, 0x7d, 0x7d, 0xe3, - 0x35, 0x6a, 0x04, 0x48, 0x37, 0xa9, 0x47, 0x1d, 0x83, 0x36, 0x27, 0xe4, 0xc7, 0x9c, 0x5c, 0x4a, - 0x21, 0x61, 0x1f, 0x36, 0x59, 0x84, 0x53, 0x5d, 0xcf, 0x72, 0x79, 0x15, 0x6c, 0xdd, 0xf7, 0xaf, - 0xe9, 0x1d, 0x3a, 0x31, 0x72, 0xb1, 0x70, 0xa9, 0xde, 0x3c, 0x2b, 0x61, 0x4e, 0xad, 0xa6, 0x09, - 0xb0, 0xbf, 0x0c, 0xb9, 0x04, 0x35, 0x95, 0x38, 0x51, 0xbd, 0x58, 0xb8, 0x54, 0x11, 0x7d, 0x47, - 0x95, 0xc5, 0x30, 0x97, 0x2c, 0x40, 0x4d, 0xdf, 0xdc, 0xb4, 0x1c, 0x46, 0x59, 0xe3, 0x22, 
0x3c, - 0x9f, 0xf5, 0x69, 0xb3, 0x92, 0x46, 0xe0, 0xa8, 0x37, 0x0c, 0xcb, 0x92, 0x17, 0x81, 0xf8, 0xd4, - 0xdb, 0xb1, 0x0c, 0x3a, 0x6b, 0x18, 0x6e, 0xcf, 0x09, 0x78, 0xdd, 0xeb, 0xbc, 0xee, 0xe7, 0x64, - 0xdd, 0x49, 0xab, 0x8f, 0x02, 0x33, 0x4a, 0x91, 0x17, 0xe0, 0xa4, 0x1c, 0x76, 0x91, 0x14, 0x80, - 0x23, 0x3d, 0xc8, 0x04, 0x89, 0xa9, 0x3c, 0xec, 0xa3, 0x26, 0x26, 0x9c, 0xd7, 0x7b, 0x81, 0xdb, - 0x61, 0x90, 0x49, 0xa6, 0x6b, 0xee, 0x36, 0x75, 0x26, 0x1a, 0x17, 0x0b, 0x97, 0x6a, 0xcd, 0x8b, - 0xfb, 0x7b, 0x93, 0xe7, 0x67, 0xef, 0x40, 0x87, 0x77, 0x44, 0x21, 0xd7, 0xa1, 0x6e, 0x3a, 0xfe, - 0xaa, 0x6b, 0x5b, 0xc6, 0xee, 0xc4, 0x28, 0xaf, 0xe0, 0x93, 0xf2, 0x53, 0xeb, 0xf3, 0xd7, 0x5a, - 0x22, 0xe3, 0xf6, 0xde, 0xe4, 0xf9, 0xfe, 0xd9, 0x71, 0x2a, 0xcc, 0xc7, 0x08, 0x83, 0xac, 0x70, - 0xc0, 0x39, 0xd7, 0xd9, 0xb4, 0xda, 0x13, 0x63, 0xbc, 0x35, 0x2e, 0x0e, 0xe8, 0xd0, 0xf3, 0xd7, - 0x5a, 0x82, 0xae, 0x39, 0x26, 0xd9, 0x89, 0x57, 0x8c, 0x10, 0xce, 0x3d, 0x0f, 0xa7, 0xfa, 0x46, - 0x2d, 0x39, 0x09, 0xa5, 0x6d, 0xba, 0xcb, 0x27, 0xa5, 0x3a, 0xb2, 0x47, 0xf2, 0x20, 0x54, 0x76, - 0x74, 0xbb, 0x47, 0x27, 0x8a, 0x3c, 0x4d, 0xbc, 0x7c, 0xac, 0xf8, 0x5c, 0x41, 0xfb, 0x8d, 0x12, - 0x8c, 0xaa, 0xb9, 0xa0, 0x65, 0x39, 0xdb, 0xe4, 0x25, 0x28, 0xd9, 0x6e, 0x5b, 0xce, 0x68, 0x1f, - 0x1f, 0x7a, 0x7e, 0x59, 0x76, 0xdb, 0xcd, 0xea, 0xfe, 0xde, 0x64, 0x69, 0xd9, 0x6d, 0x23, 0x43, - 0x24, 0x06, 0x54, 0xb6, 0xf5, 0xcd, 0x6d, 0x9d, 0xd7, 0xa1, 0x31, 0xd3, 0x1c, 0x1a, 0xfa, 0x2a, - 0x43, 0x61, 0x75, 0x6d, 0xd6, 0xf7, 0xf7, 0x26, 0x2b, 0xfc, 0x15, 0x05, 0x36, 0x71, 0xa1, 0xbe, - 0x61, 0xeb, 0xc6, 0xf6, 0x96, 0x6b, 0xd3, 0x89, 0x52, 0x4e, 0x46, 0x4d, 0x85, 0x24, 0x1a, 0x20, - 0x7c, 0xc5, 0x88, 0x07, 0x31, 0x60, 0xa4, 0x67, 0xfa, 0x96, 0xb3, 0x2d, 0x67, 0xa7, 0xe7, 0x87, - 0xe6, 0xb6, 0x3e, 0xcf, 0xbf, 0x09, 0xf6, 0xf7, 0x26, 0x47, 0xc4, 0x33, 0x4a, 0x68, 0xed, 0x7b, - 0x0d, 0x18, 0x57, 0x8d, 0x74, 0x83, 0x7a, 0x01, 0xbd, 0x45, 0x2e, 0x42, 0xd9, 0x61, 0x83, 0x86, - 0x37, 0x72, 0x73, 0x54, 0xf6, 
0xc9, 0x32, 0x1f, 0x2c, 0x3c, 0x87, 0xd5, 0x4c, 0x2c, 0xb8, 0x52, - 0xe0, 0xc3, 0xd7, 0xac, 0xc5, 0x61, 0x44, 0xcd, 0xc4, 0x33, 0x4a, 0x68, 0xf2, 0x0a, 0x94, 0xf9, - 0xc7, 0x0b, 0x51, 0x7f, 0x62, 0x78, 0x16, 0xec, 0xd3, 0x6b, 0xec, 0x0b, 0xf8, 0x87, 0x73, 0x50, - 0xd6, 0x15, 0x7b, 0xe6, 0xa6, 0x14, 0xec, 0xc7, 0x73, 0x08, 0x76, 0x41, 0x74, 0xc5, 0xf5, 0xf9, - 0x05, 0x64, 0x88, 0xe4, 0x97, 0x0a, 0x70, 0xca, 0x70, 0x9d, 0x40, 0x67, 0x4a, 0x80, 0x5a, 0xfe, - 0x26, 0x2a, 0x9c, 0xcf, 0x8b, 0x43, 0xf3, 0x99, 0x4b, 0x23, 0x36, 0x1f, 0x62, 0xb3, 0x79, 0x5f, - 0x32, 0xf6, 0xf3, 0x26, 0xbf, 0x52, 0x80, 0x87, 0xd8, 0x2c, 0xdb, 0x47, 0xcc, 0xd7, 0x86, 0xa3, - 0xad, 0xd5, 0xd9, 0xfd, 0xbd, 0xc9, 0x87, 0x96, 0xb2, 0x98, 0x61, 0x76, 0x1d, 0x58, 0xed, 0x4e, - 0xeb, 0xfd, 0x0a, 0x03, 0x5f, 0x77, 0x1a, 0x33, 0xcb, 0x47, 0xa9, 0x84, 0x34, 0x1f, 0x91, 0x5d, - 0x39, 0x4b, 0xe7, 0xc2, 0xac, 0x5a, 0x90, 0xcb, 0x50, 0xdd, 0x71, 0xed, 0x5e, 0x87, 0xfa, 0x13, - 0x35, 0xbe, 0x72, 0x9f, 0xcb, 0x9a, 0x50, 0x6f, 0x70, 0x92, 0xe6, 0x09, 0x09, 0x5f, 0x15, 0xef, - 0x3e, 0xaa, 0xb2, 0xc4, 0x82, 0x11, 0xdb, 0xea, 0x58, 0x81, 0xcf, 0x97, 0xb4, 0xc6, 0xcc, 0xe5, - 0xa1, 0x3f, 0x4b, 0x0c, 0xd1, 0x65, 0x0e, 0x26, 0x46, 0x8d, 0x78, 0x46, 0xc9, 0x80, 0x4d, 0x85, - 0xbe, 0xa1, 0xdb, 0x62, 0xc9, 0x6b, 0xcc, 0x7c, 0x72, 0xf8, 0x61, 0xc3, 0x50, 0x9a, 0x63, 0xf2, - 0x9b, 0x2a, 0xfc, 0x15, 0x05, 0x36, 0xf9, 0x2c, 0x8c, 0x27, 0x5a, 0xd3, 0x9f, 0x68, 0x70, 0xe9, - 0x3c, 0x9a, 0x25, 0x9d, 0x90, 0xaa, 0x79, 0x46, 0x82, 0x8d, 0x27, 0x7a, 0x88, 0x8f, 0x29, 0x30, - 0x72, 0x15, 0x6a, 0xbe, 0x65, 0x52, 0x43, 0xf7, 0xfc, 0x89, 0xd1, 0x83, 0x00, 0x9f, 0x94, 0xc0, - 0xb5, 0x96, 0x2c, 0x86, 0x21, 0x00, 0x99, 0x02, 0xe8, 0xea, 0x5e, 0x60, 0x09, 0x15, 0x72, 0x8c, - 0xab, 0x33, 0xe3, 0xfb, 0x7b, 0x93, 0xb0, 0x1a, 0xa6, 0x62, 0x8c, 0x82, 0xd1, 0xb3, 0xb2, 0x4b, - 0x4e, 0xb7, 0x17, 0xf8, 0x13, 0xe3, 0x17, 0x4b, 0x97, 0xea, 0x82, 0xbe, 0x15, 0xa6, 0x62, 0x8c, - 0x82, 0x7c, 0xab, 0x00, 0x8f, 0x44, 0xaf, 0xfd, 0x83, 0xec, 0xc4, 
0x91, 0x0f, 0xb2, 0xc9, 0xfd, - 0xbd, 0xc9, 0x47, 0x5a, 0x83, 0x59, 0xe2, 0x9d, 0xea, 0xa3, 0xbd, 0x04, 0x63, 0xb3, 0xbd, 0x60, - 0xcb, 0xf5, 0xac, 0x37, 0xb8, 0x3a, 0x4c, 0x16, 0xa0, 0x12, 0x70, 0xb5, 0x46, 0xac, 0xcb, 0x8f, - 0x67, 0x89, 0x5a, 0xa8, 0x98, 0x57, 0xe9, 0xae, 0xd2, 0x06, 0xc4, 0xfa, 0x28, 0xd4, 0x1c, 0x51, - 0x5c, 0xfb, 0xf5, 0x02, 0xd4, 0x9b, 0xba, 0x6f, 0x19, 0x0c, 0x9e, 0xcc, 0x41, 0xb9, 0xe7, 0x53, - 0xef, 0x70, 0xa0, 0x7c, 0x96, 0x5e, 0xf7, 0xa9, 0x87, 0xbc, 0x30, 0xb9, 0x0e, 0xb5, 0xae, 0xee, - 0xfb, 0x37, 0x5d, 0xcf, 0x94, 0x2b, 0xcd, 0x01, 0x81, 0x84, 0xbe, 0x2a, 0x8b, 0x62, 0x08, 0xa2, - 0x35, 0x20, 0x5a, 0x6a, 0xb5, 0x1f, 0x14, 0xe0, 0x74, 0xb3, 0xb7, 0xb9, 0x49, 0x3d, 0xa9, 0x9e, - 0x09, 0xc5, 0x87, 0x50, 0xa8, 0x78, 0xd4, 0xb4, 0x7c, 0x59, 0xf7, 0xf9, 0xa1, 0x9b, 0x0e, 0x19, - 0x8a, 0xd4, 0xb3, 0xb8, 0xbc, 0x78, 0x02, 0x0a, 0x74, 0xd2, 0x83, 0xfa, 0x6b, 0x34, 0xf0, 0x03, - 0x8f, 0xea, 0x1d, 0xf9, 0x75, 0x57, 0x86, 0x66, 0xf5, 0x22, 0x0d, 0x5a, 0x1c, 0x29, 0xae, 0xd6, - 0x85, 0x89, 0x18, 0x71, 0xd2, 0xfe, 0xa8, 0x02, 0xa3, 0x73, 0x6e, 0x67, 0xc3, 0x72, 0xa8, 0x79, - 0xd9, 0x6c, 0x53, 0xf2, 0x2a, 0x94, 0xa9, 0xd9, 0xa6, 0xf2, 0x6b, 0x87, 0x5f, 0x67, 0x19, 0x58, - 0xa4, 0x2d, 0xb0, 0x37, 0xe4, 0xc0, 0x64, 0x19, 0xc6, 0x37, 0x3d, 0xb7, 0x23, 0xa6, 0xae, 0xb5, - 0xdd, 0xae, 0x54, 0x15, 0x9b, 0xff, 0x43, 0x4d, 0x07, 0x0b, 0x89, 0xdc, 0xdb, 0x7b, 0x93, 0x10, - 0xbd, 0x61, 0xaa, 0x2c, 0x79, 0x19, 0x26, 0xa2, 0x94, 0x70, 0x0c, 0xcf, 0x31, 0xbd, 0x9a, 0xab, - 0x0a, 0x95, 0xe6, 0xf9, 0xfd, 0xbd, 0xc9, 0x89, 0x85, 0x01, 0x34, 0x38, 0xb0, 0x34, 0x79, 0xb3, - 0x00, 0x27, 0xa3, 0x4c, 0x31, 0xaf, 0x4a, 0x0d, 0xe1, 0x88, 0x26, 0x6c, 0xbe, 0x01, 0x59, 0x48, - 0xb1, 0xc0, 0x3e, 0xa6, 0x64, 0x01, 0x46, 0x03, 0x37, 0x26, 0xaf, 0x0a, 0x97, 0x97, 0xa6, 0x76, - 0xcc, 0x6b, 0xee, 0x40, 0x69, 0x25, 0xca, 0x11, 0x84, 0x33, 0xea, 0x3d, 0x25, 0xa9, 0x11, 0x2e, - 0xa9, 0x73, 0xfb, 0x7b, 0x93, 0x67, 0xd6, 0x32, 0x29, 0x70, 0x40, 0x49, 0xf2, 0xa5, 0x02, 0x8c, - 0xab, 
0x2c, 0x29, 0xa3, 0xea, 0x51, 0xca, 0x88, 0xb0, 0x1e, 0xb1, 0x96, 0x60, 0x80, 0x29, 0x86, - 0xda, 0x8f, 0xca, 0x50, 0x0f, 0x67, 0x36, 0xf2, 0x18, 0x54, 0xf8, 0x5e, 0x58, 0x2a, 0xac, 0xe1, - 0x92, 0xc5, 0xb7, 0xcc, 0x28, 0xf2, 0xc8, 0xe3, 0x50, 0x35, 0xdc, 0x4e, 0x47, 0x77, 0x4c, 0x6e, - 0xdf, 0xa8, 0x37, 0x1b, 0x6c, 0xa5, 0x9e, 0x13, 0x49, 0xa8, 0xf2, 0xc8, 0x79, 0x28, 0xeb, 0x5e, - 0x5b, 0x98, 0x1a, 0xea, 0x62, 0x3e, 0x9a, 0xf5, 0xda, 0x3e, 0xf2, 0x54, 0xf2, 0x51, 0x28, 0x51, - 0x67, 0x67, 0xa2, 0x3c, 0x58, 0x15, 0xb8, 0xec, 0xec, 0xdc, 0xd0, 0xbd, 0x66, 0x43, 0xd6, 0xa1, - 0x74, 0xd9, 0xd9, 0x41, 0x56, 0x86, 0x2c, 0x43, 0x95, 0x3a, 0x3b, 0xac, 0xed, 0xa5, 0x0d, 0xe0, - 0x03, 0x03, 0x8a, 0x33, 0x12, 0xa9, 0x15, 0x87, 0x0a, 0x85, 0x4c, 0x46, 0x05, 0x41, 0x3e, 0x0d, - 0xa3, 0x42, 0xb7, 0x58, 0x61, 0x6d, 0xe2, 0x4f, 0x8c, 0x70, 0xc8, 0xc9, 0xc1, 0xca, 0x09, 0xa7, - 0x8b, 0x6c, 0x2e, 0xb1, 0x44, 0x1f, 0x13, 0x50, 0xe4, 0xd3, 0x50, 0x57, 0xe6, 0x34, 0xd5, 0xb2, - 0x99, 0xe6, 0x0a, 0x94, 0x44, 0x48, 0x5f, 0xef, 0x59, 0x1e, 0xed, 0x50, 0x27, 0xf0, 0x9b, 0xa7, - 0xd4, 0x06, 0x56, 0xe5, 0xfa, 0x18, 0xa1, 0x91, 0x8d, 0x7e, 0xbb, 0x8b, 0x30, 0x1a, 0x3c, 0x36, - 0x60, 0x56, 0x1f, 0xc2, 0xe8, 0xf2, 0x39, 0x38, 0x11, 0x1a, 0x46, 0xe4, 0xde, 0x5a, 0x98, 0x11, - 0x9e, 0x62, 0xc5, 0x97, 0x92, 0x59, 0xb7, 0xf7, 0x26, 0x1f, 0xcd, 0xd8, 0x5d, 0x47, 0x04, 0x98, - 0x06, 0xd3, 0xfe, 0xa0, 0x04, 0xfd, 0x6a, 0x77, 0x52, 0x68, 0x85, 0xa3, 0x16, 0x5a, 0xfa, 0x83, - 0xc4, 0xf4, 0xf9, 0x9c, 0x2c, 0x96, 0xff, 0xa3, 0xb2, 0x1a, 0xa6, 0x74, 0xd4, 0x0d, 0x73, 0xbf, - 0x8c, 0x1d, 0xed, 0xad, 0x32, 0x8c, 0xcf, 0xeb, 0xb4, 0xe3, 0x3a, 0x77, 0xdd, 0x84, 0x14, 0xee, - 0x8b, 0x4d, 0xc8, 0x25, 0xa8, 0x79, 0xb4, 0x6b, 0x5b, 0x86, 0xee, 0xf3, 0xa6, 0x97, 0xe6, 0x38, - 0x94, 0x69, 0x18, 0xe6, 0x0e, 0xd8, 0x7c, 0x96, 0xee, 0xcb, 0xcd, 0x67, 0xf9, 0xdd, 0xdf, 0x7c, - 0x6a, 0x5f, 0x2a, 0x02, 0x57, 0x54, 0xc8, 0x45, 0x28, 0xb3, 0x45, 0x38, 0x6d, 0xf2, 0xe0, 0x1d, - 0x87, 0xe7, 0x90, 0x73, 0x50, 0x0c, 0x5c, 
0x39, 0xf2, 0x40, 0xe6, 0x17, 0xd7, 0x5c, 0x2c, 0x06, - 0x2e, 0x79, 0x03, 0xc0, 0x70, 0x1d, 0xd3, 0x52, 0x56, 0xea, 0x7c, 0x1f, 0xb6, 0xe0, 0x7a, 0x37, - 0x75, 0xcf, 0x9c, 0x0b, 0x11, 0xc5, 0xf6, 0x23, 0x7a, 0xc7, 0x18, 0x37, 0xf2, 0x3c, 0x8c, 0xb8, - 0xce, 0x42, 0xcf, 0xb6, 0xb9, 0x40, 0xeb, 0xcd, 0xff, 0xc9, 0xf6, 0x84, 0xd7, 0x79, 0xca, 0xed, - 0xbd, 0xc9, 0xb3, 0x42, 0xbf, 0x65, 0x6f, 0x2f, 0x79, 0x56, 0x60, 0x39, 0xed, 0x56, 0xe0, 0xe9, - 0x01, 0x6d, 0xef, 0xa2, 0x2c, 0xa6, 0x7d, 0xb5, 0x00, 0x8d, 0x05, 0xeb, 0x16, 0x35, 0x5f, 0xb2, - 0x1c, 0xd3, 0xbd, 0x49, 0x10, 0x46, 0x6c, 0xea, 0xb4, 0x83, 0x2d, 0xd9, 0xfb, 0xa7, 0x62, 0x63, - 0x2d, 0x3c, 0xdc, 0x88, 0xea, 0xdf, 0xa1, 0x81, 0xce, 0x46, 0xdf, 0x7c, 0x4f, 0x9a, 0xdf, 0xc5, - 0xa6, 0x94, 0x23, 0xa0, 0x44, 0x22, 0xd3, 0x50, 0x17, 0xda, 0xa7, 0xe5, 0xb4, 0xb9, 0x0c, 0x6b, - 0xd1, 0xa4, 0xd7, 0x52, 0x19, 0x18, 0xd1, 0x68, 0xbb, 0x70, 0xaa, 0x4f, 0x0c, 0xc4, 0x84, 0x72, - 0xa0, 0xb7, 0xd5, 0xfc, 0xba, 0x30, 0xb4, 0x80, 0xd7, 0xf4, 0x76, 0x4c, 0xb8, 0x7c, 0x8d, 0x5f, - 0xd3, 0xd9, 0x1a, 0xcf, 0xd0, 0xb5, 0x9f, 0x14, 0xa0, 0xb6, 0xd0, 0x73, 0x0c, 0xbe, 0x37, 0xba, - 0xbb, 0x29, 0x4c, 0x29, 0x0c, 0xc5, 0x4c, 0x85, 0xa1, 0x07, 0x23, 0xdb, 0x37, 0x43, 0x85, 0xa2, - 0x31, 0xb3, 0x32, 0x7c, 0xaf, 0x90, 0x55, 0x9a, 0xba, 0xca, 0xf1, 0xc4, 0x19, 0xca, 0xb8, 0xac, - 0xd0, 0xc8, 0xd5, 0x97, 0x38, 0x53, 0xc9, 0xec, 0xdc, 0x47, 0xa1, 0x11, 0x23, 0x3b, 0x94, 0xd1, - 0xf6, 0x77, 0xca, 0x30, 0xb2, 0xd8, 0x6a, 0xcd, 0xae, 0x2e, 0x91, 0xa7, 0xa1, 0x21, 0xcd, 0xeb, - 0xd7, 0x22, 0x19, 0x84, 0xa7, 0x2b, 0xad, 0x28, 0x0b, 0xe3, 0x74, 0x4c, 0x1d, 0xf3, 0xa8, 0x6e, - 0x77, 0xe4, 0x60, 0x09, 0xd5, 0x31, 0x64, 0x89, 0x28, 0xf2, 0x88, 0x0e, 0xe3, 0x6c, 0x87, 0xc7, - 0x44, 0x28, 0x76, 0x6f, 0x72, 0xd8, 0x1c, 0x70, 0x7f, 0xc7, 0x95, 0xc4, 0xf5, 0x04, 0x00, 0xa6, - 0x00, 0xc9, 0x73, 0x50, 0xd3, 0x7b, 0xc1, 0x16, 0x57, 0xa0, 0xc5, 0xd8, 0x38, 0xcf, 0x4f, 0x1f, - 0x64, 0xda, 0xed, 0xbd, 0xc9, 0xd1, 0xab, 0xd8, 0x7c, 0x5a, 0xbd, 0x63, 0x48, 
0xcd, 0x2a, 0xa7, - 0x76, 0x8c, 0xb2, 0x72, 0x95, 0x43, 0x57, 0x6e, 0x35, 0x01, 0x80, 0x29, 0x40, 0xf2, 0x0a, 0x8c, - 0x6e, 0xd3, 0xdd, 0x40, 0xdf, 0x90, 0x0c, 0x46, 0x0e, 0xc3, 0xe0, 0x24, 0x53, 0xe1, 0xae, 0xc6, - 0x8a, 0x63, 0x02, 0x8c, 0xf8, 0xf0, 0xe0, 0x36, 0xf5, 0x36, 0xa8, 0xe7, 0xca, 0xdd, 0xa7, 0x64, - 0x52, 0x3d, 0x0c, 0x93, 0x89, 0xfd, 0xbd, 0xc9, 0x07, 0xaf, 0x66, 0xc0, 0x60, 0x26, 0xb8, 0xf6, - 0xe3, 0x22, 0x9c, 0x58, 0x14, 0xe7, 0x9b, 0xae, 0x27, 0x16, 0x61, 0x72, 0x16, 0x4a, 0x5e, 0xb7, - 0xc7, 0x7b, 0x4e, 0x49, 0xd8, 0x49, 0x71, 0x75, 0x1d, 0x59, 0x1a, 0x79, 0x19, 0x6a, 0xa6, 0x9c, - 0x32, 0xe4, 0xe6, 0xf7, 0xb0, 0x13, 0x0d, 0x5f, 0x04, 0xd5, 0x1b, 0x86, 0x68, 0x4c, 0xd3, 0xef, - 0xf8, 0xed, 0x96, 0xf5, 0x06, 0x95, 0xfb, 0x41, 0xae, 0xe9, 0xaf, 0x88, 0x24, 0x54, 0x79, 0x6c, - 0x55, 0xdd, 0xa6, 0xbb, 0x62, 0x37, 0x54, 0x8e, 0x56, 0xd5, 0xab, 0x32, 0x0d, 0xc3, 0x5c, 0x32, - 0xa9, 0x06, 0x0b, 0xeb, 0x05, 0x65, 0xb1, 0x93, 0xbf, 0xc1, 0x12, 0xe4, 0xb8, 0x61, 0x53, 0xe6, - 0x6b, 0x56, 0x10, 0x50, 0x4f, 0x36, 0xe3, 0x50, 0x53, 0xe6, 0x8b, 0x1c, 0x01, 0x25, 0x12, 0xf9, - 0x10, 0xd4, 0x39, 0x78, 0xd3, 0x76, 0x37, 0x78, 0xc3, 0xd5, 0xc5, 0x9e, 0xfe, 0x86, 0x4a, 0xc4, - 0x28, 0x5f, 0xfb, 0x69, 0x11, 0xce, 0x2c, 0xd2, 0x40, 0x68, 0x35, 0xf3, 0xb4, 0x6b, 0xbb, 0xbb, - 0x4c, 0xb5, 0x44, 0xfa, 0x3a, 0x79, 0x01, 0xc0, 0xf2, 0x37, 0x5a, 0x3b, 0x06, 0x1f, 0x07, 0x62, - 0x0c, 0x5f, 0x94, 0x43, 0x12, 0x96, 0x5a, 0x4d, 0x99, 0x73, 0x3b, 0xf1, 0x86, 0xb1, 0x32, 0xd1, - 0xf6, 0xaa, 0x78, 0x87, 0xed, 0x55, 0x0b, 0xa0, 0x1b, 0x29, 0xa8, 0x25, 0x4e, 0xf9, 0x7f, 0x14, - 0x9b, 0xc3, 0xe8, 0xa6, 0x31, 0x98, 0x3c, 0x2a, 0xa3, 0x03, 0x27, 0x4d, 0xba, 0xa9, 0xf7, 0xec, - 0x20, 0x54, 0xaa, 0xe5, 0x20, 0x3e, 0xb8, 0x5e, 0x1e, 0x9e, 0xbd, 0xce, 0xa7, 0x90, 0xb0, 0x0f, - 0x5b, 0xfb, 0xdd, 0x12, 0x9c, 0x5b, 0xa4, 0x41, 0x68, 0x71, 0x91, 0xb3, 0x63, 0xab, 0x4b, 0x0d, - 0xd6, 0x0a, 0x6f, 0x16, 0x60, 0xc4, 0xd6, 0x37, 0xa8, 0xcd, 0x56, 0x2f, 0xf6, 0x35, 0xaf, 0x0e, - 0xbd, 0x10, 0x0c, 
0xe6, 0x32, 0xb5, 0xcc, 0x39, 0xa4, 0x96, 0x06, 0x91, 0x88, 0x92, 0x3d, 0x9b, - 0xd4, 0x0d, 0xbb, 0xe7, 0x07, 0xd4, 0x5b, 0x75, 0xbd, 0x40, 0xea, 0x93, 0xe1, 0xa4, 0x3e, 0x17, - 0x65, 0x61, 0x9c, 0x8e, 0xcc, 0x00, 0x18, 0xb6, 0x45, 0x9d, 0x80, 0x97, 0x12, 0xe3, 0x8a, 0xa8, - 0xf6, 0x9d, 0x0b, 0x73, 0x30, 0x46, 0xc5, 0x58, 0x75, 0x5c, 0xc7, 0x0a, 0x5c, 0xc1, 0xaa, 0x9c, - 0x64, 0xb5, 0x12, 0x65, 0x61, 0x9c, 0x8e, 0x17, 0xa3, 0x81, 0x67, 0x19, 0x3e, 0x2f, 0x56, 0x49, - 0x15, 0x8b, 0xb2, 0x30, 0x4e, 0xc7, 0xd6, 0xbc, 0xd8, 0xf7, 0x1f, 0x6a, 0xcd, 0xfb, 0x66, 0x1d, - 0x2e, 0x24, 0xc4, 0x1a, 0xe8, 0x01, 0xdd, 0xec, 0xd9, 0x2d, 0x1a, 0xa8, 0x06, 0x1c, 0x72, 0x2d, - 0xfc, 0xf9, 0xa8, 0xdd, 0x85, 0x57, 0x85, 0x71, 0x34, 0xed, 0xde, 0x57, 0xc1, 0x03, 0xb5, 0xfd, - 0x34, 0xd4, 0x1d, 0x3d, 0xf0, 0xf9, 0xc0, 0x95, 0x63, 0x34, 0x54, 0xc3, 0xae, 0xa9, 0x0c, 0x8c, - 0x68, 0xc8, 0x2a, 0x3c, 0x28, 0x45, 0x7c, 0xf9, 0x56, 0xd7, 0xf5, 0x02, 0xea, 0x89, 0xb2, 0x72, - 0x39, 0x95, 0x65, 0x1f, 0x5c, 0xc9, 0xa0, 0xc1, 0xcc, 0x92, 0x64, 0x05, 0x4e, 0x1b, 0xe2, 0xa4, - 0x99, 0xda, 0xae, 0x6e, 0x2a, 0x40, 0x61, 0xe0, 0x0a, 0xb7, 0x46, 0x73, 0xfd, 0x24, 0x98, 0x55, - 0x2e, 0xdd, 0x9b, 0x47, 0x86, 0xea, 0xcd, 0xd5, 0x61, 0x7a, 0x73, 0x6d, 0xb8, 0xde, 0x5c, 0x3f, - 0x58, 0x6f, 0x66, 0x92, 0x67, 0xfd, 0x88, 0x7a, 0x4c, 0x3d, 0x11, 0x2b, 0x6c, 0xcc, 0x91, 0x21, - 0x94, 0x7c, 0x2b, 0x83, 0x06, 0x33, 0x4b, 0x92, 0x0d, 0x38, 0x27, 0xd2, 0x2f, 0x3b, 0x86, 0xb7, - 0xdb, 0x65, 0x0b, 0x4f, 0x0c, 0xb7, 0x91, 0xb0, 0x30, 0x9e, 0x6b, 0x0d, 0xa4, 0xc4, 0x3b, 0xa0, - 0x90, 0xff, 0x0b, 0x63, 0xa2, 0x95, 0x56, 0xf4, 0x2e, 0x87, 0x15, 0x6e, 0x0d, 0x0f, 0x49, 0xd8, - 0xb1, 0xb9, 0x78, 0x26, 0x26, 0x69, 0xc9, 0x2c, 0x9c, 0xe8, 0xee, 0x18, 0xec, 0x71, 0x69, 0xf3, - 0x1a, 0xa5, 0x26, 0x35, 0xf9, 0x69, 0x4d, 0xbd, 0xf9, 0xb0, 0x32, 0x74, 0xac, 0x26, 0xb3, 0x31, - 0x4d, 0x4f, 0x9e, 0x83, 0x51, 0x3f, 0xd0, 0xbd, 0x40, 0x9a, 0xf5, 0x26, 0xc6, 0x85, 0xdb, 0x87, - 0xb2, 0x7a, 0xb5, 0x62, 0x79, 0x98, 0xa0, 0xcc, 0x5c, 
0x2f, 0x4e, 0x1c, 0xdf, 0x7a, 0x91, 0x67, - 0xb6, 0xfa, 0xd3, 0x22, 0x5c, 0x5c, 0xa4, 0xc1, 0x8a, 0xeb, 0x48, 0xa3, 0x68, 0xd6, 0xb2, 0x7f, - 0x20, 0x9b, 0x68, 0x72, 0xd1, 0x2e, 0x1e, 0xe9, 0xa2, 0x5d, 0x3a, 0xa2, 0x45, 0xbb, 0x7c, 0x8c, - 0x8b, 0xf6, 0xef, 0x17, 0xe1, 0xe1, 0x84, 0x24, 0x57, 0x5d, 0x53, 0x4d, 0xf8, 0xef, 0x0b, 0xf0, - 0x00, 0x02, 0xbc, 0x2d, 0xf4, 0x4e, 0x7e, 0xac, 0x95, 0xd2, 0x78, 0xbe, 0x92, 0xd6, 0x78, 0x5e, - 0xc9, 0xb3, 0xf2, 0x65, 0x70, 0x38, 0xd0, 0x8a, 0xf7, 0x22, 0x10, 0x4f, 0x1e, 0xc2, 0x09, 0xd3, - 0x4f, 0x4c, 0xe9, 0x09, 0xfd, 0xca, 0xb0, 0x8f, 0x02, 0x33, 0x4a, 0x91, 0x16, 0x3c, 0xe4, 0x53, - 0x27, 0xb0, 0x1c, 0x6a, 0x27, 0xe1, 0x84, 0x36, 0xf4, 0xa8, 0x84, 0x7b, 0xa8, 0x95, 0x45, 0x84, - 0xd9, 0x65, 0xf3, 0xcc, 0x03, 0x7f, 0x01, 0x5c, 0xe5, 0x14, 0xa2, 0x39, 0x32, 0x8d, 0xe5, 0xcd, - 0xb4, 0xc6, 0xf2, 0x6a, 0xfe, 0x76, 0x1b, 0x4e, 0x5b, 0x99, 0x01, 0xe0, 0xad, 0x10, 0x57, 0x57, - 0xc2, 0x45, 0x1a, 0xc3, 0x1c, 0x8c, 0x51, 0xb1, 0x05, 0x48, 0xc9, 0x39, 0xae, 0xa9, 0x84, 0x0b, - 0x50, 0x2b, 0x9e, 0x89, 0x49, 0xda, 0x81, 0xda, 0x4e, 0x65, 0x68, 0x6d, 0xe7, 0x45, 0x20, 0x09, - 0xc3, 0xa3, 0xc0, 0x1b, 0x49, 0xba, 0x35, 0x2e, 0xf5, 0x51, 0x60, 0x46, 0xa9, 0x01, 0x5d, 0xb9, - 0x7a, 0xb4, 0x5d, 0xb9, 0x36, 0x7c, 0x57, 0x26, 0xaf, 0xc2, 0x59, 0xce, 0x4a, 0xca, 0x27, 0x09, - 0x2c, 0xf4, 0x9e, 0x0f, 0x48, 0xe0, 0xb3, 0x38, 0x88, 0x10, 0x07, 0x63, 0xb0, 0xf6, 0x31, 0x3c, - 0x6a, 0x32, 0xe6, 0xba, 0x3d, 0x58, 0x27, 0x9a, 0xcb, 0xa0, 0xc1, 0xcc, 0x92, 0xac, 0x8b, 0x05, - 0xac, 0x1b, 0xea, 0x1b, 0x36, 0x35, 0xa5, 0x5b, 0x67, 0xd8, 0xc5, 0xd6, 0x96, 0x5b, 0x32, 0x07, - 0x63, 0x54, 0x59, 0x6a, 0xca, 0xe8, 0x21, 0xd5, 0x94, 0x45, 0x6e, 0xa5, 0xdf, 0x4c, 0x68, 0x43, - 0x52, 0xd7, 0x09, 0x1d, 0x75, 0xe7, 0xd2, 0x04, 0xd8, 0x5f, 0x86, 0x6b, 0x89, 0x86, 0x67, 0x75, - 0x03, 0x3f, 0x89, 0x35, 0x9e, 0xd2, 0x12, 0x33, 0x68, 0x30, 0xb3, 0x24, 0xd3, 0xcf, 0xb7, 0xa8, - 0x6e, 0x07, 0x5b, 0x49, 0xc0, 0x13, 0x49, 0xfd, 0xfc, 0x4a, 0x3f, 0x09, 0x66, 0x95, 0xcb, 
0x5c, - 0x90, 0x4e, 0xde, 0x9f, 0x6a, 0xd5, 0x97, 0x4b, 0x70, 0x76, 0x91, 0x06, 0xa1, 0x5f, 0xcd, 0xfb, - 0x66, 0x94, 0x77, 0xc1, 0x8c, 0xf2, 0x8d, 0x0a, 0x9c, 0x5e, 0xa4, 0x41, 0x9f, 0x36, 0xf6, 0xdf, - 0x54, 0xfc, 0x2b, 0x70, 0x3a, 0x72, 0xe5, 0x6a, 0x05, 0xae, 0x27, 0xd6, 0xf2, 0xd4, 0x6e, 0xb9, - 0xd5, 0x4f, 0x82, 0x59, 0xe5, 0xc8, 0xa7, 0xe1, 0x61, 0xbe, 0xd4, 0x3b, 0x6d, 0x61, 0x9f, 0x15, - 0xc6, 0x84, 0xd8, 0x35, 0x81, 0x49, 0x09, 0xf9, 0x70, 0x2b, 0x9b, 0x0c, 0x07, 0x95, 0x27, 0x5f, - 0x80, 0xd1, 0xae, 0xd5, 0xa5, 0xb6, 0xe5, 0x70, 0xfd, 0x2c, 0xb7, 0x4b, 0xc8, 0x6a, 0x0c, 0x2c, - 0xda, 0xc0, 0xc5, 0x53, 0x31, 0xc1, 0x30, 0xb3, 0xa7, 0xd6, 0x8e, 0xb1, 0xa7, 0xfe, 0x7b, 0x11, - 0xaa, 0x8b, 0x9e, 0xdb, 0xeb, 0x36, 0x77, 0x49, 0x1b, 0x46, 0x6e, 0xf2, 0xc3, 0x33, 0x79, 0x34, - 0x35, 0xbc, 0x3b, 0xb4, 0x38, 0x83, 0x8b, 0x54, 0x22, 0xf1, 0x8e, 0x12, 0x9e, 0x75, 0xe2, 0x6d, - 0xba, 0x4b, 0x4d, 0x79, 0x86, 0x16, 0x76, 0xe2, 0xab, 0x2c, 0x11, 0x45, 0x1e, 0xe9, 0xc0, 0x09, - 0xdd, 0xb6, 0xdd, 0x9b, 0xd4, 0x5c, 0xd6, 0x03, 0xea, 0x50, 0x5f, 0x1d, 0x49, 0x1e, 0xd6, 0x2c, - 0xcd, 0xcf, 0xf5, 0x67, 0x93, 0x50, 0x98, 0xc6, 0x26, 0xaf, 0x41, 0xd5, 0x0f, 0x5c, 0x4f, 0x29, - 0x5b, 0x8d, 0x99, 0xb9, 0xe1, 0x1b, 0xbd, 0xf9, 0xa9, 0x96, 0x80, 0x12, 0x36, 0x7b, 0xf9, 0x82, - 0x8a, 0x81, 0xf6, 0xf5, 0x02, 0xc0, 0x95, 0xb5, 0xb5, 0x55, 0x79, 0xbc, 0x60, 0x42, 0x59, 0xef, - 0x85, 0x07, 0x95, 0xc3, 0x1f, 0x08, 0x26, 0xfc, 0x21, 0xe5, 0x19, 0x5e, 0x2f, 0xd8, 0x42, 0x8e, - 0x4e, 0x3e, 0x08, 0x55, 0xa9, 0x20, 0x4b, 0xb1, 0x87, 0xae, 0x05, 0x52, 0x89, 0x46, 0x95, 0xaf, - 0xfd, 0x76, 0x11, 0x60, 0xc9, 0xb4, 0x69, 0x4b, 0x79, 0xb0, 0xd7, 0x83, 0x2d, 0x8f, 0xfa, 0x5b, - 0xae, 0x6d, 0x0e, 0x79, 0x9a, 0xca, 0x6d, 0xfe, 0x6b, 0x0a, 0x04, 0x23, 0x3c, 0x62, 0xc2, 0xa8, - 0x1f, 0xd0, 0xee, 0x92, 0x13, 0x50, 0x6f, 0x47, 0xb7, 0x87, 0x3c, 0x44, 0x39, 0x29, 0xec, 0x22, - 0x11, 0x0e, 0x26, 0x50, 0x89, 0x0e, 0x0d, 0xcb, 0x31, 0xc4, 0x00, 0x69, 0xee, 0x0e, 0xd9, 0x91, - 0x4e, 0xb0, 0x1d, 0xc7, 0x52, 
0x04, 0x83, 0x71, 0x4c, 0xed, 0xfb, 0x45, 0x38, 0xc3, 0xf9, 0xb1, - 0x6a, 0x24, 0xfc, 0x31, 0xc9, 0xff, 0xef, 0xbb, 0x07, 0xf7, 0xbf, 0x0f, 0xc6, 0x5a, 0x5c, 0xa3, - 0x5a, 0xa1, 0x81, 0x1e, 0xe9, 0x73, 0x51, 0x5a, 0xec, 0xf2, 0x5b, 0x0f, 0xca, 0x3e, 0x9b, 0xaf, - 0x84, 0xf4, 0x5a, 0x43, 0x77, 0xa1, 0xec, 0x0f, 0xe0, 0xb3, 0x57, 0x78, 0x6a, 0xcc, 0x67, 0x2d, - 0xce, 0x8e, 0xfc, 0x2c, 0x8c, 0xf8, 0x81, 0x1e, 0xf4, 0xd4, 0xd0, 0x5c, 0x3f, 0x6a, 0xc6, 0x1c, - 0x3c, 0x9a, 0x47, 0xc4, 0x3b, 0x4a, 0xa6, 0xda, 0xf7, 0x0b, 0x70, 0x2e, 0xbb, 0xe0, 0xb2, 0xe5, - 0x07, 0xe4, 0xff, 0xf5, 0x89, 0xfd, 0x80, 0x2d, 0xce, 0x4a, 0x73, 0xa1, 0x87, 0x0e, 0xd9, 0x2a, - 0x25, 0x26, 0xf2, 0x00, 0x2a, 0x56, 0x40, 0x3b, 0x6a, 0x7f, 0x79, 0xfd, 0x88, 0x3f, 0x3d, 0xb6, - 0xb4, 0x33, 0x2e, 0x28, 0x98, 0x69, 0x6f, 0x15, 0x07, 0x7d, 0x32, 0x5f, 0x3e, 0xec, 0xa4, 0xcf, - 0xef, 0xd5, 0x7c, 0x3e, 0xbf, 0xc9, 0x0a, 0xf5, 0xbb, 0xfe, 0xfe, 0x4c, 0xbf, 0xeb, 0xef, 0xf5, - 0xfc, 0xae, 0xbf, 0x29, 0x31, 0x0c, 0xf4, 0x00, 0x7e, 0xa7, 0x04, 0xe7, 0xef, 0xd4, 0x6d, 0xd8, - 0x7a, 0x26, 0x7b, 0x67, 0xde, 0xf5, 0xec, 0xce, 0xfd, 0x90, 0xcc, 0x40, 0xa5, 0xbb, 0xa5, 0xfb, - 0x4a, 0x29, 0x53, 0x1b, 0x96, 0xca, 0x2a, 0x4b, 0xbc, 0xcd, 0x26, 0x0d, 0xae, 0xcc, 0xf1, 0x57, - 0x14, 0xa4, 0x6c, 0x3a, 0xee, 0x50, 0xdf, 0x8f, 0x6c, 0x02, 0xe1, 0x74, 0xbc, 0x22, 0x92, 0x51, - 0xe5, 0x93, 0x00, 0x46, 0x84, 0x89, 0x59, 0xae, 0x4c, 0xc3, 0x3b, 0x72, 0x65, 0xb8, 0x89, 0x47, - 0x1f, 0x25, 0x4f, 0x2b, 0x24, 0x2f, 0x32, 0x05, 0xe5, 0x20, 0x72, 0xda, 0x55, 0x5b, 0xf3, 0x72, - 0x86, 0x7e, 0xca, 0xe9, 0xd8, 0xc6, 0xde, 0xdd, 0xe0, 0x46, 0x75, 0x53, 0x9e, 0x9f, 0x5b, 0xae, - 0xc3, 0x15, 0xb2, 0x52, 0xb4, 0xb1, 0xbf, 0xde, 0x47, 0x81, 0x19, 0xa5, 0xb4, 0xbf, 0xae, 0xc1, - 0x99, 0xec, 0xfe, 0xc0, 0xe4, 0xb6, 0x43, 0x3d, 0x9f, 0x61, 0x17, 0x92, 0x72, 0xbb, 0x21, 0x92, - 0x51, 0xe5, 0xbf, 0xa7, 0x1d, 0xce, 0xbe, 0x51, 0x80, 0xb3, 0x9e, 0x3c, 0x23, 0xba, 0x17, 0x4e, - 0x67, 0x8f, 0x0a, 0x73, 0xc6, 0x00, 0x86, 0x38, 0xb8, 0x2e, 0xe4, 
0x37, 0x0b, 0x30, 0xd1, 0x49, - 0xd9, 0x39, 0x8e, 0xf1, 0xc2, 0x18, 0xf7, 0x8a, 0x5f, 0x19, 0xc0, 0x0f, 0x07, 0xd6, 0x84, 0x7c, - 0x01, 0x1a, 0x5d, 0xd6, 0x2f, 0xfc, 0x80, 0x3a, 0x86, 0xba, 0x33, 0x36, 0xfc, 0x48, 0x5a, 0x8d, - 0xb0, 0x94, 0x2b, 0x9a, 0xd0, 0x0f, 0x62, 0x19, 0x18, 0xe7, 0x78, 0x9f, 0xdf, 0x10, 0xbb, 0x04, - 0x35, 0x9f, 0x06, 0x81, 0xe5, 0xb4, 0xc5, 0x7e, 0xa3, 0x2e, 0xc6, 0x4a, 0x4b, 0xa6, 0x61, 0x98, - 0x4b, 0x3e, 0x04, 0x75, 0x7e, 0xe4, 0x34, 0xeb, 0xb5, 0xfd, 0x89, 0x3a, 0x77, 0x17, 0x1b, 0x13, - 0x0e, 0x70, 0x32, 0x11, 0xa3, 0x7c, 0xf2, 0x14, 0x8c, 0x6e, 0xf0, 0xe1, 0x2b, 0xaf, 0xf3, 0x0a, - 0x1b, 0x17, 0xd7, 0xd6, 0x9a, 0xb1, 0x74, 0x4c, 0x50, 0x91, 0x19, 0x00, 0x1a, 0x9e, 0xcb, 0xa5, - 0xed, 0x59, 0xd1, 0x89, 0x1d, 0xc6, 0xa8, 0xc8, 0xa3, 0x50, 0x0a, 0x6c, 0x9f, 0xdb, 0xb0, 0x6a, - 0xd1, 0x16, 0x74, 0x6d, 0xb9, 0x85, 0x2c, 0x5d, 0xfb, 0x69, 0x01, 0x4e, 0xa4, 0x2e, 0x97, 0xb0, - 0x22, 0x3d, 0xcf, 0x96, 0xd3, 0x48, 0x58, 0x64, 0x1d, 0x97, 0x91, 0xa5, 0x93, 0x57, 0xa5, 0x5a, - 0x5e, 0xcc, 0x19, 0xb9, 0xe0, 0x9a, 0x1e, 0xf8, 0x4c, 0x0f, 0xef, 0xd3, 0xc8, 0xf9, 0x31, 0x5f, - 0x54, 0x1f, 0xb9, 0x0e, 0xc4, 0x8e, 0xf9, 0xa2, 0x3c, 0x4c, 0x50, 0xa6, 0x0c, 0x7e, 0xe5, 0x83, - 0x18, 0xfc, 0xb4, 0xaf, 0x16, 0x63, 0x12, 0x90, 0x9a, 0xfd, 0x5d, 0x24, 0xf0, 0x04, 0x5b, 0x40, - 0xc3, 0xc5, 0xbd, 0x1e, 0x5f, 0xff, 0xf8, 0x62, 0x2c, 0x73, 0xc9, 0x4b, 0x42, 0xf6, 0xa5, 0x9c, - 0xb7, 0x50, 0xd7, 0x96, 0x5b, 0xc2, 0xbb, 0x4a, 0xb5, 0x5a, 0xd8, 0x04, 0xe5, 0x63, 0x6a, 0x02, - 0xed, 0xcf, 0x4b, 0xd0, 0x78, 0xd1, 0xdd, 0x78, 0x8f, 0x78, 0x50, 0x67, 0x2f, 0x53, 0xc5, 0x77, - 0x71, 0x99, 0x5a, 0x87, 0x87, 0x83, 0xc0, 0x6e, 0x51, 0xc3, 0x75, 0x4c, 0x7f, 0x76, 0x33, 0xa0, - 0xde, 0x82, 0xe5, 0x58, 0xfe, 0x16, 0x35, 0xe5, 0x71, 0xd2, 0x23, 0xfb, 0x7b, 0x93, 0x0f, 0xaf, - 0xad, 0x2d, 0x67, 0x91, 0xe0, 0xa0, 0xb2, 0x7c, 0xda, 0xd0, 0x8d, 0x6d, 0x77, 0x73, 0x93, 0xdf, - 0x94, 0x91, 0x3e, 0x37, 0x62, 0xda, 0x88, 0xa5, 0x63, 0x82, 0x4a, 0x7b, 0xbb, 0x08, 0xf5, 0xf0, - 0xe6, 
0x3b, 0x79, 0x1c, 0xaa, 0x1b, 0x9e, 0xbb, 0x4d, 0x3d, 0x71, 0x72, 0x27, 0x6f, 0xca, 0x34, - 0x45, 0x12, 0xaa, 0x3c, 0xf2, 0x18, 0x54, 0x02, 0xb7, 0x6b, 0x19, 0x69, 0x83, 0xda, 0x1a, 0x4b, - 0x44, 0x91, 0x77, 0x7c, 0x1d, 0xfc, 0x89, 0x84, 0x6a, 0x57, 0x1f, 0xa8, 0x8c, 0xbd, 0x02, 0x65, - 0x5f, 0xf7, 0x6d, 0xb9, 0x9e, 0xe6, 0xb8, 0x44, 0x3e, 0xdb, 0x5a, 0x96, 0x97, 0xc8, 0x67, 0x5b, - 0xcb, 0xc8, 0x41, 0xb5, 0x1f, 0x15, 0xa1, 0x21, 0xe4, 0x26, 0x66, 0x85, 0xa3, 0x94, 0xdc, 0xf3, - 0xdc, 0x95, 0xc2, 0xef, 0x75, 0xa8, 0xc7, 0xcd, 0x4c, 0x72, 0x92, 0x8b, 0x9f, 0x0f, 0x44, 0x99, - 0xa1, 0x3b, 0x45, 0x94, 0xa4, 0x44, 0x5f, 0x3e, 0x46, 0xd1, 0x57, 0x0e, 0x24, 0xfa, 0x91, 0xe3, - 0x10, 0xfd, 0x9b, 0x45, 0xa8, 0x2f, 0x5b, 0x9b, 0xd4, 0xd8, 0x35, 0x6c, 0x7e, 0x27, 0xd0, 0xa4, - 0x36, 0x0d, 0xe8, 0xa2, 0xa7, 0x1b, 0x74, 0x95, 0x7a, 0x16, 0x8f, 0xd9, 0xc2, 0xc6, 0x07, 0x9f, - 0x81, 0xe4, 0x9d, 0xc0, 0xf9, 0x01, 0x34, 0x38, 0xb0, 0x34, 0x59, 0x82, 0x51, 0x93, 0xfa, 0x96, - 0x47, 0xcd, 0xd5, 0xd8, 0x46, 0xe5, 0x71, 0xb5, 0xd4, 0xcc, 0xc7, 0xf2, 0x6e, 0xef, 0x4d, 0x8e, - 0x29, 0x03, 0xa5, 0xd8, 0xb1, 0x24, 0x8a, 0xb2, 0x21, 0xdf, 0xd5, 0x7b, 0x7e, 0x56, 0x1d, 0x63, - 0x43, 0x7e, 0x35, 0x9b, 0x04, 0x07, 0x95, 0xd5, 0x2a, 0x50, 0x5a, 0x76, 0xdb, 0xda, 0x5b, 0x25, - 0x08, 0x83, 0xfb, 0x90, 0x9f, 0x2b, 0x40, 0x43, 0x77, 0x1c, 0x37, 0x90, 0x81, 0x73, 0xc4, 0x09, - 0x3c, 0xe6, 0x8e, 0x21, 0x34, 0x35, 0x1b, 0x81, 0x8a, 0xc3, 0xdb, 0xf0, 0x40, 0x39, 0x96, 0x83, - 0x71, 0xde, 0xa4, 0x97, 0x3a, 0x4f, 0x5e, 0xc9, 0x5f, 0x8b, 0x03, 0x9c, 0x1e, 0x9f, 0xfb, 0x24, - 0x9c, 0x4c, 0x57, 0xf6, 0x30, 0xc7, 0x41, 0xb9, 0x0e, 0xe6, 0x8b, 0x00, 0x91, 0x4f, 0xc9, 0x3d, - 0x30, 0x62, 0x59, 0x09, 0x23, 0xd6, 0xe2, 0xf0, 0x02, 0x0e, 0x2b, 0x3d, 0xd0, 0x70, 0xf5, 0x7a, - 0xca, 0x70, 0xb5, 0x74, 0x14, 0xcc, 0xee, 0x6c, 0xac, 0xfa, 0xad, 0x02, 0x9c, 0x8c, 0x88, 0xe5, - 0x0d, 0xd9, 0x67, 0x61, 0xcc, 0xa3, 0xba, 0xd9, 0xd4, 0x03, 0x63, 0x8b, 0xbb, 0x7a, 0x17, 0xb8, - 0x6f, 0xf6, 0xa9, 0xfd, 0xbd, 0xc9, 0x31, 
0x8c, 0x67, 0x60, 0x92, 0x8e, 0xe8, 0xd0, 0x60, 0x09, - 0x6b, 0x56, 0x87, 0xba, 0xbd, 0x60, 0x48, 0xab, 0x29, 0xdf, 0xb0, 0x60, 0x04, 0x83, 0x71, 0x4c, - 0xed, 0x9d, 0x02, 0x8c, 0xc7, 0x2b, 0x7c, 0xec, 0x16, 0xb5, 0xad, 0xa4, 0x45, 0x6d, 0xee, 0x08, - 0xda, 0x64, 0x80, 0x15, 0xed, 0xc7, 0xb5, 0xf8, 0xa7, 0x71, 0xcb, 0x59, 0xdc, 0x58, 0x50, 0xb8, - 0xa3, 0xb1, 0xe0, 0xbd, 0x1f, 0x35, 0x66, 0x90, 0x96, 0x5b, 0xbe, 0x8f, 0xb5, 0xdc, 0x77, 0x33, - 0xf4, 0x4c, 0x2c, 0x7c, 0xca, 0x48, 0x8e, 0xf0, 0x29, 0x9d, 0x30, 0x7c, 0x4a, 0xf5, 0xc8, 0x26, - 0x9d, 0x83, 0x84, 0x50, 0xa9, 0xdd, 0xd3, 0x10, 0x2a, 0xf5, 0xe3, 0x0a, 0xa1, 0x02, 0x79, 0x43, - 0xa8, 0x7c, 0xa5, 0x00, 0xe3, 0x66, 0xe2, 0xc6, 0x2c, 0xb7, 0x2d, 0xe4, 0x59, 0x6a, 0x92, 0x17, - 0x70, 0xc5, 0x95, 0xa9, 0x64, 0x1a, 0xa6, 0x58, 0x6a, 0x3f, 0x2c, 0xc7, 0xd7, 0x81, 0x7b, 0x6d, - 0xaa, 0x7e, 0x26, 0x69, 0xaa, 0xbe, 0x98, 0x36, 0x55, 0x9f, 0x88, 0x79, 0x91, 0xc6, 0xcd, 0xd5, - 0x1f, 0x8e, 0x4d, 0x8f, 0x6c, 0x4e, 0x1a, 0x8b, 0x24, 0x9d, 0x31, 0x45, 0x7e, 0x18, 0x6a, 0xbe, - 0x0a, 0xc3, 0x28, 0x36, 0x36, 0x51, 0xbb, 0xa8, 0x10, 0x89, 0x21, 0x05, 0xd3, 0xc4, 0x3d, 0xaa, - 0xfb, 0xae, 0x93, 0xd6, 0xc4, 0x91, 0xa7, 0xa2, 0xcc, 0x8d, 0x9b, 0xcc, 0x47, 0xee, 0x62, 0x32, - 0xd7, 0xa1, 0x61, 0xeb, 0x7e, 0xb0, 0xde, 0x35, 0xf5, 0x80, 0x9a, 0x72, 0xbc, 0xfd, 0xaf, 0x83, - 0xad, 0x55, 0x6c, 0xfd, 0x8b, 0x14, 0xc2, 0xe5, 0x08, 0x06, 0xe3, 0x98, 0xc4, 0x84, 0x51, 0xf6, - 0xca, 0x47, 0x83, 0x39, 0xab, 0x42, 0x00, 0x1c, 0x86, 0x47, 0x68, 0xe9, 0x59, 0x8e, 0xe1, 0x60, - 0x02, 0x75, 0x80, 0x55, 0xbd, 0x3e, 0x94, 0x55, 0xfd, 0x2b, 0x75, 0x68, 0x5c, 0xd3, 0x03, 0x6b, - 0x87, 0xf2, 0x53, 0x9c, 0xe3, 0x31, 0xa5, 0xff, 0x6a, 0x01, 0xce, 0x24, 0x5d, 0xf5, 0x8e, 0xd1, - 0x9e, 0xce, 0x03, 0x7f, 0x60, 0x26, 0x37, 0x1c, 0x50, 0x0b, 0x6e, 0x59, 0xef, 0xf3, 0xfc, 0x3b, - 0x6e, 0xcb, 0x7a, 0x6b, 0x10, 0x43, 0x1c, 0x5c, 0x97, 0xf7, 0x8a, 0x65, 0xfd, 0xfe, 0x0e, 0xcc, - 0x96, 0xb2, 0xfb, 0x57, 0xef, 0x1b, 0xbb, 0x7f, 0xed, 0xbe, 0x50, 0xb6, 0xba, 
0x31, 0xbb, 0x7f, - 0x3d, 0xa7, 0xff, 0x89, 0xf4, 0x6e, 0x17, 0x68, 0x83, 0xce, 0x0f, 0xf8, 0xc5, 0x74, 0x65, 0x8f, - 0x65, 0x3a, 0xca, 0x86, 0xee, 0x5b, 0x86, 0x5c, 0xf6, 0x72, 0x04, 0xa2, 0x54, 0x11, 0xbb, 0xc4, - 0x31, 0x35, 0x7f, 0x45, 0x81, 0x1d, 0x45, 0x06, 0x2b, 0xe6, 0x8a, 0x0c, 0x46, 0xe6, 0xa0, 0xec, - 0xb0, 0xdd, 0x73, 0xe9, 0xd0, 0xb1, 0xc0, 0xae, 0x5d, 0xa5, 0xbb, 0xc8, 0x0b, 0x6b, 0x6f, 0x17, - 0x01, 0xd8, 0xe7, 0x1f, 0xcc, 0x02, 0xff, 0x41, 0xa8, 0xfa, 0x3d, 0xbe, 0x57, 0x96, 0x0b, 0x76, - 0xe4, 0xb4, 0x23, 0x92, 0x51, 0xe5, 0x93, 0xc7, 0xa0, 0xf2, 0x7a, 0x8f, 0xf6, 0xd4, 0x71, 0x72, - 0xa8, 0xae, 0x7d, 0x8a, 0x25, 0xa2, 0xc8, 0x3b, 0x3e, 0x6b, 0x9a, 0xb2, 0xd4, 0x57, 0x8e, 0xcb, - 0x52, 0x5f, 0x87, 0xea, 0x35, 0x97, 0xfb, 0x00, 0x6a, 0xff, 0x52, 0x04, 0x88, 0x7c, 0xac, 0xc8, - 0xd7, 0x0b, 0xf0, 0x50, 0x38, 0xe0, 0x02, 0xa1, 0x75, 0xcf, 0xd9, 0xba, 0xd5, 0xc9, 0x6d, 0xb5, - 0xcf, 0x1a, 0xec, 0x7c, 0x06, 0x5a, 0xcd, 0x62, 0x87, 0xd9, 0xb5, 0x20, 0x08, 0x35, 0xda, 0xe9, - 0x06, 0xbb, 0xf3, 0x96, 0x27, 0x7b, 0x60, 0xa6, 0x2b, 0xdf, 0x65, 0x49, 0x23, 0x8a, 0xca, 0xad, - 0x21, 0x1f, 0x44, 0x2a, 0x07, 0x43, 0x1c, 0xb2, 0x05, 0x35, 0xc7, 0x7d, 0xd5, 0x67, 0xe2, 0x90, - 0xdd, 0xf1, 0x85, 0xe1, 0x45, 0x2e, 0xc4, 0x2a, 0xac, 0xbc, 0xf2, 0x05, 0xab, 0x8e, 0x14, 0xf6, - 0xd7, 0x8a, 0x70, 0x3a, 0x43, 0x0e, 0xe4, 0x05, 0x38, 0x29, 0xdd, 0xd9, 0xa2, 0xf0, 0xc4, 0x85, - 0x28, 0x3c, 0x71, 0x2b, 0x95, 0x87, 0x7d, 0xd4, 0xe4, 0x55, 0x00, 0xdd, 0x30, 0xa8, 0xef, 0xaf, - 0xb8, 0xa6, 0xd2, 0x47, 0x9f, 0xdf, 0xdf, 0x9b, 0x84, 0xd9, 0x30, 0xf5, 0xf6, 0xde, 0xe4, 0x47, - 0xb2, 0x3c, 0x54, 0x53, 0x72, 0x8e, 0x0a, 0x60, 0x0c, 0x92, 0x7c, 0x0e, 0x40, 0x6c, 0xbd, 0xc2, - 0x4b, 0xf4, 0x77, 0xb1, 0x57, 0x4c, 0xa9, 0x70, 0x45, 0x53, 0x9f, 0xea, 0xe9, 0x4e, 0x60, 0x05, - 0xbb, 0x22, 0x66, 0xc9, 0x8d, 0x10, 0x05, 0x63, 0x88, 0xda, 0x9f, 0x14, 0xa1, 0xa6, 0x2c, 0xa5, - 0xf7, 0xc0, 0x3c, 0xd6, 0x4e, 0x98, 0xc7, 0x8e, 0xc8, 0x27, 0x35, 0xcb, 0x38, 0xe6, 0xa6, 0x8c, - 0x63, 0x8b, 0xf9, 
0x59, 0xdd, 0xd9, 0x34, 0xf6, 0xad, 0x22, 0x8c, 0x2b, 0xd2, 0xbc, 0x86, 0xb1, - 0x4f, 0xc0, 0x09, 0x71, 0x96, 0xbc, 0xa2, 0xdf, 0x12, 0xe1, 0x5b, 0xb8, 0xc0, 0xca, 0xc2, 0x0d, - 0xb4, 0x99, 0xcc, 0xc2, 0x34, 0x2d, 0xeb, 0xd6, 0x22, 0x69, 0x9d, 0xed, 0x23, 0xc4, 0xe9, 0x93, - 0xd8, 0xef, 0xf0, 0x6e, 0xdd, 0x4c, 0xe5, 0x61, 0x1f, 0x75, 0xda, 0x32, 0x57, 0x3e, 0x06, 0xcb, - 0xdc, 0xdf, 0x14, 0x60, 0x34, 0x92, 0xd7, 0xb1, 0xdb, 0xe5, 0x36, 0x93, 0x76, 0xb9, 0xd9, 0xdc, - 0xdd, 0x61, 0x80, 0x55, 0xee, 0x17, 0xab, 0x90, 0x70, 0x8d, 0x26, 0x1b, 0x70, 0xce, 0xca, 0x74, - 0xf0, 0x8a, 0xcd, 0x36, 0xe1, 0x5d, 0xdf, 0xa5, 0x81, 0x94, 0x78, 0x07, 0x14, 0xd2, 0x83, 0xda, - 0x0e, 0xf5, 0x02, 0xcb, 0xa0, 0xea, 0xfb, 0x16, 0x73, 0xab, 0x64, 0xd2, 0xf6, 0x18, 0xca, 0xf4, - 0x86, 0x64, 0x80, 0x21, 0x2b, 0xb2, 0x01, 0x15, 0x6a, 0xb6, 0xa9, 0x0a, 0xa8, 0x93, 0x33, 0x5c, - 0x65, 0x28, 0x4f, 0xf6, 0xe6, 0xa3, 0x80, 0x26, 0x3e, 0xd4, 0x6d, 0x75, 0xb6, 0x24, 0xfb, 0xe1, - 0xf0, 0x0a, 0x56, 0x78, 0x4a, 0x15, 0xdd, 0xb5, 0x0f, 0x93, 0x30, 0xe2, 0x43, 0xb6, 0x43, 0x23, - 0x57, 0xe5, 0x88, 0x26, 0x8f, 0x3b, 0x98, 0xb8, 0x7c, 0xa8, 0xdf, 0xd4, 0x03, 0xea, 0x75, 0x74, - 0x6f, 0x5b, 0xee, 0x36, 0x86, 0xff, 0xc2, 0x97, 0x14, 0x52, 0xf4, 0x85, 0x61, 0x12, 0x46, 0x7c, - 0x88, 0x0b, 0xf5, 0x40, 0xaa, 0xcf, 0xca, 0x92, 0x37, 0x3c, 0x53, 0xa5, 0x88, 0xfb, 0xd2, 0x45, - 0x5a, 0xbd, 0x62, 0xc4, 0x83, 0xec, 0x24, 0x42, 0xf9, 0x8a, 0x00, 0xce, 0xcd, 0x1c, 0x16, 0x61, - 0x09, 0x15, 0x2d, 0x37, 0xd9, 0x21, 0x81, 0xb5, 0xb7, 0x2b, 0xd1, 0xb4, 0x7c, 0xaf, 0xed, 0x54, - 0x4f, 0x25, 0xed, 0x54, 0x17, 0xd2, 0x76, 0xaa, 0xd4, 0x11, 0xe5, 0xe1, 0x9d, 0x2a, 0x53, 0x16, - 0xa2, 0xf2, 0x31, 0x58, 0x88, 0x9e, 0x84, 0xc6, 0x0e, 0x9f, 0x09, 0x44, 0x74, 0x9e, 0x0a, 0x5f, - 0x46, 0xf8, 0xcc, 0x7e, 0x23, 0x4a, 0xc6, 0x38, 0x0d, 0x2b, 0x22, 0x34, 0x90, 0x28, 0xbc, 0xa9, - 0x2c, 0xd2, 0x8a, 0x92, 0x31, 0x4e, 0xc3, 0xfd, 0xb1, 0x2c, 0x67, 0x5b, 0x14, 0xa8, 0xf2, 0x02, - 0xc2, 0x1f, 0x4b, 0x25, 0x62, 0x94, 0x4f, 0x2e, 0x41, 
0xad, 0x67, 0x6e, 0x0a, 0xda, 0x1a, 0xa7, - 0xe5, 0x1a, 0xe6, 0xfa, 0xfc, 0x82, 0x8c, 0x16, 0xa4, 0x72, 0x59, 0x4d, 0x3a, 0x7a, 0x57, 0x65, - 0xf0, 0xbd, 0xa1, 0xac, 0xc9, 0x4a, 0x94, 0x8c, 0x71, 0x1a, 0xf2, 0x31, 0x18, 0xf7, 0xa8, 0xd9, - 0x33, 0x68, 0x58, 0x0a, 0x78, 0x29, 0x6e, 0x15, 0xc5, 0x44, 0x0e, 0xa6, 0x28, 0x07, 0xd8, 0xb9, - 0x1a, 0x43, 0xd9, 0xb9, 0xbe, 0x57, 0x00, 0xd2, 0xef, 0xbf, 0x4c, 0xb6, 0x60, 0xc4, 0xe1, 0xd6, - 0xaf, 0xdc, 0x01, 0x91, 0x63, 0x46, 0x34, 0x31, 0x2d, 0xc9, 0x04, 0x89, 0x4f, 0x1c, 0xa8, 0xd1, - 0x5b, 0x01, 0xf5, 0x9c, 0xf0, 0x3e, 0xc3, 0xd1, 0x04, 0x5f, 0x16, 0xbb, 0x01, 0x89, 0x8c, 0x21, - 0x0f, 0xed, 0x07, 0x45, 0x68, 0xc4, 0xe8, 0xee, 0xb6, 0xa9, 0xe4, 0x57, 0xaa, 0x85, 0xd1, 0x69, - 0xdd, 0xb3, 0xe5, 0x08, 0x8b, 0x5d, 0xa9, 0x96, 0x59, 0xb8, 0x8c, 0x71, 0x3a, 0x32, 0x03, 0xd0, - 0xd1, 0xfd, 0x80, 0x7a, 0x7c, 0xf5, 0x4d, 0x5d, 0x64, 0x5e, 0x09, 0x73, 0x30, 0x46, 0x45, 0x2e, - 0xca, 0xf0, 0xd9, 0xe5, 0x64, 0xe0, 0xb9, 0x01, 0xb1, 0xb1, 0x2b, 0x47, 0x10, 0x1b, 0x9b, 0xb4, - 0xe1, 0xa4, 0xaa, 0xb5, 0xca, 0x3d, 0x5c, 0x58, 0x32, 0xb1, 0x7f, 0x49, 0x41, 0x60, 0x1f, 0xa8, - 0xf6, 0x76, 0x01, 0xc6, 0x12, 0x26, 0x0f, 0x11, 0x32, 0x4e, 0x79, 0xdf, 0x27, 0x42, 0xc6, 0xc5, - 0x9c, 0xe6, 0x9f, 0x80, 0x11, 0x21, 0xa0, 0xb4, 0x53, 0x9d, 0x10, 0x21, 0xca, 0x5c, 0x36, 0x97, - 0x49, 0xa3, 0x6a, 0x7a, 0x2e, 0x93, 0x56, 0x57, 0x54, 0xf9, 0xc2, 0xdc, 0x2e, 0x6a, 0xd7, 0x6f, - 0x6e, 0x17, 0xe9, 0x18, 0x52, 0x68, 0x3f, 0x2c, 0x01, 0x77, 0x41, 0x21, 0xcf, 0x42, 0xbd, 0x43, - 0x8d, 0x2d, 0xdd, 0xb1, 0x7c, 0x15, 0x32, 0x92, 0xed, 0x6e, 0xeb, 0x2b, 0x2a, 0xf1, 0x36, 0x03, - 0x98, 0x6d, 0x2d, 0x73, 0x2f, 0xef, 0x88, 0x96, 0x18, 0x30, 0xd2, 0xf6, 0x7d, 0xbd, 0x6b, 0xe5, - 0x3e, 0x01, 0x15, 0x21, 0xfa, 0xc4, 0x20, 0x12, 0xcf, 0x28, 0xa1, 0x89, 0x01, 0x95, 0xae, 0xad, - 0x5b, 0x4e, 0xee, 0x7f, 0x94, 0xb0, 0x2f, 0x58, 0x65, 0x48, 0xc2, 0xa4, 0xc3, 0x1f, 0x51, 0x60, - 0x93, 0x1e, 0x34, 0x7c, 0xc3, 0xd3, 0x3b, 0xfe, 0x96, 0x3e, 0xf3, 0xf4, 0x33, 0xb9, 0x95, 
0xa4, - 0x88, 0x95, 0x98, 0xb3, 0xe7, 0x70, 0x76, 0xa5, 0x75, 0x65, 0x76, 0xe6, 0xe9, 0x67, 0x30, 0xce, - 0x27, 0xce, 0xf6, 0xe9, 0x27, 0x67, 0x64, 0xbf, 0x3f, 0x72, 0xb6, 0x4f, 0x3f, 0x39, 0x83, 0x71, - 0x3e, 0xda, 0x7f, 0x14, 0xa0, 0x1e, 0xd2, 0x92, 0x75, 0x00, 0x36, 0x02, 0x65, 0x50, 0xbd, 0x43, - 0x05, 0xb8, 0xe7, 0xbb, 0xe2, 0xf5, 0xb0, 0x30, 0xc6, 0x80, 0x32, 0xa2, 0x0e, 0x16, 0x8f, 0x3a, - 0xea, 0xe0, 0x34, 0xd4, 0xb7, 0x74, 0xc7, 0xf4, 0xb7, 0xf4, 0x6d, 0x31, 0x11, 0xc5, 0xe2, 0x70, - 0x5e, 0x51, 0x19, 0x18, 0xd1, 0x68, 0xff, 0x5a, 0x01, 0x71, 0x6c, 0xc9, 0x86, 0x8a, 0x69, 0xf9, - 0xc2, 0x6f, 0xb6, 0xc0, 0x4b, 0x86, 0x43, 0x65, 0x5e, 0xa6, 0x63, 0x48, 0x41, 0xce, 0x42, 0xa9, - 0x63, 0x39, 0xf2, 0xc4, 0x83, 0x1b, 0xbc, 0x56, 0x2c, 0x07, 0x59, 0x1a, 0xcf, 0xd2, 0x6f, 0x49, - 0x97, 0x27, 0x91, 0xa5, 0xdf, 0x42, 0x96, 0xc6, 0xb6, 0xa0, 0xb6, 0xeb, 0x6e, 0x6f, 0xe8, 0xc6, - 0xb6, 0xf2, 0x8c, 0x2a, 0xf3, 0x85, 0x90, 0x6f, 0x41, 0x97, 0x93, 0x59, 0x98, 0xa6, 0x25, 0x8b, - 0x70, 0xc2, 0x70, 0x5d, 0xdb, 0x74, 0x6f, 0x3a, 0xaa, 0xb8, 0x50, 0x1d, 0xf8, 0x49, 0xc2, 0x3c, - 0xed, 0x7a, 0xd4, 0x60, 0xfa, 0xc5, 0x5c, 0x92, 0x08, 0xd3, 0xa5, 0xc8, 0x3a, 0x3c, 0xfc, 0x06, - 0xf5, 0x5c, 0x39, 0x5d, 0xb4, 0x6c, 0x4a, 0xbb, 0x0a, 0x50, 0x28, 0x16, 0xdc, 0x53, 0xeb, 0x33, - 0xd9, 0x24, 0x38, 0xa8, 0x2c, 0xf7, 0xf9, 0xd4, 0xbd, 0x36, 0x0d, 0x56, 0x3d, 0xd7, 0xa0, 0xbe, - 0x6f, 0x39, 0x6d, 0x05, 0x5b, 0x8d, 0x60, 0xd7, 0xb2, 0x49, 0x70, 0x50, 0x59, 0xf2, 0x32, 0x4c, - 0x88, 0x2c, 0xb1, 0x6a, 0xcf, 0xee, 0xe8, 0x96, 0xad, 0x6f, 0x58, 0xb6, 0xfa, 0x27, 0xd7, 0x98, - 0x38, 0xa0, 0x58, 0x1b, 0x40, 0x83, 0x03, 0x4b, 0xf3, 0x3f, 0x69, 0xc9, 0xe3, 0xa9, 0x55, 0xea, - 0xf1, 0x7e, 0x20, 0xf5, 0x19, 0xf1, 0x27, 0xad, 0x54, 0x1e, 0xf6, 0x51, 0x13, 0x84, 0x33, 0xfc, - 0xb8, 0x7b, 0xbd, 0x9b, 0x12, 0xba, 0xd4, 0x70, 0xf8, 0x39, 0x54, 0x2b, 0x93, 0x02, 0x07, 0x94, - 0x64, 0xdf, 0xcb, 0x73, 0xe6, 0xdd, 0x9b, 0x4e, 0x1a, 0xb5, 0x11, 0x7d, 0x6f, 0x6b, 0x00, 0x0d, - 0x0e, 0x2c, 0xad, 0xfd, 0x71, 
0x11, 0xc6, 0x12, 0x37, 0x9f, 0xef, 0xbb, 0x1b, 0xa6, 0x4c, 0x55, - 0xec, 0xf8, 0xed, 0xa5, 0xf9, 0x2b, 0x54, 0x37, 0xa9, 0x77, 0x95, 0xaa, 0x5b, 0xea, 0x7c, 0xf4, - 0xaf, 0x24, 0x72, 0x30, 0x45, 0x49, 0x36, 0xa1, 0x22, 0x0c, 0x9f, 0x79, 0xff, 0x69, 0xa0, 0x64, - 0xc4, 0xad, 0x9f, 0x7c, 0x6d, 0x10, 0xb6, 0x4f, 0x01, 0xaf, 0x05, 0x30, 0x1a, 0xa7, 0x60, 0x23, - 0x3e, 0xd2, 0xaa, 0xaa, 0x09, 0x8d, 0x6a, 0x09, 0x4a, 0x41, 0x30, 0xec, 0xdd, 0x55, 0x61, 0x48, - 0x5f, 0x5b, 0x46, 0x86, 0xa1, 0x6d, 0xb2, 0xb6, 0xf3, 0x7d, 0xcb, 0x75, 0x64, 0x20, 0xe3, 0x75, - 0xa8, 0x06, 0xd2, 0x96, 0x34, 0xdc, 0xdd, 0x5b, 0x6e, 0xd7, 0x55, 0x76, 0x24, 0x85, 0xa5, 0xfd, - 0x6d, 0x11, 0xea, 0xe1, 0xbe, 0xef, 0x00, 0x01, 0x82, 0x5d, 0xa8, 0x87, 0x8e, 0x31, 0xb9, 0xff, - 0x4f, 0x16, 0xf9, 0x6b, 0xf0, 0xad, 0x4a, 0xf8, 0x8a, 0x11, 0x8f, 0xb8, 0xd3, 0x4d, 0x29, 0x87, - 0xd3, 0x4d, 0x17, 0xaa, 0x81, 0x67, 0xb5, 0xdb, 0x52, 0x09, 0xcd, 0xe3, 0x75, 0x13, 0x8a, 0x6b, - 0x4d, 0x00, 0x4a, 0xc9, 0x8a, 0x17, 0x54, 0x6c, 0xb4, 0xd7, 0xe0, 0x64, 0x9a, 0x92, 0x6b, 0x68, - 0xc6, 0x16, 0x35, 0x7b, 0xb6, 0x92, 0x71, 0xa4, 0xa1, 0xc9, 0x74, 0x0c, 0x29, 0xd8, 0x2e, 0x8d, - 0x35, 0xd3, 0x1b, 0xae, 0xa3, 0xf6, 0xbf, 0x5c, 0xd9, 0x5d, 0x93, 0x69, 0x18, 0xe6, 0x6a, 0xff, - 0x5c, 0x82, 0xb3, 0xd1, 0xee, 0x7d, 0x45, 0x77, 0xf4, 0xf6, 0x01, 0x7e, 0x4a, 0xf5, 0xfe, 0x6d, - 0x86, 0xc3, 0x46, 0x79, 0x2f, 0xdd, 0x07, 0x51, 0xde, 0x7f, 0x54, 0x00, 0xee, 0xc4, 0x47, 0xbe, - 0x00, 0xa3, 0x7a, 0xec, 0x7f, 0x84, 0xb2, 0x39, 0x2f, 0xe7, 0x6e, 0x4e, 0xee, 0x2b, 0x18, 0x3a, - 0xa5, 0xc4, 0x53, 0x31, 0xc1, 0x90, 0xb8, 0x50, 0xdb, 0xd4, 0x6d, 0x9b, 0x29, 0x2d, 0xb9, 0x4f, - 0x23, 0x12, 0xcc, 0x79, 0x37, 0x5f, 0x90, 0xd0, 0x18, 0x32, 0xd1, 0xfe, 0xa9, 0x00, 0x63, 0x2d, - 0xdb, 0x32, 0x2d, 0xa7, 0x7d, 0x8c, 0xe1, 0xdd, 0xaf, 0x43, 0xc5, 0xb7, 0x2d, 0x93, 0x0e, 0x39, - 0x8f, 0x8b, 0x15, 0x84, 0x01, 0xa0, 0xc0, 0x49, 0xc6, 0x8b, 0x2f, 0x1d, 0x20, 0x5e, 0xfc, 0x4f, - 0x46, 0x40, 0x3a, 0x82, 0x92, 0x1e, 0xd4, 0xdb, 0x2a, 0x0c, 0xb5, 
0xfc, 0xc6, 0x2b, 0x39, 0x42, - 0x98, 0x25, 0x02, 0x5a, 0x8b, 0x59, 0x37, 0x4c, 0xc4, 0x88, 0x13, 0xa1, 0xc9, 0x5f, 0x50, 0xce, - 0xe7, 0xfc, 0x05, 0xa5, 0x60, 0xd7, 0xff, 0x13, 0x4a, 0x1d, 0xca, 0x5b, 0x41, 0xd0, 0x95, 0xe3, - 0x6a, 0x78, 0x4f, 0xdf, 0x28, 0x8a, 0x86, 0xd0, 0x46, 0xd8, 0x3b, 0x72, 0x68, 0xc6, 0xc2, 0xd1, - 0xc3, 0x3f, 0x1f, 0xcd, 0xe5, 0x3a, 0x91, 0x8e, 0xb3, 0x60, 0xef, 0xc8, 0xa1, 0xc9, 0xe7, 0xa1, - 0x11, 0x78, 0xba, 0xe3, 0x6f, 0xba, 0x5e, 0x87, 0x7a, 0x72, 0x1b, 0xb7, 0x90, 0xe3, 0x2f, 0x8c, - 0x6b, 0x11, 0x9a, 0x38, 0xea, 0x4a, 0x24, 0x61, 0x9c, 0x1b, 0xd9, 0x86, 0x5a, 0xcf, 0x14, 0x15, - 0x93, 0xf6, 0x8d, 0xd9, 0x3c, 0x3f, 0xd6, 0x8c, 0x9d, 0x37, 0xab, 0x37, 0x0c, 0x19, 0x24, 0x7f, - 0xf2, 0x55, 0x3d, 0xaa, 0x9f, 0x7c, 0xc5, 0x7b, 0x63, 0xd6, 0x15, 0x7f, 0xd2, 0x91, 0x1a, 0xa5, - 0xd3, 0x96, 0xee, 0x32, 0x0b, 0xb9, 0x95, 0x3d, 0xc1, 0xb2, 0x11, 0x6a, 0xa5, 0x4e, 0x1b, 0x15, - 0x0f, 0xad, 0x03, 0xd2, 0x0c, 0x4d, 0x8c, 0xc4, 0xaf, 0x30, 0xc4, 0xbd, 0x93, 0xe9, 0x83, 0xcd, - 0x07, 0xe1, 0x3f, 0x19, 0x62, 0xa1, 0x78, 0x33, 0xff, 0x79, 0xa1, 0xfd, 0x5d, 0x11, 0x4a, 0x6b, - 0xcb, 0x2d, 0x11, 0x5e, 0x8f, 0xff, 0x67, 0x86, 0xb6, 0xb6, 0xad, 0xee, 0x0d, 0xea, 0x59, 0x9b, - 0xbb, 0x72, 0x77, 0x1a, 0x0b, 0xaf, 0x97, 0xa6, 0xc0, 0x8c, 0x52, 0xe4, 0x15, 0x18, 0x35, 0xf4, - 0x39, 0xea, 0x05, 0xc3, 0xec, 0xbd, 0xf9, 0x05, 0xbb, 0xb9, 0xd9, 0xa8, 0x38, 0x26, 0xc0, 0xc8, - 0x3a, 0x80, 0x11, 0x41, 0x97, 0x0e, 0x6d, 0x31, 0x88, 0x01, 0xc7, 0x80, 0x08, 0x42, 0x7d, 0x9b, - 0x91, 0x72, 0xd4, 0xf2, 0x61, 0x50, 0x79, 0xcf, 0xb9, 0xaa, 0xca, 0x62, 0x04, 0xa3, 0x39, 0x30, - 0x96, 0xf8, 0x3f, 0x06, 0xf9, 0x28, 0xd4, 0xdc, 0x6e, 0x6c, 0x3a, 0xad, 0xf3, 0xed, 0x74, 0xed, - 0xba, 0x4c, 0xbb, 0xbd, 0x37, 0x39, 0xb6, 0xec, 0xb6, 0x2d, 0x43, 0x25, 0x60, 0x48, 0x4e, 0x34, - 0x18, 0xe1, 0xb7, 0x62, 0xd4, 0xdf, 0x31, 0xf8, 0xda, 0xc1, 0x03, 0xd8, 0xfb, 0x28, 0x73, 0xb4, - 0x2f, 0x96, 0x21, 0x3a, 0xbc, 0x21, 0x3e, 0x8c, 0x08, 0xaf, 0x5f, 0x39, 0x73, 0x1f, 0xab, 0x83, - 0xb1, 
0x64, 0x45, 0xda, 0x50, 0x7a, 0xcd, 0xdd, 0xc8, 0x3d, 0x71, 0xc7, 0xae, 0xc3, 0x0a, 0x73, - 0x52, 0x2c, 0x01, 0x19, 0x07, 0xf2, 0x6b, 0x05, 0x38, 0xe5, 0xa7, 0x95, 0x4e, 0xd9, 0x1d, 0x30, - 0xbf, 0x76, 0x9d, 0x56, 0x63, 0xa5, 0x07, 0xe5, 0xa0, 0x6c, 0xec, 0xaf, 0x0b, 0x93, 0xbf, 0x38, - 0x55, 0x91, 0xdd, 0x69, 0x31, 0xe7, 0x3f, 0xdd, 0x92, 0xf2, 0x4f, 0xa6, 0xa1, 0x64, 0xa5, 0x7d, - 0xb9, 0x08, 0x8d, 0xd8, 0x6c, 0x9d, 0xfb, 0xa7, 0x2b, 0xb7, 0x52, 0x3f, 0x5d, 0x59, 0x1d, 0xfe, - 0x90, 0x31, 0xaa, 0xd5, 0x71, 0xff, 0x77, 0xe5, 0xcf, 0x8a, 0x50, 0x5a, 0x9f, 0x5f, 0x48, 0x6e, - 0x17, 0x0b, 0xf7, 0x60, 0xbb, 0xb8, 0x05, 0xd5, 0x8d, 0x9e, 0x65, 0x07, 0x96, 0x93, 0xfb, 0xc2, - 0xbe, 0xfa, 0x47, 0x8d, 0xbc, 0xf7, 0x2a, 0x50, 0x51, 0xc1, 0x93, 0x36, 0x54, 0xdb, 0x22, 0x62, - 0x5a, 0x6e, 0xd7, 0x2b, 0x19, 0x79, 0x4d, 0x30, 0x92, 0x2f, 0xa8, 0xd0, 0xb5, 0x5d, 0x90, 0x7f, - 0xb9, 0xbe, 0xe7, 0xd2, 0xd4, 0x3e, 0x0f, 0xa1, 0x16, 0x70, 0xef, 0x99, 0xff, 0x5b, 0x01, 0x92, - 0x8a, 0xcf, 0xbd, 0xef, 0x4d, 0xdb, 0xe9, 0xde, 0x34, 0x7f, 0x14, 0x83, 0x2f, 0xbb, 0x43, 0x69, - 0x7f, 0x58, 0x84, 0x91, 0x7b, 0x76, 0xc9, 0x92, 0x26, 0xbc, 0xc8, 0xe6, 0x72, 0x4e, 0x8c, 0x03, - 0x7d, 0xc8, 0x3a, 0x29, 0x1f, 0xb2, 0xbc, 0x7f, 0xd5, 0xbc, 0x8b, 0x07, 0xd9, 0x5f, 0x15, 0x40, - 0x4e, 0xcb, 0x4b, 0x8e, 0x1f, 0xe8, 0x8e, 0xc1, 0x7f, 0xee, 0x2e, 0xd7, 0x80, 0xbc, 0xae, 0x0a, - 0xd2, 0x9d, 0x47, 0x2c, 0xfb, 0xfc, 0x59, 0xcd, 0xf9, 0xe4, 0xc3, 0x50, 0xdb, 0x72, 0xfd, 0x80, - 0xcf, 0xf3, 0xc5, 0xa4, 0x5d, 0xe7, 0x8a, 0x4c, 0xc7, 0x90, 0x22, 0x7d, 0xa4, 0x57, 0x19, 0x7c, - 0xa4, 0xa7, 0x7d, 0xb3, 0x08, 0xa3, 0xef, 0x95, 0x9b, 0xa2, 0x59, 0x3e, 0x77, 0xa5, 0x9c, 0x3e, - 0x77, 0xe5, 0xc3, 0xf8, 0xdc, 0x69, 0xdf, 0x29, 0x00, 0xdc, 0xb3, 0x6b, 0xaa, 0x66, 0xd2, 0x1d, - 0x2e, 0x77, 0xbf, 0xca, 0x76, 0x86, 0xfb, 0xbd, 0x8a, 0xfa, 0x24, 0xee, 0x0a, 0xf7, 0x66, 0x01, - 0xc6, 0xf5, 0x84, 0x7b, 0x59, 0x6e, 0xd5, 0x32, 0xe5, 0xad, 0x16, 0x5e, 0xc9, 0x4b, 0xa6, 0x63, - 0x8a, 0x2d, 0x79, 0x2e, 0x0a, 0x91, 0x7a, 
0x2d, 0xea, 0xf6, 0x7d, 0xb1, 0x4d, 0xb9, 0x9a, 0x93, - 0xa0, 0xbc, 0x8b, 0x3b, 0x5f, 0xe9, 0x48, 0xdc, 0xf9, 0xe2, 0x17, 0x95, 0xca, 0x77, 0xbc, 0xa8, - 0xb4, 0x03, 0xf5, 0x4d, 0xcf, 0xed, 0x70, 0x8f, 0x39, 0xf9, 0x3f, 0xce, 0xcb, 0x39, 0xd6, 0x94, - 0xe8, 0x4f, 0xd4, 0x91, 0x8d, 0x67, 0x41, 0xe1, 0x63, 0xc4, 0x8a, 0x1b, 0xa4, 0x5d, 0xc1, 0x75, - 0xe4, 0x28, 0xb9, 0x86, 0x73, 0xc9, 0x9a, 0x40, 0x47, 0xc5, 0x26, 0xe9, 0x25, 0x57, 0xbd, 0x37, - 0x5e, 0x72, 0xda, 0x2f, 0x94, 0xd5, 0x04, 0x76, 0xdf, 0x45, 0xe3, 0x7b, 0xef, 0x5f, 0x6f, 0x4c, - 0xdf, 0x3d, 0xac, 0xde, 0xc3, 0xbb, 0x87, 0xb5, 0xa1, 0x7c, 0xb2, 0xf6, 0x4a, 0x90, 0xda, 0x37, - 0xbd, 0x7f, 0x3a, 0xf1, 0x5f, 0xea, 0x74, 0xe2, 0xad, 0x22, 0x44, 0x13, 0xc1, 0x21, 0xdd, 0x2c, - 0x5e, 0x86, 0x5a, 0x47, 0xbf, 0x35, 0x4f, 0x6d, 0x7d, 0x37, 0xcf, 0x4f, 0x14, 0x57, 0x24, 0x06, - 0x86, 0x68, 0xc4, 0x07, 0xb0, 0xc2, 0x40, 0xc6, 0xb9, 0xad, 0xcd, 0x51, 0x4c, 0x64, 0x61, 0xcf, - 0x8a, 0xde, 0x31, 0xc6, 0x46, 0xfb, 0xcb, 0x22, 0xc8, 0x88, 0xd7, 0x84, 0x42, 0x65, 0xd3, 0xba, - 0x45, 0xcd, 0xdc, 0x2e, 0x87, 0xb1, 0x5f, 0xdb, 0x0a, 0x73, 0x3a, 0x4f, 0x40, 0x81, 0xce, 0xed, - 0xa4, 0xe2, 0x78, 0x44, 0xca, 0x2f, 0x87, 0x9d, 0x34, 0x7e, 0xcc, 0x22, 0xed, 0xa4, 0x22, 0x09, - 0x15, 0x0f, 0x61, 0x96, 0xe5, 0x67, 0xd4, 0x52, 0xa4, 0x79, 0xcc, 0xb2, 0xb1, 0xb3, 0x6e, 0x65, - 0x96, 0xf5, 0xc5, 0xe5, 0x63, 0xc9, 0xa3, 0xf9, 0xd9, 0x6f, 0x7f, 0xf7, 0xc2, 0x03, 0xdf, 0xf9, - 0xee, 0x85, 0x07, 0xde, 0xf9, 0xee, 0x85, 0x07, 0xbe, 0xb8, 0x7f, 0xa1, 0xf0, 0xed, 0xfd, 0x0b, - 0x85, 0xef, 0xec, 0x5f, 0x28, 0xbc, 0xb3, 0x7f, 0xa1, 0xf0, 0x0f, 0xfb, 0x17, 0x0a, 0xbf, 0xfc, - 0x8f, 0x17, 0x1e, 0xf8, 0xcc, 0xb3, 0x51, 0x15, 0xa6, 0x55, 0x15, 0xa6, 0x15, 0xc3, 0xe9, 0xee, - 0x76, 0x7b, 0x9a, 0x55, 0x21, 0x4a, 0x51, 0x55, 0xf8, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x12, - 0x35, 0xa0, 0x09, 0x68, 0x92, 0x00, 0x00, + // 7439 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x5b, 0x6c, 
0x1d, 0xd7, + 0x75, 0xb6, 0xcf, 0x8d, 0x3c, 0x67, 0x1d, 0x92, 0xa2, 0xb6, 0x64, 0x99, 0x92, 0x65, 0x51, 0x19, + 0xff, 0xf6, 0xaf, 0xfc, 0x49, 0xc8, 0xdf, 0xfc, 0x7d, 0x4b, 0xfe, 0x24, 0x36, 0x0f, 0x29, 0x52, + 0x94, 0x48, 0x89, 0x59, 0x87, 0x94, 0x9d, 0xf8, 0x4f, 0xfc, 0x0f, 0x67, 0x36, 0x0f, 0xc7, 0x9c, + 0x33, 0x73, 0x3c, 0x33, 0x87, 0x12, 0x9d, 0x16, 0xb9, 0x3d, 0xd8, 0x45, 0x5b, 0xb4, 0xc8, 0x53, + 0x80, 0x22, 0x2d, 0x5a, 0x14, 0xc8, 0x43, 0x90, 0x3e, 0x14, 0x70, 0x1f, 0x0a, 0xf4, 0x8a, 0xa2, + 0x4d, 0x8b, 0x5e, 0xf2, 0x50, 0xa0, 0x2e, 0x0a, 0x10, 0x0d, 0x8b, 0x3e, 0xb4, 0x40, 0x83, 0xa0, + 0x01, 0x9a, 0x44, 0x08, 0x90, 0x62, 0xdf, 0xe6, 0x76, 0xe6, 0x48, 0xe4, 0x19, 0x52, 0x96, 0x5b, + 0xbf, 0xcd, 0xec, 0xbd, 0xf6, 0xb7, 0xf6, 0xac, 0x7d, 0x5b, 0x7b, 0xed, 0xb5, 0xd7, 0xc0, 0x62, + 0xcb, 0x0a, 0xb6, 0xba, 0x1b, 0x53, 0x86, 0xdb, 0x9e, 0x76, 0xba, 0x6d, 0xbd, 0xe3, 0xb9, 0xaf, + 0xf1, 0x87, 0x4d, 0xdb, 0xbd, 0x35, 0xdd, 0xd9, 0x6e, 0x4d, 0xeb, 0x1d, 0xcb, 0x8f, 0x52, 0x76, + 0x9e, 0xd2, 0xed, 0xce, 0x96, 0xfe, 0xd4, 0x74, 0x8b, 0x3a, 0xd4, 0xd3, 0x03, 0x6a, 0x4e, 0x75, + 0x3c, 0x37, 0x70, 0xc9, 0x73, 0x11, 0xd0, 0x94, 0x02, 0x9a, 0x52, 0xc5, 0xa6, 0x3a, 0xdb, 0xad, + 0x29, 0x06, 0x14, 0xa5, 0x28, 0xa0, 0x73, 0x1f, 0x89, 0xd5, 0xa0, 0xe5, 0xb6, 0xdc, 0x69, 0x8e, + 0xb7, 0xd1, 0xdd, 0xe4, 0x6f, 0xfc, 0x85, 0x3f, 0x09, 0x3e, 0xe7, 0xb4, 0xed, 0xe7, 0xfd, 0x29, + 0xcb, 0x65, 0xd5, 0x9a, 0x36, 0x5c, 0x8f, 0x4e, 0xef, 0xf4, 0xd4, 0xe5, 0xdc, 0xd3, 0x11, 0x4d, + 0x5b, 0x37, 0xb6, 0x2c, 0x87, 0x7a, 0xbb, 0xea, 0x5b, 0xa6, 0x3d, 0xea, 0xbb, 0x5d, 0xcf, 0xa0, + 0x87, 0x2a, 0xe5, 0x4f, 0xb7, 0x69, 0xa0, 0x67, 0xf1, 0x9a, 0xee, 0x57, 0xca, 0xeb, 0x3a, 0x81, + 0xd5, 0xee, 0x65, 0xf3, 0xec, 0xbd, 0x0a, 0xf8, 0xc6, 0x16, 0x6d, 0xeb, 0xe9, 0x72, 0xda, 0x3f, + 0xd4, 0xe0, 0xd4, 0xec, 0x86, 0x1f, 0x78, 0xba, 0x11, 0xac, 0xba, 0xe6, 0x1a, 0x6d, 0x77, 0x6c, + 0x3d, 0xa0, 0x64, 0x1b, 0xaa, 0xac, 0x6e, 0xa6, 0x1e, 0xe8, 0x13, 0x85, 0x8b, 0x85, 0x4b, 0xf5, + 0x99, 0xd9, 0xa9, 0x01, 
0xdb, 0x62, 0x6a, 0x45, 0x02, 0x35, 0x46, 0xf6, 0xf7, 0x26, 0xab, 0xea, + 0x0d, 0x43, 0x06, 0xe4, 0x6b, 0x05, 0x18, 0x71, 0x5c, 0x93, 0x36, 0xa9, 0x4d, 0x8d, 0xc0, 0xf5, + 0x26, 0x8a, 0x17, 0x4b, 0x97, 0xea, 0x33, 0x9f, 0x1b, 0x98, 0x63, 0xc6, 0x17, 0x4d, 0x5d, 0x8f, + 0x31, 0xb8, 0xec, 0x04, 0xde, 0x6e, 0xe3, 0xf4, 0xb7, 0xf7, 0x26, 0x1f, 0xda, 0xdf, 0x9b, 0x1c, + 0x89, 0x67, 0x61, 0xa2, 0x26, 0x64, 0x1d, 0xea, 0x81, 0x6b, 0x33, 0x91, 0x59, 0xae, 0xe3, 0x4f, + 0x94, 0x78, 0xc5, 0x2e, 0x4c, 0x09, 0x69, 0x33, 0xf6, 0x53, 0xac, 0xbb, 0x4c, 0xed, 0x3c, 0x35, + 0xb5, 0x16, 0x92, 0x35, 0x4e, 0x49, 0xe0, 0x7a, 0x94, 0xe6, 0x63, 0x1c, 0x87, 0x50, 0x38, 0xe1, + 0x53, 0xa3, 0xeb, 0x59, 0xc1, 0xee, 0x9c, 0xeb, 0x04, 0xf4, 0x76, 0x30, 0x51, 0xe6, 0x52, 0x7e, + 0x32, 0x0b, 0x7a, 0xd5, 0x35, 0x9b, 0x49, 0xea, 0xc6, 0xa9, 0xfd, 0xbd, 0xc9, 0x13, 0xa9, 0x44, + 0x4c, 0x63, 0x12, 0x07, 0xc6, 0xad, 0xb6, 0xde, 0xa2, 0xab, 0x5d, 0xdb, 0x6e, 0x52, 0xc3, 0xa3, + 0x81, 0x3f, 0x51, 0xe1, 0x9f, 0x70, 0x29, 0x8b, 0xcf, 0xb2, 0x6b, 0xe8, 0xf6, 0x8d, 0x8d, 0xd7, + 0xa8, 0x11, 0x20, 0xdd, 0xa4, 0x1e, 0x75, 0x0c, 0xda, 0x98, 0x90, 0x1f, 0x33, 0xbe, 0x94, 0x42, + 0xc2, 0x1e, 0x6c, 0xb2, 0x08, 0x27, 0x3b, 0x9e, 0xe5, 0xf2, 0x2a, 0xd8, 0xba, 0xef, 0x5f, 0xd7, + 0xdb, 0x74, 0x62, 0xe8, 0x62, 0xe1, 0x52, 0xad, 0x71, 0x56, 0xc2, 0x9c, 0x5c, 0x4d, 0x13, 0x60, + 0x6f, 0x19, 0x72, 0x09, 0xaa, 0x2a, 0x71, 0x62, 0xf8, 0x62, 0xe1, 0x52, 0x45, 0xf4, 0x1d, 0x55, + 0x16, 0xc3, 0x5c, 0xb2, 0x00, 0x55, 0x7d, 0x73, 0xd3, 0x72, 0x18, 0x65, 0x95, 0x8b, 0xf0, 0x7c, + 0xd6, 0xa7, 0xcd, 0x4a, 0x1a, 0x81, 0xa3, 0xde, 0x30, 0x2c, 0x4b, 0xae, 0x02, 0xf1, 0xa9, 0xb7, + 0x63, 0x19, 0x74, 0xd6, 0x30, 0xdc, 0xae, 0x13, 0xf0, 0xba, 0xd7, 0x78, 0xdd, 0xcf, 0xc9, 0xba, + 0x93, 0x66, 0x0f, 0x05, 0x66, 0x94, 0x22, 0x2f, 0xc2, 0xb8, 0x1c, 0x76, 0x91, 0x14, 0x80, 0x23, + 0x9d, 0x66, 0x82, 0xc4, 0x54, 0x1e, 0xf6, 0x50, 0x13, 0x13, 0xce, 0xeb, 0xdd, 0xc0, 0x6d, 0x33, + 0xc8, 0x24, 0xd3, 0x35, 0x77, 0x9b, 0x3a, 0x13, 0xf5, 0x8b, 
0x85, 0x4b, 0xd5, 0xc6, 0xc5, 0xfd, + 0xbd, 0xc9, 0xf3, 0xb3, 0x77, 0xa1, 0xc3, 0xbb, 0xa2, 0x90, 0x1b, 0x50, 0x33, 0x1d, 0x7f, 0xd5, + 0xb5, 0x2d, 0x63, 0x77, 0x62, 0x84, 0x57, 0xf0, 0x29, 0xf9, 0xa9, 0xb5, 0xf9, 0xeb, 0x4d, 0x91, + 0x71, 0x67, 0x6f, 0xf2, 0x7c, 0xef, 0xec, 0x38, 0x15, 0xe6, 0x63, 0x84, 0x41, 0x56, 0x38, 0xe0, + 0x9c, 0xeb, 0x6c, 0x5a, 0xad, 0x89, 0x51, 0xde, 0x1a, 0x17, 0xfb, 0x74, 0xe8, 0xf9, 0xeb, 0x4d, + 0x41, 0xd7, 0x18, 0x95, 0xec, 0xc4, 0x2b, 0x46, 0x08, 0xe7, 0x5e, 0x80, 0x93, 0x3d, 0xa3, 0x96, + 0x8c, 0x43, 0x69, 0x9b, 0xee, 0xf2, 0x49, 0xa9, 0x86, 0xec, 0x91, 0x9c, 0x86, 0xca, 0x8e, 0x6e, + 0x77, 0xe9, 0x44, 0x91, 0xa7, 0x89, 0x97, 0x8f, 0x15, 0x9f, 0x2f, 0x68, 0xbf, 0x51, 0x82, 0x11, + 0x35, 0x17, 0x34, 0x2d, 0x67, 0x9b, 0xbc, 0x04, 0x25, 0xdb, 0x6d, 0xc9, 0x19, 0xed, 0xe3, 0x03, + 0xcf, 0x2f, 0xcb, 0x6e, 0xab, 0x31, 0xbc, 0xbf, 0x37, 0x59, 0x5a, 0x76, 0x5b, 0xc8, 0x10, 0x89, + 0x01, 0x95, 0x6d, 0x7d, 0x73, 0x5b, 0xe7, 0x75, 0xa8, 0xcf, 0x34, 0x06, 0x86, 0xbe, 0xc6, 0x50, + 0x58, 0x5d, 0x1b, 0xb5, 0xfd, 0xbd, 0xc9, 0x0a, 0x7f, 0x45, 0x81, 0x4d, 0x5c, 0xa8, 0x6d, 0xd8, + 0xba, 0xb1, 0xbd, 0xe5, 0xda, 0x74, 0xa2, 0x94, 0x93, 0x51, 0x43, 0x21, 0x89, 0x06, 0x08, 0x5f, + 0x31, 0xe2, 0x41, 0x0c, 0x18, 0xea, 0x9a, 0xbe, 0xe5, 0x6c, 0xcb, 0xd9, 0xe9, 0x85, 0x81, 0xb9, + 0xad, 0xcf, 0xf3, 0x6f, 0x82, 0xfd, 0xbd, 0xc9, 0x21, 0xf1, 0x8c, 0x12, 0x5a, 0xfb, 0x5e, 0x1d, + 0xc6, 0x54, 0x23, 0xdd, 0xa4, 0x5e, 0x40, 0x6f, 0x93, 0x8b, 0x50, 0x76, 0xd8, 0xa0, 0xe1, 0x8d, + 0xdc, 0x18, 0x91, 0x7d, 0xb2, 0xcc, 0x07, 0x0b, 0xcf, 0x61, 0x35, 0x13, 0x0b, 0xae, 0x14, 0xf8, + 0xe0, 0x35, 0x6b, 0x72, 0x18, 0x51, 0x33, 0xf1, 0x8c, 0x12, 0x9a, 0xbc, 0x02, 0x65, 0xfe, 0xf1, + 0x42, 0xd4, 0x9f, 0x18, 0x9c, 0x05, 0xfb, 0xf4, 0x2a, 0xfb, 0x02, 0xfe, 0xe1, 0x1c, 0x94, 0x75, + 0xc5, 0xae, 0xb9, 0x29, 0x05, 0xfb, 0xf1, 0x1c, 0x82, 0x5d, 0x10, 0x5d, 0x71, 0x7d, 0x7e, 0x01, + 0x19, 0x22, 0xf9, 0xa5, 0x02, 0x9c, 0x34, 0x5c, 0x27, 0xd0, 0x99, 0x12, 0xa0, 0x96, 0xbf, 0x89, + 
0x0a, 0xe7, 0x73, 0x75, 0x60, 0x3e, 0x73, 0x69, 0xc4, 0xc6, 0xc3, 0x6c, 0x36, 0xef, 0x49, 0xc6, + 0x5e, 0xde, 0xe4, 0x57, 0x0a, 0xf0, 0x30, 0x9b, 0x65, 0x7b, 0x88, 0xf9, 0xda, 0x70, 0xb4, 0xb5, + 0x3a, 0xbb, 0xbf, 0x37, 0xf9, 0xf0, 0x52, 0x16, 0x33, 0xcc, 0xae, 0x03, 0xab, 0xdd, 0x29, 0xbd, + 0x57, 0x61, 0xe0, 0xeb, 0x4e, 0x7d, 0x66, 0xf9, 0x28, 0x95, 0x90, 0xc6, 0xa3, 0xb2, 0x2b, 0x67, + 0xe9, 0x5c, 0x98, 0x55, 0x0b, 0x72, 0x19, 0x86, 0x77, 0x5c, 0xbb, 0xdb, 0xa6, 0xfe, 0x44, 0x95, + 0xaf, 0xdc, 0xe7, 0xb2, 0x26, 0xd4, 0x9b, 0x9c, 0xa4, 0x71, 0x42, 0xc2, 0x0f, 0x8b, 0x77, 0x1f, + 0x55, 0x59, 0x62, 0xc1, 0x90, 0x6d, 0xb5, 0xad, 0xc0, 0xe7, 0x4b, 0x5a, 0x7d, 0xe6, 0xf2, 0xc0, + 0x9f, 0x25, 0x86, 0xe8, 0x32, 0x07, 0x13, 0xa3, 0x46, 0x3c, 0xa3, 0x64, 0xc0, 0xa6, 0x42, 0xdf, + 0xd0, 0x6d, 0xb1, 0xe4, 0xd5, 0x67, 0x3e, 0x39, 0xf8, 0xb0, 0x61, 0x28, 0x8d, 0x51, 0xf9, 0x4d, + 0x15, 0xfe, 0x8a, 0x02, 0x9b, 0x7c, 0x16, 0xc6, 0x12, 0xad, 0xe9, 0x4f, 0xd4, 0xb9, 0x74, 0x1e, + 0xcb, 0x92, 0x4e, 0x48, 0xd5, 0x38, 0x23, 0xc1, 0xc6, 0x12, 0x3d, 0xc4, 0xc7, 0x14, 0x18, 0xb9, + 0x06, 0x55, 0xdf, 0x32, 0xa9, 0xa1, 0x7b, 0xfe, 0xc4, 0xc8, 0x41, 0x80, 0xc7, 0x25, 0x70, 0xb5, + 0x29, 0x8b, 0x61, 0x08, 0x40, 0xa6, 0x00, 0x3a, 0xba, 0x17, 0x58, 0x42, 0x85, 0x1c, 0xe5, 0xea, + 0xcc, 0xd8, 0xfe, 0xde, 0x24, 0xac, 0x86, 0xa9, 0x18, 0xa3, 0x60, 0xf4, 0xac, 0xec, 0x92, 0xd3, + 0xe9, 0x06, 0xfe, 0xc4, 0xd8, 0xc5, 0xd2, 0xa5, 0x9a, 0xa0, 0x6f, 0x86, 0xa9, 0x18, 0xa3, 0x20, + 0xdf, 0x2a, 0xc0, 0xa3, 0xd1, 0x6b, 0xef, 0x20, 0x3b, 0x71, 0xe4, 0x83, 0x6c, 0x72, 0x7f, 0x6f, + 0xf2, 0xd1, 0x66, 0x7f, 0x96, 0x78, 0xb7, 0xfa, 0x68, 0x2f, 0xc1, 0xe8, 0x6c, 0x37, 0xd8, 0x72, + 0x3d, 0xeb, 0x0d, 0xae, 0x0e, 0x93, 0x05, 0xa8, 0x04, 0x5c, 0xad, 0x11, 0xeb, 0xf2, 0x13, 0x59, + 0xa2, 0x16, 0x2a, 0xe6, 0x35, 0xba, 0xab, 0xb4, 0x01, 0xb1, 0x3e, 0x0a, 0x35, 0x47, 0x14, 0xd7, + 0x7e, 0xbd, 0x00, 0xb5, 0x86, 0xee, 0x5b, 0x06, 0x83, 0x27, 0x73, 0x50, 0xee, 0xfa, 0xd4, 0x3b, + 0x1c, 0x28, 0x9f, 0xa5, 0xd7, 0x7d, 
0xea, 0x21, 0x2f, 0x4c, 0x6e, 0x40, 0xb5, 0xa3, 0xfb, 0xfe, + 0x2d, 0xd7, 0x33, 0xe5, 0x4a, 0x73, 0x40, 0x20, 0xa1, 0xaf, 0xca, 0xa2, 0x18, 0x82, 0x68, 0x75, + 0x88, 0x96, 0x5a, 0xed, 0x07, 0x05, 0x38, 0xd5, 0xe8, 0x6e, 0x6e, 0x52, 0x4f, 0xaa, 0x67, 0x42, + 0xf1, 0x21, 0x14, 0x2a, 0x1e, 0x35, 0x2d, 0x5f, 0xd6, 0x7d, 0x7e, 0xe0, 0xa6, 0x43, 0x86, 0x22, + 0xf5, 0x2c, 0x2e, 0x2f, 0x9e, 0x80, 0x02, 0x9d, 0x74, 0xa1, 0xf6, 0x1a, 0x0d, 0xfc, 0xc0, 0xa3, + 0x7a, 0x5b, 0x7e, 0xdd, 0x95, 0x81, 0x59, 0x5d, 0xa5, 0x41, 0x93, 0x23, 0xc5, 0xd5, 0xba, 0x30, + 0x11, 0x23, 0x4e, 0xda, 0x1f, 0x55, 0x60, 0x64, 0xce, 0x6d, 0x6f, 0x58, 0x0e, 0x35, 0x2f, 0x9b, + 0x2d, 0x4a, 0x5e, 0x85, 0x32, 0x35, 0x5b, 0x54, 0x7e, 0xed, 0xe0, 0xeb, 0x2c, 0x03, 0x8b, 0xb4, + 0x05, 0xf6, 0x86, 0x1c, 0x98, 0x2c, 0xc3, 0xd8, 0xa6, 0xe7, 0xb6, 0xc5, 0xd4, 0xb5, 0xb6, 0xdb, + 0x91, 0xaa, 0x62, 0xe3, 0x7f, 0xa8, 0xe9, 0x60, 0x21, 0x91, 0x7b, 0x67, 0x6f, 0x12, 0xa2, 0x37, + 0x4c, 0x95, 0x25, 0x2f, 0xc3, 0x44, 0x94, 0x12, 0x8e, 0xe1, 0x39, 0xa6, 0x57, 0x73, 0x55, 0xa1, + 0xd2, 0x38, 0xbf, 0xbf, 0x37, 0x39, 0xb1, 0xd0, 0x87, 0x06, 0xfb, 0x96, 0x26, 0x6f, 0x16, 0x60, + 0x3c, 0xca, 0x14, 0xf3, 0xaa, 0xd4, 0x10, 0x8e, 0x68, 0xc2, 0xe6, 0x1b, 0x90, 0x85, 0x14, 0x0b, + 0xec, 0x61, 0x4a, 0x16, 0x60, 0x24, 0x70, 0x63, 0xf2, 0xaa, 0x70, 0x79, 0x69, 0x6a, 0xc7, 0xbc, + 0xe6, 0xf6, 0x95, 0x56, 0xa2, 0x1c, 0x41, 0x38, 0xa3, 0xde, 0x53, 0x92, 0x1a, 0xe2, 0x92, 0x3a, + 0xb7, 0xbf, 0x37, 0x79, 0x66, 0x2d, 0x93, 0x02, 0xfb, 0x94, 0x24, 0x5f, 0x2a, 0xc0, 0x98, 0xca, + 0x92, 0x32, 0x1a, 0x3e, 0x4a, 0x19, 0x11, 0xd6, 0x23, 0xd6, 0x12, 0x0c, 0x30, 0xc5, 0x50, 0xfb, + 0x51, 0x19, 0x6a, 0xe1, 0xcc, 0x46, 0x1e, 0x87, 0x0a, 0xdf, 0x0b, 0x4b, 0x85, 0x35, 0x5c, 0xb2, + 0xf8, 0x96, 0x19, 0x45, 0x1e, 0x79, 0x02, 0x86, 0x0d, 0xb7, 0xdd, 0xd6, 0x1d, 0x93, 0xdb, 0x37, + 0x6a, 0x8d, 0x3a, 0x5b, 0xa9, 0xe7, 0x44, 0x12, 0xaa, 0x3c, 0x72, 0x1e, 0xca, 0xba, 0xd7, 0x12, + 0xa6, 0x86, 0x9a, 0x98, 0x8f, 0x66, 0xbd, 0x96, 0x8f, 0x3c, 0x95, 0x7c, 
0x14, 0x4a, 0xd4, 0xd9, + 0x99, 0x28, 0xf7, 0x57, 0x05, 0x2e, 0x3b, 0x3b, 0x37, 0x75, 0xaf, 0x51, 0x97, 0x75, 0x28, 0x5d, + 0x76, 0x76, 0x90, 0x95, 0x21, 0xcb, 0x30, 0x4c, 0x9d, 0x1d, 0xd6, 0xf6, 0xd2, 0x06, 0xf0, 0x81, + 0x3e, 0xc5, 0x19, 0x89, 0xd4, 0x8a, 0x43, 0x85, 0x42, 0x26, 0xa3, 0x82, 0x20, 0x9f, 0x86, 0x11, + 0xa1, 0x5b, 0xac, 0xb0, 0x36, 0xf1, 0x27, 0x86, 0x38, 0xe4, 0x64, 0x7f, 0xe5, 0x84, 0xd3, 0x45, + 0x36, 0x97, 0x58, 0xa2, 0x8f, 0x09, 0x28, 0xf2, 0x69, 0xa8, 0x29, 0x73, 0x9a, 0x6a, 0xd9, 0x4c, + 0x73, 0x05, 0x4a, 0x22, 0xa4, 0xaf, 0x77, 0x2d, 0x8f, 0xb6, 0xa9, 0x13, 0xf8, 0x8d, 0x93, 0x6a, + 0x03, 0xab, 0x72, 0x7d, 0x8c, 0xd0, 0xc8, 0x46, 0xaf, 0xdd, 0x45, 0x18, 0x0d, 0x1e, 0xef, 0x33, + 0xab, 0x0f, 0x60, 0x74, 0xf9, 0x1c, 0x9c, 0x08, 0x0d, 0x23, 0x72, 0x6f, 0x2d, 0xcc, 0x08, 0x4f, + 0xb3, 0xe2, 0x4b, 0xc9, 0xac, 0x3b, 0x7b, 0x93, 0x8f, 0x65, 0xec, 0xae, 0x23, 0x02, 0x4c, 0x83, + 0x69, 0x7f, 0x50, 0x82, 0x5e, 0xb5, 0x3b, 0x29, 0xb4, 0xc2, 0x51, 0x0b, 0x2d, 0xfd, 0x41, 0x62, + 0xfa, 0x7c, 0x5e, 0x16, 0xcb, 0xff, 0x51, 0x59, 0x0d, 0x53, 0x3a, 0xea, 0x86, 0x79, 0x50, 0xc6, + 0x8e, 0xf6, 0x56, 0x19, 0xc6, 0xe6, 0x75, 0xda, 0x76, 0x9d, 0x7b, 0x6e, 0x42, 0x0a, 0x0f, 0xc4, + 0x26, 0xe4, 0x12, 0x54, 0x3d, 0xda, 0xb1, 0x2d, 0x43, 0xf7, 0x79, 0xd3, 0x4b, 0x73, 0x1c, 0xca, + 0x34, 0x0c, 0x73, 0xfb, 0x6c, 0x3e, 0x4b, 0x0f, 0xe4, 0xe6, 0xb3, 0xfc, 0xee, 0x6f, 0x3e, 0xb5, + 0x2f, 0x15, 0x81, 0x2b, 0x2a, 0xe4, 0x22, 0x94, 0xd9, 0x22, 0x9c, 0x36, 0x79, 0xf0, 0x8e, 0xc3, + 0x73, 0xc8, 0x39, 0x28, 0x06, 0xae, 0x1c, 0x79, 0x20, 0xf3, 0x8b, 0x6b, 0x2e, 0x16, 0x03, 0x97, + 0xbc, 0x01, 0x60, 0xb8, 0x8e, 0x69, 0x29, 0x2b, 0x75, 0xbe, 0x0f, 0x5b, 0x70, 0xbd, 0x5b, 0xba, + 0x67, 0xce, 0x85, 0x88, 0x62, 0xfb, 0x11, 0xbd, 0x63, 0x8c, 0x1b, 0x79, 0x01, 0x86, 0x5c, 0x67, + 0xa1, 0x6b, 0xdb, 0x5c, 0xa0, 0xb5, 0xc6, 0xff, 0x64, 0x7b, 0xc2, 0x1b, 0x3c, 0xe5, 0xce, 0xde, + 0xe4, 0x59, 0xa1, 0xdf, 0xb2, 0xb7, 0x97, 0x3c, 0x2b, 0xb0, 0x9c, 0x56, 0x33, 0xf0, 0xf4, 0x80, + 0xb6, 0x76, 
0x51, 0x16, 0xd3, 0xbe, 0x5a, 0x80, 0xfa, 0x82, 0x75, 0x9b, 0x9a, 0x2f, 0x59, 0x8e, + 0xe9, 0xde, 0x22, 0x08, 0x43, 0x36, 0x75, 0x5a, 0xc1, 0x96, 0xec, 0xfd, 0x53, 0xb1, 0xb1, 0x16, + 0x1e, 0x6e, 0x44, 0xf5, 0x6f, 0xd3, 0x40, 0x67, 0xa3, 0x6f, 0xbe, 0x2b, 0xcd, 0xef, 0x62, 0x53, + 0xca, 0x11, 0x50, 0x22, 0x91, 0x69, 0xa8, 0x09, 0xed, 0xd3, 0x72, 0x5a, 0x5c, 0x86, 0xd5, 0x68, + 0xd2, 0x6b, 0xaa, 0x0c, 0x8c, 0x68, 0xb4, 0x5d, 0x38, 0xd9, 0x23, 0x06, 0x62, 0x42, 0x39, 0xd0, + 0x5b, 0x6a, 0x7e, 0x5d, 0x18, 0x58, 0xc0, 0x6b, 0x7a, 0x2b, 0x26, 0x5c, 0xbe, 0xc6, 0xaf, 0xe9, + 0x6c, 0x8d, 0x67, 0xe8, 0xda, 0x4f, 0x0a, 0x50, 0x5d, 0xe8, 0x3a, 0x06, 0xdf, 0x1b, 0xdd, 0xdb, + 0x14, 0xa6, 0x14, 0x86, 0x62, 0xa6, 0xc2, 0xd0, 0x85, 0xa1, 0xed, 0x5b, 0xa1, 0x42, 0x51, 0x9f, + 0x59, 0x19, 0xbc, 0x57, 0xc8, 0x2a, 0x4d, 0x5d, 0xe3, 0x78, 0xe2, 0x0c, 0x65, 0x4c, 0x56, 0x68, + 0xe8, 0xda, 0x4b, 0x9c, 0xa9, 0x64, 0x76, 0xee, 0xa3, 0x50, 0x8f, 0x91, 0x1d, 0xca, 0x68, 0xfb, + 0x3b, 0x65, 0x18, 0x5a, 0x6c, 0x36, 0x67, 0x57, 0x97, 0xc8, 0x33, 0x50, 0x97, 0xe6, 0xf5, 0xeb, + 0x91, 0x0c, 0xc2, 0xd3, 0x95, 0x66, 0x94, 0x85, 0x71, 0x3a, 0xa6, 0x8e, 0x79, 0x54, 0xb7, 0xdb, + 0x72, 0xb0, 0x84, 0xea, 0x18, 0xb2, 0x44, 0x14, 0x79, 0x44, 0x87, 0x31, 0xb6, 0xc3, 0x63, 0x22, + 0x14, 0xbb, 0x37, 0x39, 0x6c, 0x0e, 0xb8, 0xbf, 0xe3, 0x4a, 0xe2, 0x7a, 0x02, 0x00, 0x53, 0x80, + 0xe4, 0x79, 0xa8, 0xea, 0xdd, 0x60, 0x8b, 0x2b, 0xd0, 0x62, 0x6c, 0x9c, 0xe7, 0xa7, 0x0f, 0x32, + 0xed, 0xce, 0xde, 0xe4, 0xc8, 0x35, 0x6c, 0x3c, 0xa3, 0xde, 0x31, 0xa4, 0x66, 0x95, 0x53, 0x3b, + 0x46, 0x59, 0xb9, 0xca, 0xa1, 0x2b, 0xb7, 0x9a, 0x00, 0xc0, 0x14, 0x20, 0x79, 0x05, 0x46, 0xb6, + 0xe9, 0x6e, 0xa0, 0x6f, 0x48, 0x06, 0x43, 0x87, 0x61, 0x30, 0xce, 0x54, 0xb8, 0x6b, 0xb1, 0xe2, + 0x98, 0x00, 0x23, 0x3e, 0x9c, 0xde, 0xa6, 0xde, 0x06, 0xf5, 0x5c, 0xb9, 0xfb, 0x94, 0x4c, 0x86, + 0x0f, 0xc3, 0x64, 0x62, 0x7f, 0x6f, 0xf2, 0xf4, 0xb5, 0x0c, 0x18, 0xcc, 0x04, 0xd7, 0x7e, 0x5c, + 0x84, 0x13, 0x8b, 0xe2, 0x7c, 0xd3, 0xf5, 0xc4, 
0x22, 0x4c, 0xce, 0x42, 0xc9, 0xeb, 0x74, 0x79, + 0xcf, 0x29, 0x09, 0x3b, 0x29, 0xae, 0xae, 0x23, 0x4b, 0x23, 0x2f, 0x43, 0xd5, 0x94, 0x53, 0x86, + 0xdc, 0xfc, 0x1e, 0x76, 0xa2, 0xe1, 0x8b, 0xa0, 0x7a, 0xc3, 0x10, 0x8d, 0x69, 0xfa, 0x6d, 0xbf, + 0xd5, 0xb4, 0xde, 0xa0, 0x72, 0x3f, 0xc8, 0x35, 0xfd, 0x15, 0x91, 0x84, 0x2a, 0x8f, 0xad, 0xaa, + 0xdb, 0x74, 0x57, 0xec, 0x86, 0xca, 0xd1, 0xaa, 0x7a, 0x4d, 0xa6, 0x61, 0x98, 0x4b, 0x26, 0xd5, + 0x60, 0x61, 0xbd, 0xa0, 0x2c, 0x76, 0xf2, 0x37, 0x59, 0x82, 0x1c, 0x37, 0x6c, 0xca, 0x7c, 0xcd, + 0x0a, 0x02, 0xea, 0xc9, 0x66, 0x1c, 0x68, 0xca, 0xbc, 0xca, 0x11, 0x50, 0x22, 0x91, 0x0f, 0x41, + 0x8d, 0x83, 0x37, 0x6c, 0x77, 0x83, 0x37, 0x5c, 0x4d, 0xec, 0xe9, 0x6f, 0xaa, 0x44, 0x8c, 0xf2, + 0xb5, 0x9f, 0x16, 0xe1, 0xcc, 0x22, 0x0d, 0x84, 0x56, 0x33, 0x4f, 0x3b, 0xb6, 0xbb, 0xcb, 0x54, + 0x4b, 0xa4, 0xaf, 0x93, 0x17, 0x01, 0x2c, 0x7f, 0xa3, 0xb9, 0x63, 0xf0, 0x71, 0x20, 0xc6, 0xf0, + 0x45, 0x39, 0x24, 0x61, 0xa9, 0xd9, 0x90, 0x39, 0x77, 0x12, 0x6f, 0x18, 0x2b, 0x13, 0x6d, 0xaf, + 0x8a, 0x77, 0xd9, 0x5e, 0x35, 0x01, 0x3a, 0x91, 0x82, 0x5a, 0xe2, 0x94, 0xff, 0x47, 0xb1, 0x39, + 0x8c, 0x6e, 0x1a, 0x83, 0xc9, 0xa3, 0x32, 0x3a, 0x30, 0x6e, 0xd2, 0x4d, 0xbd, 0x6b, 0x07, 0xa1, + 0x52, 0x2d, 0x07, 0xf1, 0xc1, 0xf5, 0xf2, 0xf0, 0xec, 0x75, 0x3e, 0x85, 0x84, 0x3d, 0xd8, 0xda, + 0xef, 0x96, 0xe0, 0xdc, 0x22, 0x0d, 0x42, 0x8b, 0x8b, 0x9c, 0x1d, 0x9b, 0x1d, 0x6a, 0xb0, 0x56, + 0x78, 0xb3, 0x00, 0x43, 0xb6, 0xbe, 0x41, 0x6d, 0xb6, 0x7a, 0xb1, 0xaf, 0x79, 0x75, 0xe0, 0x85, + 0xa0, 0x3f, 0x97, 0xa9, 0x65, 0xce, 0x21, 0xb5, 0x34, 0x88, 0x44, 0x94, 0xec, 0xd9, 0xa4, 0x6e, + 0xd8, 0x5d, 0x3f, 0xa0, 0xde, 0xaa, 0xeb, 0x05, 0x52, 0x9f, 0x0c, 0x27, 0xf5, 0xb9, 0x28, 0x0b, + 0xe3, 0x74, 0x64, 0x06, 0xc0, 0xb0, 0x2d, 0xea, 0x04, 0xbc, 0x94, 0x18, 0x57, 0x44, 0xb5, 0xef, + 0x5c, 0x98, 0x83, 0x31, 0x2a, 0xc6, 0xaa, 0xed, 0x3a, 0x56, 0xe0, 0x0a, 0x56, 0xe5, 0x24, 0xab, + 0x95, 0x28, 0x0b, 0xe3, 0x74, 0xbc, 0x18, 0x0d, 0x3c, 0xcb, 0xf0, 0x79, 0xb1, 0x4a, 
0xaa, 0x58, + 0x94, 0x85, 0x71, 0x3a, 0xb6, 0xe6, 0xc5, 0xbe, 0xff, 0x50, 0x6b, 0xde, 0x37, 0x6b, 0x70, 0x21, + 0x21, 0xd6, 0x40, 0x0f, 0xe8, 0x66, 0xd7, 0x6e, 0xd2, 0x40, 0x35, 0xe0, 0x80, 0x6b, 0xe1, 0xcf, + 0x47, 0xed, 0x2e, 0xbc, 0x2a, 0x8c, 0xa3, 0x69, 0xf7, 0x9e, 0x0a, 0x1e, 0xa8, 0xed, 0xa7, 0xa1, + 0xe6, 0xe8, 0x81, 0xcf, 0x07, 0xae, 0x1c, 0xa3, 0xa1, 0x1a, 0x76, 0x5d, 0x65, 0x60, 0x44, 0x43, + 0x56, 0xe1, 0xb4, 0x14, 0xf1, 0xe5, 0xdb, 0x1d, 0xd7, 0x0b, 0xa8, 0x27, 0xca, 0xca, 0xe5, 0x54, + 0x96, 0x3d, 0xbd, 0x92, 0x41, 0x83, 0x99, 0x25, 0xc9, 0x0a, 0x9c, 0x32, 0xc4, 0x49, 0x33, 0xb5, + 0x5d, 0xdd, 0x54, 0x80, 0xc2, 0xc0, 0x15, 0x6e, 0x8d, 0xe6, 0x7a, 0x49, 0x30, 0xab, 0x5c, 0xba, + 0x37, 0x0f, 0x0d, 0xd4, 0x9b, 0x87, 0x07, 0xe9, 0xcd, 0xd5, 0xc1, 0x7a, 0x73, 0xed, 0x60, 0xbd, + 0x99, 0x49, 0x9e, 0xf5, 0x23, 0xea, 0x31, 0xf5, 0x44, 0xac, 0xb0, 0x31, 0x47, 0x86, 0x50, 0xf2, + 0xcd, 0x0c, 0x1a, 0xcc, 0x2c, 0x49, 0x36, 0xe0, 0x9c, 0x48, 0xbf, 0xec, 0x18, 0xde, 0x6e, 0x87, + 0x2d, 0x3c, 0x31, 0xdc, 0x7a, 0xc2, 0xc2, 0x78, 0xae, 0xd9, 0x97, 0x12, 0xef, 0x82, 0x42, 0xfe, + 0x2f, 0x8c, 0x8a, 0x56, 0x5a, 0xd1, 0x3b, 0x1c, 0x56, 0xb8, 0x35, 0x3c, 0x2c, 0x61, 0x47, 0xe7, + 0xe2, 0x99, 0x98, 0xa4, 0x25, 0xb3, 0x70, 0xa2, 0xb3, 0x63, 0xb0, 0xc7, 0xa5, 0xcd, 0xeb, 0x94, + 0x9a, 0xd4, 0xe4, 0xa7, 0x35, 0xb5, 0xc6, 0x23, 0xca, 0xd0, 0xb1, 0x9a, 0xcc, 0xc6, 0x34, 0x3d, + 0x79, 0x1e, 0x46, 0xfc, 0x40, 0xf7, 0x02, 0x69, 0xd6, 0x9b, 0x18, 0x13, 0x6e, 0x1f, 0xca, 0xea, + 0xd5, 0x8c, 0xe5, 0x61, 0x82, 0x32, 0x73, 0xbd, 0x38, 0x71, 0x7c, 0xeb, 0x45, 0x9e, 0xd9, 0xea, + 0xcf, 0x8a, 0x70, 0x71, 0x91, 0x06, 0x2b, 0xae, 0x23, 0x8d, 0xa2, 0x59, 0xcb, 0xfe, 0x81, 0x6c, + 0xa2, 0xc9, 0x45, 0xbb, 0x78, 0xa4, 0x8b, 0x76, 0xe9, 0x88, 0x16, 0xed, 0xf2, 0x31, 0x2e, 0xda, + 0xbf, 0x5f, 0x84, 0x47, 0x12, 0x92, 0x5c, 0x75, 0x4d, 0x35, 0xe1, 0xbf, 0x2f, 0xc0, 0x03, 0x08, + 0xf0, 0x8e, 0xd0, 0x3b, 0xf9, 0xb1, 0x56, 0x4a, 0xe3, 0xf9, 0x4a, 0x5a, 0xe3, 0x79, 0x25, 0xcf, + 0xca, 0x97, 0xc1, 0xe1, 
0x40, 0x2b, 0xde, 0x55, 0x20, 0x9e, 0x3c, 0x84, 0x13, 0xa6, 0x9f, 0x98, + 0xd2, 0x13, 0xfa, 0x95, 0x61, 0x0f, 0x05, 0x66, 0x94, 0x22, 0x4d, 0x78, 0xd8, 0xa7, 0x4e, 0x60, + 0x39, 0xd4, 0x4e, 0xc2, 0x09, 0x6d, 0xe8, 0x31, 0x09, 0xf7, 0x70, 0x33, 0x8b, 0x08, 0xb3, 0xcb, + 0xe6, 0x99, 0x07, 0xfe, 0x12, 0xb8, 0xca, 0x29, 0x44, 0x73, 0x64, 0x1a, 0xcb, 0x9b, 0x69, 0x8d, + 0xe5, 0xd5, 0xfc, 0xed, 0x36, 0x98, 0xb6, 0x32, 0x03, 0xc0, 0x5b, 0x21, 0xae, 0xae, 0x84, 0x8b, + 0x34, 0x86, 0x39, 0x18, 0xa3, 0x62, 0x0b, 0x90, 0x92, 0x73, 0x5c, 0x53, 0x09, 0x17, 0xa0, 0x66, + 0x3c, 0x13, 0x93, 0xb4, 0x7d, 0xb5, 0x9d, 0xca, 0xc0, 0xda, 0xce, 0x55, 0x20, 0x09, 0xc3, 0xa3, + 0xc0, 0x1b, 0x4a, 0xba, 0x35, 0x2e, 0xf5, 0x50, 0x60, 0x46, 0xa9, 0x3e, 0x5d, 0x79, 0xf8, 0x68, + 0xbb, 0x72, 0x75, 0xf0, 0xae, 0x4c, 0x5e, 0x85, 0xb3, 0x9c, 0x95, 0x94, 0x4f, 0x12, 0x58, 0xe8, + 0x3d, 0x1f, 0x90, 0xc0, 0x67, 0xb1, 0x1f, 0x21, 0xf6, 0xc7, 0x60, 0xed, 0x63, 0x78, 0xd4, 0x64, + 0xcc, 0x75, 0xbb, 0xbf, 0x4e, 0x34, 0x97, 0x41, 0x83, 0x99, 0x25, 0x59, 0x17, 0x0b, 0x58, 0x37, + 0xd4, 0x37, 0x6c, 0x6a, 0x4a, 0xb7, 0xce, 0xb0, 0x8b, 0xad, 0x2d, 0x37, 0x65, 0x0e, 0xc6, 0xa8, + 0xb2, 0xd4, 0x94, 0x91, 0x43, 0xaa, 0x29, 0x8b, 0xdc, 0x4a, 0xbf, 0x99, 0xd0, 0x86, 0xa4, 0xae, + 0x13, 0x3a, 0xea, 0xce, 0xa5, 0x09, 0xb0, 0xb7, 0x0c, 0xd7, 0x12, 0x0d, 0xcf, 0xea, 0x04, 0x7e, + 0x12, 0x6b, 0x2c, 0xa5, 0x25, 0x66, 0xd0, 0x60, 0x66, 0x49, 0xa6, 0x9f, 0x6f, 0x51, 0xdd, 0x0e, + 0xb6, 0x92, 0x80, 0x27, 0x92, 0xfa, 0xf9, 0x95, 0x5e, 0x12, 0xcc, 0x2a, 0x97, 0xb9, 0x20, 0x8d, + 0x3f, 0x98, 0x6a, 0xd5, 0x97, 0x4b, 0x70, 0x76, 0x91, 0x06, 0xa1, 0x5f, 0xcd, 0xfb, 0x66, 0x94, + 0x77, 0xc1, 0x8c, 0xf2, 0x8d, 0x0a, 0x9c, 0x5a, 0xa4, 0x41, 0x8f, 0x36, 0xf6, 0xdf, 0x54, 0xfc, + 0x2b, 0x70, 0x2a, 0x72, 0xe5, 0x6a, 0x06, 0xae, 0x27, 0xd6, 0xf2, 0xd4, 0x6e, 0xb9, 0xd9, 0x4b, + 0x82, 0x59, 0xe5, 0xc8, 0xa7, 0xe1, 0x11, 0xbe, 0xd4, 0x3b, 0x2d, 0x61, 0x9f, 0x15, 0xc6, 0x84, + 0xd8, 0x35, 0x81, 0x49, 0x09, 0xf9, 0x48, 0x33, 0x9b, 0x0c, 
0xfb, 0x95, 0x27, 0x5f, 0x80, 0x91, + 0x8e, 0xd5, 0xa1, 0xb6, 0xe5, 0x70, 0xfd, 0x2c, 0xb7, 0x4b, 0xc8, 0x6a, 0x0c, 0x2c, 0xda, 0xc0, + 0xc5, 0x53, 0x31, 0xc1, 0x30, 0xb3, 0xa7, 0x56, 0x8f, 0xb1, 0xa7, 0xfe, 0x7b, 0x11, 0x86, 0x17, + 0x3d, 0xb7, 0xdb, 0x69, 0xec, 0x92, 0x16, 0x0c, 0xdd, 0xe2, 0x87, 0x67, 0xf2, 0x68, 0x6a, 0x70, + 0x77, 0x68, 0x71, 0x06, 0x17, 0xa9, 0x44, 0xe2, 0x1d, 0x25, 0x3c, 0xeb, 0xc4, 0xdb, 0x74, 0x97, + 0x9a, 0xf2, 0x0c, 0x2d, 0xec, 0xc4, 0xd7, 0x58, 0x22, 0x8a, 0x3c, 0xd2, 0x86, 0x13, 0xba, 0x6d, + 0xbb, 0xb7, 0xa8, 0xb9, 0xac, 0x07, 0xd4, 0xa1, 0xbe, 0x3a, 0x92, 0x3c, 0xac, 0x59, 0x9a, 0x9f, + 0xeb, 0xcf, 0x26, 0xa1, 0x30, 0x8d, 0x4d, 0x5e, 0x83, 0x61, 0x3f, 0x70, 0x3d, 0xa5, 0x6c, 0xd5, + 0x67, 0xe6, 0x06, 0x6f, 0xf4, 0xc6, 0xa7, 0x9a, 0x02, 0x4a, 0xd8, 0xec, 0xe5, 0x0b, 0x2a, 0x06, + 0xda, 0xd7, 0x0b, 0x00, 0x57, 0xd6, 0xd6, 0x56, 0xe5, 0xf1, 0x82, 0x09, 0x65, 0xbd, 0x1b, 0x1e, + 0x54, 0x0e, 0x7e, 0x20, 0x98, 0xf0, 0x87, 0x94, 0x67, 0x78, 0xdd, 0x60, 0x0b, 0x39, 0x3a, 0xf9, + 0x20, 0x0c, 0x4b, 0x05, 0x59, 0x8a, 0x3d, 0x74, 0x2d, 0x90, 0x4a, 0x34, 0xaa, 0x7c, 0xed, 0xb7, + 0x8b, 0x00, 0x4b, 0xa6, 0x4d, 0x9b, 0xca, 0x83, 0xbd, 0x16, 0x6c, 0x79, 0xd4, 0xdf, 0x72, 0x6d, + 0x73, 0xc0, 0xd3, 0x54, 0x6e, 0xf3, 0x5f, 0x53, 0x20, 0x18, 0xe1, 0x11, 0x13, 0x46, 0xfc, 0x80, + 0x76, 0x96, 0x9c, 0x80, 0x7a, 0x3b, 0xba, 0x3d, 0xe0, 0x21, 0xca, 0xb8, 0xb0, 0x8b, 0x44, 0x38, + 0x98, 0x40, 0x25, 0x3a, 0xd4, 0x2d, 0xc7, 0x10, 0x03, 0xa4, 0xb1, 0x3b, 0x60, 0x47, 0x3a, 0xc1, + 0x76, 0x1c, 0x4b, 0x11, 0x0c, 0xc6, 0x31, 0xb5, 0xef, 0x17, 0xe1, 0x0c, 0xe7, 0xc7, 0xaa, 0x91, + 0xf0, 0xc7, 0x24, 0xff, 0xbf, 0xe7, 0x1e, 0xdc, 0xff, 0x3e, 0x18, 0x6b, 0x71, 0x8d, 0x6a, 0x85, + 0x06, 0x7a, 0xa4, 0xcf, 0x45, 0x69, 0xb1, 0xcb, 0x6f, 0x5d, 0x28, 0xfb, 0x6c, 0xbe, 0x12, 0xd2, + 0x6b, 0x0e, 0xdc, 0x85, 0xb2, 0x3f, 0x80, 0xcf, 0x5e, 0xe1, 0xa9, 0x31, 0x9f, 0xb5, 0x38, 0x3b, + 0xf2, 0xb3, 0x30, 0xe4, 0x07, 0x7a, 0xd0, 0x55, 0x43, 0x73, 0xfd, 0xa8, 0x19, 0x73, 0xf0, 0x68, + 
0x1e, 0x11, 0xef, 0x28, 0x99, 0x6a, 0xdf, 0x2f, 0xc0, 0xb9, 0xec, 0x82, 0xcb, 0x96, 0x1f, 0x90, + 0xff, 0xd7, 0x23, 0xf6, 0x03, 0xb6, 0x38, 0x2b, 0xcd, 0x85, 0x1e, 0x3a, 0x64, 0xab, 0x94, 0x98, + 0xc8, 0x03, 0xa8, 0x58, 0x01, 0x6d, 0xab, 0xfd, 0xe5, 0x8d, 0x23, 0xfe, 0xf4, 0xd8, 0xd2, 0xce, + 0xb8, 0xa0, 0x60, 0xa6, 0xbd, 0x55, 0xec, 0xf7, 0xc9, 0x7c, 0xf9, 0xb0, 0x93, 0x3e, 0xbf, 0xd7, + 0xf2, 0xf9, 0xfc, 0x26, 0x2b, 0xd4, 0xeb, 0xfa, 0xfb, 0x33, 0xbd, 0xae, 0xbf, 0x37, 0xf2, 0xbb, + 0xfe, 0xa6, 0xc4, 0xd0, 0xd7, 0x03, 0xf8, 0x9d, 0x12, 0x9c, 0xbf, 0x5b, 0xb7, 0x61, 0xeb, 0x99, + 0xec, 0x9d, 0x79, 0xd7, 0xb3, 0xbb, 0xf7, 0x43, 0x32, 0x03, 0x95, 0xce, 0x96, 0xee, 0x2b, 0xa5, + 0x4c, 0x6d, 0x58, 0x2a, 0xab, 0x2c, 0xf1, 0x0e, 0x9b, 0x34, 0xb8, 0x32, 0xc7, 0x5f, 0x51, 0x90, + 0xb2, 0xe9, 0xb8, 0x4d, 0x7d, 0x3f, 0xb2, 0x09, 0x84, 0xd3, 0xf1, 0x8a, 0x48, 0x46, 0x95, 0x4f, + 0x02, 0x18, 0x12, 0x26, 0x66, 0xb9, 0x32, 0x0d, 0xee, 0xc8, 0x95, 0xe1, 0x26, 0x1e, 0x7d, 0x94, + 0x3c, 0xad, 0x90, 0xbc, 0xc8, 0x14, 0x94, 0x83, 0xc8, 0x69, 0x57, 0x6d, 0xcd, 0xcb, 0x19, 0xfa, + 0x29, 0xa7, 0x63, 0x1b, 0x7b, 0x77, 0x83, 0x1b, 0xd5, 0x4d, 0x79, 0x7e, 0x6e, 0xb9, 0x0e, 0x57, + 0xc8, 0x4a, 0xd1, 0xc6, 0xfe, 0x46, 0x0f, 0x05, 0x66, 0x94, 0xd2, 0xfe, 0xa6, 0x0a, 0x67, 0xb2, + 0xfb, 0x03, 0x93, 0xdb, 0x0e, 0xf5, 0x7c, 0x86, 0x5d, 0x48, 0xca, 0xed, 0xa6, 0x48, 0x46, 0x95, + 0xff, 0x9e, 0x76, 0x38, 0xfb, 0x46, 0x01, 0xce, 0x7a, 0xf2, 0x8c, 0xe8, 0x7e, 0x38, 0x9d, 0x3d, + 0x26, 0xcc, 0x19, 0x7d, 0x18, 0x62, 0xff, 0xba, 0x90, 0xdf, 0x2c, 0xc0, 0x44, 0x3b, 0x65, 0xe7, + 0x38, 0xc6, 0x0b, 0x63, 0xdc, 0x2b, 0x7e, 0xa5, 0x0f, 0x3f, 0xec, 0x5b, 0x13, 0xf2, 0x05, 0xa8, + 0x77, 0x58, 0xbf, 0xf0, 0x03, 0xea, 0x18, 0xea, 0xce, 0xd8, 0xe0, 0x23, 0x69, 0x35, 0xc2, 0x52, + 0xae, 0x68, 0x42, 0x3f, 0x88, 0x65, 0x60, 0x9c, 0xe3, 0x03, 0x7e, 0x43, 0xec, 0x12, 0x54, 0x7d, + 0x1a, 0x04, 0x96, 0xd3, 0x12, 0xfb, 0x8d, 0x9a, 0x18, 0x2b, 0x4d, 0x99, 0x86, 0x61, 0x2e, 0xf9, + 0x10, 0xd4, 0xf8, 0x91, 0xd3, 0xac, 
0xd7, 0xf2, 0x27, 0x6a, 0xdc, 0x5d, 0x6c, 0x54, 0x38, 0xc0, + 0xc9, 0x44, 0x8c, 0xf2, 0xc9, 0xd3, 0x30, 0xb2, 0xc1, 0x87, 0xaf, 0xbc, 0xce, 0x2b, 0x6c, 0x5c, + 0x5c, 0x5b, 0x6b, 0xc4, 0xd2, 0x31, 0x41, 0x45, 0x66, 0x00, 0x68, 0x78, 0x2e, 0x97, 0xb6, 0x67, + 0x45, 0x27, 0x76, 0x18, 0xa3, 0x22, 0x8f, 0x41, 0x29, 0xb0, 0x7d, 0x6e, 0xc3, 0xaa, 0x46, 0x5b, + 0xd0, 0xb5, 0xe5, 0x26, 0xb2, 0x74, 0xed, 0xa7, 0x05, 0x38, 0x91, 0xba, 0x5c, 0xc2, 0x8a, 0x74, + 0x3d, 0x5b, 0x4e, 0x23, 0x61, 0x91, 0x75, 0x5c, 0x46, 0x96, 0x4e, 0x5e, 0x95, 0x6a, 0x79, 0x31, + 0x67, 0xe4, 0x82, 0xeb, 0x7a, 0xe0, 0x33, 0x3d, 0xbc, 0x47, 0x23, 0xe7, 0xc7, 0x7c, 0x51, 0x7d, + 0xe4, 0x3a, 0x10, 0x3b, 0xe6, 0x8b, 0xf2, 0x30, 0x41, 0x99, 0x32, 0xf8, 0x95, 0x0f, 0x62, 0xf0, + 0xd3, 0xbe, 0x5a, 0x8c, 0x49, 0x40, 0x6a, 0xf6, 0xf7, 0x90, 0xc0, 0x93, 0x6c, 0x01, 0x0d, 0x17, + 0xf7, 0x5a, 0x7c, 0xfd, 0xe3, 0x8b, 0xb1, 0xcc, 0x25, 0x2f, 0x09, 0xd9, 0x97, 0x72, 0xde, 0x42, + 0x5d, 0x5b, 0x6e, 0x0a, 0xef, 0x2a, 0xd5, 0x6a, 0x61, 0x13, 0x94, 0x8f, 0xa9, 0x09, 0xb4, 0xbf, + 0x28, 0x41, 0xfd, 0xaa, 0xbb, 0xf1, 0x1e, 0xf1, 0xa0, 0xce, 0x5e, 0xa6, 0x8a, 0xef, 0xe2, 0x32, + 0xb5, 0x0e, 0x8f, 0x04, 0x81, 0xdd, 0xa4, 0x86, 0xeb, 0x98, 0xfe, 0xec, 0x66, 0x40, 0xbd, 0x05, + 0xcb, 0xb1, 0xfc, 0x2d, 0x6a, 0xca, 0xe3, 0xa4, 0x47, 0xf7, 0xf7, 0x26, 0x1f, 0x59, 0x5b, 0x5b, + 0xce, 0x22, 0xc1, 0x7e, 0x65, 0xf9, 0xb4, 0xa1, 0x1b, 0xdb, 0xee, 0xe6, 0x26, 0xbf, 0x29, 0x23, + 0x7d, 0x6e, 0xc4, 0xb4, 0x11, 0x4b, 0xc7, 0x04, 0x95, 0xf6, 0x76, 0x11, 0x6a, 0xe1, 0xcd, 0x77, + 0xf2, 0x04, 0x0c, 0x6f, 0x78, 0xee, 0x36, 0xf5, 0xc4, 0xc9, 0x9d, 0xbc, 0x29, 0xd3, 0x10, 0x49, + 0xa8, 0xf2, 0xc8, 0xe3, 0x50, 0x09, 0xdc, 0x8e, 0x65, 0xa4, 0x0d, 0x6a, 0x6b, 0x2c, 0x11, 0x45, + 0xde, 0xf1, 0x75, 0xf0, 0x27, 0x13, 0xaa, 0x5d, 0xad, 0xaf, 0x32, 0xf6, 0x0a, 0x94, 0x7d, 0xdd, + 0xb7, 0xe5, 0x7a, 0x9a, 0xe3, 0x12, 0xf9, 0x6c, 0x73, 0x59, 0x5e, 0x22, 0x9f, 0x6d, 0x2e, 0x23, + 0x07, 0xd5, 0x7e, 0x54, 0x84, 0xba, 0x90, 0x9b, 0x98, 0x15, 0x8e, 0x52, 
0x72, 0x2f, 0x70, 0x57, + 0x0a, 0xbf, 0xdb, 0xa6, 0x1e, 0x37, 0x33, 0xc9, 0x49, 0x2e, 0x7e, 0x3e, 0x10, 0x65, 0x86, 0xee, + 0x14, 0x51, 0x92, 0x12, 0x7d, 0xf9, 0x18, 0x45, 0x5f, 0x39, 0x90, 0xe8, 0x87, 0x8e, 0x43, 0xf4, + 0x6f, 0x16, 0xa1, 0xb6, 0x6c, 0x6d, 0x52, 0x63, 0xd7, 0xb0, 0xf9, 0x9d, 0x40, 0x93, 0xda, 0x34, + 0xa0, 0x8b, 0x9e, 0x6e, 0xd0, 0x55, 0xea, 0x59, 0x3c, 0x66, 0x0b, 0x1b, 0x1f, 0x7c, 0x06, 0x92, + 0x77, 0x02, 0xe7, 0xfb, 0xd0, 0x60, 0xdf, 0xd2, 0x64, 0x09, 0x46, 0x4c, 0xea, 0x5b, 0x1e, 0x35, + 0x57, 0x63, 0x1b, 0x95, 0x27, 0xd4, 0x52, 0x33, 0x1f, 0xcb, 0xbb, 0xb3, 0x37, 0x39, 0xaa, 0x0c, + 0x94, 0x62, 0xc7, 0x92, 0x28, 0xca, 0x86, 0x7c, 0x47, 0xef, 0xfa, 0x59, 0x75, 0x8c, 0x0d, 0xf9, + 0xd5, 0x6c, 0x12, 0xec, 0x57, 0x56, 0xab, 0x40, 0x69, 0xd9, 0x6d, 0x69, 0x6f, 0x95, 0x20, 0x0c, + 0xee, 0x43, 0x7e, 0xae, 0x00, 0x75, 0xdd, 0x71, 0xdc, 0x40, 0x06, 0xce, 0x11, 0x27, 0xf0, 0x98, + 0x3b, 0x86, 0xd0, 0xd4, 0x6c, 0x04, 0x2a, 0x0e, 0x6f, 0xc3, 0x03, 0xe5, 0x58, 0x0e, 0xc6, 0x79, + 0x93, 0x6e, 0xea, 0x3c, 0x79, 0x25, 0x7f, 0x2d, 0x0e, 0x70, 0x7a, 0x7c, 0xee, 0x93, 0x30, 0x9e, + 0xae, 0xec, 0x61, 0x8e, 0x83, 0x72, 0x1d, 0xcc, 0x17, 0x01, 0x22, 0x9f, 0x92, 0xfb, 0x60, 0xc4, + 0xb2, 0x12, 0x46, 0xac, 0xc5, 0xc1, 0x05, 0x1c, 0x56, 0xba, 0xaf, 0xe1, 0xea, 0xf5, 0x94, 0xe1, + 0x6a, 0xe9, 0x28, 0x98, 0xdd, 0xdd, 0x58, 0xf5, 0x5b, 0x05, 0x18, 0x8f, 0x88, 0xe5, 0x0d, 0xd9, + 0xe7, 0x60, 0xd4, 0xa3, 0xba, 0xd9, 0xd0, 0x03, 0x63, 0x8b, 0xbb, 0x7a, 0x17, 0xb8, 0x6f, 0xf6, + 0xc9, 0xfd, 0xbd, 0xc9, 0x51, 0x8c, 0x67, 0x60, 0x92, 0x8e, 0xe8, 0x50, 0x67, 0x09, 0x6b, 0x56, + 0x9b, 0xba, 0xdd, 0x60, 0x40, 0xab, 0x29, 0xdf, 0xb0, 0x60, 0x04, 0x83, 0x71, 0x4c, 0xed, 0x9d, + 0x02, 0x8c, 0xc5, 0x2b, 0x7c, 0xec, 0x16, 0xb5, 0xad, 0xa4, 0x45, 0x6d, 0xee, 0x08, 0xda, 0xa4, + 0x8f, 0x15, 0xed, 0xc7, 0xd5, 0xf8, 0xa7, 0x71, 0xcb, 0x59, 0xdc, 0x58, 0x50, 0xb8, 0xab, 0xb1, + 0xe0, 0xbd, 0x1f, 0x35, 0xa6, 0x9f, 0x96, 0x5b, 0x7e, 0x80, 0xb5, 0xdc, 0x77, 0x33, 0xf4, 0x4c, + 0x2c, 0x7c, 
0xca, 0x50, 0x8e, 0xf0, 0x29, 0xed, 0x30, 0x7c, 0xca, 0xf0, 0x91, 0x4d, 0x3a, 0x07, + 0x09, 0xa1, 0x52, 0xbd, 0xaf, 0x21, 0x54, 0x6a, 0xc7, 0x15, 0x42, 0x05, 0xf2, 0x86, 0x50, 0xf9, + 0x4a, 0x01, 0xc6, 0xcc, 0xc4, 0x8d, 0x59, 0x6e, 0x5b, 0xc8, 0xb3, 0xd4, 0x24, 0x2f, 0xe0, 0x8a, + 0x2b, 0x53, 0xc9, 0x34, 0x4c, 0xb1, 0xd4, 0x7e, 0x58, 0x8e, 0xaf, 0x03, 0xf7, 0xdb, 0x54, 0xfd, + 0x6c, 0xd2, 0x54, 0x7d, 0x31, 0x6d, 0xaa, 0x3e, 0x11, 0xf3, 0x22, 0x8d, 0x9b, 0xab, 0x3f, 0x1c, + 0x9b, 0x1e, 0xd9, 0x9c, 0x34, 0x1a, 0x49, 0x3a, 0x63, 0x8a, 0xfc, 0x30, 0x54, 0x7d, 0x15, 0x86, + 0x51, 0x6c, 0x6c, 0xa2, 0x76, 0x51, 0x21, 0x12, 0x43, 0x0a, 0xa6, 0x89, 0x7b, 0x54, 0xf7, 0x5d, + 0x27, 0xad, 0x89, 0x23, 0x4f, 0x45, 0x99, 0x1b, 0x37, 0x99, 0x0f, 0xdd, 0xc3, 0x64, 0xae, 0x43, + 0xdd, 0xd6, 0xfd, 0x60, 0xbd, 0x63, 0xea, 0x01, 0x35, 0xe5, 0x78, 0xfb, 0x5f, 0x07, 0x5b, 0xab, + 0xd8, 0xfa, 0x17, 0x29, 0x84, 0xcb, 0x11, 0x0c, 0xc6, 0x31, 0x89, 0x09, 0x23, 0xec, 0x95, 0x8f, + 0x06, 0x73, 0x56, 0x85, 0x00, 0x38, 0x0c, 0x8f, 0xd0, 0xd2, 0xb3, 0x1c, 0xc3, 0xc1, 0x04, 0x6a, + 0x1f, 0xab, 0x7a, 0x6d, 0x20, 0xab, 0xfa, 0x57, 0x6a, 0x50, 0xbf, 0xae, 0x07, 0xd6, 0x0e, 0xe5, + 0xa7, 0x38, 0xc7, 0x63, 0x4a, 0xff, 0xd5, 0x02, 0x9c, 0x49, 0xba, 0xea, 0x1d, 0xa3, 0x3d, 0x9d, + 0x07, 0xfe, 0xc0, 0x4c, 0x6e, 0xd8, 0xa7, 0x16, 0xdc, 0xb2, 0xde, 0xe3, 0xf9, 0x77, 0xdc, 0x96, + 0xf5, 0x66, 0x3f, 0x86, 0xd8, 0xbf, 0x2e, 0xef, 0x15, 0xcb, 0xfa, 0x83, 0x1d, 0x98, 0x2d, 0x65, + 0xf7, 0x1f, 0x7e, 0x60, 0xec, 0xfe, 0xd5, 0x07, 0x42, 0xd9, 0xea, 0xc4, 0xec, 0xfe, 0xb5, 0x9c, + 0xfe, 0x27, 0xd2, 0xbb, 0x5d, 0xa0, 0xf5, 0x3b, 0x3f, 0xe0, 0x17, 0xd3, 0x95, 0x3d, 0x96, 0xe9, + 0x28, 0x1b, 0xba, 0x6f, 0x19, 0x72, 0xd9, 0xcb, 0x11, 0x88, 0x52, 0x45, 0xec, 0x12, 0xc7, 0xd4, + 0xfc, 0x15, 0x05, 0x76, 0x14, 0x19, 0xac, 0x98, 0x2b, 0x32, 0x18, 0x99, 0x83, 0xb2, 0xc3, 0x76, + 0xcf, 0xa5, 0x43, 0xc7, 0x02, 0xbb, 0x7e, 0x8d, 0xee, 0x22, 0x2f, 0xac, 0xbd, 0x5d, 0x04, 0x60, + 0x9f, 0x7f, 0x30, 0x0b, 0xfc, 0x07, 0x61, 0xd8, 
0xef, 0xf2, 0xbd, 0xb2, 0x5c, 0xb0, 0x23, 0xa7, + 0x1d, 0x91, 0x8c, 0x2a, 0x9f, 0x3c, 0x0e, 0x95, 0xd7, 0xbb, 0xb4, 0xab, 0x8e, 0x93, 0x43, 0x75, + 0xed, 0x53, 0x2c, 0x11, 0x45, 0xde, 0xf1, 0x59, 0xd3, 0x94, 0xa5, 0xbe, 0x72, 0x5c, 0x96, 0xfa, + 0x1a, 0x0c, 0x5f, 0x77, 0xb9, 0x0f, 0xa0, 0xf6, 0xaf, 0x45, 0x80, 0xc8, 0xc7, 0x8a, 0x7c, 0xbd, + 0x00, 0x0f, 0x87, 0x03, 0x2e, 0x10, 0x5a, 0xf7, 0x9c, 0xad, 0x5b, 0xed, 0xdc, 0x56, 0xfb, 0xac, + 0xc1, 0xce, 0x67, 0xa0, 0xd5, 0x2c, 0x76, 0x98, 0x5d, 0x0b, 0x82, 0x50, 0xa5, 0xed, 0x4e, 0xb0, + 0x3b, 0x6f, 0x79, 0xb2, 0x07, 0x66, 0xba, 0xf2, 0x5d, 0x96, 0x34, 0xa2, 0xa8, 0xdc, 0x1a, 0xf2, + 0x41, 0xa4, 0x72, 0x30, 0xc4, 0x21, 0x5b, 0x50, 0x75, 0xdc, 0x57, 0x7d, 0x26, 0x0e, 0xd9, 0x1d, + 0x5f, 0x1c, 0x5c, 0xe4, 0x42, 0xac, 0xc2, 0xca, 0x2b, 0x5f, 0x70, 0xd8, 0x91, 0xc2, 0xfe, 0x5a, + 0x11, 0x4e, 0x65, 0xc8, 0x81, 0xbc, 0x08, 0xe3, 0xd2, 0x9d, 0x2d, 0x0a, 0x4f, 0x5c, 0x88, 0xc2, + 0x13, 0x37, 0x53, 0x79, 0xd8, 0x43, 0x4d, 0x5e, 0x05, 0xd0, 0x0d, 0x83, 0xfa, 0xfe, 0x8a, 0x6b, + 0x2a, 0x7d, 0xf4, 0x85, 0xfd, 0xbd, 0x49, 0x98, 0x0d, 0x53, 0xef, 0xec, 0x4d, 0x7e, 0x24, 0xcb, + 0x43, 0x35, 0x25, 0xe7, 0xa8, 0x00, 0xc6, 0x20, 0xc9, 0xe7, 0x00, 0xc4, 0xd6, 0x2b, 0xbc, 0x44, + 0x7f, 0x0f, 0x7b, 0xc5, 0x94, 0x0a, 0x57, 0x34, 0xf5, 0xa9, 0xae, 0xee, 0x04, 0x56, 0xb0, 0x2b, + 0x62, 0x96, 0xdc, 0x0c, 0x51, 0x30, 0x86, 0xa8, 0xfd, 0x69, 0x11, 0xaa, 0xca, 0x52, 0x7a, 0x1f, + 0xcc, 0x63, 0xad, 0x84, 0x79, 0xec, 0x88, 0x7c, 0x52, 0xb3, 0x8c, 0x63, 0x6e, 0xca, 0x38, 0xb6, + 0x98, 0x9f, 0xd5, 0xdd, 0x4d, 0x63, 0xdf, 0x2a, 0xc2, 0x98, 0x22, 0xcd, 0x6b, 0x18, 0xfb, 0x04, + 0x9c, 0x10, 0x67, 0xc9, 0x2b, 0xfa, 0x6d, 0x11, 0xbe, 0x85, 0x0b, 0xac, 0x2c, 0xdc, 0x40, 0x1b, + 0xc9, 0x2c, 0x4c, 0xd3, 0xb2, 0x6e, 0x2d, 0x92, 0xd6, 0xd9, 0x3e, 0x42, 0x9c, 0x3e, 0x89, 0xfd, + 0x0e, 0xef, 0xd6, 0x8d, 0x54, 0x1e, 0xf6, 0x50, 0xa7, 0x2d, 0x73, 0xe5, 0x63, 0xb0, 0xcc, 0xfd, + 0x6d, 0x01, 0x46, 0x22, 0x79, 0x1d, 0xbb, 0x5d, 0x6e, 0x33, 0x69, 0x97, 0x9b, 0xcd, 
0xdd, 0x1d, + 0xfa, 0x58, 0xe5, 0x7e, 0x71, 0x18, 0x12, 0xae, 0xd1, 0x64, 0x03, 0xce, 0x59, 0x99, 0x0e, 0x5e, + 0xb1, 0xd9, 0x26, 0xbc, 0xeb, 0xbb, 0xd4, 0x97, 0x12, 0xef, 0x82, 0x42, 0xba, 0x50, 0xdd, 0xa1, + 0x5e, 0x60, 0x19, 0x54, 0x7d, 0xdf, 0x62, 0x6e, 0x95, 0x4c, 0xda, 0x1e, 0x43, 0x99, 0xde, 0x94, + 0x0c, 0x30, 0x64, 0x45, 0x36, 0xa0, 0x42, 0xcd, 0x16, 0x55, 0x01, 0x75, 0x72, 0x86, 0xab, 0x0c, + 0xe5, 0xc9, 0xde, 0x7c, 0x14, 0xd0, 0xc4, 0x87, 0x9a, 0xad, 0xce, 0x96, 0x64, 0x3f, 0x1c, 0x5c, + 0xc1, 0x0a, 0x4f, 0xa9, 0xa2, 0xbb, 0xf6, 0x61, 0x12, 0x46, 0x7c, 0xc8, 0x76, 0x68, 0xe4, 0xaa, + 0x1c, 0xd1, 0xe4, 0x71, 0x17, 0x13, 0x97, 0x0f, 0xb5, 0x5b, 0x7a, 0x40, 0xbd, 0xb6, 0xee, 0x6d, + 0xcb, 0xdd, 0xc6, 0xe0, 0x5f, 0xf8, 0x92, 0x42, 0x8a, 0xbe, 0x30, 0x4c, 0xc2, 0x88, 0x0f, 0x71, + 0xa1, 0x16, 0x48, 0xf5, 0x59, 0x59, 0xf2, 0x06, 0x67, 0xaa, 0x14, 0x71, 0x5f, 0xba, 0x48, 0xab, + 0x57, 0x8c, 0x78, 0x90, 0x9d, 0x44, 0x28, 0x5f, 0x11, 0xc0, 0xb9, 0x91, 0xc3, 0x22, 0x2c, 0xa1, + 0xa2, 0xe5, 0x26, 0x3b, 0x24, 0xb0, 0xf6, 0x76, 0x25, 0x9a, 0x96, 0xef, 0xb7, 0x9d, 0xea, 0xe9, + 0xa4, 0x9d, 0xea, 0x42, 0xda, 0x4e, 0x95, 0x3a, 0xa2, 0x3c, 0xbc, 0x53, 0x65, 0xca, 0x42, 0x54, + 0x3e, 0x06, 0x0b, 0xd1, 0x53, 0x50, 0xdf, 0xe1, 0x33, 0x81, 0x88, 0xce, 0x53, 0xe1, 0xcb, 0x08, + 0x9f, 0xd9, 0x6f, 0x46, 0xc9, 0x18, 0xa7, 0x61, 0x45, 0x84, 0x06, 0x12, 0x85, 0x37, 0x95, 0x45, + 0x9a, 0x51, 0x32, 0xc6, 0x69, 0xb8, 0x3f, 0x96, 0xe5, 0x6c, 0x8b, 0x02, 0xc3, 0xbc, 0x80, 0xf0, + 0xc7, 0x52, 0x89, 0x18, 0xe5, 0x93, 0x4b, 0x50, 0xed, 0x9a, 0x9b, 0x82, 0xb6, 0xca, 0x69, 0xb9, + 0x86, 0xb9, 0x3e, 0xbf, 0x20, 0xa3, 0x05, 0xa9, 0x5c, 0x56, 0x93, 0xb6, 0xde, 0x51, 0x19, 0x7c, + 0x6f, 0x28, 0x6b, 0xb2, 0x12, 0x25, 0x63, 0x9c, 0x86, 0x7c, 0x0c, 0xc6, 0x3c, 0x6a, 0x76, 0x0d, + 0x1a, 0x96, 0x02, 0x5e, 0x8a, 0x5b, 0x45, 0x31, 0x91, 0x83, 0x29, 0xca, 0x3e, 0x76, 0xae, 0xfa, + 0x40, 0x76, 0xae, 0xef, 0x15, 0x80, 0xf4, 0xfa, 0x2f, 0x93, 0x2d, 0x18, 0x72, 0xb8, 0xf5, 0x2b, + 0x77, 0x40, 0xe4, 0x98, 
0x11, 0x4d, 0x4c, 0x4b, 0x32, 0x41, 0xe2, 0x13, 0x07, 0xaa, 0xf4, 0x76, + 0x40, 0x3d, 0x27, 0xbc, 0xcf, 0x70, 0x34, 0xc1, 0x97, 0xc5, 0x6e, 0x40, 0x22, 0x63, 0xc8, 0x43, + 0xfb, 0x41, 0x11, 0xea, 0x31, 0xba, 0x7b, 0x6d, 0x2a, 0xf9, 0x95, 0x6a, 0x61, 0x74, 0x5a, 0xf7, + 0x6c, 0x39, 0xc2, 0x62, 0x57, 0xaa, 0x65, 0x16, 0x2e, 0x63, 0x9c, 0x8e, 0xcc, 0x00, 0xb4, 0x75, + 0x3f, 0xa0, 0x1e, 0x5f, 0x7d, 0x53, 0x17, 0x99, 0x57, 0xc2, 0x1c, 0x8c, 0x51, 0x91, 0x8b, 0x32, + 0x7c, 0x76, 0x39, 0x19, 0x78, 0xae, 0x4f, 0x6c, 0xec, 0xca, 0x11, 0xc4, 0xc6, 0x26, 0x2d, 0x18, + 0x57, 0xb5, 0x56, 0xb9, 0x87, 0x0b, 0x4b, 0x26, 0xf6, 0x2f, 0x29, 0x08, 0xec, 0x01, 0xd5, 0xde, + 0x2e, 0xc0, 0x68, 0xc2, 0xe4, 0x21, 0x42, 0xc6, 0x29, 0xef, 0xfb, 0x44, 0xc8, 0xb8, 0x98, 0xd3, + 0xfc, 0x93, 0x30, 0x24, 0x04, 0x94, 0x76, 0xaa, 0x13, 0x22, 0x44, 0x99, 0xcb, 0xe6, 0x32, 0x69, + 0x54, 0x4d, 0xcf, 0x65, 0xd2, 0xea, 0x8a, 0x2a, 0x5f, 0x98, 0xdb, 0x45, 0xed, 0x7a, 0xcd, 0xed, + 0x22, 0x1d, 0x43, 0x0a, 0xed, 0x87, 0x25, 0xe0, 0x2e, 0x28, 0xe4, 0x39, 0xa8, 0xb5, 0xa9, 0xb1, + 0xa5, 0x3b, 0x96, 0xaf, 0x42, 0x46, 0xb2, 0xdd, 0x6d, 0x6d, 0x45, 0x25, 0xde, 0x61, 0x00, 0xb3, + 0xcd, 0x65, 0xee, 0xe5, 0x1d, 0xd1, 0x12, 0x03, 0x86, 0x5a, 0xbe, 0xaf, 0x77, 0xac, 0xdc, 0x27, + 0xa0, 0x22, 0x44, 0x9f, 0x18, 0x44, 0xe2, 0x19, 0x25, 0x34, 0x31, 0xa0, 0xd2, 0xb1, 0x75, 0xcb, + 0xc9, 0xfd, 0x8f, 0x12, 0xf6, 0x05, 0xab, 0x0c, 0x49, 0x98, 0x74, 0xf8, 0x23, 0x0a, 0x6c, 0xd2, + 0x85, 0xba, 0x6f, 0x78, 0x7a, 0xdb, 0xdf, 0xd2, 0x67, 0x9e, 0x79, 0x36, 0xb7, 0x92, 0x14, 0xb1, + 0x12, 0x73, 0xf6, 0x1c, 0xce, 0xae, 0x34, 0xaf, 0xcc, 0xce, 0x3c, 0xf3, 0x2c, 0xc6, 0xf9, 0xc4, + 0xd9, 0x3e, 0xf3, 0xd4, 0x8c, 0xec, 0xf7, 0x47, 0xce, 0xf6, 0x99, 0xa7, 0x66, 0x30, 0xce, 0x47, + 0xfb, 0x8f, 0x02, 0xd4, 0x42, 0x5a, 0xb2, 0x0e, 0xc0, 0x46, 0xa0, 0x0c, 0xaa, 0x77, 0xa8, 0x00, + 0xf7, 0x7c, 0x57, 0xbc, 0x1e, 0x16, 0xc6, 0x18, 0x50, 0x46, 0xd4, 0xc1, 0xe2, 0x51, 0x47, 0x1d, + 0x9c, 0x86, 0xda, 0x96, 0xee, 0x98, 0xfe, 0x96, 0xbe, 0x2d, 
0x26, 0xa2, 0x58, 0x1c, 0xce, 0x2b, + 0x2a, 0x03, 0x23, 0x1a, 0xed, 0x8f, 0x87, 0x40, 0x1c, 0x5b, 0xb2, 0xa1, 0x62, 0x5a, 0xbe, 0xf0, + 0x9b, 0x2d, 0xf0, 0x92, 0xe1, 0x50, 0x99, 0x97, 0xe9, 0x18, 0x52, 0x90, 0xb3, 0x50, 0x6a, 0x5b, + 0x8e, 0x3c, 0xf1, 0xe0, 0x06, 0xaf, 0x15, 0xcb, 0x41, 0x96, 0xc6, 0xb3, 0xf4, 0xdb, 0xd2, 0xe5, + 0x49, 0x64, 0xe9, 0xb7, 0x91, 0xa5, 0xb1, 0x2d, 0xa8, 0xed, 0xba, 0xdb, 0x1b, 0xba, 0xb1, 0xad, + 0x3c, 0xa3, 0xca, 0x7c, 0x21, 0xe4, 0x5b, 0xd0, 0xe5, 0x64, 0x16, 0xa6, 0x69, 0xc9, 0x3a, 0x3c, + 0xf2, 0x06, 0xf5, 0x5c, 0x39, 0xca, 0x9b, 0x36, 0xa5, 0x1d, 0x05, 0x23, 0x54, 0x08, 0xee, 0x60, + 0xf5, 0x99, 0x6c, 0x12, 0xec, 0x57, 0x96, 0xbb, 0x6a, 0xea, 0x5e, 0x8b, 0x06, 0xab, 0x9e, 0x6b, + 0x50, 0xdf, 0xb7, 0x9c, 0x96, 0x82, 0x1d, 0x8a, 0x60, 0xd7, 0xb2, 0x49, 0xb0, 0x5f, 0x59, 0xf2, + 0x32, 0x4c, 0x88, 0x2c, 0xb1, 0xd8, 0xce, 0xee, 0xe8, 0x96, 0xad, 0x6f, 0x58, 0xb6, 0xfa, 0xe9, + 0xd6, 0xa8, 0x38, 0x57, 0x58, 0xeb, 0x43, 0x83, 0x7d, 0x4b, 0x93, 0xab, 0x30, 0xae, 0x4e, 0x95, + 0x56, 0xa9, 0xd7, 0x0c, 0x8f, 0xb2, 0x47, 0x1b, 0x17, 0xd8, 0x7e, 0x6f, 0x9e, 0x76, 0x3c, 0x6a, + 0x70, 0xad, 0x2b, 0x45, 0x85, 0x3d, 0xe5, 0x08, 0xc2, 0x19, 0x7e, 0x5e, 0xbd, 0xde, 0x99, 0x73, + 0x5d, 0xdb, 0x74, 0x6f, 0x39, 0xea, 0xdb, 0x85, 0x62, 0xc3, 0x0f, 0x92, 0x9a, 0x99, 0x14, 0xd8, + 0xa7, 0x24, 0xfb, 0x72, 0x9e, 0x33, 0xef, 0xde, 0x72, 0xd2, 0xa8, 0x10, 0x7d, 0x79, 0xb3, 0x0f, + 0x0d, 0xf6, 0x2d, 0x4d, 0x16, 0x80, 0xa4, 0xbf, 0x60, 0xbd, 0xc3, 0x95, 0xa1, 0xd1, 0xc6, 0x19, + 0x11, 0x1f, 0x23, 0x9d, 0x8b, 0x19, 0x25, 0xc8, 0x32, 0x9c, 0x4e, 0xa7, 0x32, 0x76, 0xdc, 0x49, + 0x7e, 0x54, 0x44, 0xc6, 0xc4, 0x8c, 0x7c, 0xcc, 0x2c, 0xa5, 0xfd, 0x49, 0x11, 0x46, 0x13, 0x17, + 0xaa, 0x1f, 0xb8, 0x8b, 0xab, 0x4c, 0x03, 0x6d, 0xfb, 0xad, 0xa5, 0xf9, 0x2b, 0x54, 0x37, 0xa9, + 0x77, 0x8d, 0xaa, 0xcb, 0xef, 0x7c, 0x52, 0x59, 0x49, 0xe4, 0x60, 0x8a, 0x92, 0x6c, 0x42, 0x45, + 0xd8, 0x53, 0xf3, 0xfe, 0x2a, 0x41, 0xc9, 0x88, 0x1b, 0x55, 0xf9, 0x92, 0x23, 0x4c, 0xaa, 0x02, + 
0x5e, 0x0b, 0x60, 0x24, 0x4e, 0xc1, 0x26, 0x92, 0x48, 0x59, 0x1b, 0x4e, 0x28, 0x6a, 0x4b, 0x50, + 0x0a, 0x82, 0x41, 0xaf, 0xc4, 0x0a, 0xfb, 0xfc, 0xda, 0x32, 0x32, 0x0c, 0x6d, 0x93, 0xb5, 0x9d, + 0xef, 0x5b, 0xae, 0x23, 0xe3, 0x23, 0xaf, 0xc3, 0x70, 0x20, 0x4d, 0x54, 0x83, 0x5d, 0xe9, 0xe5, + 0xe6, 0x62, 0x65, 0x9e, 0x52, 0x58, 0xda, 0xdf, 0x15, 0xa1, 0x16, 0x6e, 0x27, 0x0f, 0x10, 0x77, + 0xd8, 0x85, 0x5a, 0xe8, 0x6f, 0x93, 0xfb, 0xb7, 0x67, 0x91, 0x1b, 0x08, 0xdf, 0x01, 0x85, 0xaf, + 0x18, 0xf1, 0x88, 0xfb, 0xf2, 0x94, 0x72, 0xf8, 0xf2, 0x74, 0x60, 0x38, 0xf0, 0xac, 0x56, 0x4b, + 0xea, 0xb6, 0x79, 0x9c, 0x79, 0x42, 0x71, 0xad, 0x09, 0x40, 0x29, 0x59, 0xf1, 0x82, 0x8a, 0x8d, + 0xf6, 0x1a, 0x8c, 0xa7, 0x29, 0xb9, 0xe2, 0x67, 0x6c, 0x51, 0xb3, 0x6b, 0x2b, 0x19, 0x47, 0x8a, + 0x9f, 0x4c, 0xc7, 0x90, 0x82, 0x6d, 0xfe, 0x58, 0x33, 0xbd, 0xe1, 0x3a, 0x6a, 0x5b, 0xcd, 0x75, + 0xe8, 0x35, 0x99, 0x86, 0x61, 0xae, 0xf6, 0x2f, 0x25, 0x38, 0x1b, 0x19, 0x05, 0x56, 0x74, 0x47, + 0x6f, 0x1d, 0xe0, 0x5f, 0x57, 0xef, 0x5f, 0x92, 0x38, 0x6c, 0xf0, 0xf8, 0xd2, 0x03, 0x10, 0x3c, + 0xfe, 0x47, 0x05, 0xe0, 0xbe, 0x81, 0xe4, 0x0b, 0x30, 0xa2, 0xc7, 0x7e, 0x73, 0x28, 0x9b, 0xf3, + 0x72, 0xee, 0xe6, 0xe4, 0x2e, 0x88, 0xa1, 0xaf, 0x4b, 0x3c, 0x15, 0x13, 0x0c, 0x89, 0x0b, 0xd5, + 0x4d, 0xdd, 0xb6, 0x99, 0x2e, 0x94, 0xfb, 0x90, 0x23, 0xc1, 0x9c, 0x77, 0xf3, 0x05, 0x09, 0x8d, + 0x21, 0x13, 0xed, 0x9f, 0x0b, 0x30, 0xda, 0xb4, 0x2d, 0xd3, 0x72, 0x5a, 0xc7, 0x18, 0x35, 0xfe, + 0x06, 0x54, 0x7c, 0xdb, 0x32, 0xe9, 0x80, 0xf3, 0xb8, 0x58, 0x41, 0x18, 0x00, 0x0a, 0x9c, 0x64, + 0x18, 0xfa, 0xd2, 0x01, 0xc2, 0xd0, 0xff, 0x64, 0x08, 0xa4, 0x7f, 0x29, 0xe9, 0x42, 0xad, 0xa5, + 0xa2, 0x5b, 0xcb, 0x6f, 0xbc, 0x92, 0x23, 0x32, 0x5a, 0x22, 0x4e, 0xb6, 0x98, 0x75, 0xc3, 0x44, + 0x8c, 0x38, 0x11, 0x9a, 0xfc, 0xb3, 0xe5, 0x7c, 0xce, 0x3f, 0x5b, 0x0a, 0x76, 0xbd, 0xff, 0xb6, + 0xd4, 0xa1, 0xbc, 0x15, 0x04, 0x1d, 0x39, 0xae, 0x06, 0x77, 0x20, 0x8e, 0x82, 0x73, 0x08, 0x6d, + 0x84, 0xbd, 0x23, 0x87, 0x66, 0x2c, 
0x1c, 0x3d, 0xfc, 0xa1, 0xd2, 0x5c, 0xae, 0x83, 0xee, 0x38, + 0x0b, 0xf6, 0x8e, 0x1c, 0x9a, 0x7c, 0x1e, 0xea, 0x81, 0xa7, 0x3b, 0xfe, 0xa6, 0xeb, 0xb5, 0xa9, + 0x27, 0x77, 0x87, 0x0b, 0x39, 0x7e, 0xee, 0xb8, 0x16, 0xa1, 0x89, 0x13, 0xb4, 0x44, 0x12, 0xc6, + 0xb9, 0x91, 0x6d, 0xa8, 0x76, 0x4d, 0x51, 0x31, 0x69, 0x36, 0x99, 0xcd, 0xf3, 0xbf, 0xce, 0xd8, + 0x31, 0xb6, 0x7a, 0xc3, 0x90, 0x41, 0xf2, 0xdf, 0x61, 0xc3, 0x47, 0xf5, 0xef, 0xb0, 0x78, 0x6f, + 0xcc, 0x8a, 0x1c, 0x40, 0xda, 0x52, 0xa3, 0x74, 0x5a, 0xd2, 0x0b, 0x67, 0x21, 0xb7, 0xb2, 0x27, + 0x58, 0xd6, 0x43, 0xad, 0xd4, 0x69, 0xa1, 0xe2, 0xa1, 0xb5, 0x41, 0x5a, 0xb7, 0x89, 0x91, 0xf8, + 0xc3, 0x86, 0xb8, 0xce, 0x32, 0x7d, 0xb0, 0xf9, 0x20, 0xfc, 0xd5, 0x43, 0x2c, 0xc2, 0x6f, 0xe6, + 0xaf, 0x34, 0xb4, 0xbf, 0x2f, 0x42, 0x69, 0x6d, 0xb9, 0x29, 0xa2, 0xf6, 0xf1, 0xdf, 0xd7, 0xd0, + 0xe6, 0xb6, 0xd5, 0xb9, 0x49, 0x3d, 0x6b, 0x73, 0x57, 0x6e, 0x7a, 0x63, 0x51, 0xfb, 0xd2, 0x14, + 0x98, 0x51, 0x8a, 0xbc, 0x02, 0x23, 0x86, 0x3e, 0x47, 0xbd, 0x60, 0x90, 0x2d, 0x3d, 0xbf, 0xb7, + 0x37, 0x37, 0x1b, 0x15, 0xc7, 0x04, 0x18, 0x59, 0x07, 0x30, 0x22, 0xe8, 0xd2, 0xa1, 0x0d, 0x11, + 0x31, 0xe0, 0x18, 0x10, 0x41, 0xa8, 0x6d, 0x33, 0x52, 0x8e, 0x5a, 0x3e, 0x0c, 0x2a, 0xef, 0x39, + 0xd7, 0x54, 0x59, 0x8c, 0x60, 0x34, 0x07, 0x46, 0x13, 0xbf, 0xdd, 0x20, 0x1f, 0x85, 0xaa, 0xdb, + 0x89, 0x4d, 0xa7, 0x35, 0xee, 0xef, 0x57, 0xbd, 0x21, 0xd3, 0xee, 0xec, 0x4d, 0x8e, 0x2e, 0xbb, + 0x2d, 0xcb, 0x50, 0x09, 0x18, 0x92, 0x13, 0x0d, 0x86, 0xf8, 0x65, 0x1b, 0xf5, 0xd3, 0x0d, 0xbe, + 0x76, 0xf0, 0xb8, 0xf8, 0x3e, 0xca, 0x1c, 0xed, 0x8b, 0x65, 0x88, 0xce, 0x84, 0x88, 0x0f, 0x43, + 0xc2, 0x99, 0x58, 0xce, 0xdc, 0xc7, 0xea, 0xb7, 0x2c, 0x59, 0x91, 0x16, 0x94, 0x5e, 0x73, 0x37, + 0x72, 0x4f, 0xdc, 0xb1, 0x5b, 0xb6, 0xc2, 0x4a, 0x15, 0x4b, 0x40, 0xc6, 0x81, 0xfc, 0x5a, 0x01, + 0x4e, 0xfa, 0x69, 0xa5, 0x53, 0x76, 0x07, 0xcc, 0xaf, 0x5d, 0xa7, 0xd5, 0x58, 0xe9, 0x98, 0xd9, + 0x2f, 0x1b, 0x7b, 0xeb, 0xc2, 0xe4, 0x2f, 0x0e, 0x6b, 0x64, 0x77, 0x5a, 
0xcc, 0xf9, 0xab, 0xb8, + 0xa4, 0xfc, 0x93, 0x69, 0x28, 0x59, 0x69, 0x5f, 0x2e, 0x42, 0x3d, 0x36, 0x5b, 0xe7, 0xfe, 0x97, + 0xcb, 0xed, 0xd4, 0xbf, 0x5c, 0x56, 0x07, 0x3f, 0xbb, 0x8c, 0x6a, 0x75, 0xdc, 0xbf, 0x73, 0xf9, + 0xf3, 0x22, 0x94, 0xd6, 0xe7, 0x17, 0x92, 0xdb, 0xc5, 0xc2, 0x7d, 0xd8, 0x2e, 0x6e, 0xc1, 0xf0, + 0x46, 0xd7, 0xb2, 0x03, 0xcb, 0xc9, 0x1d, 0x07, 0x40, 0xfd, 0xfa, 0x46, 0x5e, 0xa7, 0x15, 0xa8, + 0xa8, 0xe0, 0x49, 0x0b, 0x86, 0x5b, 0x22, 0x10, 0x5b, 0x6e, 0x8f, 0x2e, 0x19, 0xd0, 0x4d, 0x30, + 0x92, 0x2f, 0xa8, 0xd0, 0xb5, 0x5d, 0x90, 0x3f, 0xcf, 0xbe, 0xef, 0xd2, 0xd4, 0x3e, 0x0f, 0xa1, + 0x16, 0x70, 0xff, 0x99, 0xff, 0x5b, 0x01, 0x92, 0x8a, 0xcf, 0xfd, 0xef, 0x4d, 0xdb, 0xe9, 0xde, + 0x34, 0x7f, 0x14, 0x83, 0x2f, 0xbb, 0x43, 0x69, 0x7f, 0x58, 0x84, 0xa1, 0xfb, 0x76, 0x77, 0x93, + 0x26, 0x9c, 0xd3, 0xe6, 0x72, 0x4e, 0x8c, 0x7d, 0x5d, 0xd3, 0xda, 0x29, 0xd7, 0xb4, 0xbc, 0x3f, + 0xeb, 0xbc, 0x87, 0x63, 0xda, 0x5f, 0x17, 0x40, 0x4e, 0xcb, 0x4b, 0x8e, 0x1f, 0xe8, 0x8e, 0xc1, + 0xff, 0x19, 0x2f, 0xd7, 0x80, 0xbc, 0x1e, 0x10, 0xd2, 0x4b, 0x48, 0x2c, 0xfb, 0xfc, 0x59, 0xcd, + 0xf9, 0xe4, 0xc3, 0x50, 0xdd, 0x72, 0xfd, 0x80, 0xcf, 0xf3, 0xc5, 0xa4, 0x5d, 0xe7, 0x8a, 0x4c, + 0xc7, 0x90, 0x22, 0x7d, 0x52, 0x58, 0xe9, 0x7f, 0x52, 0xa8, 0x7d, 0xb3, 0x08, 0x23, 0xef, 0x95, + 0x0b, 0xa8, 0x59, 0xae, 0x7c, 0xa5, 0x9c, 0xae, 0x7c, 0xe5, 0xc3, 0xb8, 0xf2, 0x69, 0xdf, 0x29, + 0x00, 0xdc, 0xb7, 0xdb, 0xaf, 0x66, 0xd2, 0xcb, 0x2e, 0x77, 0xbf, 0xca, 0xf6, 0xb1, 0xfb, 0xbd, + 0x8a, 0xfa, 0x24, 0xee, 0x61, 0xf7, 0x66, 0x01, 0xc6, 0xf4, 0x84, 0xd7, 0x5a, 0x6e, 0xd5, 0x32, + 0xe5, 0x04, 0x17, 0xde, 0xf4, 0x4b, 0xa6, 0x63, 0x8a, 0x2d, 0x79, 0x3e, 0x8a, 0xbc, 0x7a, 0x3d, + 0xea, 0xf6, 0x3d, 0x21, 0x53, 0xb9, 0x9a, 0x93, 0xa0, 0xbc, 0x87, 0x97, 0x60, 0xe9, 0x48, 0xbc, + 0x04, 0xe3, 0xf7, 0x9f, 0xca, 0x77, 0xbd, 0xff, 0xb4, 0x03, 0xb5, 0x4d, 0xcf, 0x6d, 0x73, 0x47, + 0x3c, 0xf9, 0x9b, 0xcf, 0xcb, 0x39, 0xd6, 0x94, 0xe8, 0x07, 0xd7, 0x91, 0x8d, 0x67, 0x41, 0xe1, + 0x63, 0xc4, 
0x8a, 0x1b, 0xa4, 0x5d, 0xc1, 0x75, 0xe8, 0x28, 0xb9, 0x86, 0x73, 0xc9, 0x9a, 0x40, + 0x47, 0xc5, 0x26, 0xe9, 0x7c, 0x37, 0x7c, 0x7f, 0x9c, 0xef, 0xb4, 0x5f, 0x28, 0xab, 0x09, 0xec, + 0x81, 0x0b, 0xf2, 0xf7, 0xde, 0xbf, 0x35, 0x99, 0xbe, 0xd2, 0x38, 0x7c, 0x1f, 0xaf, 0x34, 0x56, + 0x07, 0x72, 0xf5, 0xda, 0x2b, 0x41, 0x6a, 0xdf, 0xf4, 0xfe, 0xe9, 0xc4, 0x7f, 0xa9, 0xd3, 0x89, + 0xb7, 0x8a, 0x10, 0x4d, 0x04, 0x87, 0xf4, 0xde, 0x78, 0x19, 0xaa, 0x6d, 0xfd, 0xf6, 0x3c, 0xb5, + 0xf5, 0xdd, 0x3c, 0xff, 0x66, 0x5c, 0x91, 0x18, 0x18, 0xa2, 0x11, 0x1f, 0xc0, 0x0a, 0xe3, 0x23, + 0xe7, 0xb6, 0x36, 0x47, 0xa1, 0x96, 0x85, 0x3d, 0x2b, 0x7a, 0xc7, 0x18, 0x1b, 0xed, 0xaf, 0x8a, + 0x20, 0x03, 0x69, 0x13, 0x0a, 0x95, 0x4d, 0xeb, 0x36, 0x35, 0x73, 0x7b, 0x32, 0xc6, 0xfe, 0x98, + 0x2b, 0xcc, 0xe9, 0x3c, 0x01, 0x05, 0x3a, 0xb7, 0x93, 0x8a, 0xe3, 0x11, 0x29, 0xbf, 0x1c, 0x76, + 0xd2, 0xf8, 0x31, 0x8b, 0xb4, 0x93, 0x8a, 0x24, 0x54, 0x3c, 0x84, 0x59, 0x96, 0x9f, 0x51, 0x4b, + 0x91, 0xe6, 0x31, 0xcb, 0xc6, 0xce, 0xba, 0x95, 0x59, 0xd6, 0x17, 0x77, 0x9a, 0x25, 0x8f, 0xc6, + 0x67, 0xbf, 0xfd, 0xdd, 0x0b, 0x0f, 0x7d, 0xe7, 0xbb, 0x17, 0x1e, 0x7a, 0xe7, 0xbb, 0x17, 0x1e, + 0xfa, 0xe2, 0xfe, 0x85, 0xc2, 0xb7, 0xf7, 0x2f, 0x14, 0xbe, 0xb3, 0x7f, 0xa1, 0xf0, 0xce, 0xfe, + 0x85, 0xc2, 0x3f, 0xee, 0x5f, 0x28, 0xfc, 0xf2, 0x3f, 0x5d, 0x78, 0xe8, 0x33, 0xcf, 0x45, 0x55, + 0x98, 0x56, 0x55, 0x98, 0x56, 0x0c, 0xa7, 0x3b, 0xdb, 0xad, 0x69, 0x56, 0x85, 0x28, 0x45, 0x55, + 0xe1, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x9b, 0x9c, 0x8e, 0xe2, 0xbf, 0x92, 0x00, 0x00, } func (m *AbstractPodTemplate) Marshal() (dAtA []byte, err error) { @@ -7436,39 +7437,44 @@ func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ReplicasPerScaleDown != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ReplicasPerScaleDown)) + i-- + dAtA[i] = 0x60 + } + if m.ReplicasPerScaleUp != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ReplicasPerScaleUp)) + i-- + dAtA[i] = 0x58 + } if 
m.ScaleDownCooldownSeconds != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.ScaleDownCooldownSeconds)) i-- - dAtA[i] = 0x58 + dAtA[i] = 0x50 } if m.ScaleUpCooldownSeconds != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.ScaleUpCooldownSeconds)) i-- - dAtA[i] = 0x50 + dAtA[i] = 0x48 } - if m.ReplicasPerScale != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.ReplicasPerScale)) + if m.DeprecatedReplicasPerScale != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.DeprecatedReplicasPerScale)) i-- - dAtA[i] = 0x48 + dAtA[i] = 0x40 } if m.TargetBufferAvailability != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetBufferAvailability)) i-- - dAtA[i] = 0x40 + dAtA[i] = 0x38 } if m.TargetProcessingSeconds != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetProcessingSeconds)) i-- - dAtA[i] = 0x38 + dAtA[i] = 0x30 } if m.ZeroReplicaSleepSeconds != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.ZeroReplicaSleepSeconds)) i-- - dAtA[i] = 0x30 - } - if m.DeprecatedCooldownSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.DeprecatedCooldownSeconds)) - i-- dAtA[i] = 0x28 } if m.LookbackSeconds != nil { @@ -10571,9 +10577,6 @@ func (m *Scale) Size() (n int) { if m.LookbackSeconds != nil { n += 1 + sovGenerated(uint64(*m.LookbackSeconds)) } - if m.DeprecatedCooldownSeconds != nil { - n += 1 + sovGenerated(uint64(*m.DeprecatedCooldownSeconds)) - } if m.ZeroReplicaSleepSeconds != nil { n += 1 + sovGenerated(uint64(*m.ZeroReplicaSleepSeconds)) } @@ -10583,8 +10586,8 @@ func (m *Scale) Size() (n int) { if m.TargetBufferAvailability != nil { n += 1 + sovGenerated(uint64(*m.TargetBufferAvailability)) } - if m.ReplicasPerScale != nil { - n += 1 + sovGenerated(uint64(*m.ReplicasPerScale)) + if m.DeprecatedReplicasPerScale != nil { + n += 1 + sovGenerated(uint64(*m.DeprecatedReplicasPerScale)) } if m.ScaleUpCooldownSeconds != nil { n += 1 + sovGenerated(uint64(*m.ScaleUpCooldownSeconds)) @@ -10592,6 +10595,12 @@ func (m *Scale) Size() (n 
int) { if m.ScaleDownCooldownSeconds != nil { n += 1 + sovGenerated(uint64(*m.ScaleDownCooldownSeconds)) } + if m.ReplicasPerScaleUp != nil { + n += 1 + sovGenerated(uint64(*m.ReplicasPerScaleUp)) + } + if m.ReplicasPerScaleDown != nil { + n += 1 + sovGenerated(uint64(*m.ReplicasPerScaleDown)) + } return n } @@ -12226,13 +12235,14 @@ func (this *Scale) String() string { `Min:` + valueToStringGenerated(this.Min) + `,`, `Max:` + valueToStringGenerated(this.Max) + `,`, `LookbackSeconds:` + valueToStringGenerated(this.LookbackSeconds) + `,`, - `DeprecatedCooldownSeconds:` + valueToStringGenerated(this.DeprecatedCooldownSeconds) + `,`, `ZeroReplicaSleepSeconds:` + valueToStringGenerated(this.ZeroReplicaSleepSeconds) + `,`, `TargetProcessingSeconds:` + valueToStringGenerated(this.TargetProcessingSeconds) + `,`, `TargetBufferAvailability:` + valueToStringGenerated(this.TargetBufferAvailability) + `,`, - `ReplicasPerScale:` + valueToStringGenerated(this.ReplicasPerScale) + `,`, + `DeprecatedReplicasPerScale:` + valueToStringGenerated(this.DeprecatedReplicasPerScale) + `,`, `ScaleUpCooldownSeconds:` + valueToStringGenerated(this.ScaleUpCooldownSeconds) + `,`, `ScaleDownCooldownSeconds:` + valueToStringGenerated(this.ScaleDownCooldownSeconds) + `,`, + `ReplicasPerScaleUp:` + valueToStringGenerated(this.ReplicasPerScaleUp) + `,`, + `ReplicasPerScaleDown:` + valueToStringGenerated(this.ReplicasPerScaleDown) + `,`, `}`, }, "") return s @@ -26397,7 +26407,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { m.LookbackSeconds = &v case 5: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedCooldownSeconds", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ZeroReplicaSleepSeconds", wireType) } var v uint32 for shift := uint(0); ; shift += 7 { @@ -26414,10 +26424,10 @@ func (m *Scale) Unmarshal(dAtA []byte) error { break } } - m.DeprecatedCooldownSeconds = &v + m.ZeroReplicaSleepSeconds = &v case 6: if wireType != 0 { - return 
fmt.Errorf("proto: wrong wireType = %d for field ZeroReplicaSleepSeconds", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TargetProcessingSeconds", wireType) } var v uint32 for shift := uint(0); ; shift += 7 { @@ -26434,10 +26444,10 @@ func (m *Scale) Unmarshal(dAtA []byte) error { break } } - m.ZeroReplicaSleepSeconds = &v + m.TargetProcessingSeconds = &v case 7: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetProcessingSeconds", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TargetBufferAvailability", wireType) } var v uint32 for shift := uint(0); ; shift += 7 { @@ -26454,10 +26464,10 @@ func (m *Scale) Unmarshal(dAtA []byte) error { break } } - m.TargetProcessingSeconds = &v + m.TargetBufferAvailability = &v case 8: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetBufferAvailability", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedReplicasPerScale", wireType) } var v uint32 for shift := uint(0); ; shift += 7 { @@ -26474,10 +26484,10 @@ func (m *Scale) Unmarshal(dAtA []byte) error { break } } - m.TargetBufferAvailability = &v + m.DeprecatedReplicasPerScale = &v case 9: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReplicasPerScale", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ScaleUpCooldownSeconds", wireType) } var v uint32 for shift := uint(0); ; shift += 7 { @@ -26494,10 +26504,10 @@ func (m *Scale) Unmarshal(dAtA []byte) error { break } } - m.ReplicasPerScale = &v + m.ScaleUpCooldownSeconds = &v case 10: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ScaleUpCooldownSeconds", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ScaleDownCooldownSeconds", wireType) } var v uint32 for shift := uint(0); ; shift += 7 { @@ -26514,10 +26524,10 @@ func (m *Scale) Unmarshal(dAtA []byte) error { break } } - m.ScaleUpCooldownSeconds 
= &v + m.ScaleDownCooldownSeconds = &v case 11: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ScaleDownCooldownSeconds", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ReplicasPerScaleUp", wireType) } var v uint32 for shift := uint(0); ; shift += 7 { @@ -26534,7 +26544,27 @@ func (m *Scale) Unmarshal(dAtA []byte) error { break } } - m.ScaleDownCooldownSeconds = &v + m.ReplicasPerScaleUp = &v + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReplicasPerScaleDown", wireType) + } + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReplicasPerScaleDown = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/pkg/apis/numaflow/v1alpha1/generated.proto b/pkg/apis/numaflow/v1alpha1/generated.proto index 8f12a27eca..588b26c9af 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.proto +++ b/pkg/apis/numaflow/v1alpha1/generated.proto @@ -1264,41 +1264,47 @@ message Scale { // +optional optional uint32 lookbackSeconds = 4; - // Deprecated: Use scaleUpCooldownSeconds and scaleDownCooldownSeconds instead. - // Cooldown seconds after a scaling operation before another one. - // +optional - optional uint32 cooldownSeconds = 5; - // After scaling down the source vertex to 0, sleep how many seconds before scaling the source vertex back up to peek. // +optional - optional uint32 zeroReplicaSleepSeconds = 6; + optional uint32 zeroReplicaSleepSeconds = 5; // TargetProcessingSeconds is used to tune the aggressiveness of autoscaling for source vertices, it measures how fast // you want the vertex to process all the pending messages. Typically increasing the value, which leads to lower processing // rate, thus less replicas. It's only effective for source vertices. 
// +optional - optional uint32 targetProcessingSeconds = 7; + optional uint32 targetProcessingSeconds = 6; // TargetBufferAvailability is used to define the target percentage of the buffer availability. // A valid and meaningful value should be less than the BufferUsageLimit defined in the Edge spec (or Pipeline spec), for example, 50. // It only applies to UDF and Sink vertices because only they have buffers to read. // +optional - optional uint32 targetBufferAvailability = 8; + optional uint32 targetBufferAvailability = 7; - // ReplicasPerScale defines maximum replicas can be scaled up or down at once. - // The is use to prevent too aggressive scaling operations + // DeprecatedReplicasPerScale defines the number of maximum replicas that can be changed in a single scale up or down operation. + // The is use to prevent from too aggressive scaling operations + // Deprecated: Use ReplicasPerScaleUp and ReplicasPerScaleDown instead // +optional - optional uint32 replicasPerScale = 9; + optional uint32 replicasPerScale = 8; // ScaleUpCooldownSeconds defines the cooldown seconds after a scaling operation, before a follow-up scaling up. // It defaults to the CooldownSeconds if not set. // +optional - optional uint32 scaleUpCooldownSeconds = 10; + optional uint32 scaleUpCooldownSeconds = 9; // ScaleDownCooldownSeconds defines the cooldown seconds after a scaling operation, before a follow-up scaling down. // It defaults to the CooldownSeconds if not set. // +optional - optional uint32 scaleDownCooldownSeconds = 11; + optional uint32 scaleDownCooldownSeconds = 10; + + // ReplicasPerScaleUp defines the number of maximum replicas that can be changed in a single scaled up operation. + // The is use to prevent from too aggressive scaling up operations + // +optional + optional uint32 replicasPerScaleUp = 11; + + // ReplicasPerScaleDown defines the number of maximum replicas that can be changed in a single scaled down operation. 
+ // The is use to prevent from too aggressive scaling down operations + // +optional + optional uint32 replicasPerScaleDown = 12; } // ServingSource is the HTTP endpoint for Numaflow. diff --git a/pkg/apis/numaflow/v1alpha1/openapi_generated.go b/pkg/apis/numaflow/v1alpha1/openapi_generated.go index 79d63b30c4..7189543f3b 100644 --- a/pkg/apis/numaflow/v1alpha1/openapi_generated.go +++ b/pkg/apis/numaflow/v1alpha1/openapi_generated.go @@ -4167,13 +4167,6 @@ func schema_pkg_apis_numaflow_v1alpha1_Scale(ref common.ReferenceCallback) commo Format: "int64", }, }, - "cooldownSeconds": { - SchemaProps: spec.SchemaProps{ - Description: "Deprecated: Use scaleUpCooldownSeconds and scaleDownCooldownSeconds instead. Cooldown seconds after a scaling operation before another one.", - Type: []string{"integer"}, - Format: "int64", - }, - }, "zeroReplicaSleepSeconds": { SchemaProps: spec.SchemaProps{ Description: "After scaling down the source vertex to 0, sleep how many seconds before scaling the source vertex back up to peek.", @@ -4197,7 +4190,7 @@ func schema_pkg_apis_numaflow_v1alpha1_Scale(ref common.ReferenceCallback) commo }, "replicasPerScale": { SchemaProps: spec.SchemaProps{ - Description: "ReplicasPerScale defines maximum replicas can be scaled up or down at once. The is use to prevent too aggressive scaling operations", + Description: "DeprecatedReplicasPerScale defines the number of maximum replicas that can be changed in a single scale up or down operation. The is use to prevent from too aggressive scaling operations Deprecated: Use ReplicasPerScaleUp and ReplicasPerScaleDown instead", Type: []string{"integer"}, Format: "int64", }, @@ -4216,6 +4209,20 @@ func schema_pkg_apis_numaflow_v1alpha1_Scale(ref common.ReferenceCallback) commo Format: "int64", }, }, + "replicasPerScaleUp": { + SchemaProps: spec.SchemaProps{ + Description: "ReplicasPerScaleUp defines the number of maximum replicas that can be changed in a single scaled up operation. 
The is use to prevent from too aggressive scaling up operations", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "replicasPerScaleDown": { + SchemaProps: spec.SchemaProps{ + Description: "ReplicasPerScaleDown defines the number of maximum replicas that can be changed in a single scaled down operation. The is use to prevent from too aggressive scaling down operations", + Type: []string{"integer"}, + Format: "int64", + }, + }, }, }, }, diff --git a/pkg/apis/numaflow/v1alpha1/scale.go b/pkg/apis/numaflow/v1alpha1/scale.go index c775468b17..9fbe58da04 100644 --- a/pkg/apis/numaflow/v1alpha1/scale.go +++ b/pkg/apis/numaflow/v1alpha1/scale.go @@ -31,35 +31,40 @@ type Scale struct { // Lookback seconds to calculate the average pending messages and processing rate. // +optional LookbackSeconds *uint32 `json:"lookbackSeconds,omitempty" protobuf:"varint,4,opt,name=lookbackSeconds"` - // Deprecated: Use scaleUpCooldownSeconds and scaleDownCooldownSeconds instead. - // Cooldown seconds after a scaling operation before another one. - // +optional - DeprecatedCooldownSeconds *uint32 `json:"cooldownSeconds,omitempty" protobuf:"varint,5,opt,name=cooldownSeconds"` // After scaling down the source vertex to 0, sleep how many seconds before scaling the source vertex back up to peek. // +optional - ZeroReplicaSleepSeconds *uint32 `json:"zeroReplicaSleepSeconds,omitempty" protobuf:"varint,6,opt,name=zeroReplicaSleepSeconds"` + ZeroReplicaSleepSeconds *uint32 `json:"zeroReplicaSleepSeconds,omitempty" protobuf:"varint,5,opt,name=zeroReplicaSleepSeconds"` // TargetProcessingSeconds is used to tune the aggressiveness of autoscaling for source vertices, it measures how fast // you want the vertex to process all the pending messages. Typically increasing the value, which leads to lower processing // rate, thus less replicas. It's only effective for source vertices. 
// +optional - TargetProcessingSeconds *uint32 `json:"targetProcessingSeconds,omitempty" protobuf:"varint,7,opt,name=targetProcessingSeconds"` + TargetProcessingSeconds *uint32 `json:"targetProcessingSeconds,omitempty" protobuf:"varint,6,opt,name=targetProcessingSeconds"` // TargetBufferAvailability is used to define the target percentage of the buffer availability. // A valid and meaningful value should be less than the BufferUsageLimit defined in the Edge spec (or Pipeline spec), for example, 50. // It only applies to UDF and Sink vertices because only they have buffers to read. // +optional - TargetBufferAvailability *uint32 `json:"targetBufferAvailability,omitempty" protobuf:"varint,8,opt,name=targetBufferAvailability"` - // ReplicasPerScale defines maximum replicas can be scaled up or down at once. - // The is use to prevent too aggressive scaling operations + TargetBufferAvailability *uint32 `json:"targetBufferAvailability,omitempty" protobuf:"varint,7,opt,name=targetBufferAvailability"` + // DeprecatedReplicasPerScale defines the number of maximum replicas that can be changed in a single scale up or down operation. + // The is use to prevent from too aggressive scaling operations + // Deprecated: Use ReplicasPerScaleUp and ReplicasPerScaleDown instead // +optional - ReplicasPerScale *uint32 `json:"replicasPerScale,omitempty" protobuf:"varint,9,opt,name=replicasPerScale"` + DeprecatedReplicasPerScale *uint32 `json:"replicasPerScale,omitempty" protobuf:"varint,8,opt,name=replicasPerScale"` // ScaleUpCooldownSeconds defines the cooldown seconds after a scaling operation, before a follow-up scaling up. // It defaults to the CooldownSeconds if not set. 
// +optional - ScaleUpCooldownSeconds *uint32 `json:"scaleUpCooldownSeconds,omitempty" protobuf:"varint,10,opt,name=scaleUpCooldownSeconds"` + ScaleUpCooldownSeconds *uint32 `json:"scaleUpCooldownSeconds,omitempty" protobuf:"varint,9,opt,name=scaleUpCooldownSeconds"` // ScaleDownCooldownSeconds defines the cooldown seconds after a scaling operation, before a follow-up scaling down. // It defaults to the CooldownSeconds if not set. // +optional - ScaleDownCooldownSeconds *uint32 `json:"scaleDownCooldownSeconds,omitempty" protobuf:"varint,11,opt,name=scaleDownCooldownSeconds"` + ScaleDownCooldownSeconds *uint32 `json:"scaleDownCooldownSeconds,omitempty" protobuf:"varint,10,opt,name=scaleDownCooldownSeconds"` + // ReplicasPerScaleUp defines the number of maximum replicas that can be changed in a single scaled up operation. + // The is use to prevent from too aggressive scaling up operations + // +optional + ReplicasPerScaleUp *uint32 `json:"replicasPerScaleUp,omitempty" protobuf:"varint,11,opt,name=replicasPerScaleUp"` + // ReplicasPerScaleDown defines the number of maximum replicas that can be changed in a single scaled down operation. 
+ // The is use to prevent from too aggressive scaling down operations + // +optional + ReplicasPerScaleDown *uint32 `json:"replicasPerScaleDown,omitempty" protobuf:"varint,12,opt,name=replicasPerScaleDown"` } func (s Scale) GetLookbackSeconds() int { @@ -73,9 +78,6 @@ func (s Scale) GetScaleUpCooldownSeconds() int { if s.ScaleUpCooldownSeconds != nil { return int(*s.ScaleUpCooldownSeconds) } - if s.DeprecatedCooldownSeconds != nil { - return int(*s.DeprecatedCooldownSeconds) - } return DefaultCooldownSeconds } @@ -83,9 +85,6 @@ func (s Scale) GetScaleDownCooldownSeconds() int { if s.ScaleDownCooldownSeconds != nil { return int(*s.ScaleDownCooldownSeconds) } - if s.DeprecatedCooldownSeconds != nil { - return int(*s.DeprecatedCooldownSeconds) - } return DefaultCooldownSeconds } @@ -110,9 +109,22 @@ func (s Scale) GetTargetBufferAvailability() int { return DefaultTargetBufferAvailability } -func (s Scale) GetReplicasPerScale() int { - if s.ReplicasPerScale != nil { - return int(*s.ReplicasPerScale) +func (s Scale) GetReplicasPerScaleUp() int { + if s.ReplicasPerScaleUp != nil { + return int(*s.ReplicasPerScaleUp) + } + if s.DeprecatedReplicasPerScale != nil { + return int(*s.DeprecatedReplicasPerScale) + } + return DefaultReplicasPerScale +} + +func (s Scale) GetReplicasPerScaleDown() int { + if s.ReplicasPerScaleDown != nil { + return int(*s.ReplicasPerScaleDown) + } + if s.DeprecatedReplicasPerScale != nil { + return int(*s.DeprecatedReplicasPerScale) } return DefaultReplicasPerScale } diff --git a/pkg/apis/numaflow/v1alpha1/scale_test.go b/pkg/apis/numaflow/v1alpha1/scale_test.go index 9bb09399eb..49bfbb9a91 100644 --- a/pkg/apis/numaflow/v1alpha1/scale_test.go +++ b/pkg/apis/numaflow/v1alpha1/scale_test.go @@ -30,14 +30,16 @@ func Test_Scale_Parameters(t *testing.T) { assert.Equal(t, DefaultCooldownSeconds, s.GetScaleUpCooldownSeconds()) assert.Equal(t, DefaultCooldownSeconds, s.GetScaleDownCooldownSeconds()) assert.Equal(t, DefaultLookbackSeconds, 
s.GetLookbackSeconds()) - assert.Equal(t, DefaultReplicasPerScale, s.GetReplicasPerScale()) + assert.Equal(t, DefaultReplicasPerScale, s.GetReplicasPerScaleUp()) + assert.Equal(t, DefaultReplicasPerScale, s.GetReplicasPerScaleDown()) assert.Equal(t, DefaultTargetBufferAvailability, s.GetTargetBufferAvailability()) assert.Equal(t, DefaultTargetProcessingSeconds, s.GetTargetProcessingSeconds()) assert.Equal(t, DefaultZeroReplicaSleepSeconds, s.GetZeroReplicaSleepSeconds()) upcds := uint32(100) downcds := uint32(99) lbs := uint32(101) - rps := uint32(3) + rpsu := uint32(3) + rpsd := uint32(4) tps := uint32(102) tbu := uint32(33) zrss := uint32(44) @@ -47,7 +49,8 @@ func Test_Scale_Parameters(t *testing.T) { ScaleUpCooldownSeconds: &upcds, ScaleDownCooldownSeconds: &downcds, LookbackSeconds: &lbs, - ReplicasPerScale: &rps, + ReplicasPerScaleUp: &rpsu, + ReplicasPerScaleDown: &rpsd, TargetProcessingSeconds: &tps, TargetBufferAvailability: &tbu, ZeroReplicaSleepSeconds: &zrss, @@ -57,7 +60,8 @@ func Test_Scale_Parameters(t *testing.T) { assert.Equal(t, int(upcds), s.GetScaleUpCooldownSeconds()) assert.Equal(t, int(downcds), s.GetScaleDownCooldownSeconds()) assert.Equal(t, int(lbs), s.GetLookbackSeconds()) - assert.Equal(t, int(rps), s.GetReplicasPerScale()) + assert.Equal(t, int(rpsu), s.GetReplicasPerScaleUp()) + assert.Equal(t, int(rpsd), s.GetReplicasPerScaleDown()) assert.Equal(t, int(tbu), s.GetTargetBufferAvailability()) assert.Equal(t, int(tps), s.GetTargetProcessingSeconds()) assert.Equal(t, int(zrss), s.GetZeroReplicaSleepSeconds()) diff --git a/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go index b450852954..81d1a53700 100644 --- a/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go @@ -2035,11 +2035,6 @@ func (in *Scale) DeepCopyInto(out *Scale) { *out = new(uint32) **out = **in } - if in.DeprecatedCooldownSeconds != nil { - in, out := 
&in.DeprecatedCooldownSeconds, &out.DeprecatedCooldownSeconds - *out = new(uint32) - **out = **in - } if in.ZeroReplicaSleepSeconds != nil { in, out := &in.ZeroReplicaSleepSeconds, &out.ZeroReplicaSleepSeconds *out = new(uint32) @@ -2055,8 +2050,8 @@ func (in *Scale) DeepCopyInto(out *Scale) { *out = new(uint32) **out = **in } - if in.ReplicasPerScale != nil { - in, out := &in.ReplicasPerScale, &out.ReplicasPerScale + if in.DeprecatedReplicasPerScale != nil { + in, out := &in.DeprecatedReplicasPerScale, &out.DeprecatedReplicasPerScale *out = new(uint32) **out = **in } @@ -2070,6 +2065,16 @@ func (in *Scale) DeepCopyInto(out *Scale) { *out = new(uint32) **out = **in } + if in.ReplicasPerScaleUp != nil { + in, out := &in.ReplicasPerScaleUp, &out.ReplicasPerScaleUp + *out = new(uint32) + **out = **in + } + if in.ReplicasPerScaleDown != nil { + in, out := &in.ReplicasPerScaleDown, &out.ReplicasPerScaleDown + *out = new(uint32) + **out = **in + } return } diff --git a/pkg/reconciler/monovertex/scaling/scaling.go b/pkg/reconciler/monovertex/scaling/scaling.go index 7557b236b4..fa085fc2b4 100644 --- a/pkg/reconciler/monovertex/scaling/scaling.go +++ b/pkg/reconciler/monovertex/scaling/scaling.go @@ -239,11 +239,11 @@ func (s *Scaler) scaleOneMonoVertex(ctx context.Context, key string, worker int) if current > max || current < min { // Someone might have manually scaled up/down the MonoVertex return s.patchMonoVertexReplicas(ctx, monoVtx, desired) } - maxAllowed := int32(monoVtx.Spec.Scale.GetReplicasPerScale()) if desired < current { + maxAllowedDown := int32(monoVtx.Spec.Scale.GetReplicasPerScaleDown()) diff := current - desired - if diff > maxAllowed { - diff = maxAllowed + if diff > maxAllowedDown { + diff = maxAllowedDown } if secondsSinceLastScale < scaleDownCooldown { log.Infof("Cooldown period for scaling down, skip scaling.") @@ -252,9 +252,10 @@ func (s *Scaler) scaleOneMonoVertex(ctx context.Context, key string, worker int) return s.patchMonoVertexReplicas(ctx, 
monoVtx, current-diff) // We scale down gradually } if desired > current { + maxAllowedUp := int32(monoVtx.Spec.Scale.GetReplicasPerScaleUp()) diff := desired - current - if diff > maxAllowed { - diff = maxAllowed + if diff > maxAllowedUp { + diff = maxAllowedUp } if secondsSinceLastScale < scaleUpCooldown { log.Infof("Cooldown period for scaling up, skip scaling.") diff --git a/pkg/reconciler/vertex/scaling/scaling.go b/pkg/reconciler/vertex/scaling/scaling.go index 741f11a668..03f581e6d7 100644 --- a/pkg/reconciler/vertex/scaling/scaling.go +++ b/pkg/reconciler/vertex/scaling/scaling.go @@ -311,11 +311,11 @@ func (s *Scaler) scaleOneVertex(ctx context.Context, key string, worker int) err if current > max || current < min { // Someone might have manually scaled up/down the vertex return s.patchVertexReplicas(ctx, vertex, desired) } - maxAllowed := int32(vertex.Spec.Scale.GetReplicasPerScale()) if desired < current { + maxAllowedDown := int32(vertex.Spec.Scale.GetReplicasPerScaleDown()) diff := current - desired - if diff > maxAllowed { - diff = maxAllowed + if diff > maxAllowedDown { + diff = maxAllowedDown } if secondsSinceLastScale < scaleDownCooldown { log.Infof("Cooldown period for scaling down, skip scaling.") @@ -338,9 +338,10 @@ func (s *Scaler) scaleOneVertex(ctx context.Context, key string, worker int) err log.Infof("Vertex %s has back pressure in downstream vertices, skip scaling.", key) return nil } + maxAllowedUp := int32(vertex.Spec.Scale.GetReplicasPerScaleUp()) diff := desired - current - if diff > maxAllowed { - diff = maxAllowed + if diff > maxAllowedUp { + diff = maxAllowedUp } if secondsSinceLastScale < scaleUpCooldown { log.Infof("Cooldown period for scaling up, skip scaling.") @@ -375,7 +376,7 @@ func (s *Scaler) desiredReplicas(_ context.Context, vertex *dfv1.Vertex, partiti // then we figure out how many replicas are needed to keep the available buffer length at target level. 
if pending >= partitionBufferLengths[i] { // Simply return current replica number + max allowed if the pending messages are more than available buffer length - desired = int32(vertex.Status.Replicas) + int32(vertex.Spec.Scale.GetReplicasPerScale()) + desired = int32(vertex.Status.Replicas) + int32(vertex.Spec.Scale.GetReplicasPerScaleUp()) } else { singleReplicaContribution := float64(partitionBufferLengths[i]-pending) / float64(vertex.Status.Replicas) desired = int32(math.Round(float64(partitionAvailableBufferLengths[i]) / singleReplicaContribution)) diff --git a/rust/numaflow-models/src/models/scale.rs b/rust/numaflow-models/src/models/scale.rs index 5037968d7c..2cf2f89ccf 100644 --- a/rust/numaflow-models/src/models/scale.rs +++ b/rust/numaflow-models/src/models/scale.rs @@ -20,9 +20,6 @@ limitations under the License. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Scale { - /// Deprecated: Use scaleUpCooldownSeconds and scaleDownCooldownSeconds instead. Cooldown seconds after a scaling operation before another one. - #[serde(rename = "cooldownSeconds", skip_serializing_if = "Option::is_none")] - pub cooldown_seconds: Option, /// Whether to disable autoscaling. Set to \"true\" when using Kubernetes HPA or any other 3rd party autoscaling strategies. #[serde(rename = "disabled", skip_serializing_if = "Option::is_none")] pub disabled: Option, @@ -35,9 +32,18 @@ pub struct Scale { /// Minimum replicas. #[serde(rename = "min", skip_serializing_if = "Option::is_none")] pub min: Option, - /// ReplicasPerScale defines maximum replicas can be scaled up or down at once. The is use to prevent too aggressive scaling operations + /// DeprecatedReplicasPerScale defines the number of maximum replicas that can be changed in a single scale up or down operation. 
The is use to prevent from too aggressive scaling operations Deprecated: Use ReplicasPerScaleUp and ReplicasPerScaleDown instead #[serde(rename = "replicasPerScale", skip_serializing_if = "Option::is_none")] pub replicas_per_scale: Option, + /// ReplicasPerScaleDown defines the number of maximum replicas that can be changed in a single scaled down operation. The is use to prevent from too aggressive scaling down operations + #[serde( + rename = "replicasPerScaleDown", + skip_serializing_if = "Option::is_none" + )] + pub replicas_per_scale_down: Option, + /// ReplicasPerScaleUp defines the number of maximum replicas that can be changed in a single scaled up operation. The is use to prevent from too aggressive scaling up operations + #[serde(rename = "replicasPerScaleUp", skip_serializing_if = "Option::is_none")] + pub replicas_per_scale_up: Option, /// ScaleDownCooldownSeconds defines the cooldown seconds after a scaling operation, before a follow-up scaling down. It defaults to the CooldownSeconds if not set. #[serde( rename = "scaleDownCooldownSeconds", @@ -74,12 +80,13 @@ impl Scale { /// Scale defines the parameters for autoscaling. 
pub fn new() -> Scale { Scale { - cooldown_seconds: None, disabled: None, lookback_seconds: None, max: None, min: None, replicas_per_scale: None, + replicas_per_scale_down: None, + replicas_per_scale_up: None, scale_down_cooldown_seconds: None, scale_up_cooldown_seconds: None, target_buffer_availability: None, From 53d1131d82c8029e546c2f39305d1bcf80f1b60e Mon Sep 17 00:00:00 2001 From: xdevxy <115589853+xdevxy@users.noreply.github.com> Date: Mon, 26 Aug 2024 11:31:13 -0700 Subject: [PATCH 032/188] fix: log format with config load error (#2000) Signed-off-by: Hao Hao --- pkg/reconciler/cmd/start.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/reconciler/cmd/start.go b/pkg/reconciler/cmd/start.go index 0565aefad1..0e5f86542c 100644 --- a/pkg/reconciler/cmd/start.go +++ b/pkg/reconciler/cmd/start.go @@ -51,7 +51,7 @@ import ( func Start(namespaced bool, managedNamespace string) { logger := logging.NewLogger().Named("controller-manager") config, err := reconciler.LoadConfig(func(err error) { - logger.Errorf("Failed to reload global configuration file", zap.Error(err)) + logger.Errorw("Failed to reload global configuration file", zap.Error(err)) }) if err != nil { logger.Fatalw("Failed to load global configuration file", zap.Error(err)) From 79ed1d1f1423eac24b6c2edd2cf6aa56c6754324 Mon Sep 17 00:00:00 2001 From: Sidhant Kohli Date: Mon, 26 Aug 2024 17:02:45 -0700 Subject: [PATCH 033/188] chore: mark imageErr as unhealthy (#2004) Signed-off-by: Sidhant Kohli --- pkg/reconciler/util.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/pkg/reconciler/util.go b/pkg/reconciler/util.go index f12573ab08..70f0fe7376 100644 --- a/pkg/reconciler/util.go +++ b/pkg/reconciler/util.go @@ -18,6 +18,7 @@ package reconciler import ( "fmt" + "slices" appv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -25,6 +26,10 @@ import ( dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" ) +// unhealthyWaitingStatus contains the 
status messages for a pod in waiting state +// which should be considered as unhealthy +var unhealthyWaitingStatus = []string{"CrashLoopBackOff", "ImagePullBackOff"} + // CheckVertexPodsStatus checks the status by iterating over pods objects func CheckVertexPodsStatus(vertexPods *corev1.PodList) (healthy bool, reason string, message string) { // TODO: Need to revisit later. @@ -45,7 +50,7 @@ func CheckVertexPodsStatus(vertexPods *corev1.PodList) (healthy bool, reason str func isPodHealthy(pod *corev1.Pod) (healthy bool, reason string) { for _, c := range pod.Status.ContainerStatuses { - if c.State.Waiting != nil && c.State.Waiting.Reason == "CrashLoopBackOff" { + if c.State.Waiting != nil && slices.Contains(unhealthyWaitingStatus, c.State.Waiting.Reason) { return false, c.State.Waiting.Reason } if c.State.Terminated != nil && c.State.Terminated.Reason == "Error" { From 2ba54117d7015126c6894d196d42848bd2e37644 Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Wed, 28 Aug 2024 16:50:14 -0700 Subject: [PATCH 034/188] feat: enable resourceClaims for vertex and monovtx (#2009) Signed-off-by: Derek Wang --- api/json-schema/schema.json | 90 ++ api/openapi-spec/swagger.json | 90 ++ ...w.numaproj.io_interstepbufferservices.yaml | 32 + .../numaflow.numaproj.io_monovertices.yaml | 32 + .../full/numaflow.numaproj.io_pipelines.yaml | 80 ++ .../full/numaflow.numaproj.io_vertices.yaml | 16 + config/install.yaml | 160 +++ config/namespace-install.yaml | 160 +++ docs/APIs.md | 23 + .../configuration/container-resources.md | 19 +- .../configuration/pipeline-customization.md | 11 +- .../configuration/pod-specifications.md | 37 + mkdocs.yml | 1 + pkg/apis/numaflow/v1alpha1/generated.pb.go | 994 ++++++++++-------- pkg/apis/numaflow/v1alpha1/generated.proto | 8 + .../numaflow/v1alpha1/openapi_generated.go | 220 +++- pkg/apis/numaflow/v1alpha1/pod_template.go | 10 + .../numaflow/v1alpha1/pod_template_test.go | 6 + .../v1alpha1/zz_generated.deepcopy.go | 7 + rust/numaflow-models/Makefile | 
1 + .../src/models/abstract_pod_template.rs | 4 + .../src/models/abstract_vertex.rs | 4 + .../src/models/daemon_template.rs | 4 + .../src/models/jet_stream_buffer_service.rs | 4 + .../src/models/job_template.rs | 4 + .../src/models/mono_vertex_spec.rs | 4 + .../src/models/native_redis.rs | 4 + .../models/side_inputs_manager_template.rs | 4 + .../numaflow-models/src/models/vertex_spec.rs | 4 + .../src/models/vertex_template.rs | 4 + 30 files changed, 1557 insertions(+), 480 deletions(-) create mode 100644 docs/user-guide/reference/configuration/pod-specifications.md diff --git a/api/json-schema/schema.json b/api/json-schema/schema.json index e3601b73a4..66b8540090 100644 --- a/api/json-schema/schema.json +++ b/api/json-schema/schema.json @@ -17600,6 +17600,15 @@ "description": "If specified, indicates the Redis pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/", "type": "string" }, + "resourceClaims": { + "description": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodResourceClaim" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys" + }, "runtimeClassName": { "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. 
If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", "type": "string" @@ -17717,6 +17726,15 @@ "description": "If specified, indicates the Redis pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/", "type": "string" }, + "resourceClaims": { + "description": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodResourceClaim" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys" + }, "runtimeClassName": { "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", "type": "string" @@ -18002,6 +18020,15 @@ "format": "int32", "type": "integer" }, + "resourceClaims": { + "description": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. 
The resources will be made available to those containers which consume them by name.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodResourceClaim" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys" + }, "runtimeClassName": { "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", "type": "string" @@ -18766,6 +18793,15 @@ "format": "int32", "type": "integer" }, + "resourceClaims": { + "description": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodResourceClaim" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys" + }, "runtimeClassName": { "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", "type": "string" @@ -18906,6 +18942,15 @@ "description": "If specified, indicates the Redis pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. 
Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/", "type": "string" }, + "resourceClaims": { + "description": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodResourceClaim" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys" + }, "runtimeClassName": { "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", "type": "string" @@ -19165,6 +19210,15 @@ "format": "int32", "type": "integer" }, + "resourceClaims": { + "description": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodResourceClaim" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys" + }, "runtimeClassName": { "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. 
If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", "type": "string" @@ -19321,6 +19375,15 @@ "format": "int32", "type": "integer" }, + "resourceClaims": { + "description": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodResourceClaim" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys" + }, "runtimeClassName": { "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", "type": "string" @@ -19946,6 +20009,15 @@ "description": "If specified, indicates the Redis pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/", "type": "string" }, + "resourceClaims": { + "description": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. 
The resources will be made available to those containers which consume them by name.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodResourceClaim" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys" + }, "runtimeClassName": { "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", "type": "string" @@ -20374,6 +20446,15 @@ "format": "int32", "type": "integer" }, + "resourceClaims": { + "description": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodResourceClaim" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys" + }, "runtimeClassName": { "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", "type": "string" @@ -20545,6 +20626,15 @@ "description": "If specified, indicates the Redis pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. 
Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/", "type": "string" }, + "resourceClaims": { + "description": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodResourceClaim" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys" + }, "runtimeClassName": { "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", "type": "string" diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 55540a4b09..a7eff01898 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -17605,6 +17605,15 @@ "description": "If specified, indicates the Redis pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. 
More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/", "type": "string" }, + "resourceClaims": { + "description": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodResourceClaim" + }, + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys" + }, "runtimeClassName": { "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", "type": "string" @@ -17725,6 +17734,15 @@ "description": "If specified, indicates the Redis pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/", "type": "string" }, + "resourceClaims": { + "description": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. 
The resources will be made available to those containers which consume them by name.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodResourceClaim" + }, + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys" + }, "runtimeClassName": { "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", "type": "string" @@ -18007,6 +18025,15 @@ "type": "integer", "format": "int32" }, + "resourceClaims": { + "description": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodResourceClaim" + }, + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys" + }, "runtimeClassName": { "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", "type": "string" @@ -18762,6 +18789,15 @@ "type": "integer", "format": "int32" }, + "resourceClaims": { + "description": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. 
The resources will be made available to those containers which consume them by name.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodResourceClaim" + }, + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys" + }, "runtimeClassName": { "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", "type": "string" @@ -18902,6 +18938,15 @@ "description": "If specified, indicates the Redis pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/", "type": "string" }, + "resourceClaims": { + "description": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodResourceClaim" + }, + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys" + }, "runtimeClassName": { "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. 
If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", "type": "string" @@ -19161,6 +19206,15 @@ "type": "integer", "format": "int32" }, + "resourceClaims": { + "description": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodResourceClaim" + }, + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys" + }, "runtimeClassName": { "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", "type": "string" @@ -19317,6 +19371,15 @@ "type": "integer", "format": "int32" }, + "resourceClaims": { + "description": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodResourceClaim" + }, + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys" + }, "runtimeClassName": { "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. 
If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", "type": "string" @@ -19933,6 +19996,15 @@ "description": "If specified, indicates the Redis pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/", "type": "string" }, + "resourceClaims": { + "description": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodResourceClaim" + }, + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys" + }, "runtimeClassName": { "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", "type": "string" @@ -20356,6 +20428,15 @@ "type": "integer", "format": "int32" }, + "resourceClaims": { + "description": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. 
The resources will be made available to those containers which consume them by name.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodResourceClaim" + }, + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys" + }, "runtimeClassName": { "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", "type": "string" @@ -20523,6 +20604,15 @@ "description": "If specified, indicates the Redis pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/", "type": "string" }, + "resourceClaims": { + "description": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodResourceClaim" + }, + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys" + }, "runtimeClassName": { "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. 
If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", "type": "string" diff --git a/config/base/crds/full/numaflow.numaproj.io_interstepbufferservices.yaml b/config/base/crds/full/numaflow.numaproj.io_interstepbufferservices.yaml index c09fb798f4..acc540cd10 100644 --- a/config/base/crds/full/numaflow.numaproj.io_interstepbufferservices.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_interstepbufferservices.yaml @@ -1044,6 +1044,22 @@ spec: default: 3 format: int32 type: integer + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array runtimeClassName: type: string securityContext: @@ -2172,6 +2188,22 @@ spec: default: 3 format: int32 type: integer + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array runtimeClassName: type: string securityContext: diff --git a/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml b/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml index 7967f0362d..a26426074f 100644 --- a/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml @@ -1428,6 +1428,22 @@ spec: replicas: format: int32 type: integer + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array runtimeClassName: type: string securityContext: @@ -2206,6 +2222,22 @@ spec: default: 1 format: 
int32 type: integer + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array runtimeClassName: type: string scale: diff --git a/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml b/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml index 00c4a0b008..070ba0b033 100644 --- a/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml @@ -1904,6 +1904,22 @@ spec: replicas: format: int32 type: integer + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array runtimeClassName: type: string securityContext: @@ -2617,6 +2633,22 @@ spec: type: integer priorityClassName: type: string + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array runtimeClassName: type: string securityContext: @@ -3508,6 +3540,22 @@ spec: type: integer priorityClassName: type: string + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array runtimeClassName: type: string securityContext: @@ -4396,6 +4444,22 @@ spec: type: integer priorityClassName: type: string + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array runtimeClassName: type: string 
securityContext: @@ -5935,6 +5999,22 @@ spec: type: integer priorityClassName: type: string + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array runtimeClassName: type: string scale: diff --git a/config/base/crds/full/numaflow.numaproj.io_vertices.yaml b/config/base/crds/full/numaflow.numaproj.io_vertices.yaml index cd7e10a99f..d1a77b9375 100644 --- a/config/base/crds/full/numaflow.numaproj.io_vertices.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_vertices.yaml @@ -1583,6 +1583,22 @@ spec: default: 1 format: int32 type: integer + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array runtimeClassName: type: string scale: diff --git a/config/install.yaml b/config/install.yaml index cf347b8bcf..575bac0cc3 100644 --- a/config/install.yaml +++ b/config/install.yaml @@ -1043,6 +1043,22 @@ spec: default: 3 format: int32 type: integer + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array runtimeClassName: type: string securityContext: @@ -2171,6 +2187,22 @@ spec: default: 3 format: int32 type: integer + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array runtimeClassName: type: string securityContext: @@ -4040,6 +4072,22 @@ spec: replicas: format: int32 type: integer + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + 
resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array runtimeClassName: type: string securityContext: @@ -4818,6 +4866,22 @@ spec: default: 1 format: int32 type: integer + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array runtimeClassName: type: string scale: @@ -10064,6 +10128,22 @@ spec: replicas: format: int32 type: integer + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array runtimeClassName: type: string securityContext: @@ -10777,6 +10857,22 @@ spec: type: integer priorityClassName: type: string + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array runtimeClassName: type: string securityContext: @@ -11668,6 +11764,22 @@ spec: type: integer priorityClassName: type: string + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array runtimeClassName: type: string securityContext: @@ -12556,6 +12668,22 @@ spec: type: integer priorityClassName: type: string + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array runtimeClassName: type: string securityContext: @@ -14095,6 +14223,22 @@ spec: type: integer 
priorityClassName: type: string + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array runtimeClassName: type: string scale: @@ -19520,6 +19664,22 @@ spec: default: 1 format: int32 type: integer + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array runtimeClassName: type: string scale: diff --git a/config/namespace-install.yaml b/config/namespace-install.yaml index be25fca8be..3b3737b3a9 100644 --- a/config/namespace-install.yaml +++ b/config/namespace-install.yaml @@ -1043,6 +1043,22 @@ spec: default: 3 format: int32 type: integer + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array runtimeClassName: type: string securityContext: @@ -2171,6 +2187,22 @@ spec: default: 3 format: int32 type: integer + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array runtimeClassName: type: string securityContext: @@ -4040,6 +4072,22 @@ spec: replicas: format: int32 type: integer + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array runtimeClassName: type: string securityContext: @@ -4818,6 +4866,22 @@ spec: default: 1 format: int32 type: integer + resourceClaims: + items: + properties: + name: + type: 
string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array runtimeClassName: type: string scale: @@ -10064,6 +10128,22 @@ spec: replicas: format: int32 type: integer + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array runtimeClassName: type: string securityContext: @@ -10777,6 +10857,22 @@ spec: type: integer priorityClassName: type: string + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array runtimeClassName: type: string securityContext: @@ -11668,6 +11764,22 @@ spec: type: integer priorityClassName: type: string + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array runtimeClassName: type: string securityContext: @@ -12556,6 +12668,22 @@ spec: type: integer priorityClassName: type: string + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array runtimeClassName: type: string securityContext: @@ -14095,6 +14223,22 @@ spec: type: integer priorityClassName: type: string + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array runtimeClassName: type: string scale: @@ -19520,6 
+19664,22 @@ spec: default: 1 format: int32 type: integer + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array runtimeClassName: type: string scale: diff --git a/docs/APIs.md b/docs/APIs.md index a5c42c14f7..46a865e83b 100644 --- a/docs/APIs.md +++ b/docs/APIs.md @@ -369,6 +369,29 @@ merged to the generated DNS configuration based on DNSPolicy. + + + + +resourceClaims
+
+\[\]Kubernetes core/v1.PodResourceClaim + + + + +(Optional) +

+ +ResourceClaims defines which ResourceClaims must be allocated and +reserved before the Pod is allowed to start. The resources will be made +available to those containers which consume them by name. +

+ + + + + diff --git a/docs/user-guide/reference/configuration/container-resources.md b/docs/user-guide/reference/configuration/container-resources.md index 31e3e410d4..09609be34f 100644 --- a/docs/user-guide/reference/configuration/container-resources.md +++ b/docs/user-guide/reference/configuration/container-resources.md @@ -24,6 +24,8 @@ spec: requests: cpu: "1" memory: 4Gi + claims: + - name: my-claim ``` ## UDF Container @@ -47,6 +49,8 @@ spec: requests: cpu: "1" memory: 4Gi + claims: + - name: my-claim ``` ## UDSource Container @@ -71,6 +75,8 @@ spec: requests: cpu: "1" memory: 4Gi + claims: + - name: my-claim ``` ## Source Transformer Container @@ -95,6 +101,8 @@ spec: requests: cpu: "1" memory: 4Gi + claims: + - name: my-claim ``` ## UDSink Container @@ -109,6 +117,9 @@ metadata: spec: vertices: - name: my-vertex + resourceClaims: + - name: my-claim + xxx sink: udsink: container: @@ -119,6 +130,8 @@ spec: requests: cpu: "1" memory: 4Gi + claims: + - name: my-claim ``` ## Init Container @@ -136,11 +149,11 @@ spec: initContainerTemplate: resources: limits: - cpu: "3" - memory: 6Gi + cpu: "2" + memory: 2Gi requests: cpu: "1" - memory: 4Gi + memory: 1Gi ``` Container resources for [user init-containers](init-containers.md) are instead specified at `.spec.vertices[*].initContainers[*].resources`. 
diff --git a/docs/user-guide/reference/configuration/pipeline-customization.md b/docs/user-guide/reference/configuration/pipeline-customization.md index 55e9e2c466..d332fc8e6f 100644 --- a/docs/user-guide/reference/configuration/pipeline-customization.md +++ b/docs/user-guide/reference/configuration/pipeline-customization.md @@ -39,6 +39,9 @@ spec: priorityClassName: my-priority-class-name priority: 50 serviceAccountName: my-service-account + resourceClaims: + - name: my-claim + xxx affinity: podAntiAffinity: requiredDuringSchedulingIgnoredDuringExecution: @@ -61,6 +64,8 @@ spec: resources: limits: memory: 500Mi + claims: + - name: my-claim initContainerTemplate: env: - name: MY_ENV_NAME @@ -137,7 +142,6 @@ spec: my-label-name: my-label-value annotations: my-annotation-name: my-annotation-value - # Pod spec nodeSelector: my-node-label-name: my-node-label-value tolerations: @@ -210,6 +214,9 @@ spec: priorityClassName: my-priority-class-name priority: 50 serviceAccountName: my-service-account + resourceClaims: + - name: my-claim + xxx affinity: podAntiAffinity: requiredDuringSchedulingIgnoredDuringExecution: @@ -228,6 +235,8 @@ spec: resources: limits: memory: 500Mi + claims: + - name: my-claim initContainerTemplate: env: - name: MY_ENV_NAME diff --git a/docs/user-guide/reference/configuration/pod-specifications.md b/docs/user-guide/reference/configuration/pod-specifications.md new file mode 100644 index 0000000000..0587ca169a --- /dev/null +++ b/docs/user-guide/reference/configuration/pod-specifications.md @@ -0,0 +1,37 @@ +# Pod Specifications + +Most of the Kunernetes Pod specification fields are supported in the spec of `Pipeline`, `MonoVertex` and `InterStepBufferService`. 
Those fields include: + +- `nodeSelector` +- `tolerations` +- `securityContext` +- `imagePullSecrets` +- `priorityClassName` +- `priority` +- `affinity` +- `serviceAccountName` +- `runtimeClassName` +- `automountServiceAccountToken` +- `dnsPolicy` +- `dnsConfig` +- `resourceClaims` + +All the fields above are optional, click [here](../../../APIs.md#numaflow.numaproj.io/v1alpha1.AbstractPodTemplate) to see full list of supported fields. + +These fields can be specified in the `Pipeline` spec under: + +- `spec.vertices[*]` +- `spec.templates.daemon` +- `spec.templates.job` +- `spec.templates.sideInputsManager` +- `spec.templates.vertex` + +Or in the `MonoVertex` spec under: + +- `spec` +- `spec.daemonTemplate` + +Or in `InterStepBufferService` spec at: + +- `spec.jetstream` +- `spec.redis.native` diff --git a/mkdocs.yml b/mkdocs.yml index 43cd0a7950..7c9161d480 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -93,6 +93,7 @@ nav: - user-guide/reference/multi-partition.md - user-guide/reference/side-inputs.md - Configuration: + - user-guide/reference/configuration/pod-specifications.md - user-guide/reference/configuration/container-resources.md - user-guide/reference/configuration/volumes.md - user-guide/reference/configuration/environment-variables.md diff --git a/pkg/apis/numaflow/v1alpha1/generated.pb.go b/pkg/apis/numaflow/v1alpha1/generated.pb.go index b152ea965c..d905171bec 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.pb.go +++ b/pkg/apis/numaflow/v1alpha1/generated.pb.go @@ -2704,472 +2704,474 @@ func init() { } var fileDescriptor_9d0d1b17d3865563 = []byte{ - // 7439 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x5b, 0x6c, 0x1d, 0xd7, - 0x75, 0xb6, 0xcf, 0x8d, 0x3c, 0x67, 0x1d, 0x92, 0xa2, 0xb6, 0x64, 0x99, 0x92, 0x65, 0x51, 0x19, - 0xff, 0xf6, 0xaf, 0xfc, 0x49, 0xc8, 0xdf, 0xfc, 0x7d, 0x4b, 0xfe, 0x24, 0x36, 0x0f, 0x29, 0x52, - 0x94, 0x48, 0x89, 0x59, 0x87, 0x94, 0x9d, 0xf8, 0x4f, 0xfc, 0x0f, 
0x67, 0x36, 0x0f, 0xc7, 0x9c, - 0x33, 0x73, 0x3c, 0x33, 0x87, 0x12, 0x9d, 0x16, 0xb9, 0x3d, 0xd8, 0x45, 0x5b, 0xb4, 0xc8, 0x53, - 0x80, 0x22, 0x2d, 0x5a, 0x14, 0xc8, 0x43, 0x90, 0x3e, 0x14, 0x70, 0x1f, 0x0a, 0xf4, 0x8a, 0xa2, - 0x4d, 0x8b, 0x5e, 0xf2, 0x50, 0xa0, 0x2e, 0x0a, 0x10, 0x0d, 0x8b, 0x3e, 0xb4, 0x40, 0x83, 0xa0, - 0x01, 0x9a, 0x44, 0x08, 0x90, 0x62, 0xdf, 0xe6, 0x76, 0xe6, 0x48, 0xe4, 0x19, 0x52, 0x96, 0x5b, - 0xbf, 0xcd, 0xec, 0xbd, 0xf6, 0xb7, 0xf6, 0xac, 0x7d, 0x5b, 0x7b, 0xed, 0xb5, 0xd7, 0xc0, 0x62, - 0xcb, 0x0a, 0xb6, 0xba, 0x1b, 0x53, 0x86, 0xdb, 0x9e, 0x76, 0xba, 0x6d, 0xbd, 0xe3, 0xb9, 0xaf, - 0xf1, 0x87, 0x4d, 0xdb, 0xbd, 0x35, 0xdd, 0xd9, 0x6e, 0x4d, 0xeb, 0x1d, 0xcb, 0x8f, 0x52, 0x76, - 0x9e, 0xd2, 0xed, 0xce, 0x96, 0xfe, 0xd4, 0x74, 0x8b, 0x3a, 0xd4, 0xd3, 0x03, 0x6a, 0x4e, 0x75, - 0x3c, 0x37, 0x70, 0xc9, 0x73, 0x11, 0xd0, 0x94, 0x02, 0x9a, 0x52, 0xc5, 0xa6, 0x3a, 0xdb, 0xad, - 0x29, 0x06, 0x14, 0xa5, 0x28, 0xa0, 0x73, 0x1f, 0x89, 0xd5, 0xa0, 0xe5, 0xb6, 0xdc, 0x69, 0x8e, - 0xb7, 0xd1, 0xdd, 0xe4, 0x6f, 0xfc, 0x85, 0x3f, 0x09, 0x3e, 0xe7, 0xb4, 0xed, 0xe7, 0xfd, 0x29, - 0xcb, 0x65, 0xd5, 0x9a, 0x36, 0x5c, 0x8f, 0x4e, 0xef, 0xf4, 0xd4, 0xe5, 0xdc, 0xd3, 0x11, 0x4d, - 0x5b, 0x37, 0xb6, 0x2c, 0x87, 0x7a, 0xbb, 0xea, 0x5b, 0xa6, 0x3d, 0xea, 0xbb, 0x5d, 0xcf, 0xa0, - 0x87, 0x2a, 0xe5, 0x4f, 0xb7, 0x69, 0xa0, 0x67, 0xf1, 0x9a, 0xee, 0x57, 0xca, 0xeb, 0x3a, 0x81, - 0xd5, 0xee, 0x65, 0xf3, 0xec, 0xbd, 0x0a, 0xf8, 0xc6, 0x16, 0x6d, 0xeb, 0xe9, 0x72, 0xda, 0x3f, - 0xd4, 0xe0, 0xd4, 0xec, 0x86, 0x1f, 0x78, 0xba, 0x11, 0xac, 0xba, 0xe6, 0x1a, 0x6d, 0x77, 0x6c, - 0x3d, 0xa0, 0x64, 0x1b, 0xaa, 0xac, 0x6e, 0xa6, 0x1e, 0xe8, 0x13, 0x85, 0x8b, 0x85, 0x4b, 0xf5, - 0x99, 0xd9, 0xa9, 0x01, 0xdb, 0x62, 0x6a, 0x45, 0x02, 0x35, 0x46, 0xf6, 0xf7, 0x26, 0xab, 0xea, - 0x0d, 0x43, 0x06, 0xe4, 0x6b, 0x05, 0x18, 0x71, 0x5c, 0x93, 0x36, 0xa9, 0x4d, 0x8d, 0xc0, 0xf5, - 0x26, 0x8a, 0x17, 0x4b, 0x97, 0xea, 0x33, 0x9f, 0x1b, 0x98, 0x63, 0xc6, 0x17, 0x4d, 0x5d, 0x8f, - 0x31, 
0xb8, 0xec, 0x04, 0xde, 0x6e, 0xe3, 0xf4, 0xb7, 0xf7, 0x26, 0x1f, 0xda, 0xdf, 0x9b, 0x1c, - 0x89, 0x67, 0x61, 0xa2, 0x26, 0x64, 0x1d, 0xea, 0x81, 0x6b, 0x33, 0x91, 0x59, 0xae, 0xe3, 0x4f, - 0x94, 0x78, 0xc5, 0x2e, 0x4c, 0x09, 0x69, 0x33, 0xf6, 0x53, 0xac, 0xbb, 0x4c, 0xed, 0x3c, 0x35, - 0xb5, 0x16, 0x92, 0x35, 0x4e, 0x49, 0xe0, 0x7a, 0x94, 0xe6, 0x63, 0x1c, 0x87, 0x50, 0x38, 0xe1, - 0x53, 0xa3, 0xeb, 0x59, 0xc1, 0xee, 0x9c, 0xeb, 0x04, 0xf4, 0x76, 0x30, 0x51, 0xe6, 0x52, 0x7e, - 0x32, 0x0b, 0x7a, 0xd5, 0x35, 0x9b, 0x49, 0xea, 0xc6, 0xa9, 0xfd, 0xbd, 0xc9, 0x13, 0xa9, 0x44, - 0x4c, 0x63, 0x12, 0x07, 0xc6, 0xad, 0xb6, 0xde, 0xa2, 0xab, 0x5d, 0xdb, 0x6e, 0x52, 0xc3, 0xa3, - 0x81, 0x3f, 0x51, 0xe1, 0x9f, 0x70, 0x29, 0x8b, 0xcf, 0xb2, 0x6b, 0xe8, 0xf6, 0x8d, 0x8d, 0xd7, - 0xa8, 0x11, 0x20, 0xdd, 0xa4, 0x1e, 0x75, 0x0c, 0xda, 0x98, 0x90, 0x1f, 0x33, 0xbe, 0x94, 0x42, - 0xc2, 0x1e, 0x6c, 0xb2, 0x08, 0x27, 0x3b, 0x9e, 0xe5, 0xf2, 0x2a, 0xd8, 0xba, 0xef, 0x5f, 0xd7, - 0xdb, 0x74, 0x62, 0xe8, 0x62, 0xe1, 0x52, 0xad, 0x71, 0x56, 0xc2, 0x9c, 0x5c, 0x4d, 0x13, 0x60, - 0x6f, 0x19, 0x72, 0x09, 0xaa, 0x2a, 0x71, 0x62, 0xf8, 0x62, 0xe1, 0x52, 0x45, 0xf4, 0x1d, 0x55, - 0x16, 0xc3, 0x5c, 0xb2, 0x00, 0x55, 0x7d, 0x73, 0xd3, 0x72, 0x18, 0x65, 0x95, 0x8b, 0xf0, 0x7c, - 0xd6, 0xa7, 0xcd, 0x4a, 0x1a, 0x81, 0xa3, 0xde, 0x30, 0x2c, 0x4b, 0xae, 0x02, 0xf1, 0xa9, 0xb7, - 0x63, 0x19, 0x74, 0xd6, 0x30, 0xdc, 0xae, 0x13, 0xf0, 0xba, 0xd7, 0x78, 0xdd, 0xcf, 0xc9, 0xba, - 0x93, 0x66, 0x0f, 0x05, 0x66, 0x94, 0x22, 0x2f, 0xc2, 0xb8, 0x1c, 0x76, 0x91, 0x14, 0x80, 0x23, - 0x9d, 0x66, 0x82, 0xc4, 0x54, 0x1e, 0xf6, 0x50, 0x13, 0x13, 0xce, 0xeb, 0xdd, 0xc0, 0x6d, 0x33, - 0xc8, 0x24, 0xd3, 0x35, 0x77, 0x9b, 0x3a, 0x13, 0xf5, 0x8b, 0x85, 0x4b, 0xd5, 0xc6, 0xc5, 0xfd, - 0xbd, 0xc9, 0xf3, 0xb3, 0x77, 0xa1, 0xc3, 0xbb, 0xa2, 0x90, 0x1b, 0x50, 0x33, 0x1d, 0x7f, 0xd5, - 0xb5, 0x2d, 0x63, 0x77, 0x62, 0x84, 0x57, 0xf0, 0x29, 0xf9, 0xa9, 0xb5, 0xf9, 0xeb, 0x4d, 0x91, - 0x71, 0x67, 0x6f, 0xf2, 0x7c, 0xef, 0xec, 
0x38, 0x15, 0xe6, 0x63, 0x84, 0x41, 0x56, 0x38, 0xe0, - 0x9c, 0xeb, 0x6c, 0x5a, 0xad, 0x89, 0x51, 0xde, 0x1a, 0x17, 0xfb, 0x74, 0xe8, 0xf9, 0xeb, 0x4d, - 0x41, 0xd7, 0x18, 0x95, 0xec, 0xc4, 0x2b, 0x46, 0x08, 0xe7, 0x5e, 0x80, 0x93, 0x3d, 0xa3, 0x96, - 0x8c, 0x43, 0x69, 0x9b, 0xee, 0xf2, 0x49, 0xa9, 0x86, 0xec, 0x91, 0x9c, 0x86, 0xca, 0x8e, 0x6e, - 0x77, 0xe9, 0x44, 0x91, 0xa7, 0x89, 0x97, 0x8f, 0x15, 0x9f, 0x2f, 0x68, 0xbf, 0x51, 0x82, 0x11, - 0x35, 0x17, 0x34, 0x2d, 0x67, 0x9b, 0xbc, 0x04, 0x25, 0xdb, 0x6d, 0xc9, 0x19, 0xed, 0xe3, 0x03, - 0xcf, 0x2f, 0xcb, 0x6e, 0xab, 0x31, 0xbc, 0xbf, 0x37, 0x59, 0x5a, 0x76, 0x5b, 0xc8, 0x10, 0x89, - 0x01, 0x95, 0x6d, 0x7d, 0x73, 0x5b, 0xe7, 0x75, 0xa8, 0xcf, 0x34, 0x06, 0x86, 0xbe, 0xc6, 0x50, - 0x58, 0x5d, 0x1b, 0xb5, 0xfd, 0xbd, 0xc9, 0x0a, 0x7f, 0x45, 0x81, 0x4d, 0x5c, 0xa8, 0x6d, 0xd8, - 0xba, 0xb1, 0xbd, 0xe5, 0xda, 0x74, 0xa2, 0x94, 0x93, 0x51, 0x43, 0x21, 0x89, 0x06, 0x08, 0x5f, - 0x31, 0xe2, 0x41, 0x0c, 0x18, 0xea, 0x9a, 0xbe, 0xe5, 0x6c, 0xcb, 0xd9, 0xe9, 0x85, 0x81, 0xb9, - 0xad, 0xcf, 0xf3, 0x6f, 0x82, 0xfd, 0xbd, 0xc9, 0x21, 0xf1, 0x8c, 0x12, 0x5a, 0xfb, 0x5e, 0x1d, - 0xc6, 0x54, 0x23, 0xdd, 0xa4, 0x5e, 0x40, 0x6f, 0x93, 0x8b, 0x50, 0x76, 0xd8, 0xa0, 0xe1, 0x8d, - 0xdc, 0x18, 0x91, 0x7d, 0xb2, 0xcc, 0x07, 0x0b, 0xcf, 0x61, 0x35, 0x13, 0x0b, 0xae, 0x14, 0xf8, - 0xe0, 0x35, 0x6b, 0x72, 0x18, 0x51, 0x33, 0xf1, 0x8c, 0x12, 0x9a, 0xbc, 0x02, 0x65, 0xfe, 0xf1, - 0x42, 0xd4, 0x9f, 0x18, 0x9c, 0x05, 0xfb, 0xf4, 0x2a, 0xfb, 0x02, 0xfe, 0xe1, 0x1c, 0x94, 0x75, - 0xc5, 0xae, 0xb9, 0x29, 0x05, 0xfb, 0xf1, 0x1c, 0x82, 0x5d, 0x10, 0x5d, 0x71, 0x7d, 0x7e, 0x01, - 0x19, 0x22, 0xf9, 0xa5, 0x02, 0x9c, 0x34, 0x5c, 0x27, 0xd0, 0x99, 0x12, 0xa0, 0x96, 0xbf, 0x89, - 0x0a, 0xe7, 0x73, 0x75, 0x60, 0x3e, 0x73, 0x69, 0xc4, 0xc6, 0xc3, 0x6c, 0x36, 0xef, 0x49, 0xc6, - 0x5e, 0xde, 0xe4, 0x57, 0x0a, 0xf0, 0x30, 0x9b, 0x65, 0x7b, 0x88, 0xf9, 0xda, 0x70, 0xb4, 0xb5, - 0x3a, 0xbb, 0xbf, 0x37, 0xf9, 0xf0, 0x52, 0x16, 0x33, 0xcc, 0xae, 0x03, 0xab, 
0xdd, 0x29, 0xbd, - 0x57, 0x61, 0xe0, 0xeb, 0x4e, 0x7d, 0x66, 0xf9, 0x28, 0x95, 0x90, 0xc6, 0xa3, 0xb2, 0x2b, 0x67, - 0xe9, 0x5c, 0x98, 0x55, 0x0b, 0x72, 0x19, 0x86, 0x77, 0x5c, 0xbb, 0xdb, 0xa6, 0xfe, 0x44, 0x95, - 0xaf, 0xdc, 0xe7, 0xb2, 0x26, 0xd4, 0x9b, 0x9c, 0xa4, 0x71, 0x42, 0xc2, 0x0f, 0x8b, 0x77, 0x1f, - 0x55, 0x59, 0x62, 0xc1, 0x90, 0x6d, 0xb5, 0xad, 0xc0, 0xe7, 0x4b, 0x5a, 0x7d, 0xe6, 0xf2, 0xc0, - 0x9f, 0x25, 0x86, 0xe8, 0x32, 0x07, 0x13, 0xa3, 0x46, 0x3c, 0xa3, 0x64, 0xc0, 0xa6, 0x42, 0xdf, - 0xd0, 0x6d, 0xb1, 0xe4, 0xd5, 0x67, 0x3e, 0x39, 0xf8, 0xb0, 0x61, 0x28, 0x8d, 0x51, 0xf9, 0x4d, - 0x15, 0xfe, 0x8a, 0x02, 0x9b, 0x7c, 0x16, 0xc6, 0x12, 0xad, 0xe9, 0x4f, 0xd4, 0xb9, 0x74, 0x1e, - 0xcb, 0x92, 0x4e, 0x48, 0xd5, 0x38, 0x23, 0xc1, 0xc6, 0x12, 0x3d, 0xc4, 0xc7, 0x14, 0x18, 0xb9, - 0x06, 0x55, 0xdf, 0x32, 0xa9, 0xa1, 0x7b, 0xfe, 0xc4, 0xc8, 0x41, 0x80, 0xc7, 0x25, 0x70, 0xb5, - 0x29, 0x8b, 0x61, 0x08, 0x40, 0xa6, 0x00, 0x3a, 0xba, 0x17, 0x58, 0x42, 0x85, 0x1c, 0xe5, 0xea, - 0xcc, 0xd8, 0xfe, 0xde, 0x24, 0xac, 0x86, 0xa9, 0x18, 0xa3, 0x60, 0xf4, 0xac, 0xec, 0x92, 0xd3, - 0xe9, 0x06, 0xfe, 0xc4, 0xd8, 0xc5, 0xd2, 0xa5, 0x9a, 0xa0, 0x6f, 0x86, 0xa9, 0x18, 0xa3, 0x20, - 0xdf, 0x2a, 0xc0, 0xa3, 0xd1, 0x6b, 0xef, 0x20, 0x3b, 0x71, 0xe4, 0x83, 0x6c, 0x72, 0x7f, 0x6f, - 0xf2, 0xd1, 0x66, 0x7f, 0x96, 0x78, 0xb7, 0xfa, 0x68, 0x2f, 0xc1, 0xe8, 0x6c, 0x37, 0xd8, 0x72, - 0x3d, 0xeb, 0x0d, 0xae, 0x0e, 0x93, 0x05, 0xa8, 0x04, 0x5c, 0xad, 0x11, 0xeb, 0xf2, 0x13, 0x59, - 0xa2, 0x16, 0x2a, 0xe6, 0x35, 0xba, 0xab, 0xb4, 0x01, 0xb1, 0x3e, 0x0a, 0x35, 0x47, 0x14, 0xd7, - 0x7e, 0xbd, 0x00, 0xb5, 0x86, 0xee, 0x5b, 0x06, 0x83, 0x27, 0x73, 0x50, 0xee, 0xfa, 0xd4, 0x3b, - 0x1c, 0x28, 0x9f, 0xa5, 0xd7, 0x7d, 0xea, 0x21, 0x2f, 0x4c, 0x6e, 0x40, 0xb5, 0xa3, 0xfb, 0xfe, - 0x2d, 0xd7, 0x33, 0xe5, 0x4a, 0x73, 0x40, 0x20, 0xa1, 0xaf, 0xca, 0xa2, 0x18, 0x82, 0x68, 0x75, - 0x88, 0x96, 0x5a, 0xed, 0x07, 0x05, 0x38, 0xd5, 0xe8, 0x6e, 0x6e, 0x52, 0x4f, 0xaa, 0x67, 0x42, - 0xf1, 0x21, 0x14, 
0x2a, 0x1e, 0x35, 0x2d, 0x5f, 0xd6, 0x7d, 0x7e, 0xe0, 0xa6, 0x43, 0x86, 0x22, - 0xf5, 0x2c, 0x2e, 0x2f, 0x9e, 0x80, 0x02, 0x9d, 0x74, 0xa1, 0xf6, 0x1a, 0x0d, 0xfc, 0xc0, 0xa3, - 0x7a, 0x5b, 0x7e, 0xdd, 0x95, 0x81, 0x59, 0x5d, 0xa5, 0x41, 0x93, 0x23, 0xc5, 0xd5, 0xba, 0x30, - 0x11, 0x23, 0x4e, 0xda, 0x1f, 0x55, 0x60, 0x64, 0xce, 0x6d, 0x6f, 0x58, 0x0e, 0x35, 0x2f, 0x9b, - 0x2d, 0x4a, 0x5e, 0x85, 0x32, 0x35, 0x5b, 0x54, 0x7e, 0xed, 0xe0, 0xeb, 0x2c, 0x03, 0x8b, 0xb4, - 0x05, 0xf6, 0x86, 0x1c, 0x98, 0x2c, 0xc3, 0xd8, 0xa6, 0xe7, 0xb6, 0xc5, 0xd4, 0xb5, 0xb6, 0xdb, - 0x91, 0xaa, 0x62, 0xe3, 0x7f, 0xa8, 0xe9, 0x60, 0x21, 0x91, 0x7b, 0x67, 0x6f, 0x12, 0xa2, 0x37, - 0x4c, 0x95, 0x25, 0x2f, 0xc3, 0x44, 0x94, 0x12, 0x8e, 0xe1, 0x39, 0xa6, 0x57, 0x73, 0x55, 0xa1, - 0xd2, 0x38, 0xbf, 0xbf, 0x37, 0x39, 0xb1, 0xd0, 0x87, 0x06, 0xfb, 0x96, 0x26, 0x6f, 0x16, 0x60, - 0x3c, 0xca, 0x14, 0xf3, 0xaa, 0xd4, 0x10, 0x8e, 0x68, 0xc2, 0xe6, 0x1b, 0x90, 0x85, 0x14, 0x0b, - 0xec, 0x61, 0x4a, 0x16, 0x60, 0x24, 0x70, 0x63, 0xf2, 0xaa, 0x70, 0x79, 0x69, 0x6a, 0xc7, 0xbc, - 0xe6, 0xf6, 0x95, 0x56, 0xa2, 0x1c, 0x41, 0x38, 0xa3, 0xde, 0x53, 0x92, 0x1a, 0xe2, 0x92, 0x3a, - 0xb7, 0xbf, 0x37, 0x79, 0x66, 0x2d, 0x93, 0x02, 0xfb, 0x94, 0x24, 0x5f, 0x2a, 0xc0, 0x98, 0xca, - 0x92, 0x32, 0x1a, 0x3e, 0x4a, 0x19, 0x11, 0xd6, 0x23, 0xd6, 0x12, 0x0c, 0x30, 0xc5, 0x50, 0xfb, - 0x51, 0x19, 0x6a, 0xe1, 0xcc, 0x46, 0x1e, 0x87, 0x0a, 0xdf, 0x0b, 0x4b, 0x85, 0x35, 0x5c, 0xb2, - 0xf8, 0x96, 0x19, 0x45, 0x1e, 0x79, 0x02, 0x86, 0x0d, 0xb7, 0xdd, 0xd6, 0x1d, 0x93, 0xdb, 0x37, - 0x6a, 0x8d, 0x3a, 0x5b, 0xa9, 0xe7, 0x44, 0x12, 0xaa, 0x3c, 0x72, 0x1e, 0xca, 0xba, 0xd7, 0x12, - 0xa6, 0x86, 0x9a, 0x98, 0x8f, 0x66, 0xbd, 0x96, 0x8f, 0x3c, 0x95, 0x7c, 0x14, 0x4a, 0xd4, 0xd9, - 0x99, 0x28, 0xf7, 0x57, 0x05, 0x2e, 0x3b, 0x3b, 0x37, 0x75, 0xaf, 0x51, 0x97, 0x75, 0x28, 0x5d, - 0x76, 0x76, 0x90, 0x95, 0x21, 0xcb, 0x30, 0x4c, 0x9d, 0x1d, 0xd6, 0xf6, 0xd2, 0x06, 0xf0, 0x81, - 0x3e, 0xc5, 0x19, 0x89, 0xd4, 0x8a, 0x43, 0x85, 0x42, 
0x26, 0xa3, 0x82, 0x20, 0x9f, 0x86, 0x11, - 0xa1, 0x5b, 0xac, 0xb0, 0x36, 0xf1, 0x27, 0x86, 0x38, 0xe4, 0x64, 0x7f, 0xe5, 0x84, 0xd3, 0x45, - 0x36, 0x97, 0x58, 0xa2, 0x8f, 0x09, 0x28, 0xf2, 0x69, 0xa8, 0x29, 0x73, 0x9a, 0x6a, 0xd9, 0x4c, - 0x73, 0x05, 0x4a, 0x22, 0xa4, 0xaf, 0x77, 0x2d, 0x8f, 0xb6, 0xa9, 0x13, 0xf8, 0x8d, 0x93, 0x6a, - 0x03, 0xab, 0x72, 0x7d, 0x8c, 0xd0, 0xc8, 0x46, 0xaf, 0xdd, 0x45, 0x18, 0x0d, 0x1e, 0xef, 0x33, - 0xab, 0x0f, 0x60, 0x74, 0xf9, 0x1c, 0x9c, 0x08, 0x0d, 0x23, 0x72, 0x6f, 0x2d, 0xcc, 0x08, 0x4f, - 0xb3, 0xe2, 0x4b, 0xc9, 0xac, 0x3b, 0x7b, 0x93, 0x8f, 0x65, 0xec, 0xae, 0x23, 0x02, 0x4c, 0x83, - 0x69, 0x7f, 0x50, 0x82, 0x5e, 0xb5, 0x3b, 0x29, 0xb4, 0xc2, 0x51, 0x0b, 0x2d, 0xfd, 0x41, 0x62, - 0xfa, 0x7c, 0x5e, 0x16, 0xcb, 0xff, 0x51, 0x59, 0x0d, 0x53, 0x3a, 0xea, 0x86, 0x79, 0x50, 0xc6, - 0x8e, 0xf6, 0x56, 0x19, 0xc6, 0xe6, 0x75, 0xda, 0x76, 0x9d, 0x7b, 0x6e, 0x42, 0x0a, 0x0f, 0xc4, - 0x26, 0xe4, 0x12, 0x54, 0x3d, 0xda, 0xb1, 0x2d, 0x43, 0xf7, 0x79, 0xd3, 0x4b, 0x73, 0x1c, 0xca, - 0x34, 0x0c, 0x73, 0xfb, 0x6c, 0x3e, 0x4b, 0x0f, 0xe4, 0xe6, 0xb3, 0xfc, 0xee, 0x6f, 0x3e, 0xb5, - 0x2f, 0x15, 0x81, 0x2b, 0x2a, 0xe4, 0x22, 0x94, 0xd9, 0x22, 0x9c, 0x36, 0x79, 0xf0, 0x8e, 0xc3, - 0x73, 0xc8, 0x39, 0x28, 0x06, 0xae, 0x1c, 0x79, 0x20, 0xf3, 0x8b, 0x6b, 0x2e, 0x16, 0x03, 0x97, - 0xbc, 0x01, 0x60, 0xb8, 0x8e, 0x69, 0x29, 0x2b, 0x75, 0xbe, 0x0f, 0x5b, 0x70, 0xbd, 0x5b, 0xba, - 0x67, 0xce, 0x85, 0x88, 0x62, 0xfb, 0x11, 0xbd, 0x63, 0x8c, 0x1b, 0x79, 0x01, 0x86, 0x5c, 0x67, - 0xa1, 0x6b, 0xdb, 0x5c, 0xa0, 0xb5, 0xc6, 0xff, 0x64, 0x7b, 0xc2, 0x1b, 0x3c, 0xe5, 0xce, 0xde, - 0xe4, 0x59, 0xa1, 0xdf, 0xb2, 0xb7, 0x97, 0x3c, 0x2b, 0xb0, 0x9c, 0x56, 0x33, 0xf0, 0xf4, 0x80, - 0xb6, 0x76, 0x51, 0x16, 0xd3, 0xbe, 0x5a, 0x80, 0xfa, 0x82, 0x75, 0x9b, 0x9a, 0x2f, 0x59, 0x8e, - 0xe9, 0xde, 0x22, 0x08, 0x43, 0x36, 0x75, 0x5a, 0xc1, 0x96, 0xec, 0xfd, 0x53, 0xb1, 0xb1, 0x16, - 0x1e, 0x6e, 0x44, 0xf5, 0x6f, 0xd3, 0x40, 0x67, 0xa3, 0x6f, 0xbe, 0x2b, 0xcd, 0xef, 0x62, 
0x53, - 0xca, 0x11, 0x50, 0x22, 0x91, 0x69, 0xa8, 0x09, 0xed, 0xd3, 0x72, 0x5a, 0x5c, 0x86, 0xd5, 0x68, - 0xd2, 0x6b, 0xaa, 0x0c, 0x8c, 0x68, 0xb4, 0x5d, 0x38, 0xd9, 0x23, 0x06, 0x62, 0x42, 0x39, 0xd0, - 0x5b, 0x6a, 0x7e, 0x5d, 0x18, 0x58, 0xc0, 0x6b, 0x7a, 0x2b, 0x26, 0x5c, 0xbe, 0xc6, 0xaf, 0xe9, - 0x6c, 0x8d, 0x67, 0xe8, 0xda, 0x4f, 0x0a, 0x50, 0x5d, 0xe8, 0x3a, 0x06, 0xdf, 0x1b, 0xdd, 0xdb, - 0x14, 0xa6, 0x14, 0x86, 0x62, 0xa6, 0xc2, 0xd0, 0x85, 0xa1, 0xed, 0x5b, 0xa1, 0x42, 0x51, 0x9f, - 0x59, 0x19, 0xbc, 0x57, 0xc8, 0x2a, 0x4d, 0x5d, 0xe3, 0x78, 0xe2, 0x0c, 0x65, 0x4c, 0x56, 0x68, - 0xe8, 0xda, 0x4b, 0x9c, 0xa9, 0x64, 0x76, 0xee, 0xa3, 0x50, 0x8f, 0x91, 0x1d, 0xca, 0x68, 0xfb, - 0x3b, 0x65, 0x18, 0x5a, 0x6c, 0x36, 0x67, 0x57, 0x97, 0xc8, 0x33, 0x50, 0x97, 0xe6, 0xf5, 0xeb, - 0x91, 0x0c, 0xc2, 0xd3, 0x95, 0x66, 0x94, 0x85, 0x71, 0x3a, 0xa6, 0x8e, 0x79, 0x54, 0xb7, 0xdb, - 0x72, 0xb0, 0x84, 0xea, 0x18, 0xb2, 0x44, 0x14, 0x79, 0x44, 0x87, 0x31, 0xb6, 0xc3, 0x63, 0x22, - 0x14, 0xbb, 0x37, 0x39, 0x6c, 0x0e, 0xb8, 0xbf, 0xe3, 0x4a, 0xe2, 0x7a, 0x02, 0x00, 0x53, 0x80, - 0xe4, 0x79, 0xa8, 0xea, 0xdd, 0x60, 0x8b, 0x2b, 0xd0, 0x62, 0x6c, 0x9c, 0xe7, 0xa7, 0x0f, 0x32, - 0xed, 0xce, 0xde, 0xe4, 0xc8, 0x35, 0x6c, 0x3c, 0xa3, 0xde, 0x31, 0xa4, 0x66, 0x95, 0x53, 0x3b, - 0x46, 0x59, 0xb9, 0xca, 0xa1, 0x2b, 0xb7, 0x9a, 0x00, 0xc0, 0x14, 0x20, 0x79, 0x05, 0x46, 0xb6, - 0xe9, 0x6e, 0xa0, 0x6f, 0x48, 0x06, 0x43, 0x87, 0x61, 0x30, 0xce, 0x54, 0xb8, 0x6b, 0xb1, 0xe2, - 0x98, 0x00, 0x23, 0x3e, 0x9c, 0xde, 0xa6, 0xde, 0x06, 0xf5, 0x5c, 0xb9, 0xfb, 0x94, 0x4c, 0x86, - 0x0f, 0xc3, 0x64, 0x62, 0x7f, 0x6f, 0xf2, 0xf4, 0xb5, 0x0c, 0x18, 0xcc, 0x04, 0xd7, 0x7e, 0x5c, - 0x84, 0x13, 0x8b, 0xe2, 0x7c, 0xd3, 0xf5, 0xc4, 0x22, 0x4c, 0xce, 0x42, 0xc9, 0xeb, 0x74, 0x79, - 0xcf, 0x29, 0x09, 0x3b, 0x29, 0xae, 0xae, 0x23, 0x4b, 0x23, 0x2f, 0x43, 0xd5, 0x94, 0x53, 0x86, - 0xdc, 0xfc, 0x1e, 0x76, 0xa2, 0xe1, 0x8b, 0xa0, 0x7a, 0xc3, 0x10, 0x8d, 0x69, 0xfa, 0x6d, 0xbf, - 0xd5, 0xb4, 0xde, 0xa0, 0x72, 
0x3f, 0xc8, 0x35, 0xfd, 0x15, 0x91, 0x84, 0x2a, 0x8f, 0xad, 0xaa, - 0xdb, 0x74, 0x57, 0xec, 0x86, 0xca, 0xd1, 0xaa, 0x7a, 0x4d, 0xa6, 0x61, 0x98, 0x4b, 0x26, 0xd5, - 0x60, 0x61, 0xbd, 0xa0, 0x2c, 0x76, 0xf2, 0x37, 0x59, 0x82, 0x1c, 0x37, 0x6c, 0xca, 0x7c, 0xcd, - 0x0a, 0x02, 0xea, 0xc9, 0x66, 0x1c, 0x68, 0xca, 0xbc, 0xca, 0x11, 0x50, 0x22, 0x91, 0x0f, 0x41, - 0x8d, 0x83, 0x37, 0x6c, 0x77, 0x83, 0x37, 0x5c, 0x4d, 0xec, 0xe9, 0x6f, 0xaa, 0x44, 0x8c, 0xf2, - 0xb5, 0x9f, 0x16, 0xe1, 0xcc, 0x22, 0x0d, 0x84, 0x56, 0x33, 0x4f, 0x3b, 0xb6, 0xbb, 0xcb, 0x54, - 0x4b, 0xa4, 0xaf, 0x93, 0x17, 0x01, 0x2c, 0x7f, 0xa3, 0xb9, 0x63, 0xf0, 0x71, 0x20, 0xc6, 0xf0, - 0x45, 0x39, 0x24, 0x61, 0xa9, 0xd9, 0x90, 0x39, 0x77, 0x12, 0x6f, 0x18, 0x2b, 0x13, 0x6d, 0xaf, - 0x8a, 0x77, 0xd9, 0x5e, 0x35, 0x01, 0x3a, 0x91, 0x82, 0x5a, 0xe2, 0x94, 0xff, 0x47, 0xb1, 0x39, - 0x8c, 0x6e, 0x1a, 0x83, 0xc9, 0xa3, 0x32, 0x3a, 0x30, 0x6e, 0xd2, 0x4d, 0xbd, 0x6b, 0x07, 0xa1, - 0x52, 0x2d, 0x07, 0xf1, 0xc1, 0xf5, 0xf2, 0xf0, 0xec, 0x75, 0x3e, 0x85, 0x84, 0x3d, 0xd8, 0xda, - 0xef, 0x96, 0xe0, 0xdc, 0x22, 0x0d, 0x42, 0x8b, 0x8b, 0x9c, 0x1d, 0x9b, 0x1d, 0x6a, 0xb0, 0x56, - 0x78, 0xb3, 0x00, 0x43, 0xb6, 0xbe, 0x41, 0x6d, 0xb6, 0x7a, 0xb1, 0xaf, 0x79, 0x75, 0xe0, 0x85, - 0xa0, 0x3f, 0x97, 0xa9, 0x65, 0xce, 0x21, 0xb5, 0x34, 0x88, 0x44, 0x94, 0xec, 0xd9, 0xa4, 0x6e, - 0xd8, 0x5d, 0x3f, 0xa0, 0xde, 0xaa, 0xeb, 0x05, 0x52, 0x9f, 0x0c, 0x27, 0xf5, 0xb9, 0x28, 0x0b, - 0xe3, 0x74, 0x64, 0x06, 0xc0, 0xb0, 0x2d, 0xea, 0x04, 0xbc, 0x94, 0x18, 0x57, 0x44, 0xb5, 0xef, - 0x5c, 0x98, 0x83, 0x31, 0x2a, 0xc6, 0xaa, 0xed, 0x3a, 0x56, 0xe0, 0x0a, 0x56, 0xe5, 0x24, 0xab, - 0x95, 0x28, 0x0b, 0xe3, 0x74, 0xbc, 0x18, 0x0d, 0x3c, 0xcb, 0xf0, 0x79, 0xb1, 0x4a, 0xaa, 0x58, - 0x94, 0x85, 0x71, 0x3a, 0xb6, 0xe6, 0xc5, 0xbe, 0xff, 0x50, 0x6b, 0xde, 0x37, 0x6b, 0x70, 0x21, - 0x21, 0xd6, 0x40, 0x0f, 0xe8, 0x66, 0xd7, 0x6e, 0xd2, 0x40, 0x35, 0xe0, 0x80, 0x6b, 0xe1, 0xcf, - 0x47, 0xed, 0x2e, 0xbc, 0x2a, 0x8c, 0xa3, 0x69, 0xf7, 0x9e, 0x0a, 
0x1e, 0xa8, 0xed, 0xa7, 0xa1, - 0xe6, 0xe8, 0x81, 0xcf, 0x07, 0xae, 0x1c, 0xa3, 0xa1, 0x1a, 0x76, 0x5d, 0x65, 0x60, 0x44, 0x43, - 0x56, 0xe1, 0xb4, 0x14, 0xf1, 0xe5, 0xdb, 0x1d, 0xd7, 0x0b, 0xa8, 0x27, 0xca, 0xca, 0xe5, 0x54, - 0x96, 0x3d, 0xbd, 0x92, 0x41, 0x83, 0x99, 0x25, 0xc9, 0x0a, 0x9c, 0x32, 0xc4, 0x49, 0x33, 0xb5, - 0x5d, 0xdd, 0x54, 0x80, 0xc2, 0xc0, 0x15, 0x6e, 0x8d, 0xe6, 0x7a, 0x49, 0x30, 0xab, 0x5c, 0xba, - 0x37, 0x0f, 0x0d, 0xd4, 0x9b, 0x87, 0x07, 0xe9, 0xcd, 0xd5, 0xc1, 0x7a, 0x73, 0xed, 0x60, 0xbd, - 0x99, 0x49, 0x9e, 0xf5, 0x23, 0xea, 0x31, 0xf5, 0x44, 0xac, 0xb0, 0x31, 0x47, 0x86, 0x50, 0xf2, - 0xcd, 0x0c, 0x1a, 0xcc, 0x2c, 0x49, 0x36, 0xe0, 0x9c, 0x48, 0xbf, 0xec, 0x18, 0xde, 0x6e, 0x87, - 0x2d, 0x3c, 0x31, 0xdc, 0x7a, 0xc2, 0xc2, 0x78, 0xae, 0xd9, 0x97, 0x12, 0xef, 0x82, 0x42, 0xfe, - 0x2f, 0x8c, 0x8a, 0x56, 0x5a, 0xd1, 0x3b, 0x1c, 0x56, 0xb8, 0x35, 0x3c, 0x2c, 0x61, 0x47, 0xe7, - 0xe2, 0x99, 0x98, 0xa4, 0x25, 0xb3, 0x70, 0xa2, 0xb3, 0x63, 0xb0, 0xc7, 0xa5, 0xcd, 0xeb, 0x94, - 0x9a, 0xd4, 0xe4, 0xa7, 0x35, 0xb5, 0xc6, 0x23, 0xca, 0xd0, 0xb1, 0x9a, 0xcc, 0xc6, 0x34, 0x3d, - 0x79, 0x1e, 0x46, 0xfc, 0x40, 0xf7, 0x02, 0x69, 0xd6, 0x9b, 0x18, 0x13, 0x6e, 0x1f, 0xca, 0xea, - 0xd5, 0x8c, 0xe5, 0x61, 0x82, 0x32, 0x73, 0xbd, 0x38, 0x71, 0x7c, 0xeb, 0x45, 0x9e, 0xd9, 0xea, - 0xcf, 0x8a, 0x70, 0x71, 0x91, 0x06, 0x2b, 0xae, 0x23, 0x8d, 0xa2, 0x59, 0xcb, 0xfe, 0x81, 0x6c, - 0xa2, 0xc9, 0x45, 0xbb, 0x78, 0xa4, 0x8b, 0x76, 0xe9, 0x88, 0x16, 0xed, 0xf2, 0x31, 0x2e, 0xda, - 0xbf, 0x5f, 0x84, 0x47, 0x12, 0x92, 0x5c, 0x75, 0x4d, 0x35, 0xe1, 0xbf, 0x2f, 0xc0, 0x03, 0x08, - 0xf0, 0x8e, 0xd0, 0x3b, 0xf9, 0xb1, 0x56, 0x4a, 0xe3, 0xf9, 0x4a, 0x5a, 0xe3, 0x79, 0x25, 0xcf, - 0xca, 0x97, 0xc1, 0xe1, 0x40, 0x2b, 0xde, 0x55, 0x20, 0x9e, 0x3c, 0x84, 0x13, 0xa6, 0x9f, 0x98, - 0xd2, 0x13, 0xfa, 0x95, 0x61, 0x0f, 0x05, 0x66, 0x94, 0x22, 0x4d, 0x78, 0xd8, 0xa7, 0x4e, 0x60, - 0x39, 0xd4, 0x4e, 0xc2, 0x09, 0x6d, 0xe8, 0x31, 0x09, 0xf7, 0x70, 0x33, 0x8b, 0x08, 0xb3, 0xcb, - 0xe6, 
0x99, 0x07, 0xfe, 0x12, 0xb8, 0xca, 0x29, 0x44, 0x73, 0x64, 0x1a, 0xcb, 0x9b, 0x69, 0x8d, - 0xe5, 0xd5, 0xfc, 0xed, 0x36, 0x98, 0xb6, 0x32, 0x03, 0xc0, 0x5b, 0x21, 0xae, 0xae, 0x84, 0x8b, - 0x34, 0x86, 0x39, 0x18, 0xa3, 0x62, 0x0b, 0x90, 0x92, 0x73, 0x5c, 0x53, 0x09, 0x17, 0xa0, 0x66, - 0x3c, 0x13, 0x93, 0xb4, 0x7d, 0xb5, 0x9d, 0xca, 0xc0, 0xda, 0xce, 0x55, 0x20, 0x09, 0xc3, 0xa3, - 0xc0, 0x1b, 0x4a, 0xba, 0x35, 0x2e, 0xf5, 0x50, 0x60, 0x46, 0xa9, 0x3e, 0x5d, 0x79, 0xf8, 0x68, - 0xbb, 0x72, 0x75, 0xf0, 0xae, 0x4c, 0x5e, 0x85, 0xb3, 0x9c, 0x95, 0x94, 0x4f, 0x12, 0x58, 0xe8, - 0x3d, 0x1f, 0x90, 0xc0, 0x67, 0xb1, 0x1f, 0x21, 0xf6, 0xc7, 0x60, 0xed, 0x63, 0x78, 0xd4, 0x64, - 0xcc, 0x75, 0xbb, 0xbf, 0x4e, 0x34, 0x97, 0x41, 0x83, 0x99, 0x25, 0x59, 0x17, 0x0b, 0x58, 0x37, - 0xd4, 0x37, 0x6c, 0x6a, 0x4a, 0xb7, 0xce, 0xb0, 0x8b, 0xad, 0x2d, 0x37, 0x65, 0x0e, 0xc6, 0xa8, - 0xb2, 0xd4, 0x94, 0x91, 0x43, 0xaa, 0x29, 0x8b, 0xdc, 0x4a, 0xbf, 0x99, 0xd0, 0x86, 0xa4, 0xae, - 0x13, 0x3a, 0xea, 0xce, 0xa5, 0x09, 0xb0, 0xb7, 0x0c, 0xd7, 0x12, 0x0d, 0xcf, 0xea, 0x04, 0x7e, - 0x12, 0x6b, 0x2c, 0xa5, 0x25, 0x66, 0xd0, 0x60, 0x66, 0x49, 0xa6, 0x9f, 0x6f, 0x51, 0xdd, 0x0e, - 0xb6, 0x92, 0x80, 0x27, 0x92, 0xfa, 0xf9, 0x95, 0x5e, 0x12, 0xcc, 0x2a, 0x97, 0xb9, 0x20, 0x8d, - 0x3f, 0x98, 0x6a, 0xd5, 0x97, 0x4b, 0x70, 0x76, 0x91, 0x06, 0xa1, 0x5f, 0xcd, 0xfb, 0x66, 0x94, - 0x77, 0xc1, 0x8c, 0xf2, 0x8d, 0x0a, 0x9c, 0x5a, 0xa4, 0x41, 0x8f, 0x36, 0xf6, 0xdf, 0x54, 0xfc, - 0x2b, 0x70, 0x2a, 0x72, 0xe5, 0x6a, 0x06, 0xae, 0x27, 0xd6, 0xf2, 0xd4, 0x6e, 0xb9, 0xd9, 0x4b, - 0x82, 0x59, 0xe5, 0xc8, 0xa7, 0xe1, 0x11, 0xbe, 0xd4, 0x3b, 0x2d, 0x61, 0x9f, 0x15, 0xc6, 0x84, - 0xd8, 0x35, 0x81, 0x49, 0x09, 0xf9, 0x48, 0x33, 0x9b, 0x0c, 0xfb, 0x95, 0x27, 0x5f, 0x80, 0x91, - 0x8e, 0xd5, 0xa1, 0xb6, 0xe5, 0x70, 0xfd, 0x2c, 0xb7, 0x4b, 0xc8, 0x6a, 0x0c, 0x2c, 0xda, 0xc0, - 0xc5, 0x53, 0x31, 0xc1, 0x30, 0xb3, 0xa7, 0x56, 0x8f, 0xb1, 0xa7, 0xfe, 0x7b, 0x11, 0x86, 0x17, - 0x3d, 0xb7, 0xdb, 0x69, 0xec, 0x92, 0x16, 
0x0c, 0xdd, 0xe2, 0x87, 0x67, 0xf2, 0x68, 0x6a, 0x70, - 0x77, 0x68, 0x71, 0x06, 0x17, 0xa9, 0x44, 0xe2, 0x1d, 0x25, 0x3c, 0xeb, 0xc4, 0xdb, 0x74, 0x97, - 0x9a, 0xf2, 0x0c, 0x2d, 0xec, 0xc4, 0xd7, 0x58, 0x22, 0x8a, 0x3c, 0xd2, 0x86, 0x13, 0xba, 0x6d, - 0xbb, 0xb7, 0xa8, 0xb9, 0xac, 0x07, 0xd4, 0xa1, 0xbe, 0x3a, 0x92, 0x3c, 0xac, 0x59, 0x9a, 0x9f, - 0xeb, 0xcf, 0x26, 0xa1, 0x30, 0x8d, 0x4d, 0x5e, 0x83, 0x61, 0x3f, 0x70, 0x3d, 0xa5, 0x6c, 0xd5, - 0x67, 0xe6, 0x06, 0x6f, 0xf4, 0xc6, 0xa7, 0x9a, 0x02, 0x4a, 0xd8, 0xec, 0xe5, 0x0b, 0x2a, 0x06, - 0xda, 0xd7, 0x0b, 0x00, 0x57, 0xd6, 0xd6, 0x56, 0xe5, 0xf1, 0x82, 0x09, 0x65, 0xbd, 0x1b, 0x1e, - 0x54, 0x0e, 0x7e, 0x20, 0x98, 0xf0, 0x87, 0x94, 0x67, 0x78, 0xdd, 0x60, 0x0b, 0x39, 0x3a, 0xf9, - 0x20, 0x0c, 0x4b, 0x05, 0x59, 0x8a, 0x3d, 0x74, 0x2d, 0x90, 0x4a, 0x34, 0xaa, 0x7c, 0xed, 0xb7, - 0x8b, 0x00, 0x4b, 0xa6, 0x4d, 0x9b, 0xca, 0x83, 0xbd, 0x16, 0x6c, 0x79, 0xd4, 0xdf, 0x72, 0x6d, - 0x73, 0xc0, 0xd3, 0x54, 0x6e, 0xf3, 0x5f, 0x53, 0x20, 0x18, 0xe1, 0x11, 0x13, 0x46, 0xfc, 0x80, - 0x76, 0x96, 0x9c, 0x80, 0x7a, 0x3b, 0xba, 0x3d, 0xe0, 0x21, 0xca, 0xb8, 0xb0, 0x8b, 0x44, 0x38, - 0x98, 0x40, 0x25, 0x3a, 0xd4, 0x2d, 0xc7, 0x10, 0x03, 0xa4, 0xb1, 0x3b, 0x60, 0x47, 0x3a, 0xc1, - 0x76, 0x1c, 0x4b, 0x11, 0x0c, 0xc6, 0x31, 0xb5, 0xef, 0x17, 0xe1, 0x0c, 0xe7, 0xc7, 0xaa, 0x91, - 0xf0, 0xc7, 0x24, 0xff, 0xbf, 0xe7, 0x1e, 0xdc, 0xff, 0x3e, 0x18, 0x6b, 0x71, 0x8d, 0x6a, 0x85, - 0x06, 0x7a, 0xa4, 0xcf, 0x45, 0x69, 0xb1, 0xcb, 0x6f, 0x5d, 0x28, 0xfb, 0x6c, 0xbe, 0x12, 0xd2, - 0x6b, 0x0e, 0xdc, 0x85, 0xb2, 0x3f, 0x80, 0xcf, 0x5e, 0xe1, 0xa9, 0x31, 0x9f, 0xb5, 0x38, 0x3b, - 0xf2, 0xb3, 0x30, 0xe4, 0x07, 0x7a, 0xd0, 0x55, 0x43, 0x73, 0xfd, 0xa8, 0x19, 0x73, 0xf0, 0x68, - 0x1e, 0x11, 0xef, 0x28, 0x99, 0x6a, 0xdf, 0x2f, 0xc0, 0xb9, 0xec, 0x82, 0xcb, 0x96, 0x1f, 0x90, - 0xff, 0xd7, 0x23, 0xf6, 0x03, 0xb6, 0x38, 0x2b, 0xcd, 0x85, 0x1e, 0x3a, 0x64, 0xab, 0x94, 0x98, - 0xc8, 0x03, 0xa8, 0x58, 0x01, 0x6d, 0xab, 0xfd, 0xe5, 0x8d, 0x23, 0xfe, 0xf4, 
0xd8, 0xd2, 0xce, - 0xb8, 0xa0, 0x60, 0xa6, 0xbd, 0x55, 0xec, 0xf7, 0xc9, 0x7c, 0xf9, 0xb0, 0x93, 0x3e, 0xbf, 0xd7, - 0xf2, 0xf9, 0xfc, 0x26, 0x2b, 0xd4, 0xeb, 0xfa, 0xfb, 0x33, 0xbd, 0xae, 0xbf, 0x37, 0xf2, 0xbb, - 0xfe, 0xa6, 0xc4, 0xd0, 0xd7, 0x03, 0xf8, 0x9d, 0x12, 0x9c, 0xbf, 0x5b, 0xb7, 0x61, 0xeb, 0x99, - 0xec, 0x9d, 0x79, 0xd7, 0xb3, 0xbb, 0xf7, 0x43, 0x32, 0x03, 0x95, 0xce, 0x96, 0xee, 0x2b, 0xa5, - 0x4c, 0x6d, 0x58, 0x2a, 0xab, 0x2c, 0xf1, 0x0e, 0x9b, 0x34, 0xb8, 0x32, 0xc7, 0x5f, 0x51, 0x90, - 0xb2, 0xe9, 0xb8, 0x4d, 0x7d, 0x3f, 0xb2, 0x09, 0x84, 0xd3, 0xf1, 0x8a, 0x48, 0x46, 0x95, 0x4f, - 0x02, 0x18, 0x12, 0x26, 0x66, 0xb9, 0x32, 0x0d, 0xee, 0xc8, 0x95, 0xe1, 0x26, 0x1e, 0x7d, 0x94, - 0x3c, 0xad, 0x90, 0xbc, 0xc8, 0x14, 0x94, 0x83, 0xc8, 0x69, 0x57, 0x6d, 0xcd, 0xcb, 0x19, 0xfa, - 0x29, 0xa7, 0x63, 0x1b, 0x7b, 0x77, 0x83, 0x1b, 0xd5, 0x4d, 0x79, 0x7e, 0x6e, 0xb9, 0x0e, 0x57, - 0xc8, 0x4a, 0xd1, 0xc6, 0xfe, 0x46, 0x0f, 0x05, 0x66, 0x94, 0xd2, 0xfe, 0xa6, 0x0a, 0x67, 0xb2, - 0xfb, 0x03, 0x93, 0xdb, 0x0e, 0xf5, 0x7c, 0x86, 0x5d, 0x48, 0xca, 0xed, 0xa6, 0x48, 0x46, 0x95, - 0xff, 0x9e, 0x76, 0x38, 0xfb, 0x46, 0x01, 0xce, 0x7a, 0xf2, 0x8c, 0xe8, 0x7e, 0x38, 0x9d, 0x3d, - 0x26, 0xcc, 0x19, 0x7d, 0x18, 0x62, 0xff, 0xba, 0x90, 0xdf, 0x2c, 0xc0, 0x44, 0x3b, 0x65, 0xe7, - 0x38, 0xc6, 0x0b, 0x63, 0xdc, 0x2b, 0x7e, 0xa5, 0x0f, 0x3f, 0xec, 0x5b, 0x13, 0xf2, 0x05, 0xa8, - 0x77, 0x58, 0xbf, 0xf0, 0x03, 0xea, 0x18, 0xea, 0xce, 0xd8, 0xe0, 0x23, 0x69, 0x35, 0xc2, 0x52, - 0xae, 0x68, 0x42, 0x3f, 0x88, 0x65, 0x60, 0x9c, 0xe3, 0x03, 0x7e, 0x43, 0xec, 0x12, 0x54, 0x7d, - 0x1a, 0x04, 0x96, 0xd3, 0x12, 0xfb, 0x8d, 0x9a, 0x18, 0x2b, 0x4d, 0x99, 0x86, 0x61, 0x2e, 0xf9, - 0x10, 0xd4, 0xf8, 0x91, 0xd3, 0xac, 0xd7, 0xf2, 0x27, 0x6a, 0xdc, 0x5d, 0x6c, 0x54, 0x38, 0xc0, - 0xc9, 0x44, 0x8c, 0xf2, 0xc9, 0xd3, 0x30, 0xb2, 0xc1, 0x87, 0xaf, 0xbc, 0xce, 0x2b, 0x6c, 0x5c, - 0x5c, 0x5b, 0x6b, 0xc4, 0xd2, 0x31, 0x41, 0x45, 0x66, 0x00, 0x68, 0x78, 0x2e, 0x97, 0xb6, 0x67, - 0x45, 0x27, 0x76, 
0x18, 0xa3, 0x22, 0x8f, 0x41, 0x29, 0xb0, 0x7d, 0x6e, 0xc3, 0xaa, 0x46, 0x5b, - 0xd0, 0xb5, 0xe5, 0x26, 0xb2, 0x74, 0xed, 0xa7, 0x05, 0x38, 0x91, 0xba, 0x5c, 0xc2, 0x8a, 0x74, - 0x3d, 0x5b, 0x4e, 0x23, 0x61, 0x91, 0x75, 0x5c, 0x46, 0x96, 0x4e, 0x5e, 0x95, 0x6a, 0x79, 0x31, - 0x67, 0xe4, 0x82, 0xeb, 0x7a, 0xe0, 0x33, 0x3d, 0xbc, 0x47, 0x23, 0xe7, 0xc7, 0x7c, 0x51, 0x7d, - 0xe4, 0x3a, 0x10, 0x3b, 0xe6, 0x8b, 0xf2, 0x30, 0x41, 0x99, 0x32, 0xf8, 0x95, 0x0f, 0x62, 0xf0, - 0xd3, 0xbe, 0x5a, 0x8c, 0x49, 0x40, 0x6a, 0xf6, 0xf7, 0x90, 0xc0, 0x93, 0x6c, 0x01, 0x0d, 0x17, - 0xf7, 0x5a, 0x7c, 0xfd, 0xe3, 0x8b, 0xb1, 0xcc, 0x25, 0x2f, 0x09, 0xd9, 0x97, 0x72, 0xde, 0x42, - 0x5d, 0x5b, 0x6e, 0x0a, 0xef, 0x2a, 0xd5, 0x6a, 0x61, 0x13, 0x94, 0x8f, 0xa9, 0x09, 0xb4, 0xbf, - 0x28, 0x41, 0xfd, 0xaa, 0xbb, 0xf1, 0x1e, 0xf1, 0xa0, 0xce, 0x5e, 0xa6, 0x8a, 0xef, 0xe2, 0x32, - 0xb5, 0x0e, 0x8f, 0x04, 0x81, 0xdd, 0xa4, 0x86, 0xeb, 0x98, 0xfe, 0xec, 0x66, 0x40, 0xbd, 0x05, - 0xcb, 0xb1, 0xfc, 0x2d, 0x6a, 0xca, 0xe3, 0xa4, 0x47, 0xf7, 0xf7, 0x26, 0x1f, 0x59, 0x5b, 0x5b, - 0xce, 0x22, 0xc1, 0x7e, 0x65, 0xf9, 0xb4, 0xa1, 0x1b, 0xdb, 0xee, 0xe6, 0x26, 0xbf, 0x29, 0x23, - 0x7d, 0x6e, 0xc4, 0xb4, 0x11, 0x4b, 0xc7, 0x04, 0x95, 0xf6, 0x76, 0x11, 0x6a, 0xe1, 0xcd, 0x77, - 0xf2, 0x04, 0x0c, 0x6f, 0x78, 0xee, 0x36, 0xf5, 0xc4, 0xc9, 0x9d, 0xbc, 0x29, 0xd3, 0x10, 0x49, - 0xa8, 0xf2, 0xc8, 0xe3, 0x50, 0x09, 0xdc, 0x8e, 0x65, 0xa4, 0x0d, 0x6a, 0x6b, 0x2c, 0x11, 0x45, - 0xde, 0xf1, 0x75, 0xf0, 0x27, 0x13, 0xaa, 0x5d, 0xad, 0xaf, 0x32, 0xf6, 0x0a, 0x94, 0x7d, 0xdd, - 0xb7, 0xe5, 0x7a, 0x9a, 0xe3, 0x12, 0xf9, 0x6c, 0x73, 0x59, 0x5e, 0x22, 0x9f, 0x6d, 0x2e, 0x23, - 0x07, 0xd5, 0x7e, 0x54, 0x84, 0xba, 0x90, 0x9b, 0x98, 0x15, 0x8e, 0x52, 0x72, 0x2f, 0x70, 0x57, - 0x0a, 0xbf, 0xdb, 0xa6, 0x1e, 0x37, 0x33, 0xc9, 0x49, 0x2e, 0x7e, 0x3e, 0x10, 0x65, 0x86, 0xee, - 0x14, 0x51, 0x92, 0x12, 0x7d, 0xf9, 0x18, 0x45, 0x5f, 0x39, 0x90, 0xe8, 0x87, 0x8e, 0x43, 0xf4, - 0x6f, 0x16, 0xa1, 0xb6, 0x6c, 0x6d, 0x52, 0x63, 0xd7, 
0xb0, 0xf9, 0x9d, 0x40, 0x93, 0xda, 0x34, - 0xa0, 0x8b, 0x9e, 0x6e, 0xd0, 0x55, 0xea, 0x59, 0x3c, 0x66, 0x0b, 0x1b, 0x1f, 0x7c, 0x06, 0x92, - 0x77, 0x02, 0xe7, 0xfb, 0xd0, 0x60, 0xdf, 0xd2, 0x64, 0x09, 0x46, 0x4c, 0xea, 0x5b, 0x1e, 0x35, - 0x57, 0x63, 0x1b, 0x95, 0x27, 0xd4, 0x52, 0x33, 0x1f, 0xcb, 0xbb, 0xb3, 0x37, 0x39, 0xaa, 0x0c, - 0x94, 0x62, 0xc7, 0x92, 0x28, 0xca, 0x86, 0x7c, 0x47, 0xef, 0xfa, 0x59, 0x75, 0x8c, 0x0d, 0xf9, - 0xd5, 0x6c, 0x12, 0xec, 0x57, 0x56, 0xab, 0x40, 0x69, 0xd9, 0x6d, 0x69, 0x6f, 0x95, 0x20, 0x0c, - 0xee, 0x43, 0x7e, 0xae, 0x00, 0x75, 0xdd, 0x71, 0xdc, 0x40, 0x06, 0xce, 0x11, 0x27, 0xf0, 0x98, - 0x3b, 0x86, 0xd0, 0xd4, 0x6c, 0x04, 0x2a, 0x0e, 0x6f, 0xc3, 0x03, 0xe5, 0x58, 0x0e, 0xc6, 0x79, - 0x93, 0x6e, 0xea, 0x3c, 0x79, 0x25, 0x7f, 0x2d, 0x0e, 0x70, 0x7a, 0x7c, 0xee, 0x93, 0x30, 0x9e, - 0xae, 0xec, 0x61, 0x8e, 0x83, 0x72, 0x1d, 0xcc, 0x17, 0x01, 0x22, 0x9f, 0x92, 0xfb, 0x60, 0xc4, - 0xb2, 0x12, 0x46, 0xac, 0xc5, 0xc1, 0x05, 0x1c, 0x56, 0xba, 0xaf, 0xe1, 0xea, 0xf5, 0x94, 0xe1, - 0x6a, 0xe9, 0x28, 0x98, 0xdd, 0xdd, 0x58, 0xf5, 0x5b, 0x05, 0x18, 0x8f, 0x88, 0xe5, 0x0d, 0xd9, - 0xe7, 0x60, 0xd4, 0xa3, 0xba, 0xd9, 0xd0, 0x03, 0x63, 0x8b, 0xbb, 0x7a, 0x17, 0xb8, 0x6f, 0xf6, - 0xc9, 0xfd, 0xbd, 0xc9, 0x51, 0x8c, 0x67, 0x60, 0x92, 0x8e, 0xe8, 0x50, 0x67, 0x09, 0x6b, 0x56, - 0x9b, 0xba, 0xdd, 0x60, 0x40, 0xab, 0x29, 0xdf, 0xb0, 0x60, 0x04, 0x83, 0x71, 0x4c, 0xed, 0x9d, - 0x02, 0x8c, 0xc5, 0x2b, 0x7c, 0xec, 0x16, 0xb5, 0xad, 0xa4, 0x45, 0x6d, 0xee, 0x08, 0xda, 0xa4, - 0x8f, 0x15, 0xed, 0xc7, 0xd5, 0xf8, 0xa7, 0x71, 0xcb, 0x59, 0xdc, 0x58, 0x50, 0xb8, 0xab, 0xb1, - 0xe0, 0xbd, 0x1f, 0x35, 0xa6, 0x9f, 0x96, 0x5b, 0x7e, 0x80, 0xb5, 0xdc, 0x77, 0x33, 0xf4, 0x4c, - 0x2c, 0x7c, 0xca, 0x50, 0x8e, 0xf0, 0x29, 0xed, 0x30, 0x7c, 0xca, 0xf0, 0x91, 0x4d, 0x3a, 0x07, - 0x09, 0xa1, 0x52, 0xbd, 0xaf, 0x21, 0x54, 0x6a, 0xc7, 0x15, 0x42, 0x05, 0xf2, 0x86, 0x50, 0xf9, - 0x4a, 0x01, 0xc6, 0xcc, 0xc4, 0x8d, 0x59, 0x6e, 0x5b, 0xc8, 0xb3, 0xd4, 0x24, 0x2f, 0xe0, 
0x8a, - 0x2b, 0x53, 0xc9, 0x34, 0x4c, 0xb1, 0xd4, 0x7e, 0x58, 0x8e, 0xaf, 0x03, 0xf7, 0xdb, 0x54, 0xfd, - 0x6c, 0xd2, 0x54, 0x7d, 0x31, 0x6d, 0xaa, 0x3e, 0x11, 0xf3, 0x22, 0x8d, 0x9b, 0xab, 0x3f, 0x1c, - 0x9b, 0x1e, 0xd9, 0x9c, 0x34, 0x1a, 0x49, 0x3a, 0x63, 0x8a, 0xfc, 0x30, 0x54, 0x7d, 0x15, 0x86, - 0x51, 0x6c, 0x6c, 0xa2, 0x76, 0x51, 0x21, 0x12, 0x43, 0x0a, 0xa6, 0x89, 0x7b, 0x54, 0xf7, 0x5d, - 0x27, 0xad, 0x89, 0x23, 0x4f, 0x45, 0x99, 0x1b, 0x37, 0x99, 0x0f, 0xdd, 0xc3, 0x64, 0xae, 0x43, - 0xdd, 0xd6, 0xfd, 0x60, 0xbd, 0x63, 0xea, 0x01, 0x35, 0xe5, 0x78, 0xfb, 0x5f, 0x07, 0x5b, 0xab, - 0xd8, 0xfa, 0x17, 0x29, 0x84, 0xcb, 0x11, 0x0c, 0xc6, 0x31, 0x89, 0x09, 0x23, 0xec, 0x95, 0x8f, - 0x06, 0x73, 0x56, 0x85, 0x00, 0x38, 0x0c, 0x8f, 0xd0, 0xd2, 0xb3, 0x1c, 0xc3, 0xc1, 0x04, 0x6a, - 0x1f, 0xab, 0x7a, 0x6d, 0x20, 0xab, 0xfa, 0x57, 0x6a, 0x50, 0xbf, 0xae, 0x07, 0xd6, 0x0e, 0xe5, - 0xa7, 0x38, 0xc7, 0x63, 0x4a, 0xff, 0xd5, 0x02, 0x9c, 0x49, 0xba, 0xea, 0x1d, 0xa3, 0x3d, 0x9d, - 0x07, 0xfe, 0xc0, 0x4c, 0x6e, 0xd8, 0xa7, 0x16, 0xdc, 0xb2, 0xde, 0xe3, 0xf9, 0x77, 0xdc, 0x96, - 0xf5, 0x66, 0x3f, 0x86, 0xd8, 0xbf, 0x2e, 0xef, 0x15, 0xcb, 0xfa, 0x83, 0x1d, 0x98, 0x2d, 0x65, - 0xf7, 0x1f, 0x7e, 0x60, 0xec, 0xfe, 0xd5, 0x07, 0x42, 0xd9, 0xea, 0xc4, 0xec, 0xfe, 0xb5, 0x9c, - 0xfe, 0x27, 0xd2, 0xbb, 0x5d, 0xa0, 0xf5, 0x3b, 0x3f, 0xe0, 0x17, 0xd3, 0x95, 0x3d, 0x96, 0xe9, - 0x28, 0x1b, 0xba, 0x6f, 0x19, 0x72, 0xd9, 0xcb, 0x11, 0x88, 0x52, 0x45, 0xec, 0x12, 0xc7, 0xd4, - 0xfc, 0x15, 0x05, 0x76, 0x14, 0x19, 0xac, 0x98, 0x2b, 0x32, 0x18, 0x99, 0x83, 0xb2, 0xc3, 0x76, - 0xcf, 0xa5, 0x43, 0xc7, 0x02, 0xbb, 0x7e, 0x8d, 0xee, 0x22, 0x2f, 0xac, 0xbd, 0x5d, 0x04, 0x60, - 0x9f, 0x7f, 0x30, 0x0b, 0xfc, 0x07, 0x61, 0xd8, 0xef, 0xf2, 0xbd, 0xb2, 0x5c, 0xb0, 0x23, 0xa7, - 0x1d, 0x91, 0x8c, 0x2a, 0x9f, 0x3c, 0x0e, 0x95, 0xd7, 0xbb, 0xb4, 0xab, 0x8e, 0x93, 0x43, 0x75, - 0xed, 0x53, 0x2c, 0x11, 0x45, 0xde, 0xf1, 0x59, 0xd3, 0x94, 0xa5, 0xbe, 0x72, 0x5c, 0x96, 0xfa, - 0x1a, 0x0c, 0x5f, 0x77, 0xb9, 
0x0f, 0xa0, 0xf6, 0xaf, 0x45, 0x80, 0xc8, 0xc7, 0x8a, 0x7c, 0xbd, - 0x00, 0x0f, 0x87, 0x03, 0x2e, 0x10, 0x5a, 0xf7, 0x9c, 0xad, 0x5b, 0xed, 0xdc, 0x56, 0xfb, 0xac, - 0xc1, 0xce, 0x67, 0xa0, 0xd5, 0x2c, 0x76, 0x98, 0x5d, 0x0b, 0x82, 0x50, 0xa5, 0xed, 0x4e, 0xb0, - 0x3b, 0x6f, 0x79, 0xb2, 0x07, 0x66, 0xba, 0xf2, 0x5d, 0x96, 0x34, 0xa2, 0xa8, 0xdc, 0x1a, 0xf2, - 0x41, 0xa4, 0x72, 0x30, 0xc4, 0x21, 0x5b, 0x50, 0x75, 0xdc, 0x57, 0x7d, 0x26, 0x0e, 0xd9, 0x1d, - 0x5f, 0x1c, 0x5c, 0xe4, 0x42, 0xac, 0xc2, 0xca, 0x2b, 0x5f, 0x70, 0xd8, 0x91, 0xc2, 0xfe, 0x5a, - 0x11, 0x4e, 0x65, 0xc8, 0x81, 0xbc, 0x08, 0xe3, 0xd2, 0x9d, 0x2d, 0x0a, 0x4f, 0x5c, 0x88, 0xc2, - 0x13, 0x37, 0x53, 0x79, 0xd8, 0x43, 0x4d, 0x5e, 0x05, 0xd0, 0x0d, 0x83, 0xfa, 0xfe, 0x8a, 0x6b, - 0x2a, 0x7d, 0xf4, 0x85, 0xfd, 0xbd, 0x49, 0x98, 0x0d, 0x53, 0xef, 0xec, 0x4d, 0x7e, 0x24, 0xcb, - 0x43, 0x35, 0x25, 0xe7, 0xa8, 0x00, 0xc6, 0x20, 0xc9, 0xe7, 0x00, 0xc4, 0xd6, 0x2b, 0xbc, 0x44, - 0x7f, 0x0f, 0x7b, 0xc5, 0x94, 0x0a, 0x57, 0x34, 0xf5, 0xa9, 0xae, 0xee, 0x04, 0x56, 0xb0, 0x2b, - 0x62, 0x96, 0xdc, 0x0c, 0x51, 0x30, 0x86, 0xa8, 0xfd, 0x69, 0x11, 0xaa, 0xca, 0x52, 0x7a, 0x1f, - 0xcc, 0x63, 0xad, 0x84, 0x79, 0xec, 0x88, 0x7c, 0x52, 0xb3, 0x8c, 0x63, 0x6e, 0xca, 0x38, 0xb6, - 0x98, 0x9f, 0xd5, 0xdd, 0x4d, 0x63, 0xdf, 0x2a, 0xc2, 0x98, 0x22, 0xcd, 0x6b, 0x18, 0xfb, 0x04, - 0x9c, 0x10, 0x67, 0xc9, 0x2b, 0xfa, 0x6d, 0x11, 0xbe, 0x85, 0x0b, 0xac, 0x2c, 0xdc, 0x40, 0x1b, - 0xc9, 0x2c, 0x4c, 0xd3, 0xb2, 0x6e, 0x2d, 0x92, 0xd6, 0xd9, 0x3e, 0x42, 0x9c, 0x3e, 0x89, 0xfd, - 0x0e, 0xef, 0xd6, 0x8d, 0x54, 0x1e, 0xf6, 0x50, 0xa7, 0x2d, 0x73, 0xe5, 0x63, 0xb0, 0xcc, 0xfd, - 0x6d, 0x01, 0x46, 0x22, 0x79, 0x1d, 0xbb, 0x5d, 0x6e, 0x33, 0x69, 0x97, 0x9b, 0xcd, 0xdd, 0x1d, - 0xfa, 0x58, 0xe5, 0x7e, 0x71, 0x18, 0x12, 0xae, 0xd1, 0x64, 0x03, 0xce, 0x59, 0x99, 0x0e, 0x5e, - 0xb1, 0xd9, 0x26, 0xbc, 0xeb, 0xbb, 0xd4, 0x97, 0x12, 0xef, 0x82, 0x42, 0xba, 0x50, 0xdd, 0xa1, - 0x5e, 0x60, 0x19, 0x54, 0x7d, 0xdf, 0x62, 0x6e, 0x95, 0x4c, 0xda, 
0x1e, 0x43, 0x99, 0xde, 0x94, - 0x0c, 0x30, 0x64, 0x45, 0x36, 0xa0, 0x42, 0xcd, 0x16, 0x55, 0x01, 0x75, 0x72, 0x86, 0xab, 0x0c, - 0xe5, 0xc9, 0xde, 0x7c, 0x14, 0xd0, 0xc4, 0x87, 0x9a, 0xad, 0xce, 0x96, 0x64, 0x3f, 0x1c, 0x5c, - 0xc1, 0x0a, 0x4f, 0xa9, 0xa2, 0xbb, 0xf6, 0x61, 0x12, 0x46, 0x7c, 0xc8, 0x76, 0x68, 0xe4, 0xaa, - 0x1c, 0xd1, 0xe4, 0x71, 0x17, 0x13, 0x97, 0x0f, 0xb5, 0x5b, 0x7a, 0x40, 0xbd, 0xb6, 0xee, 0x6d, - 0xcb, 0xdd, 0xc6, 0xe0, 0x5f, 0xf8, 0x92, 0x42, 0x8a, 0xbe, 0x30, 0x4c, 0xc2, 0x88, 0x0f, 0x71, - 0xa1, 0x16, 0x48, 0xf5, 0x59, 0x59, 0xf2, 0x06, 0x67, 0xaa, 0x14, 0x71, 0x5f, 0xba, 0x48, 0xab, - 0x57, 0x8c, 0x78, 0x90, 0x9d, 0x44, 0x28, 0x5f, 0x11, 0xc0, 0xb9, 0x91, 0xc3, 0x22, 0x2c, 0xa1, - 0xa2, 0xe5, 0x26, 0x3b, 0x24, 0xb0, 0xf6, 0x76, 0x25, 0x9a, 0x96, 0xef, 0xb7, 0x9d, 0xea, 0xe9, - 0xa4, 0x9d, 0xea, 0x42, 0xda, 0x4e, 0x95, 0x3a, 0xa2, 0x3c, 0xbc, 0x53, 0x65, 0xca, 0x42, 0x54, - 0x3e, 0x06, 0x0b, 0xd1, 0x53, 0x50, 0xdf, 0xe1, 0x33, 0x81, 0x88, 0xce, 0x53, 0xe1, 0xcb, 0x08, - 0x9f, 0xd9, 0x6f, 0x46, 0xc9, 0x18, 0xa7, 0x61, 0x45, 0x84, 0x06, 0x12, 0x85, 0x37, 0x95, 0x45, - 0x9a, 0x51, 0x32, 0xc6, 0x69, 0xb8, 0x3f, 0x96, 0xe5, 0x6c, 0x8b, 0x02, 0xc3, 0xbc, 0x80, 0xf0, - 0xc7, 0x52, 0x89, 0x18, 0xe5, 0x93, 0x4b, 0x50, 0xed, 0x9a, 0x9b, 0x82, 0xb6, 0xca, 0x69, 0xb9, - 0x86, 0xb9, 0x3e, 0xbf, 0x20, 0xa3, 0x05, 0xa9, 0x5c, 0x56, 0x93, 0xb6, 0xde, 0x51, 0x19, 0x7c, - 0x6f, 0x28, 0x6b, 0xb2, 0x12, 0x25, 0x63, 0x9c, 0x86, 0x7c, 0x0c, 0xc6, 0x3c, 0x6a, 0x76, 0x0d, - 0x1a, 0x96, 0x02, 0x5e, 0x8a, 0x5b, 0x45, 0x31, 0x91, 0x83, 0x29, 0xca, 0x3e, 0x76, 0xae, 0xfa, - 0x40, 0x76, 0xae, 0xef, 0x15, 0x80, 0xf4, 0xfa, 0x2f, 0x93, 0x2d, 0x18, 0x72, 0xb8, 0xf5, 0x2b, - 0x77, 0x40, 0xe4, 0x98, 0x11, 0x4d, 0x4c, 0x4b, 0x32, 0x41, 0xe2, 0x13, 0x07, 0xaa, 0xf4, 0x76, - 0x40, 0x3d, 0x27, 0xbc, 0xcf, 0x70, 0x34, 0xc1, 0x97, 0xc5, 0x6e, 0x40, 0x22, 0x63, 0xc8, 0x43, - 0xfb, 0x41, 0x11, 0xea, 0x31, 0xba, 0x7b, 0x6d, 0x2a, 0xf9, 0x95, 0x6a, 0x61, 0x74, 0x5a, 0xf7, - 0x6c, 
0x39, 0xc2, 0x62, 0x57, 0xaa, 0x65, 0x16, 0x2e, 0x63, 0x9c, 0x8e, 0xcc, 0x00, 0xb4, 0x75, - 0x3f, 0xa0, 0x1e, 0x5f, 0x7d, 0x53, 0x17, 0x99, 0x57, 0xc2, 0x1c, 0x8c, 0x51, 0x91, 0x8b, 0x32, - 0x7c, 0x76, 0x39, 0x19, 0x78, 0xae, 0x4f, 0x6c, 0xec, 0xca, 0x11, 0xc4, 0xc6, 0x26, 0x2d, 0x18, - 0x57, 0xb5, 0x56, 0xb9, 0x87, 0x0b, 0x4b, 0x26, 0xf6, 0x2f, 0x29, 0x08, 0xec, 0x01, 0xd5, 0xde, - 0x2e, 0xc0, 0x68, 0xc2, 0xe4, 0x21, 0x42, 0xc6, 0x29, 0xef, 0xfb, 0x44, 0xc8, 0xb8, 0x98, 0xd3, - 0xfc, 0x93, 0x30, 0x24, 0x04, 0x94, 0x76, 0xaa, 0x13, 0x22, 0x44, 0x99, 0xcb, 0xe6, 0x32, 0x69, - 0x54, 0x4d, 0xcf, 0x65, 0xd2, 0xea, 0x8a, 0x2a, 0x5f, 0x98, 0xdb, 0x45, 0xed, 0x7a, 0xcd, 0xed, - 0x22, 0x1d, 0x43, 0x0a, 0xed, 0x87, 0x25, 0xe0, 0x2e, 0x28, 0xe4, 0x39, 0xa8, 0xb5, 0xa9, 0xb1, - 0xa5, 0x3b, 0x96, 0xaf, 0x42, 0x46, 0xb2, 0xdd, 0x6d, 0x6d, 0x45, 0x25, 0xde, 0x61, 0x00, 0xb3, - 0xcd, 0x65, 0xee, 0xe5, 0x1d, 0xd1, 0x12, 0x03, 0x86, 0x5a, 0xbe, 0xaf, 0x77, 0xac, 0xdc, 0x27, - 0xa0, 0x22, 0x44, 0x9f, 0x18, 0x44, 0xe2, 0x19, 0x25, 0x34, 0x31, 0xa0, 0xd2, 0xb1, 0x75, 0xcb, - 0xc9, 0xfd, 0x8f, 0x12, 0xf6, 0x05, 0xab, 0x0c, 0x49, 0x98, 0x74, 0xf8, 0x23, 0x0a, 0x6c, 0xd2, - 0x85, 0xba, 0x6f, 0x78, 0x7a, 0xdb, 0xdf, 0xd2, 0x67, 0x9e, 0x79, 0x36, 0xb7, 0x92, 0x14, 0xb1, - 0x12, 0x73, 0xf6, 0x1c, 0xce, 0xae, 0x34, 0xaf, 0xcc, 0xce, 0x3c, 0xf3, 0x2c, 0xc6, 0xf9, 0xc4, - 0xd9, 0x3e, 0xf3, 0xd4, 0x8c, 0xec, 0xf7, 0x47, 0xce, 0xf6, 0x99, 0xa7, 0x66, 0x30, 0xce, 0x47, - 0xfb, 0x8f, 0x02, 0xd4, 0x42, 0x5a, 0xb2, 0x0e, 0xc0, 0x46, 0xa0, 0x0c, 0xaa, 0x77, 0xa8, 0x00, - 0xf7, 0x7c, 0x57, 0xbc, 0x1e, 0x16, 0xc6, 0x18, 0x50, 0x46, 0xd4, 0xc1, 0xe2, 0x51, 0x47, 0x1d, - 0x9c, 0x86, 0xda, 0x96, 0xee, 0x98, 0xfe, 0x96, 0xbe, 0x2d, 0x26, 0xa2, 0x58, 0x1c, 0xce, 0x2b, - 0x2a, 0x03, 0x23, 0x1a, 0xed, 0x8f, 0x87, 0x40, 0x1c, 0x5b, 0xb2, 0xa1, 0x62, 0x5a, 0xbe, 0xf0, - 0x9b, 0x2d, 0xf0, 0x92, 0xe1, 0x50, 0x99, 0x97, 0xe9, 0x18, 0x52, 0x90, 0xb3, 0x50, 0x6a, 0x5b, - 0x8e, 0x3c, 0xf1, 0xe0, 0x06, 0xaf, 0x15, 
0xcb, 0x41, 0x96, 0xc6, 0xb3, 0xf4, 0xdb, 0xd2, 0xe5, - 0x49, 0x64, 0xe9, 0xb7, 0x91, 0xa5, 0xb1, 0x2d, 0xa8, 0xed, 0xba, 0xdb, 0x1b, 0xba, 0xb1, 0xad, - 0x3c, 0xa3, 0xca, 0x7c, 0x21, 0xe4, 0x5b, 0xd0, 0xe5, 0x64, 0x16, 0xa6, 0x69, 0xc9, 0x3a, 0x3c, - 0xf2, 0x06, 0xf5, 0x5c, 0x39, 0xca, 0x9b, 0x36, 0xa5, 0x1d, 0x05, 0x23, 0x54, 0x08, 0xee, 0x60, - 0xf5, 0x99, 0x6c, 0x12, 0xec, 0x57, 0x96, 0xbb, 0x6a, 0xea, 0x5e, 0x8b, 0x06, 0xab, 0x9e, 0x6b, - 0x50, 0xdf, 0xb7, 0x9c, 0x96, 0x82, 0x1d, 0x8a, 0x60, 0xd7, 0xb2, 0x49, 0xb0, 0x5f, 0x59, 0xf2, - 0x32, 0x4c, 0x88, 0x2c, 0xb1, 0xd8, 0xce, 0xee, 0xe8, 0x96, 0xad, 0x6f, 0x58, 0xb6, 0xfa, 0xe9, - 0xd6, 0xa8, 0x38, 0x57, 0x58, 0xeb, 0x43, 0x83, 0x7d, 0x4b, 0x93, 0xab, 0x30, 0xae, 0x4e, 0x95, - 0x56, 0xa9, 0xd7, 0x0c, 0x8f, 0xb2, 0x47, 0x1b, 0x17, 0xd8, 0x7e, 0x6f, 0x9e, 0x76, 0x3c, 0x6a, - 0x70, 0xad, 0x2b, 0x45, 0x85, 0x3d, 0xe5, 0x08, 0xc2, 0x19, 0x7e, 0x5e, 0xbd, 0xde, 0x99, 0x73, - 0x5d, 0xdb, 0x74, 0x6f, 0x39, 0xea, 0xdb, 0x85, 0x62, 0xc3, 0x0f, 0x92, 0x9a, 0x99, 0x14, 0xd8, - 0xa7, 0x24, 0xfb, 0x72, 0x9e, 0x33, 0xef, 0xde, 0x72, 0xd2, 0xa8, 0x10, 0x7d, 0x79, 0xb3, 0x0f, - 0x0d, 0xf6, 0x2d, 0x4d, 0x16, 0x80, 0xa4, 0xbf, 0x60, 0xbd, 0xc3, 0x95, 0xa1, 0xd1, 0xc6, 0x19, - 0x11, 0x1f, 0x23, 0x9d, 0x8b, 0x19, 0x25, 0xc8, 0x32, 0x9c, 0x4e, 0xa7, 0x32, 0x76, 0xdc, 0x49, - 0x7e, 0x54, 0x44, 0xc6, 0xc4, 0x8c, 0x7c, 0xcc, 0x2c, 0xa5, 0xfd, 0x49, 0x11, 0x46, 0x13, 0x17, - 0xaa, 0x1f, 0xb8, 0x8b, 0xab, 0x4c, 0x03, 0x6d, 0xfb, 0xad, 0xa5, 0xf9, 0x2b, 0x54, 0x37, 0xa9, - 0x77, 0x8d, 0xaa, 0xcb, 0xef, 0x7c, 0x52, 0x59, 0x49, 0xe4, 0x60, 0x8a, 0x92, 0x6c, 0x42, 0x45, - 0xd8, 0x53, 0xf3, 0xfe, 0x2a, 0x41, 0xc9, 0x88, 0x1b, 0x55, 0xf9, 0x92, 0x23, 0x4c, 0xaa, 0x02, - 0x5e, 0x0b, 0x60, 0x24, 0x4e, 0xc1, 0x26, 0x92, 0x48, 0x59, 0x1b, 0x4e, 0x28, 0x6a, 0x4b, 0x50, - 0x0a, 0x82, 0x41, 0xaf, 0xc4, 0x0a, 0xfb, 0xfc, 0xda, 0x32, 0x32, 0x0c, 0x6d, 0x93, 0xb5, 0x9d, - 0xef, 0x5b, 0xae, 0x23, 0xe3, 0x23, 0xaf, 0xc3, 0x70, 0x20, 0x4d, 0x54, 0x83, 
0x5d, 0xe9, 0xe5, - 0xe6, 0x62, 0x65, 0x9e, 0x52, 0x58, 0xda, 0xdf, 0x15, 0xa1, 0x16, 0x6e, 0x27, 0x0f, 0x10, 0x77, - 0xd8, 0x85, 0x5a, 0xe8, 0x6f, 0x93, 0xfb, 0xb7, 0x67, 0x91, 0x1b, 0x08, 0xdf, 0x01, 0x85, 0xaf, - 0x18, 0xf1, 0x88, 0xfb, 0xf2, 0x94, 0x72, 0xf8, 0xf2, 0x74, 0x60, 0x38, 0xf0, 0xac, 0x56, 0x4b, - 0xea, 0xb6, 0x79, 0x9c, 0x79, 0x42, 0x71, 0xad, 0x09, 0x40, 0x29, 0x59, 0xf1, 0x82, 0x8a, 0x8d, - 0xf6, 0x1a, 0x8c, 0xa7, 0x29, 0xb9, 0xe2, 0x67, 0x6c, 0x51, 0xb3, 0x6b, 0x2b, 0x19, 0x47, 0x8a, - 0x9f, 0x4c, 0xc7, 0x90, 0x82, 0x6d, 0xfe, 0x58, 0x33, 0xbd, 0xe1, 0x3a, 0x6a, 0x5b, 0xcd, 0x75, - 0xe8, 0x35, 0x99, 0x86, 0x61, 0xae, 0xf6, 0x2f, 0x25, 0x38, 0x1b, 0x19, 0x05, 0x56, 0x74, 0x47, - 0x6f, 0x1d, 0xe0, 0x5f, 0x57, 0xef, 0x5f, 0x92, 0x38, 0x6c, 0xf0, 0xf8, 0xd2, 0x03, 0x10, 0x3c, - 0xfe, 0x47, 0x05, 0xe0, 0xbe, 0x81, 0xe4, 0x0b, 0x30, 0xa2, 0xc7, 0x7e, 0x73, 0x28, 0x9b, 0xf3, - 0x72, 0xee, 0xe6, 0xe4, 0x2e, 0x88, 0xa1, 0xaf, 0x4b, 0x3c, 0x15, 0x13, 0x0c, 0x89, 0x0b, 0xd5, - 0x4d, 0xdd, 0xb6, 0x99, 0x2e, 0x94, 0xfb, 0x90, 0x23, 0xc1, 0x9c, 0x77, 0xf3, 0x05, 0x09, 0x8d, - 0x21, 0x13, 0xed, 0x9f, 0x0b, 0x30, 0xda, 0xb4, 0x2d, 0xd3, 0x72, 0x5a, 0xc7, 0x18, 0x35, 0xfe, - 0x06, 0x54, 0x7c, 0xdb, 0x32, 0xe9, 0x80, 0xf3, 0xb8, 0x58, 0x41, 0x18, 0x00, 0x0a, 0x9c, 0x64, - 0x18, 0xfa, 0xd2, 0x01, 0xc2, 0xd0, 0xff, 0x64, 0x08, 0xa4, 0x7f, 0x29, 0xe9, 0x42, 0xad, 0xa5, - 0xa2, 0x5b, 0xcb, 0x6f, 0xbc, 0x92, 0x23, 0x32, 0x5a, 0x22, 0x4e, 0xb6, 0x98, 0x75, 0xc3, 0x44, - 0x8c, 0x38, 0x11, 0x9a, 0xfc, 0xb3, 0xe5, 0x7c, 0xce, 0x3f, 0x5b, 0x0a, 0x76, 0xbd, 0xff, 0xb6, - 0xd4, 0xa1, 0xbc, 0x15, 0x04, 0x1d, 0x39, 0xae, 0x06, 0x77, 0x20, 0x8e, 0x82, 0x73, 0x08, 0x6d, - 0x84, 0xbd, 0x23, 0x87, 0x66, 0x2c, 0x1c, 0x3d, 0xfc, 0xa1, 0xd2, 0x5c, 0xae, 0x83, 0xee, 0x38, - 0x0b, 0xf6, 0x8e, 0x1c, 0x9a, 0x7c, 0x1e, 0xea, 0x81, 0xa7, 0x3b, 0xfe, 0xa6, 0xeb, 0xb5, 0xa9, - 0x27, 0x77, 0x87, 0x0b, 0x39, 0x7e, 0xee, 0xb8, 0x16, 0xa1, 0x89, 0x13, 0xb4, 0x44, 0x12, 0xc6, - 0xb9, 0x91, 0x6d, 
0xa8, 0x76, 0x4d, 0x51, 0x31, 0x69, 0x36, 0x99, 0xcd, 0xf3, 0xbf, 0xce, 0xd8, - 0x31, 0xb6, 0x7a, 0xc3, 0x90, 0x41, 0xf2, 0xdf, 0x61, 0xc3, 0x47, 0xf5, 0xef, 0xb0, 0x78, 0x6f, - 0xcc, 0x8a, 0x1c, 0x40, 0xda, 0x52, 0xa3, 0x74, 0x5a, 0xd2, 0x0b, 0x67, 0x21, 0xb7, 0xb2, 0x27, - 0x58, 0xd6, 0x43, 0xad, 0xd4, 0x69, 0xa1, 0xe2, 0xa1, 0xb5, 0x41, 0x5a, 0xb7, 0x89, 0x91, 0xf8, - 0xc3, 0x86, 0xb8, 0xce, 0x32, 0x7d, 0xb0, 0xf9, 0x20, 0xfc, 0xd5, 0x43, 0x2c, 0xc2, 0x6f, 0xe6, - 0xaf, 0x34, 0xb4, 0xbf, 0x2f, 0x42, 0x69, 0x6d, 0xb9, 0x29, 0xa2, 0xf6, 0xf1, 0xdf, 0xd7, 0xd0, - 0xe6, 0xb6, 0xd5, 0xb9, 0x49, 0x3d, 0x6b, 0x73, 0x57, 0x6e, 0x7a, 0x63, 0x51, 0xfb, 0xd2, 0x14, - 0x98, 0x51, 0x8a, 0xbc, 0x02, 0x23, 0x86, 0x3e, 0x47, 0xbd, 0x60, 0x90, 0x2d, 0x3d, 0xbf, 0xb7, - 0x37, 0x37, 0x1b, 0x15, 0xc7, 0x04, 0x18, 0x59, 0x07, 0x30, 0x22, 0xe8, 0xd2, 0xa1, 0x0d, 0x11, - 0x31, 0xe0, 0x18, 0x10, 0x41, 0xa8, 0x6d, 0x33, 0x52, 0x8e, 0x5a, 0x3e, 0x0c, 0x2a, 0xef, 0x39, - 0xd7, 0x54, 0x59, 0x8c, 0x60, 0x34, 0x07, 0x46, 0x13, 0xbf, 0xdd, 0x20, 0x1f, 0x85, 0xaa, 0xdb, - 0x89, 0x4d, 0xa7, 0x35, 0xee, 0xef, 0x57, 0xbd, 0x21, 0xd3, 0xee, 0xec, 0x4d, 0x8e, 0x2e, 0xbb, - 0x2d, 0xcb, 0x50, 0x09, 0x18, 0x92, 0x13, 0x0d, 0x86, 0xf8, 0x65, 0x1b, 0xf5, 0xd3, 0x0d, 0xbe, - 0x76, 0xf0, 0xb8, 0xf8, 0x3e, 0xca, 0x1c, 0xed, 0x8b, 0x65, 0x88, 0xce, 0x84, 0x88, 0x0f, 0x43, - 0xc2, 0x99, 0x58, 0xce, 0xdc, 0xc7, 0xea, 0xb7, 0x2c, 0x59, 0x91, 0x16, 0x94, 0x5e, 0x73, 0x37, - 0x72, 0x4f, 0xdc, 0xb1, 0x5b, 0xb6, 0xc2, 0x4a, 0x15, 0x4b, 0x40, 0xc6, 0x81, 0xfc, 0x5a, 0x01, - 0x4e, 0xfa, 0x69, 0xa5, 0x53, 0x76, 0x07, 0xcc, 0xaf, 0x5d, 0xa7, 0xd5, 0x58, 0xe9, 0x98, 0xd9, - 0x2f, 0x1b, 0x7b, 0xeb, 0xc2, 0xe4, 0x2f, 0x0e, 0x6b, 0x64, 0x77, 0x5a, 0xcc, 0xf9, 0xab, 0xb8, - 0xa4, 0xfc, 0x93, 0x69, 0x28, 0x59, 0x69, 0x5f, 0x2e, 0x42, 0x3d, 0x36, 0x5b, 0xe7, 0xfe, 0x97, - 0xcb, 0xed, 0xd4, 0xbf, 0x5c, 0x56, 0x07, 0x3f, 0xbb, 0x8c, 0x6a, 0x75, 0xdc, 0xbf, 0x73, 0xf9, - 0xf3, 0x22, 0x94, 0xd6, 0xe7, 0x17, 0x92, 0xdb, 0xc5, 
0xc2, 0x7d, 0xd8, 0x2e, 0x6e, 0xc1, 0xf0, - 0x46, 0xd7, 0xb2, 0x03, 0xcb, 0xc9, 0x1d, 0x07, 0x40, 0xfd, 0xfa, 0x46, 0x5e, 0xa7, 0x15, 0xa8, - 0xa8, 0xe0, 0x49, 0x0b, 0x86, 0x5b, 0x22, 0x10, 0x5b, 0x6e, 0x8f, 0x2e, 0x19, 0xd0, 0x4d, 0x30, - 0x92, 0x2f, 0xa8, 0xd0, 0xb5, 0x5d, 0x90, 0x3f, 0xcf, 0xbe, 0xef, 0xd2, 0xd4, 0x3e, 0x0f, 0xa1, - 0x16, 0x70, 0xff, 0x99, 0xff, 0x5b, 0x01, 0x92, 0x8a, 0xcf, 0xfd, 0xef, 0x4d, 0xdb, 0xe9, 0xde, - 0x34, 0x7f, 0x14, 0x83, 0x2f, 0xbb, 0x43, 0x69, 0x7f, 0x58, 0x84, 0xa1, 0xfb, 0x76, 0x77, 0x93, - 0x26, 0x9c, 0xd3, 0xe6, 0x72, 0x4e, 0x8c, 0x7d, 0x5d, 0xd3, 0xda, 0x29, 0xd7, 0xb4, 0xbc, 0x3f, - 0xeb, 0xbc, 0x87, 0x63, 0xda, 0x5f, 0x17, 0x40, 0x4e, 0xcb, 0x4b, 0x8e, 0x1f, 0xe8, 0x8e, 0xc1, - 0xff, 0x19, 0x2f, 0xd7, 0x80, 0xbc, 0x1e, 0x10, 0xd2, 0x4b, 0x48, 0x2c, 0xfb, 0xfc, 0x59, 0xcd, - 0xf9, 0xe4, 0xc3, 0x50, 0xdd, 0x72, 0xfd, 0x80, 0xcf, 0xf3, 0xc5, 0xa4, 0x5d, 0xe7, 0x8a, 0x4c, - 0xc7, 0x90, 0x22, 0x7d, 0x52, 0x58, 0xe9, 0x7f, 0x52, 0xa8, 0x7d, 0xb3, 0x08, 0x23, 0xef, 0x95, - 0x0b, 0xa8, 0x59, 0xae, 0x7c, 0xa5, 0x9c, 0xae, 0x7c, 0xe5, 0xc3, 0xb8, 0xf2, 0x69, 0xdf, 0x29, - 0x00, 0xdc, 0xb7, 0xdb, 0xaf, 0x66, 0xd2, 0xcb, 0x2e, 0x77, 0xbf, 0xca, 0xf6, 0xb1, 0xfb, 0xbd, - 0x8a, 0xfa, 0x24, 0xee, 0x61, 0xf7, 0x66, 0x01, 0xc6, 0xf4, 0x84, 0xd7, 0x5a, 0x6e, 0xd5, 0x32, - 0xe5, 0x04, 0x17, 0xde, 0xf4, 0x4b, 0xa6, 0x63, 0x8a, 0x2d, 0x79, 0x3e, 0x8a, 0xbc, 0x7a, 0x3d, - 0xea, 0xf6, 0x3d, 0x21, 0x53, 0xb9, 0x9a, 0x93, 0xa0, 0xbc, 0x87, 0x97, 0x60, 0xe9, 0x48, 0xbc, - 0x04, 0xe3, 0xf7, 0x9f, 0xca, 0x77, 0xbd, 0xff, 0xb4, 0x03, 0xb5, 0x4d, 0xcf, 0x6d, 0x73, 0x47, - 0x3c, 0xf9, 0x9b, 0xcf, 0xcb, 0x39, 0xd6, 0x94, 0xe8, 0x07, 0xd7, 0x91, 0x8d, 0x67, 0x41, 0xe1, - 0x63, 0xc4, 0x8a, 0x1b, 0xa4, 0x5d, 0xc1, 0x75, 0xe8, 0x28, 0xb9, 0x86, 0x73, 0xc9, 0x9a, 0x40, - 0x47, 0xc5, 0x26, 0xe9, 0x7c, 0x37, 0x7c, 0x7f, 0x9c, 0xef, 0xb4, 0x5f, 0x28, 0xab, 0x09, 0xec, - 0x81, 0x0b, 0xf2, 0xf7, 0xde, 0xbf, 0x35, 0x99, 0xbe, 0xd2, 0x38, 0x7c, 0x1f, 0xaf, 0x34, 
0x56, - 0x07, 0x72, 0xf5, 0xda, 0x2b, 0x41, 0x6a, 0xdf, 0xf4, 0xfe, 0xe9, 0xc4, 0x7f, 0xa9, 0xd3, 0x89, - 0xb7, 0x8a, 0x10, 0x4d, 0x04, 0x87, 0xf4, 0xde, 0x78, 0x19, 0xaa, 0x6d, 0xfd, 0xf6, 0x3c, 0xb5, - 0xf5, 0xdd, 0x3c, 0xff, 0x66, 0x5c, 0x91, 0x18, 0x18, 0xa2, 0x11, 0x1f, 0xc0, 0x0a, 0xe3, 0x23, - 0xe7, 0xb6, 0x36, 0x47, 0xa1, 0x96, 0x85, 0x3d, 0x2b, 0x7a, 0xc7, 0x18, 0x1b, 0xed, 0xaf, 0x8a, - 0x20, 0x03, 0x69, 0x13, 0x0a, 0x95, 0x4d, 0xeb, 0x36, 0x35, 0x73, 0x7b, 0x32, 0xc6, 0xfe, 0x98, - 0x2b, 0xcc, 0xe9, 0x3c, 0x01, 0x05, 0x3a, 0xb7, 0x93, 0x8a, 0xe3, 0x11, 0x29, 0xbf, 0x1c, 0x76, - 0xd2, 0xf8, 0x31, 0x8b, 0xb4, 0x93, 0x8a, 0x24, 0x54, 0x3c, 0x84, 0x59, 0x96, 0x9f, 0x51, 0x4b, - 0x91, 0xe6, 0x31, 0xcb, 0xc6, 0xce, 0xba, 0x95, 0x59, 0xd6, 0x17, 0x77, 0x9a, 0x25, 0x8f, 0xc6, - 0x67, 0xbf, 0xfd, 0xdd, 0x0b, 0x0f, 0x7d, 0xe7, 0xbb, 0x17, 0x1e, 0x7a, 0xe7, 0xbb, 0x17, 0x1e, - 0xfa, 0xe2, 0xfe, 0x85, 0xc2, 0xb7, 0xf7, 0x2f, 0x14, 0xbe, 0xb3, 0x7f, 0xa1, 0xf0, 0xce, 0xfe, - 0x85, 0xc2, 0x3f, 0xee, 0x5f, 0x28, 0xfc, 0xf2, 0x3f, 0x5d, 0x78, 0xe8, 0x33, 0xcf, 0x45, 0x55, - 0x98, 0x56, 0x55, 0x98, 0x56, 0x0c, 0xa7, 0x3b, 0xdb, 0xad, 0x69, 0x56, 0x85, 0x28, 0x45, 0x55, - 0xe1, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x9b, 0x9c, 0x8e, 0xe2, 0xbf, 0x92, 0x00, 0x00, + // 7464 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x5d, 0x6c, 0x1d, 0xd7, + 0x75, 0xae, 0xcf, 0x1f, 0x79, 0xce, 0x3a, 0x24, 0x45, 0x6d, 0xc9, 0x32, 0x25, 0xcb, 0xa2, 0x32, + 0x8e, 0x7d, 0x95, 0x9b, 0x84, 0xbc, 0xe6, 0xf5, 0x5f, 0x72, 0x93, 0xd8, 0x3c, 0xa4, 0x48, 0x51, + 0x22, 0x25, 0x66, 0x1d, 0x52, 0x76, 0xe2, 0x9b, 0xf8, 0x0e, 0x67, 0x36, 0x0f, 0xc7, 0x9c, 0x33, + 0x73, 0x3c, 0x33, 0x87, 0x12, 0x9d, 0x7b, 0x91, 0xbf, 0x07, 0xfb, 0xa2, 0x2d, 0x5a, 0xe4, 0x29, + 0x40, 0x91, 0x16, 0x2d, 0x0a, 0xe4, 0x21, 0x48, 0x1f, 0x0a, 0xb8, 0x0f, 0x05, 0xfa, 0x93, 0xa2, + 0x68, 0xd3, 0xa2, 0x3f, 0x79, 0x28, 0x50, 0xf7, 0x85, 0x68, 0x58, 0xf4, 0xa1, 0x05, 
0x1a, 0x04, + 0x0d, 0xd0, 0x24, 0x42, 0x80, 0x14, 0xfb, 0x6f, 0xfe, 0xce, 0x1c, 0x89, 0x3c, 0x43, 0xca, 0x72, + 0xeb, 0xb7, 0x99, 0xbd, 0xd7, 0xfe, 0xd6, 0xde, 0x6b, 0xf6, 0xcf, 0xda, 0x6b, 0xaf, 0xbd, 0x06, + 0x16, 0x5b, 0x56, 0xb0, 0xd5, 0xdd, 0x98, 0x32, 0xdc, 0xf6, 0xb4, 0xd3, 0x6d, 0xeb, 0x1d, 0xcf, + 0x7d, 0x8d, 0x3f, 0x6c, 0xda, 0xee, 0xad, 0xe9, 0xce, 0x76, 0x6b, 0x5a, 0xef, 0x58, 0x7e, 0x94, + 0xb2, 0xf3, 0x94, 0x6e, 0x77, 0xb6, 0xf4, 0xa7, 0xa6, 0x5b, 0xd4, 0xa1, 0x9e, 0x1e, 0x50, 0x73, + 0xaa, 0xe3, 0xb9, 0x81, 0x4b, 0x9e, 0x8b, 0x80, 0xa6, 0x14, 0xd0, 0x94, 0x2a, 0x36, 0xd5, 0xd9, + 0x6e, 0x4d, 0x31, 0xa0, 0x28, 0x45, 0x01, 0x9d, 0xfb, 0x68, 0xac, 0x06, 0x2d, 0xb7, 0xe5, 0x4e, + 0x73, 0xbc, 0x8d, 0xee, 0x26, 0x7f, 0xe3, 0x2f, 0xfc, 0x49, 0xf0, 0x39, 0xa7, 0x6d, 0x3f, 0xef, + 0x4f, 0x59, 0x2e, 0xab, 0xd6, 0xb4, 0xe1, 0x7a, 0x74, 0x7a, 0xa7, 0xa7, 0x2e, 0xe7, 0x9e, 0x8e, + 0x68, 0xda, 0xba, 0xb1, 0x65, 0x39, 0xd4, 0xdb, 0x55, 0x6d, 0x99, 0xf6, 0xa8, 0xef, 0x76, 0x3d, + 0x83, 0x1e, 0xaa, 0x94, 0x3f, 0xdd, 0xa6, 0x81, 0x9e, 0xc5, 0x6b, 0xba, 0x5f, 0x29, 0xaf, 0xeb, + 0x04, 0x56, 0xbb, 0x97, 0xcd, 0xb3, 0xf7, 0x2a, 0xe0, 0x1b, 0x5b, 0xb4, 0xad, 0xa7, 0xcb, 0x69, + 0xdf, 0x01, 0x38, 0x35, 0xbb, 0xe1, 0x07, 0x9e, 0x6e, 0x04, 0xab, 0xae, 0xb9, 0x46, 0xdb, 0x1d, + 0x5b, 0x0f, 0x28, 0xd9, 0x86, 0x2a, 0xab, 0x9b, 0xa9, 0x07, 0xfa, 0x44, 0xe1, 0x62, 0xe1, 0x52, + 0x7d, 0x66, 0x76, 0x6a, 0xc0, 0x6f, 0x31, 0xb5, 0x22, 0x81, 0x1a, 0x23, 0xfb, 0x7b, 0x93, 0x55, + 0xf5, 0x86, 0x21, 0x03, 0xf2, 0xf5, 0x02, 0x8c, 0x38, 0xae, 0x49, 0x9b, 0xd4, 0xa6, 0x46, 0xe0, + 0x7a, 0x13, 0xc5, 0x8b, 0xa5, 0x4b, 0xf5, 0x99, 0xcf, 0x0f, 0xcc, 0x31, 0xa3, 0x45, 0x53, 0xd7, + 0x63, 0x0c, 0x2e, 0x3b, 0x81, 0xb7, 0xdb, 0x38, 0xfd, 0xdd, 0xbd, 0xc9, 0x87, 0xf6, 0xf7, 0x26, + 0x47, 0xe2, 0x59, 0x98, 0xa8, 0x09, 0x59, 0x87, 0x7a, 0xe0, 0xda, 0x4c, 0x64, 0x96, 0xeb, 0xf8, + 0x13, 0x25, 0x5e, 0xb1, 0x0b, 0x53, 0x42, 0xda, 0x8c, 0xfd, 0x14, 0xeb, 0x2e, 0x53, 0x3b, 0x4f, + 0x4d, 0xad, 0x85, 0x64, 
0x8d, 0x53, 0x12, 0xb8, 0x1e, 0xa5, 0xf9, 0x18, 0xc7, 0x21, 0x14, 0x4e, + 0xf8, 0xd4, 0xe8, 0x7a, 0x56, 0xb0, 0x3b, 0xe7, 0x3a, 0x01, 0xbd, 0x1d, 0x4c, 0x94, 0xb9, 0x94, + 0x9f, 0xcc, 0x82, 0x5e, 0x75, 0xcd, 0x66, 0x92, 0xba, 0x71, 0x6a, 0x7f, 0x6f, 0xf2, 0x44, 0x2a, + 0x11, 0xd3, 0x98, 0xc4, 0x81, 0x71, 0xab, 0xad, 0xb7, 0xe8, 0x6a, 0xd7, 0xb6, 0x9b, 0xd4, 0xf0, + 0x68, 0xe0, 0x4f, 0x54, 0x78, 0x13, 0x2e, 0x65, 0xf1, 0x59, 0x76, 0x0d, 0xdd, 0xbe, 0xb1, 0xf1, + 0x1a, 0x35, 0x02, 0xa4, 0x9b, 0xd4, 0xa3, 0x8e, 0x41, 0x1b, 0x13, 0xb2, 0x31, 0xe3, 0x4b, 0x29, + 0x24, 0xec, 0xc1, 0x26, 0x8b, 0x70, 0xb2, 0xe3, 0x59, 0x2e, 0xaf, 0x82, 0xad, 0xfb, 0xfe, 0x75, + 0xbd, 0x4d, 0x27, 0x86, 0x2e, 0x16, 0x2e, 0xd5, 0x1a, 0x67, 0x25, 0xcc, 0xc9, 0xd5, 0x34, 0x01, + 0xf6, 0x96, 0x21, 0x97, 0xa0, 0xaa, 0x12, 0x27, 0x86, 0x2f, 0x16, 0x2e, 0x55, 0x44, 0xdf, 0x51, + 0x65, 0x31, 0xcc, 0x25, 0x0b, 0x50, 0xd5, 0x37, 0x37, 0x2d, 0x87, 0x51, 0x56, 0xb9, 0x08, 0xcf, + 0x67, 0x35, 0x6d, 0x56, 0xd2, 0x08, 0x1c, 0xf5, 0x86, 0x61, 0x59, 0x72, 0x15, 0x88, 0x4f, 0xbd, + 0x1d, 0xcb, 0xa0, 0xb3, 0x86, 0xe1, 0x76, 0x9d, 0x80, 0xd7, 0xbd, 0xc6, 0xeb, 0x7e, 0x4e, 0xd6, + 0x9d, 0x34, 0x7b, 0x28, 0x30, 0xa3, 0x14, 0x79, 0x11, 0xc6, 0xe5, 0xb0, 0x8b, 0xa4, 0x00, 0x1c, + 0xe9, 0x34, 0x13, 0x24, 0xa6, 0xf2, 0xb0, 0x87, 0x9a, 0x98, 0x70, 0x5e, 0xef, 0x06, 0x6e, 0x9b, + 0x41, 0x26, 0x99, 0xae, 0xb9, 0xdb, 0xd4, 0x99, 0xa8, 0x5f, 0x2c, 0x5c, 0xaa, 0x36, 0x2e, 0xee, + 0xef, 0x4d, 0x9e, 0x9f, 0xbd, 0x0b, 0x1d, 0xde, 0x15, 0x85, 0xdc, 0x80, 0x9a, 0xe9, 0xf8, 0xab, + 0xae, 0x6d, 0x19, 0xbb, 0x13, 0x23, 0xbc, 0x82, 0x4f, 0xc9, 0xa6, 0xd6, 0xe6, 0xaf, 0x37, 0x45, + 0xc6, 0x9d, 0xbd, 0xc9, 0xf3, 0xbd, 0xb3, 0xe3, 0x54, 0x98, 0x8f, 0x11, 0x06, 0x59, 0xe1, 0x80, + 0x73, 0xae, 0xb3, 0x69, 0xb5, 0x26, 0x46, 0xf9, 0xd7, 0xb8, 0xd8, 0xa7, 0x43, 0xcf, 0x5f, 0x6f, + 0x0a, 0xba, 0xc6, 0xa8, 0x64, 0x27, 0x5e, 0x31, 0x42, 0x20, 0x26, 0x8c, 0xa9, 0x79, 0x75, 0xce, + 0xd6, 0xad, 0xb6, 0x3f, 0x31, 0xc6, 0x3b, 0xef, 0x07, 0xfb, 
0x60, 0x62, 0x9c, 0xb8, 0x71, 0x46, + 0x36, 0x65, 0x2c, 0x91, 0xec, 0x63, 0x0a, 0xf3, 0xdc, 0x0b, 0x70, 0xb2, 0x67, 0x6e, 0x20, 0xe3, + 0x50, 0xda, 0xa6, 0xbb, 0x7c, 0xea, 0xab, 0x21, 0x7b, 0x24, 0xa7, 0xa1, 0xb2, 0xa3, 0xdb, 0x5d, + 0x3a, 0x51, 0xe4, 0x69, 0xe2, 0xe5, 0xe3, 0xc5, 0xe7, 0x0b, 0xda, 0x6f, 0x96, 0x60, 0x44, 0xcd, + 0x38, 0x4d, 0xcb, 0xd9, 0x26, 0x2f, 0x41, 0xc9, 0x76, 0x5b, 0x72, 0xde, 0xfc, 0xc4, 0xc0, 0xb3, + 0xd8, 0xb2, 0xdb, 0x6a, 0x0c, 0xef, 0xef, 0x4d, 0x96, 0x96, 0xdd, 0x16, 0x32, 0x44, 0x62, 0x40, + 0x65, 0x5b, 0xdf, 0xdc, 0xd6, 0x79, 0x1d, 0xea, 0x33, 0x8d, 0x81, 0xa1, 0xaf, 0x31, 0x14, 0x56, + 0xd7, 0x46, 0x6d, 0x7f, 0x6f, 0xb2, 0xc2, 0x5f, 0x51, 0x60, 0x13, 0x17, 0x6a, 0x1b, 0xb6, 0x6e, + 0x6c, 0x6f, 0xb9, 0x36, 0x9d, 0x28, 0xe5, 0x64, 0xd4, 0x50, 0x48, 0xe2, 0x33, 0x87, 0xaf, 0x18, + 0xf1, 0x20, 0x06, 0x0c, 0x75, 0x4d, 0xdf, 0x72, 0xb6, 0xe5, 0x1c, 0xf8, 0xc2, 0xc0, 0xdc, 0xd6, + 0xe7, 0x79, 0x9b, 0x60, 0x7f, 0x6f, 0x72, 0x48, 0x3c, 0xa3, 0x84, 0xd6, 0x7e, 0x50, 0x87, 0x31, + 0xf5, 0x91, 0x6e, 0x52, 0x2f, 0xa0, 0xb7, 0xc9, 0x45, 0x28, 0x3b, 0x6c, 0x68, 0xf2, 0x8f, 0xdc, + 0x18, 0x91, 0xdd, 0xa5, 0xcc, 0x87, 0x24, 0xcf, 0x61, 0x35, 0x13, 0x5d, 0x45, 0x0a, 0x7c, 0xf0, + 0x9a, 0x35, 0x39, 0x8c, 0xa8, 0x99, 0x78, 0x46, 0x09, 0x4d, 0x5e, 0x81, 0x32, 0x6f, 0xbc, 0x10, + 0xf5, 0x27, 0x07, 0x67, 0xc1, 0x9a, 0x5e, 0x65, 0x2d, 0xe0, 0x0d, 0xe7, 0xa0, 0xac, 0x2b, 0x76, + 0xcd, 0x4d, 0x29, 0xd8, 0x4f, 0xe4, 0x10, 0xec, 0x82, 0xe8, 0x8a, 0xeb, 0xf3, 0x0b, 0xc8, 0x10, + 0xc9, 0x2f, 0x17, 0xe0, 0xa4, 0xe1, 0x3a, 0x81, 0xce, 0x54, 0x0d, 0xb5, 0xc8, 0x4e, 0x54, 0x38, + 0x9f, 0xab, 0x03, 0xf3, 0x99, 0x4b, 0x23, 0x36, 0x1e, 0x66, 0x6b, 0x46, 0x4f, 0x32, 0xf6, 0xf2, + 0x26, 0xbf, 0x5a, 0x80, 0x87, 0xd9, 0x5c, 0xde, 0x43, 0xcc, 0x57, 0xa0, 0xa3, 0xad, 0xd5, 0xd9, + 0xfd, 0xbd, 0xc9, 0x87, 0x97, 0xb2, 0x98, 0x61, 0x76, 0x1d, 0x58, 0xed, 0x4e, 0xe9, 0xbd, 0x6a, + 0x09, 0x5f, 0xdd, 0xea, 0x33, 0xcb, 0x47, 0xa9, 0xea, 0x34, 0x1e, 0x95, 0x5d, 0x39, 0x4b, 0xb3, + 
0xc3, 0xac, 0x5a, 0x90, 0xcb, 0x30, 0xbc, 0xe3, 0xda, 0xdd, 0x36, 0xf5, 0x27, 0xaa, 0x7c, 0x8a, + 0x3d, 0x97, 0x35, 0xc5, 0xde, 0xe4, 0x24, 0x8d, 0x13, 0x12, 0x7e, 0x58, 0xbc, 0xfb, 0xa8, 0xca, + 0x12, 0x0b, 0x86, 0x6c, 0xab, 0x6d, 0x05, 0x3e, 0x5f, 0x38, 0xeb, 0x33, 0x97, 0x07, 0x6e, 0x96, + 0x18, 0xa2, 0xcb, 0x1c, 0x4c, 0x8c, 0x1a, 0xf1, 0x8c, 0x92, 0x01, 0x9b, 0x0a, 0x7d, 0x43, 0xb7, + 0xc5, 0xc2, 0x5a, 0x9f, 0xf9, 0xd4, 0xe0, 0xc3, 0x86, 0xa1, 0x34, 0x46, 0x65, 0x9b, 0x2a, 0xfc, + 0x15, 0x05, 0x36, 0xf9, 0x1c, 0x8c, 0x25, 0xbe, 0xa6, 0x3f, 0x51, 0xe7, 0xd2, 0x79, 0x2c, 0x4b, + 0x3a, 0x21, 0x55, 0xb4, 0xf2, 0x24, 0x7a, 0x88, 0x8f, 0x29, 0x30, 0x72, 0x0d, 0xaa, 0xbe, 0x65, + 0x52, 0x43, 0xf7, 0xfc, 0x89, 0x91, 0x83, 0x00, 0x8f, 0x4b, 0xe0, 0x6a, 0x53, 0x16, 0xc3, 0x10, + 0x80, 0x4c, 0x01, 0x74, 0x74, 0x2f, 0xb0, 0x84, 0xa2, 0x3a, 0xca, 0x95, 0xa6, 0xb1, 0xfd, 0xbd, + 0x49, 0x58, 0x0d, 0x53, 0x31, 0x46, 0xc1, 0xe8, 0x59, 0xd9, 0x25, 0xa7, 0xd3, 0x0d, 0xc4, 0xc2, + 0x5a, 0x13, 0xf4, 0xcd, 0x30, 0x15, 0x63, 0x14, 0xe4, 0xdb, 0x05, 0x78, 0x34, 0x7a, 0xed, 0x1d, + 0x64, 0x27, 0x8e, 0x7c, 0x90, 0x4d, 0xee, 0xef, 0x4d, 0x3e, 0xda, 0xec, 0xcf, 0x12, 0xef, 0x56, + 0x1f, 0xed, 0x25, 0x18, 0x9d, 0xed, 0x06, 0x5b, 0xae, 0x67, 0xbd, 0xc1, 0x95, 0x6e, 0xb2, 0x00, + 0x95, 0x80, 0x2b, 0x4f, 0x62, 0x5d, 0x7e, 0x22, 0x4b, 0xd4, 0x42, 0x91, 0xbd, 0x46, 0x77, 0x95, + 0x36, 0x20, 0xd6, 0x47, 0xa1, 0x4c, 0x89, 0xe2, 0xda, 0x6f, 0x14, 0xa0, 0xd6, 0xd0, 0x7d, 0xcb, + 0x60, 0xf0, 0x64, 0x0e, 0xca, 0x5d, 0x9f, 0x7a, 0x87, 0x03, 0xe5, 0xb3, 0xf4, 0xba, 0x4f, 0x3d, + 0xe4, 0x85, 0xc9, 0x0d, 0xa8, 0x76, 0x74, 0xdf, 0xbf, 0xe5, 0x7a, 0xa6, 0x5c, 0x69, 0x0e, 0x08, + 0x24, 0xb4, 0x62, 0x59, 0x14, 0x43, 0x10, 0xad, 0x0e, 0xd1, 0x52, 0xab, 0xfd, 0xa8, 0x00, 0xa7, + 0x1a, 0xdd, 0xcd, 0x4d, 0xea, 0x49, 0x25, 0x50, 0xaa, 0x57, 0x14, 0x2a, 0x1e, 0x35, 0x2d, 0x5f, + 0xd6, 0x7d, 0x7e, 0xe0, 0x4f, 0x87, 0x0c, 0x45, 0x6a, 0x73, 0x5c, 0x5e, 0x3c, 0x01, 0x05, 0x3a, + 0xe9, 0x42, 0xed, 0x35, 0x1a, 0xf8, 
0x81, 0x47, 0xf5, 0xb6, 0x6c, 0xdd, 0x95, 0x81, 0x59, 0x5d, + 0xa5, 0x41, 0x93, 0x23, 0xc5, 0x95, 0xc7, 0x30, 0x11, 0x23, 0x4e, 0xda, 0x77, 0x2a, 0x30, 0x32, + 0xe7, 0xb6, 0x37, 0x2c, 0x87, 0x9a, 0x97, 0xcd, 0x16, 0x25, 0xaf, 0x42, 0x99, 0x9a, 0x2d, 0x2a, + 0x5b, 0x3b, 0xf8, 0x3a, 0xcb, 0xc0, 0x22, 0x6d, 0x81, 0xbd, 0x21, 0x07, 0x26, 0xcb, 0x30, 0xb6, + 0xe9, 0xb9, 0x6d, 0x31, 0x75, 0xad, 0xed, 0x76, 0xa4, 0xaa, 0xd8, 0xf8, 0xa0, 0x9a, 0x0e, 0x16, + 0x12, 0xb9, 0x77, 0xf6, 0x26, 0x21, 0x7a, 0xc3, 0x54, 0x59, 0xf2, 0x32, 0x4c, 0x44, 0x29, 0xe1, + 0x18, 0x9e, 0x63, 0xda, 0x3b, 0x57, 0x15, 0x2a, 0x8d, 0xf3, 0xfb, 0x7b, 0x93, 0x13, 0x0b, 0x7d, + 0x68, 0xb0, 0x6f, 0x69, 0xf2, 0x66, 0x01, 0xc6, 0xa3, 0x4c, 0x31, 0xaf, 0x4a, 0x0d, 0xe1, 0x88, + 0x26, 0x6c, 0xbe, 0xcd, 0x59, 0x48, 0xb1, 0xc0, 0x1e, 0xa6, 0x64, 0x01, 0x46, 0x02, 0x37, 0x26, + 0xaf, 0x0a, 0x97, 0x97, 0xa6, 0xf6, 0xe5, 0x6b, 0x6e, 0x5f, 0x69, 0x25, 0xca, 0x11, 0x84, 0x33, + 0xea, 0x3d, 0x25, 0xa9, 0x21, 0x2e, 0xa9, 0x73, 0xfb, 0x7b, 0x93, 0x67, 0xd6, 0x32, 0x29, 0xb0, + 0x4f, 0x49, 0xf2, 0xe5, 0x02, 0x8c, 0xa9, 0x2c, 0x29, 0xa3, 0xe1, 0xa3, 0x94, 0x11, 0x61, 0x3d, + 0x62, 0x2d, 0xc1, 0x00, 0x53, 0x0c, 0xb5, 0x9f, 0x94, 0xa1, 0x16, 0xce, 0x6c, 0xe4, 0x71, 0xa8, + 0xf0, 0x1d, 0xb7, 0x54, 0x58, 0xc3, 0x25, 0x8b, 0x6f, 0xcc, 0x51, 0xe4, 0x91, 0x27, 0x60, 0xd8, + 0x70, 0xdb, 0x6d, 0xdd, 0x31, 0xb9, 0x15, 0xa5, 0xd6, 0xa8, 0xb3, 0x95, 0x7a, 0x4e, 0x24, 0xa1, + 0xca, 0x23, 0xe7, 0xa1, 0xac, 0x7b, 0x2d, 0x61, 0xd0, 0xa8, 0x89, 0xf9, 0x68, 0xd6, 0x6b, 0xf9, + 0xc8, 0x53, 0xc9, 0xc7, 0xa0, 0x44, 0x9d, 0x9d, 0x89, 0x72, 0x7f, 0x55, 0xe0, 0xb2, 0xb3, 0x73, + 0x53, 0xf7, 0x1a, 0x75, 0x59, 0x87, 0xd2, 0x65, 0x67, 0x07, 0x59, 0x19, 0xb2, 0x0c, 0xc3, 0xd4, + 0xd9, 0x61, 0xdf, 0x5e, 0x5a, 0x1a, 0x3e, 0xd0, 0xa7, 0x38, 0x23, 0x91, 0x5a, 0x71, 0xa8, 0x50, + 0xc8, 0x64, 0x54, 0x10, 0xe4, 0x33, 0x30, 0x22, 0x74, 0x8b, 0x15, 0xf6, 0x4d, 0xfc, 0x89, 0x21, + 0x0e, 0x39, 0xd9, 0x5f, 0x39, 0xe1, 0x74, 0x91, 0x65, 0x27, 0x96, 0xe8, 
0x63, 0x02, 0x8a, 0x7c, + 0x06, 0x6a, 0x6a, 0x23, 0xa8, 0xbe, 0x6c, 0xa6, 0x51, 0x44, 0xed, 0x1e, 0x91, 0xbe, 0xde, 0xb5, + 0x3c, 0xda, 0xa6, 0x4e, 0xe0, 0x37, 0x4e, 0xaa, 0x6d, 0xb2, 0xca, 0xf5, 0x31, 0x42, 0x23, 0x1b, + 0xbd, 0xd6, 0x1d, 0x61, 0x9a, 0x78, 0xbc, 0xcf, 0xac, 0x3e, 0x80, 0x69, 0xe7, 0xf3, 0x70, 0x22, + 0x34, 0xbf, 0xc8, 0x1d, 0xbc, 0x30, 0x56, 0x3c, 0xcd, 0x8a, 0x2f, 0x25, 0xb3, 0xee, 0xec, 0x4d, + 0x3e, 0x96, 0xb1, 0x87, 0x8f, 0x08, 0x30, 0x0d, 0xa6, 0xfd, 0x61, 0x09, 0x7a, 0xd5, 0xee, 0xa4, + 0xd0, 0x0a, 0x47, 0x2d, 0xb4, 0x74, 0x83, 0xc4, 0xf4, 0xf9, 0xbc, 0x2c, 0x96, 0xbf, 0x51, 0x59, + 0x1f, 0xa6, 0x74, 0xd4, 0x1f, 0xe6, 0x41, 0x19, 0x3b, 0xda, 0x5b, 0x65, 0x18, 0x9b, 0xd7, 0x69, + 0xdb, 0x75, 0xee, 0xb9, 0x09, 0x29, 0x3c, 0x10, 0x9b, 0x90, 0x4b, 0x50, 0xf5, 0x68, 0xc7, 0xb6, + 0x0c, 0xdd, 0xe7, 0x9f, 0x5e, 0x1a, 0xfd, 0x50, 0xa6, 0x61, 0x98, 0xdb, 0x67, 0xf3, 0x59, 0x7a, + 0x20, 0x37, 0x9f, 0xe5, 0x77, 0x7f, 0xf3, 0xa9, 0x7d, 0xb9, 0x08, 0x5c, 0x51, 0x21, 0x17, 0xa1, + 0xcc, 0x16, 0xe1, 0xb4, 0xc9, 0x83, 0x77, 0x1c, 0x9e, 0x43, 0xce, 0x41, 0x31, 0x70, 0xe5, 0xc8, + 0x03, 0x99, 0x5f, 0x5c, 0x73, 0xb1, 0x18, 0xb8, 0xe4, 0x0d, 0x00, 0xc3, 0x75, 0x4c, 0x4b, 0xd9, + 0xc2, 0xf3, 0x35, 0x6c, 0xc1, 0xf5, 0x6e, 0xe9, 0x9e, 0x39, 0x17, 0x22, 0x8a, 0xed, 0x47, 0xf4, + 0x8e, 0x31, 0x6e, 0xe4, 0x05, 0x18, 0x72, 0x9d, 0x85, 0xae, 0x6d, 0x73, 0x81, 0xd6, 0x1a, 0xff, + 0x8d, 0xed, 0x09, 0x6f, 0xf0, 0x94, 0x3b, 0x7b, 0x93, 0x67, 0x85, 0x7e, 0xcb, 0xde, 0x5e, 0xf2, + 0xac, 0xc0, 0x72, 0x5a, 0xcd, 0xc0, 0xd3, 0x03, 0xda, 0xda, 0x45, 0x59, 0x4c, 0xfb, 0x5a, 0x01, + 0xea, 0x0b, 0xd6, 0x6d, 0x6a, 0xbe, 0x64, 0x39, 0xa6, 0x7b, 0x8b, 0x20, 0x0c, 0xd9, 0xd4, 0x69, + 0x05, 0x5b, 0xb2, 0xf7, 0x4f, 0xc5, 0xc6, 0x5a, 0x78, 0x84, 0x12, 0xd5, 0xbf, 0x4d, 0x03, 0x9d, + 0x8d, 0xbe, 0xf9, 0xae, 0x34, 0xf2, 0x8b, 0x4d, 0x29, 0x47, 0x40, 0x89, 0x44, 0xa6, 0xa1, 0x26, + 0xb4, 0x4f, 0xcb, 0x69, 0x71, 0x19, 0x56, 0xa3, 0x49, 0xaf, 0xa9, 0x32, 0x30, 0xa2, 0xd1, 0x76, + 0xe1, 0x64, 
0x8f, 0x18, 0x88, 0x09, 0xe5, 0x40, 0x6f, 0xa9, 0xf9, 0x75, 0x61, 0x60, 0x01, 0xaf, + 0xe9, 0xad, 0x98, 0x70, 0xf9, 0x1a, 0xbf, 0xa6, 0xb3, 0x35, 0x9e, 0xa1, 0x6b, 0x3f, 0x2b, 0x40, + 0x75, 0xa1, 0xeb, 0x18, 0x7c, 0x6f, 0x74, 0x6f, 0x53, 0x98, 0x52, 0x18, 0x8a, 0x99, 0x0a, 0x43, + 0x17, 0x86, 0xb6, 0x6f, 0x85, 0x0a, 0x45, 0x7d, 0x66, 0x65, 0xf0, 0x5e, 0x21, 0xab, 0x34, 0x75, + 0x8d, 0xe3, 0x89, 0x93, 0x9a, 0x31, 0x59, 0xa1, 0xa1, 0x6b, 0x2f, 0x71, 0xa6, 0x92, 0xd9, 0xb9, + 0x8f, 0x41, 0x3d, 0x46, 0x76, 0x28, 0xa3, 0xed, 0xef, 0x96, 0x61, 0x68, 0xb1, 0xd9, 0x9c, 0x5d, + 0x5d, 0x22, 0xcf, 0x40, 0x5d, 0x1a, 0xf1, 0xaf, 0x47, 0x32, 0x08, 0xcf, 0x70, 0x9a, 0x51, 0x16, + 0xc6, 0xe9, 0x98, 0x3a, 0xe6, 0x51, 0xdd, 0x6e, 0xcb, 0xc1, 0x12, 0xaa, 0x63, 0xc8, 0x12, 0x51, + 0xe4, 0x11, 0x1d, 0xc6, 0xd8, 0x0e, 0x8f, 0x89, 0x50, 0xec, 0xde, 0xe4, 0xb0, 0x39, 0xe0, 0xfe, + 0x8e, 0x2b, 0x89, 0xeb, 0x09, 0x00, 0x4c, 0x01, 0x92, 0xe7, 0xa1, 0xaa, 0x77, 0x83, 0x2d, 0xae, + 0x40, 0x8b, 0xb1, 0x71, 0x9e, 0x9f, 0x71, 0xc8, 0xb4, 0x3b, 0x7b, 0x93, 0x23, 0xd7, 0xb0, 0xf1, + 0x8c, 0x7a, 0xc7, 0x90, 0x9a, 0x55, 0x4e, 0xed, 0x18, 0x65, 0xe5, 0x2a, 0x87, 0xae, 0xdc, 0x6a, + 0x02, 0x00, 0x53, 0x80, 0xe4, 0x15, 0x18, 0xd9, 0xa6, 0xbb, 0x81, 0xbe, 0x21, 0x19, 0x0c, 0x1d, + 0x86, 0xc1, 0x38, 0x53, 0xe1, 0xae, 0xc5, 0x8a, 0x63, 0x02, 0x8c, 0xf8, 0x70, 0x7a, 0x9b, 0x7a, + 0x1b, 0xd4, 0x73, 0xe5, 0xee, 0x53, 0x32, 0x19, 0x3e, 0x0c, 0x93, 0x89, 0xfd, 0xbd, 0xc9, 0xd3, + 0xd7, 0x32, 0x60, 0x30, 0x13, 0x5c, 0xfb, 0x69, 0x11, 0x4e, 0x2c, 0x8a, 0x53, 0x54, 0xd7, 0x13, + 0x8b, 0x30, 0x39, 0x0b, 0x25, 0xaf, 0xd3, 0xe5, 0x3d, 0xa7, 0x24, 0xec, 0xa4, 0xb8, 0xba, 0x8e, + 0x2c, 0x8d, 0xbc, 0x0c, 0x55, 0x53, 0x4e, 0x19, 0x72, 0xf3, 0x7b, 0xd8, 0x89, 0x86, 0x2f, 0x82, + 0xea, 0x0d, 0x43, 0x34, 0xa6, 0xe9, 0xb7, 0xfd, 0x56, 0xd3, 0x7a, 0x83, 0xca, 0xfd, 0x20, 0xd7, + 0xf4, 0x57, 0x44, 0x12, 0xaa, 0x3c, 0xb6, 0xaa, 0x6e, 0xd3, 0x5d, 0xb1, 0x1b, 0x2a, 0x47, 0xab, + 0xea, 0x35, 0x99, 0x86, 0x61, 0x2e, 0x99, 0x54, 
0x83, 0x85, 0xf5, 0x82, 0xb2, 0xd8, 0xc9, 0xdf, + 0x64, 0x09, 0x72, 0xdc, 0xb0, 0x29, 0xf3, 0x35, 0x2b, 0x08, 0xa8, 0x27, 0x3f, 0xe3, 0x40, 0x53, + 0xe6, 0x55, 0x8e, 0x80, 0x12, 0x89, 0x7c, 0x18, 0x6a, 0x1c, 0xbc, 0x61, 0xbb, 0x1b, 0xfc, 0xc3, + 0xd5, 0xc4, 0x9e, 0xfe, 0xa6, 0x4a, 0xc4, 0x28, 0x5f, 0xfb, 0x79, 0x11, 0xce, 0x2c, 0xd2, 0x40, + 0x68, 0x35, 0xf3, 0xb4, 0x63, 0xbb, 0xbb, 0x4c, 0xb5, 0x44, 0xfa, 0x3a, 0x79, 0x11, 0xc0, 0xf2, + 0x37, 0x9a, 0x3b, 0x06, 0x1f, 0x07, 0x62, 0x0c, 0x5f, 0x94, 0x43, 0x12, 0x96, 0x9a, 0x0d, 0x99, + 0x73, 0x27, 0xf1, 0x86, 0xb1, 0x32, 0xd1, 0xf6, 0xaa, 0x78, 0x97, 0xed, 0x55, 0x13, 0xa0, 0x13, + 0x29, 0xa8, 0x25, 0x4e, 0xf9, 0x3f, 0x15, 0x9b, 0xc3, 0xe8, 0xa6, 0x31, 0x98, 0x3c, 0x2a, 0xa3, + 0x03, 0xe3, 0x26, 0xdd, 0xd4, 0xbb, 0x76, 0x10, 0x2a, 0xd5, 0x72, 0x10, 0x1f, 0x5c, 0x2f, 0x0f, + 0x4f, 0x78, 0xe7, 0x53, 0x48, 0xd8, 0x83, 0xad, 0xfd, 0x5e, 0x09, 0xce, 0x2d, 0xd2, 0x20, 0xb4, + 0xb8, 0xc8, 0xd9, 0xb1, 0xd9, 0xa1, 0x06, 0xfb, 0x0a, 0x6f, 0x16, 0x60, 0xc8, 0xd6, 0x37, 0xa8, + 0xcd, 0x56, 0x2f, 0xd6, 0x9a, 0x57, 0x07, 0x5e, 0x08, 0xfa, 0x73, 0x99, 0x5a, 0xe6, 0x1c, 0x52, + 0x4b, 0x83, 0x48, 0x44, 0xc9, 0x9e, 0x4d, 0xea, 0x86, 0xdd, 0xf5, 0x03, 0xea, 0xad, 0xba, 0x5e, + 0x20, 0xf5, 0xc9, 0x70, 0x52, 0x9f, 0x8b, 0xb2, 0x30, 0x4e, 0x47, 0x66, 0x00, 0x0c, 0xdb, 0xa2, + 0x4e, 0xc0, 0x4b, 0x89, 0x71, 0x45, 0xd4, 0xf7, 0x9d, 0x0b, 0x73, 0x30, 0x46, 0xc5, 0x58, 0xb5, + 0x5d, 0xc7, 0x0a, 0x5c, 0xc1, 0xaa, 0x9c, 0x64, 0xb5, 0x12, 0x65, 0x61, 0x9c, 0x8e, 0x17, 0xa3, + 0x81, 0x67, 0x19, 0x3e, 0x2f, 0x56, 0x49, 0x15, 0x8b, 0xb2, 0x30, 0x4e, 0xc7, 0xd6, 0xbc, 0x58, + 0xfb, 0x0f, 0xb5, 0xe6, 0x7d, 0xab, 0x06, 0x17, 0x12, 0x62, 0x0d, 0xf4, 0x80, 0x6e, 0x76, 0xed, + 0x26, 0x0d, 0xd4, 0x07, 0x1c, 0x70, 0x2d, 0xfc, 0x85, 0xe8, 0xbb, 0x0b, 0xdf, 0x0d, 0xe3, 0x68, + 0xbe, 0x7b, 0x4f, 0x05, 0x0f, 0xf4, 0xed, 0xa7, 0xa1, 0xe6, 0xe8, 0x81, 0xcf, 0x07, 0xae, 0x1c, + 0xa3, 0xa1, 0x1a, 0x76, 0x5d, 0x65, 0x60, 0x44, 0x43, 0x56, 0xe1, 0xb4, 0x14, 0xf1, 
0xe5, 0xdb, + 0x1d, 0xd7, 0x0b, 0xa8, 0x27, 0xca, 0xca, 0xe5, 0x54, 0x96, 0x3d, 0xbd, 0x92, 0x41, 0x83, 0x99, + 0x25, 0xc9, 0x0a, 0x9c, 0x32, 0xc4, 0x79, 0x36, 0xb5, 0x5d, 0xdd, 0x54, 0x80, 0xc2, 0xc0, 0x15, + 0x6e, 0x8d, 0xe6, 0x7a, 0x49, 0x30, 0xab, 0x5c, 0xba, 0x37, 0x0f, 0x0d, 0xd4, 0x9b, 0x87, 0x07, + 0xe9, 0xcd, 0xd5, 0xc1, 0x7a, 0x73, 0xed, 0x60, 0xbd, 0x99, 0x49, 0x9e, 0xf5, 0x23, 0xea, 0x31, + 0xf5, 0x44, 0xac, 0xb0, 0x31, 0x77, 0x89, 0x50, 0xf2, 0xcd, 0x0c, 0x1a, 0xcc, 0x2c, 0x49, 0x36, + 0xe0, 0x9c, 0x48, 0xbf, 0xec, 0x18, 0xde, 0x6e, 0x87, 0x2d, 0x3c, 0x31, 0xdc, 0x7a, 0xc2, 0xc2, + 0x78, 0xae, 0xd9, 0x97, 0x12, 0xef, 0x82, 0x42, 0xfe, 0x17, 0x8c, 0x8a, 0xaf, 0xb4, 0xa2, 0x77, + 0x38, 0xac, 0x70, 0x9e, 0x78, 0x58, 0xc2, 0x8e, 0xce, 0xc5, 0x33, 0x31, 0x49, 0x4b, 0x66, 0xe1, + 0x44, 0x67, 0xc7, 0x60, 0x8f, 0x4b, 0x9b, 0xd7, 0x29, 0x35, 0xa9, 0xc9, 0x4f, 0x6b, 0x6a, 0x8d, + 0x47, 0x94, 0xa1, 0x63, 0x35, 0x99, 0x8d, 0x69, 0x7a, 0xf2, 0x3c, 0x8c, 0xf8, 0x81, 0xee, 0x05, + 0xd2, 0xac, 0x37, 0x31, 0x26, 0x9c, 0x4b, 0x94, 0xd5, 0xab, 0x19, 0xcb, 0xc3, 0x04, 0x65, 0xe6, + 0x7a, 0x71, 0xe2, 0xf8, 0xd6, 0x8b, 0x3c, 0xb3, 0xd5, 0x9f, 0x15, 0xe1, 0xe2, 0x22, 0x0d, 0x56, + 0x5c, 0x47, 0x1a, 0x45, 0xb3, 0x96, 0xfd, 0x03, 0xd9, 0x44, 0x93, 0x8b, 0x76, 0xf1, 0x48, 0x17, + 0xed, 0xd2, 0x11, 0x2d, 0xda, 0xe5, 0x63, 0x5c, 0xb4, 0xff, 0xa0, 0x08, 0x8f, 0x24, 0x24, 0xb9, + 0xea, 0x9a, 0x6a, 0xc2, 0x7f, 0x5f, 0x80, 0x07, 0x10, 0xe0, 0x1d, 0xa1, 0x77, 0xf2, 0x63, 0xad, + 0x94, 0xc6, 0xf3, 0xd5, 0xb4, 0xc6, 0xf3, 0x4a, 0x9e, 0x95, 0x2f, 0x83, 0xc3, 0x81, 0x56, 0xbc, + 0xab, 0x40, 0x3c, 0x79, 0x08, 0x27, 0x4c, 0x3f, 0x31, 0xa5, 0x27, 0xf4, 0x5e, 0xc3, 0x1e, 0x0a, + 0xcc, 0x28, 0x45, 0x9a, 0xf0, 0xb0, 0x4f, 0x9d, 0xc0, 0x72, 0xa8, 0x9d, 0x84, 0x13, 0xda, 0xd0, + 0x63, 0x12, 0xee, 0xe1, 0x66, 0x16, 0x11, 0x66, 0x97, 0xcd, 0x33, 0x0f, 0xfc, 0x25, 0x70, 0x95, + 0x53, 0x88, 0xe6, 0xc8, 0x34, 0x96, 0x37, 0xd3, 0x1a, 0xcb, 0xab, 0xf9, 0xbf, 0xdb, 0x60, 0xda, + 0xca, 0x0c, 0x00, 0xff, 
0x0a, 0x71, 0x75, 0x25, 0x5c, 0xa4, 0x31, 0xcc, 0xc1, 0x18, 0x15, 0x5b, + 0x80, 0x94, 0x9c, 0xe3, 0x9a, 0x4a, 0xb8, 0x00, 0x35, 0xe3, 0x99, 0x98, 0xa4, 0xed, 0xab, 0xed, + 0x54, 0x06, 0xd6, 0x76, 0xae, 0x02, 0x49, 0x18, 0x1e, 0x05, 0xde, 0x50, 0xd2, 0x79, 0x72, 0xa9, + 0x87, 0x02, 0x33, 0x4a, 0xf5, 0xe9, 0xca, 0xc3, 0x47, 0xdb, 0x95, 0xab, 0x83, 0x77, 0x65, 0xf2, + 0x2a, 0x9c, 0xe5, 0xac, 0xa4, 0x7c, 0x92, 0xc0, 0x42, 0xef, 0xf9, 0x80, 0x04, 0x3e, 0x8b, 0xfd, + 0x08, 0xb1, 0x3f, 0x06, 0xfb, 0x3e, 0x86, 0x47, 0x4d, 0xc6, 0x5c, 0xb7, 0xfb, 0xeb, 0x44, 0x73, + 0x19, 0x34, 0x98, 0x59, 0x92, 0x75, 0xb1, 0x80, 0x75, 0x43, 0x7d, 0xc3, 0xa6, 0xa6, 0x74, 0x1e, + 0x0d, 0xbb, 0xd8, 0xda, 0x72, 0x53, 0xe6, 0x60, 0x8c, 0x2a, 0x4b, 0x4d, 0x19, 0x39, 0xa4, 0x9a, + 0xb2, 0xc8, 0xad, 0xf4, 0x9b, 0x09, 0x6d, 0x48, 0xea, 0x3a, 0xa1, 0x3b, 0xf0, 0x5c, 0x9a, 0x00, + 0x7b, 0xcb, 0x70, 0x2d, 0xd1, 0xf0, 0xac, 0x4e, 0xe0, 0x27, 0xb1, 0xc6, 0x52, 0x5a, 0x62, 0x06, + 0x0d, 0x66, 0x96, 0x64, 0xfa, 0xf9, 0x16, 0xd5, 0xed, 0x60, 0x2b, 0x09, 0x78, 0x22, 0xa9, 0x9f, + 0x5f, 0xe9, 0x25, 0xc1, 0xac, 0x72, 0x99, 0x0b, 0xd2, 0xf8, 0x83, 0xa9, 0x56, 0x7d, 0xa5, 0x04, + 0x67, 0x17, 0x69, 0x10, 0xfa, 0xd5, 0xbc, 0x6f, 0x46, 0x79, 0x17, 0xcc, 0x28, 0xdf, 0xac, 0xc0, + 0xa9, 0x45, 0x1a, 0xf4, 0x68, 0x63, 0xff, 0x45, 0xc5, 0xbf, 0x02, 0xa7, 0x22, 0x57, 0xae, 0x66, + 0xe0, 0x7a, 0x62, 0x2d, 0x4f, 0xed, 0x96, 0x9b, 0xbd, 0x24, 0x98, 0x55, 0x8e, 0x7c, 0x06, 0x1e, + 0xe1, 0x4b, 0xbd, 0xd3, 0x12, 0xf6, 0x59, 0x61, 0x4c, 0x88, 0x5d, 0x46, 0x98, 0x94, 0x90, 0x8f, + 0x34, 0xb3, 0xc9, 0xb0, 0x5f, 0x79, 0xf2, 0x45, 0x18, 0xe9, 0x58, 0x1d, 0x6a, 0x5b, 0x0e, 0xd7, + 0xcf, 0x72, 0xbb, 0x84, 0xac, 0xc6, 0xc0, 0xa2, 0x0d, 0x5c, 0x3c, 0x15, 0x13, 0x0c, 0x33, 0x7b, + 0x6a, 0xf5, 0x18, 0x7b, 0xea, 0xbf, 0x15, 0x61, 0x78, 0xd1, 0x73, 0xbb, 0x9d, 0xc6, 0x2e, 0x69, + 0xc1, 0xd0, 0x2d, 0x7e, 0x78, 0x26, 0x8f, 0xa6, 0x06, 0x77, 0x87, 0x16, 0x67, 0x70, 0x91, 0x4a, + 0x24, 0xde, 0x51, 0xc2, 0xb3, 0x4e, 0xbc, 0x4d, 0x77, 0xa9, 
0x29, 0xcf, 0xd0, 0xc2, 0x4e, 0x7c, + 0x8d, 0x25, 0xa2, 0xc8, 0x23, 0x6d, 0x38, 0xa1, 0xdb, 0xb6, 0x7b, 0x8b, 0x9a, 0xcb, 0x7a, 0x40, + 0x1d, 0xea, 0xab, 0x23, 0xc9, 0xc3, 0x9a, 0xa5, 0xf9, 0xb9, 0xfe, 0x6c, 0x12, 0x0a, 0xd3, 0xd8, + 0xe4, 0x35, 0x18, 0xf6, 0x03, 0xd7, 0x53, 0xca, 0x56, 0x7d, 0x66, 0x6e, 0xf0, 0x8f, 0xde, 0xf8, + 0x74, 0x53, 0x40, 0x09, 0x9b, 0xbd, 0x7c, 0x41, 0xc5, 0x40, 0xfb, 0x46, 0x01, 0xe0, 0xca, 0xda, + 0xda, 0xaa, 0x3c, 0x5e, 0x30, 0xa1, 0xac, 0x77, 0xc3, 0x83, 0xca, 0xc1, 0x0f, 0x04, 0x13, 0xfe, + 0x90, 0xf2, 0x0c, 0xaf, 0x1b, 0x6c, 0x21, 0x47, 0x27, 0x1f, 0x82, 0x61, 0xa9, 0x20, 0x4b, 0xb1, + 0x87, 0xae, 0x05, 0x52, 0x89, 0x46, 0x95, 0xaf, 0xfd, 0x4e, 0x11, 0x60, 0xc9, 0xb4, 0x69, 0x53, + 0x79, 0xb0, 0xd7, 0x82, 0x2d, 0x8f, 0xfa, 0x5b, 0xae, 0x6d, 0x0e, 0x78, 0x9a, 0xca, 0x6d, 0xfe, + 0x6b, 0x0a, 0x04, 0x23, 0x3c, 0x62, 0xc2, 0x88, 0x1f, 0xd0, 0xce, 0x92, 0x13, 0x50, 0x6f, 0x47, + 0xb7, 0x07, 0x3c, 0x44, 0x19, 0x17, 0x76, 0x91, 0x08, 0x07, 0x13, 0xa8, 0x44, 0x87, 0xba, 0xe5, + 0x18, 0x62, 0x80, 0x34, 0x76, 0x07, 0xec, 0x48, 0x27, 0xd8, 0x8e, 0x63, 0x29, 0x82, 0xc1, 0x38, + 0xa6, 0xf6, 0xc3, 0x22, 0x9c, 0xe1, 0xfc, 0x58, 0x35, 0x12, 0xfe, 0x98, 0xe4, 0xff, 0xf4, 0xdc, + 0xb6, 0xfb, 0x1f, 0x07, 0x63, 0x2d, 0x2e, 0x6b, 0xad, 0xd0, 0x40, 0x8f, 0xf4, 0xb9, 0x28, 0x2d, + 0x76, 0xc5, 0xae, 0x0b, 0x65, 0x9f, 0xcd, 0x57, 0x42, 0x7a, 0xcd, 0x81, 0xbb, 0x50, 0x76, 0x03, + 0xf8, 0xec, 0x15, 0x9e, 0x1a, 0xf3, 0x59, 0x8b, 0xb3, 0x23, 0xff, 0x0f, 0x86, 0xfc, 0x40, 0x0f, + 0xba, 0x6a, 0x68, 0xae, 0x1f, 0x35, 0x63, 0x0e, 0x1e, 0xcd, 0x23, 0xe2, 0x1d, 0x25, 0x53, 0xed, + 0x87, 0x05, 0x38, 0x97, 0x5d, 0x70, 0xd9, 0xf2, 0x03, 0xf2, 0xbf, 0x7b, 0xc4, 0x7e, 0xc0, 0x2f, + 0xce, 0x4a, 0x73, 0xa1, 0x87, 0x0e, 0xd9, 0x2a, 0x25, 0x26, 0xf2, 0x00, 0x2a, 0x56, 0x40, 0xdb, + 0x6a, 0x7f, 0x79, 0xe3, 0x88, 0x9b, 0x1e, 0x5b, 0xda, 0x19, 0x17, 0x14, 0xcc, 0xb4, 0xb7, 0x8a, + 0xfd, 0x9a, 0xcc, 0x97, 0x0f, 0x3b, 0xe9, 0xf3, 0x7b, 0x2d, 0x9f, 0xcf, 0x6f, 0xb2, 0x42, 0xbd, + 
0xae, 0xbf, 0xff, 0xb7, 0xd7, 0xf5, 0xf7, 0x46, 0x7e, 0xd7, 0xdf, 0x94, 0x18, 0xfa, 0x7a, 0x00, + 0xbf, 0x53, 0x82, 0xf3, 0x77, 0xeb, 0x36, 0x6c, 0x3d, 0x93, 0xbd, 0x33, 0xef, 0x7a, 0x76, 0xf7, + 0x7e, 0x48, 0x66, 0xa0, 0xd2, 0xd9, 0xd2, 0x7d, 0xa5, 0x94, 0xa9, 0x0d, 0x4b, 0x65, 0x95, 0x25, + 0xde, 0x61, 0x93, 0x06, 0x57, 0xe6, 0xf8, 0x2b, 0x0a, 0x52, 0x36, 0x1d, 0xb7, 0xa9, 0xef, 0x47, + 0x36, 0x81, 0x70, 0x3a, 0x5e, 0x11, 0xc9, 0xa8, 0xf2, 0x49, 0x00, 0x43, 0xc2, 0xc4, 0x2c, 0x57, + 0xa6, 0xc1, 0x1d, 0xb9, 0x32, 0xdc, 0xc4, 0xa3, 0x46, 0xc9, 0xd3, 0x0a, 0xc9, 0x8b, 0x4c, 0x41, + 0x39, 0x88, 0x9c, 0x76, 0xd5, 0xd6, 0xbc, 0x9c, 0xa1, 0x9f, 0x72, 0x3a, 0xb6, 0xb1, 0x77, 0x37, + 0xb8, 0x51, 0xdd, 0x94, 0xe7, 0xe7, 0x96, 0xeb, 0x70, 0x85, 0xac, 0x14, 0x6d, 0xec, 0x6f, 0xf4, + 0x50, 0x60, 0x46, 0x29, 0xed, 0x6f, 0xaa, 0x70, 0x26, 0xbb, 0x3f, 0x30, 0xb9, 0xed, 0x50, 0xcf, + 0x67, 0xd8, 0x85, 0xa4, 0xdc, 0x6e, 0x8a, 0x64, 0x54, 0xf9, 0xef, 0x69, 0x87, 0xb3, 0x6f, 0x16, + 0xe0, 0xac, 0x27, 0xcf, 0x88, 0xee, 0x87, 0xd3, 0xd9, 0x63, 0xc2, 0x9c, 0xd1, 0x87, 0x21, 0xf6, + 0xaf, 0x0b, 0xf9, 0xad, 0x02, 0x4c, 0xb4, 0x53, 0x76, 0x8e, 0x63, 0xbc, 0x30, 0xc6, 0xbd, 0xe2, + 0x57, 0xfa, 0xf0, 0xc3, 0xbe, 0x35, 0x21, 0x5f, 0x84, 0x7a, 0x87, 0xf5, 0x0b, 0x3f, 0xa0, 0x8e, + 0xa1, 0xee, 0x8c, 0x0d, 0x3e, 0x92, 0x56, 0x23, 0x2c, 0xe5, 0x8a, 0x26, 0xf4, 0x83, 0x58, 0x06, + 0xc6, 0x39, 0x3e, 0xe0, 0x37, 0xc4, 0x2e, 0x41, 0xd5, 0xa7, 0x41, 0x60, 0x39, 0x2d, 0xb1, 0xdf, + 0xa8, 0x89, 0xb1, 0xd2, 0x94, 0x69, 0x18, 0xe6, 0x92, 0x0f, 0x43, 0x8d, 0x1f, 0x39, 0xcd, 0x7a, + 0x2d, 0x7f, 0xa2, 0xc6, 0xdd, 0xc5, 0x46, 0x85, 0x03, 0x9c, 0x4c, 0xc4, 0x28, 0x9f, 0x3c, 0x0d, + 0x23, 0x1b, 0x7c, 0xf8, 0xca, 0x4b, 0xc3, 0xc2, 0xc6, 0xc5, 0xb5, 0xb5, 0x46, 0x2c, 0x1d, 0x13, + 0x54, 0x64, 0x06, 0x80, 0x86, 0xe7, 0x72, 0x69, 0x7b, 0x56, 0x74, 0x62, 0x87, 0x31, 0x2a, 0xf2, + 0x18, 0x94, 0x02, 0xdb, 0xe7, 0x36, 0xac, 0x6a, 0xb4, 0x05, 0x5d, 0x5b, 0x6e, 0x22, 0x4b, 0xd7, + 0x7e, 0x5e, 0x80, 0x13, 0xa9, 0xcb, 
0x25, 0xac, 0x48, 0xd7, 0xb3, 0xe5, 0x34, 0x12, 0x16, 0x59, + 0xc7, 0x65, 0x64, 0xe9, 0xe4, 0x55, 0xa9, 0x96, 0x17, 0x73, 0xc6, 0x47, 0xb8, 0xae, 0x07, 0x3e, + 0xd3, 0xc3, 0x7b, 0x34, 0x72, 0x7e, 0xcc, 0x17, 0xd5, 0x47, 0xae, 0x03, 0xb1, 0x63, 0xbe, 0x28, + 0x0f, 0x13, 0x94, 0x29, 0x83, 0x5f, 0xf9, 0x20, 0x06, 0x3f, 0xed, 0x6b, 0xc5, 0x98, 0x04, 0xa4, + 0x66, 0x7f, 0x0f, 0x09, 0x3c, 0xc9, 0x16, 0xd0, 0x70, 0x71, 0xaf, 0xc5, 0xd7, 0x3f, 0xbe, 0x18, + 0xcb, 0x5c, 0xf2, 0x92, 0x90, 0x7d, 0x29, 0xe7, 0x2d, 0xd4, 0xb5, 0xe5, 0xa6, 0xf0, 0xae, 0x52, + 0x5f, 0x2d, 0xfc, 0x04, 0xe5, 0x63, 0xfa, 0x04, 0xda, 0x5f, 0x94, 0xa0, 0x7e, 0xd5, 0xdd, 0x78, + 0x8f, 0x78, 0x50, 0x67, 0x2f, 0x53, 0xc5, 0x77, 0x71, 0x99, 0x5a, 0x87, 0x47, 0x82, 0xc0, 0x6e, + 0x52, 0xc3, 0x75, 0x4c, 0x7f, 0x76, 0x33, 0xa0, 0xde, 0x82, 0xe5, 0x58, 0xfe, 0x16, 0x35, 0xe5, + 0x71, 0xd2, 0xa3, 0xfb, 0x7b, 0x93, 0x8f, 0xac, 0xad, 0x2d, 0x67, 0x91, 0x60, 0xbf, 0xb2, 0x7c, + 0xda, 0xd0, 0x8d, 0x6d, 0x77, 0x73, 0x93, 0xdf, 0x94, 0x91, 0x3e, 0x37, 0x62, 0xda, 0x88, 0xa5, + 0x63, 0x82, 0x4a, 0x7b, 0xbb, 0x08, 0xb5, 0xf0, 0xe6, 0x3b, 0x79, 0x02, 0x86, 0x37, 0x3c, 0x77, + 0x9b, 0x7a, 0xe2, 0xe4, 0x4e, 0xde, 0x94, 0x69, 0x88, 0x24, 0x54, 0x79, 0xe4, 0x71, 0xa8, 0x04, + 0x6e, 0xc7, 0x32, 0xd2, 0x06, 0xb5, 0x35, 0x96, 0x88, 0x22, 0xef, 0xf8, 0x3a, 0xf8, 0x93, 0x09, + 0xd5, 0xae, 0xd6, 0x57, 0x19, 0x7b, 0x05, 0xca, 0xbe, 0xee, 0xdb, 0x72, 0x3d, 0xcd, 0x71, 0x89, + 0x7c, 0xb6, 0xb9, 0x2c, 0x2f, 0x91, 0xcf, 0x36, 0x97, 0x91, 0x83, 0x6a, 0x3f, 0x29, 0x42, 0x5d, + 0xc8, 0x4d, 0xcc, 0x0a, 0x47, 0x29, 0xb9, 0x17, 0xb8, 0x2b, 0x85, 0xdf, 0x6d, 0x53, 0x8f, 0x9b, + 0x99, 0xe4, 0x24, 0x17, 0x3f, 0x1f, 0x88, 0x32, 0x43, 0x77, 0x8a, 0x28, 0x49, 0x89, 0xbe, 0x7c, + 0x8c, 0xa2, 0xaf, 0x1c, 0x48, 0xf4, 0x43, 0xc7, 0x21, 0xfa, 0x37, 0x8b, 0x50, 0x5b, 0xb6, 0x36, + 0xa9, 0xb1, 0x6b, 0xd8, 0xfc, 0x4e, 0xa0, 0x49, 0x6d, 0x1a, 0xd0, 0x45, 0x4f, 0x37, 0xe8, 0x2a, + 0xf5, 0x2c, 0x1e, 0x19, 0x86, 0x8d, 0x0f, 0x3e, 0x03, 0xc9, 0x3b, 0x81, 
0xf3, 0x7d, 0x68, 0xb0, + 0x6f, 0x69, 0xb2, 0x04, 0x23, 0x26, 0xf5, 0x2d, 0x8f, 0x9a, 0xab, 0xb1, 0x8d, 0xca, 0x13, 0x6a, + 0xa9, 0x99, 0x8f, 0xe5, 0xdd, 0xd9, 0x9b, 0x1c, 0x55, 0x06, 0x4a, 0xb1, 0x63, 0x49, 0x14, 0x65, + 0x43, 0xbe, 0xa3, 0x77, 0xfd, 0xac, 0x3a, 0xc6, 0x86, 0xfc, 0x6a, 0x36, 0x09, 0xf6, 0x2b, 0xab, + 0x55, 0xa0, 0xb4, 0xec, 0xb6, 0xb4, 0xb7, 0x4a, 0x10, 0x86, 0x10, 0x22, 0xff, 0xbf, 0x00, 0x75, + 0xdd, 0x71, 0xdc, 0x40, 0x86, 0xe7, 0x11, 0x27, 0xf0, 0x98, 0x3b, 0x52, 0xd1, 0xd4, 0x6c, 0x04, + 0x2a, 0x0e, 0x6f, 0xc3, 0x03, 0xe5, 0x58, 0x0e, 0xc6, 0x79, 0x93, 0x6e, 0xea, 0x3c, 0x79, 0x25, + 0x7f, 0x2d, 0x0e, 0x70, 0x7a, 0x7c, 0xee, 0x53, 0x30, 0x9e, 0xae, 0xec, 0x61, 0x8e, 0x83, 0x72, + 0x1d, 0xcc, 0x17, 0x01, 0x22, 0x9f, 0x92, 0xfb, 0x60, 0xc4, 0xb2, 0x12, 0x46, 0xac, 0xc5, 0xc1, + 0x05, 0x1c, 0x56, 0xba, 0xaf, 0xe1, 0xea, 0xf5, 0x94, 0xe1, 0x6a, 0xe9, 0x28, 0x98, 0xdd, 0xdd, + 0x58, 0xf5, 0xdb, 0x05, 0x18, 0x8f, 0x88, 0xe5, 0x0d, 0xd9, 0xe7, 0x60, 0xd4, 0xa3, 0xba, 0xd9, + 0xd0, 0x03, 0x63, 0x8b, 0xbb, 0x7a, 0x17, 0xb8, 0x6f, 0xf6, 0xc9, 0xfd, 0xbd, 0xc9, 0x51, 0x8c, + 0x67, 0x60, 0x92, 0x8e, 0xe8, 0x50, 0x67, 0x09, 0x6b, 0x56, 0x9b, 0xba, 0xdd, 0x60, 0x40, 0xab, + 0x29, 0xdf, 0xb0, 0x60, 0x04, 0x83, 0x71, 0x4c, 0xed, 0x9d, 0x02, 0x8c, 0xc5, 0x2b, 0x7c, 0xec, + 0x16, 0xb5, 0xad, 0xa4, 0x45, 0x6d, 0xee, 0x08, 0xbe, 0x49, 0x1f, 0x2b, 0xda, 0x4f, 0xab, 0xf1, + 0xa6, 0x71, 0xcb, 0x59, 0xdc, 0x58, 0x50, 0xb8, 0xab, 0xb1, 0xe0, 0xbd, 0x1f, 0x35, 0xa6, 0x9f, + 0x96, 0x5b, 0x7e, 0x80, 0xb5, 0xdc, 0x77, 0x33, 0xf4, 0x4c, 0x2c, 0x7c, 0xca, 0x50, 0x8e, 0xf0, + 0x29, 0xed, 0x30, 0x7c, 0xca, 0xf0, 0x91, 0x4d, 0x3a, 0x07, 0x09, 0xa1, 0x52, 0xbd, 0xaf, 0x21, + 0x54, 0x6a, 0xc7, 0x15, 0x42, 0x05, 0xf2, 0x86, 0x50, 0xf9, 0x6a, 0x01, 0xc6, 0xcc, 0xc4, 0x8d, + 0x59, 0x6e, 0x5b, 0xc8, 0xb3, 0xd4, 0x24, 0x2f, 0xe0, 0x8a, 0x2b, 0x53, 0xc9, 0x34, 0x4c, 0xb1, + 0xd4, 0x7e, 0x5c, 0x8e, 0xaf, 0x03, 0xf7, 0xdb, 0x54, 0xfd, 0x6c, 0xd2, 0x54, 0x7d, 0x31, 0x6d, + 0xaa, 0x3e, 
0x11, 0xf3, 0x22, 0x8d, 0x9b, 0xab, 0x3f, 0x12, 0x9b, 0x1e, 0xd9, 0x9c, 0x34, 0x1a, + 0x49, 0x3a, 0x63, 0x8a, 0xfc, 0x08, 0x54, 0x7d, 0x15, 0xec, 0x51, 0x6c, 0x6c, 0xa2, 0xef, 0xa2, + 0x02, 0x31, 0x86, 0x14, 0x4c, 0x13, 0xf7, 0xa8, 0xee, 0xbb, 0x4e, 0x5a, 0x13, 0x47, 0x9e, 0x8a, + 0x32, 0x37, 0x6e, 0x32, 0x1f, 0xba, 0x87, 0xc9, 0x5c, 0x87, 0xba, 0xad, 0xfb, 0xc1, 0x7a, 0xc7, + 0xd4, 0x03, 0x6a, 0xca, 0xf1, 0xf6, 0xdf, 0x0f, 0xb6, 0x56, 0xb1, 0xf5, 0x2f, 0x52, 0x08, 0x97, + 0x23, 0x18, 0x8c, 0x63, 0x12, 0x13, 0x46, 0xd8, 0x2b, 0x1f, 0x0d, 0xe6, 0xac, 0x0a, 0x01, 0x70, + 0x18, 0x1e, 0xa1, 0xa5, 0x67, 0x39, 0x86, 0x83, 0x09, 0xd4, 0x3e, 0x56, 0xf5, 0xda, 0x40, 0x56, + 0xf5, 0xaf, 0xd6, 0xa0, 0x7e, 0x5d, 0x0f, 0xac, 0x1d, 0xca, 0x4f, 0x71, 0x8e, 0xc7, 0x94, 0xfe, + 0x6b, 0x05, 0x38, 0x93, 0x74, 0xd5, 0x3b, 0x46, 0x7b, 0x3a, 0x0f, 0xfc, 0x81, 0x99, 0xdc, 0xb0, + 0x4f, 0x2d, 0xb8, 0x65, 0xbd, 0xc7, 0xf3, 0xef, 0xb8, 0x2d, 0xeb, 0xcd, 0x7e, 0x0c, 0xb1, 0x7f, + 0x5d, 0xde, 0x2b, 0x96, 0xf5, 0x07, 0x3b, 0x30, 0x5b, 0xca, 0xee, 0x3f, 0xfc, 0xc0, 0xd8, 0xfd, + 0xab, 0x0f, 0x84, 0xb2, 0xd5, 0x89, 0xd9, 0xfd, 0x6b, 0x39, 0xfd, 0x4f, 0xa4, 0x77, 0xbb, 0x40, + 0xeb, 0x77, 0x7e, 0xc0, 0x2f, 0xa6, 0x2b, 0x7b, 0x2c, 0xd3, 0x51, 0x36, 0x74, 0xdf, 0x32, 0xe4, + 0xb2, 0x97, 0x23, 0x10, 0xa5, 0x8a, 0xd8, 0x25, 0x8e, 0xa9, 0xf9, 0x2b, 0x0a, 0xec, 0x28, 0x32, + 0x58, 0x31, 0x57, 0x64, 0x30, 0x32, 0x07, 0x65, 0x87, 0xed, 0x9e, 0x4b, 0x87, 0x8e, 0x05, 0x76, + 0xfd, 0x1a, 0xdd, 0x45, 0x5e, 0x58, 0x7b, 0xbb, 0x08, 0xc0, 0x9a, 0x7f, 0x30, 0x0b, 0xfc, 0x87, + 0x60, 0xd8, 0xef, 0xf2, 0xbd, 0xb2, 0x5c, 0xb0, 0x23, 0xa7, 0x1d, 0x91, 0x8c, 0x2a, 0x9f, 0x3c, + 0x0e, 0x95, 0xd7, 0xbb, 0xb4, 0xab, 0x8e, 0x93, 0x43, 0x75, 0xed, 0xd3, 0x2c, 0x11, 0x45, 0xde, + 0xf1, 0x59, 0xd3, 0x94, 0xa5, 0xbe, 0x72, 0x5c, 0x96, 0xfa, 0x1a, 0x0c, 0x5f, 0x77, 0xb9, 0x0f, + 0xa0, 0xf6, 0x2f, 0x45, 0x80, 0xc8, 0xc7, 0x8a, 0x7c, 0xa3, 0x00, 0x0f, 0x87, 0x03, 0x2e, 0x10, + 0x5a, 0x37, 0x8f, 0xfd, 0x9a, 0xdb, 0x6a, 0x9f, 
0x35, 0xd8, 0xf9, 0x0c, 0xb4, 0x9a, 0xc5, 0x0e, + 0xb3, 0x6b, 0x41, 0x10, 0xaa, 0xb4, 0xdd, 0x09, 0x76, 0xe7, 0x2d, 0x4f, 0xf6, 0xc0, 0x4c, 0x57, + 0xbe, 0xcb, 0x92, 0x46, 0x14, 0x95, 0x5b, 0x43, 0x3e, 0x88, 0x54, 0x0e, 0x86, 0x38, 0x64, 0x0b, + 0xaa, 0x8e, 0xfb, 0xaa, 0xcf, 0xc4, 0x21, 0xbb, 0xe3, 0x8b, 0x83, 0x8b, 0x5c, 0x88, 0x55, 0x58, + 0x79, 0xe5, 0x0b, 0x0e, 0x3b, 0x52, 0xd8, 0x5f, 0x2f, 0xc2, 0xa9, 0x0c, 0x39, 0x90, 0x17, 0x61, + 0x5c, 0xba, 0xb3, 0x45, 0x41, 0x90, 0x0b, 0x51, 0x10, 0xe4, 0x66, 0x2a, 0x0f, 0x7b, 0xa8, 0xc9, + 0xab, 0x00, 0xba, 0x61, 0x50, 0xdf, 0x5f, 0x71, 0x4d, 0xa5, 0x8f, 0xbe, 0xb0, 0xbf, 0x37, 0x09, + 0xb3, 0x61, 0xea, 0x9d, 0xbd, 0xc9, 0x8f, 0x66, 0x79, 0xa8, 0xa6, 0xe4, 0x1c, 0x15, 0xc0, 0x18, + 0x24, 0xf9, 0x3c, 0x80, 0xd8, 0x7a, 0x85, 0x97, 0xe8, 0xef, 0x61, 0xaf, 0x98, 0x52, 0xe1, 0x8a, + 0xa6, 0x3e, 0xdd, 0xd5, 0x9d, 0xc0, 0x0a, 0x76, 0x45, 0xcc, 0x92, 0x9b, 0x21, 0x0a, 0xc6, 0x10, + 0xb5, 0x3f, 0x2d, 0x42, 0x55, 0x59, 0x4a, 0xef, 0x83, 0x79, 0xac, 0x95, 0x30, 0x8f, 0x1d, 0x91, + 0x4f, 0x6a, 0x96, 0x71, 0xcc, 0x4d, 0x19, 0xc7, 0x16, 0xf3, 0xb3, 0xba, 0xbb, 0x69, 0xec, 0xdb, + 0x45, 0x18, 0x53, 0xa4, 0x79, 0x0d, 0x63, 0x9f, 0x84, 0x13, 0xe2, 0x2c, 0x79, 0x45, 0xbf, 0x2d, + 0xc2, 0xb7, 0x70, 0x81, 0x95, 0x85, 0x1b, 0x68, 0x23, 0x99, 0x85, 0x69, 0x5a, 0xd6, 0xad, 0x45, + 0xd2, 0x3a, 0xdb, 0x47, 0x88, 0xd3, 0x27, 0xb1, 0xdf, 0xe1, 0xdd, 0xba, 0x91, 0xca, 0xc3, 0x1e, + 0xea, 0xb4, 0x65, 0xae, 0x7c, 0x0c, 0x96, 0xb9, 0xbf, 0x2d, 0xc0, 0x48, 0x24, 0xaf, 0x63, 0xb7, + 0xcb, 0x6d, 0x26, 0xed, 0x72, 0xb3, 0xb9, 0xbb, 0x43, 0x1f, 0xab, 0xdc, 0x2f, 0x0d, 0x43, 0xc2, + 0x35, 0x9a, 0x6c, 0xc0, 0x39, 0x2b, 0xd3, 0xc1, 0x2b, 0x36, 0xdb, 0x84, 0x77, 0x7d, 0x97, 0xfa, + 0x52, 0xe2, 0x5d, 0x50, 0x48, 0x17, 0xaa, 0x3b, 0xd4, 0x0b, 0x2c, 0x83, 0xaa, 0xf6, 0x2d, 0xe6, + 0x56, 0xc9, 0xa4, 0xed, 0x31, 0x94, 0xe9, 0x4d, 0xc9, 0x00, 0x43, 0x56, 0x64, 0x03, 0x2a, 0xd4, + 0x6c, 0x51, 0x15, 0x50, 0x27, 0x67, 0xb8, 0xca, 0x50, 0x9e, 0xec, 0xcd, 0x47, 0x01, 
0x4d, 0x7c, + 0xa8, 0xd9, 0xea, 0x6c, 0x49, 0xf6, 0xc3, 0xc1, 0x15, 0xac, 0xf0, 0x94, 0x2a, 0xba, 0x6b, 0x1f, + 0x26, 0x61, 0xc4, 0x87, 0x6c, 0x87, 0x46, 0xae, 0xca, 0x11, 0x4d, 0x1e, 0x77, 0x31, 0x71, 0xf9, + 0x50, 0xbb, 0xa5, 0x07, 0xd4, 0x6b, 0xeb, 0xde, 0xb6, 0xdc, 0x6d, 0x0c, 0xde, 0xc2, 0x97, 0x14, + 0x52, 0xd4, 0xc2, 0x30, 0x09, 0x23, 0x3e, 0xc4, 0x85, 0x5a, 0x20, 0xd5, 0x67, 0x65, 0xc9, 0x1b, + 0x9c, 0xa9, 0x52, 0xc4, 0x7d, 0xe9, 0x22, 0xad, 0x5e, 0x31, 0xe2, 0x41, 0x76, 0x12, 0xa1, 0x7c, + 0x45, 0x00, 0xe7, 0x46, 0x0e, 0x8b, 0xb0, 0x84, 0x8a, 0x96, 0x9b, 0xec, 0x90, 0xc0, 0xda, 0xdb, + 0x95, 0x68, 0x5a, 0xbe, 0xdf, 0x76, 0xaa, 0xa7, 0x93, 0x76, 0xaa, 0x0b, 0x69, 0x3b, 0x55, 0xea, + 0x88, 0xf2, 0xf0, 0x4e, 0x95, 0x29, 0x0b, 0x51, 0xf9, 0x18, 0x2c, 0x44, 0x4f, 0x41, 0x7d, 0x87, + 0xcf, 0x04, 0x22, 0x3a, 0x4f, 0x85, 0x2f, 0x23, 0x7c, 0x66, 0xbf, 0x19, 0x25, 0x63, 0x9c, 0x86, + 0x15, 0x91, 0x3f, 0x2f, 0x08, 0xc3, 0x9b, 0xca, 0x22, 0xcd, 0x28, 0x19, 0xe3, 0x34, 0xdc, 0x1f, + 0xcb, 0x72, 0xb6, 0x45, 0x81, 0x61, 0x5e, 0x40, 0xf8, 0x63, 0xa9, 0x44, 0x8c, 0xf2, 0xc9, 0x25, + 0xa8, 0x76, 0xcd, 0x4d, 0x41, 0x5b, 0xe5, 0xb4, 0x5c, 0xc3, 0x5c, 0x9f, 0x5f, 0x90, 0xd1, 0x82, + 0x54, 0x2e, 0xab, 0x49, 0x5b, 0xef, 0xa8, 0x0c, 0xbe, 0x37, 0x94, 0x35, 0x59, 0x89, 0x92, 0x31, + 0x4e, 0x43, 0x3e, 0x0e, 0x63, 0x1e, 0x35, 0xbb, 0x06, 0x0d, 0x4b, 0x01, 0x2f, 0x45, 0xc4, 0x5f, + 0x1a, 0xe2, 0x39, 0x98, 0xa2, 0xec, 0x63, 0xe7, 0xaa, 0x0f, 0x64, 0xe7, 0xfa, 0x41, 0x01, 0x48, + 0xaf, 0xff, 0x32, 0xd9, 0x82, 0x21, 0x87, 0x5b, 0xbf, 0x72, 0x07, 0x44, 0x8e, 0x19, 0xd1, 0xc4, + 0xb4, 0x24, 0x13, 0x24, 0x3e, 0x71, 0xa0, 0x4a, 0x6f, 0x07, 0xd4, 0x73, 0xc2, 0xfb, 0x0c, 0x47, + 0x13, 0x7c, 0x59, 0xec, 0x06, 0x24, 0x32, 0x86, 0x3c, 0xb4, 0x1f, 0x15, 0xa1, 0x1e, 0xa3, 0xbb, + 0xd7, 0xa6, 0x92, 0x5f, 0xa9, 0x16, 0x46, 0xa7, 0x75, 0xcf, 0x96, 0x23, 0x2c, 0x76, 0xa5, 0x5a, + 0x66, 0xe1, 0x32, 0xc6, 0xe9, 0xc8, 0x0c, 0x40, 0x5b, 0xf7, 0x03, 0xea, 0xf1, 0xd5, 0x37, 0x75, + 0x91, 0x79, 0x25, 0xcc, 
0xc1, 0x18, 0x15, 0xb9, 0x28, 0xc3, 0x67, 0x97, 0x93, 0x81, 0xe7, 0xfa, + 0xc4, 0xc6, 0xae, 0x1c, 0x41, 0x6c, 0x6c, 0xd2, 0x82, 0x71, 0x55, 0x6b, 0x95, 0x7b, 0xb8, 0xb0, + 0x64, 0x62, 0xff, 0x92, 0x82, 0xc0, 0x1e, 0x50, 0xed, 0xed, 0x02, 0x8c, 0x26, 0x4c, 0x1e, 0x22, + 0x64, 0x9c, 0xf2, 0xbe, 0x4f, 0x84, 0x8c, 0x8b, 0x39, 0xcd, 0x3f, 0x09, 0x43, 0x42, 0x40, 0x69, + 0xa7, 0x3a, 0x21, 0x42, 0x94, 0xb9, 0x6c, 0x2e, 0x93, 0x46, 0xd5, 0xf4, 0x5c, 0x26, 0xad, 0xae, + 0xa8, 0xf2, 0x85, 0xb9, 0x5d, 0xd4, 0xae, 0xd7, 0xdc, 0x2e, 0xd2, 0x31, 0xa4, 0xd0, 0x7e, 0x5c, + 0x02, 0xee, 0x82, 0x42, 0x9e, 0x83, 0x5a, 0x9b, 0x1a, 0x5b, 0xba, 0x63, 0xf9, 0x2a, 0x64, 0x24, + 0xdb, 0xdd, 0xd6, 0x56, 0x54, 0xe2, 0x1d, 0x06, 0x30, 0xdb, 0x5c, 0xe6, 0x5e, 0xde, 0x11, 0x2d, + 0x31, 0x60, 0xa8, 0xe5, 0xfb, 0x7a, 0xc7, 0xca, 0x7d, 0x02, 0x2a, 0x42, 0xf4, 0x89, 0x41, 0x24, + 0x9e, 0x51, 0x42, 0x13, 0x03, 0x2a, 0x1d, 0x5b, 0xb7, 0x9c, 0xdc, 0xff, 0x28, 0x61, 0x2d, 0x58, + 0x65, 0x48, 0xc2, 0xa4, 0xc3, 0x1f, 0x51, 0x60, 0x93, 0x2e, 0xd4, 0x7d, 0xc3, 0xd3, 0xdb, 0xfe, + 0x96, 0x3e, 0xf3, 0xcc, 0xb3, 0xb9, 0x95, 0xa4, 0x88, 0x95, 0x98, 0xb3, 0xe7, 0x70, 0x76, 0xa5, + 0x79, 0x65, 0x76, 0xe6, 0x99, 0x67, 0x31, 0xce, 0x27, 0xce, 0xf6, 0x99, 0xa7, 0x66, 0x64, 0xbf, + 0x3f, 0x72, 0xb6, 0xcf, 0x3c, 0x35, 0x83, 0x71, 0x3e, 0xda, 0xbf, 0x17, 0xa0, 0x16, 0xd2, 0x92, + 0x75, 0x00, 0x36, 0x02, 0x65, 0x50, 0xbd, 0x43, 0x05, 0xb8, 0xe7, 0xbb, 0xe2, 0xf5, 0xb0, 0x30, + 0xc6, 0x80, 0x32, 0xa2, 0x0e, 0x16, 0x8f, 0x3a, 0xea, 0xe0, 0x34, 0xd4, 0xb6, 0x74, 0xc7, 0xf4, + 0xb7, 0xf4, 0x6d, 0x31, 0x11, 0xc5, 0xe2, 0x70, 0x5e, 0x51, 0x19, 0x18, 0xd1, 0x68, 0x7f, 0x3c, + 0x04, 0xe2, 0xd8, 0x92, 0x0d, 0x15, 0xd3, 0xf2, 0x85, 0xdf, 0x6c, 0x81, 0x97, 0x0c, 0x87, 0xca, + 0xbc, 0x4c, 0xc7, 0x90, 0x82, 0x9c, 0x85, 0x52, 0xdb, 0x72, 0xe4, 0x89, 0x07, 0x37, 0x78, 0xad, + 0x58, 0x0e, 0xb2, 0x34, 0x9e, 0xa5, 0xdf, 0x96, 0x2e, 0x4f, 0x22, 0x4b, 0xbf, 0x8d, 0x2c, 0x8d, + 0x6d, 0x41, 0x6d, 0xd7, 0xdd, 0xde, 0xd0, 0x8d, 0x6d, 0xe5, 
0x19, 0x55, 0xe6, 0x0b, 0x21, 0xdf, + 0x82, 0x2e, 0x27, 0xb3, 0x30, 0x4d, 0x4b, 0xd6, 0xe1, 0x91, 0x37, 0xa8, 0xe7, 0xca, 0x51, 0xde, + 0xb4, 0x29, 0xed, 0x28, 0x18, 0xa1, 0x42, 0x70, 0x07, 0xab, 0xcf, 0x66, 0x93, 0x60, 0xbf, 0xb2, + 0xdc, 0x55, 0x53, 0xf7, 0x5a, 0x34, 0x58, 0xf5, 0x5c, 0x83, 0xfa, 0xbe, 0xe5, 0xb4, 0x14, 0xec, + 0x50, 0x04, 0xbb, 0x96, 0x4d, 0x82, 0xfd, 0xca, 0x92, 0x97, 0x61, 0x42, 0x64, 0x89, 0xc5, 0x76, + 0x76, 0x47, 0xb7, 0x6c, 0x7d, 0xc3, 0xb2, 0xd5, 0xaf, 0xbd, 0x46, 0xc5, 0xb9, 0xc2, 0x5a, 0x1f, + 0x1a, 0xec, 0x5b, 0x9a, 0x5c, 0x85, 0x71, 0x75, 0xaa, 0xb4, 0x4a, 0xbd, 0x66, 0x78, 0x94, 0x3d, + 0xda, 0xb8, 0xc0, 0xf6, 0x7b, 0xf3, 0xb4, 0xe3, 0x51, 0x83, 0x6b, 0x5d, 0x29, 0x2a, 0xec, 0x29, + 0x47, 0x10, 0xce, 0xf0, 0xf3, 0xea, 0xf5, 0xce, 0x9c, 0xeb, 0xda, 0xa6, 0x7b, 0xcb, 0x51, 0x6d, + 0x17, 0x8a, 0x0d, 0x3f, 0x48, 0x6a, 0x66, 0x52, 0x60, 0x9f, 0x92, 0xac, 0xe5, 0x3c, 0x67, 0xde, + 0xbd, 0xe5, 0xa4, 0x51, 0x21, 0x6a, 0x79, 0xb3, 0x0f, 0x0d, 0xf6, 0x2d, 0x4d, 0x16, 0x80, 0xa4, + 0x5b, 0xb0, 0xde, 0xe1, 0xca, 0xd0, 0x68, 0xe3, 0x8c, 0x88, 0x8f, 0x91, 0xce, 0xc5, 0x8c, 0x12, + 0x64, 0x19, 0x4e, 0xa7, 0x53, 0x19, 0x3b, 0xee, 0x24, 0x3f, 0x2a, 0x22, 0x63, 0x62, 0x46, 0x3e, + 0x66, 0x96, 0xd2, 0xfe, 0xa4, 0x08, 0xa3, 0x89, 0x0b, 0xd5, 0x0f, 0xdc, 0xc5, 0x55, 0xa6, 0x81, + 0xb6, 0xfd, 0xd6, 0xd2, 0xfc, 0x15, 0xaa, 0x9b, 0xd4, 0xbb, 0x46, 0xd5, 0xe5, 0x77, 0x3e, 0xa9, + 0xac, 0x24, 0x72, 0x30, 0x45, 0x49, 0x36, 0xa1, 0x22, 0xec, 0xa9, 0x79, 0x7f, 0x95, 0xa0, 0x64, + 0xc4, 0x8d, 0xaa, 0x7c, 0xc9, 0x11, 0x26, 0x55, 0x01, 0xaf, 0x05, 0x30, 0x12, 0xa7, 0x60, 0x13, + 0x49, 0xa4, 0xac, 0x0d, 0x27, 0x14, 0xb5, 0x25, 0x28, 0x05, 0xc1, 0xa0, 0x57, 0x62, 0x85, 0x7d, + 0x7e, 0x6d, 0x19, 0x19, 0x86, 0xb6, 0xc9, 0xbe, 0x9d, 0xef, 0x5b, 0xae, 0x23, 0xe3, 0x23, 0xaf, + 0xc3, 0x70, 0x20, 0x4d, 0x54, 0x83, 0x5d, 0xe9, 0xe5, 0xe6, 0x62, 0x65, 0x9e, 0x52, 0x58, 0xda, + 0xdf, 0x15, 0xa1, 0x16, 0x6e, 0x27, 0x0f, 0x10, 0x77, 0xd8, 0x85, 0x5a, 0xe8, 0x6f, 0x93, 0xfb, + 
0xb7, 0x67, 0x91, 0x1b, 0x08, 0xdf, 0x01, 0x85, 0xaf, 0x18, 0xf1, 0x88, 0xfb, 0xf2, 0x94, 0x72, + 0xf8, 0xf2, 0x74, 0x60, 0x38, 0xf0, 0xac, 0x56, 0x4b, 0xea, 0xb6, 0x79, 0x9c, 0x79, 0x42, 0x71, + 0xad, 0x09, 0x40, 0x29, 0x59, 0xf1, 0x82, 0x8a, 0x8d, 0xf6, 0x1a, 0x8c, 0xa7, 0x29, 0xb9, 0xe2, + 0x67, 0x6c, 0x51, 0xb3, 0x6b, 0x2b, 0x19, 0x47, 0x8a, 0x9f, 0x4c, 0xc7, 0x90, 0x82, 0x6d, 0xfe, + 0xd8, 0x67, 0x7a, 0xc3, 0x75, 0xd4, 0xb6, 0x9a, 0xeb, 0xd0, 0x6b, 0x32, 0x0d, 0xc3, 0x5c, 0xed, + 0x9f, 0x4b, 0x70, 0x36, 0x32, 0x0a, 0xac, 0xe8, 0x8e, 0xde, 0x3a, 0xc0, 0xbf, 0xae, 0xde, 0xbf, + 0x24, 0x71, 0xd8, 0xe0, 0xf1, 0xa5, 0x07, 0x20, 0x78, 0xfc, 0x4f, 0x0a, 0xc0, 0x7d, 0x03, 0xc9, + 0x17, 0x61, 0x44, 0x8f, 0xfd, 0xe6, 0x50, 0x7e, 0xce, 0xcb, 0xb9, 0x3f, 0x27, 0x77, 0x41, 0x0c, + 0x7d, 0x5d, 0xe2, 0xa9, 0x98, 0x60, 0x48, 0x5c, 0xa8, 0x6e, 0xea, 0xb6, 0xcd, 0x74, 0xa1, 0xdc, + 0x87, 0x1c, 0x09, 0xe6, 0xbc, 0x9b, 0x2f, 0x48, 0x68, 0x0c, 0x99, 0x68, 0xff, 0x54, 0x80, 0xd1, + 0xa6, 0x6d, 0x99, 0x96, 0xd3, 0x3a, 0xc6, 0xa8, 0xf1, 0x37, 0xa0, 0xe2, 0xdb, 0x96, 0x49, 0x07, + 0x9c, 0xc7, 0xc5, 0x0a, 0xc2, 0x00, 0x50, 0xe0, 0x24, 0xc3, 0xd0, 0x97, 0x0e, 0x10, 0x86, 0xfe, + 0x67, 0x43, 0x20, 0xfd, 0x4b, 0x49, 0x17, 0x6a, 0x2d, 0x15, 0xdd, 0x5a, 0xb6, 0xf1, 0x4a, 0x8e, + 0xc8, 0x68, 0x89, 0x38, 0xd9, 0x62, 0xd6, 0x0d, 0x13, 0x31, 0xe2, 0x44, 0x68, 0xf2, 0xcf, 0x96, + 0xf3, 0x39, 0xff, 0x6c, 0x29, 0xd8, 0xf5, 0xfe, 0xdb, 0x52, 0x87, 0xf2, 0x56, 0x10, 0x74, 0xe4, + 0xb8, 0x1a, 0xdc, 0x81, 0x38, 0x0a, 0xce, 0x21, 0xb4, 0x11, 0xf6, 0x8e, 0x1c, 0x9a, 0xb1, 0x70, + 0xf4, 0xf0, 0x87, 0x4a, 0x73, 0xb9, 0x0e, 0xba, 0xe3, 0x2c, 0xd8, 0x3b, 0x72, 0x68, 0xf2, 0x05, + 0xa8, 0x07, 0x9e, 0xee, 0xf8, 0x9b, 0xae, 0xd7, 0xa6, 0x9e, 0xdc, 0x1d, 0x2e, 0xe4, 0xf8, 0xb9, + 0xe3, 0x5a, 0x84, 0x26, 0x4e, 0xd0, 0x12, 0x49, 0x18, 0xe7, 0x46, 0xb6, 0xa1, 0xda, 0x35, 0x45, + 0xc5, 0xa4, 0xd9, 0x64, 0x36, 0xcf, 0xff, 0x3a, 0x63, 0xc7, 0xd8, 0xea, 0x0d, 0x43, 0x06, 0xc9, + 0x7f, 0x87, 0x0d, 0x1f, 0xd5, 0xbf, 
0xc3, 0xe2, 0xbd, 0x31, 0x2b, 0x72, 0x00, 0x69, 0x4b, 0x8d, + 0xd2, 0x69, 0x49, 0x2f, 0x9c, 0x85, 0xdc, 0xca, 0x9e, 0x60, 0x59, 0x0f, 0xb5, 0x52, 0xa7, 0x85, + 0x8a, 0x87, 0xd6, 0x06, 0x69, 0xdd, 0x26, 0x46, 0xe2, 0x0f, 0x1b, 0xe2, 0x3a, 0xcb, 0xf4, 0xc1, + 0xe6, 0x83, 0xf0, 0x57, 0x0f, 0xb1, 0x08, 0xbf, 0x99, 0xbf, 0xd2, 0xd0, 0xfe, 0xbe, 0x08, 0xa5, + 0xb5, 0xe5, 0xa6, 0x88, 0xda, 0xc7, 0x7f, 0x5f, 0x43, 0x9b, 0xdb, 0x56, 0xe7, 0x26, 0xf5, 0xac, + 0xcd, 0x5d, 0xb9, 0xe9, 0x8d, 0x45, 0xed, 0x4b, 0x53, 0x60, 0x46, 0x29, 0xf2, 0x0a, 0x8c, 0x18, + 0xfa, 0x1c, 0xf5, 0x82, 0x41, 0xb6, 0xf4, 0xfc, 0xde, 0xde, 0xdc, 0x6c, 0x54, 0x1c, 0x13, 0x60, + 0x64, 0x1d, 0xc0, 0x88, 0xa0, 0x4b, 0x87, 0x36, 0x44, 0xc4, 0x80, 0x63, 0x40, 0x04, 0xa1, 0xb6, + 0xcd, 0x48, 0x39, 0x6a, 0xf9, 0x30, 0xa8, 0xbc, 0xe7, 0x5c, 0x53, 0x65, 0x31, 0x82, 0xd1, 0x1c, + 0x18, 0x4d, 0xfc, 0x76, 0x83, 0x7c, 0x0c, 0xaa, 0x6e, 0x27, 0x36, 0x9d, 0xd6, 0xb8, 0xbf, 0x5f, + 0xf5, 0x86, 0x4c, 0xbb, 0xb3, 0x37, 0x39, 0xba, 0xec, 0xb6, 0x2c, 0x43, 0x25, 0x60, 0x48, 0x4e, + 0x34, 0x18, 0xe2, 0x97, 0x6d, 0xd4, 0x4f, 0x37, 0xf8, 0xda, 0xc1, 0xe3, 0xe2, 0xfb, 0x28, 0x73, + 0xb4, 0x2f, 0x95, 0x21, 0x3a, 0x13, 0x22, 0x3e, 0x0c, 0x09, 0x67, 0x62, 0x39, 0x73, 0x1f, 0xab, + 0xdf, 0xb2, 0x64, 0x45, 0x5a, 0x50, 0x7a, 0xcd, 0xdd, 0xc8, 0x3d, 0x71, 0xc7, 0x6e, 0xd9, 0x0a, + 0x2b, 0x55, 0x2c, 0x01, 0x19, 0x07, 0xf2, 0xeb, 0x05, 0x38, 0xe9, 0xa7, 0x95, 0x4e, 0xd9, 0x1d, + 0x30, 0xbf, 0x76, 0x9d, 0x56, 0x63, 0xa5, 0x63, 0x66, 0xbf, 0x6c, 0xec, 0xad, 0x0b, 0x93, 0xbf, + 0x38, 0xac, 0x91, 0xdd, 0x69, 0x31, 0xe7, 0xaf, 0xe2, 0x92, 0xf2, 0x4f, 0xa6, 0xa1, 0x64, 0xa5, + 0x7d, 0xa5, 0x08, 0xf5, 0xd8, 0x6c, 0x9d, 0xfb, 0x5f, 0x2e, 0xb7, 0x53, 0xff, 0x72, 0x59, 0x1d, + 0xfc, 0xec, 0x32, 0xaa, 0xd5, 0x71, 0xff, 0xce, 0xe5, 0xcf, 0x8b, 0x50, 0x5a, 0x9f, 0x5f, 0x48, + 0x6e, 0x17, 0x0b, 0xf7, 0x61, 0xbb, 0xb8, 0x05, 0xc3, 0x1b, 0x5d, 0xcb, 0x0e, 0x2c, 0x27, 0x77, + 0x1c, 0x00, 0xf5, 0xeb, 0x1b, 0x79, 0x9d, 0x56, 0xa0, 0xa2, 0x82, 0x27, 
0x2d, 0x18, 0x6e, 0x89, + 0x40, 0x6c, 0xb9, 0x3d, 0xba, 0x64, 0x40, 0x37, 0xc1, 0x48, 0xbe, 0xa0, 0x42, 0xd7, 0x76, 0x41, + 0xfe, 0x3c, 0xfb, 0xbe, 0x4b, 0x53, 0xfb, 0x02, 0x84, 0x5a, 0xc0, 0xfd, 0x67, 0xfe, 0xaf, 0x05, + 0x48, 0x2a, 0x3e, 0xf7, 0xbf, 0x37, 0x6d, 0xa7, 0x7b, 0xd3, 0xfc, 0x51, 0x0c, 0xbe, 0xec, 0x0e, + 0xa5, 0xfd, 0x51, 0x11, 0x86, 0xee, 0xdb, 0xdd, 0x4d, 0x9a, 0x70, 0x4e, 0x9b, 0xcb, 0x39, 0x31, + 0xf6, 0x75, 0x4d, 0x6b, 0xa7, 0x5c, 0xd3, 0xf2, 0xfe, 0xac, 0xf3, 0x1e, 0x8e, 0x69, 0x7f, 0x5d, + 0x00, 0x39, 0x2d, 0x2f, 0x39, 0x7e, 0xa0, 0x3b, 0x06, 0xff, 0x67, 0xbc, 0x5c, 0x03, 0xf2, 0x7a, + 0x40, 0x48, 0x2f, 0x21, 0xb1, 0xec, 0xf3, 0x67, 0x35, 0xe7, 0x93, 0x8f, 0x40, 0x75, 0xcb, 0xf5, + 0x03, 0x3e, 0xcf, 0x17, 0x93, 0x76, 0x9d, 0x2b, 0x32, 0x1d, 0x43, 0x8a, 0xf4, 0x49, 0x61, 0xa5, + 0xff, 0x49, 0xa1, 0xf6, 0xad, 0x22, 0x8c, 0xbc, 0x57, 0x2e, 0xa0, 0x66, 0xb9, 0xf2, 0x95, 0x72, + 0xba, 0xf2, 0x95, 0x0f, 0xe3, 0xca, 0xa7, 0x7d, 0xaf, 0x00, 0x70, 0xdf, 0x6e, 0xbf, 0x9a, 0x49, + 0x2f, 0xbb, 0xdc, 0xfd, 0x2a, 0xdb, 0xc7, 0xee, 0xf7, 0x2b, 0xaa, 0x49, 0xdc, 0xc3, 0xee, 0xcd, + 0x02, 0x8c, 0xe9, 0x09, 0xaf, 0xb5, 0xdc, 0xaa, 0x65, 0xca, 0x09, 0x2e, 0xbc, 0xe9, 0x97, 0x4c, + 0xc7, 0x14, 0x5b, 0xf2, 0x7c, 0x14, 0x79, 0xf5, 0x7a, 0xd4, 0xed, 0x7b, 0x42, 0xa6, 0x72, 0x35, + 0x27, 0x41, 0x79, 0x0f, 0x2f, 0xc1, 0xd2, 0x91, 0x78, 0x09, 0xc6, 0xef, 0x3f, 0x95, 0xef, 0x7a, + 0xff, 0x69, 0x07, 0x6a, 0x9b, 0x9e, 0xdb, 0xe6, 0x8e, 0x78, 0xf2, 0x37, 0x9f, 0x97, 0x73, 0xac, + 0x29, 0xd1, 0x0f, 0xae, 0x23, 0x1b, 0xcf, 0x82, 0xc2, 0xc7, 0x88, 0x15, 0x37, 0x48, 0xbb, 0x82, + 0xeb, 0xd0, 0x51, 0x72, 0x0d, 0xe7, 0x92, 0x35, 0x81, 0x8e, 0x8a, 0x4d, 0xd2, 0xf9, 0x6e, 0xf8, + 0xfe, 0x38, 0xdf, 0x69, 0xbf, 0x58, 0x56, 0x13, 0xd8, 0x03, 0x17, 0xe4, 0xef, 0xbd, 0x7f, 0x6b, + 0x32, 0x7d, 0xa5, 0x71, 0xf8, 0x3e, 0x5e, 0x69, 0xac, 0x0e, 0xe4, 0xea, 0xb5, 0x57, 0x82, 0xd4, + 0xbe, 0xe9, 0xfd, 0xd3, 0x89, 0xff, 0x54, 0xa7, 0x13, 0x6f, 0x15, 0x21, 0x9a, 0x08, 0x0e, 0xe9, + 0xbd, 0xf1, 
0x32, 0x54, 0xdb, 0xfa, 0xed, 0x79, 0x6a, 0xeb, 0xbb, 0x79, 0xfe, 0xcd, 0xb8, 0x22, + 0x31, 0x30, 0x44, 0x23, 0x3e, 0x80, 0x15, 0xc6, 0x47, 0xce, 0x6d, 0x6d, 0x8e, 0x42, 0x2d, 0x0b, + 0x7b, 0x56, 0xf4, 0x8e, 0x31, 0x36, 0xda, 0x5f, 0x15, 0x41, 0x06, 0xd2, 0x26, 0x14, 0x2a, 0x9b, + 0xd6, 0x6d, 0x6a, 0xe6, 0xf6, 0x64, 0x8c, 0xfd, 0x31, 0x57, 0x98, 0xd3, 0x79, 0x02, 0x0a, 0x74, + 0x6e, 0x27, 0x15, 0xc7, 0x23, 0x52, 0x7e, 0x39, 0xec, 0xa4, 0xf1, 0x63, 0x16, 0x69, 0x27, 0x15, + 0x49, 0xa8, 0x78, 0x08, 0xb3, 0x2c, 0x3f, 0xa3, 0x96, 0x22, 0xcd, 0x63, 0x96, 0x8d, 0x9d, 0x75, + 0x2b, 0xb3, 0xac, 0x2f, 0xee, 0x34, 0x4b, 0x1e, 0x8d, 0xcf, 0x7d, 0xf7, 0xfb, 0x17, 0x1e, 0xfa, + 0xde, 0xf7, 0x2f, 0x3c, 0xf4, 0xce, 0xf7, 0x2f, 0x3c, 0xf4, 0xa5, 0xfd, 0x0b, 0x85, 0xef, 0xee, + 0x5f, 0x28, 0x7c, 0x6f, 0xff, 0x42, 0xe1, 0x9d, 0xfd, 0x0b, 0x85, 0x7f, 0xd8, 0xbf, 0x50, 0xf8, + 0x95, 0x7f, 0xbc, 0xf0, 0xd0, 0x67, 0x9f, 0x8b, 0xaa, 0x30, 0xad, 0xaa, 0x30, 0xad, 0x18, 0x4e, + 0x77, 0xb6, 0x5b, 0xd3, 0xac, 0x0a, 0x51, 0x8a, 0xaa, 0xc2, 0x7f, 0x04, 0x00, 0x00, 0xff, 0xff, + 0xb2, 0x71, 0xfd, 0x11, 0x25, 0x93, 0x00, 0x00, } func (m *AbstractPodTemplate) Marshal() (dAtA []byte, err error) { @@ -3192,6 +3194,20 @@ func (m *AbstractPodTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.ResourceClaims) > 0 { + for iNdEx := len(m.ResourceClaims) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceClaims[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + } if m.DNSConfig != nil { { size, err := m.DNSConfig.MarshalToSizedBuffer(dAtA[:i]) @@ -9058,6 +9074,12 @@ func (m *AbstractPodTemplate) Size() (n int) { l = m.DNSConfig.Size() n += 1 + l + sovGenerated(uint64(l)) } + if len(m.ResourceClaims) > 0 { + for _, e := range m.ResourceClaims { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -11162,6 +11184,11 @@ 
func (this *AbstractPodTemplate) String() string { repeatedStringForImagePullSecrets += fmt.Sprintf("%v", f) + "," } repeatedStringForImagePullSecrets += "}" + repeatedStringForResourceClaims := "[]PodResourceClaim{" + for _, f := range this.ResourceClaims { + repeatedStringForResourceClaims += fmt.Sprintf("%v", f) + "," + } + repeatedStringForResourceClaims += "}" keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) for k := range this.NodeSelector { keysForNodeSelector = append(keysForNodeSelector, k) @@ -11186,6 +11213,7 @@ func (this *AbstractPodTemplate) String() string { `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, `DNSPolicy:` + fmt.Sprintf("%v", this.DNSPolicy) + `,`, `DNSConfig:` + strings.Replace(fmt.Sprintf("%v", this.DNSConfig), "PodDNSConfig", "v1.PodDNSConfig", 1) + `,`, + `ResourceClaims:` + repeatedStringForResourceClaims + `,`, `}`, }, "") return s @@ -13157,6 +13185,40 @@ func (m *AbstractPodTemplate) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaims", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceClaims = append(m.ResourceClaims, v1.PodResourceClaim{}) + if err := m.ResourceClaims[len(m.ResourceClaims)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/pkg/apis/numaflow/v1alpha1/generated.proto 
b/pkg/apis/numaflow/v1alpha1/generated.proto index 588b26c9af..b8935365e6 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.proto +++ b/pkg/apis/numaflow/v1alpha1/generated.proto @@ -114,6 +114,14 @@ message AbstractPodTemplate { // configuration based on DNSPolicy. // +optional optional k8s.io.api.core.v1.PodDNSConfig dnsConfig = 13; + + // ResourceClaims defines which ResourceClaims must be allocated and reserved + // before the Pod is allowed to start. The resources will be made available to those + // containers which consume them by name. + // +patchMergeKey=name + // +patchStrategy=merge,retainKeys + // +optional + repeated k8s.io.api.core.v1.PodResourceClaim resourceClaims = 14; } message AbstractSink { diff --git a/pkg/apis/numaflow/v1alpha1/openapi_generated.go b/pkg/apis/numaflow/v1alpha1/openapi_generated.go index 7189543f3b..0d129cdab1 100644 --- a/pkg/apis/numaflow/v1alpha1/openapi_generated.go +++ b/pkg/apis/numaflow/v1alpha1/openapi_generated.go @@ -249,11 +249,31 @@ func schema_pkg_apis_numaflow_v1alpha1_AbstractPodTemplate(ref common.ReferenceC Ref: ref("k8s.io/api/core/v1.PodDNSConfig"), }, }, + "resourceClaims": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. 
The resources will be made available to those containers which consume them by name.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.PodResourceClaim"), + }, + }, + }, + }, + }, }, }, }, Dependencies: []string{ - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Metadata", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration"}, + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Metadata", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodResourceClaim", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration"}, } } @@ -451,6 +471,26 @@ func schema_pkg_apis_numaflow_v1alpha1_AbstractVertex(ref common.ReferenceCallba Ref: ref("k8s.io/api/core/v1.PodDNSConfig"), }, }, + "resourceClaims": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. 
The resources will be made available to those containers which consume them by name.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.PodResourceClaim"), + }, + }, + }, + }, + }, "volumes": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ @@ -544,7 +584,7 @@ func schema_pkg_apis_numaflow_v1alpha1_AbstractVertex(ref common.ReferenceCallba }, }, Dependencies: []string{ - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ContainerTemplate", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Metadata", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Scale", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Sink", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Source", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.UDF", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.VertexLimits", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.Volume"}, + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ContainerTemplate", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Metadata", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Scale", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Sink", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Source", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.UDF", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.VertexLimits", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodResourceClaim", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", 
"k8s.io/api/core/v1.Volume"}, } } @@ -999,6 +1039,26 @@ func schema_pkg_apis_numaflow_v1alpha1_DaemonTemplate(ref common.ReferenceCallba Ref: ref("k8s.io/api/core/v1.PodDNSConfig"), }, }, + "resourceClaims": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.PodResourceClaim"), + }, + }, + }, + }, + }, "replicas": { SchemaProps: spec.SchemaProps{ Description: "Replicas is the number of desired replicas of the Deployment. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller", @@ -1020,7 +1080,7 @@ func schema_pkg_apis_numaflow_v1alpha1_DaemonTemplate(ref common.ReferenceCallba }, }, Dependencies: []string{ - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ContainerTemplate", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Metadata", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration"}, + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ContainerTemplate", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Metadata", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodResourceClaim", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration"}, } } @@ -2369,6 +2429,26 @@ func schema_pkg_apis_numaflow_v1alpha1_JetStreamBufferService(ref common.Referen Ref: ref("k8s.io/api/core/v1.PodDNSConfig"), }, }, + "resourceClaims": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.PodResourceClaim"), + }, + }, + }, + }, + }, "settings": { SchemaProps: spec.SchemaProps{ Description: "Nats/JetStream configuration, if not specified, global settings in numaflow-controller-config will be used. 
See https://docs.nats.io/running-a-nats-service/configuration#limits and https://docs.nats.io/running-a-nats-service/configuration#jetstream. For limits, only \"max_payload\" is supported for configuration, defaults to 1048576 (1MB), not recommended to use values over 8388608 (8MB) but max_payload can be set up to 67108864 (64MB). For jetstream, only \"max_memory_store\" and \"max_file_store\" are supported for configuration, do not set \"store_dir\" as it has been hardcoded.", @@ -2416,7 +2496,7 @@ func schema_pkg_apis_numaflow_v1alpha1_JetStreamBufferService(ref common.Referen }, }, Dependencies: []string{ - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ContainerTemplate", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Metadata", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.PersistenceStrategy", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration"}, + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ContainerTemplate", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Metadata", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.PersistenceStrategy", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodResourceClaim", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration"}, } } @@ -2624,6 +2704,26 @@ func schema_pkg_apis_numaflow_v1alpha1_JobTemplate(ref common.ReferenceCallback) Ref: ref("k8s.io/api/core/v1.PodDNSConfig"), }, }, + "resourceClaims": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. 
The resources will be made available to those containers which consume them by name.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.PodResourceClaim"), + }, + }, + }, + }, + }, "containerTemplate": { SchemaProps: spec.SchemaProps{ Ref: ref("github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ContainerTemplate"), @@ -2647,7 +2747,7 @@ func schema_pkg_apis_numaflow_v1alpha1_JobTemplate(ref common.ReferenceCallback) }, }, Dependencies: []string{ - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ContainerTemplate", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Metadata", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration"}, + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ContainerTemplate", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Metadata", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodResourceClaim", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration"}, } } @@ -3110,6 +3210,26 @@ func schema_pkg_apis_numaflow_v1alpha1_MonoVertexSpec(ref common.ReferenceCallba Ref: ref("k8s.io/api/core/v1.PodDNSConfig"), }, }, + "resourceClaims": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. 
The resources will be made available to those containers which consume them by name.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.PodResourceClaim"), + }, + }, + }, + }, + }, "containerTemplate": { SchemaProps: spec.SchemaProps{ Description: "Container template for the main numa container.", @@ -3186,7 +3306,7 @@ func schema_pkg_apis_numaflow_v1alpha1_MonoVertexSpec(ref common.ReferenceCallba }, }, Dependencies: []string{ - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ContainerTemplate", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.DaemonTemplate", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Metadata", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.MonoVertexLimits", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Scale", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Sink", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Source", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.Volume"}, + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ContainerTemplate", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.DaemonTemplate", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Metadata", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.MonoVertexLimits", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Scale", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Sink", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Source", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodResourceClaim", 
"k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.Volume"}, } } @@ -3436,6 +3556,26 @@ func schema_pkg_apis_numaflow_v1alpha1_NativeRedis(ref common.ReferenceCallback) Ref: ref("k8s.io/api/core/v1.PodDNSConfig"), }, }, + "resourceClaims": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.PodResourceClaim"), + }, + }, + }, + }, + }, "settings": { SchemaProps: spec.SchemaProps{ Description: "Redis configuration, if not specified, global settings in numaflow-controller-config will be used.", @@ -3446,7 +3586,7 @@ func schema_pkg_apis_numaflow_v1alpha1_NativeRedis(ref common.ReferenceCallback) }, }, Dependencies: []string{ - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ContainerTemplate", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Metadata", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.PersistenceStrategy", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.RedisSettings", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration"}, + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ContainerTemplate", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Metadata", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.PersistenceStrategy", 
"github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.RedisSettings", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodResourceClaim", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration"}, } } @@ -4522,6 +4662,26 @@ func schema_pkg_apis_numaflow_v1alpha1_SideInputsManagerTemplate(ref common.Refe Ref: ref("k8s.io/api/core/v1.PodDNSConfig"), }, }, + "resourceClaims": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.PodResourceClaim"), + }, + }, + }, + }, + }, "containerTemplate": { SchemaProps: spec.SchemaProps{ Description: "Template for the side inputs manager numa container", @@ -4538,7 +4698,7 @@ func schema_pkg_apis_numaflow_v1alpha1_SideInputsManagerTemplate(ref common.Refe }, }, Dependencies: []string{ - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ContainerTemplate", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Metadata", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration"}, + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ContainerTemplate", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Metadata", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", 
"k8s.io/api/core/v1.PodResourceClaim", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration"}, } } @@ -5286,6 +5446,26 @@ func schema_pkg_apis_numaflow_v1alpha1_VertexSpec(ref common.ReferenceCallback) Ref: ref("k8s.io/api/core/v1.PodDNSConfig"), }, }, + "resourceClaims": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.PodResourceClaim"), + }, + }, + }, + }, + }, "volumes": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ @@ -5432,7 +5612,7 @@ func schema_pkg_apis_numaflow_v1alpha1_VertexSpec(ref common.ReferenceCallback) }, }, Dependencies: []string{ - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.CombinedEdge", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ContainerTemplate", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Metadata", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Scale", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Sink", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Source", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.UDF", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.VertexLimits", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Watermark", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", 
"k8s.io/api/core/v1.Volume"}, + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.CombinedEdge", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ContainerTemplate", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Metadata", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Scale", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Sink", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Source", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.UDF", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.VertexLimits", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Watermark", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodResourceClaim", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.Volume"}, } } @@ -5636,6 +5816,26 @@ func schema_pkg_apis_numaflow_v1alpha1_VertexTemplate(ref common.ReferenceCallba Ref: ref("k8s.io/api/core/v1.PodDNSConfig"), }, }, + "resourceClaims": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. 
The resources will be made available to those containers which consume them by name.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.PodResourceClaim"), + }, + }, + }, + }, + }, "containerTemplate": { SchemaProps: spec.SchemaProps{ Description: "Template for the vertex numa container", @@ -5652,7 +5852,7 @@ func schema_pkg_apis_numaflow_v1alpha1_VertexTemplate(ref common.ReferenceCallba }, }, Dependencies: []string{ - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ContainerTemplate", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Metadata", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration"}, + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ContainerTemplate", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Metadata", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodResourceClaim", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration"}, } } diff --git a/pkg/apis/numaflow/v1alpha1/pod_template.go b/pkg/apis/numaflow/v1alpha1/pod_template.go index 442ede947c..553e280efc 100644 --- a/pkg/apis/numaflow/v1alpha1/pod_template.go +++ b/pkg/apis/numaflow/v1alpha1/pod_template.go @@ -90,6 +90,13 @@ type AbstractPodTemplate struct { // configuration based on DNSPolicy. // +optional DNSConfig *corev1.PodDNSConfig `json:"dnsConfig,omitempty" protobuf:"bytes,13,opt,name=dnsConfig"` + // ResourceClaims defines which ResourceClaims must be allocated and reserved + // before the Pod is allowed to start. The resources will be made available to those + // containers which consume them by name. 
+ // +patchMergeKey=name + // +patchStrategy=merge,retainKeys + // +optional + ResourceClaims []corev1.PodResourceClaim `json:"resourceClaims,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,14,rep,name=resourceClaims"` } // ApplyToPodSpec updates the PodSpec with the values in the AbstractPodTemplate @@ -130,6 +137,9 @@ func (apt *AbstractPodTemplate) ApplyToPodSpec(ps *corev1.PodSpec) { if ps.DNSConfig == nil { ps.DNSConfig = apt.DNSConfig } + if len(ps.ResourceClaims) == 0 { + ps.ResourceClaims = apt.ResourceClaims + } } // ApplyToPodTemplateSpec updates the PodTemplateSpec with the values in the AbstractPodTemplate diff --git a/pkg/apis/numaflow/v1alpha1/pod_template_test.go b/pkg/apis/numaflow/v1alpha1/pod_template_test.go index 4a54f4a288..40d6072f45 100644 --- a/pkg/apis/numaflow/v1alpha1/pod_template_test.go +++ b/pkg/apis/numaflow/v1alpha1/pod_template_test.go @@ -71,6 +71,11 @@ func TestApplyToPodSpec(t *testing.T) { }, }, ServiceAccountName: "template-sa", + ResourceClaims: []corev1.PodResourceClaim{ + { + Name: "template-resource-claim", + }, + }, } podSpec := &corev1.PodSpec{ @@ -92,4 +97,5 @@ func TestApplyToPodSpec(t *testing.T) { assert.Equal(t, podSpec.Priority, abstractPodTemplate.Priority) assert.Equal(t, podSpec.Affinity, abstractPodTemplate.Affinity) assert.Equal(t, podSpec.ServiceAccountName, "spec-sa") + assert.Equal(t, podSpec.ResourceClaims[0].Name, "template-resource-claim") } diff --git a/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go index 81d1a53700..a5ee6d9b6f 100644 --- a/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go @@ -84,6 +84,13 @@ func (in *AbstractPodTemplate) DeepCopyInto(out *AbstractPodTemplate) { *out = new(v1.PodDNSConfig) (*in).DeepCopyInto(*out) } + if in.ResourceClaims != nil { + in, out := &in.ResourceClaims, &out.ResourceClaims + *out = make([]v1.PodResourceClaim, 
len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } diff --git a/rust/numaflow-models/Makefile b/rust/numaflow-models/Makefile index 5ff0bf7e53..3e38249cae 100644 --- a/rust/numaflow-models/Makefile +++ b/rust/numaflow-models/Makefile @@ -50,6 +50,7 @@ generate: --type-mappings CoreV1ContainerResizePolicy="k8s_openapi::api::core::v1::ContainerResizePolicy" \ --type-mappings CoreV1Container="k8s_openapi::api::core::v1::Container" \ --type-mappings CoreV1Volume="k8s_openapi::api::core::v1::Volume" \ + --type-mappings CoreV1PodResourceClaim="k8s_openapi::api::core::v1::PodResourceClaim" \ --type-mappings CoreV1EmptyDirVolumeSource="k8s_openapi::api::core::v1::EmptyDirVolumeSource" \ --type-mappings MetaV1Duration="kube::core::Duration" \ --type-mappings MetaV1ListMeta="k8s_openapi::apimachinery::pkg::apis::meta::v1::ListMeta" \ diff --git a/rust/numaflow-models/src/models/abstract_pod_template.rs b/rust/numaflow-models/src/models/abstract_pod_template.rs index 612ade86b6..7315dc4d90 100644 --- a/rust/numaflow-models/src/models/abstract_pod_template.rs +++ b/rust/numaflow-models/src/models/abstract_pod_template.rs @@ -47,6 +47,9 @@ pub struct AbstractPodTemplate { /// If specified, indicates the Redis pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ #[serde(rename = "priorityClassName", skip_serializing_if = "Option::is_none")] pub priority_class_name: Option, + /// ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name. 
+ #[serde(rename = "resourceClaims", skip_serializing_if = "Option::is_none")] + pub resource_claims: Option>, /// RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class #[serde(rename = "runtimeClassName", skip_serializing_if = "Option::is_none")] pub runtime_class_name: Option, @@ -73,6 +76,7 @@ impl AbstractPodTemplate { node_selector: None, priority: None, priority_class_name: None, + resource_claims: None, runtime_class_name: None, security_context: None, service_account_name: None, diff --git a/rust/numaflow-models/src/models/abstract_vertex.rs b/rust/numaflow-models/src/models/abstract_vertex.rs index a93bcc3ac0..23fb85c813 100644 --- a/rust/numaflow-models/src/models/abstract_vertex.rs +++ b/rust/numaflow-models/src/models/abstract_vertex.rs @@ -62,6 +62,9 @@ pub struct AbstractVertex { /// If specified, indicates the Redis pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ #[serde(rename = "priorityClassName", skip_serializing_if = "Option::is_none")] pub priority_class_name: Option, + /// ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name. 
+ #[serde(rename = "resourceClaims", skip_serializing_if = "Option::is_none")] + pub resource_claims: Option>, /// RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class #[serde(rename = "runtimeClassName", skip_serializing_if = "Option::is_none")] pub runtime_class_name: Option, @@ -114,6 +117,7 @@ impl AbstractVertex { partitions: None, priority: None, priority_class_name: None, + resource_claims: None, runtime_class_name: None, scale: None, security_context: None, diff --git a/rust/numaflow-models/src/models/daemon_template.rs b/rust/numaflow-models/src/models/daemon_template.rs index 5d03240b86..c6394874fd 100644 --- a/rust/numaflow-models/src/models/daemon_template.rs +++ b/rust/numaflow-models/src/models/daemon_template.rs @@ -55,6 +55,9 @@ pub struct DaemonTemplate { /// Replicas is the number of desired replicas of the Deployment. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller #[serde(rename = "replicas", skip_serializing_if = "Option::is_none")] pub replicas: Option, + /// ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name. + #[serde(rename = "resourceClaims", skip_serializing_if = "Option::is_none")] + pub resource_claims: Option>, /// RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. 
If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class #[serde(rename = "runtimeClassName", skip_serializing_if = "Option::is_none")] pub runtime_class_name: Option, @@ -83,6 +86,7 @@ impl DaemonTemplate { priority: None, priority_class_name: None, replicas: None, + resource_claims: None, runtime_class_name: None, security_context: None, service_account_name: None, diff --git a/rust/numaflow-models/src/models/jet_stream_buffer_service.rs b/rust/numaflow-models/src/models/jet_stream_buffer_service.rs index 96dce5855c..9d434864e7 100644 --- a/rust/numaflow-models/src/models/jet_stream_buffer_service.rs +++ b/rust/numaflow-models/src/models/jet_stream_buffer_service.rs @@ -68,6 +68,9 @@ pub struct JetStreamBufferService { /// JetStream StatefulSet size #[serde(rename = "replicas", skip_serializing_if = "Option::is_none")] pub replicas: Option, + /// ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name. + #[serde(rename = "resourceClaims", skip_serializing_if = "Option::is_none")] + pub resource_claims: Option>, /// RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. 
More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class #[serde(rename = "runtimeClassName", skip_serializing_if = "Option::is_none")] pub runtime_class_name: Option, @@ -112,6 +115,7 @@ impl JetStreamBufferService { priority_class_name: None, reloader_container_template: None, replicas: None, + resource_claims: None, runtime_class_name: None, security_context: None, service_account_name: None, diff --git a/rust/numaflow-models/src/models/job_template.rs b/rust/numaflow-models/src/models/job_template.rs index 6c273d4db0..d41b0d3340 100644 --- a/rust/numaflow-models/src/models/job_template.rs +++ b/rust/numaflow-models/src/models/job_template.rs @@ -50,6 +50,9 @@ pub struct JobTemplate { /// If specified, indicates the Redis pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ #[serde(rename = "priorityClassName", skip_serializing_if = "Option::is_none")] pub priority_class_name: Option, + /// ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name. + #[serde(rename = "resourceClaims", skip_serializing_if = "Option::is_none")] + pub resource_claims: Option>, /// RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. 
More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class #[serde(rename = "runtimeClassName", skip_serializing_if = "Option::is_none")] pub runtime_class_name: Option, @@ -83,6 +86,7 @@ impl JobTemplate { node_selector: None, priority: None, priority_class_name: None, + resource_claims: None, runtime_class_name: None, security_context: None, service_account_name: None, diff --git a/rust/numaflow-models/src/models/mono_vertex_spec.rs b/rust/numaflow-models/src/models/mono_vertex_spec.rs index 1041fbafb7..8eadbbdaab 100644 --- a/rust/numaflow-models/src/models/mono_vertex_spec.rs +++ b/rust/numaflow-models/src/models/mono_vertex_spec.rs @@ -56,6 +56,9 @@ pub struct MonoVertexSpec { pub priority_class_name: Option, #[serde(rename = "replicas", skip_serializing_if = "Option::is_none")] pub replicas: Option, + /// ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name. + #[serde(rename = "resourceClaims", skip_serializing_if = "Option::is_none")] + pub resource_claims: Option>, /// RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. 
More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class #[serde(rename = "runtimeClassName", skip_serializing_if = "Option::is_none")] pub runtime_class_name: Option, @@ -97,6 +100,7 @@ impl MonoVertexSpec { priority: None, priority_class_name: None, replicas: None, + resource_claims: None, runtime_class_name: None, scale: None, security_context: None, diff --git a/rust/numaflow-models/src/models/native_redis.rs b/rust/numaflow-models/src/models/native_redis.rs index 3200cd7731..2516bd5e23 100644 --- a/rust/numaflow-models/src/models/native_redis.rs +++ b/rust/numaflow-models/src/models/native_redis.rs @@ -65,6 +65,9 @@ pub struct NativeRedis { /// Redis StatefulSet size #[serde(rename = "replicas", skip_serializing_if = "Option::is_none")] pub replicas: Option, + /// ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name. + #[serde(rename = "resourceClaims", skip_serializing_if = "Option::is_none")] + pub resource_claims: Option>, /// RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. 
More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class #[serde(rename = "runtimeClassName", skip_serializing_if = "Option::is_none")] pub runtime_class_name: Option, @@ -105,6 +108,7 @@ impl NativeRedis { priority_class_name: None, redis_container_template: None, replicas: None, + resource_claims: None, runtime_class_name: None, security_context: None, sentinel_container_template: None, diff --git a/rust/numaflow-models/src/models/side_inputs_manager_template.rs b/rust/numaflow-models/src/models/side_inputs_manager_template.rs index d1b4fa8cb4..ec9d3f1073 100644 --- a/rust/numaflow-models/src/models/side_inputs_manager_template.rs +++ b/rust/numaflow-models/src/models/side_inputs_manager_template.rs @@ -52,6 +52,9 @@ pub struct SideInputsManagerTemplate { /// If specified, indicates the Redis pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ #[serde(rename = "priorityClassName", skip_serializing_if = "Option::is_none")] pub priority_class_name: Option, + /// ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name. + #[serde(rename = "resourceClaims", skip_serializing_if = "Option::is_none")] + pub resource_claims: Option>, /// RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. 
If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class #[serde(rename = "runtimeClassName", skip_serializing_if = "Option::is_none")] pub runtime_class_name: Option, @@ -79,6 +82,7 @@ impl SideInputsManagerTemplate { node_selector: None, priority: None, priority_class_name: None, + resource_claims: None, runtime_class_name: None, security_context: None, service_account_name: None, diff --git a/rust/numaflow-models/src/models/vertex_spec.rs b/rust/numaflow-models/src/models/vertex_spec.rs index 078879d286..a647ecc7ae 100644 --- a/rust/numaflow-models/src/models/vertex_spec.rs +++ b/rust/numaflow-models/src/models/vertex_spec.rs @@ -73,6 +73,9 @@ pub struct VertexSpec { pub priority_class_name: Option, #[serde(rename = "replicas", skip_serializing_if = "Option::is_none")] pub replicas: Option, + /// ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name. + #[serde(rename = "resourceClaims", skip_serializing_if = "Option::is_none")] + pub resource_claims: Option>, /// RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. 
More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class #[serde(rename = "runtimeClassName", skip_serializing_if = "Option::is_none")] pub runtime_class_name: Option, @@ -133,6 +136,7 @@ impl VertexSpec { priority: None, priority_class_name: None, replicas: None, + resource_claims: None, runtime_class_name: None, scale: None, security_context: None, diff --git a/rust/numaflow-models/src/models/vertex_template.rs b/rust/numaflow-models/src/models/vertex_template.rs index 85d743546d..b707cd2ed0 100644 --- a/rust/numaflow-models/src/models/vertex_template.rs +++ b/rust/numaflow-models/src/models/vertex_template.rs @@ -52,6 +52,9 @@ pub struct VertexTemplate { /// If specified, indicates the Redis pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ #[serde(rename = "priorityClassName", skip_serializing_if = "Option::is_none")] pub priority_class_name: Option, + /// ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name. + #[serde(rename = "resourceClaims", skip_serializing_if = "Option::is_none")] + pub resource_claims: Option>, /// RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. 
More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class #[serde(rename = "runtimeClassName", skip_serializing_if = "Option::is_none")] pub runtime_class_name: Option, @@ -79,6 +82,7 @@ impl VertexTemplate { node_selector: None, priority: None, priority_class_name: None, + resource_claims: None, runtime_class_name: None, security_context: None, service_account_name: None, From ee8b83ac649ab0ec4860cf8b03720ac6016aadaa Mon Sep 17 00:00:00 2001 From: Vigith Maurice Date: Thu, 29 Aug 2024 16:32:21 -0700 Subject: [PATCH 035/188] chore: refactor metric variable names (#2012) Signed-off-by: Vigith Maurice --- rust/monovertex/src/forwarder.rs | 10 ++--- rust/monovertex/src/metrics.rs | 68 ++++++++++++++++---------------- 2 files changed, 39 insertions(+), 39 deletions(-) diff --git a/rust/monovertex/src/forwarder.rs b/rust/monovertex/src/forwarder.rs index d9f68fc608..8f61b68ff1 100644 --- a/rust/monovertex/src/forwarder.rs +++ b/rust/monovertex/src/forwarder.rs @@ -106,7 +106,7 @@ impl Forwarder { } forward_metrics() - .monovtx_processing_time + .e2e_processing_time .get_or_create(&self.common_labels) .observe(start_time.elapsed().as_micros() as f64); } @@ -135,7 +135,7 @@ impl Forwarder { let msg_count = messages.len() as u64; forward_metrics() - .monovtx_read_total + .read_total .get_or_create(&self.common_labels) .inc_by(msg_count); @@ -149,7 +149,7 @@ impl Forwarder { ); forward_metrics() - .monovtx_read_bytes_total + .read_bytes_total .get_or_create(&self.common_labels) .inc_by(bytes_count); @@ -276,7 +276,7 @@ impl Forwarder { } forward_metrics() - .monovtx_sink_write_total + .sink_write_total .get_or_create(&self.common_labels) .inc_by(msg_count); Ok(()) @@ -382,7 +382,7 @@ impl Forwarder { self.source_client.ack_fn(offsets).await?; debug!("Ack latency - {}ms", start_time.elapsed().as_millis()); forward_metrics() - .monovtx_ack_total + .ack_total .get_or_create(&self.common_labels) .inc_by(n as u64); Ok(()) diff --git 
a/rust/monovertex/src/metrics.rs b/rust/monovertex/src/metrics.rs index 7a87b508a4..fd612ba12a 100644 --- a/rust/monovertex/src/metrics.rs +++ b/rust/monovertex/src/metrics.rs @@ -29,8 +29,8 @@ use prometheus_client::registry::Registry; // Define the labels for the metrics // Note: Please keep consistent with the definitions in MonoVertex daemon -pub const MONO_VERTEX_NAME_LABEL: &str = "mvtx_name"; -pub const REPLICA_LABEL: &str = "mvtx_replica"; +const VERTEX_NAME_LABEL: &str = "mvtx_name"; +const REPLICA_LABEL: &str = "mvtx_replica"; const PENDING_PERIOD_LABEL: &str = "period"; // Define the metrics @@ -39,12 +39,12 @@ const PENDING_PERIOD_LABEL: &str = "period"; // refer: https://github.com/prometheus/client_rust/blob/master/src/registry.rs#L102 // Note: Please keep consistent with the definitions in MonoVertex daemon -const MONOVTX_READ_TOTAL: &str = "monovtx_read"; -const MONOVTX_READ_BYTES_TOTAL: &str = "monovtx_read_bytes"; -const MONOVTX_ACK_TOTAL: &str = "monovtx_ack"; -const MONOVTX_SINK_WRITE_TOTAL: &str = "monovtx_sink_write"; -const MONOVTX_PROCESSING_TIME: &str = "monovtx_processing_time"; -const MONOVTX_PENDING: &str = "monovtx_pending"; +const READ_TOTAL: &str = "monovtx_read"; +const READ_BYTES_TOTAL: &str = "monovtx_read_bytes"; +const ACK_TOTAL: &str = "monovtx_ack"; +const SINK_WRITE_TOTAL: &str = "monovtx_sink_write"; +const E2E_PROCESSING_TIME: &str = "monovtx_processing_time"; +const SOURCE_PENDING: &str = "monovtx_pending"; #[derive(Clone)] pub(crate) struct MetricsState { @@ -88,12 +88,12 @@ fn global_registry() -> &'static GlobalRegistry { // The labels are provided in the form of Vec<(String, String) // The second argument is the metric kind. 
pub struct MonoVtxMetrics { - pub monovtx_read_total: Family, Counter>, - pub monovtx_read_bytes_total: Family, Counter>, - pub monovtx_ack_total: Family, Counter>, - pub monovtx_sink_write_total: Family, Counter>, - pub monovtx_processing_time: Family, Histogram>, - pub monovtx_pending: Family, Gauge>, + pub read_total: Family, Counter>, + pub read_bytes_total: Family, Counter>, + pub ack_total: Family, Counter>, + pub sink_write_total: Family, Counter>, + pub e2e_processing_time: Family, Histogram>, + pub source_pending: Family, Gauge>, } /// impl the MonoVtxMetrics struct and create a new object @@ -111,45 +111,45 @@ impl MonoVtxMetrics { let monovtx_pending = Family::, Gauge>::default(); let metrics = Self { - monovtx_read_total, - monovtx_read_bytes_total, - monovtx_ack_total, - monovtx_sink_write_total, - monovtx_processing_time, - monovtx_pending, + read_total: monovtx_read_total, + read_bytes_total: monovtx_read_bytes_total, + ack_total: monovtx_ack_total, + sink_write_total: monovtx_sink_write_total, + e2e_processing_time: monovtx_processing_time, + source_pending: monovtx_pending, }; let mut registry = global_registry().registry.lock(); // Register all the metrics to the global registry registry.register( - MONOVTX_READ_TOTAL, + READ_TOTAL, "A Counter to keep track of the total number of messages read from the source", - metrics.monovtx_read_total.clone(), + metrics.read_total.clone(), ); registry.register( - MONOVTX_SINK_WRITE_TOTAL, + SINK_WRITE_TOTAL, "A Counter to keep track of the total number of messages written to the sink", - metrics.monovtx_sink_write_total.clone(), + metrics.sink_write_total.clone(), ); registry.register( - MONOVTX_ACK_TOTAL, + ACK_TOTAL, "A Counter to keep track of the total number of messages acknowledged by the sink", - metrics.monovtx_ack_total.clone(), + metrics.ack_total.clone(), ); registry.register( - MONOVTX_PROCESSING_TIME, + E2E_PROCESSING_TIME, "A Histogram to keep track of the total time taken to forward a chunk, 
the time is in microseconds", - metrics.monovtx_processing_time.clone(), + metrics.e2e_processing_time.clone(), ); registry.register( - MONOVTX_READ_BYTES_TOTAL, + READ_BYTES_TOTAL, "A Counter to keep track of the total number of bytes read from the source", - metrics.monovtx_read_bytes_total.clone(), + metrics.read_bytes_total.clone(), ); registry.register( - MONOVTX_PENDING, + SOURCE_PENDING, "A Gauge to keep track of the total number of pending messages for the monovtx", - metrics.monovtx_pending.clone(), + metrics.source_pending.clone(), ); metrics @@ -177,7 +177,7 @@ pub(crate) fn forward_metrics_labels() -> &'static Vec<(String, String)> { MONOVTX_METRICS_LABELS.get_or_init(|| { let common_labels = vec![ ( - MONO_VERTEX_NAME_LABEL.to_string(), + VERTEX_NAME_LABEL.to_string(), config().mono_vertex_name.clone(), ), (REPLICA_LABEL.to_string(), config().replica.to_string()), @@ -435,7 +435,7 @@ async fn expose_pending_metrics( let mut metric_labels = forward_metrics_labels().clone(); metric_labels.push((PENDING_PERIOD_LABEL.to_string(), label.to_string())); forward_metrics() - .monovtx_pending + .source_pending .get_or_create(&metric_labels) .set(pending); info!("Pending messages ({}): {}", label, pending); From 35c6f0991d6821b728c82bee6161e265dc2c1ba6 Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Thu, 29 Aug 2024 19:42:49 -0700 Subject: [PATCH 036/188] feat: introduce `readyReplicas` for Vertex and MonoVertex (#2014) --- api/json-schema/schema.json | 28 +- api/openapi-spec/swagger.json | 24 +- config/advanced-install/minimal-crds.yaml | 6 + .../numaflow.numaproj.io_monovertices.yaml | 8 +- .../full/numaflow.numaproj.io_vertices.yaml | 9 +- .../numaflow.numaproj.io_monovertices.yaml | 3 + .../numaflow.numaproj.io_vertices.yaml | 3 + config/install.yaml | 17 +- config/namespace-install.yaml | 17 +- docs/APIs.md | 100 ++ pkg/apis/numaflow/v1alpha1/generated.pb.go | 973 +++++++++--------- pkg/apis/numaflow/v1alpha1/generated.proto | 42 + 
.../numaflow/v1alpha1/mono_vertex_types.go | 33 +- .../numaflow/v1alpha1/openapi_generated.go | 51 +- pkg/apis/numaflow/v1alpha1/pipeline_types.go | 33 +- pkg/apis/numaflow/v1alpha1/vertex_types.go | 30 +- pkg/metrics/metrics_server.go | 8 + pkg/reconciler/monovertex/controller.go | 7 +- pkg/reconciler/util.go | 27 +- pkg/reconciler/util_test.go | 82 +- pkg/reconciler/vertex/controller.go | 7 +- rust/monovertex/src/metrics.rs | 6 +- .../src/models/mono_vertex_status.rs | 14 +- .../src/models/pipeline_status.rs | 1 + .../src/models/vertex_status.rs | 20 +- 25 files changed, 985 insertions(+), 564 deletions(-) diff --git a/api/json-schema/schema.json b/api/json-schema/schema.json index 66b8540090..584220c245 100644 --- a/api/json-schema/schema.json +++ b/api/json-schema/schema.json @@ -19278,7 +19278,8 @@ "x-kubernetes-patch-strategy": "merge" }, "lastScaledAt": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "Time of last scaling operation." 
}, "lastUpdated": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" @@ -19287,16 +19288,23 @@ "type": "string" }, "observedGeneration": { + "description": "The generation observed by the MonoVertex controller.", "format": "int64", "type": "integer" }, "phase": { "type": "string" }, + "readyReplicas": { + "description": "The number of pods targeted by this MonoVertex with a Ready Condition.", + "format": "int64", + "type": "integer" + }, "reason": { "type": "string" }, "replicas": { + "description": "Total number of non-terminated pods targeted by this MonoVertex (their labels match the selector).", "format": "int64", "type": "integer" }, @@ -19304,9 +19312,6 @@ "type": "string" } }, - "required": [ - "replicas" - ], "type": "object" }, "io.numaproj.numaflow.v1alpha1.NativeRedis": { @@ -19654,6 +19659,7 @@ "type": "string" }, "observedGeneration": { + "description": "The generation observed by the Pipeline controller.", "format": "int64", "type": "integer" }, @@ -20542,22 +20548,30 @@ "x-kubernetes-patch-strategy": "merge" }, "lastScaledAt": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "Time of last scaling operation." 
}, "message": { "type": "string" }, "observedGeneration": { + "description": "The generation observed by the Vertex controller.", "format": "int64", "type": "integer" }, "phase": { "type": "string" }, + "readyReplicas": { + "description": "The number of pods targeted by this Vertex with a Ready Condition.", + "format": "int64", + "type": "integer" + }, "reason": { "type": "string" }, "replicas": { + "description": "Total number of non-terminated pods targeted by this Vertex (their labels match the selector).", "format": "int64", "type": "integer" }, @@ -20565,10 +20579,6 @@ "type": "string" } }, - "required": [ - "phase", - "replicas" - ], "type": "object" }, "io.numaproj.numaflow.v1alpha1.VertexTemplate": { diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index a7eff01898..a326712b6c 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -19263,9 +19263,6 @@ }, "io.numaproj.numaflow.v1alpha1.MonoVertexStatus": { "type": "object", - "required": [ - "replicas" - ], "properties": { "conditions": { "description": "Conditions are the latest available observations of a resource's current state.", @@ -19277,6 +19274,7 @@ "x-kubernetes-patch-strategy": "merge" }, "lastScaledAt": { + "description": "Time of last scaling operation.", "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" }, "lastUpdated": { @@ -19286,16 +19284,23 @@ "type": "string" }, "observedGeneration": { + "description": "The generation observed by the MonoVertex controller.", "type": "integer", "format": "int64" }, "phase": { "type": "string" }, + "readyReplicas": { + "description": "The number of pods targeted by this MonoVertex with a Ready Condition.", + "type": "integer", + "format": "int64" + }, "reason": { "type": "string" }, "replicas": { + "description": "Total number of non-terminated pods targeted by this MonoVertex (their labels match the selector).", "type": "integer", "format": "int64" }, @@ -19641,6 +19646,7 @@ "type": 
"string" }, "observedGeneration": { + "description": "The generation observed by the Pipeline controller.", "type": "integer", "format": "int64" }, @@ -20509,10 +20515,6 @@ }, "io.numaproj.numaflow.v1alpha1.VertexStatus": { "type": "object", - "required": [ - "phase", - "replicas" - ], "properties": { "conditions": { "description": "Conditions are the latest available observations of a resource's current state.", @@ -20524,22 +20526,30 @@ "x-kubernetes-patch-strategy": "merge" }, "lastScaledAt": { + "description": "Time of last scaling operation.", "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" }, "message": { "type": "string" }, "observedGeneration": { + "description": "The generation observed by the Vertex controller.", "type": "integer", "format": "int64" }, "phase": { "type": "string" }, + "readyReplicas": { + "description": "The number of pods targeted by this Vertex with a Ready Condition.", + "type": "integer", + "format": "int64" + }, "reason": { "type": "string" }, "replicas": { + "description": "Total number of non-terminated pods targeted by this Vertex (their labels match the selector).", "type": "integer", "format": "int64" }, diff --git a/config/advanced-install/minimal-crds.yaml b/config/advanced-install/minimal-crds.yaml index 9d27719000..3e647ee3d6 100644 --- a/config/advanced-install/minimal-crds.yaml +++ b/config/advanced-install/minimal-crds.yaml @@ -75,6 +75,9 @@ spec: - jsonPath: .status.replicas name: Current type: string + - jsonPath: .status.readyReplicas + name: Ready + type: string - jsonPath: .metadata.creationTimestamp name: Age type: date @@ -209,6 +212,9 @@ spec: - jsonPath: .status.replicas name: Current type: string + - jsonPath: .status.readyReplicas + name: Ready + type: string - jsonPath: .metadata.creationTimestamp name: Age type: date diff --git a/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml b/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml index a26426074f..d456bbb249 100644 --- 
a/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml @@ -27,6 +27,9 @@ spec: - jsonPath: .status.replicas name: Current type: string + - jsonPath: .status.readyReplicas + name: Ready + type: string - jsonPath: .metadata.creationTimestamp name: Age type: date @@ -5551,6 +5554,9 @@ spec: - Paused - Deleting type: string + readyReplicas: + format: int32 + type: integer reason: type: string replicas: @@ -5558,8 +5564,6 @@ spec: type: integer selector: type: string - required: - - replicas type: object required: - spec diff --git a/config/base/crds/full/numaflow.numaproj.io_vertices.yaml b/config/base/crds/full/numaflow.numaproj.io_vertices.yaml index d1a77b9375..7973694b88 100644 --- a/config/base/crds/full/numaflow.numaproj.io_vertices.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_vertices.yaml @@ -27,6 +27,9 @@ spec: - jsonPath: .status.replicas name: Current type: string + - jsonPath: .status.readyReplicas + name: Ready + type: string - jsonPath: .metadata.creationTimestamp name: Age type: date @@ -5480,6 +5483,9 @@ spec: - Running - Failed type: string + readyReplicas: + format: int32 + type: integer reason: type: string replicas: @@ -5487,9 +5493,6 @@ spec: type: integer selector: type: string - required: - - phase - - replicas type: object required: - spec diff --git a/config/base/crds/minimal/numaflow.numaproj.io_monovertices.yaml b/config/base/crds/minimal/numaflow.numaproj.io_monovertices.yaml index ac33f527a9..65cb6b2652 100644 --- a/config/base/crds/minimal/numaflow.numaproj.io_monovertices.yaml +++ b/config/base/crds/minimal/numaflow.numaproj.io_monovertices.yaml @@ -23,6 +23,9 @@ spec: - jsonPath: .status.replicas name: Current type: string + - jsonPath: .status.readyReplicas + name: Ready + type: string - jsonPath: .metadata.creationTimestamp name: Age type: date diff --git a/config/base/crds/minimal/numaflow.numaproj.io_vertices.yaml 
b/config/base/crds/minimal/numaflow.numaproj.io_vertices.yaml index b0f1c1ba0f..68a95ee056 100644 --- a/config/base/crds/minimal/numaflow.numaproj.io_vertices.yaml +++ b/config/base/crds/minimal/numaflow.numaproj.io_vertices.yaml @@ -23,6 +23,9 @@ spec: - jsonPath: .status.replicas name: Current type: string + - jsonPath: .status.readyReplicas + name: Ready + type: string - jsonPath: .metadata.creationTimestamp name: Age type: date diff --git a/config/install.yaml b/config/install.yaml index 575bac0cc3..a648c1413e 100644 --- a/config/install.yaml +++ b/config/install.yaml @@ -2671,6 +2671,9 @@ spec: - jsonPath: .status.replicas name: Current type: string + - jsonPath: .status.readyReplicas + name: Ready + type: string - jsonPath: .metadata.creationTimestamp name: Age type: date @@ -8195,6 +8198,9 @@ spec: - Paused - Deleting type: string + readyReplicas: + format: int32 + type: integer reason: type: string replicas: @@ -8202,8 +8208,6 @@ spec: type: integer selector: type: string - required: - - replicas type: object required: - spec @@ -18108,6 +18112,9 @@ spec: - jsonPath: .status.replicas name: Current type: string + - jsonPath: .status.readyReplicas + name: Ready + type: string - jsonPath: .metadata.creationTimestamp name: Age type: date @@ -23561,6 +23568,9 @@ spec: - Running - Failed type: string + readyReplicas: + format: int32 + type: integer reason: type: string replicas: @@ -23568,9 +23578,6 @@ spec: type: integer selector: type: string - required: - - phase - - replicas type: object required: - spec diff --git a/config/namespace-install.yaml b/config/namespace-install.yaml index 3b3737b3a9..a922d0f7c5 100644 --- a/config/namespace-install.yaml +++ b/config/namespace-install.yaml @@ -2671,6 +2671,9 @@ spec: - jsonPath: .status.replicas name: Current type: string + - jsonPath: .status.readyReplicas + name: Ready + type: string - jsonPath: .metadata.creationTimestamp name: Age type: date @@ -8195,6 +8198,9 @@ spec: - Paused - Deleting type: string + 
readyReplicas: + format: int32 + type: integer reason: type: string replicas: @@ -8202,8 +8208,6 @@ spec: type: integer selector: type: string - required: - - replicas type: object required: - spec @@ -18108,6 +18112,9 @@ spec: - jsonPath: .status.replicas name: Current type: string + - jsonPath: .status.readyReplicas + name: Ready + type: string - jsonPath: .metadata.creationTimestamp name: Age type: date @@ -23561,6 +23568,9 @@ spec: - Running - Failed type: string + readyReplicas: + format: int32 + type: integer reason: type: string replicas: @@ -23568,9 +23578,6 @@ spec: type: integer selector: type: string - required: - - phase - - replicas type: object required: - spec diff --git a/docs/APIs.md b/docs/APIs.md index 46a865e83b..b5ba81450d 100644 --- a/docs/APIs.md +++ b/docs/APIs.md @@ -6142,6 +6142,7 @@ MonoVertexPhase +(Optional) @@ -6155,6 +6156,13 @@ MonoVertexPhase +(Optional) +

+ +Total number of non-terminated pods targeted by this MonoVertex (their +labels match the selector). +

+ @@ -6168,6 +6176,7 @@ MonoVertexPhase +(Optional) @@ -6181,6 +6190,7 @@ MonoVertexPhase +(Optional) @@ -6194,6 +6204,7 @@ MonoVertexPhase +(Optional) @@ -6209,6 +6220,7 @@ Kubernetes meta/v1.Time +(Optional) @@ -6224,6 +6236,12 @@ Kubernetes meta/v1.Time +(Optional) +

+ +Time of last scaling operation. +

+ @@ -6237,6 +6255,31 @@ Kubernetes meta/v1.Time +(Optional) +

+ +The generation observed by the MonoVertex controller. +

+ + + + + + + + + +readyReplicas
uint32 + + + + +(Optional) +

+ +The number of pods targeted by this MonoVertex with a Ready Condition. +

+ @@ -7579,6 +7622,7 @@ Description +(Optional) @@ -7592,6 +7636,7 @@ Description +(Optional) @@ -7607,6 +7652,7 @@ Kubernetes meta/v1.Time +(Optional) @@ -7620,6 +7666,7 @@ Kubernetes meta/v1.Time +(Optional) @@ -7633,6 +7680,7 @@ Kubernetes meta/v1.Time +(Optional) @@ -7646,6 +7694,7 @@ Kubernetes meta/v1.Time +(Optional) @@ -7659,6 +7708,7 @@ Kubernetes meta/v1.Time +(Optional) @@ -7672,6 +7722,7 @@ Kubernetes meta/v1.Time +(Optional) @@ -7685,6 +7736,7 @@ Kubernetes meta/v1.Time +(Optional) @@ -7698,6 +7750,12 @@ Kubernetes meta/v1.Time +(Optional) +

+ +The generation observed by the Pipeline controller. +

+ @@ -10941,6 +10999,7 @@ Description +(Optional) @@ -10954,6 +11013,13 @@ Description +(Optional) +

+ +Total number of non-terminated pods targeted by this Vertex (their +labels match the selector). +

+ @@ -10967,6 +11033,7 @@ Description +(Optional) @@ -10980,6 +11047,7 @@ Description +(Optional) @@ -10993,6 +11061,7 @@ Description +(Optional) @@ -11008,6 +11077,12 @@ Kubernetes meta/v1.Time +(Optional) +

+ +Time of last scaling operation. +

+ @@ -11021,6 +11096,31 @@ Kubernetes meta/v1.Time +(Optional) +

+ +The generation observed by the Vertex controller. +

+ + + + + + + + + +readyReplicas
uint32 + + + + +(Optional) +

+ +The number of pods targeted by this Vertex with a Ready Condition. +

+ diff --git a/pkg/apis/numaflow/v1alpha1/generated.pb.go b/pkg/apis/numaflow/v1alpha1/generated.pb.go index d905171bec..3a29fb484b 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.pb.go +++ b/pkg/apis/numaflow/v1alpha1/generated.pb.go @@ -2704,474 +2704,475 @@ func init() { } var fileDescriptor_9d0d1b17d3865563 = []byte{ - // 7464 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x5d, 0x6c, 0x1d, 0xd7, - 0x75, 0xae, 0xcf, 0x1f, 0x79, 0xce, 0x3a, 0x24, 0x45, 0x6d, 0xc9, 0x32, 0x25, 0xcb, 0xa2, 0x32, - 0x8e, 0x7d, 0x95, 0x9b, 0x84, 0xbc, 0xe6, 0xf5, 0x5f, 0x72, 0x93, 0xd8, 0x3c, 0xa4, 0x48, 0x51, - 0x22, 0x25, 0x66, 0x1d, 0x52, 0x76, 0xe2, 0x9b, 0xf8, 0x0e, 0x67, 0x36, 0x0f, 0xc7, 0x9c, 0x33, - 0x73, 0x3c, 0x33, 0x87, 0x12, 0x9d, 0x7b, 0x91, 0xbf, 0x07, 0xfb, 0xa2, 0x2d, 0x5a, 0xe4, 0x29, - 0x40, 0x91, 0x16, 0x2d, 0x0a, 0xe4, 0x21, 0x48, 0x1f, 0x0a, 0xb8, 0x0f, 0x05, 0xfa, 0x93, 0xa2, - 0x68, 0xd3, 0xa2, 0x3f, 0x79, 0x28, 0x50, 0xf7, 0x85, 0x68, 0x58, 0xf4, 0xa1, 0x05, 0x1a, 0x04, - 0x0d, 0xd0, 0x24, 0x42, 0x80, 0x14, 0xfb, 0x6f, 0xfe, 0xce, 0x1c, 0x89, 0x3c, 0x43, 0xca, 0x72, - 0xeb, 0xb7, 0x99, 0xbd, 0xd7, 0xfe, 0xd6, 0xde, 0x6b, 0xf6, 0xcf, 0xda, 0x6b, 0xaf, 0xbd, 0x06, - 0x16, 0x5b, 0x56, 0xb0, 0xd5, 0xdd, 0x98, 0x32, 0xdc, 0xf6, 0xb4, 0xd3, 0x6d, 0xeb, 0x1d, 0xcf, - 0x7d, 0x8d, 0x3f, 0x6c, 0xda, 0xee, 0xad, 0xe9, 0xce, 0x76, 0x6b, 0x5a, 0xef, 0x58, 0x7e, 0x94, - 0xb2, 0xf3, 0x94, 0x6e, 0x77, 0xb6, 0xf4, 0xa7, 0xa6, 0x5b, 0xd4, 0xa1, 0x9e, 0x1e, 0x50, 0x73, - 0xaa, 0xe3, 0xb9, 0x81, 0x4b, 0x9e, 0x8b, 0x80, 0xa6, 0x14, 0xd0, 0x94, 0x2a, 0x36, 0xd5, 0xd9, - 0x6e, 0x4d, 0x31, 0xa0, 0x28, 0x45, 0x01, 0x9d, 0xfb, 0x68, 0xac, 0x06, 0x2d, 0xb7, 0xe5, 0x4e, - 0x73, 0xbc, 0x8d, 0xee, 0x26, 0x7f, 0xe3, 0x2f, 0xfc, 0x49, 0xf0, 0x39, 0xa7, 0x6d, 0x3f, 0xef, - 0x4f, 0x59, 0x2e, 0xab, 0xd6, 0xb4, 0xe1, 0x7a, 0x74, 0x7a, 0xa7, 0xa7, 0x2e, 0xe7, 0x9e, 0x8e, - 0x68, 0xda, 0xba, 0xb1, 0x65, 0x39, 0xd4, 0xdb, 0x55, 
0x6d, 0x99, 0xf6, 0xa8, 0xef, 0x76, 0x3d, - 0x83, 0x1e, 0xaa, 0x94, 0x3f, 0xdd, 0xa6, 0x81, 0x9e, 0xc5, 0x6b, 0xba, 0x5f, 0x29, 0xaf, 0xeb, - 0x04, 0x56, 0xbb, 0x97, 0xcd, 0xb3, 0xf7, 0x2a, 0xe0, 0x1b, 0x5b, 0xb4, 0xad, 0xa7, 0xcb, 0x69, - 0xdf, 0x01, 0x38, 0x35, 0xbb, 0xe1, 0x07, 0x9e, 0x6e, 0x04, 0xab, 0xae, 0xb9, 0x46, 0xdb, 0x1d, - 0x5b, 0x0f, 0x28, 0xd9, 0x86, 0x2a, 0xab, 0x9b, 0xa9, 0x07, 0xfa, 0x44, 0xe1, 0x62, 0xe1, 0x52, - 0x7d, 0x66, 0x76, 0x6a, 0xc0, 0x6f, 0x31, 0xb5, 0x22, 0x81, 0x1a, 0x23, 0xfb, 0x7b, 0x93, 0x55, - 0xf5, 0x86, 0x21, 0x03, 0xf2, 0xf5, 0x02, 0x8c, 0x38, 0xae, 0x49, 0x9b, 0xd4, 0xa6, 0x46, 0xe0, - 0x7a, 0x13, 0xc5, 0x8b, 0xa5, 0x4b, 0xf5, 0x99, 0xcf, 0x0f, 0xcc, 0x31, 0xa3, 0x45, 0x53, 0xd7, - 0x63, 0x0c, 0x2e, 0x3b, 0x81, 0xb7, 0xdb, 0x38, 0xfd, 0xdd, 0xbd, 0xc9, 0x87, 0xf6, 0xf7, 0x26, - 0x47, 0xe2, 0x59, 0x98, 0xa8, 0x09, 0x59, 0x87, 0x7a, 0xe0, 0xda, 0x4c, 0x64, 0x96, 0xeb, 0xf8, - 0x13, 0x25, 0x5e, 0xb1, 0x0b, 0x53, 0x42, 0xda, 0x8c, 0xfd, 0x14, 0xeb, 0x2e, 0x53, 0x3b, 0x4f, - 0x4d, 0xad, 0x85, 0x64, 0x8d, 0x53, 0x12, 0xb8, 0x1e, 0xa5, 0xf9, 0x18, 0xc7, 0x21, 0x14, 0x4e, - 0xf8, 0xd4, 0xe8, 0x7a, 0x56, 0xb0, 0x3b, 0xe7, 0x3a, 0x01, 0xbd, 0x1d, 0x4c, 0x94, 0xb9, 0x94, - 0x9f, 0xcc, 0x82, 0x5e, 0x75, 0xcd, 0x66, 0x92, 0xba, 0x71, 0x6a, 0x7f, 0x6f, 0xf2, 0x44, 0x2a, - 0x11, 0xd3, 0x98, 0xc4, 0x81, 0x71, 0xab, 0xad, 0xb7, 0xe8, 0x6a, 0xd7, 0xb6, 0x9b, 0xd4, 0xf0, - 0x68, 0xe0, 0x4f, 0x54, 0x78, 0x13, 0x2e, 0x65, 0xf1, 0x59, 0x76, 0x0d, 0xdd, 0xbe, 0xb1, 0xf1, - 0x1a, 0x35, 0x02, 0xa4, 0x9b, 0xd4, 0xa3, 0x8e, 0x41, 0x1b, 0x13, 0xb2, 0x31, 0xe3, 0x4b, 0x29, - 0x24, 0xec, 0xc1, 0x26, 0x8b, 0x70, 0xb2, 0xe3, 0x59, 0x2e, 0xaf, 0x82, 0xad, 0xfb, 0xfe, 0x75, - 0xbd, 0x4d, 0x27, 0x86, 0x2e, 0x16, 0x2e, 0xd5, 0x1a, 0x67, 0x25, 0xcc, 0xc9, 0xd5, 0x34, 0x01, - 0xf6, 0x96, 0x21, 0x97, 0xa0, 0xaa, 0x12, 0x27, 0x86, 0x2f, 0x16, 0x2e, 0x55, 0x44, 0xdf, 0x51, - 0x65, 0x31, 0xcc, 0x25, 0x0b, 0x50, 0xd5, 0x37, 0x37, 0x2d, 0x87, 0x51, 0x56, 0xb9, 0x08, 
0xcf, - 0x67, 0x35, 0x6d, 0x56, 0xd2, 0x08, 0x1c, 0xf5, 0x86, 0x61, 0x59, 0x72, 0x15, 0x88, 0x4f, 0xbd, - 0x1d, 0xcb, 0xa0, 0xb3, 0x86, 0xe1, 0x76, 0x9d, 0x80, 0xd7, 0xbd, 0xc6, 0xeb, 0x7e, 0x4e, 0xd6, - 0x9d, 0x34, 0x7b, 0x28, 0x30, 0xa3, 0x14, 0x79, 0x11, 0xc6, 0xe5, 0xb0, 0x8b, 0xa4, 0x00, 0x1c, - 0xe9, 0x34, 0x13, 0x24, 0xa6, 0xf2, 0xb0, 0x87, 0x9a, 0x98, 0x70, 0x5e, 0xef, 0x06, 0x6e, 0x9b, - 0x41, 0x26, 0x99, 0xae, 0xb9, 0xdb, 0xd4, 0x99, 0xa8, 0x5f, 0x2c, 0x5c, 0xaa, 0x36, 0x2e, 0xee, - 0xef, 0x4d, 0x9e, 0x9f, 0xbd, 0x0b, 0x1d, 0xde, 0x15, 0x85, 0xdc, 0x80, 0x9a, 0xe9, 0xf8, 0xab, - 0xae, 0x6d, 0x19, 0xbb, 0x13, 0x23, 0xbc, 0x82, 0x4f, 0xc9, 0xa6, 0xd6, 0xe6, 0xaf, 0x37, 0x45, - 0xc6, 0x9d, 0xbd, 0xc9, 0xf3, 0xbd, 0xb3, 0xe3, 0x54, 0x98, 0x8f, 0x11, 0x06, 0x59, 0xe1, 0x80, - 0x73, 0xae, 0xb3, 0x69, 0xb5, 0x26, 0x46, 0xf9, 0xd7, 0xb8, 0xd8, 0xa7, 0x43, 0xcf, 0x5f, 0x6f, - 0x0a, 0xba, 0xc6, 0xa8, 0x64, 0x27, 0x5e, 0x31, 0x42, 0x20, 0x26, 0x8c, 0xa9, 0x79, 0x75, 0xce, - 0xd6, 0xad, 0xb6, 0x3f, 0x31, 0xc6, 0x3b, 0xef, 0x07, 0xfb, 0x60, 0x62, 0x9c, 0xb8, 0x71, 0x46, - 0x36, 0x65, 0x2c, 0x91, 0xec, 0x63, 0x0a, 0xf3, 0xdc, 0x0b, 0x70, 0xb2, 0x67, 0x6e, 0x20, 0xe3, - 0x50, 0xda, 0xa6, 0xbb, 0x7c, 0xea, 0xab, 0x21, 0x7b, 0x24, 0xa7, 0xa1, 0xb2, 0xa3, 0xdb, 0x5d, - 0x3a, 0x51, 0xe4, 0x69, 0xe2, 0xe5, 0xe3, 0xc5, 0xe7, 0x0b, 0xda, 0x6f, 0x96, 0x60, 0x44, 0xcd, - 0x38, 0x4d, 0xcb, 0xd9, 0x26, 0x2f, 0x41, 0xc9, 0x76, 0x5b, 0x72, 0xde, 0xfc, 0xc4, 0xc0, 0xb3, - 0xd8, 0xb2, 0xdb, 0x6a, 0x0c, 0xef, 0xef, 0x4d, 0x96, 0x96, 0xdd, 0x16, 0x32, 0x44, 0x62, 0x40, - 0x65, 0x5b, 0xdf, 0xdc, 0xd6, 0x79, 0x1d, 0xea, 0x33, 0x8d, 0x81, 0xa1, 0xaf, 0x31, 0x14, 0x56, - 0xd7, 0x46, 0x6d, 0x7f, 0x6f, 0xb2, 0xc2, 0x5f, 0x51, 0x60, 0x13, 0x17, 0x6a, 0x1b, 0xb6, 0x6e, - 0x6c, 0x6f, 0xb9, 0x36, 0x9d, 0x28, 0xe5, 0x64, 0xd4, 0x50, 0x48, 0xe2, 0x33, 0x87, 0xaf, 0x18, - 0xf1, 0x20, 0x06, 0x0c, 0x75, 0x4d, 0xdf, 0x72, 0xb6, 0xe5, 0x1c, 0xf8, 0xc2, 0xc0, 0xdc, 0xd6, - 0xe7, 0x79, 0x9b, 0x60, 0x7f, 
0x6f, 0x72, 0x48, 0x3c, 0xa3, 0x84, 0xd6, 0x7e, 0x50, 0x87, 0x31, - 0xf5, 0x91, 0x6e, 0x52, 0x2f, 0xa0, 0xb7, 0xc9, 0x45, 0x28, 0x3b, 0x6c, 0x68, 0xf2, 0x8f, 0xdc, - 0x18, 0x91, 0xdd, 0xa5, 0xcc, 0x87, 0x24, 0xcf, 0x61, 0x35, 0x13, 0x5d, 0x45, 0x0a, 0x7c, 0xf0, - 0x9a, 0x35, 0x39, 0x8c, 0xa8, 0x99, 0x78, 0x46, 0x09, 0x4d, 0x5e, 0x81, 0x32, 0x6f, 0xbc, 0x10, - 0xf5, 0x27, 0x07, 0x67, 0xc1, 0x9a, 0x5e, 0x65, 0x2d, 0xe0, 0x0d, 0xe7, 0xa0, 0xac, 0x2b, 0x76, - 0xcd, 0x4d, 0x29, 0xd8, 0x4f, 0xe4, 0x10, 0xec, 0x82, 0xe8, 0x8a, 0xeb, 0xf3, 0x0b, 0xc8, 0x10, - 0xc9, 0x2f, 0x17, 0xe0, 0xa4, 0xe1, 0x3a, 0x81, 0xce, 0x54, 0x0d, 0xb5, 0xc8, 0x4e, 0x54, 0x38, + // 7488 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x5b, 0x6c, 0x1d, 0xd7, + 0x79, 0xae, 0xf7, 0x8d, 0xdc, 0xfb, 0xdf, 0x24, 0x45, 0x2d, 0xc9, 0x32, 0x25, 0xcb, 0xa2, 0x32, + 0x8e, 0x7d, 0x94, 0x93, 0x84, 0x3c, 0xe6, 0xf1, 0x2d, 0x57, 0x9b, 0x9b, 0x14, 0x29, 0x4a, 0xa4, + 0xc4, 0xfc, 0x9b, 0x94, 0x9d, 0xf8, 0x24, 0x3e, 0xc3, 0x99, 0xc5, 0xcd, 0x31, 0x67, 0xcf, 0x6c, + 0xcf, 0xcc, 0xa6, 0x44, 0xe7, 0x1c, 0xe4, 0xf6, 0x60, 0x1f, 0x1c, 0x1c, 0x9c, 0x22, 0x4f, 0x01, + 0x8a, 0xb4, 0x68, 0x51, 0x20, 0x0f, 0x41, 0xfa, 0x50, 0xd4, 0x7d, 0x28, 0xd0, 0x4b, 0x8a, 0xa2, + 0x4d, 0x8a, 0x5e, 0xf2, 0x50, 0xa0, 0xee, 0x0b, 0xd1, 0xb0, 0xe8, 0x43, 0x0b, 0x34, 0x08, 0x1a, + 0xa0, 0x4d, 0x84, 0x00, 0x29, 0xd6, 0x6d, 0x6e, 0x7b, 0xb6, 0x44, 0xee, 0x21, 0x65, 0xb9, 0xf5, + 0xdb, 0xcc, 0x5a, 0xff, 0xfa, 0xfe, 0x35, 0xff, 0xba, 0xfd, 0xeb, 0x5f, 0xff, 0xfa, 0x07, 0x16, + 0x5b, 0x56, 0xb0, 0xd5, 0xdd, 0x98, 0x32, 0xdc, 0xf6, 0xb4, 0xd3, 0x6d, 0xeb, 0x1d, 0xcf, 0x7d, + 0x8d, 0x3f, 0x6c, 0xda, 0xee, 0xad, 0xe9, 0xce, 0x76, 0x6b, 0x5a, 0xef, 0x58, 0x7e, 0x94, 0xb2, + 0xf3, 0x94, 0x6e, 0x77, 0xb6, 0xf4, 0xa7, 0xa6, 0x5b, 0xd4, 0xa1, 0x9e, 0x1e, 0x50, 0x73, 0xaa, + 0xe3, 0xb9, 0x81, 0x4b, 0x9e, 0x8b, 0x80, 0xa6, 0x14, 0xd0, 0x94, 0x2a, 0x36, 0xd5, 0xd9, 0x6e, + 0x4d, 0x31, 0xa0, 
0x28, 0x45, 0x01, 0x9d, 0xfb, 0x68, 0xac, 0x06, 0x2d, 0xb7, 0xe5, 0x4e, 0x73, + 0xbc, 0x8d, 0xee, 0x26, 0x7f, 0xe3, 0x2f, 0xfc, 0x49, 0xf0, 0x39, 0xa7, 0x6d, 0x3f, 0xef, 0x4f, + 0x59, 0x2e, 0xab, 0xd6, 0xb4, 0xe1, 0x7a, 0x74, 0x7a, 0xa7, 0xa7, 0x2e, 0xe7, 0x9e, 0x8e, 0x68, + 0xda, 0xba, 0xb1, 0x65, 0x39, 0xd4, 0xdb, 0x55, 0xdf, 0x32, 0xed, 0x51, 0xdf, 0xed, 0x7a, 0x06, + 0x3d, 0x54, 0x29, 0x7f, 0xba, 0x4d, 0x03, 0x3d, 0x8b, 0xd7, 0x74, 0xbf, 0x52, 0x5e, 0xd7, 0x09, + 0xac, 0x76, 0x2f, 0x9b, 0x67, 0xef, 0x55, 0xc0, 0x37, 0xb6, 0x68, 0x5b, 0x4f, 0x97, 0xd3, 0xbe, + 0x0b, 0x70, 0x6a, 0x76, 0xc3, 0x0f, 0x3c, 0xdd, 0x08, 0x56, 0x5d, 0x73, 0x8d, 0xb6, 0x3b, 0xb6, + 0x1e, 0x50, 0xb2, 0x0d, 0x55, 0x56, 0x37, 0x53, 0x0f, 0xf4, 0x89, 0xc2, 0xc5, 0xc2, 0xa5, 0xfa, + 0xcc, 0xec, 0xd4, 0x80, 0x6d, 0x31, 0xb5, 0x22, 0x81, 0x1a, 0x23, 0xfb, 0x7b, 0x93, 0x55, 0xf5, + 0x86, 0x21, 0x03, 0xf2, 0x8d, 0x02, 0x8c, 0x38, 0xae, 0x49, 0x9b, 0xd4, 0xa6, 0x46, 0xe0, 0x7a, + 0x13, 0xc5, 0x8b, 0xa5, 0x4b, 0xf5, 0x99, 0x2f, 0x0c, 0xcc, 0x31, 0xe3, 0x8b, 0xa6, 0xae, 0xc7, + 0x18, 0x5c, 0x76, 0x02, 0x6f, 0xb7, 0x71, 0xfa, 0x7b, 0x7b, 0x93, 0x0f, 0xed, 0xef, 0x4d, 0x8e, + 0xc4, 0xb3, 0x30, 0x51, 0x13, 0xb2, 0x0e, 0xf5, 0xc0, 0xb5, 0x99, 0xc8, 0x2c, 0xd7, 0xf1, 0x27, + 0x4a, 0xbc, 0x62, 0x17, 0xa6, 0x84, 0xb4, 0x19, 0xfb, 0x29, 0xd6, 0x5d, 0xa6, 0x76, 0x9e, 0x9a, + 0x5a, 0x0b, 0xc9, 0x1a, 0xa7, 0x24, 0x70, 0x3d, 0x4a, 0xf3, 0x31, 0x8e, 0x43, 0x28, 0x9c, 0xf0, + 0xa9, 0xd1, 0xf5, 0xac, 0x60, 0x77, 0xce, 0x75, 0x02, 0x7a, 0x3b, 0x98, 0x28, 0x73, 0x29, 0x3f, + 0x99, 0x05, 0xbd, 0xea, 0x9a, 0xcd, 0x24, 0x75, 0xe3, 0xd4, 0xfe, 0xde, 0xe4, 0x89, 0x54, 0x22, + 0xa6, 0x31, 0x89, 0x03, 0xe3, 0x56, 0x5b, 0x6f, 0xd1, 0xd5, 0xae, 0x6d, 0x37, 0xa9, 0xe1, 0xd1, + 0xc0, 0x9f, 0xa8, 0xf0, 0x4f, 0xb8, 0x94, 0xc5, 0x67, 0xd9, 0x35, 0x74, 0xfb, 0xc6, 0xc6, 0x6b, + 0xd4, 0x08, 0x90, 0x6e, 0x52, 0x8f, 0x3a, 0x06, 0x6d, 0x4c, 0xc8, 0x8f, 0x19, 0x5f, 0x4a, 0x21, + 0x61, 0x0f, 0x36, 0x59, 0x84, 0x93, 0x1d, 0xcf, 0x72, 
0x79, 0x15, 0x6c, 0xdd, 0xf7, 0xaf, 0xeb, + 0x6d, 0x3a, 0x31, 0x74, 0xb1, 0x70, 0xa9, 0xd6, 0x38, 0x2b, 0x61, 0x4e, 0xae, 0xa6, 0x09, 0xb0, + 0xb7, 0x0c, 0xb9, 0x04, 0x55, 0x95, 0x38, 0x31, 0x7c, 0xb1, 0x70, 0xa9, 0x22, 0xfa, 0x8e, 0x2a, + 0x8b, 0x61, 0x2e, 0x59, 0x80, 0xaa, 0xbe, 0xb9, 0x69, 0x39, 0x8c, 0xb2, 0xca, 0x45, 0x78, 0x3e, + 0xeb, 0xd3, 0x66, 0x25, 0x8d, 0xc0, 0x51, 0x6f, 0x18, 0x96, 0x25, 0x57, 0x81, 0xf8, 0xd4, 0xdb, + 0xb1, 0x0c, 0x3a, 0x6b, 0x18, 0x6e, 0xd7, 0x09, 0x78, 0xdd, 0x6b, 0xbc, 0xee, 0xe7, 0x64, 0xdd, + 0x49, 0xb3, 0x87, 0x02, 0x33, 0x4a, 0x91, 0x17, 0x61, 0x5c, 0x0e, 0xbb, 0x48, 0x0a, 0xc0, 0x91, + 0x4e, 0x33, 0x41, 0x62, 0x2a, 0x0f, 0x7b, 0xa8, 0x89, 0x09, 0xe7, 0xf5, 0x6e, 0xe0, 0xb6, 0x19, + 0x64, 0x92, 0xe9, 0x9a, 0xbb, 0x4d, 0x9d, 0x89, 0xfa, 0xc5, 0xc2, 0xa5, 0x6a, 0xe3, 0xe2, 0xfe, + 0xde, 0xe4, 0xf9, 0xd9, 0xbb, 0xd0, 0xe1, 0x5d, 0x51, 0xc8, 0x0d, 0xa8, 0x99, 0x8e, 0xbf, 0xea, + 0xda, 0x96, 0xb1, 0x3b, 0x31, 0xc2, 0x2b, 0xf8, 0x94, 0xfc, 0xd4, 0xda, 0xfc, 0xf5, 0xa6, 0xc8, + 0xb8, 0xb3, 0x37, 0x79, 0xbe, 0x77, 0x76, 0x9c, 0x0a, 0xf3, 0x31, 0xc2, 0x20, 0x2b, 0x1c, 0x70, + 0xce, 0x75, 0x36, 0xad, 0xd6, 0xc4, 0x28, 0x6f, 0x8d, 0x8b, 0x7d, 0x3a, 0xf4, 0xfc, 0xf5, 0xa6, + 0xa0, 0x6b, 0x8c, 0x4a, 0x76, 0xe2, 0x15, 0x23, 0x04, 0x62, 0xc2, 0x98, 0x9a, 0x57, 0xe7, 0x6c, + 0xdd, 0x6a, 0xfb, 0x13, 0x63, 0xbc, 0xf3, 0x7e, 0xb0, 0x0f, 0x26, 0xc6, 0x89, 0x1b, 0x67, 0xe4, + 0xa7, 0x8c, 0x25, 0x92, 0x7d, 0x4c, 0x61, 0x9e, 0x7b, 0x01, 0x4e, 0xf6, 0xcc, 0x0d, 0x64, 0x1c, + 0x4a, 0xdb, 0x74, 0x97, 0x4f, 0x7d, 0x35, 0x64, 0x8f, 0xe4, 0x34, 0x54, 0x76, 0x74, 0xbb, 0x4b, + 0x27, 0x8a, 0x3c, 0x4d, 0xbc, 0x7c, 0xbc, 0xf8, 0x7c, 0x41, 0xfb, 0xf5, 0x12, 0x8c, 0xa8, 0x19, + 0xa7, 0x69, 0x39, 0xdb, 0xe4, 0x25, 0x28, 0xd9, 0x6e, 0x4b, 0xce, 0x9b, 0x9f, 0x1c, 0x78, 0x16, + 0x5b, 0x76, 0x5b, 0x8d, 0xe1, 0xfd, 0xbd, 0xc9, 0xd2, 0xb2, 0xdb, 0x42, 0x86, 0x48, 0x0c, 0xa8, + 0x6c, 0xeb, 0x9b, 0xdb, 0x3a, 0xaf, 0x43, 0x7d, 0xa6, 0x31, 0x30, 0xf4, 0x35, 0x86, 0xc2, 
0xea, + 0xda, 0xa8, 0xed, 0xef, 0x4d, 0x56, 0xf8, 0x2b, 0x0a, 0x6c, 0xe2, 0x42, 0x6d, 0xc3, 0xd6, 0x8d, + 0xed, 0x2d, 0xd7, 0xa6, 0x13, 0xa5, 0x9c, 0x8c, 0x1a, 0x0a, 0x49, 0x34, 0x73, 0xf8, 0x8a, 0x11, + 0x0f, 0x62, 0xc0, 0x50, 0xd7, 0xf4, 0x2d, 0x67, 0x5b, 0xce, 0x81, 0x2f, 0x0c, 0xcc, 0x6d, 0x7d, + 0x9e, 0x7f, 0x13, 0xec, 0xef, 0x4d, 0x0e, 0x89, 0x67, 0x94, 0xd0, 0xda, 0x8f, 0xea, 0x30, 0xa6, + 0x1a, 0xe9, 0x26, 0xf5, 0x02, 0x7a, 0x9b, 0x5c, 0x84, 0xb2, 0xc3, 0x86, 0x26, 0x6f, 0xe4, 0xc6, + 0x88, 0xec, 0x2e, 0x65, 0x3e, 0x24, 0x79, 0x0e, 0xab, 0x99, 0xe8, 0x2a, 0x52, 0xe0, 0x83, 0xd7, + 0xac, 0xc9, 0x61, 0x44, 0xcd, 0xc4, 0x33, 0x4a, 0x68, 0xf2, 0x0a, 0x94, 0xf9, 0xc7, 0x0b, 0x51, + 0x7f, 0x6a, 0x70, 0x16, 0xec, 0xd3, 0xab, 0xec, 0x0b, 0xf8, 0x87, 0x73, 0x50, 0xd6, 0x15, 0xbb, + 0xe6, 0xa6, 0x14, 0xec, 0x27, 0x73, 0x08, 0x76, 0x41, 0x74, 0xc5, 0xf5, 0xf9, 0x05, 0x64, 0x88, + 0xe4, 0xff, 0x17, 0xe0, 0xa4, 0xe1, 0x3a, 0x81, 0xce, 0x54, 0x0d, 0xb5, 0xc8, 0x4e, 0x54, 0x38, 0x9f, 0xab, 0x03, 0xf3, 0x99, 0x4b, 0x23, 0x36, 0x1e, 0x66, 0x6b, 0x46, 0x4f, 0x32, 0xf6, 0xf2, - 0x26, 0xbf, 0x5a, 0x80, 0x87, 0xd9, 0x5c, 0xde, 0x43, 0xcc, 0x57, 0xa0, 0xa3, 0xad, 0xd5, 0xd9, + 0x26, 0xbf, 0x5c, 0x80, 0x87, 0xd9, 0x5c, 0xde, 0x43, 0xcc, 0x57, 0xa0, 0xa3, 0xad, 0xd5, 0xd9, 0xfd, 0xbd, 0xc9, 0x87, 0x97, 0xb2, 0x98, 0x61, 0x76, 0x1d, 0x58, 0xed, 0x4e, 0xe9, 0xbd, 0x6a, 0x09, 0x5f, 0xdd, 0xea, 0x33, 0xcb, 0x47, 0xa9, 0xea, 0x34, 0x1e, 0x95, 0x5d, 0x39, 0x4b, 0xb3, 0xc3, 0xac, 0x5a, 0x90, 0xcb, 0x30, 0xbc, 0xe3, 0xda, 0xdd, 0x36, 0xf5, 0x27, 0xaa, 0x7c, 0x8a, 0x3d, 0x97, 0x35, 0xc5, 0xde, 0xe4, 0x24, 0x8d, 0x13, 0x12, 0x7e, 0x58, 0xbc, 0xfb, 0xa8, 0xca, - 0x12, 0x0b, 0x86, 0x6c, 0xab, 0x6d, 0x05, 0x3e, 0x5f, 0x38, 0xeb, 0x33, 0x97, 0x07, 0x6e, 0x96, - 0x18, 0xa2, 0xcb, 0x1c, 0x4c, 0x8c, 0x1a, 0xf1, 0x8c, 0x92, 0x01, 0x9b, 0x0a, 0x7d, 0x43, 0xb7, - 0xc5, 0xc2, 0x5a, 0x9f, 0xf9, 0xd4, 0xe0, 0xc3, 0x86, 0xa1, 0x34, 0x46, 0x65, 0x9b, 0x2a, 0xfc, - 0x15, 0x05, 0x36, 0xf9, 0x1c, 0x8c, 0x25, 
0xbe, 0xa6, 0x3f, 0x51, 0xe7, 0xd2, 0x79, 0x2c, 0x4b, - 0x3a, 0x21, 0x55, 0xb4, 0xf2, 0x24, 0x7a, 0x88, 0x8f, 0x29, 0x30, 0x72, 0x0d, 0xaa, 0xbe, 0x65, - 0x52, 0x43, 0xf7, 0xfc, 0x89, 0x91, 0x83, 0x00, 0x8f, 0x4b, 0xe0, 0x6a, 0x53, 0x16, 0xc3, 0x10, - 0x80, 0x4c, 0x01, 0x74, 0x74, 0x2f, 0xb0, 0x84, 0xa2, 0x3a, 0xca, 0x95, 0xa6, 0xb1, 0xfd, 0xbd, - 0x49, 0x58, 0x0d, 0x53, 0x31, 0x46, 0xc1, 0xe8, 0x59, 0xd9, 0x25, 0xa7, 0xd3, 0x0d, 0xc4, 0xc2, - 0x5a, 0x13, 0xf4, 0xcd, 0x30, 0x15, 0x63, 0x14, 0xe4, 0xdb, 0x05, 0x78, 0x34, 0x7a, 0xed, 0x1d, - 0x64, 0x27, 0x8e, 0x7c, 0x90, 0x4d, 0xee, 0xef, 0x4d, 0x3e, 0xda, 0xec, 0xcf, 0x12, 0xef, 0x56, - 0x1f, 0xed, 0x25, 0x18, 0x9d, 0xed, 0x06, 0x5b, 0xae, 0x67, 0xbd, 0xc1, 0x95, 0x6e, 0xb2, 0x00, - 0x95, 0x80, 0x2b, 0x4f, 0x62, 0x5d, 0x7e, 0x22, 0x4b, 0xd4, 0x42, 0x91, 0xbd, 0x46, 0x77, 0x95, - 0x36, 0x20, 0xd6, 0x47, 0xa1, 0x4c, 0x89, 0xe2, 0xda, 0x6f, 0x14, 0xa0, 0xd6, 0xd0, 0x7d, 0xcb, - 0x60, 0xf0, 0x64, 0x0e, 0xca, 0x5d, 0x9f, 0x7a, 0x87, 0x03, 0xe5, 0xb3, 0xf4, 0xba, 0x4f, 0x3d, - 0xe4, 0x85, 0xc9, 0x0d, 0xa8, 0x76, 0x74, 0xdf, 0xbf, 0xe5, 0x7a, 0xa6, 0x5c, 0x69, 0x0e, 0x08, - 0x24, 0xb4, 0x62, 0x59, 0x14, 0x43, 0x10, 0xad, 0x0e, 0xd1, 0x52, 0xab, 0xfd, 0xa8, 0x00, 0xa7, - 0x1a, 0xdd, 0xcd, 0x4d, 0xea, 0x49, 0x25, 0x50, 0xaa, 0x57, 0x14, 0x2a, 0x1e, 0x35, 0x2d, 0x5f, - 0xd6, 0x7d, 0x7e, 0xe0, 0x4f, 0x87, 0x0c, 0x45, 0x6a, 0x73, 0x5c, 0x5e, 0x3c, 0x01, 0x05, 0x3a, - 0xe9, 0x42, 0xed, 0x35, 0x1a, 0xf8, 0x81, 0x47, 0xf5, 0xb6, 0x6c, 0xdd, 0x95, 0x81, 0x59, 0x5d, - 0xa5, 0x41, 0x93, 0x23, 0xc5, 0x95, 0xc7, 0x30, 0x11, 0x23, 0x4e, 0xda, 0x77, 0x2a, 0x30, 0x32, - 0xe7, 0xb6, 0x37, 0x2c, 0x87, 0x9a, 0x97, 0xcd, 0x16, 0x25, 0xaf, 0x42, 0x99, 0x9a, 0x2d, 0x2a, - 0x5b, 0x3b, 0xf8, 0x3a, 0xcb, 0xc0, 0x22, 0x6d, 0x81, 0xbd, 0x21, 0x07, 0x26, 0xcb, 0x30, 0xb6, - 0xe9, 0xb9, 0x6d, 0x31, 0x75, 0xad, 0xed, 0x76, 0xa4, 0xaa, 0xd8, 0xf8, 0xa0, 0x9a, 0x0e, 0x16, - 0x12, 0xb9, 0x77, 0xf6, 0x26, 0x21, 0x7a, 0xc3, 0x54, 0x59, 0xf2, 0x32, 0x4c, 
0x44, 0x29, 0xe1, - 0x18, 0x9e, 0x63, 0xda, 0x3b, 0x57, 0x15, 0x2a, 0x8d, 0xf3, 0xfb, 0x7b, 0x93, 0x13, 0x0b, 0x7d, - 0x68, 0xb0, 0x6f, 0x69, 0xf2, 0x66, 0x01, 0xc6, 0xa3, 0x4c, 0x31, 0xaf, 0x4a, 0x0d, 0xe1, 0x88, - 0x26, 0x6c, 0xbe, 0xcd, 0x59, 0x48, 0xb1, 0xc0, 0x1e, 0xa6, 0x64, 0x01, 0x46, 0x02, 0x37, 0x26, - 0xaf, 0x0a, 0x97, 0x97, 0xa6, 0xf6, 0xe5, 0x6b, 0x6e, 0x5f, 0x69, 0x25, 0xca, 0x11, 0x84, 0x33, - 0xea, 0x3d, 0x25, 0xa9, 0x21, 0x2e, 0xa9, 0x73, 0xfb, 0x7b, 0x93, 0x67, 0xd6, 0x32, 0x29, 0xb0, - 0x4f, 0x49, 0xf2, 0xe5, 0x02, 0x8c, 0xa9, 0x2c, 0x29, 0xa3, 0xe1, 0xa3, 0x94, 0x11, 0x61, 0x3d, - 0x62, 0x2d, 0xc1, 0x00, 0x53, 0x0c, 0xb5, 0x9f, 0x94, 0xa1, 0x16, 0xce, 0x6c, 0xe4, 0x71, 0xa8, - 0xf0, 0x1d, 0xb7, 0x54, 0x58, 0xc3, 0x25, 0x8b, 0x6f, 0xcc, 0x51, 0xe4, 0x91, 0x27, 0x60, 0xd8, - 0x70, 0xdb, 0x6d, 0xdd, 0x31, 0xb9, 0x15, 0xa5, 0xd6, 0xa8, 0xb3, 0x95, 0x7a, 0x4e, 0x24, 0xa1, - 0xca, 0x23, 0xe7, 0xa1, 0xac, 0x7b, 0x2d, 0x61, 0xd0, 0xa8, 0x89, 0xf9, 0x68, 0xd6, 0x6b, 0xf9, - 0xc8, 0x53, 0xc9, 0xc7, 0xa0, 0x44, 0x9d, 0x9d, 0x89, 0x72, 0x7f, 0x55, 0xe0, 0xb2, 0xb3, 0x73, - 0x53, 0xf7, 0x1a, 0x75, 0x59, 0x87, 0xd2, 0x65, 0x67, 0x07, 0x59, 0x19, 0xb2, 0x0c, 0xc3, 0xd4, - 0xd9, 0x61, 0xdf, 0x5e, 0x5a, 0x1a, 0x3e, 0xd0, 0xa7, 0x38, 0x23, 0x91, 0x5a, 0x71, 0xa8, 0x50, - 0xc8, 0x64, 0x54, 0x10, 0xe4, 0x33, 0x30, 0x22, 0x74, 0x8b, 0x15, 0xf6, 0x4d, 0xfc, 0x89, 0x21, + 0x12, 0x0b, 0x86, 0x6c, 0xab, 0x6d, 0x05, 0x3e, 0x5f, 0x38, 0xeb, 0x33, 0x97, 0x07, 0xfe, 0x2c, + 0x31, 0x44, 0x97, 0x39, 0x98, 0x18, 0x35, 0xe2, 0x19, 0x25, 0x03, 0x36, 0x15, 0xfa, 0x86, 0x6e, + 0x8b, 0x85, 0xb5, 0x3e, 0xf3, 0xe9, 0xc1, 0x87, 0x0d, 0x43, 0x69, 0x8c, 0xca, 0x6f, 0xaa, 0xf0, + 0x57, 0x14, 0xd8, 0xe4, 0xf3, 0x30, 0x96, 0x68, 0x4d, 0x7f, 0xa2, 0xce, 0xa5, 0xf3, 0x58, 0x96, + 0x74, 0x42, 0xaa, 0x68, 0xe5, 0x49, 0xf4, 0x10, 0x1f, 0x53, 0x60, 0xe4, 0x1a, 0x54, 0x7d, 0xcb, + 0xa4, 0x86, 0xee, 0xf9, 0x13, 0x23, 0x07, 0x01, 0x1e, 0x97, 0xc0, 0xd5, 0xa6, 0x2c, 0x86, 0x21, + 0x00, 0x99, 0x02, 
0xe8, 0xe8, 0x5e, 0x60, 0x09, 0x45, 0x75, 0x94, 0x2b, 0x4d, 0x63, 0xfb, 0x7b, + 0x93, 0xb0, 0x1a, 0xa6, 0x62, 0x8c, 0x82, 0xd1, 0xb3, 0xb2, 0x4b, 0x4e, 0xa7, 0x1b, 0x88, 0x85, + 0xb5, 0x26, 0xe8, 0x9b, 0x61, 0x2a, 0xc6, 0x28, 0xc8, 0x77, 0x0a, 0xf0, 0x68, 0xf4, 0xda, 0x3b, + 0xc8, 0x4e, 0x1c, 0xf9, 0x20, 0x9b, 0xdc, 0xdf, 0x9b, 0x7c, 0xb4, 0xd9, 0x9f, 0x25, 0xde, 0xad, + 0x3e, 0xda, 0x4b, 0x30, 0x3a, 0xdb, 0x0d, 0xb6, 0x5c, 0xcf, 0x7a, 0x83, 0x2b, 0xdd, 0x64, 0x01, + 0x2a, 0x01, 0x57, 0x9e, 0xc4, 0xba, 0xfc, 0x44, 0x96, 0xa8, 0x85, 0x22, 0x7b, 0x8d, 0xee, 0x2a, + 0x6d, 0x40, 0xac, 0x8f, 0x42, 0x99, 0x12, 0xc5, 0xb5, 0x5f, 0x2b, 0x40, 0xad, 0xa1, 0xfb, 0x96, + 0xc1, 0xe0, 0xc9, 0x1c, 0x94, 0xbb, 0x3e, 0xf5, 0x0e, 0x07, 0xca, 0x67, 0xe9, 0x75, 0x9f, 0x7a, + 0xc8, 0x0b, 0x93, 0x1b, 0x50, 0xed, 0xe8, 0xbe, 0x7f, 0xcb, 0xf5, 0x4c, 0xb9, 0xd2, 0x1c, 0x10, + 0x48, 0x68, 0xc5, 0xb2, 0x28, 0x86, 0x20, 0x5a, 0x1d, 0xa2, 0xa5, 0x56, 0xfb, 0x49, 0x01, 0x4e, + 0x35, 0xba, 0x9b, 0x9b, 0xd4, 0x93, 0x4a, 0xa0, 0x54, 0xaf, 0x28, 0x54, 0x3c, 0x6a, 0x5a, 0xbe, + 0xac, 0xfb, 0xfc, 0xc0, 0x4d, 0x87, 0x0c, 0x45, 0x6a, 0x73, 0x5c, 0x5e, 0x3c, 0x01, 0x05, 0x3a, + 0xe9, 0x42, 0xed, 0x35, 0x1a, 0xf8, 0x81, 0x47, 0xf5, 0xb6, 0xfc, 0xba, 0x2b, 0x03, 0xb3, 0xba, + 0x4a, 0x83, 0x26, 0x47, 0x8a, 0x2b, 0x8f, 0x61, 0x22, 0x46, 0x9c, 0xb4, 0xef, 0x56, 0x60, 0x64, + 0xce, 0x6d, 0x6f, 0x58, 0x0e, 0x35, 0x2f, 0x9b, 0x2d, 0x4a, 0x5e, 0x85, 0x32, 0x35, 0x5b, 0x54, + 0x7e, 0xed, 0xe0, 0xeb, 0x2c, 0x03, 0x8b, 0xb4, 0x05, 0xf6, 0x86, 0x1c, 0x98, 0x2c, 0xc3, 0xd8, + 0xa6, 0xe7, 0xb6, 0xc5, 0xd4, 0xb5, 0xb6, 0xdb, 0x91, 0xaa, 0x62, 0xe3, 0x83, 0x6a, 0x3a, 0x58, + 0x48, 0xe4, 0xde, 0xd9, 0x9b, 0x84, 0xe8, 0x0d, 0x53, 0x65, 0xc9, 0xcb, 0x30, 0x11, 0xa5, 0x84, + 0x63, 0x78, 0x8e, 0x69, 0xef, 0x5c, 0x55, 0xa8, 0x34, 0xce, 0xef, 0xef, 0x4d, 0x4e, 0x2c, 0xf4, + 0xa1, 0xc1, 0xbe, 0xa5, 0xc9, 0x9b, 0x05, 0x18, 0x8f, 0x32, 0xc5, 0xbc, 0x2a, 0x35, 0x84, 0x23, + 0x9a, 0xb0, 0xf9, 0x36, 0x67, 0x21, 0xc5, 0x02, 0x7b, 
0x98, 0x92, 0x05, 0x18, 0x09, 0xdc, 0x98, + 0xbc, 0x2a, 0x5c, 0x5e, 0x9a, 0xda, 0x97, 0xaf, 0xb9, 0x7d, 0xa5, 0x95, 0x28, 0x47, 0x10, 0xce, + 0xa8, 0xf7, 0x94, 0xa4, 0x86, 0xb8, 0xa4, 0xce, 0xed, 0xef, 0x4d, 0x9e, 0x59, 0xcb, 0xa4, 0xc0, + 0x3e, 0x25, 0xc9, 0x57, 0x0a, 0x30, 0xa6, 0xb2, 0xa4, 0x8c, 0x86, 0x8f, 0x52, 0x46, 0x84, 0xf5, + 0x88, 0xb5, 0x04, 0x03, 0x4c, 0x31, 0xd4, 0x7e, 0x5a, 0x86, 0x5a, 0x38, 0xb3, 0x91, 0xc7, 0xa1, + 0xc2, 0x77, 0xdc, 0x52, 0x61, 0x0d, 0x97, 0x2c, 0xbe, 0x31, 0x47, 0x91, 0x47, 0x9e, 0x80, 0x61, + 0xc3, 0x6d, 0xb7, 0x75, 0xc7, 0xe4, 0x56, 0x94, 0x5a, 0xa3, 0xce, 0x56, 0xea, 0x39, 0x91, 0x84, + 0x2a, 0x8f, 0x9c, 0x87, 0xb2, 0xee, 0xb5, 0x84, 0x41, 0xa3, 0x26, 0xe6, 0xa3, 0x59, 0xaf, 0xe5, + 0x23, 0x4f, 0x25, 0x1f, 0x83, 0x12, 0x75, 0x76, 0x26, 0xca, 0xfd, 0x55, 0x81, 0xcb, 0xce, 0xce, + 0x4d, 0xdd, 0x6b, 0xd4, 0x65, 0x1d, 0x4a, 0x97, 0x9d, 0x1d, 0x64, 0x65, 0xc8, 0x32, 0x0c, 0x53, + 0x67, 0x87, 0xb5, 0xbd, 0xb4, 0x34, 0x7c, 0xa0, 0x4f, 0x71, 0x46, 0x22, 0xb5, 0xe2, 0x50, 0xa1, + 0x90, 0xc9, 0xa8, 0x20, 0xc8, 0x67, 0x61, 0x44, 0xe8, 0x16, 0x2b, 0xac, 0x4d, 0xfc, 0x89, 0x21, 0x0e, 0x39, 0xd9, 0x5f, 0x39, 0xe1, 0x74, 0x91, 0x65, 0x27, 0x96, 0xe8, 0x63, 0x02, 0x8a, 0x7c, - 0x06, 0x6a, 0x6a, 0x23, 0xa8, 0xbe, 0x6c, 0xa6, 0x51, 0x44, 0xed, 0x1e, 0x91, 0xbe, 0xde, 0xb5, - 0x3c, 0xda, 0xa6, 0x4e, 0xe0, 0x37, 0x4e, 0xaa, 0x6d, 0xb2, 0xca, 0xf5, 0x31, 0x42, 0x23, 0x1b, - 0xbd, 0xd6, 0x1d, 0x61, 0x9a, 0x78, 0xbc, 0xcf, 0xac, 0x3e, 0x80, 0x69, 0xe7, 0xf3, 0x70, 0x22, - 0x34, 0xbf, 0xc8, 0x1d, 0xbc, 0x30, 0x56, 0x3c, 0xcd, 0x8a, 0x2f, 0x25, 0xb3, 0xee, 0xec, 0x4d, - 0x3e, 0x96, 0xb1, 0x87, 0x8f, 0x08, 0x30, 0x0d, 0xa6, 0xfd, 0x61, 0x09, 0x7a, 0xd5, 0xee, 0xa4, - 0xd0, 0x0a, 0x47, 0x2d, 0xb4, 0x74, 0x83, 0xc4, 0xf4, 0xf9, 0xbc, 0x2c, 0x96, 0xbf, 0x51, 0x59, - 0x1f, 0xa6, 0x74, 0xd4, 0x1f, 0xe6, 0x41, 0x19, 0x3b, 0xda, 0x5b, 0x65, 0x18, 0x9b, 0xd7, 0x69, - 0xdb, 0x75, 0xee, 0xb9, 0x09, 0x29, 0x3c, 0x10, 0x9b, 0x90, 0x4b, 0x50, 0xf5, 0x68, 0xc7, 0xb6, 
- 0x0c, 0xdd, 0xe7, 0x9f, 0x5e, 0x1a, 0xfd, 0x50, 0xa6, 0x61, 0x98, 0xdb, 0x67, 0xf3, 0x59, 0x7a, - 0x20, 0x37, 0x9f, 0xe5, 0x77, 0x7f, 0xf3, 0xa9, 0x7d, 0xb9, 0x08, 0x5c, 0x51, 0x21, 0x17, 0xa1, - 0xcc, 0x16, 0xe1, 0xb4, 0xc9, 0x83, 0x77, 0x1c, 0x9e, 0x43, 0xce, 0x41, 0x31, 0x70, 0xe5, 0xc8, - 0x03, 0x99, 0x5f, 0x5c, 0x73, 0xb1, 0x18, 0xb8, 0xe4, 0x0d, 0x00, 0xc3, 0x75, 0x4c, 0x4b, 0xd9, - 0xc2, 0xf3, 0x35, 0x6c, 0xc1, 0xf5, 0x6e, 0xe9, 0x9e, 0x39, 0x17, 0x22, 0x8a, 0xed, 0x47, 0xf4, - 0x8e, 0x31, 0x6e, 0xe4, 0x05, 0x18, 0x72, 0x9d, 0x85, 0xae, 0x6d, 0x73, 0x81, 0xd6, 0x1a, 0xff, - 0x8d, 0xed, 0x09, 0x6f, 0xf0, 0x94, 0x3b, 0x7b, 0x93, 0x67, 0x85, 0x7e, 0xcb, 0xde, 0x5e, 0xf2, - 0xac, 0xc0, 0x72, 0x5a, 0xcd, 0xc0, 0xd3, 0x03, 0xda, 0xda, 0x45, 0x59, 0x4c, 0xfb, 0x5a, 0x01, - 0xea, 0x0b, 0xd6, 0x6d, 0x6a, 0xbe, 0x64, 0x39, 0xa6, 0x7b, 0x8b, 0x20, 0x0c, 0xd9, 0xd4, 0x69, - 0x05, 0x5b, 0xb2, 0xf7, 0x4f, 0xc5, 0xc6, 0x5a, 0x78, 0x84, 0x12, 0xd5, 0xbf, 0x4d, 0x03, 0x9d, - 0x8d, 0xbe, 0xf9, 0xae, 0x34, 0xf2, 0x8b, 0x4d, 0x29, 0x47, 0x40, 0x89, 0x44, 0xa6, 0xa1, 0x26, - 0xb4, 0x4f, 0xcb, 0x69, 0x71, 0x19, 0x56, 0xa3, 0x49, 0xaf, 0xa9, 0x32, 0x30, 0xa2, 0xd1, 0x76, - 0xe1, 0x64, 0x8f, 0x18, 0x88, 0x09, 0xe5, 0x40, 0x6f, 0xa9, 0xf9, 0x75, 0x61, 0x60, 0x01, 0xaf, - 0xe9, 0xad, 0x98, 0x70, 0xf9, 0x1a, 0xbf, 0xa6, 0xb3, 0x35, 0x9e, 0xa1, 0x6b, 0x3f, 0x2b, 0x40, - 0x75, 0xa1, 0xeb, 0x18, 0x7c, 0x6f, 0x74, 0x6f, 0x53, 0x98, 0x52, 0x18, 0x8a, 0x99, 0x0a, 0x43, - 0x17, 0x86, 0xb6, 0x6f, 0x85, 0x0a, 0x45, 0x7d, 0x66, 0x65, 0xf0, 0x5e, 0x21, 0xab, 0x34, 0x75, - 0x8d, 0xe3, 0x89, 0x93, 0x9a, 0x31, 0x59, 0xa1, 0xa1, 0x6b, 0x2f, 0x71, 0xa6, 0x92, 0xd9, 0xb9, - 0x8f, 0x41, 0x3d, 0x46, 0x76, 0x28, 0xa3, 0xed, 0xef, 0x96, 0x61, 0x68, 0xb1, 0xd9, 0x9c, 0x5d, - 0x5d, 0x22, 0xcf, 0x40, 0x5d, 0x1a, 0xf1, 0xaf, 0x47, 0x32, 0x08, 0xcf, 0x70, 0x9a, 0x51, 0x16, - 0xc6, 0xe9, 0x98, 0x3a, 0xe6, 0x51, 0xdd, 0x6e, 0xcb, 0xc1, 0x12, 0xaa, 0x63, 0xc8, 0x12, 0x51, - 0xe4, 0x11, 0x1d, 0xc6, 0xd8, 0x0e, 
0x8f, 0x89, 0x50, 0xec, 0xde, 0xe4, 0xb0, 0x39, 0xe0, 0xfe, - 0x8e, 0x2b, 0x89, 0xeb, 0x09, 0x00, 0x4c, 0x01, 0x92, 0xe7, 0xa1, 0xaa, 0x77, 0x83, 0x2d, 0xae, - 0x40, 0x8b, 0xb1, 0x71, 0x9e, 0x9f, 0x71, 0xc8, 0xb4, 0x3b, 0x7b, 0x93, 0x23, 0xd7, 0xb0, 0xf1, - 0x8c, 0x7a, 0xc7, 0x90, 0x9a, 0x55, 0x4e, 0xed, 0x18, 0x65, 0xe5, 0x2a, 0x87, 0xae, 0xdc, 0x6a, - 0x02, 0x00, 0x53, 0x80, 0xe4, 0x15, 0x18, 0xd9, 0xa6, 0xbb, 0x81, 0xbe, 0x21, 0x19, 0x0c, 0x1d, - 0x86, 0xc1, 0x38, 0x53, 0xe1, 0xae, 0xc5, 0x8a, 0x63, 0x02, 0x8c, 0xf8, 0x70, 0x7a, 0x9b, 0x7a, - 0x1b, 0xd4, 0x73, 0xe5, 0xee, 0x53, 0x32, 0x19, 0x3e, 0x0c, 0x93, 0x89, 0xfd, 0xbd, 0xc9, 0xd3, - 0xd7, 0x32, 0x60, 0x30, 0x13, 0x5c, 0xfb, 0x69, 0x11, 0x4e, 0x2c, 0x8a, 0x53, 0x54, 0xd7, 0x13, - 0x8b, 0x30, 0x39, 0x0b, 0x25, 0xaf, 0xd3, 0xe5, 0x3d, 0xa7, 0x24, 0xec, 0xa4, 0xb8, 0xba, 0x8e, - 0x2c, 0x8d, 0xbc, 0x0c, 0x55, 0x53, 0x4e, 0x19, 0x72, 0xf3, 0x7b, 0xd8, 0x89, 0x86, 0x2f, 0x82, - 0xea, 0x0d, 0x43, 0x34, 0xa6, 0xe9, 0xb7, 0xfd, 0x56, 0xd3, 0x7a, 0x83, 0xca, 0xfd, 0x20, 0xd7, - 0xf4, 0x57, 0x44, 0x12, 0xaa, 0x3c, 0xb6, 0xaa, 0x6e, 0xd3, 0x5d, 0xb1, 0x1b, 0x2a, 0x47, 0xab, - 0xea, 0x35, 0x99, 0x86, 0x61, 0x2e, 0x99, 0x54, 0x83, 0x85, 0xf5, 0x82, 0xb2, 0xd8, 0xc9, 0xdf, - 0x64, 0x09, 0x72, 0xdc, 0xb0, 0x29, 0xf3, 0x35, 0x2b, 0x08, 0xa8, 0x27, 0x3f, 0xe3, 0x40, 0x53, - 0xe6, 0x55, 0x8e, 0x80, 0x12, 0x89, 0x7c, 0x18, 0x6a, 0x1c, 0xbc, 0x61, 0xbb, 0x1b, 0xfc, 0xc3, - 0xd5, 0xc4, 0x9e, 0xfe, 0xa6, 0x4a, 0xc4, 0x28, 0x5f, 0xfb, 0x79, 0x11, 0xce, 0x2c, 0xd2, 0x40, - 0x68, 0x35, 0xf3, 0xb4, 0x63, 0xbb, 0xbb, 0x4c, 0xb5, 0x44, 0xfa, 0x3a, 0x79, 0x11, 0xc0, 0xf2, - 0x37, 0x9a, 0x3b, 0x06, 0x1f, 0x07, 0x62, 0x0c, 0x5f, 0x94, 0x43, 0x12, 0x96, 0x9a, 0x0d, 0x99, - 0x73, 0x27, 0xf1, 0x86, 0xb1, 0x32, 0xd1, 0xf6, 0xaa, 0x78, 0x97, 0xed, 0x55, 0x13, 0xa0, 0x13, - 0x29, 0xa8, 0x25, 0x4e, 0xf9, 0x3f, 0x15, 0x9b, 0xc3, 0xe8, 0xa6, 0x31, 0x98, 0x3c, 0x2a, 0xa3, - 0x03, 0xe3, 0x26, 0xdd, 0xd4, 0xbb, 0x76, 0x10, 0x2a, 0xd5, 0x72, 0x10, 
0x1f, 0x5c, 0x2f, 0x0f, - 0x4f, 0x78, 0xe7, 0x53, 0x48, 0xd8, 0x83, 0xad, 0xfd, 0x5e, 0x09, 0xce, 0x2d, 0xd2, 0x20, 0xb4, - 0xb8, 0xc8, 0xd9, 0xb1, 0xd9, 0xa1, 0x06, 0xfb, 0x0a, 0x6f, 0x16, 0x60, 0xc8, 0xd6, 0x37, 0xa8, - 0xcd, 0x56, 0x2f, 0xd6, 0x9a, 0x57, 0x07, 0x5e, 0x08, 0xfa, 0x73, 0x99, 0x5a, 0xe6, 0x1c, 0x52, - 0x4b, 0x83, 0x48, 0x44, 0xc9, 0x9e, 0x4d, 0xea, 0x86, 0xdd, 0xf5, 0x03, 0xea, 0xad, 0xba, 0x5e, - 0x20, 0xf5, 0xc9, 0x70, 0x52, 0x9f, 0x8b, 0xb2, 0x30, 0x4e, 0x47, 0x66, 0x00, 0x0c, 0xdb, 0xa2, - 0x4e, 0xc0, 0x4b, 0x89, 0x71, 0x45, 0xd4, 0xf7, 0x9d, 0x0b, 0x73, 0x30, 0x46, 0xc5, 0x58, 0xb5, - 0x5d, 0xc7, 0x0a, 0x5c, 0xc1, 0xaa, 0x9c, 0x64, 0xb5, 0x12, 0x65, 0x61, 0x9c, 0x8e, 0x17, 0xa3, - 0x81, 0x67, 0x19, 0x3e, 0x2f, 0x56, 0x49, 0x15, 0x8b, 0xb2, 0x30, 0x4e, 0xc7, 0xd6, 0xbc, 0x58, - 0xfb, 0x0f, 0xb5, 0xe6, 0x7d, 0xab, 0x06, 0x17, 0x12, 0x62, 0x0d, 0xf4, 0x80, 0x6e, 0x76, 0xed, - 0x26, 0x0d, 0xd4, 0x07, 0x1c, 0x70, 0x2d, 0xfc, 0x85, 0xe8, 0xbb, 0x0b, 0xdf, 0x0d, 0xe3, 0x68, - 0xbe, 0x7b, 0x4f, 0x05, 0x0f, 0xf4, 0xed, 0xa7, 0xa1, 0xe6, 0xe8, 0x81, 0xcf, 0x07, 0xae, 0x1c, - 0xa3, 0xa1, 0x1a, 0x76, 0x5d, 0x65, 0x60, 0x44, 0x43, 0x56, 0xe1, 0xb4, 0x14, 0xf1, 0xe5, 0xdb, - 0x1d, 0xd7, 0x0b, 0xa8, 0x27, 0xca, 0xca, 0xe5, 0x54, 0x96, 0x3d, 0xbd, 0x92, 0x41, 0x83, 0x99, - 0x25, 0xc9, 0x0a, 0x9c, 0x32, 0xc4, 0x79, 0x36, 0xb5, 0x5d, 0xdd, 0x54, 0x80, 0xc2, 0xc0, 0x15, - 0x6e, 0x8d, 0xe6, 0x7a, 0x49, 0x30, 0xab, 0x5c, 0xba, 0x37, 0x0f, 0x0d, 0xd4, 0x9b, 0x87, 0x07, - 0xe9, 0xcd, 0xd5, 0xc1, 0x7a, 0x73, 0xed, 0x60, 0xbd, 0x99, 0x49, 0x9e, 0xf5, 0x23, 0xea, 0x31, - 0xf5, 0x44, 0xac, 0xb0, 0x31, 0x77, 0x89, 0x50, 0xf2, 0xcd, 0x0c, 0x1a, 0xcc, 0x2c, 0x49, 0x36, - 0xe0, 0x9c, 0x48, 0xbf, 0xec, 0x18, 0xde, 0x6e, 0x87, 0x2d, 0x3c, 0x31, 0xdc, 0x7a, 0xc2, 0xc2, - 0x78, 0xae, 0xd9, 0x97, 0x12, 0xef, 0x82, 0x42, 0xfe, 0x17, 0x8c, 0x8a, 0xaf, 0xb4, 0xa2, 0x77, - 0x38, 0xac, 0x70, 0x9e, 0x78, 0x58, 0xc2, 0x8e, 0xce, 0xc5, 0x33, 0x31, 0x49, 0x4b, 0x66, 0xe1, - 0x44, 0x67, 
0xc7, 0x60, 0x8f, 0x4b, 0x9b, 0xd7, 0x29, 0x35, 0xa9, 0xc9, 0x4f, 0x6b, 0x6a, 0x8d, - 0x47, 0x94, 0xa1, 0x63, 0x35, 0x99, 0x8d, 0x69, 0x7a, 0xf2, 0x3c, 0x8c, 0xf8, 0x81, 0xee, 0x05, - 0xd2, 0xac, 0x37, 0x31, 0x26, 0x9c, 0x4b, 0x94, 0xd5, 0xab, 0x19, 0xcb, 0xc3, 0x04, 0x65, 0xe6, - 0x7a, 0x71, 0xe2, 0xf8, 0xd6, 0x8b, 0x3c, 0xb3, 0xd5, 0x9f, 0x15, 0xe1, 0xe2, 0x22, 0x0d, 0x56, - 0x5c, 0x47, 0x1a, 0x45, 0xb3, 0x96, 0xfd, 0x03, 0xd9, 0x44, 0x93, 0x8b, 0x76, 0xf1, 0x48, 0x17, - 0xed, 0xd2, 0x11, 0x2d, 0xda, 0xe5, 0x63, 0x5c, 0xb4, 0xff, 0xa0, 0x08, 0x8f, 0x24, 0x24, 0xb9, - 0xea, 0x9a, 0x6a, 0xc2, 0x7f, 0x5f, 0x80, 0x07, 0x10, 0xe0, 0x1d, 0xa1, 0x77, 0xf2, 0x63, 0xad, - 0x94, 0xc6, 0xf3, 0xd5, 0xb4, 0xc6, 0xf3, 0x4a, 0x9e, 0x95, 0x2f, 0x83, 0xc3, 0x81, 0x56, 0xbc, - 0xab, 0x40, 0x3c, 0x79, 0x08, 0x27, 0x4c, 0x3f, 0x31, 0xa5, 0x27, 0xf4, 0x5e, 0xc3, 0x1e, 0x0a, - 0xcc, 0x28, 0x45, 0x9a, 0xf0, 0xb0, 0x4f, 0x9d, 0xc0, 0x72, 0xa8, 0x9d, 0x84, 0x13, 0xda, 0xd0, - 0x63, 0x12, 0xee, 0xe1, 0x66, 0x16, 0x11, 0x66, 0x97, 0xcd, 0x33, 0x0f, 0xfc, 0x25, 0x70, 0x95, - 0x53, 0x88, 0xe6, 0xc8, 0x34, 0x96, 0x37, 0xd3, 0x1a, 0xcb, 0xab, 0xf9, 0xbf, 0xdb, 0x60, 0xda, - 0xca, 0x0c, 0x00, 0xff, 0x0a, 0x71, 0x75, 0x25, 0x5c, 0xa4, 0x31, 0xcc, 0xc1, 0x18, 0x15, 0x5b, - 0x80, 0x94, 0x9c, 0xe3, 0x9a, 0x4a, 0xb8, 0x00, 0x35, 0xe3, 0x99, 0x98, 0xa4, 0xed, 0xab, 0xed, - 0x54, 0x06, 0xd6, 0x76, 0xae, 0x02, 0x49, 0x18, 0x1e, 0x05, 0xde, 0x50, 0xd2, 0x79, 0x72, 0xa9, - 0x87, 0x02, 0x33, 0x4a, 0xf5, 0xe9, 0xca, 0xc3, 0x47, 0xdb, 0x95, 0xab, 0x83, 0x77, 0x65, 0xf2, - 0x2a, 0x9c, 0xe5, 0xac, 0xa4, 0x7c, 0x92, 0xc0, 0x42, 0xef, 0xf9, 0x80, 0x04, 0x3e, 0x8b, 0xfd, - 0x08, 0xb1, 0x3f, 0x06, 0xfb, 0x3e, 0x86, 0x47, 0x4d, 0xc6, 0x5c, 0xb7, 0xfb, 0xeb, 0x44, 0x73, - 0x19, 0x34, 0x98, 0x59, 0x92, 0x75, 0xb1, 0x80, 0x75, 0x43, 0x7d, 0xc3, 0xa6, 0xa6, 0x74, 0x1e, - 0x0d, 0xbb, 0xd8, 0xda, 0x72, 0x53, 0xe6, 0x60, 0x8c, 0x2a, 0x4b, 0x4d, 0x19, 0x39, 0xa4, 0x9a, - 0xb2, 0xc8, 0xad, 0xf4, 0x9b, 0x09, 0x6d, 0x48, 
0xea, 0x3a, 0xa1, 0x3b, 0xf0, 0x5c, 0x9a, 0x00, - 0x7b, 0xcb, 0x70, 0x2d, 0xd1, 0xf0, 0xac, 0x4e, 0xe0, 0x27, 0xb1, 0xc6, 0x52, 0x5a, 0x62, 0x06, - 0x0d, 0x66, 0x96, 0x64, 0xfa, 0xf9, 0x16, 0xd5, 0xed, 0x60, 0x2b, 0x09, 0x78, 0x22, 0xa9, 0x9f, - 0x5f, 0xe9, 0x25, 0xc1, 0xac, 0x72, 0x99, 0x0b, 0xd2, 0xf8, 0x83, 0xa9, 0x56, 0x7d, 0xa5, 0x04, - 0x67, 0x17, 0x69, 0x10, 0xfa, 0xd5, 0xbc, 0x6f, 0x46, 0x79, 0x17, 0xcc, 0x28, 0xdf, 0xac, 0xc0, - 0xa9, 0x45, 0x1a, 0xf4, 0x68, 0x63, 0xff, 0x45, 0xc5, 0xbf, 0x02, 0xa7, 0x22, 0x57, 0xae, 0x66, - 0xe0, 0x7a, 0x62, 0x2d, 0x4f, 0xed, 0x96, 0x9b, 0xbd, 0x24, 0x98, 0x55, 0x8e, 0x7c, 0x06, 0x1e, - 0xe1, 0x4b, 0xbd, 0xd3, 0x12, 0xf6, 0x59, 0x61, 0x4c, 0x88, 0x5d, 0x46, 0x98, 0x94, 0x90, 0x8f, - 0x34, 0xb3, 0xc9, 0xb0, 0x5f, 0x79, 0xf2, 0x45, 0x18, 0xe9, 0x58, 0x1d, 0x6a, 0x5b, 0x0e, 0xd7, - 0xcf, 0x72, 0xbb, 0x84, 0xac, 0xc6, 0xc0, 0xa2, 0x0d, 0x5c, 0x3c, 0x15, 0x13, 0x0c, 0x33, 0x7b, - 0x6a, 0xf5, 0x18, 0x7b, 0xea, 0xbf, 0x15, 0x61, 0x78, 0xd1, 0x73, 0xbb, 0x9d, 0xc6, 0x2e, 0x69, - 0xc1, 0xd0, 0x2d, 0x7e, 0x78, 0x26, 0x8f, 0xa6, 0x06, 0x77, 0x87, 0x16, 0x67, 0x70, 0x91, 0x4a, - 0x24, 0xde, 0x51, 0xc2, 0xb3, 0x4e, 0xbc, 0x4d, 0x77, 0xa9, 0x29, 0xcf, 0xd0, 0xc2, 0x4e, 0x7c, - 0x8d, 0x25, 0xa2, 0xc8, 0x23, 0x6d, 0x38, 0xa1, 0xdb, 0xb6, 0x7b, 0x8b, 0x9a, 0xcb, 0x7a, 0x40, - 0x1d, 0xea, 0xab, 0x23, 0xc9, 0xc3, 0x9a, 0xa5, 0xf9, 0xb9, 0xfe, 0x6c, 0x12, 0x0a, 0xd3, 0xd8, - 0xe4, 0x35, 0x18, 0xf6, 0x03, 0xd7, 0x53, 0xca, 0x56, 0x7d, 0x66, 0x6e, 0xf0, 0x8f, 0xde, 0xf8, - 0x74, 0x53, 0x40, 0x09, 0x9b, 0xbd, 0x7c, 0x41, 0xc5, 0x40, 0xfb, 0x46, 0x01, 0xe0, 0xca, 0xda, - 0xda, 0xaa, 0x3c, 0x5e, 0x30, 0xa1, 0xac, 0x77, 0xc3, 0x83, 0xca, 0xc1, 0x0f, 0x04, 0x13, 0xfe, - 0x90, 0xf2, 0x0c, 0xaf, 0x1b, 0x6c, 0x21, 0x47, 0x27, 0x1f, 0x82, 0x61, 0xa9, 0x20, 0x4b, 0xb1, - 0x87, 0xae, 0x05, 0x52, 0x89, 0x46, 0x95, 0xaf, 0xfd, 0x4e, 0x11, 0x60, 0xc9, 0xb4, 0x69, 0x53, - 0x79, 0xb0, 0xd7, 0x82, 0x2d, 0x8f, 0xfa, 0x5b, 0xae, 0x6d, 0x0e, 0x78, 0x9a, 0xca, 
0x6d, 0xfe, - 0x6b, 0x0a, 0x04, 0x23, 0x3c, 0x62, 0xc2, 0x88, 0x1f, 0xd0, 0xce, 0x92, 0x13, 0x50, 0x6f, 0x47, - 0xb7, 0x07, 0x3c, 0x44, 0x19, 0x17, 0x76, 0x91, 0x08, 0x07, 0x13, 0xa8, 0x44, 0x87, 0xba, 0xe5, - 0x18, 0x62, 0x80, 0x34, 0x76, 0x07, 0xec, 0x48, 0x27, 0xd8, 0x8e, 0x63, 0x29, 0x82, 0xc1, 0x38, - 0xa6, 0xf6, 0xc3, 0x22, 0x9c, 0xe1, 0xfc, 0x58, 0x35, 0x12, 0xfe, 0x98, 0xe4, 0xff, 0xf4, 0xdc, - 0xb6, 0xfb, 0x1f, 0x07, 0x63, 0x2d, 0x2e, 0x6b, 0xad, 0xd0, 0x40, 0x8f, 0xf4, 0xb9, 0x28, 0x2d, - 0x76, 0xc5, 0xae, 0x0b, 0x65, 0x9f, 0xcd, 0x57, 0x42, 0x7a, 0xcd, 0x81, 0xbb, 0x50, 0x76, 0x03, - 0xf8, 0xec, 0x15, 0x9e, 0x1a, 0xf3, 0x59, 0x8b, 0xb3, 0x23, 0xff, 0x0f, 0x86, 0xfc, 0x40, 0x0f, - 0xba, 0x6a, 0x68, 0xae, 0x1f, 0x35, 0x63, 0x0e, 0x1e, 0xcd, 0x23, 0xe2, 0x1d, 0x25, 0x53, 0xed, - 0x87, 0x05, 0x38, 0x97, 0x5d, 0x70, 0xd9, 0xf2, 0x03, 0xf2, 0xbf, 0x7b, 0xc4, 0x7e, 0xc0, 0x2f, - 0xce, 0x4a, 0x73, 0xa1, 0x87, 0x0e, 0xd9, 0x2a, 0x25, 0x26, 0xf2, 0x00, 0x2a, 0x56, 0x40, 0xdb, - 0x6a, 0x7f, 0x79, 0xe3, 0x88, 0x9b, 0x1e, 0x5b, 0xda, 0x19, 0x17, 0x14, 0xcc, 0xb4, 0xb7, 0x8a, - 0xfd, 0x9a, 0xcc, 0x97, 0x0f, 0x3b, 0xe9, 0xf3, 0x7b, 0x2d, 0x9f, 0xcf, 0x6f, 0xb2, 0x42, 0xbd, - 0xae, 0xbf, 0xff, 0xb7, 0xd7, 0xf5, 0xf7, 0x46, 0x7e, 0xd7, 0xdf, 0x94, 0x18, 0xfa, 0x7a, 0x00, - 0xbf, 0x53, 0x82, 0xf3, 0x77, 0xeb, 0x36, 0x6c, 0x3d, 0x93, 0xbd, 0x33, 0xef, 0x7a, 0x76, 0xf7, - 0x7e, 0x48, 0x66, 0xa0, 0xd2, 0xd9, 0xd2, 0x7d, 0xa5, 0x94, 0xa9, 0x0d, 0x4b, 0x65, 0x95, 0x25, - 0xde, 0x61, 0x93, 0x06, 0x57, 0xe6, 0xf8, 0x2b, 0x0a, 0x52, 0x36, 0x1d, 0xb7, 0xa9, 0xef, 0x47, - 0x36, 0x81, 0x70, 0x3a, 0x5e, 0x11, 0xc9, 0xa8, 0xf2, 0x49, 0x00, 0x43, 0xc2, 0xc4, 0x2c, 0x57, - 0xa6, 0xc1, 0x1d, 0xb9, 0x32, 0xdc, 0xc4, 0xa3, 0x46, 0xc9, 0xd3, 0x0a, 0xc9, 0x8b, 0x4c, 0x41, - 0x39, 0x88, 0x9c, 0x76, 0xd5, 0xd6, 0xbc, 0x9c, 0xa1, 0x9f, 0x72, 0x3a, 0xb6, 0xb1, 0x77, 0x37, - 0xb8, 0x51, 0xdd, 0x94, 0xe7, 0xe7, 0x96, 0xeb, 0x70, 0x85, 0xac, 0x14, 0x6d, 0xec, 0x6f, 0xf4, - 0x50, 0x60, 0x46, 0x29, 
0xed, 0x6f, 0xaa, 0x70, 0x26, 0xbb, 0x3f, 0x30, 0xb9, 0xed, 0x50, 0xcf, - 0x67, 0xd8, 0x85, 0xa4, 0xdc, 0x6e, 0x8a, 0x64, 0x54, 0xf9, 0xef, 0x69, 0x87, 0xb3, 0x6f, 0x16, - 0xe0, 0xac, 0x27, 0xcf, 0x88, 0xee, 0x87, 0xd3, 0xd9, 0x63, 0xc2, 0x9c, 0xd1, 0x87, 0x21, 0xf6, - 0xaf, 0x0b, 0xf9, 0xad, 0x02, 0x4c, 0xb4, 0x53, 0x76, 0x8e, 0x63, 0xbc, 0x30, 0xc6, 0xbd, 0xe2, - 0x57, 0xfa, 0xf0, 0xc3, 0xbe, 0x35, 0x21, 0x5f, 0x84, 0x7a, 0x87, 0xf5, 0x0b, 0x3f, 0xa0, 0x8e, - 0xa1, 0xee, 0x8c, 0x0d, 0x3e, 0x92, 0x56, 0x23, 0x2c, 0xe5, 0x8a, 0x26, 0xf4, 0x83, 0x58, 0x06, - 0xc6, 0x39, 0x3e, 0xe0, 0x37, 0xc4, 0x2e, 0x41, 0xd5, 0xa7, 0x41, 0x60, 0x39, 0x2d, 0xb1, 0xdf, - 0xa8, 0x89, 0xb1, 0xd2, 0x94, 0x69, 0x18, 0xe6, 0x92, 0x0f, 0x43, 0x8d, 0x1f, 0x39, 0xcd, 0x7a, - 0x2d, 0x7f, 0xa2, 0xc6, 0xdd, 0xc5, 0x46, 0x85, 0x03, 0x9c, 0x4c, 0xc4, 0x28, 0x9f, 0x3c, 0x0d, - 0x23, 0x1b, 0x7c, 0xf8, 0xca, 0x4b, 0xc3, 0xc2, 0xc6, 0xc5, 0xb5, 0xb5, 0x46, 0x2c, 0x1d, 0x13, - 0x54, 0x64, 0x06, 0x80, 0x86, 0xe7, 0x72, 0x69, 0x7b, 0x56, 0x74, 0x62, 0x87, 0x31, 0x2a, 0xf2, - 0x18, 0x94, 0x02, 0xdb, 0xe7, 0x36, 0xac, 0x6a, 0xb4, 0x05, 0x5d, 0x5b, 0x6e, 0x22, 0x4b, 0xd7, - 0x7e, 0x5e, 0x80, 0x13, 0xa9, 0xcb, 0x25, 0xac, 0x48, 0xd7, 0xb3, 0xe5, 0x34, 0x12, 0x16, 0x59, - 0xc7, 0x65, 0x64, 0xe9, 0xe4, 0x55, 0xa9, 0x96, 0x17, 0x73, 0xc6, 0x47, 0xb8, 0xae, 0x07, 0x3e, - 0xd3, 0xc3, 0x7b, 0x34, 0x72, 0x7e, 0xcc, 0x17, 0xd5, 0x47, 0xae, 0x03, 0xb1, 0x63, 0xbe, 0x28, - 0x0f, 0x13, 0x94, 0x29, 0x83, 0x5f, 0xf9, 0x20, 0x06, 0x3f, 0xed, 0x6b, 0xc5, 0x98, 0x04, 0xa4, - 0x66, 0x7f, 0x0f, 0x09, 0x3c, 0xc9, 0x16, 0xd0, 0x70, 0x71, 0xaf, 0xc5, 0xd7, 0x3f, 0xbe, 0x18, - 0xcb, 0x5c, 0xf2, 0x92, 0x90, 0x7d, 0x29, 0xe7, 0x2d, 0xd4, 0xb5, 0xe5, 0xa6, 0xf0, 0xae, 0x52, - 0x5f, 0x2d, 0xfc, 0x04, 0xe5, 0x63, 0xfa, 0x04, 0xda, 0x5f, 0x94, 0xa0, 0x7e, 0xd5, 0xdd, 0x78, - 0x8f, 0x78, 0x50, 0x67, 0x2f, 0x53, 0xc5, 0x77, 0x71, 0x99, 0x5a, 0x87, 0x47, 0x82, 0xc0, 0x6e, - 0x52, 0xc3, 0x75, 0x4c, 0x7f, 0x76, 0x33, 0xa0, 0xde, 0x82, 
0xe5, 0x58, 0xfe, 0x16, 0x35, 0xe5, - 0x71, 0xd2, 0xa3, 0xfb, 0x7b, 0x93, 0x8f, 0xac, 0xad, 0x2d, 0x67, 0x91, 0x60, 0xbf, 0xb2, 0x7c, - 0xda, 0xd0, 0x8d, 0x6d, 0x77, 0x73, 0x93, 0xdf, 0x94, 0x91, 0x3e, 0x37, 0x62, 0xda, 0x88, 0xa5, - 0x63, 0x82, 0x4a, 0x7b, 0xbb, 0x08, 0xb5, 0xf0, 0xe6, 0x3b, 0x79, 0x02, 0x86, 0x37, 0x3c, 0x77, - 0x9b, 0x7a, 0xe2, 0xe4, 0x4e, 0xde, 0x94, 0x69, 0x88, 0x24, 0x54, 0x79, 0xe4, 0x71, 0xa8, 0x04, - 0x6e, 0xc7, 0x32, 0xd2, 0x06, 0xb5, 0x35, 0x96, 0x88, 0x22, 0xef, 0xf8, 0x3a, 0xf8, 0x93, 0x09, - 0xd5, 0xae, 0xd6, 0x57, 0x19, 0x7b, 0x05, 0xca, 0xbe, 0xee, 0xdb, 0x72, 0x3d, 0xcd, 0x71, 0x89, - 0x7c, 0xb6, 0xb9, 0x2c, 0x2f, 0x91, 0xcf, 0x36, 0x97, 0x91, 0x83, 0x6a, 0x3f, 0x29, 0x42, 0x5d, - 0xc8, 0x4d, 0xcc, 0x0a, 0x47, 0x29, 0xb9, 0x17, 0xb8, 0x2b, 0x85, 0xdf, 0x6d, 0x53, 0x8f, 0x9b, - 0x99, 0xe4, 0x24, 0x17, 0x3f, 0x1f, 0x88, 0x32, 0x43, 0x77, 0x8a, 0x28, 0x49, 0x89, 0xbe, 0x7c, - 0x8c, 0xa2, 0xaf, 0x1c, 0x48, 0xf4, 0x43, 0xc7, 0x21, 0xfa, 0x37, 0x8b, 0x50, 0x5b, 0xb6, 0x36, - 0xa9, 0xb1, 0x6b, 0xd8, 0xfc, 0x4e, 0xa0, 0x49, 0x6d, 0x1a, 0xd0, 0x45, 0x4f, 0x37, 0xe8, 0x2a, - 0xf5, 0x2c, 0x1e, 0x19, 0x86, 0x8d, 0x0f, 0x3e, 0x03, 0xc9, 0x3b, 0x81, 0xf3, 0x7d, 0x68, 0xb0, - 0x6f, 0x69, 0xb2, 0x04, 0x23, 0x26, 0xf5, 0x2d, 0x8f, 0x9a, 0xab, 0xb1, 0x8d, 0xca, 0x13, 0x6a, - 0xa9, 0x99, 0x8f, 0xe5, 0xdd, 0xd9, 0x9b, 0x1c, 0x55, 0x06, 0x4a, 0xb1, 0x63, 0x49, 0x14, 0x65, - 0x43, 0xbe, 0xa3, 0x77, 0xfd, 0xac, 0x3a, 0xc6, 0x86, 0xfc, 0x6a, 0x36, 0x09, 0xf6, 0x2b, 0xab, - 0x55, 0xa0, 0xb4, 0xec, 0xb6, 0xb4, 0xb7, 0x4a, 0x10, 0x86, 0x10, 0x22, 0xff, 0xbf, 0x00, 0x75, - 0xdd, 0x71, 0xdc, 0x40, 0x86, 0xe7, 0x11, 0x27, 0xf0, 0x98, 0x3b, 0x52, 0xd1, 0xd4, 0x6c, 0x04, - 0x2a, 0x0e, 0x6f, 0xc3, 0x03, 0xe5, 0x58, 0x0e, 0xc6, 0x79, 0x93, 0x6e, 0xea, 0x3c, 0x79, 0x25, - 0x7f, 0x2d, 0x0e, 0x70, 0x7a, 0x7c, 0xee, 0x53, 0x30, 0x9e, 0xae, 0xec, 0x61, 0x8e, 0x83, 0x72, - 0x1d, 0xcc, 0x17, 0x01, 0x22, 0x9f, 0x92, 0xfb, 0x60, 0xc4, 0xb2, 0x12, 0x46, 0xac, 0xc5, 0xc1, - 
0x05, 0x1c, 0x56, 0xba, 0xaf, 0xe1, 0xea, 0xf5, 0x94, 0xe1, 0x6a, 0xe9, 0x28, 0x98, 0xdd, 0xdd, - 0x58, 0xf5, 0xdb, 0x05, 0x18, 0x8f, 0x88, 0xe5, 0x0d, 0xd9, 0xe7, 0x60, 0xd4, 0xa3, 0xba, 0xd9, - 0xd0, 0x03, 0x63, 0x8b, 0xbb, 0x7a, 0x17, 0xb8, 0x6f, 0xf6, 0xc9, 0xfd, 0xbd, 0xc9, 0x51, 0x8c, - 0x67, 0x60, 0x92, 0x8e, 0xe8, 0x50, 0x67, 0x09, 0x6b, 0x56, 0x9b, 0xba, 0xdd, 0x60, 0x40, 0xab, - 0x29, 0xdf, 0xb0, 0x60, 0x04, 0x83, 0x71, 0x4c, 0xed, 0x9d, 0x02, 0x8c, 0xc5, 0x2b, 0x7c, 0xec, - 0x16, 0xb5, 0xad, 0xa4, 0x45, 0x6d, 0xee, 0x08, 0xbe, 0x49, 0x1f, 0x2b, 0xda, 0x4f, 0xab, 0xf1, - 0xa6, 0x71, 0xcb, 0x59, 0xdc, 0x58, 0x50, 0xb8, 0xab, 0xb1, 0xe0, 0xbd, 0x1f, 0x35, 0xa6, 0x9f, - 0x96, 0x5b, 0x7e, 0x80, 0xb5, 0xdc, 0x77, 0x33, 0xf4, 0x4c, 0x2c, 0x7c, 0xca, 0x50, 0x8e, 0xf0, - 0x29, 0xed, 0x30, 0x7c, 0xca, 0xf0, 0x91, 0x4d, 0x3a, 0x07, 0x09, 0xa1, 0x52, 0xbd, 0xaf, 0x21, - 0x54, 0x6a, 0xc7, 0x15, 0x42, 0x05, 0xf2, 0x86, 0x50, 0xf9, 0x6a, 0x01, 0xc6, 0xcc, 0xc4, 0x8d, - 0x59, 0x6e, 0x5b, 0xc8, 0xb3, 0xd4, 0x24, 0x2f, 0xe0, 0x8a, 0x2b, 0x53, 0xc9, 0x34, 0x4c, 0xb1, - 0xd4, 0x7e, 0x5c, 0x8e, 0xaf, 0x03, 0xf7, 0xdb, 0x54, 0xfd, 0x6c, 0xd2, 0x54, 0x7d, 0x31, 0x6d, - 0xaa, 0x3e, 0x11, 0xf3, 0x22, 0x8d, 0x9b, 0xab, 0x3f, 0x12, 0x9b, 0x1e, 0xd9, 0x9c, 0x34, 0x1a, - 0x49, 0x3a, 0x63, 0x8a, 0xfc, 0x08, 0x54, 0x7d, 0x15, 0xec, 0x51, 0x6c, 0x6c, 0xa2, 0xef, 0xa2, - 0x02, 0x31, 0x86, 0x14, 0x4c, 0x13, 0xf7, 0xa8, 0xee, 0xbb, 0x4e, 0x5a, 0x13, 0x47, 0x9e, 0x8a, - 0x32, 0x37, 0x6e, 0x32, 0x1f, 0xba, 0x87, 0xc9, 0x5c, 0x87, 0xba, 0xad, 0xfb, 0xc1, 0x7a, 0xc7, - 0xd4, 0x03, 0x6a, 0xca, 0xf1, 0xf6, 0xdf, 0x0f, 0xb6, 0x56, 0xb1, 0xf5, 0x2f, 0x52, 0x08, 0x97, - 0x23, 0x18, 0x8c, 0x63, 0x12, 0x13, 0x46, 0xd8, 0x2b, 0x1f, 0x0d, 0xe6, 0xac, 0x0a, 0x01, 0x70, - 0x18, 0x1e, 0xa1, 0xa5, 0x67, 0x39, 0x86, 0x83, 0x09, 0xd4, 0x3e, 0x56, 0xf5, 0xda, 0x40, 0x56, - 0xf5, 0xaf, 0xd6, 0xa0, 0x7e, 0x5d, 0x0f, 0xac, 0x1d, 0xca, 0x4f, 0x71, 0x8e, 0xc7, 0x94, 0xfe, - 0x6b, 0x05, 0x38, 0x93, 0x74, 0xd5, 
0x3b, 0x46, 0x7b, 0x3a, 0x0f, 0xfc, 0x81, 0x99, 0xdc, 0xb0, - 0x4f, 0x2d, 0xb8, 0x65, 0xbd, 0xc7, 0xf3, 0xef, 0xb8, 0x2d, 0xeb, 0xcd, 0x7e, 0x0c, 0xb1, 0x7f, - 0x5d, 0xde, 0x2b, 0x96, 0xf5, 0x07, 0x3b, 0x30, 0x5b, 0xca, 0xee, 0x3f, 0xfc, 0xc0, 0xd8, 0xfd, - 0xab, 0x0f, 0x84, 0xb2, 0xd5, 0x89, 0xd9, 0xfd, 0x6b, 0x39, 0xfd, 0x4f, 0xa4, 0x77, 0xbb, 0x40, - 0xeb, 0x77, 0x7e, 0xc0, 0x2f, 0xa6, 0x2b, 0x7b, 0x2c, 0xd3, 0x51, 0x36, 0x74, 0xdf, 0x32, 0xe4, - 0xb2, 0x97, 0x23, 0x10, 0xa5, 0x8a, 0xd8, 0x25, 0x8e, 0xa9, 0xf9, 0x2b, 0x0a, 0xec, 0x28, 0x32, - 0x58, 0x31, 0x57, 0x64, 0x30, 0x32, 0x07, 0x65, 0x87, 0xed, 0x9e, 0x4b, 0x87, 0x8e, 0x05, 0x76, - 0xfd, 0x1a, 0xdd, 0x45, 0x5e, 0x58, 0x7b, 0xbb, 0x08, 0xc0, 0x9a, 0x7f, 0x30, 0x0b, 0xfc, 0x87, - 0x60, 0xd8, 0xef, 0xf2, 0xbd, 0xb2, 0x5c, 0xb0, 0x23, 0xa7, 0x1d, 0x91, 0x8c, 0x2a, 0x9f, 0x3c, - 0x0e, 0x95, 0xd7, 0xbb, 0xb4, 0xab, 0x8e, 0x93, 0x43, 0x75, 0xed, 0xd3, 0x2c, 0x11, 0x45, 0xde, - 0xf1, 0x59, 0xd3, 0x94, 0xa5, 0xbe, 0x72, 0x5c, 0x96, 0xfa, 0x1a, 0x0c, 0x5f, 0x77, 0xb9, 0x0f, - 0xa0, 0xf6, 0x2f, 0x45, 0x80, 0xc8, 0xc7, 0x8a, 0x7c, 0xa3, 0x00, 0x0f, 0x87, 0x03, 0x2e, 0x10, - 0x5a, 0x37, 0x8f, 0xfd, 0x9a, 0xdb, 0x6a, 0x9f, 0x35, 0xd8, 0xf9, 0x0c, 0xb4, 0x9a, 0xc5, 0x0e, - 0xb3, 0x6b, 0x41, 0x10, 0xaa, 0xb4, 0xdd, 0x09, 0x76, 0xe7, 0x2d, 0x4f, 0xf6, 0xc0, 0x4c, 0x57, - 0xbe, 0xcb, 0x92, 0x46, 0x14, 0x95, 0x5b, 0x43, 0x3e, 0x88, 0x54, 0x0e, 0x86, 0x38, 0x64, 0x0b, - 0xaa, 0x8e, 0xfb, 0xaa, 0xcf, 0xc4, 0x21, 0xbb, 0xe3, 0x8b, 0x83, 0x8b, 0x5c, 0x88, 0x55, 0x58, - 0x79, 0xe5, 0x0b, 0x0e, 0x3b, 0x52, 0xd8, 0x5f, 0x2f, 0xc2, 0xa9, 0x0c, 0x39, 0x90, 0x17, 0x61, - 0x5c, 0xba, 0xb3, 0x45, 0x41, 0x90, 0x0b, 0x51, 0x10, 0xe4, 0x66, 0x2a, 0x0f, 0x7b, 0xa8, 0xc9, - 0xab, 0x00, 0xba, 0x61, 0x50, 0xdf, 0x5f, 0x71, 0x4d, 0xa5, 0x8f, 0xbe, 0xb0, 0xbf, 0x37, 0x09, - 0xb3, 0x61, 0xea, 0x9d, 0xbd, 0xc9, 0x8f, 0x66, 0x79, 0xa8, 0xa6, 0xe4, 0x1c, 0x15, 0xc0, 0x18, - 0x24, 0xf9, 0x3c, 0x80, 0xd8, 0x7a, 0x85, 0x97, 0xe8, 0xef, 0x61, 0xaf, 
0x98, 0x52, 0xe1, 0x8a, - 0xa6, 0x3e, 0xdd, 0xd5, 0x9d, 0xc0, 0x0a, 0x76, 0x45, 0xcc, 0x92, 0x9b, 0x21, 0x0a, 0xc6, 0x10, - 0xb5, 0x3f, 0x2d, 0x42, 0x55, 0x59, 0x4a, 0xef, 0x83, 0x79, 0xac, 0x95, 0x30, 0x8f, 0x1d, 0x91, - 0x4f, 0x6a, 0x96, 0x71, 0xcc, 0x4d, 0x19, 0xc7, 0x16, 0xf3, 0xb3, 0xba, 0xbb, 0x69, 0xec, 0xdb, - 0x45, 0x18, 0x53, 0xa4, 0x79, 0x0d, 0x63, 0x9f, 0x84, 0x13, 0xe2, 0x2c, 0x79, 0x45, 0xbf, 0x2d, - 0xc2, 0xb7, 0x70, 0x81, 0x95, 0x85, 0x1b, 0x68, 0x23, 0x99, 0x85, 0x69, 0x5a, 0xd6, 0xad, 0x45, - 0xd2, 0x3a, 0xdb, 0x47, 0x88, 0xd3, 0x27, 0xb1, 0xdf, 0xe1, 0xdd, 0xba, 0x91, 0xca, 0xc3, 0x1e, - 0xea, 0xb4, 0x65, 0xae, 0x7c, 0x0c, 0x96, 0xb9, 0xbf, 0x2d, 0xc0, 0x48, 0x24, 0xaf, 0x63, 0xb7, - 0xcb, 0x6d, 0x26, 0xed, 0x72, 0xb3, 0xb9, 0xbb, 0x43, 0x1f, 0xab, 0xdc, 0x2f, 0x0d, 0x43, 0xc2, - 0x35, 0x9a, 0x6c, 0xc0, 0x39, 0x2b, 0xd3, 0xc1, 0x2b, 0x36, 0xdb, 0x84, 0x77, 0x7d, 0x97, 0xfa, - 0x52, 0xe2, 0x5d, 0x50, 0x48, 0x17, 0xaa, 0x3b, 0xd4, 0x0b, 0x2c, 0x83, 0xaa, 0xf6, 0x2d, 0xe6, - 0x56, 0xc9, 0xa4, 0xed, 0x31, 0x94, 0xe9, 0x4d, 0xc9, 0x00, 0x43, 0x56, 0x64, 0x03, 0x2a, 0xd4, - 0x6c, 0x51, 0x15, 0x50, 0x27, 0x67, 0xb8, 0xca, 0x50, 0x9e, 0xec, 0xcd, 0x47, 0x01, 0x4d, 0x7c, - 0xa8, 0xd9, 0xea, 0x6c, 0x49, 0xf6, 0xc3, 0xc1, 0x15, 0xac, 0xf0, 0x94, 0x2a, 0xba, 0x6b, 0x1f, - 0x26, 0x61, 0xc4, 0x87, 0x6c, 0x87, 0x46, 0xae, 0xca, 0x11, 0x4d, 0x1e, 0x77, 0x31, 0x71, 0xf9, - 0x50, 0xbb, 0xa5, 0x07, 0xd4, 0x6b, 0xeb, 0xde, 0xb6, 0xdc, 0x6d, 0x0c, 0xde, 0xc2, 0x97, 0x14, - 0x52, 0xd4, 0xc2, 0x30, 0x09, 0x23, 0x3e, 0xc4, 0x85, 0x5a, 0x20, 0xd5, 0x67, 0x65, 0xc9, 0x1b, - 0x9c, 0xa9, 0x52, 0xc4, 0x7d, 0xe9, 0x22, 0xad, 0x5e, 0x31, 0xe2, 0x41, 0x76, 0x12, 0xa1, 0x7c, - 0x45, 0x00, 0xe7, 0x46, 0x0e, 0x8b, 0xb0, 0x84, 0x8a, 0x96, 0x9b, 0xec, 0x90, 0xc0, 0xda, 0xdb, - 0x95, 0x68, 0x5a, 0xbe, 0xdf, 0x76, 0xaa, 0xa7, 0x93, 0x76, 0xaa, 0x0b, 0x69, 0x3b, 0x55, 0xea, - 0x88, 0xf2, 0xf0, 0x4e, 0x95, 0x29, 0x0b, 0x51, 0xf9, 0x18, 0x2c, 0x44, 0x4f, 0x41, 0x7d, 0x87, - 0xcf, 0x04, 
0x22, 0x3a, 0x4f, 0x85, 0x2f, 0x23, 0x7c, 0x66, 0xbf, 0x19, 0x25, 0x63, 0x9c, 0x86, - 0x15, 0x91, 0x3f, 0x2f, 0x08, 0xc3, 0x9b, 0xca, 0x22, 0xcd, 0x28, 0x19, 0xe3, 0x34, 0xdc, 0x1f, - 0xcb, 0x72, 0xb6, 0x45, 0x81, 0x61, 0x5e, 0x40, 0xf8, 0x63, 0xa9, 0x44, 0x8c, 0xf2, 0xc9, 0x25, - 0xa8, 0x76, 0xcd, 0x4d, 0x41, 0x5b, 0xe5, 0xb4, 0x5c, 0xc3, 0x5c, 0x9f, 0x5f, 0x90, 0xd1, 0x82, - 0x54, 0x2e, 0xab, 0x49, 0x5b, 0xef, 0xa8, 0x0c, 0xbe, 0x37, 0x94, 0x35, 0x59, 0x89, 0x92, 0x31, - 0x4e, 0x43, 0x3e, 0x0e, 0x63, 0x1e, 0x35, 0xbb, 0x06, 0x0d, 0x4b, 0x01, 0x2f, 0x45, 0xc4, 0x5f, - 0x1a, 0xe2, 0x39, 0x98, 0xa2, 0xec, 0x63, 0xe7, 0xaa, 0x0f, 0x64, 0xe7, 0xfa, 0x41, 0x01, 0x48, - 0xaf, 0xff, 0x32, 0xd9, 0x82, 0x21, 0x87, 0x5b, 0xbf, 0x72, 0x07, 0x44, 0x8e, 0x19, 0xd1, 0xc4, - 0xb4, 0x24, 0x13, 0x24, 0x3e, 0x71, 0xa0, 0x4a, 0x6f, 0x07, 0xd4, 0x73, 0xc2, 0xfb, 0x0c, 0x47, - 0x13, 0x7c, 0x59, 0xec, 0x06, 0x24, 0x32, 0x86, 0x3c, 0xb4, 0x1f, 0x15, 0xa1, 0x1e, 0xa3, 0xbb, - 0xd7, 0xa6, 0x92, 0x5f, 0xa9, 0x16, 0x46, 0xa7, 0x75, 0xcf, 0x96, 0x23, 0x2c, 0x76, 0xa5, 0x5a, - 0x66, 0xe1, 0x32, 0xc6, 0xe9, 0xc8, 0x0c, 0x40, 0x5b, 0xf7, 0x03, 0xea, 0xf1, 0xd5, 0x37, 0x75, - 0x91, 0x79, 0x25, 0xcc, 0xc1, 0x18, 0x15, 0xb9, 0x28, 0xc3, 0x67, 0x97, 0x93, 0x81, 0xe7, 0xfa, - 0xc4, 0xc6, 0xae, 0x1c, 0x41, 0x6c, 0x6c, 0xd2, 0x82, 0x71, 0x55, 0x6b, 0x95, 0x7b, 0xb8, 0xb0, - 0x64, 0x62, 0xff, 0x92, 0x82, 0xc0, 0x1e, 0x50, 0xed, 0xed, 0x02, 0x8c, 0x26, 0x4c, 0x1e, 0x22, - 0x64, 0x9c, 0xf2, 0xbe, 0x4f, 0x84, 0x8c, 0x8b, 0x39, 0xcd, 0x3f, 0x09, 0x43, 0x42, 0x40, 0x69, - 0xa7, 0x3a, 0x21, 0x42, 0x94, 0xb9, 0x6c, 0x2e, 0x93, 0x46, 0xd5, 0xf4, 0x5c, 0x26, 0xad, 0xae, - 0xa8, 0xf2, 0x85, 0xb9, 0x5d, 0xd4, 0xae, 0xd7, 0xdc, 0x2e, 0xd2, 0x31, 0xa4, 0xd0, 0x7e, 0x5c, - 0x02, 0xee, 0x82, 0x42, 0x9e, 0x83, 0x5a, 0x9b, 0x1a, 0x5b, 0xba, 0x63, 0xf9, 0x2a, 0x64, 0x24, - 0xdb, 0xdd, 0xd6, 0x56, 0x54, 0xe2, 0x1d, 0x06, 0x30, 0xdb, 0x5c, 0xe6, 0x5e, 0xde, 0x11, 0x2d, - 0x31, 0x60, 0xa8, 0xe5, 0xfb, 0x7a, 0xc7, 0xca, 
0x7d, 0x02, 0x2a, 0x42, 0xf4, 0x89, 0x41, 0x24, - 0x9e, 0x51, 0x42, 0x13, 0x03, 0x2a, 0x1d, 0x5b, 0xb7, 0x9c, 0xdc, 0xff, 0x28, 0x61, 0x2d, 0x58, - 0x65, 0x48, 0xc2, 0xa4, 0xc3, 0x1f, 0x51, 0x60, 0x93, 0x2e, 0xd4, 0x7d, 0xc3, 0xd3, 0xdb, 0xfe, - 0x96, 0x3e, 0xf3, 0xcc, 0xb3, 0xb9, 0x95, 0xa4, 0x88, 0x95, 0x98, 0xb3, 0xe7, 0x70, 0x76, 0xa5, - 0x79, 0x65, 0x76, 0xe6, 0x99, 0x67, 0x31, 0xce, 0x27, 0xce, 0xf6, 0x99, 0xa7, 0x66, 0x64, 0xbf, - 0x3f, 0x72, 0xb6, 0xcf, 0x3c, 0x35, 0x83, 0x71, 0x3e, 0xda, 0xbf, 0x17, 0xa0, 0x16, 0xd2, 0x92, - 0x75, 0x00, 0x36, 0x02, 0x65, 0x50, 0xbd, 0x43, 0x05, 0xb8, 0xe7, 0xbb, 0xe2, 0xf5, 0xb0, 0x30, - 0xc6, 0x80, 0x32, 0xa2, 0x0e, 0x16, 0x8f, 0x3a, 0xea, 0xe0, 0x34, 0xd4, 0xb6, 0x74, 0xc7, 0xf4, - 0xb7, 0xf4, 0x6d, 0x31, 0x11, 0xc5, 0xe2, 0x70, 0x5e, 0x51, 0x19, 0x18, 0xd1, 0x68, 0x7f, 0x3c, - 0x04, 0xe2, 0xd8, 0x92, 0x0d, 0x15, 0xd3, 0xf2, 0x85, 0xdf, 0x6c, 0x81, 0x97, 0x0c, 0x87, 0xca, - 0xbc, 0x4c, 0xc7, 0x90, 0x82, 0x9c, 0x85, 0x52, 0xdb, 0x72, 0xe4, 0x89, 0x07, 0x37, 0x78, 0xad, - 0x58, 0x0e, 0xb2, 0x34, 0x9e, 0xa5, 0xdf, 0x96, 0x2e, 0x4f, 0x22, 0x4b, 0xbf, 0x8d, 0x2c, 0x8d, - 0x6d, 0x41, 0x6d, 0xd7, 0xdd, 0xde, 0xd0, 0x8d, 0x6d, 0xe5, 0x19, 0x55, 0xe6, 0x0b, 0x21, 0xdf, - 0x82, 0x2e, 0x27, 0xb3, 0x30, 0x4d, 0x4b, 0xd6, 0xe1, 0x91, 0x37, 0xa8, 0xe7, 0xca, 0x51, 0xde, - 0xb4, 0x29, 0xed, 0x28, 0x18, 0xa1, 0x42, 0x70, 0x07, 0xab, 0xcf, 0x66, 0x93, 0x60, 0xbf, 0xb2, - 0xdc, 0x55, 0x53, 0xf7, 0x5a, 0x34, 0x58, 0xf5, 0x5c, 0x83, 0xfa, 0xbe, 0xe5, 0xb4, 0x14, 0xec, - 0x50, 0x04, 0xbb, 0x96, 0x4d, 0x82, 0xfd, 0xca, 0x92, 0x97, 0x61, 0x42, 0x64, 0x89, 0xc5, 0x76, - 0x76, 0x47, 0xb7, 0x6c, 0x7d, 0xc3, 0xb2, 0xd5, 0xaf, 0xbd, 0x46, 0xc5, 0xb9, 0xc2, 0x5a, 0x1f, - 0x1a, 0xec, 0x5b, 0x9a, 0x5c, 0x85, 0x71, 0x75, 0xaa, 0xb4, 0x4a, 0xbd, 0x66, 0x78, 0x94, 0x3d, - 0xda, 0xb8, 0xc0, 0xf6, 0x7b, 0xf3, 0xb4, 0xe3, 0x51, 0x83, 0x6b, 0x5d, 0x29, 0x2a, 0xec, 0x29, - 0x47, 0x10, 0xce, 0xf0, 0xf3, 0xea, 0xf5, 0xce, 0x9c, 0xeb, 0xda, 0xa6, 0x7b, 0xcb, 
0x51, 0x6d, - 0x17, 0x8a, 0x0d, 0x3f, 0x48, 0x6a, 0x66, 0x52, 0x60, 0x9f, 0x92, 0xac, 0xe5, 0x3c, 0x67, 0xde, - 0xbd, 0xe5, 0xa4, 0x51, 0x21, 0x6a, 0x79, 0xb3, 0x0f, 0x0d, 0xf6, 0x2d, 0x4d, 0x16, 0x80, 0xa4, - 0x5b, 0xb0, 0xde, 0xe1, 0xca, 0xd0, 0x68, 0xe3, 0x8c, 0x88, 0x8f, 0x91, 0xce, 0xc5, 0x8c, 0x12, - 0x64, 0x19, 0x4e, 0xa7, 0x53, 0x19, 0x3b, 0xee, 0x24, 0x3f, 0x2a, 0x22, 0x63, 0x62, 0x46, 0x3e, - 0x66, 0x96, 0xd2, 0xfe, 0xa4, 0x08, 0xa3, 0x89, 0x0b, 0xd5, 0x0f, 0xdc, 0xc5, 0x55, 0xa6, 0x81, - 0xb6, 0xfd, 0xd6, 0xd2, 0xfc, 0x15, 0xaa, 0x9b, 0xd4, 0xbb, 0x46, 0xd5, 0xe5, 0x77, 0x3e, 0xa9, - 0xac, 0x24, 0x72, 0x30, 0x45, 0x49, 0x36, 0xa1, 0x22, 0xec, 0xa9, 0x79, 0x7f, 0x95, 0xa0, 0x64, - 0xc4, 0x8d, 0xaa, 0x7c, 0xc9, 0x11, 0x26, 0x55, 0x01, 0xaf, 0x05, 0x30, 0x12, 0xa7, 0x60, 0x13, - 0x49, 0xa4, 0xac, 0x0d, 0x27, 0x14, 0xb5, 0x25, 0x28, 0x05, 0xc1, 0xa0, 0x57, 0x62, 0x85, 0x7d, - 0x7e, 0x6d, 0x19, 0x19, 0x86, 0xb6, 0xc9, 0xbe, 0x9d, 0xef, 0x5b, 0xae, 0x23, 0xe3, 0x23, 0xaf, - 0xc3, 0x70, 0x20, 0x4d, 0x54, 0x83, 0x5d, 0xe9, 0xe5, 0xe6, 0x62, 0x65, 0x9e, 0x52, 0x58, 0xda, - 0xdf, 0x15, 0xa1, 0x16, 0x6e, 0x27, 0x0f, 0x10, 0x77, 0xd8, 0x85, 0x5a, 0xe8, 0x6f, 0x93, 0xfb, - 0xb7, 0x67, 0x91, 0x1b, 0x08, 0xdf, 0x01, 0x85, 0xaf, 0x18, 0xf1, 0x88, 0xfb, 0xf2, 0x94, 0x72, - 0xf8, 0xf2, 0x74, 0x60, 0x38, 0xf0, 0xac, 0x56, 0x4b, 0xea, 0xb6, 0x79, 0x9c, 0x79, 0x42, 0x71, - 0xad, 0x09, 0x40, 0x29, 0x59, 0xf1, 0x82, 0x8a, 0x8d, 0xf6, 0x1a, 0x8c, 0xa7, 0x29, 0xb9, 0xe2, - 0x67, 0x6c, 0x51, 0xb3, 0x6b, 0x2b, 0x19, 0x47, 0x8a, 0x9f, 0x4c, 0xc7, 0x90, 0x82, 0x6d, 0xfe, - 0xd8, 0x67, 0x7a, 0xc3, 0x75, 0xd4, 0xb6, 0x9a, 0xeb, 0xd0, 0x6b, 0x32, 0x0d, 0xc3, 0x5c, 0xed, - 0x9f, 0x4b, 0x70, 0x36, 0x32, 0x0a, 0xac, 0xe8, 0x8e, 0xde, 0x3a, 0xc0, 0xbf, 0xae, 0xde, 0xbf, - 0x24, 0x71, 0xd8, 0xe0, 0xf1, 0xa5, 0x07, 0x20, 0x78, 0xfc, 0x4f, 0x0a, 0xc0, 0x7d, 0x03, 0xc9, - 0x17, 0x61, 0x44, 0x8f, 0xfd, 0xe6, 0x50, 0x7e, 0xce, 0xcb, 0xb9, 0x3f, 0x27, 0x77, 0x41, 0x0c, - 0x7d, 0x5d, 0xe2, 0xa9, 
0x98, 0x60, 0x48, 0x5c, 0xa8, 0x6e, 0xea, 0xb6, 0xcd, 0x74, 0xa1, 0xdc, - 0x87, 0x1c, 0x09, 0xe6, 0xbc, 0x9b, 0x2f, 0x48, 0x68, 0x0c, 0x99, 0x68, 0xff, 0x54, 0x80, 0xd1, - 0xa6, 0x6d, 0x99, 0x96, 0xd3, 0x3a, 0xc6, 0xa8, 0xf1, 0x37, 0xa0, 0xe2, 0xdb, 0x96, 0x49, 0x07, - 0x9c, 0xc7, 0xc5, 0x0a, 0xc2, 0x00, 0x50, 0xe0, 0x24, 0xc3, 0xd0, 0x97, 0x0e, 0x10, 0x86, 0xfe, - 0x67, 0x43, 0x20, 0xfd, 0x4b, 0x49, 0x17, 0x6a, 0x2d, 0x15, 0xdd, 0x5a, 0xb6, 0xf1, 0x4a, 0x8e, - 0xc8, 0x68, 0x89, 0x38, 0xd9, 0x62, 0xd6, 0x0d, 0x13, 0x31, 0xe2, 0x44, 0x68, 0xf2, 0xcf, 0x96, - 0xf3, 0x39, 0xff, 0x6c, 0x29, 0xd8, 0xf5, 0xfe, 0xdb, 0x52, 0x87, 0xf2, 0x56, 0x10, 0x74, 0xe4, - 0xb8, 0x1a, 0xdc, 0x81, 0x38, 0x0a, 0xce, 0x21, 0xb4, 0x11, 0xf6, 0x8e, 0x1c, 0x9a, 0xb1, 0x70, - 0xf4, 0xf0, 0x87, 0x4a, 0x73, 0xb9, 0x0e, 0xba, 0xe3, 0x2c, 0xd8, 0x3b, 0x72, 0x68, 0xf2, 0x05, - 0xa8, 0x07, 0x9e, 0xee, 0xf8, 0x9b, 0xae, 0xd7, 0xa6, 0x9e, 0xdc, 0x1d, 0x2e, 0xe4, 0xf8, 0xb9, - 0xe3, 0x5a, 0x84, 0x26, 0x4e, 0xd0, 0x12, 0x49, 0x18, 0xe7, 0x46, 0xb6, 0xa1, 0xda, 0x35, 0x45, - 0xc5, 0xa4, 0xd9, 0x64, 0x36, 0xcf, 0xff, 0x3a, 0x63, 0xc7, 0xd8, 0xea, 0x0d, 0x43, 0x06, 0xc9, - 0x7f, 0x87, 0x0d, 0x1f, 0xd5, 0xbf, 0xc3, 0xe2, 0xbd, 0x31, 0x2b, 0x72, 0x00, 0x69, 0x4b, 0x8d, - 0xd2, 0x69, 0x49, 0x2f, 0x9c, 0x85, 0xdc, 0xca, 0x9e, 0x60, 0x59, 0x0f, 0xb5, 0x52, 0xa7, 0x85, - 0x8a, 0x87, 0xd6, 0x06, 0x69, 0xdd, 0x26, 0x46, 0xe2, 0x0f, 0x1b, 0xe2, 0x3a, 0xcb, 0xf4, 0xc1, - 0xe6, 0x83, 0xf0, 0x57, 0x0f, 0xb1, 0x08, 0xbf, 0x99, 0xbf, 0xd2, 0xd0, 0xfe, 0xbe, 0x08, 0xa5, - 0xb5, 0xe5, 0xa6, 0x88, 0xda, 0xc7, 0x7f, 0x5f, 0x43, 0x9b, 0xdb, 0x56, 0xe7, 0x26, 0xf5, 0xac, - 0xcd, 0x5d, 0xb9, 0xe9, 0x8d, 0x45, 0xed, 0x4b, 0x53, 0x60, 0x46, 0x29, 0xf2, 0x0a, 0x8c, 0x18, - 0xfa, 0x1c, 0xf5, 0x82, 0x41, 0xb6, 0xf4, 0xfc, 0xde, 0xde, 0xdc, 0x6c, 0x54, 0x1c, 0x13, 0x60, - 0x64, 0x1d, 0xc0, 0x88, 0xa0, 0x4b, 0x87, 0x36, 0x44, 0xc4, 0x80, 0x63, 0x40, 0x04, 0xa1, 0xb6, - 0xcd, 0x48, 0x39, 0x6a, 0xf9, 0x30, 0xa8, 0xbc, 0xe7, 0x5c, 
0x53, 0x65, 0x31, 0x82, 0xd1, 0x1c, - 0x18, 0x4d, 0xfc, 0x76, 0x83, 0x7c, 0x0c, 0xaa, 0x6e, 0x27, 0x36, 0x9d, 0xd6, 0xb8, 0xbf, 0x5f, - 0xf5, 0x86, 0x4c, 0xbb, 0xb3, 0x37, 0x39, 0xba, 0xec, 0xb6, 0x2c, 0x43, 0x25, 0x60, 0x48, 0x4e, - 0x34, 0x18, 0xe2, 0x97, 0x6d, 0xd4, 0x4f, 0x37, 0xf8, 0xda, 0xc1, 0xe3, 0xe2, 0xfb, 0x28, 0x73, - 0xb4, 0x2f, 0x95, 0x21, 0x3a, 0x13, 0x22, 0x3e, 0x0c, 0x09, 0x67, 0x62, 0x39, 0x73, 0x1f, 0xab, - 0xdf, 0xb2, 0x64, 0x45, 0x5a, 0x50, 0x7a, 0xcd, 0xdd, 0xc8, 0x3d, 0x71, 0xc7, 0x6e, 0xd9, 0x0a, - 0x2b, 0x55, 0x2c, 0x01, 0x19, 0x07, 0xf2, 0xeb, 0x05, 0x38, 0xe9, 0xa7, 0x95, 0x4e, 0xd9, 0x1d, - 0x30, 0xbf, 0x76, 0x9d, 0x56, 0x63, 0xa5, 0x63, 0x66, 0xbf, 0x6c, 0xec, 0xad, 0x0b, 0x93, 0xbf, - 0x38, 0xac, 0x91, 0xdd, 0x69, 0x31, 0xe7, 0xaf, 0xe2, 0x92, 0xf2, 0x4f, 0xa6, 0xa1, 0x64, 0xa5, - 0x7d, 0xa5, 0x08, 0xf5, 0xd8, 0x6c, 0x9d, 0xfb, 0x5f, 0x2e, 0xb7, 0x53, 0xff, 0x72, 0x59, 0x1d, - 0xfc, 0xec, 0x32, 0xaa, 0xd5, 0x71, 0xff, 0xce, 0xe5, 0xcf, 0x8b, 0x50, 0x5a, 0x9f, 0x5f, 0x48, - 0x6e, 0x17, 0x0b, 0xf7, 0x61, 0xbb, 0xb8, 0x05, 0xc3, 0x1b, 0x5d, 0xcb, 0x0e, 0x2c, 0x27, 0x77, - 0x1c, 0x00, 0xf5, 0xeb, 0x1b, 0x79, 0x9d, 0x56, 0xa0, 0xa2, 0x82, 0x27, 0x2d, 0x18, 0x6e, 0x89, - 0x40, 0x6c, 0xb9, 0x3d, 0xba, 0x64, 0x40, 0x37, 0xc1, 0x48, 0xbe, 0xa0, 0x42, 0xd7, 0x76, 0x41, - 0xfe, 0x3c, 0xfb, 0xbe, 0x4b, 0x53, 0xfb, 0x02, 0x84, 0x5a, 0xc0, 0xfd, 0x67, 0xfe, 0xaf, 0x05, - 0x48, 0x2a, 0x3e, 0xf7, 0xbf, 0x37, 0x6d, 0xa7, 0x7b, 0xd3, 0xfc, 0x51, 0x0c, 0xbe, 0xec, 0x0e, - 0xa5, 0xfd, 0x51, 0x11, 0x86, 0xee, 0xdb, 0xdd, 0x4d, 0x9a, 0x70, 0x4e, 0x9b, 0xcb, 0x39, 0x31, - 0xf6, 0x75, 0x4d, 0x6b, 0xa7, 0x5c, 0xd3, 0xf2, 0xfe, 0xac, 0xf3, 0x1e, 0x8e, 0x69, 0x7f, 0x5d, - 0x00, 0x39, 0x2d, 0x2f, 0x39, 0x7e, 0xa0, 0x3b, 0x06, 0xff, 0x67, 0xbc, 0x5c, 0x03, 0xf2, 0x7a, - 0x40, 0x48, 0x2f, 0x21, 0xb1, 0xec, 0xf3, 0x67, 0x35, 0xe7, 0x93, 0x8f, 0x40, 0x75, 0xcb, 0xf5, - 0x03, 0x3e, 0xcf, 0x17, 0x93, 0x76, 0x9d, 0x2b, 0x32, 0x1d, 0x43, 0x8a, 0xf4, 0x49, 0x61, 0xa5, - 
0xff, 0x49, 0xa1, 0xf6, 0xad, 0x22, 0x8c, 0xbc, 0x57, 0x2e, 0xa0, 0x66, 0xb9, 0xf2, 0x95, 0x72, - 0xba, 0xf2, 0x95, 0x0f, 0xe3, 0xca, 0xa7, 0x7d, 0xaf, 0x00, 0x70, 0xdf, 0x6e, 0xbf, 0x9a, 0x49, - 0x2f, 0xbb, 0xdc, 0xfd, 0x2a, 0xdb, 0xc7, 0xee, 0xf7, 0x2b, 0xaa, 0x49, 0xdc, 0xc3, 0xee, 0xcd, - 0x02, 0x8c, 0xe9, 0x09, 0xaf, 0xb5, 0xdc, 0xaa, 0x65, 0xca, 0x09, 0x2e, 0xbc, 0xe9, 0x97, 0x4c, - 0xc7, 0x14, 0x5b, 0xf2, 0x7c, 0x14, 0x79, 0xf5, 0x7a, 0xd4, 0xed, 0x7b, 0x42, 0xa6, 0x72, 0x35, - 0x27, 0x41, 0x79, 0x0f, 0x2f, 0xc1, 0xd2, 0x91, 0x78, 0x09, 0xc6, 0xef, 0x3f, 0x95, 0xef, 0x7a, - 0xff, 0x69, 0x07, 0x6a, 0x9b, 0x9e, 0xdb, 0xe6, 0x8e, 0x78, 0xf2, 0x37, 0x9f, 0x97, 0x73, 0xac, - 0x29, 0xd1, 0x0f, 0xae, 0x23, 0x1b, 0xcf, 0x82, 0xc2, 0xc7, 0x88, 0x15, 0x37, 0x48, 0xbb, 0x82, - 0xeb, 0xd0, 0x51, 0x72, 0x0d, 0xe7, 0x92, 0x35, 0x81, 0x8e, 0x8a, 0x4d, 0xd2, 0xf9, 0x6e, 0xf8, - 0xfe, 0x38, 0xdf, 0x69, 0xbf, 0x58, 0x56, 0x13, 0xd8, 0x03, 0x17, 0xe4, 0xef, 0xbd, 0x7f, 0x6b, - 0x32, 0x7d, 0xa5, 0x71, 0xf8, 0x3e, 0x5e, 0x69, 0xac, 0x0e, 0xe4, 0xea, 0xb5, 0x57, 0x82, 0xd4, - 0xbe, 0xe9, 0xfd, 0xd3, 0x89, 0xff, 0x54, 0xa7, 0x13, 0x6f, 0x15, 0x21, 0x9a, 0x08, 0x0e, 0xe9, - 0xbd, 0xf1, 0x32, 0x54, 0xdb, 0xfa, 0xed, 0x79, 0x6a, 0xeb, 0xbb, 0x79, 0xfe, 0xcd, 0xb8, 0x22, - 0x31, 0x30, 0x44, 0x23, 0x3e, 0x80, 0x15, 0xc6, 0x47, 0xce, 0x6d, 0x6d, 0x8e, 0x42, 0x2d, 0x0b, - 0x7b, 0x56, 0xf4, 0x8e, 0x31, 0x36, 0xda, 0x5f, 0x15, 0x41, 0x06, 0xd2, 0x26, 0x14, 0x2a, 0x9b, - 0xd6, 0x6d, 0x6a, 0xe6, 0xf6, 0x64, 0x8c, 0xfd, 0x31, 0x57, 0x98, 0xd3, 0x79, 0x02, 0x0a, 0x74, - 0x6e, 0x27, 0x15, 0xc7, 0x23, 0x52, 0x7e, 0x39, 0xec, 0xa4, 0xf1, 0x63, 0x16, 0x69, 0x27, 0x15, - 0x49, 0xa8, 0x78, 0x08, 0xb3, 0x2c, 0x3f, 0xa3, 0x96, 0x22, 0xcd, 0x63, 0x96, 0x8d, 0x9d, 0x75, - 0x2b, 0xb3, 0xac, 0x2f, 0xee, 0x34, 0x4b, 0x1e, 0x8d, 0xcf, 0x7d, 0xf7, 0xfb, 0x17, 0x1e, 0xfa, - 0xde, 0xf7, 0x2f, 0x3c, 0xf4, 0xce, 0xf7, 0x2f, 0x3c, 0xf4, 0xa5, 0xfd, 0x0b, 0x85, 0xef, 0xee, - 0x5f, 0x28, 0x7c, 0x6f, 0xff, 0x42, 
0xe1, 0x9d, 0xfd, 0x0b, 0x85, 0x7f, 0xd8, 0xbf, 0x50, 0xf8, - 0x95, 0x7f, 0xbc, 0xf0, 0xd0, 0x67, 0x9f, 0x8b, 0xaa, 0x30, 0xad, 0xaa, 0x30, 0xad, 0x18, 0x4e, - 0x77, 0xb6, 0x5b, 0xd3, 0xac, 0x0a, 0x51, 0x8a, 0xaa, 0xc2, 0x7f, 0x04, 0x00, 0x00, 0xff, 0xff, - 0xb2, 0x71, 0xfd, 0x11, 0x25, 0x93, 0x00, 0x00, + 0x16, 0x6a, 0x6a, 0x23, 0xa8, 0x5a, 0x36, 0xd3, 0x28, 0xa2, 0x76, 0x8f, 0x48, 0x5f, 0xef, 0x5a, + 0x1e, 0x6d, 0x53, 0x27, 0xf0, 0x1b, 0x27, 0xd5, 0x36, 0x59, 0xe5, 0xfa, 0x18, 0xa1, 0x91, 0x8d, + 0x5e, 0xeb, 0x8e, 0x30, 0x4d, 0x3c, 0xde, 0x67, 0x56, 0x1f, 0xc0, 0xb4, 0xf3, 0x05, 0x38, 0x11, + 0x9a, 0x5f, 0xe4, 0x0e, 0x5e, 0x18, 0x2b, 0x9e, 0x66, 0xc5, 0x97, 0x92, 0x59, 0x77, 0xf6, 0x26, + 0x1f, 0xcb, 0xd8, 0xc3, 0x47, 0x04, 0x98, 0x06, 0xd3, 0xfe, 0xa0, 0x04, 0xbd, 0x6a, 0x77, 0x52, + 0x68, 0x85, 0xa3, 0x16, 0x5a, 0xfa, 0x83, 0xc4, 0xf4, 0xf9, 0xbc, 0x2c, 0x96, 0xff, 0xa3, 0xb2, + 0x1a, 0xa6, 0x74, 0xd4, 0x0d, 0xf3, 0xa0, 0x8c, 0x1d, 0xed, 0xad, 0x32, 0x8c, 0xcd, 0xeb, 0xb4, + 0xed, 0x3a, 0xf7, 0xdc, 0x84, 0x14, 0x1e, 0x88, 0x4d, 0xc8, 0x25, 0xa8, 0x7a, 0xb4, 0x63, 0x5b, + 0x86, 0xee, 0xf3, 0xa6, 0x97, 0x46, 0x3f, 0x94, 0x69, 0x18, 0xe6, 0xf6, 0xd9, 0x7c, 0x96, 0x1e, + 0xc8, 0xcd, 0x67, 0xf9, 0xdd, 0xdf, 0x7c, 0x6a, 0x5f, 0x29, 0x02, 0x57, 0x54, 0xc8, 0x45, 0x28, + 0xb3, 0x45, 0x38, 0x6d, 0xf2, 0xe0, 0x1d, 0x87, 0xe7, 0x90, 0x73, 0x50, 0x0c, 0x5c, 0x39, 0xf2, + 0x40, 0xe6, 0x17, 0xd7, 0x5c, 0x2c, 0x06, 0x2e, 0x79, 0x03, 0xc0, 0x70, 0x1d, 0xd3, 0x52, 0xb6, + 0xf0, 0x7c, 0x1f, 0xb6, 0xe0, 0x7a, 0xb7, 0x74, 0xcf, 0x9c, 0x0b, 0x11, 0xc5, 0xf6, 0x23, 0x7a, + 0xc7, 0x18, 0x37, 0xf2, 0x02, 0x0c, 0xb9, 0xce, 0x42, 0xd7, 0xb6, 0xb9, 0x40, 0x6b, 0x8d, 0xff, + 0xc2, 0xf6, 0x84, 0x37, 0x78, 0xca, 0x9d, 0xbd, 0xc9, 0xb3, 0x42, 0xbf, 0x65, 0x6f, 0x2f, 0x79, + 0x56, 0x60, 0x39, 0xad, 0x66, 0xe0, 0xe9, 0x01, 0x6d, 0xed, 0xa2, 0x2c, 0xa6, 0x7d, 0xbd, 0x00, + 0xf5, 0x05, 0xeb, 0x36, 0x35, 0x5f, 0xb2, 0x1c, 0xd3, 0xbd, 0x45, 0x10, 0x86, 0x6c, 0xea, 0xb4, + 0x82, 0x2d, 0xd9, 0xfb, 
0xa7, 0x62, 0x63, 0x2d, 0x3c, 0x42, 0x89, 0xea, 0xdf, 0xa6, 0x81, 0xce, + 0x46, 0xdf, 0x7c, 0x57, 0x1a, 0xf9, 0xc5, 0xa6, 0x94, 0x23, 0xa0, 0x44, 0x22, 0xd3, 0x50, 0x13, + 0xda, 0xa7, 0xe5, 0xb4, 0xb8, 0x0c, 0xab, 0xd1, 0xa4, 0xd7, 0x54, 0x19, 0x18, 0xd1, 0x68, 0xbb, + 0x70, 0xb2, 0x47, 0x0c, 0xc4, 0x84, 0x72, 0xa0, 0xb7, 0xd4, 0xfc, 0xba, 0x30, 0xb0, 0x80, 0xd7, + 0xf4, 0x56, 0x4c, 0xb8, 0x7c, 0x8d, 0x5f, 0xd3, 0xd9, 0x1a, 0xcf, 0xd0, 0xb5, 0x9f, 0x17, 0xa0, + 0xba, 0xd0, 0x75, 0x0c, 0xbe, 0x37, 0xba, 0xb7, 0x29, 0x4c, 0x29, 0x0c, 0xc5, 0x4c, 0x85, 0xa1, + 0x0b, 0x43, 0xdb, 0xb7, 0x42, 0x85, 0xa2, 0x3e, 0xb3, 0x32, 0x78, 0xaf, 0x90, 0x55, 0x9a, 0xba, + 0xc6, 0xf1, 0xc4, 0x49, 0xcd, 0x98, 0xac, 0xd0, 0xd0, 0xb5, 0x97, 0x38, 0x53, 0xc9, 0xec, 0xdc, + 0xc7, 0xa0, 0x1e, 0x23, 0x3b, 0x94, 0xd1, 0xf6, 0x77, 0xca, 0x30, 0xb4, 0xd8, 0x6c, 0xce, 0xae, + 0x2e, 0x91, 0x67, 0xa0, 0x2e, 0x8d, 0xf8, 0xd7, 0x23, 0x19, 0x84, 0x67, 0x38, 0xcd, 0x28, 0x0b, + 0xe3, 0x74, 0x4c, 0x1d, 0xf3, 0xa8, 0x6e, 0xb7, 0xe5, 0x60, 0x09, 0xd5, 0x31, 0x64, 0x89, 0x28, + 0xf2, 0x88, 0x0e, 0x63, 0x6c, 0x87, 0xc7, 0x44, 0x28, 0x76, 0x6f, 0x72, 0xd8, 0x1c, 0x70, 0x7f, + 0xc7, 0x95, 0xc4, 0xf5, 0x04, 0x00, 0xa6, 0x00, 0xc9, 0xf3, 0x50, 0xd5, 0xbb, 0xc1, 0x16, 0x57, + 0xa0, 0xc5, 0xd8, 0x38, 0xcf, 0xcf, 0x38, 0x64, 0xda, 0x9d, 0xbd, 0xc9, 0x91, 0x6b, 0xd8, 0x78, + 0x46, 0xbd, 0x63, 0x48, 0xcd, 0x2a, 0xa7, 0x76, 0x8c, 0xb2, 0x72, 0x95, 0x43, 0x57, 0x6e, 0x35, + 0x01, 0x80, 0x29, 0x40, 0xf2, 0x0a, 0x8c, 0x6c, 0xd3, 0xdd, 0x40, 0xdf, 0x90, 0x0c, 0x86, 0x0e, + 0xc3, 0x60, 0x9c, 0xa9, 0x70, 0xd7, 0x62, 0xc5, 0x31, 0x01, 0x46, 0x7c, 0x38, 0xbd, 0x4d, 0xbd, + 0x0d, 0xea, 0xb9, 0x72, 0xf7, 0x29, 0x99, 0x0c, 0x1f, 0x86, 0xc9, 0xc4, 0xfe, 0xde, 0xe4, 0xe9, + 0x6b, 0x19, 0x30, 0x98, 0x09, 0xae, 0xfd, 0xac, 0x08, 0x27, 0x16, 0xc5, 0x29, 0xaa, 0xeb, 0x89, + 0x45, 0x98, 0x9c, 0x85, 0x92, 0xd7, 0xe9, 0xf2, 0x9e, 0x53, 0x12, 0x76, 0x52, 0x5c, 0x5d, 0x47, + 0x96, 0x46, 0x5e, 0x86, 0xaa, 0x29, 0xa7, 0x0c, 0xb9, 0xf9, 
0x3d, 0xec, 0x44, 0xc3, 0x17, 0x41, + 0xf5, 0x86, 0x21, 0x1a, 0xd3, 0xf4, 0xdb, 0x7e, 0xab, 0x69, 0xbd, 0x41, 0xe5, 0x7e, 0x90, 0x6b, + 0xfa, 0x2b, 0x22, 0x09, 0x55, 0x1e, 0x5b, 0x55, 0xb7, 0xe9, 0xae, 0xd8, 0x0d, 0x95, 0xa3, 0x55, + 0xf5, 0x9a, 0x4c, 0xc3, 0x30, 0x97, 0x4c, 0xaa, 0xc1, 0xc2, 0x7a, 0x41, 0x59, 0xec, 0xe4, 0x6f, + 0xb2, 0x04, 0x39, 0x6e, 0xd8, 0x94, 0xf9, 0x9a, 0x15, 0x04, 0xd4, 0x93, 0xcd, 0x38, 0xd0, 0x94, + 0x79, 0x95, 0x23, 0xa0, 0x44, 0x22, 0x1f, 0x86, 0x1a, 0x07, 0x6f, 0xd8, 0xee, 0x06, 0x6f, 0xb8, + 0x9a, 0xd8, 0xd3, 0xdf, 0x54, 0x89, 0x18, 0xe5, 0x6b, 0xbf, 0x28, 0xc2, 0x99, 0x45, 0x1a, 0x08, + 0xad, 0x66, 0x9e, 0x76, 0x6c, 0x77, 0x97, 0xa9, 0x96, 0x48, 0x5f, 0x27, 0x2f, 0x02, 0x58, 0xfe, + 0x46, 0x73, 0xc7, 0xe0, 0xe3, 0x40, 0x8c, 0xe1, 0x8b, 0x72, 0x48, 0xc2, 0x52, 0xb3, 0x21, 0x73, + 0xee, 0x24, 0xde, 0x30, 0x56, 0x26, 0xda, 0x5e, 0x15, 0xef, 0xb2, 0xbd, 0x6a, 0x02, 0x74, 0x22, + 0x05, 0xb5, 0xc4, 0x29, 0xff, 0xbb, 0x62, 0x73, 0x18, 0xdd, 0x34, 0x06, 0x93, 0x47, 0x65, 0x74, + 0x60, 0xdc, 0xa4, 0x9b, 0x7a, 0xd7, 0x0e, 0x42, 0xa5, 0x5a, 0x0e, 0xe2, 0x83, 0xeb, 0xe5, 0xe1, + 0x09, 0xef, 0x7c, 0x0a, 0x09, 0x7b, 0xb0, 0xb5, 0xdf, 0x2d, 0xc1, 0xb9, 0x45, 0x1a, 0x84, 0x16, + 0x17, 0x39, 0x3b, 0x36, 0x3b, 0xd4, 0x60, 0xad, 0xf0, 0x66, 0x01, 0x86, 0x6c, 0x7d, 0x83, 0xda, + 0x6c, 0xf5, 0x62, 0x5f, 0xf3, 0xea, 0xc0, 0x0b, 0x41, 0x7f, 0x2e, 0x53, 0xcb, 0x9c, 0x43, 0x6a, + 0x69, 0x10, 0x89, 0x28, 0xd9, 0xb3, 0x49, 0xdd, 0xb0, 0xbb, 0x7e, 0x40, 0xbd, 0x55, 0xd7, 0x0b, + 0xa4, 0x3e, 0x19, 0x4e, 0xea, 0x73, 0x51, 0x16, 0xc6, 0xe9, 0xc8, 0x0c, 0x80, 0x61, 0x5b, 0xd4, + 0x09, 0x78, 0x29, 0x31, 0xae, 0x88, 0x6a, 0xdf, 0xb9, 0x30, 0x07, 0x63, 0x54, 0x8c, 0x55, 0xdb, + 0x75, 0xac, 0xc0, 0x15, 0xac, 0xca, 0x49, 0x56, 0x2b, 0x51, 0x16, 0xc6, 0xe9, 0x78, 0x31, 0x1a, + 0x78, 0x96, 0xe1, 0xf3, 0x62, 0x95, 0x54, 0xb1, 0x28, 0x0b, 0xe3, 0x74, 0x6c, 0xcd, 0x8b, 0x7d, + 0xff, 0xa1, 0xd6, 0xbc, 0x6f, 0xd7, 0xe0, 0x42, 0x42, 0xac, 0x81, 0x1e, 0xd0, 0xcd, 0xae, 0xdd, + 
0xa4, 0x81, 0x6a, 0xc0, 0x01, 0xd7, 0xc2, 0xff, 0x1b, 0xb5, 0xbb, 0xf0, 0xdd, 0x30, 0x8e, 0xa6, + 0xdd, 0x7b, 0x2a, 0x78, 0xa0, 0xb6, 0x9f, 0x86, 0x9a, 0xa3, 0x07, 0x3e, 0x1f, 0xb8, 0x72, 0x8c, + 0x86, 0x6a, 0xd8, 0x75, 0x95, 0x81, 0x11, 0x0d, 0x59, 0x85, 0xd3, 0x52, 0xc4, 0x97, 0x6f, 0x77, + 0x5c, 0x2f, 0xa0, 0x9e, 0x28, 0x2b, 0x97, 0x53, 0x59, 0xf6, 0xf4, 0x4a, 0x06, 0x0d, 0x66, 0x96, + 0x24, 0x2b, 0x70, 0xca, 0x10, 0xe7, 0xd9, 0xd4, 0x76, 0x75, 0x53, 0x01, 0x0a, 0x03, 0x57, 0xb8, + 0x35, 0x9a, 0xeb, 0x25, 0xc1, 0xac, 0x72, 0xe9, 0xde, 0x3c, 0x34, 0x50, 0x6f, 0x1e, 0x1e, 0xa4, + 0x37, 0x57, 0x07, 0xeb, 0xcd, 0xb5, 0x83, 0xf5, 0x66, 0x26, 0x79, 0xd6, 0x8f, 0xa8, 0xc7, 0xd4, + 0x13, 0xb1, 0xc2, 0xc6, 0xdc, 0x25, 0x42, 0xc9, 0x37, 0x33, 0x68, 0x30, 0xb3, 0x24, 0xd9, 0x80, + 0x73, 0x22, 0xfd, 0xb2, 0x63, 0x78, 0xbb, 0x1d, 0xb6, 0xf0, 0xc4, 0x70, 0xeb, 0x09, 0x0b, 0xe3, + 0xb9, 0x66, 0x5f, 0x4a, 0xbc, 0x0b, 0x0a, 0xf9, 0x04, 0x8c, 0x8a, 0x56, 0x5a, 0xd1, 0x3b, 0x1c, + 0x56, 0x38, 0x4f, 0x3c, 0x2c, 0x61, 0x47, 0xe7, 0xe2, 0x99, 0x98, 0xa4, 0x25, 0xb3, 0x70, 0xa2, + 0xb3, 0x63, 0xb0, 0xc7, 0xa5, 0xcd, 0xeb, 0x94, 0x9a, 0xd4, 0xe4, 0xa7, 0x35, 0xb5, 0xc6, 0x23, + 0xca, 0xd0, 0xb1, 0x9a, 0xcc, 0xc6, 0x34, 0x3d, 0x79, 0x1e, 0x46, 0xfc, 0x40, 0xf7, 0x02, 0x69, + 0xd6, 0x9b, 0x18, 0x13, 0xce, 0x25, 0xca, 0xea, 0xd5, 0x8c, 0xe5, 0x61, 0x82, 0x32, 0x73, 0xbd, + 0x38, 0x71, 0x7c, 0xeb, 0x45, 0x9e, 0xd9, 0xea, 0x4f, 0x8b, 0x70, 0x71, 0x91, 0x06, 0x2b, 0xae, + 0x23, 0x8d, 0xa2, 0x59, 0xcb, 0xfe, 0x81, 0x6c, 0xa2, 0xc9, 0x45, 0xbb, 0x78, 0xa4, 0x8b, 0x76, + 0xe9, 0x88, 0x16, 0xed, 0xf2, 0x31, 0x2e, 0xda, 0xbf, 0x5f, 0x84, 0x47, 0x12, 0x92, 0x5c, 0x75, + 0x4d, 0x35, 0xe1, 0xbf, 0x2f, 0xc0, 0x03, 0x08, 0xf0, 0x8e, 0xd0, 0x3b, 0xf9, 0xb1, 0x56, 0x4a, + 0xe3, 0xf9, 0x5a, 0x5a, 0xe3, 0x79, 0x25, 0xcf, 0xca, 0x97, 0xc1, 0xe1, 0x40, 0x2b, 0xde, 0x55, + 0x20, 0x9e, 0x3c, 0x84, 0x13, 0xa6, 0x9f, 0x98, 0xd2, 0x13, 0x7a, 0xaf, 0x61, 0x0f, 0x05, 0x66, + 0x94, 0x22, 0x4d, 0x78, 0xd8, 0xa7, 
0x4e, 0x60, 0x39, 0xd4, 0x4e, 0xc2, 0x09, 0x6d, 0xe8, 0x31, + 0x09, 0xf7, 0x70, 0x33, 0x8b, 0x08, 0xb3, 0xcb, 0xe6, 0x99, 0x07, 0xfe, 0x1c, 0xb8, 0xca, 0x29, + 0x44, 0x73, 0x64, 0x1a, 0xcb, 0x9b, 0x69, 0x8d, 0xe5, 0xd5, 0xfc, 0xed, 0x36, 0x98, 0xb6, 0x32, + 0x03, 0xc0, 0x5b, 0x21, 0xae, 0xae, 0x84, 0x8b, 0x34, 0x86, 0x39, 0x18, 0xa3, 0x62, 0x0b, 0x90, + 0x92, 0x73, 0x5c, 0x53, 0x09, 0x17, 0xa0, 0x66, 0x3c, 0x13, 0x93, 0xb4, 0x7d, 0xb5, 0x9d, 0xca, + 0xc0, 0xda, 0xce, 0x55, 0x20, 0x09, 0xc3, 0xa3, 0xc0, 0x1b, 0x4a, 0x3a, 0x4f, 0x2e, 0xf5, 0x50, + 0x60, 0x46, 0xa9, 0x3e, 0x5d, 0x79, 0xf8, 0x68, 0xbb, 0x72, 0x75, 0xf0, 0xae, 0x4c, 0x5e, 0x85, + 0xb3, 0x9c, 0x95, 0x94, 0x4f, 0x12, 0x58, 0xe8, 0x3d, 0x1f, 0x90, 0xc0, 0x67, 0xb1, 0x1f, 0x21, + 0xf6, 0xc7, 0x60, 0xed, 0x63, 0x78, 0xd4, 0x64, 0xcc, 0x75, 0xbb, 0xbf, 0x4e, 0x34, 0x97, 0x41, + 0x83, 0x99, 0x25, 0x59, 0x17, 0x0b, 0x58, 0x37, 0xd4, 0x37, 0x6c, 0x6a, 0x4a, 0xe7, 0xd1, 0xb0, + 0x8b, 0xad, 0x2d, 0x37, 0x65, 0x0e, 0xc6, 0xa8, 0xb2, 0xd4, 0x94, 0x91, 0x43, 0xaa, 0x29, 0x8b, + 0xdc, 0x4a, 0xbf, 0x99, 0xd0, 0x86, 0xa4, 0xae, 0x13, 0xba, 0x03, 0xcf, 0xa5, 0x09, 0xb0, 0xb7, + 0x0c, 0xd7, 0x12, 0x0d, 0xcf, 0xea, 0x04, 0x7e, 0x12, 0x6b, 0x2c, 0xa5, 0x25, 0x66, 0xd0, 0x60, + 0x66, 0x49, 0xa6, 0x9f, 0x6f, 0x51, 0xdd, 0x0e, 0xb6, 0x92, 0x80, 0x27, 0x92, 0xfa, 0xf9, 0x95, + 0x5e, 0x12, 0xcc, 0x2a, 0x97, 0xb9, 0x20, 0x8d, 0x3f, 0x98, 0x6a, 0xd5, 0x57, 0x4b, 0x70, 0x76, + 0x91, 0x06, 0xa1, 0x5f, 0xcd, 0xfb, 0x66, 0x94, 0x77, 0xc1, 0x8c, 0xf2, 0xad, 0x0a, 0x9c, 0x5a, + 0xa4, 0x41, 0x8f, 0x36, 0xf6, 0x9f, 0x54, 0xfc, 0x2b, 0x70, 0x2a, 0x72, 0xe5, 0x6a, 0x06, 0xae, + 0x27, 0xd6, 0xf2, 0xd4, 0x6e, 0xb9, 0xd9, 0x4b, 0x82, 0x59, 0xe5, 0xc8, 0x67, 0xe1, 0x11, 0xbe, + 0xd4, 0x3b, 0x2d, 0x61, 0x9f, 0x15, 0xc6, 0x84, 0xd8, 0x65, 0x84, 0x49, 0x09, 0xf9, 0x48, 0x33, + 0x9b, 0x0c, 0xfb, 0x95, 0x27, 0x5f, 0x82, 0x91, 0x8e, 0xd5, 0xa1, 0xb6, 0xe5, 0x70, 0xfd, 0x2c, + 0xb7, 0x4b, 0xc8, 0x6a, 0x0c, 0x2c, 0xda, 0xc0, 0xc5, 0x53, 0x31, 0xc1, 
0x30, 0xb3, 0xa7, 0x56, + 0x8f, 0xb1, 0xa7, 0xfe, 0x4b, 0x11, 0x86, 0x17, 0x3d, 0xb7, 0xdb, 0x69, 0xec, 0x92, 0x16, 0x0c, + 0xdd, 0xe2, 0x87, 0x67, 0xf2, 0x68, 0x6a, 0x70, 0x77, 0x68, 0x71, 0x06, 0x17, 0xa9, 0x44, 0xe2, + 0x1d, 0x25, 0x3c, 0xeb, 0xc4, 0xdb, 0x74, 0x97, 0x9a, 0xf2, 0x0c, 0x2d, 0xec, 0xc4, 0xd7, 0x58, + 0x22, 0x8a, 0x3c, 0xd2, 0x86, 0x13, 0xba, 0x6d, 0xbb, 0xb7, 0xa8, 0xb9, 0xac, 0x07, 0xd4, 0xa1, + 0xbe, 0x3a, 0x92, 0x3c, 0xac, 0x59, 0x9a, 0x9f, 0xeb, 0xcf, 0x26, 0xa1, 0x30, 0x8d, 0x4d, 0x5e, + 0x83, 0x61, 0x3f, 0x70, 0x3d, 0xa5, 0x6c, 0xd5, 0x67, 0xe6, 0x06, 0x6f, 0xf4, 0xc6, 0x67, 0x9a, + 0x02, 0x4a, 0xd8, 0xec, 0xe5, 0x0b, 0x2a, 0x06, 0xda, 0x37, 0x0b, 0x00, 0x57, 0xd6, 0xd6, 0x56, + 0xe5, 0xf1, 0x82, 0x09, 0x65, 0xbd, 0x1b, 0x1e, 0x54, 0x0e, 0x7e, 0x20, 0x98, 0xf0, 0x87, 0x94, + 0x67, 0x78, 0xdd, 0x60, 0x0b, 0x39, 0x3a, 0xf9, 0x10, 0x0c, 0x4b, 0x05, 0x59, 0x8a, 0x3d, 0x74, + 0x2d, 0x90, 0x4a, 0x34, 0xaa, 0x7c, 0xed, 0xb7, 0x8a, 0x00, 0x4b, 0xa6, 0x4d, 0x9b, 0xca, 0x83, + 0xbd, 0x16, 0x6c, 0x79, 0xd4, 0xdf, 0x72, 0x6d, 0x73, 0xc0, 0xd3, 0x54, 0x6e, 0xf3, 0x5f, 0x53, + 0x20, 0x18, 0xe1, 0x11, 0x13, 0x46, 0xfc, 0x80, 0x76, 0x96, 0x9c, 0x80, 0x7a, 0x3b, 0xba, 0x3d, + 0xe0, 0x21, 0xca, 0xb8, 0xb0, 0x8b, 0x44, 0x38, 0x98, 0x40, 0x25, 0x3a, 0xd4, 0x2d, 0xc7, 0x10, + 0x03, 0xa4, 0xb1, 0x3b, 0x60, 0x47, 0x3a, 0xc1, 0x76, 0x1c, 0x4b, 0x11, 0x0c, 0xc6, 0x31, 0xb5, + 0x1f, 0x17, 0xe1, 0x0c, 0xe7, 0xc7, 0xaa, 0x91, 0xf0, 0xc7, 0x24, 0xff, 0xb3, 0xe7, 0xb6, 0xdd, + 0x7f, 0x3b, 0x18, 0x6b, 0x71, 0x59, 0x6b, 0x85, 0x06, 0x7a, 0xa4, 0xcf, 0x45, 0x69, 0xb1, 0x2b, + 0x76, 0x5d, 0x28, 0xfb, 0x6c, 0xbe, 0x12, 0xd2, 0x6b, 0x0e, 0xdc, 0x85, 0xb2, 0x3f, 0x80, 0xcf, + 0x5e, 0xe1, 0xa9, 0x31, 0x9f, 0xb5, 0x38, 0x3b, 0xf2, 0xbf, 0x61, 0xc8, 0x0f, 0xf4, 0xa0, 0xab, + 0x86, 0xe6, 0xfa, 0x51, 0x33, 0xe6, 0xe0, 0xd1, 0x3c, 0x22, 0xde, 0x51, 0x32, 0xd5, 0x7e, 0x5c, + 0x80, 0x73, 0xd9, 0x05, 0x97, 0x2d, 0x3f, 0x20, 0xff, 0xa3, 0x47, 0xec, 0x07, 0x6c, 0x71, 0x56, + 0x9a, 0x0b, 
0x3d, 0x74, 0xc8, 0x56, 0x29, 0x31, 0x91, 0x07, 0x50, 0xb1, 0x02, 0xda, 0x56, 0xfb, + 0xcb, 0x1b, 0x47, 0xfc, 0xe9, 0xb1, 0xa5, 0x9d, 0x71, 0x41, 0xc1, 0x4c, 0x7b, 0xab, 0xd8, 0xef, + 0x93, 0xf9, 0xf2, 0x61, 0x27, 0x7d, 0x7e, 0xaf, 0xe5, 0xf3, 0xf9, 0x4d, 0x56, 0xa8, 0xd7, 0xf5, + 0xf7, 0x7f, 0xf5, 0xba, 0xfe, 0xde, 0xc8, 0xef, 0xfa, 0x9b, 0x12, 0x43, 0x5f, 0x0f, 0xe0, 0x77, + 0x4a, 0x70, 0xfe, 0x6e, 0xdd, 0x86, 0xad, 0x67, 0xb2, 0x77, 0xe6, 0x5d, 0xcf, 0xee, 0xde, 0x0f, + 0xc9, 0x0c, 0x54, 0x3a, 0x5b, 0xba, 0xaf, 0x94, 0x32, 0xb5, 0x61, 0xa9, 0xac, 0xb2, 0xc4, 0x3b, + 0x6c, 0xd2, 0xe0, 0xca, 0x1c, 0x7f, 0x45, 0x41, 0xca, 0xa6, 0xe3, 0x36, 0xf5, 0xfd, 0xc8, 0x26, + 0x10, 0x4e, 0xc7, 0x2b, 0x22, 0x19, 0x55, 0x3e, 0x09, 0x60, 0x48, 0x98, 0x98, 0xe5, 0xca, 0x34, + 0xb8, 0x23, 0x57, 0x86, 0x9b, 0x78, 0xf4, 0x51, 0xf2, 0xb4, 0x42, 0xf2, 0x22, 0x53, 0x50, 0x0e, + 0x22, 0xa7, 0x5d, 0xb5, 0x35, 0x2f, 0x67, 0xe8, 0xa7, 0x9c, 0x8e, 0x6d, 0xec, 0xdd, 0x0d, 0x6e, + 0x54, 0x37, 0xe5, 0xf9, 0xb9, 0xe5, 0x3a, 0x5c, 0x21, 0x2b, 0x45, 0x1b, 0xfb, 0x1b, 0x3d, 0x14, + 0x98, 0x51, 0x4a, 0xfb, 0xab, 0x2a, 0x9c, 0xc9, 0xee, 0x0f, 0x4c, 0x6e, 0x3b, 0xd4, 0xf3, 0x19, + 0x76, 0x21, 0x29, 0xb7, 0x9b, 0x22, 0x19, 0x55, 0xfe, 0x7b, 0xda, 0xe1, 0xec, 0x5b, 0x05, 0x38, + 0xeb, 0xc9, 0x33, 0xa2, 0xfb, 0xe1, 0x74, 0xf6, 0x98, 0x30, 0x67, 0xf4, 0x61, 0x88, 0xfd, 0xeb, + 0x42, 0x7e, 0xa3, 0x00, 0x13, 0xed, 0x94, 0x9d, 0xe3, 0x18, 0x2f, 0x8c, 0x71, 0xaf, 0xf8, 0x95, + 0x3e, 0xfc, 0xb0, 0x6f, 0x4d, 0xc8, 0x97, 0xa0, 0xde, 0x61, 0xfd, 0xc2, 0x0f, 0xa8, 0x63, 0xa8, + 0x3b, 0x63, 0x83, 0x8f, 0xa4, 0xd5, 0x08, 0x4b, 0xb9, 0xa2, 0x09, 0xfd, 0x20, 0x96, 0x81, 0x71, + 0x8e, 0x0f, 0xf8, 0x0d, 0xb1, 0x4b, 0x50, 0xf5, 0x69, 0x10, 0x58, 0x4e, 0x4b, 0xec, 0x37, 0x6a, + 0x62, 0xac, 0x34, 0x65, 0x1a, 0x86, 0xb9, 0xe4, 0xc3, 0x50, 0xe3, 0x47, 0x4e, 0xb3, 0x5e, 0xcb, + 0x9f, 0xa8, 0x71, 0x77, 0xb1, 0x51, 0xe1, 0x00, 0x27, 0x13, 0x31, 0xca, 0x27, 0x4f, 0xc3, 0xc8, + 0x06, 0x1f, 0xbe, 0xf2, 0xd2, 0xb0, 0xb0, 0x71, 
0x71, 0x6d, 0xad, 0x11, 0x4b, 0xc7, 0x04, 0x15, + 0x99, 0x01, 0xa0, 0xe1, 0xb9, 0x5c, 0xda, 0x9e, 0x15, 0x9d, 0xd8, 0x61, 0x8c, 0x8a, 0x3c, 0x06, + 0xa5, 0xc0, 0xf6, 0xb9, 0x0d, 0xab, 0x1a, 0x6d, 0x41, 0xd7, 0x96, 0x9b, 0xc8, 0xd2, 0xb5, 0x5f, + 0x14, 0xe0, 0x44, 0xea, 0x72, 0x09, 0x2b, 0xd2, 0xf5, 0x6c, 0x39, 0x8d, 0x84, 0x45, 0xd6, 0x71, + 0x19, 0x59, 0x3a, 0x79, 0x55, 0xaa, 0xe5, 0xc5, 0x9c, 0xf1, 0x11, 0xae, 0xeb, 0x81, 0xcf, 0xf4, + 0xf0, 0x1e, 0x8d, 0x9c, 0x1f, 0xf3, 0x45, 0xf5, 0x91, 0xeb, 0x40, 0xec, 0x98, 0x2f, 0xca, 0xc3, + 0x04, 0x65, 0xca, 0xe0, 0x57, 0x3e, 0x88, 0xc1, 0x4f, 0xfb, 0x7a, 0x31, 0x26, 0x01, 0xa9, 0xd9, + 0xdf, 0x43, 0x02, 0x4f, 0xb2, 0x05, 0x34, 0x5c, 0xdc, 0x6b, 0xf1, 0xf5, 0x8f, 0x2f, 0xc6, 0x32, + 0x97, 0xbc, 0x24, 0x64, 0x5f, 0xca, 0x79, 0x0b, 0x75, 0x6d, 0xb9, 0x29, 0xbc, 0xab, 0x54, 0xab, + 0x85, 0x4d, 0x50, 0x3e, 0xa6, 0x26, 0xd0, 0xfe, 0xac, 0x04, 0xf5, 0xab, 0xee, 0xc6, 0x7b, 0xc4, + 0x83, 0x3a, 0x7b, 0x99, 0x2a, 0xbe, 0x8b, 0xcb, 0xd4, 0x3a, 0x3c, 0x12, 0x04, 0x76, 0x93, 0x1a, + 0xae, 0x63, 0xfa, 0xb3, 0x9b, 0x01, 0xf5, 0x16, 0x2c, 0xc7, 0xf2, 0xb7, 0xa8, 0x29, 0x8f, 0x93, + 0x1e, 0xdd, 0xdf, 0x9b, 0x7c, 0x64, 0x6d, 0x6d, 0x39, 0x8b, 0x04, 0xfb, 0x95, 0xe5, 0xd3, 0x86, + 0x6e, 0x6c, 0xbb, 0x9b, 0x9b, 0xfc, 0xa6, 0x8c, 0xf4, 0xb9, 0x11, 0xd3, 0x46, 0x2c, 0x1d, 0x13, + 0x54, 0xda, 0xdb, 0x45, 0xa8, 0x85, 0x37, 0xdf, 0xc9, 0x13, 0x30, 0xbc, 0xe1, 0xb9, 0xdb, 0xd4, + 0x13, 0x27, 0x77, 0xf2, 0xa6, 0x4c, 0x43, 0x24, 0xa1, 0xca, 0x23, 0x8f, 0x43, 0x25, 0x70, 0x3b, + 0x96, 0x91, 0x36, 0xa8, 0xad, 0xb1, 0x44, 0x14, 0x79, 0xc7, 0xd7, 0xc1, 0x9f, 0x4c, 0xa8, 0x76, + 0xb5, 0xbe, 0xca, 0xd8, 0x2b, 0x50, 0xf6, 0x75, 0xdf, 0x96, 0xeb, 0x69, 0x8e, 0x4b, 0xe4, 0xb3, + 0xcd, 0x65, 0x79, 0x89, 0x7c, 0xb6, 0xb9, 0x8c, 0x1c, 0x54, 0xfb, 0x69, 0x11, 0xea, 0x42, 0x6e, + 0x62, 0x56, 0x38, 0x4a, 0xc9, 0xbd, 0xc0, 0x5d, 0x29, 0xfc, 0x6e, 0x9b, 0x7a, 0xdc, 0xcc, 0x24, + 0x27, 0xb9, 0xf8, 0xf9, 0x40, 0x94, 0x19, 0xba, 0x53, 0x44, 0x49, 0x4a, 0xf4, 0xe5, 
0x63, 0x14, + 0x7d, 0xe5, 0x40, 0xa2, 0x1f, 0x3a, 0x0e, 0xd1, 0xbf, 0x59, 0x84, 0xda, 0xb2, 0xb5, 0x49, 0x8d, + 0x5d, 0xc3, 0xe6, 0x77, 0x02, 0x4d, 0x6a, 0xd3, 0x80, 0x2e, 0x7a, 0xba, 0x41, 0x57, 0xa9, 0x67, + 0xf1, 0xc8, 0x30, 0x6c, 0x7c, 0xf0, 0x19, 0x48, 0xde, 0x09, 0x9c, 0xef, 0x43, 0x83, 0x7d, 0x4b, + 0x93, 0x25, 0x18, 0x31, 0xa9, 0x6f, 0x79, 0xd4, 0x5c, 0x8d, 0x6d, 0x54, 0x9e, 0x50, 0x4b, 0xcd, + 0x7c, 0x2c, 0xef, 0xce, 0xde, 0xe4, 0xa8, 0x32, 0x50, 0x8a, 0x1d, 0x4b, 0xa2, 0x28, 0x1b, 0xf2, + 0x1d, 0xbd, 0xeb, 0x67, 0xd5, 0x31, 0x36, 0xe4, 0x57, 0xb3, 0x49, 0xb0, 0x5f, 0x59, 0xad, 0x02, + 0xa5, 0x65, 0xb7, 0xa5, 0xbd, 0x55, 0x82, 0x30, 0x84, 0x10, 0xf9, 0x3f, 0x05, 0xa8, 0xeb, 0x8e, + 0xe3, 0x06, 0x32, 0x3c, 0x8f, 0x38, 0x81, 0xc7, 0xdc, 0x91, 0x8a, 0xa6, 0x66, 0x23, 0x50, 0x71, + 0x78, 0x1b, 0x1e, 0x28, 0xc7, 0x72, 0x30, 0xce, 0x9b, 0x74, 0x53, 0xe7, 0xc9, 0x2b, 0xf9, 0x6b, + 0x71, 0x80, 0xd3, 0xe3, 0x73, 0x9f, 0x86, 0xf1, 0x74, 0x65, 0x0f, 0x73, 0x1c, 0x94, 0xeb, 0x60, + 0xbe, 0x08, 0x10, 0xf9, 0x94, 0xdc, 0x07, 0x23, 0x96, 0x95, 0x30, 0x62, 0x2d, 0x0e, 0x2e, 0xe0, + 0xb0, 0xd2, 0x7d, 0x0d, 0x57, 0xaf, 0xa7, 0x0c, 0x57, 0x4b, 0x47, 0xc1, 0xec, 0xee, 0xc6, 0xaa, + 0xdf, 0x2c, 0xc0, 0x78, 0x44, 0x2c, 0x6f, 0xc8, 0x3e, 0x07, 0xa3, 0x1e, 0xd5, 0xcd, 0x86, 0x1e, + 0x18, 0x5b, 0xdc, 0xd5, 0xbb, 0xc0, 0x7d, 0xb3, 0x4f, 0xee, 0xef, 0x4d, 0x8e, 0x62, 0x3c, 0x03, + 0x93, 0x74, 0x44, 0x87, 0x3a, 0x4b, 0x58, 0xb3, 0xda, 0xd4, 0xed, 0x06, 0x03, 0x5a, 0x4d, 0xf9, + 0x86, 0x05, 0x23, 0x18, 0x8c, 0x63, 0x6a, 0xef, 0x14, 0x60, 0x2c, 0x5e, 0xe1, 0x63, 0xb7, 0xa8, + 0x6d, 0x25, 0x2d, 0x6a, 0x73, 0x47, 0xd0, 0x26, 0x7d, 0xac, 0x68, 0x3f, 0xab, 0xc6, 0x3f, 0x8d, + 0x5b, 0xce, 0xe2, 0xc6, 0x82, 0xc2, 0x5d, 0x8d, 0x05, 0xef, 0xfd, 0xa8, 0x31, 0xfd, 0xb4, 0xdc, + 0xf2, 0x03, 0xac, 0xe5, 0xbe, 0x9b, 0xa1, 0x67, 0x62, 0xe1, 0x53, 0x86, 0x72, 0x84, 0x4f, 0x69, + 0x87, 0xe1, 0x53, 0x86, 0x8f, 0x6c, 0xd2, 0x39, 0x48, 0x08, 0x95, 0xea, 0x7d, 0x0d, 0xa1, 0x52, + 0x3b, 0xae, 0x10, 0x2a, 
0x90, 0x37, 0x84, 0xca, 0xd7, 0x0a, 0x30, 0x66, 0x26, 0x6e, 0xcc, 0x72, + 0xdb, 0x42, 0x9e, 0xa5, 0x26, 0x79, 0x01, 0x57, 0x5c, 0x99, 0x4a, 0xa6, 0x61, 0x8a, 0xa5, 0xf6, + 0xdb, 0x95, 0xf8, 0x3a, 0x70, 0xbf, 0x4d, 0xd5, 0xcf, 0x26, 0x4d, 0xd5, 0x17, 0xd3, 0xa6, 0xea, + 0x13, 0x31, 0x2f, 0xd2, 0xb8, 0xb9, 0xfa, 0x23, 0xb1, 0xe9, 0x91, 0xcd, 0x49, 0xa3, 0x91, 0xa4, + 0x33, 0xa6, 0xc8, 0x8f, 0x40, 0xd5, 0x57, 0xc1, 0x1e, 0xc5, 0xc6, 0x26, 0x6a, 0x17, 0x15, 0x88, + 0x31, 0xa4, 0x60, 0x9a, 0xb8, 0x47, 0x75, 0xdf, 0x75, 0xd2, 0x9a, 0x38, 0xf2, 0x54, 0x94, 0xb9, + 0x71, 0x93, 0xf9, 0xd0, 0x3d, 0x4c, 0xe6, 0x3a, 0xd4, 0x6d, 0xdd, 0x0f, 0xd6, 0x3b, 0xa6, 0x1e, + 0x50, 0x53, 0x8e, 0xb7, 0xff, 0x7a, 0xb0, 0xb5, 0x8a, 0xad, 0x7f, 0x91, 0x42, 0xb8, 0x1c, 0xc1, + 0x60, 0x1c, 0x93, 0x98, 0x30, 0xc2, 0x5e, 0xf9, 0x68, 0x30, 0x67, 0x55, 0x08, 0x80, 0xc3, 0xf0, + 0x08, 0x2d, 0x3d, 0xcb, 0x31, 0x1c, 0x4c, 0xa0, 0xf6, 0xb1, 0xaa, 0xd7, 0x06, 0xb1, 0xaa, 0x93, + 0x4f, 0x08, 0x65, 0x63, 0x57, 0x35, 0x18, 0xb7, 0xc6, 0x8d, 0x46, 0x5e, 0x85, 0x18, 0xcf, 0xc4, + 0x24, 0xad, 0xf6, 0xb5, 0x1a, 0xd4, 0xaf, 0xeb, 0x81, 0xb5, 0x43, 0xf9, 0x11, 0xd0, 0xf1, 0xd8, + 0xe1, 0x7f, 0xa5, 0x00, 0x67, 0x92, 0x7e, 0x7e, 0xc7, 0x68, 0x8c, 0xe7, 0x51, 0x43, 0x30, 0x93, + 0x1b, 0xf6, 0xa9, 0x05, 0x37, 0xcb, 0xf7, 0xb8, 0x0d, 0x1e, 0xb7, 0x59, 0xbe, 0xd9, 0x8f, 0x21, + 0xf6, 0xaf, 0xcb, 0x7b, 0xc5, 0x2c, 0xff, 0x60, 0x47, 0x75, 0x4b, 0x1d, 0x1a, 0x0c, 0x3f, 0x30, + 0x87, 0x06, 0xd5, 0x07, 0x42, 0x53, 0xeb, 0xc4, 0x0e, 0x0d, 0x6a, 0x39, 0x9d, 0x57, 0xa4, 0x6b, + 0xbc, 0x40, 0xeb, 0x77, 0xf8, 0xc0, 0x6f, 0xb5, 0x2b, 0x63, 0x2e, 0x53, 0x70, 0x36, 0x74, 0xdf, + 0x32, 0xe4, 0x9a, 0x99, 0x23, 0x8a, 0xa5, 0x0a, 0xf7, 0x25, 0xce, 0xb8, 0xf9, 0x2b, 0x0a, 0xec, + 0x28, 0xac, 0x58, 0x31, 0x57, 0x58, 0x31, 0x32, 0x07, 0x65, 0x87, 0x6d, 0xbd, 0x4b, 0x87, 0x0e, + 0x24, 0x76, 0xfd, 0x1a, 0xdd, 0x45, 0x5e, 0x58, 0x7b, 0xbb, 0x08, 0xc0, 0x3e, 0xff, 0x60, 0xe6, + 0xfb, 0x0f, 0xc1, 0xb0, 0xdf, 0xe5, 0x1b, 0x6d, 0xb9, 0xda, 
0x47, 0x1e, 0x3f, 0x22, 0x19, 0x55, + 0x3e, 0x79, 0x1c, 0x2a, 0xaf, 0x77, 0x69, 0x57, 0x9d, 0x45, 0x87, 0xba, 0xde, 0x67, 0x58, 0x22, + 0x8a, 0xbc, 0xe3, 0x33, 0xc5, 0x29, 0x33, 0x7f, 0xe5, 0xb8, 0xcc, 0xfc, 0x35, 0x18, 0xbe, 0xee, + 0x72, 0x07, 0x42, 0xed, 0x9f, 0x8a, 0x00, 0x91, 0x83, 0x16, 0xf9, 0x66, 0x01, 0x1e, 0x0e, 0x07, + 0x5c, 0x20, 0x54, 0x76, 0x1e, 0x38, 0x36, 0xb7, 0xc9, 0x3f, 0x6b, 0xb0, 0xf3, 0x19, 0x68, 0x35, + 0x8b, 0x1d, 0x66, 0xd7, 0x82, 0x20, 0x54, 0x69, 0xbb, 0x13, 0xec, 0xce, 0x5b, 0x9e, 0xec, 0x81, + 0x99, 0x7e, 0x80, 0x97, 0x25, 0x8d, 0x28, 0x2a, 0xf7, 0x95, 0x7c, 0x10, 0xa9, 0x1c, 0x0c, 0x71, + 0xc8, 0x16, 0x54, 0x1d, 0xf7, 0x55, 0x9f, 0x89, 0x43, 0x76, 0xc7, 0x17, 0x07, 0x17, 0xb9, 0x10, + 0xab, 0x30, 0x11, 0xcb, 0x17, 0x1c, 0x76, 0xa4, 0xb0, 0xbf, 0x51, 0x84, 0x53, 0x19, 0x72, 0x20, + 0x2f, 0xc2, 0xb8, 0xf4, 0x85, 0x8b, 0x22, 0x28, 0x17, 0xa2, 0x08, 0xca, 0xcd, 0x54, 0x1e, 0xf6, + 0x50, 0x93, 0x57, 0x01, 0x74, 0xc3, 0xa0, 0xbe, 0xbf, 0xe2, 0x9a, 0x4a, 0x99, 0x7d, 0x61, 0x7f, + 0x6f, 0x12, 0x66, 0xc3, 0xd4, 0x3b, 0x7b, 0x93, 0x1f, 0xcd, 0x72, 0x6f, 0x4d, 0xc9, 0x39, 0x2a, + 0x80, 0x31, 0x48, 0xf2, 0x05, 0x00, 0xb1, 0x6f, 0x0b, 0x6f, 0xe0, 0xdf, 0xc3, 0xd8, 0x31, 0xa5, + 0x62, 0x1d, 0x4d, 0x7d, 0xa6, 0xab, 0x3b, 0x81, 0x15, 0xec, 0x8a, 0x80, 0x27, 0x37, 0x43, 0x14, + 0x8c, 0x21, 0x6a, 0x7f, 0x52, 0x84, 0xaa, 0x32, 0xb3, 0xde, 0x07, 0xdb, 0x5a, 0x2b, 0x61, 0x5b, + 0x3b, 0x22, 0x87, 0xd6, 0x2c, 0xcb, 0x9a, 0x9b, 0xb2, 0xac, 0x2d, 0xe6, 0x67, 0x75, 0x77, 0xbb, + 0xda, 0x77, 0x8a, 0x30, 0xa6, 0x48, 0xf3, 0x5a, 0xd5, 0x3e, 0x05, 0x27, 0xc4, 0x41, 0xf4, 0x8a, + 0x7e, 0x5b, 0xc4, 0x7e, 0xe1, 0x02, 0x2b, 0x0b, 0x1f, 0xd2, 0x46, 0x32, 0x0b, 0xd3, 0xb4, 0xac, + 0x5b, 0x8b, 0xa4, 0x75, 0xb6, 0x09, 0x11, 0x47, 0x57, 0x62, 0xb3, 0xc4, 0xbb, 0x75, 0x23, 0x95, + 0x87, 0x3d, 0xd4, 0x69, 0xb3, 0x5e, 0xf9, 0x18, 0xcc, 0x7a, 0x7f, 0x5d, 0x80, 0x91, 0x48, 0x5e, + 0xc7, 0x6e, 0xd4, 0xdb, 0x4c, 0x1a, 0xf5, 0x66, 0x73, 0x77, 0x87, 0x3e, 0x26, 0xbd, 0xff, 0x37, + 
0x0c, 0x09, 0xbf, 0x6a, 0xb2, 0x01, 0xe7, 0xac, 0x4c, 0xef, 0xb0, 0xd8, 0x6c, 0x13, 0x5e, 0x14, + 0x5e, 0xea, 0x4b, 0x89, 0x77, 0x41, 0x21, 0x5d, 0xa8, 0xee, 0x50, 0x2f, 0xb0, 0x0c, 0xaa, 0xbe, + 0x6f, 0x31, 0xb7, 0x4a, 0x26, 0x0d, 0x97, 0xa1, 0x4c, 0x6f, 0x4a, 0x06, 0x18, 0xb2, 0x22, 0x1b, + 0x50, 0xa1, 0x66, 0x8b, 0xaa, 0x68, 0x3c, 0x39, 0x63, 0x5d, 0x86, 0xf2, 0x64, 0x6f, 0x3e, 0x0a, + 0x68, 0xe2, 0x43, 0xcd, 0x56, 0x07, 0x53, 0xb2, 0x1f, 0x0e, 0xae, 0x60, 0x85, 0x47, 0x5c, 0xd1, + 0x45, 0xfd, 0x30, 0x09, 0x23, 0x3e, 0x64, 0x3b, 0xb4, 0x90, 0x55, 0x8e, 0x68, 0xf2, 0xb8, 0x8b, + 0x7d, 0xcc, 0x87, 0xda, 0x2d, 0x3d, 0xa0, 0x5e, 0x5b, 0xf7, 0xb6, 0xe5, 0x6e, 0x63, 0xf0, 0x2f, + 0x7c, 0x49, 0x21, 0x45, 0x5f, 0x18, 0x26, 0x61, 0xc4, 0x87, 0xb8, 0x50, 0x0b, 0xa4, 0xfa, 0xac, + 0xcc, 0x80, 0x83, 0x33, 0x55, 0x8a, 0xb8, 0x2f, 0xfd, 0xab, 0xd5, 0x2b, 0x46, 0x3c, 0xc8, 0x4e, + 0x22, 0x0e, 0xb0, 0x88, 0xfe, 0xdc, 0xc8, 0x61, 0x4e, 0x96, 0x50, 0xd1, 0x72, 0x93, 0x1d, 0x4f, + 0x58, 0x7b, 0xbb, 0x12, 0x4d, 0xcb, 0xf7, 0xdb, 0xc8, 0xf5, 0x74, 0xd2, 0xc8, 0x75, 0x21, 0x6d, + 0xe4, 0x4a, 0x9d, 0x6f, 0x1e, 0xde, 0x23, 0x33, 0x65, 0x5e, 0x2a, 0x1f, 0x83, 0x79, 0xe9, 0x29, + 0xa8, 0xef, 0xf0, 0x99, 0x40, 0x84, 0xf6, 0xa9, 0xf0, 0x65, 0x84, 0xcf, 0xec, 0x37, 0xa3, 0x64, + 0x8c, 0xd3, 0xb0, 0x22, 0xf2, 0xcf, 0x07, 0x61, 0x6c, 0x54, 0x59, 0xa4, 0x19, 0x25, 0x63, 0x9c, + 0x86, 0x3b, 0x73, 0x59, 0xce, 0xb6, 0x28, 0x30, 0xcc, 0x0b, 0x08, 0x67, 0x2e, 0x95, 0x88, 0x51, + 0x3e, 0xb9, 0x04, 0xd5, 0xae, 0xb9, 0x29, 0x68, 0xab, 0x9c, 0x96, 0x6b, 0x98, 0xeb, 0xf3, 0x0b, + 0x32, 0xd4, 0x90, 0xca, 0x65, 0x35, 0x69, 0xeb, 0x1d, 0x95, 0xc1, 0xf7, 0x86, 0xb2, 0x26, 0x2b, + 0x51, 0x32, 0xc6, 0x69, 0xc8, 0xc7, 0x61, 0xcc, 0xa3, 0x66, 0xd7, 0xa0, 0x61, 0x29, 0x61, 0x9d, + 0x22, 0xe2, 0x17, 0x0f, 0xf1, 0x1c, 0x4c, 0x51, 0xf6, 0x31, 0x92, 0xd5, 0x07, 0x72, 0x3d, 0xfd, + 0x51, 0x01, 0x48, 0xaf, 0xf3, 0x33, 0xd9, 0x82, 0x21, 0x87, 0x5b, 0xbf, 0x72, 0x47, 0x53, 0x8e, + 0x19, 0xd1, 0xc4, 0xb4, 0x24, 0x13, 
0x24, 0x3e, 0x71, 0xa0, 0x4a, 0x6f, 0x07, 0xd4, 0x73, 0xc2, + 0xcb, 0x10, 0x47, 0x13, 0xb9, 0x59, 0xec, 0x06, 0x24, 0x32, 0x86, 0x3c, 0xb4, 0x9f, 0x14, 0xa1, + 0x1e, 0xa3, 0xbb, 0xd7, 0xa6, 0x92, 0xdf, 0xc7, 0x16, 0x46, 0xa7, 0x75, 0xcf, 0x96, 0x23, 0x2c, + 0x76, 0x1f, 0x5b, 0x66, 0xe1, 0x32, 0xc6, 0xe9, 0xc8, 0x0c, 0x40, 0x5b, 0xf7, 0x03, 0xea, 0xf1, + 0xd5, 0x37, 0x75, 0x0b, 0x7a, 0x25, 0xcc, 0xc1, 0x18, 0x15, 0xb9, 0x28, 0x63, 0x6f, 0x97, 0x93, + 0x51, 0xeb, 0xfa, 0x04, 0xd6, 0xae, 0x1c, 0x41, 0x60, 0x6d, 0xd2, 0x82, 0x71, 0x55, 0x6b, 0x95, + 0x7b, 0xb8, 0x98, 0x66, 0x62, 0xff, 0x92, 0x82, 0xc0, 0x1e, 0x50, 0xed, 0xed, 0x02, 0x8c, 0x26, + 0x4c, 0x1e, 0x22, 0xde, 0x9c, 0x72, 0xdd, 0x4f, 0xc4, 0x9b, 0x8b, 0x79, 0xdc, 0x3f, 0x09, 0x43, + 0x42, 0x40, 0x69, 0x8f, 0x3c, 0x21, 0x42, 0x94, 0xb9, 0x6c, 0x2e, 0x93, 0x46, 0xd5, 0xf4, 0x5c, + 0x26, 0xad, 0xae, 0xa8, 0xf2, 0x85, 0xad, 0x5e, 0xd4, 0xae, 0xd7, 0x56, 0x2f, 0xd2, 0x31, 0xa4, + 0xd0, 0xfe, 0xad, 0x04, 0xdc, 0x7f, 0x85, 0x3c, 0x07, 0xb5, 0x36, 0x35, 0xb6, 0x74, 0xc7, 0xf2, + 0x55, 0xbc, 0x49, 0xb6, 0xbb, 0xad, 0xad, 0xa8, 0xc4, 0x3b, 0x0c, 0x60, 0xb6, 0xb9, 0xcc, 0x5d, + 0xc4, 0x23, 0x5a, 0x62, 0xc0, 0x50, 0xcb, 0xf7, 0xf5, 0x8e, 0x95, 0xfb, 0xf8, 0x54, 0xc4, 0xf7, + 0x13, 0x83, 0x48, 0x3c, 0xa3, 0x84, 0x26, 0x06, 0x54, 0x3a, 0xb6, 0x6e, 0x39, 0xb9, 0x7f, 0x70, + 0xc2, 0xbe, 0x60, 0x95, 0x21, 0x09, 0x93, 0x0e, 0x7f, 0x44, 0x81, 0x4d, 0xba, 0x50, 0xf7, 0x0d, + 0x4f, 0x6f, 0xfb, 0x5b, 0xfa, 0xcc, 0x33, 0xcf, 0xe6, 0x56, 0x92, 0x22, 0x56, 0x62, 0xce, 0x9e, + 0xc3, 0xd9, 0x95, 0xe6, 0x95, 0xd9, 0x99, 0x67, 0x9e, 0xc5, 0x38, 0x9f, 0x38, 0xdb, 0x67, 0x9e, + 0x9a, 0x91, 0xfd, 0xfe, 0xc8, 0xd9, 0x3e, 0xf3, 0xd4, 0x0c, 0xc6, 0xf9, 0x68, 0xff, 0x5a, 0x80, + 0x5a, 0x48, 0x4b, 0xd6, 0x01, 0xd8, 0x08, 0x94, 0x11, 0xf9, 0x0e, 0x15, 0x1d, 0x9f, 0xef, 0x8a, + 0xd7, 0xc3, 0xc2, 0x18, 0x03, 0xca, 0x08, 0x59, 0x58, 0x3c, 0xea, 0x90, 0x85, 0xd3, 0x50, 0xdb, + 0xd2, 0x1d, 0xd3, 0xdf, 0xd2, 0xb7, 0xc5, 0x44, 0x14, 0x0b, 0xe2, 0x79, 
0x45, 0x65, 0x60, 0x44, + 0xa3, 0xfd, 0xd1, 0x10, 0x88, 0x33, 0x4f, 0x36, 0x54, 0x4c, 0xcb, 0x17, 0x4e, 0xb7, 0x05, 0x5e, + 0x32, 0x1c, 0x2a, 0xf3, 0x32, 0x1d, 0x43, 0x0a, 0x72, 0x16, 0x4a, 0x6d, 0xcb, 0x91, 0x27, 0x1e, + 0xdc, 0xe0, 0xb5, 0x62, 0x39, 0xc8, 0xd2, 0x78, 0x96, 0x7e, 0x5b, 0xfa, 0x4b, 0x89, 0x2c, 0xfd, + 0x36, 0xb2, 0x34, 0xb6, 0x05, 0xb5, 0x5d, 0x77, 0x7b, 0x43, 0x37, 0xb6, 0x95, 0x5b, 0x55, 0x99, + 0x2f, 0x84, 0x7c, 0x0b, 0xba, 0x9c, 0xcc, 0xc2, 0x34, 0x2d, 0x59, 0x87, 0x47, 0xde, 0xa0, 0x9e, + 0x2b, 0x47, 0x79, 0xd3, 0xa6, 0xb4, 0xa3, 0x60, 0x84, 0x0a, 0xc1, 0xbd, 0xb3, 0x3e, 0x97, 0x4d, + 0x82, 0xfd, 0xca, 0x72, 0x3f, 0x4f, 0xdd, 0x6b, 0xd1, 0x60, 0xd5, 0x73, 0x0d, 0xea, 0xfb, 0x96, + 0xd3, 0x52, 0xb0, 0x43, 0x11, 0xec, 0x5a, 0x36, 0x09, 0xf6, 0x2b, 0x4b, 0x5e, 0x86, 0x09, 0x91, + 0x25, 0x16, 0xdb, 0xd9, 0x1d, 0xdd, 0xb2, 0xf5, 0x0d, 0xcb, 0x56, 0xff, 0x05, 0x1b, 0x15, 0xe7, + 0x0a, 0x6b, 0x7d, 0x68, 0xb0, 0x6f, 0x69, 0x72, 0x15, 0xc6, 0xd5, 0xa9, 0xd2, 0x2a, 0xf5, 0x9a, + 0xe1, 0x39, 0xf8, 0x68, 0xe3, 0x02, 0xdb, 0xef, 0xcd, 0xd3, 0x8e, 0x47, 0x0d, 0xae, 0x75, 0xa5, + 0xa8, 0xb0, 0xa7, 0x1c, 0x41, 0x38, 0xc3, 0x0f, 0xbb, 0xd7, 0x3b, 0x73, 0xae, 0x6b, 0x9b, 0xee, + 0x2d, 0x47, 0x7d, 0xbb, 0x50, 0x6c, 0xf8, 0x41, 0x52, 0x33, 0x93, 0x02, 0xfb, 0x94, 0x64, 0x5f, + 0xce, 0x73, 0xe6, 0xdd, 0x5b, 0x4e, 0x1a, 0x15, 0xa2, 0x2f, 0x6f, 0xf6, 0xa1, 0xc1, 0xbe, 0xa5, + 0xc9, 0x02, 0x90, 0xf4, 0x17, 0xac, 0x77, 0xb8, 0x32, 0x34, 0xda, 0x38, 0x23, 0x82, 0x6b, 0xa4, + 0x73, 0x31, 0xa3, 0x04, 0x59, 0x86, 0xd3, 0xe9, 0x54, 0xc6, 0x8e, 0x7b, 0xd8, 0x8f, 0x8a, 0xb0, + 0x9a, 0x98, 0x91, 0x8f, 0x99, 0xa5, 0xb4, 0x3f, 0x2e, 0xc2, 0x68, 0xe2, 0x36, 0xf6, 0x03, 0x77, + 0xeb, 0x95, 0x69, 0xa0, 0x6d, 0xbf, 0xb5, 0x34, 0x7f, 0x85, 0xea, 0x26, 0xf5, 0xae, 0x51, 0x75, + 0x73, 0x9e, 0x4f, 0x2a, 0x2b, 0x89, 0x1c, 0x4c, 0x51, 0x92, 0x4d, 0xa8, 0x08, 0x7b, 0x6a, 0xde, + 0xff, 0x2c, 0x28, 0x19, 0x71, 0xa3, 0x2a, 0x5f, 0x72, 0x84, 0x49, 0x55, 0xc0, 0x6b, 0x01, 0x8c, + 0xc4, 0x29, 
0xd8, 0x44, 0x12, 0x29, 0x6b, 0xc3, 0x09, 0x45, 0x6d, 0x09, 0x4a, 0x41, 0x30, 0xe8, + 0x7d, 0x5a, 0x61, 0x9f, 0x5f, 0x5b, 0x46, 0x86, 0xa1, 0x6d, 0xb2, 0xb6, 0xf3, 0x7d, 0xcb, 0x75, + 0x64, 0x70, 0xe5, 0x75, 0x18, 0x0e, 0xa4, 0x89, 0x6a, 0xb0, 0xfb, 0xc0, 0xdc, 0x5c, 0xac, 0xcc, + 0x53, 0x0a, 0x4b, 0xfb, 0x9b, 0x22, 0xd4, 0xc2, 0xed, 0xe4, 0x01, 0x82, 0x16, 0xbb, 0x50, 0x0b, + 0x9d, 0x75, 0x72, 0xff, 0x33, 0x2d, 0xf2, 0x21, 0xe1, 0x3b, 0xa0, 0xf0, 0x15, 0x23, 0x1e, 0x71, + 0x47, 0xa0, 0x52, 0x0e, 0x47, 0xa0, 0x0e, 0x0c, 0x07, 0x9e, 0xd5, 0x6a, 0x49, 0xdd, 0x36, 0x8f, + 0x27, 0x50, 0x28, 0xae, 0x35, 0x01, 0x28, 0x25, 0x2b, 0x5e, 0x50, 0xb1, 0xd1, 0x5e, 0x83, 0xf1, + 0x34, 0x25, 0x57, 0xfc, 0x8c, 0x2d, 0x6a, 0x76, 0x6d, 0x25, 0xe3, 0x48, 0xf1, 0x93, 0xe9, 0x18, + 0x52, 0xb0, 0xcd, 0x1f, 0x6b, 0xa6, 0x37, 0x5c, 0x47, 0x6d, 0xab, 0xb9, 0x0e, 0xbd, 0x26, 0xd3, + 0x30, 0xcc, 0xd5, 0xfe, 0xb1, 0x04, 0x67, 0x23, 0xa3, 0xc0, 0x8a, 0xee, 0xe8, 0xad, 0x03, 0xfc, + 0x28, 0xeb, 0xfd, 0x1b, 0x16, 0x87, 0x8d, 0x3c, 0x5f, 0x7a, 0x00, 0x22, 0xcf, 0xff, 0xb4, 0x00, + 0xdc, 0xb1, 0x90, 0x7c, 0x09, 0x46, 0xf4, 0xd8, 0x3f, 0x12, 0x65, 0x73, 0x5e, 0xce, 0xdd, 0x9c, + 0xdc, 0x7f, 0x31, 0x74, 0x94, 0x89, 0xa7, 0x62, 0x82, 0x21, 0x71, 0xa1, 0xba, 0xa9, 0xdb, 0x36, + 0xd3, 0x85, 0x72, 0x1f, 0x72, 0x24, 0x98, 0xf3, 0x6e, 0xbe, 0x20, 0xa1, 0x31, 0x64, 0xa2, 0xfd, + 0x43, 0x01, 0x46, 0x9b, 0xb6, 0x65, 0x5a, 0x4e, 0xeb, 0x18, 0x43, 0xce, 0xdf, 0x80, 0x8a, 0x6f, + 0x5b, 0x26, 0x1d, 0x70, 0x1e, 0x17, 0x2b, 0x08, 0x03, 0x40, 0x81, 0x93, 0x8c, 0x61, 0x5f, 0x3a, + 0x40, 0x0c, 0xfb, 0x9f, 0x0f, 0x81, 0x74, 0x4e, 0x25, 0x5d, 0xa8, 0xb5, 0x54, 0x68, 0x6c, 0xf9, + 0x8d, 0x57, 0x72, 0x84, 0x55, 0x4b, 0x04, 0xd9, 0x16, 0xb3, 0x6e, 0x98, 0x88, 0x11, 0x27, 0x42, + 0x93, 0xbf, 0xc5, 0x9c, 0xcf, 0xf9, 0x5b, 0x4c, 0xc1, 0xae, 0xf7, 0xc7, 0x98, 0x3a, 0x94, 0xb7, + 0x82, 0xa0, 0x23, 0xc7, 0xd5, 0xe0, 0xde, 0xc7, 0x51, 0x64, 0x0f, 0xa1, 0x8d, 0xb0, 0x77, 0xe4, + 0xd0, 0x8c, 0x85, 0xa3, 0x87, 0x7f, 0x63, 0x9a, 
0xcb, 0x75, 0xd0, 0x1d, 0x67, 0xc1, 0xde, 0x91, + 0x43, 0x93, 0x2f, 0x42, 0x3d, 0xf0, 0x74, 0xc7, 0xdf, 0x74, 0xbd, 0x36, 0xf5, 0xe4, 0xee, 0x70, + 0x21, 0xc7, 0x9f, 0x21, 0xd7, 0x22, 0x34, 0x71, 0x82, 0x96, 0x48, 0xc2, 0x38, 0x37, 0xb2, 0x0d, + 0xd5, 0xae, 0x29, 0x2a, 0x26, 0xcd, 0x26, 0xb3, 0x79, 0x7e, 0xf6, 0x19, 0x3b, 0xc6, 0x56, 0x6f, + 0x18, 0x32, 0x48, 0xfe, 0x78, 0x6c, 0xf8, 0xa8, 0x7e, 0x3c, 0x16, 0xef, 0x8d, 0x59, 0x61, 0x07, + 0x48, 0x5b, 0x6a, 0x94, 0x4e, 0x4b, 0x7a, 0xe1, 0x2c, 0xe4, 0x56, 0xf6, 0x04, 0xcb, 0x7a, 0xa8, + 0x95, 0x3a, 0x2d, 0x54, 0x3c, 0xb4, 0x36, 0x48, 0xeb, 0x36, 0x31, 0x12, 0xbf, 0xe7, 0x10, 0x77, + 0x61, 0xa6, 0x0f, 0x36, 0x1f, 0x84, 0xff, 0x89, 0x88, 0x85, 0x07, 0xce, 0xfc, 0x0f, 0x87, 0xf6, + 0xb7, 0x45, 0x28, 0xad, 0x2d, 0x37, 0x45, 0xc8, 0x3f, 0xfe, 0xef, 0x1b, 0xda, 0xdc, 0xb6, 0x3a, + 0x37, 0xa9, 0x67, 0x6d, 0xee, 0xca, 0x4d, 0x6f, 0x2c, 0xe4, 0x5f, 0x9a, 0x02, 0x33, 0x4a, 0x91, + 0x57, 0x60, 0xc4, 0xd0, 0xe7, 0xa8, 0x17, 0x0c, 0xb2, 0xa5, 0xe7, 0x97, 0xfe, 0xe6, 0x66, 0xa3, + 0xe2, 0x98, 0x00, 0x23, 0xeb, 0x00, 0x46, 0x04, 0x5d, 0x3a, 0xb4, 0x21, 0x22, 0x06, 0x1c, 0x03, + 0x22, 0x08, 0xb5, 0x6d, 0x46, 0xca, 0x51, 0xcb, 0x87, 0x41, 0xe5, 0x3d, 0xe7, 0x9a, 0x2a, 0x8b, + 0x11, 0x8c, 0xe6, 0xc0, 0x68, 0xe2, 0x9f, 0x1d, 0xe4, 0x63, 0x50, 0x75, 0x3b, 0xb1, 0xe9, 0xb4, + 0xc6, 0xfd, 0xfd, 0xaa, 0x37, 0x64, 0xda, 0x9d, 0xbd, 0xc9, 0xd1, 0x65, 0xb7, 0x65, 0x19, 0x2a, + 0x01, 0x43, 0x72, 0xa2, 0xc1, 0x10, 0xbf, 0xa9, 0xa3, 0xfe, 0xd8, 0xc1, 0xd7, 0x0e, 0x1e, 0x54, + 0xdf, 0x47, 0x99, 0xa3, 0x7d, 0xb9, 0x0c, 0xd1, 0x99, 0x10, 0xf1, 0x61, 0x48, 0x78, 0x22, 0xcb, + 0x99, 0xfb, 0x58, 0x9d, 0x9e, 0x25, 0x2b, 0xd2, 0x82, 0xd2, 0x6b, 0xee, 0x46, 0xee, 0x89, 0x3b, + 0x76, 0x45, 0x57, 0x58, 0xa9, 0x62, 0x09, 0xc8, 0x38, 0x90, 0x5f, 0x2d, 0xc0, 0x49, 0x3f, 0xad, + 0x74, 0xca, 0xee, 0x80, 0xf9, 0xb5, 0xeb, 0xb4, 0x1a, 0x2b, 0x1d, 0x33, 0xfb, 0x65, 0x63, 0x6f, + 0x5d, 0x98, 0xfc, 0xc5, 0x61, 0x8d, 0xec, 0x4e, 0x8b, 0x39, 0xff, 0x33, 0x97, 0x94, 
0x7f, 0x32, + 0x0d, 0x25, 0x2b, 0xed, 0xab, 0x45, 0xa8, 0xc7, 0x66, 0xeb, 0xdc, 0x3f, 0x82, 0xb9, 0x9d, 0xfa, + 0x11, 0xcc, 0xea, 0xe0, 0x67, 0x97, 0x51, 0xad, 0x8e, 0xfb, 0x5f, 0x30, 0xdf, 0x2f, 0x42, 0x69, + 0x7d, 0x7e, 0x21, 0xb9, 0x5d, 0x2c, 0xdc, 0x87, 0xed, 0xe2, 0x16, 0x0c, 0x6f, 0x74, 0x2d, 0x3b, + 0xb0, 0x9c, 0xdc, 0x41, 0x04, 0xd4, 0x7f, 0x73, 0xe4, 0x5d, 0x5c, 0x81, 0x8a, 0x0a, 0x9e, 0xb4, + 0x60, 0xb8, 0x25, 0xa2, 0xb8, 0xe5, 0xf6, 0xe8, 0x92, 0xd1, 0xe0, 0x04, 0x23, 0xf9, 0x82, 0x0a, + 0x5d, 0xdb, 0x05, 0xf9, 0xe7, 0xed, 0xfb, 0x2e, 0x4d, 0xed, 0x8b, 0x10, 0x6a, 0x01, 0xf7, 0x9f, + 0xf9, 0x3f, 0x17, 0x20, 0xa9, 0xf8, 0xdc, 0xff, 0xde, 0xb4, 0x9d, 0xee, 0x4d, 0xf3, 0x47, 0x31, + 0xf8, 0xb2, 0x3b, 0x94, 0xf6, 0x87, 0x45, 0x18, 0xba, 0x6f, 0x17, 0x3f, 0x69, 0xc2, 0x39, 0x6d, + 0x2e, 0xe7, 0xc4, 0xd8, 0xd7, 0x35, 0xad, 0x9d, 0x72, 0x4d, 0xcb, 0xfb, 0xa7, 0xcf, 0x7b, 0x38, + 0xa6, 0xfd, 0x65, 0x01, 0xe4, 0xb4, 0xbc, 0xe4, 0xf8, 0x81, 0xee, 0x18, 0xfc, 0x87, 0xf3, 0x72, + 0x0d, 0xc8, 0xeb, 0x01, 0x21, 0xbd, 0x84, 0xc4, 0xb2, 0xcf, 0x9f, 0xd5, 0x9c, 0x4f, 0x3e, 0x02, + 0xd5, 0x2d, 0xd7, 0x0f, 0xf8, 0x3c, 0x5f, 0x4c, 0xda, 0x75, 0xae, 0xc8, 0x74, 0x0c, 0x29, 0xd2, + 0x27, 0x85, 0x95, 0xfe, 0x27, 0x85, 0xda, 0xb7, 0x8b, 0x30, 0xf2, 0x5e, 0xb9, 0xbd, 0x9a, 0xe5, + 0xca, 0x57, 0xca, 0xe9, 0xca, 0x57, 0x3e, 0x8c, 0x2b, 0x9f, 0xf6, 0x83, 0x02, 0xc0, 0x7d, 0xbb, + 0x3a, 0x6b, 0x26, 0xbd, 0xec, 0x72, 0xf7, 0xab, 0x6c, 0x1f, 0xbb, 0xdf, 0xab, 0xa8, 0x4f, 0xe2, + 0x1e, 0x76, 0x6f, 0x16, 0x60, 0x4c, 0x4f, 0x78, 0xad, 0xe5, 0x56, 0x2d, 0x53, 0x4e, 0x70, 0xe1, + 0x35, 0xc1, 0x64, 0x3a, 0xa6, 0xd8, 0x92, 0xe7, 0xa3, 0xb0, 0xad, 0xd7, 0xa3, 0x6e, 0xdf, 0x13, + 0x6f, 0x95, 0xab, 0x39, 0x09, 0xca, 0x7b, 0x78, 0x09, 0x96, 0x8e, 0xc4, 0x4b, 0x30, 0x7e, 0xff, + 0xa9, 0x7c, 0xd7, 0xfb, 0x4f, 0x3b, 0x50, 0xdb, 0xf4, 0xdc, 0x36, 0x77, 0xc4, 0x93, 0xff, 0x08, + 0xbd, 0x9c, 0x63, 0x4d, 0x89, 0xfe, 0x8e, 0x1d, 0xd9, 0x78, 0x16, 0x14, 0x3e, 0x46, 0xac, 0xb8, + 0x41, 0xda, 0x15, 0x5c, 
0x87, 0x8e, 0x92, 0x6b, 0x38, 0x97, 0xac, 0x09, 0x74, 0x54, 0x6c, 0x92, + 0xce, 0x77, 0xc3, 0xf7, 0xc7, 0xf9, 0x4e, 0xfb, 0x7e, 0x59, 0x4d, 0x60, 0x0f, 0x5c, 0x84, 0xc0, + 0xf7, 0xfe, 0x95, 0xcb, 0xf4, 0x7d, 0xc8, 0xe1, 0xfb, 0x78, 0x1f, 0xb2, 0x7a, 0x34, 0xf7, 0x21, + 0x6b, 0x87, 0xb8, 0x0f, 0xb9, 0x57, 0x82, 0xd4, 0xa6, 0xeb, 0xfd, 0xa3, 0x8d, 0xff, 0x50, 0x47, + 0x1b, 0x6f, 0x15, 0x21, 0x9a, 0x45, 0x0e, 0xe9, 0xfa, 0xf1, 0x32, 0x54, 0xdb, 0xfa, 0xed, 0x79, + 0x6a, 0xeb, 0xbb, 0x79, 0xfe, 0x0a, 0xb9, 0x22, 0x31, 0x30, 0x44, 0x23, 0x3e, 0x80, 0x15, 0x46, + 0x66, 0xce, 0x6d, 0xaa, 0x8e, 0x82, 0x3c, 0x0b, 0x63, 0x58, 0xf4, 0x8e, 0x31, 0x36, 0xda, 0x5f, + 0x14, 0x41, 0x86, 0xf0, 0x26, 0x14, 0x2a, 0x9b, 0xd6, 0x6d, 0x6a, 0xe6, 0x76, 0x83, 0x8c, 0xfd, + 0xab, 0x57, 0xd8, 0xe2, 0x79, 0x02, 0x0a, 0x74, 0x6e, 0x64, 0x15, 0x67, 0x2b, 0x52, 0x7e, 0x39, + 0x8c, 0xac, 0xf1, 0x33, 0x1a, 0x69, 0x64, 0x15, 0x49, 0xa8, 0x78, 0x08, 0x9b, 0x2e, 0x3f, 0xe0, + 0x96, 0x22, 0xcd, 0x63, 0xd3, 0x8d, 0x1d, 0x94, 0x2b, 0x9b, 0xae, 0x2f, 0x2e, 0x44, 0x4b, 0x1e, + 0x8d, 0xcf, 0x7f, 0xef, 0x87, 0x17, 0x1e, 0xfa, 0xc1, 0x0f, 0x2f, 0x3c, 0xf4, 0xce, 0x0f, 0x2f, + 0x3c, 0xf4, 0xe5, 0xfd, 0x0b, 0x85, 0xef, 0xed, 0x5f, 0x28, 0xfc, 0x60, 0xff, 0x42, 0xe1, 0x9d, + 0xfd, 0x0b, 0x85, 0xbf, 0xdb, 0xbf, 0x50, 0xf8, 0xa5, 0xbf, 0xbf, 0xf0, 0xd0, 0xe7, 0x9e, 0x8b, + 0xaa, 0x30, 0xad, 0xaa, 0x30, 0xad, 0x18, 0x4e, 0x77, 0xb6, 0x5b, 0xd3, 0xac, 0x0a, 0x51, 0x8a, + 0xaa, 0xc2, 0xbf, 0x07, 0x00, 0x00, 0xff, 0xff, 0xf1, 0x80, 0x4f, 0xc1, 0x9f, 0x93, 0x00, 0x00, } func (m *AbstractPodTemplate) Marshal() (dAtA []byte, err error) { @@ -6367,6 +6368,9 @@ func (m *MonoVertexStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x50 i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) i-- dAtA[i] = 0x48 @@ -8785,6 +8789,9 @@ func (m *VertexStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i 
var l int _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x48 i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) i-- dAtA[i] = 0x40 @@ -10208,6 +10215,7 @@ func (m *MonoVertexStatus) Size() (n int) { l = m.LastScaledAt.Size() n += 1 + l + sovGenerated(uint64(l)) n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) return n } @@ -11103,6 +11111,7 @@ func (m *VertexStatus) Size() (n int) { l = m.LastScaledAt.Size() n += 1 + l + sovGenerated(uint64(l)) n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) return n } @@ -12015,6 +12024,7 @@ func (this *MonoVertexStatus) String() string { `LastUpdated:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdated), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, `LastScaledAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastScaledAt), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `}`, }, "") return s @@ -12599,6 +12609,7 @@ func (this *VertexStatus) String() string { `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `LastScaledAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastScaledAt), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `}`, }, "") return s @@ -23278,6 +23289,25 @@ func (m *MonoVertexStatus) Unmarshal(dAtA []byte) error { break } } + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas 
|= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -30427,6 +30457,25 @@ func (m *VertexStatus) Unmarshal(dAtA []byte) error { break } } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/pkg/apis/numaflow/v1alpha1/generated.proto b/pkg/apis/numaflow/v1alpha1/generated.proto index b8935365e6..8c5a6db0f2 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.proto +++ b/pkg/apis/numaflow/v1alpha1/generated.proto @@ -854,6 +854,7 @@ message Metadata { // +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` // +kubebuilder:printcolumn:name="Desired",type=string,JSONPath=`.spec.replicas` // +kubebuilder:printcolumn:name="Current",type=string,JSONPath=`.status.replicas` +// +kubebuilder:printcolumn:name="Ready",type=string,JSONPath=`.status.readyReplicas` // +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` // +kubebuilder:printcolumn:name="Reason",type=string,JSONPath=`.status.reason` // +kubebuilder:printcolumn:name="Message",type=string,JSONPath=`.status.message` @@ -934,21 +935,36 @@ message MonoVertexSpec { message MonoVertexStatus { optional Status status = 1; + // +optional optional string phase = 2; + // Total number of non-terminated pods targeted by this MonoVertex (their labels match the selector). 
+ // +optional optional uint32 replicas = 3; + // +optional optional string selector = 4; + // +optional optional string reason = 5; + // +optional optional string message = 6; + // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdated = 7; + // Time of last scaling operation. + // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaledAt = 8; + // The generation observed by the MonoVertex controller. + // +optional optional int64 observedGeneration = 9; + + // The number of pods targeted by this MonoVertex with a Ready Condition. + // +optional + optional uint32 readyReplicas = 10; } message NativeRedis { @@ -1148,24 +1164,35 @@ message PipelineSpec { message PipelineStatus { optional Status status = 1; + // +optional optional string phase = 2; + // +optional optional string message = 3; + // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdated = 4; + // +optional optional uint32 vertexCount = 5; + // +optional optional uint32 sourceCount = 6; + // +optional optional uint32 sinkCount = 7; + // +optional optional uint32 udfCount = 8; + // +optional optional uint32 mapUDFCount = 9; + // +optional optional uint32 reduceUDFCount = 10; + // The generation observed by the Pipeline controller. 
+ // +optional optional int64 observedGeneration = 11; } @@ -1533,6 +1560,7 @@ message UDTransformer { // +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` // +kubebuilder:printcolumn:name="Desired",type=string,JSONPath=`.spec.replicas` // +kubebuilder:printcolumn:name="Current",type=string,JSONPath=`.status.replicas` +// +kubebuilder:printcolumn:name="Ready",type=string,JSONPath=`.status.readyReplicas` // +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` // +kubebuilder:printcolumn:name="Reason",type=string,JSONPath=`.status.reason` // +kubebuilder:printcolumn:name="Message",type=string,JSONPath=`.status.message` @@ -1613,19 +1641,33 @@ message VertexSpec { message VertexStatus { optional Status status = 1; + // +optional optional string phase = 2; + // Total number of non-terminated pods targeted by this Vertex (their labels match the selector). + // +optional optional uint32 replicas = 3; + // +optional optional string selector = 4; + // +optional optional string reason = 5; + // +optional optional string message = 6; + // Time of last scaling operation. + // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaledAt = 7; + // The generation observed by the Vertex controller. + // +optional optional int64 observedGeneration = 8; + + // The number of pods targeted by this Vertex with a Ready Condition. 
+ // +optional + optional uint32 readyReplicas = 9; } message VertexTemplate { diff --git a/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go index 81c9cf56e6..15544b81b6 100644 --- a/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go +++ b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go @@ -56,6 +56,7 @@ const ( // +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` // +kubebuilder:printcolumn:name="Desired",type=string,JSONPath=`.spec.replicas` // +kubebuilder:printcolumn:name="Current",type=string,JSONPath=`.status.replicas` +// +kubebuilder:printcolumn:name="Ready",type=string,JSONPath=`.status.readyReplicas` // +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` // +kubebuilder:printcolumn:name="Reason",type=string,JSONPath=`.status.reason` // +kubebuilder:printcolumn:name="Message",type=string,JSONPath=`.status.message` @@ -463,15 +464,29 @@ func (mvl MonoVertexLimits) GetReadTimeout() time.Duration { } type MonoVertexStatus struct { - Status `json:",inline" protobuf:"bytes,1,opt,name=status"` - Phase MonoVertexPhase `json:"phase,omitempty" protobuf:"bytes,2,opt,name=phase,casttype=MonoVertexPhase"` - Replicas uint32 `json:"replicas" protobuf:"varint,3,opt,name=replicas"` - Selector string `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` - Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` - Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` - LastUpdated metav1.Time `json:"lastUpdated,omitempty" protobuf:"bytes,7,opt,name=lastUpdated"` - LastScaledAt metav1.Time `json:"lastScaledAt,omitempty" protobuf:"bytes,8,opt,name=lastScaledAt"` - ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,9,opt,name=observedGeneration"` + Status `json:",inline" protobuf:"bytes,1,opt,name=status"` + // +optional + Phase MonoVertexPhase `json:"phase,omitempty" 
protobuf:"bytes,2,opt,name=phase,casttype=MonoVertexPhase"` + // Total number of non-terminated pods targeted by this MonoVertex (their labels match the selector). + // +optional + Replicas uint32 `json:"replicas" protobuf:"varint,3,opt,name=replicas"` + // +optional + Selector string `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` + // +optional + LastUpdated metav1.Time `json:"lastUpdated,omitempty" protobuf:"bytes,7,opt,name=lastUpdated"` + // Time of last scaling operation. + // +optional + LastScaledAt metav1.Time `json:"lastScaledAt,omitempty" protobuf:"bytes,8,opt,name=lastScaledAt"` + // The generation observed by the MonoVertex controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,9,opt,name=observedGeneration"` + // The number of pods targeted by this MonoVertex with a Ready Condition. 
+ // +optional + ReadyReplicas uint32 `json:"readyReplicas,omitempty" protobuf:"varint,10,opt,name=readyReplicas"` } // SetObservedGeneration sets the Status ObservedGeneration diff --git a/pkg/apis/numaflow/v1alpha1/openapi_generated.go b/pkg/apis/numaflow/v1alpha1/openapi_generated.go index 0d129cdab1..8ed0f228c0 100644 --- a/pkg/apis/numaflow/v1alpha1/openapi_generated.go +++ b/pkg/apis/numaflow/v1alpha1/openapi_generated.go @@ -3344,9 +3344,10 @@ func schema_pkg_apis_numaflow_v1alpha1_MonoVertexStatus(ref common.ReferenceCall }, "replicas": { SchemaProps: spec.SchemaProps{ - Default: 0, - Type: []string{"integer"}, - Format: "int64", + Description: "Total number of non-terminated pods targeted by this MonoVertex (their labels match the selector).", + Default: 0, + Type: []string{"integer"}, + Format: "int64", }, }, "selector": { @@ -3374,17 +3375,25 @@ func schema_pkg_apis_numaflow_v1alpha1_MonoVertexStatus(ref common.ReferenceCall }, "lastScaledAt": { SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + Description: "Time of last scaling operation.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, "observedGeneration": { SchemaProps: spec.SchemaProps{ - Type: []string{"integer"}, - Format: "int64", + Description: "The generation observed by the MonoVertex controller.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "readyReplicas": { + SchemaProps: spec.SchemaProps{ + Description: "The number of pods targeted by this MonoVertex with a Ready Condition.", + Type: []string{"integer"}, + Format: "int64", }, }, }, - Required: []string{"replicas"}, }, }, Dependencies: []string{ @@ -4060,8 +4069,9 @@ func schema_pkg_apis_numaflow_v1alpha1_PipelineStatus(ref common.ReferenceCallba }, "observedGeneration": { SchemaProps: spec.SchemaProps{ - Type: []string{"integer"}, - Format: "int64", + Description: "The generation observed by the Pipeline controller.", + Type: []string{"integer"}, + Format: "int64", }, 
}, }, @@ -5651,9 +5661,10 @@ func schema_pkg_apis_numaflow_v1alpha1_VertexStatus(ref common.ReferenceCallback }, "replicas": { SchemaProps: spec.SchemaProps{ - Default: 0, - Type: []string{"integer"}, - Format: "int64", + Description: "Total number of non-terminated pods targeted by this Vertex (their labels match the selector).", + Default: 0, + Type: []string{"integer"}, + Format: "int64", }, }, "selector": { @@ -5676,17 +5687,25 @@ func schema_pkg_apis_numaflow_v1alpha1_VertexStatus(ref common.ReferenceCallback }, "lastScaledAt": { SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + Description: "Time of last scaling operation.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, "observedGeneration": { SchemaProps: spec.SchemaProps{ - Type: []string{"integer"}, - Format: "int64", + Description: "The generation observed by the Vertex controller.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "readyReplicas": { + SchemaProps: spec.SchemaProps{ + Description: "The number of pods targeted by this Vertex with a Ready Condition.", + Type: []string{"integer"}, + Format: "int64", }, }, }, - Required: []string{"phase", "replicas"}, }, }, Dependencies: []string{ diff --git a/pkg/apis/numaflow/v1alpha1/pipeline_types.go b/pkg/apis/numaflow/v1alpha1/pipeline_types.go index 07d62f673d..4604239674 100644 --- a/pkg/apis/numaflow/v1alpha1/pipeline_types.go +++ b/pkg/apis/numaflow/v1alpha1/pipeline_types.go @@ -611,17 +611,28 @@ type PipelineLimits struct { } type PipelineStatus struct { - Status `json:",inline" protobuf:"bytes,1,opt,name=status"` - Phase PipelinePhase `json:"phase,omitempty" protobuf:"bytes,2,opt,name=phase,casttype=PipelinePhase"` - Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` - LastUpdated metav1.Time `json:"lastUpdated,omitempty" protobuf:"bytes,4,opt,name=lastUpdated"` - VertexCount *uint32 `json:"vertexCount,omitempty" 
protobuf:"varint,5,opt,name=vertexCount"` - SourceCount *uint32 `json:"sourceCount,omitempty" protobuf:"varint,6,opt,name=sourceCount"` - SinkCount *uint32 `json:"sinkCount,omitempty" protobuf:"varint,7,opt,name=sinkCount"` - UDFCount *uint32 `json:"udfCount,omitempty" protobuf:"varint,8,opt,name=udfCount"` - MapUDFCount *uint32 `json:"mapUDFCount,omitempty" protobuf:"varint,9,opt,name=mapUDFCount"` - ReduceUDFCount *uint32 `json:"reduceUDFCount,omitempty" protobuf:"varint,10,opt,name=reduceUDFCount"` - ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,11,opt,name=observedGeneration"` + Status `json:",inline" protobuf:"bytes,1,opt,name=status"` + // +optional + Phase PipelinePhase `json:"phase,omitempty" protobuf:"bytes,2,opt,name=phase,casttype=PipelinePhase"` + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` + // +optional + LastUpdated metav1.Time `json:"lastUpdated,omitempty" protobuf:"bytes,4,opt,name=lastUpdated"` + // +optional + VertexCount *uint32 `json:"vertexCount,omitempty" protobuf:"varint,5,opt,name=vertexCount"` + // +optional + SourceCount *uint32 `json:"sourceCount,omitempty" protobuf:"varint,6,opt,name=sourceCount"` + // +optional + SinkCount *uint32 `json:"sinkCount,omitempty" protobuf:"varint,7,opt,name=sinkCount"` + // +optional + UDFCount *uint32 `json:"udfCount,omitempty" protobuf:"varint,8,opt,name=udfCount"` + // +optional + MapUDFCount *uint32 `json:"mapUDFCount,omitempty" protobuf:"varint,9,opt,name=mapUDFCount"` + // +optional + ReduceUDFCount *uint32 `json:"reduceUDFCount,omitempty" protobuf:"varint,10,opt,name=reduceUDFCount"` + // The generation observed by the Pipeline controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,11,opt,name=observedGeneration"` } // SetVertexCounts sets the counts of vertices. 
diff --git a/pkg/apis/numaflow/v1alpha1/vertex_types.go b/pkg/apis/numaflow/v1alpha1/vertex_types.go index 5b97700944..2e78357bb9 100644 --- a/pkg/apis/numaflow/v1alpha1/vertex_types.go +++ b/pkg/apis/numaflow/v1alpha1/vertex_types.go @@ -60,6 +60,7 @@ const NumaflowRustBinary = "/bin/numaflow-rs" // +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` // +kubebuilder:printcolumn:name="Desired",type=string,JSONPath=`.spec.replicas` // +kubebuilder:printcolumn:name="Current",type=string,JSONPath=`.status.replicas` +// +kubebuilder:printcolumn:name="Ready",type=string,JSONPath=`.status.readyReplicas` // +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` // +kubebuilder:printcolumn:name="Reason",type=string,JSONPath=`.status.reason` // +kubebuilder:printcolumn:name="Message",type=string,JSONPath=`.status.message` @@ -708,14 +709,27 @@ func (v VertexSpec) getType() containerSupplier { } type VertexStatus struct { - Status `json:",inline" protobuf:"bytes,1,opt,name=status"` - Phase VertexPhase `json:"phase" protobuf:"bytes,2,opt,name=phase,casttype=VertexPhase"` - Replicas uint32 `json:"replicas" protobuf:"varint,3,opt,name=replicas"` - Selector string `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` - Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` - Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` - LastScaledAt metav1.Time `json:"lastScaledAt,omitempty" protobuf:"bytes,7,opt,name=lastScaledAt"` - ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,8,opt,name=observedGeneration"` + Status `json:",inline" protobuf:"bytes,1,opt,name=status"` + // +optional + Phase VertexPhase `json:"phase" protobuf:"bytes,2,opt,name=phase,casttype=VertexPhase"` + // Total number of non-terminated pods targeted by this Vertex (their labels match the selector). 
+ // +optional + Replicas uint32 `json:"replicas" protobuf:"varint,3,opt,name=replicas"` + // +optional + Selector string `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` + // Time of last scaling operation. + // +optional + LastScaledAt metav1.Time `json:"lastScaledAt,omitempty" protobuf:"bytes,7,opt,name=lastScaledAt"` + // The generation observed by the Vertex controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,8,opt,name=observedGeneration"` + // The number of pods targeted by this Vertex with a Ready Condition. + // +optional + ReadyReplicas uint32 `json:"readyReplicas,omitempty" protobuf:"varint,9,opt,name=readyReplicas"` } func (vs *VertexStatus) MarkPhase(phase VertexPhase, reason, message string) { diff --git a/pkg/metrics/metrics_server.go b/pkg/metrics/metrics_server.go index 7cafaa9070..651ffa52b6 100644 --- a/pkg/metrics/metrics_server.go +++ b/pkg/metrics/metrics_server.go @@ -241,6 +241,14 @@ func (ms *metricsServer) Start(ctx context.Context) (func(ctx context.Context) e mux := http.NewServeMux() mux.Handle("/metrics", promhttp.Handler()) mux.HandleFunc("/readyz", func(w http.ResponseWriter, r *http.Request) { + for _, ex := range ms.healthCheckExecutors { + if err := ex(); err != nil { + log.Errorw("Failed to execute sidecar health check", zap.Error(err)) + w.WriteHeader(http.StatusInternalServerError) + _, _ = w.Write([]byte(err.Error())) + return + } + } w.WriteHeader(http.StatusNoContent) }) mux.HandleFunc("/livez", func(w http.ResponseWriter, r *http.Request) { diff --git a/pkg/reconciler/monovertex/controller.go b/pkg/reconciler/monovertex/controller.go index 3bec61394e..104b7d9728 100644 --- a/pkg/reconciler/monovertex/controller.go +++ b/pkg/reconciler/monovertex/controller.go @@ -484,7 
+484,12 @@ func (mr *monoVertexReconciler) checkChildrenResourceStatus(ctx context.Context, monoVtx.Status.MarkPodNotHealthy("ListMonoVerticesPodsFailed", err.Error()) return fmt.Errorf("failed to get pods of a vertex: %w", err) } - if healthy, reason, msg := reconciler.CheckVertexPodsStatus(&podList); healthy { + readyPods := reconciler.NumOfReadyPods(podList) + if readyPods > int(monoVtx.Status.Replicas) { // It might happen in some corner cases, such as during rollout + readyPods = int(monoVtx.Status.Replicas) + } + monoVtx.Status.ReadyReplicas = uint32(readyPods) + if healthy, reason, msg := reconciler.CheckPodsStatus(&podList); healthy { monoVtx.Status.MarkPodHealthy(reason, msg) } else { // Do not need to explicitly requeue, since the it keeps watching the status change of the pods diff --git a/pkg/reconciler/util.go b/pkg/reconciler/util.go index 70f0fe7376..6d27a749bb 100644 --- a/pkg/reconciler/util.go +++ b/pkg/reconciler/util.go @@ -30,13 +30,13 @@ import ( // which should be considered as unhealthy var unhealthyWaitingStatus = []string{"CrashLoopBackOff", "ImagePullBackOff"} -// CheckVertexPodsStatus checks the status by iterating over pods objects -func CheckVertexPodsStatus(vertexPods *corev1.PodList) (healthy bool, reason string, message string) { +// CheckPodsStatus checks the status by iterating over pods objects +func CheckPodsStatus(pods *corev1.PodList) (healthy bool, reason string, message string) { // TODO: Need to revisit later. 
- if len(vertexPods.Items) == 0 { + if len(pods.Items) == 0 { return true, "NoPodsFound", "No Pods found" } else { - for _, pod := range vertexPods.Items { + for _, pod := range pods.Items { if podHealthy, msg := isPodHealthy(&pod); !podHealthy { message = fmt.Sprintf("Pod %s is unhealthy", pod.Name) reason = "Pod" + msg @@ -45,7 +45,7 @@ func CheckVertexPodsStatus(vertexPods *corev1.PodList) (healthy bool, reason str } } } - return true, "Running", "All vertex pods are healthy" + return true, "Running", "All pods are healthy" } func isPodHealthy(pod *corev1.Pod) (healthy bool, reason string) { @@ -60,6 +60,23 @@ func isPodHealthy(pod *corev1.Pod) (healthy bool, reason string) { return true, "" } +func NumOfReadyPods(pods corev1.PodList) int { + result := 0 + for _, pod := range pods.Items { + ready := true + for _, s := range pod.Status.ContainerStatuses { + if !s.Ready { + ready = false + break + } + } + if ready { + result++ + } + } + return result +} + // CheckVertexStatus will calculate the status of the vertices and return the status and reason func CheckVertexStatus(vertices *dfv1.VertexList) (healthy bool, reason string, message string) { for _, vertex := range vertices.Items { diff --git a/pkg/reconciler/util_test.go b/pkg/reconciler/util_test.go index 36fbbfa5c5..51348f5a32 100644 --- a/pkg/reconciler/util_test.go +++ b/pkg/reconciler/util_test.go @@ -36,8 +36,8 @@ func TestCheckVertexPodsStatus(t *testing.T) { }}, }}, } - done, reason, message := CheckVertexPodsStatus(&pods) - assert.Equal(t, "All vertex pods are healthy", message) + done, reason, message := CheckPodsStatus(&pods) + assert.Equal(t, "All pods are healthy", message) assert.Equal(t, "Running", reason) assert.True(t, done) }) @@ -52,7 +52,7 @@ func TestCheckVertexPodsStatus(t *testing.T) { }, }, } - done, reason, message := CheckVertexPodsStatus(&pods) + done, reason, message := CheckPodsStatus(&pods) assert.Equal(t, "Pod test-pod is unhealthy", message) assert.Equal(t, 
"PodCrashLoopBackOff", reason) assert.False(t, done) @@ -62,7 +62,7 @@ func TestCheckVertexPodsStatus(t *testing.T) { pods := corev1.PodList{ Items: []corev1.Pod{}, } - done, reason, message := CheckVertexPodsStatus(&pods) + done, reason, message := CheckPodsStatus(&pods) assert.Equal(t, "No Pods found", message) assert.Equal(t, "NoPodsFound", reason) assert.True(t, done) @@ -254,3 +254,77 @@ func TestGetStatefulSetStatus(t *testing.T) { assert.Equal(t, "Waiting for statefulset spec update to be observed...", msg) }) } + +func TestNumOfReadyPods(t *testing.T) { + pods := corev1.PodList{ + Items: []corev1.Pod{ + { + Status: corev1.PodStatus{ + ContainerStatuses: []corev1.ContainerStatus{ + { + Ready: true, + }, + { + Ready: true, + }, + }, + }, + }, + { + Status: corev1.PodStatus{ + ContainerStatuses: []corev1.ContainerStatus{ + { + Ready: false, + }, + { + Ready: true, + }, + }, + }, + }, + { + Status: corev1.PodStatus{ + ContainerStatuses: []corev1.ContainerStatus{ + { + Ready: true, + }, + { + Ready: false, + }, + }, + }, + }, + { + Status: corev1.PodStatus{ + ContainerStatuses: []corev1.ContainerStatus{ + { + Ready: true, + }, + { + Ready: true, + }, + { + Ready: true, + }, + }, + }, + }, + { + Status: corev1.PodStatus{ + ContainerStatuses: []corev1.ContainerStatus{ + { + Ready: false, + }, + { + Ready: false, + }, + { + Ready: false, + }, + }, + }, + }, + }, + } + assert.Equal(t, 2, NumOfReadyPods(pods)) +} diff --git a/pkg/reconciler/vertex/controller.go b/pkg/reconciler/vertex/controller.go index f9e82436d1..c1e6b8febb 100644 --- a/pkg/reconciler/vertex/controller.go +++ b/pkg/reconciler/vertex/controller.go @@ -330,7 +330,12 @@ func (r *vertexReconciler) reconcile(ctx context.Context, vertex *dfv1.Vertex) ( vertex.Status.MarkPodNotHealthy("ListVerticesPodsFailed", err.Error()) return ctrl.Result{}, fmt.Errorf("failed to get pods of a vertex: %w", err) } - if healthy, reason, msg := reconciler.CheckVertexPodsStatus(&podList); healthy { + readyPods := 
reconciler.NumOfReadyPods(podList) + if readyPods > desiredReplicas { // It might happen in some corner cases, such as during rollout + readyPods = desiredReplicas + } + vertex.Status.ReadyReplicas = uint32(readyPods) + if healthy, reason, msg := reconciler.CheckPodsStatus(&podList); healthy { vertex.Status.MarkPodHealthy(reason, msg) } else { // Do not need to explicitly requeue, since the it keeps watching the status change of the pods diff --git a/rust/monovertex/src/metrics.rs b/rust/monovertex/src/metrics.rs index fd612ba12a..3befdf04b7 100644 --- a/rust/monovertex/src/metrics.rs +++ b/rust/monovertex/src/metrics.rs @@ -253,7 +253,7 @@ fn metrics_router(metrics_state: MetricsState) -> Router { Router::new() .route("/metrics", get(metrics_handler)) .route("/livez", get(livez)) - .route("/readyz", get(readyz)) + .route("/readyz", get(sidecar_livez)) .route("/sidecar-livez", get(sidecar_livez)) .with_state(metrics_state) } @@ -262,10 +262,6 @@ async fn livez() -> impl IntoResponse { StatusCode::NO_CONTENT } -async fn readyz() -> impl IntoResponse { - StatusCode::NO_CONTENT -} - async fn sidecar_livez(State(mut state): State) -> impl IntoResponse { if !state.source_client.is_ready().await { error!("Source client is not available"); diff --git a/rust/numaflow-models/src/models/mono_vertex_status.rs b/rust/numaflow-models/src/models/mono_vertex_status.rs index 86c0cb489f..b0773192e8 100644 --- a/rust/numaflow-models/src/models/mono_vertex_status.rs +++ b/rust/numaflow-models/src/models/mono_vertex_status.rs @@ -27,20 +27,25 @@ pub struct MonoVertexStatus { pub last_updated: Option, #[serde(rename = "message", skip_serializing_if = "Option::is_none")] pub message: Option, + /// The generation observed by the MonoVertex controller. 
#[serde(rename = "observedGeneration", skip_serializing_if = "Option::is_none")] pub observed_generation: Option, #[serde(rename = "phase", skip_serializing_if = "Option::is_none")] pub phase: Option, + /// The number of pods targeted by this MonoVertex with a Ready Condition. + #[serde(rename = "readyReplicas", skip_serializing_if = "Option::is_none")] + pub ready_replicas: Option, #[serde(rename = "reason", skip_serializing_if = "Option::is_none")] pub reason: Option, - #[serde(rename = "replicas")] - pub replicas: i64, + /// Total number of non-terminated pods targeted by this MonoVertex (their labels match the selector). + #[serde(rename = "replicas", skip_serializing_if = "Option::is_none")] + pub replicas: Option, #[serde(rename = "selector", skip_serializing_if = "Option::is_none")] pub selector: Option, } impl MonoVertexStatus { - pub fn new(replicas: i64) -> MonoVertexStatus { + pub fn new() -> MonoVertexStatus { MonoVertexStatus { conditions: None, last_scaled_at: None, @@ -48,8 +53,9 @@ impl MonoVertexStatus { message: None, observed_generation: None, phase: None, + ready_replicas: None, reason: None, - replicas, + replicas: None, selector: None, } } diff --git a/rust/numaflow-models/src/models/pipeline_status.rs b/rust/numaflow-models/src/models/pipeline_status.rs index 6e061bbdbe..27d6049658 100644 --- a/rust/numaflow-models/src/models/pipeline_status.rs +++ b/rust/numaflow-models/src/models/pipeline_status.rs @@ -27,6 +27,7 @@ pub struct PipelineStatus { pub map_udf_count: Option, #[serde(rename = "message", skip_serializing_if = "Option::is_none")] pub message: Option, + /// The generation observed by the Pipeline controller. 
#[serde(rename = "observedGeneration", skip_serializing_if = "Option::is_none")] pub observed_generation: Option, #[serde(rename = "phase", skip_serializing_if = "Option::is_none")] diff --git a/rust/numaflow-models/src/models/vertex_status.rs b/rust/numaflow-models/src/models/vertex_status.rs index b0c9cd0e9a..326a83c9b7 100644 --- a/rust/numaflow-models/src/models/vertex_status.rs +++ b/rust/numaflow-models/src/models/vertex_status.rs @@ -25,28 +25,34 @@ pub struct VertexStatus { pub last_scaled_at: Option, #[serde(rename = "message", skip_serializing_if = "Option::is_none")] pub message: Option, + /// The generation observed by the Vertex controller. #[serde(rename = "observedGeneration", skip_serializing_if = "Option::is_none")] pub observed_generation: Option, - #[serde(rename = "phase")] - pub phase: String, + #[serde(rename = "phase", skip_serializing_if = "Option::is_none")] + pub phase: Option, + /// The number of pods targeted by this Vertex with a Ready Condition. + #[serde(rename = "readyReplicas", skip_serializing_if = "Option::is_none")] + pub ready_replicas: Option, #[serde(rename = "reason", skip_serializing_if = "Option::is_none")] pub reason: Option, - #[serde(rename = "replicas")] - pub replicas: i64, + /// Total number of non-terminated pods targeted by this Vertex (their labels match the selector). 
+ #[serde(rename = "replicas", skip_serializing_if = "Option::is_none")] + pub replicas: Option, #[serde(rename = "selector", skip_serializing_if = "Option::is_none")] pub selector: Option, } impl VertexStatus { - pub fn new(phase: String, replicas: i64) -> VertexStatus { + pub fn new() -> VertexStatus { VertexStatus { conditions: None, last_scaled_at: None, message: None, observed_generation: None, - phase, + phase: None, + ready_replicas: None, reason: None, - replicas, + replicas: None, selector: None, } } From a77c9391e9e6dbdd00cbc50376b90b99eebc6cc5 Mon Sep 17 00:00:00 2001 From: Vigith Maurice Date: Thu, 29 Aug 2024 20:01:10 -0700 Subject: [PATCH 037/188] fix: add latency metrics for mvtx (#2013) Signed-off-by: Vigith Maurice --- rust/monovertex/src/forwarder.rs | 31 ++++++++++- rust/monovertex/src/metrics.rs | 96 +++++++++++++++++++++++--------- 2 files changed, 98 insertions(+), 29 deletions(-) diff --git a/rust/monovertex/src/forwarder.rs b/rust/monovertex/src/forwarder.rs index 8f61b68ff1..e3b3c5285f 100644 --- a/rust/monovertex/src/forwarder.rs +++ b/rust/monovertex/src/forwarder.rs @@ -106,7 +106,7 @@ impl Forwarder { } forward_metrics() - .e2e_processing_time + .e2e_time .get_or_create(&self.common_labels) .observe(start_time.elapsed().as_micros() as f64); } @@ -127,6 +127,10 @@ impl Forwarder { messages.len(), start_time.elapsed().as_millis() ); + forward_metrics() + .read_time + .get_or_create(&self.common_labels) + .observe(start_time.elapsed().as_micros() as f64); // read returned 0 messages, nothing more to be done. 
if messages.is_empty() { @@ -192,6 +196,10 @@ impl Forwarder { "Transformer latency - {}ms", start_time.elapsed().as_millis() ); + forward_metrics() + .transform_time + .get_or_create(&self.common_labels) + .observe(start_time.elapsed().as_micros() as f64); Ok(results) } @@ -204,6 +212,9 @@ impl Forwarder { return Ok(()); } + // this start time is for tracking the total time taken + let start_time_e2e = tokio::time::Instant::now(); + let mut attempts = 0; let mut error_map = HashMap::new(); let mut fallback_msgs = Vec::new(); @@ -215,7 +226,11 @@ impl Forwarder { let start_time = tokio::time::Instant::now(); match self.sink_client.sink_fn(messages_to_send.clone()).await { Ok(response) => { - debug!("Sink latency - {}ms", start_time.elapsed().as_millis()); + debug!( + attempts=attempts, + "Sink latency - {}ms", + start_time.elapsed().as_millis() + ); attempts += 1; // create a map of id to result, since there is no strict requirement @@ -275,6 +290,10 @@ impl Forwarder { self.handle_fallback_messages(fallback_msgs).await?; } + forward_metrics() + .sink_time + .get_or_create(&self.common_labels) + .observe(start_time_e2e.elapsed().as_micros() as f64); forward_metrics() .sink_write_total .get_or_create(&self.common_labels) @@ -379,8 +398,16 @@ impl Forwarder { async fn acknowledge_messages(&mut self, offsets: Vec) -> Result<()> { let n = offsets.len(); let start_time = tokio::time::Instant::now(); + self.source_client.ack_fn(offsets).await?; + debug!("Ack latency - {}ms", start_time.elapsed().as_millis()); + + forward_metrics() + .ack_time + .get_or_create(&self.common_labels) + .observe(start_time.elapsed().as_micros() as f64); + forward_metrics() .ack_total .get_or_create(&self.common_labels) diff --git a/rust/monovertex/src/metrics.rs b/rust/monovertex/src/metrics.rs index 3befdf04b7..712118166b 100644 --- a/rust/monovertex/src/metrics.rs +++ b/rust/monovertex/src/metrics.rs @@ -29,7 +29,7 @@ use prometheus_client::registry::Registry; // Define the labels for the 
metrics // Note: Please keep consistent with the definitions in MonoVertex daemon -const VERTEX_NAME_LABEL: &str = "mvtx_name"; +const MVTX_NAME_LABEL: &str = "mvtx_name"; const REPLICA_LABEL: &str = "mvtx_replica"; const PENDING_PERIOD_LABEL: &str = "period"; @@ -37,14 +37,22 @@ const PENDING_PERIOD_LABEL: &str = "period"; // Note: We do not add a suffix to the metric name, as the suffix is inferred through the metric type // by the prometheus client library // refer: https://github.com/prometheus/client_rust/blob/master/src/registry.rs#L102 - // Note: Please keep consistent with the definitions in MonoVertex daemon + +// counters (please note the prefix _total, and read above link) const READ_TOTAL: &str = "monovtx_read"; const READ_BYTES_TOTAL: &str = "monovtx_read_bytes"; const ACK_TOTAL: &str = "monovtx_ack"; const SINK_WRITE_TOTAL: &str = "monovtx_sink_write"; -const E2E_PROCESSING_TIME: &str = "monovtx_processing_time"; +// pending as gauge const SOURCE_PENDING: &str = "monovtx_pending"; +// processing times as timers +const E2E_TIME: &str = "monovtx_processing_time"; +const READ_TIME: &str = "monovtx_read_time"; +const TRANSFORM_TIME: &str = "monovtx_transformer_time"; +const ACK_TIME: &str = "monovtx_ack_time"; +const SINK_TIME: &str = "monovtx_sink_time"; + #[derive(Clone)] pub(crate) struct MetricsState { @@ -88,35 +96,47 @@ fn global_registry() -> &'static GlobalRegistry { // The labels are provided in the form of Vec<(String, String) // The second argument is the metric kind. 
pub struct MonoVtxMetrics { + // counters pub read_total: Family, Counter>, pub read_bytes_total: Family, Counter>, pub ack_total: Family, Counter>, pub sink_write_total: Family, Counter>, - pub e2e_processing_time: Family, Histogram>, + // gauge pub source_pending: Family, Gauge>, + // timers + pub e2e_time: Family, Histogram>, + pub read_time: Family, Histogram>, + pub transform_time: Family, Histogram>, + pub ack_time: Family, Histogram>, + pub sink_time: Family, Histogram>, } /// impl the MonoVtxMetrics struct and create a new object impl MonoVtxMetrics { fn new() -> Self { - let monovtx_read_total = Family::, Counter>::default(); - let monovtx_ack_total = Family::, Counter>::default(); - let monovtx_read_bytes_total = Family::, Counter>::default(); - let monovtx_sink_write_total = Family::, Counter>::default(); - - let monovtx_processing_time = - Family::, Histogram>::new_with_constructor(|| { - Histogram::new(exponential_buckets(100.0, 60000000.0 * 15.0, 10)) - }); - let monovtx_pending = Family::, Gauge>::default(); - let metrics = Self { - read_total: monovtx_read_total, - read_bytes_total: monovtx_read_bytes_total, - ack_total: monovtx_ack_total, - sink_write_total: monovtx_sink_write_total, - e2e_processing_time: monovtx_processing_time, - source_pending: monovtx_pending, + read_total: Family::, Counter>::default(), + read_bytes_total: Family::, Counter>::default(), + ack_total: Family::, Counter>::default(), + sink_write_total: Family::, Counter>::default(), + // gauge + source_pending: Family::, Gauge>::default(), + // timers + e2e_time: Family::, Histogram>::new_with_constructor(|| { + Histogram::new(exponential_buckets(100.0, 60000000.0 * 15.0, 10)) + }), + read_time: Family::, Histogram>::new_with_constructor(|| { + Histogram::new(exponential_buckets(100.0, 60000000.0 * 15.0, 10)) + }), + transform_time: Family::, Histogram>::new_with_constructor( + || Histogram::new(exponential_buckets(100.0, 60000000.0 * 15.0, 10)), + ), + ack_time: Family::, 
Histogram>::new_with_constructor(|| { + Histogram::new(exponential_buckets(100.0, 60000000.0 * 15.0, 10)) + }), + sink_time: Family::, Histogram>::new_with_constructor(|| { + Histogram::new(exponential_buckets(100.0, 60000000.0 * 15.0, 10)) + }), }; let mut registry = global_registry().registry.lock(); @@ -136,21 +156,43 @@ impl MonoVtxMetrics { "A Counter to keep track of the total number of messages acknowledged by the sink", metrics.ack_total.clone(), ); - registry.register( - E2E_PROCESSING_TIME, - "A Histogram to keep track of the total time taken to forward a chunk, the time is in microseconds", - metrics.e2e_processing_time.clone(), - ); registry.register( READ_BYTES_TOTAL, "A Counter to keep track of the total number of bytes read from the source", metrics.read_bytes_total.clone(), ); + // gauges registry.register( SOURCE_PENDING, "A Gauge to keep track of the total number of pending messages for the monovtx", metrics.source_pending.clone(), ); + // timers + registry.register( + E2E_TIME, + "A Histogram to keep track of the total time taken to forward a chunk, in microseconds", + metrics.e2e_time.clone(), + ); + registry.register( + READ_TIME, + "A Histogram to keep track of the total time taken to Read from the Source, in microseconds", + metrics.read_time.clone(), + ); + registry.register( + TRANSFORM_TIME, + "A Histogram to keep track of the total time taken to Transform, in microseconds", + metrics.transform_time.clone(), + ); + registry.register( + ACK_TIME, + "A Histogram to keep track of the total time taken to Ack to the Source, in microseconds", + metrics.ack_time.clone(), + ); + registry.register( + SINK_TIME, + "A Histogram to keep track of the total time taken to Write to the Sink, in microseconds", + metrics.sink_time.clone(), + ); metrics } @@ -177,7 +219,7 @@ pub(crate) fn forward_metrics_labels() -> &'static Vec<(String, String)> { MONOVTX_METRICS_LABELS.get_or_init(|| { let common_labels = vec![ ( - VERTEX_NAME_LABEL.to_string(), + 
MVTX_NAME_LABEL.to_string(), config().mono_vertex_name.clone(), ), (REPLICA_LABEL.to_string(), config().replica.to_string()), From 39e6ddd47cb6d2a012d7463a66ff667993197df8 Mon Sep 17 00:00:00 2001 From: Yashash H L Date: Sat, 31 Aug 2024 00:21:46 +0530 Subject: [PATCH 038/188] chore: use RUST_LOG env to control logging in rust binary (#2017) Signed-off-by: Yashash H L Signed-off-by: Vigith Maurice Co-authored-by: Vigith Maurice --- rust/Cargo.lock | 1 + rust/Cargo.toml | 1 + rust/monovertex/Cargo.toml | 5 ++--- rust/monovertex/src/config.rs | 10 ---------- rust/monovertex/src/forwarder.rs | 2 +- rust/monovertex/src/lib.rs | 14 +------------- rust/monovertex/src/metrics.rs | 6 +----- rust/servesink/src/lib.rs | 9 --------- rust/serving/src/lib.rs | 12 ------------ rust/src/bin/main.rs | 15 +++++++++++++++ 10 files changed, 22 insertions(+), 53 deletions(-) diff --git a/rust/Cargo.lock b/rust/Cargo.lock index c269c5e60d..4e3ba1085b 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -1594,6 +1594,7 @@ dependencies = [ "serving", "tokio", "tracing", + "tracing-subscriber", ] [[package]] diff --git a/rust/Cargo.toml b/rust/Cargo.toml index 45a630b732..d4c5152f12 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -17,3 +17,4 @@ servesink = { path = "servesink" } serving = { path = "serving" } monovertex = { path = "monovertex" } tracing = "0.1.40" +tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } diff --git a/rust/monovertex/Cargo.toml b/rust/monovertex/Cargo.toml index d56a502541..00c51e6e9a 100644 --- a/rust/monovertex/Cargo.toml +++ b/rust/monovertex/Cargo.toml @@ -23,7 +23,7 @@ tower = "0.4.13" uuid = { version = "1.10.0", features = ["v4"] } once_cell = "1.19.0" serde_json = "1.0.122" -numaflow-models = { path = "../numaflow-models"} +numaflow-models = { path = "../numaflow-models" } trait-variant = "0.1.2" rcgen = "0.13.1" rustls = { version = "0.23.12", features = ["aws_lc_rs"] } @@ -35,9 +35,8 @@ parking_lot = "0.12.3" 
prometheus-client = "0.22.3" [dev-dependencies] -tower = "0.4.13" tempfile = "3.11.0" -numaflow = { git = "https://github.com/numaproj/numaflow-rs.git", branch="main" } +numaflow = { git = "https://github.com/numaproj/numaflow-rs.git", branch = "main" } [build-dependencies] tonic-build = "0.12.1" diff --git a/rust/monovertex/src/config.rs b/rust/monovertex/src/config.rs index 5a3121e862..00f966b9bc 100644 --- a/rust/monovertex/src/config.rs +++ b/rust/monovertex/src/config.rs @@ -3,7 +3,6 @@ use std::sync::OnceLock; use base64::prelude::BASE64_STANDARD; use base64::Engine; -use tracing::level_filters::LevelFilter; use numaflow_models::models::MonoVertex; @@ -14,7 +13,6 @@ const ENV_GRPC_MAX_MESSAGE_SIZE: &str = "NUMAFLOW_GRPC_MAX_MESSAGE_SIZE"; const ENV_POD_REPLICA: &str = "NUMAFLOW_REPLICA"; const DEFAULT_GRPC_MAX_MESSAGE_SIZE: usize = 64 * 1024 * 1024; // 64 MB const DEFAULT_METRICS_PORT: u16 = 2469; -const ENV_LOG_LEVEL: &str = "NUMAFLOW_DEBUG"; const DEFAULT_LAG_CHECK_INTERVAL_IN_SECS: u16 = 5; const DEFAULT_LAG_REFRESH_INTERVAL_IN_SECS: u16 = 3; const DEFAULT_BATCH_SIZE: u64 = 500; @@ -38,7 +36,6 @@ pub struct Settings { pub batch_size: u64, pub timeout_in_ms: u32, pub metrics_server_listen_port: u16, - pub log_level: String, pub grpc_max_message_size: usize, pub is_transformer_enabled: bool, pub is_fallback_enabled: bool, @@ -56,7 +53,6 @@ impl Default for Settings { batch_size: DEFAULT_BATCH_SIZE, timeout_in_ms: DEFAULT_TIMEOUT_IN_MS, metrics_server_listen_port: DEFAULT_METRICS_PORT, - log_level: LevelFilter::INFO.to_string(), grpc_max_message_size: DEFAULT_GRPC_MAX_MESSAGE_SIZE, is_transformer_enabled: false, is_fallback_enabled: false, @@ -122,9 +118,6 @@ impl Settings { .is_some(); } - settings.log_level = - env::var(ENV_LOG_LEVEL).unwrap_or_else(|_| LevelFilter::INFO.to_string()); - settings.grpc_max_message_size = env::var(ENV_GRPC_MAX_MESSAGE_SIZE) .unwrap_or_else(|_| DEFAULT_GRPC_MAX_MESSAGE_SIZE.to_string()) .parse() @@ -152,7 +145,6 @@ mod tests { 
// Set up environment variables unsafe { env::set_var(ENV_MONO_VERTEX_OBJ, "eyJtZXRhZGF0YSI6eyJuYW1lIjoic2ltcGxlLW1vbm8tdmVydGV4IiwibmFtZXNwYWNlIjoiZGVmYXVsdCIsImNyZWF0aW9uVGltZXN0YW1wIjpudWxsfSwic3BlYyI6eyJyZXBsaWNhcyI6MCwic291cmNlIjp7InRyYW5zZm9ybWVyIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6InF1YXkuaW8vbnVtYWlvL251bWFmbG93LXJzL21hcHQtZXZlbnQtdGltZS1maWx0ZXI6c3RhYmxlIiwicmVzb3VyY2VzIjp7fX0sImJ1aWx0aW4iOm51bGx9LCJ1ZHNvdXJjZSI6eyJjb250YWluZXIiOnsiaW1hZ2UiOiJkb2NrZXIuaW50dWl0LmNvbS9wZXJzb25hbC95aGwwMS9zaW1wbGUtc291cmNlOnN0YWJsZSIsInJlc291cmNlcyI6e319fX0sInNpbmsiOnsidWRzaW5rIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6ImRvY2tlci5pbnR1aXQuY29tL3BlcnNvbmFsL3lobDAxL2JsYWNraG9sZS1zaW5rOnN0YWJsZSIsInJlc291cmNlcyI6e319fX0sImxpbWl0cyI6eyJyZWFkQmF0Y2hTaXplIjo1MDAsInJlYWRUaW1lb3V0IjoiMXMifSwic2NhbGUiOnt9fSwic3RhdHVzIjp7InJlcGxpY2FzIjowLCJsYXN0VXBkYXRlZCI6bnVsbCwibGFzdFNjYWxlZEF0IjpudWxsfX0="); - env::set_var(ENV_LOG_LEVEL, "debug"); env::set_var(ENV_GRPC_MAX_MESSAGE_SIZE, "128000000"); }; @@ -163,13 +155,11 @@ mod tests { assert_eq!(settings.mono_vertex_name, "simple-mono-vertex"); assert_eq!(settings.batch_size, 500); assert_eq!(settings.timeout_in_ms, 1000); - assert_eq!(settings.log_level, "debug"); assert_eq!(settings.grpc_max_message_size, 128000000); // Clean up environment variables unsafe { env::remove_var(ENV_MONO_VERTEX_OBJ); - env::remove_var(ENV_LOG_LEVEL); env::remove_var(ENV_GRPC_MAX_MESSAGE_SIZE); }; } diff --git a/rust/monovertex/src/forwarder.rs b/rust/monovertex/src/forwarder.rs index e3b3c5285f..8efe16c844 100644 --- a/rust/monovertex/src/forwarder.rs +++ b/rust/monovertex/src/forwarder.rs @@ -227,7 +227,7 @@ impl Forwarder { match self.sink_client.sink_fn(messages_to_send.clone()).await { Ok(response) => { debug!( - attempts=attempts, + attempts = attempts, "Sink latency - {}ms", start_time.elapsed().as_millis() ); diff --git a/rust/monovertex/src/lib.rs b/rust/monovertex/src/lib.rs index 9acabc95ea..c1d172adf9 100644 --- a/rust/monovertex/src/lib.rs +++ 
b/rust/monovertex/src/lib.rs @@ -12,9 +12,7 @@ use tokio::signal; use tokio::task::JoinHandle; use tokio::time::sleep; use tokio_util::sync::CancellationToken; -use tracing::level_filters::LevelFilter; use tracing::{error, info, warn}; -use tracing_subscriber::EnvFilter; /// SourcerSinker orchestrates data movement from the Source to the Sink via the optional SourceTransformer. /// The forward-a-chunk executes the following in an infinite loop till a shutdown signal is received: @@ -43,17 +41,6 @@ mod server_info; mod metrics; pub async fn mono_vertex() { - // Initialize the logger - tracing_subscriber::fmt() - .with_env_filter( - EnvFilter::builder() - .with_default_directive(LevelFilter::INFO.into()) - .parse_lossy(&config().log_level), - ) - .with_target(false) - .with_ansi(false) - .init(); - // Initialize the source, sink and transformer configurations // We are using the default configurations for now. let source_config = SourceConfig { @@ -83,6 +70,7 @@ pub async fn mono_vertex() { let cln_token = CancellationToken::new(); let shutdown_cln_token = cln_token.clone(); + // wait for SIG{INT,TERM} and invoke cancellation token. 
let shutdown_handle: JoinHandle> = tokio::spawn(async move { shutdown_signal().await; diff --git a/rust/monovertex/src/metrics.rs b/rust/monovertex/src/metrics.rs index 712118166b..dc5dfd6dc9 100644 --- a/rust/monovertex/src/metrics.rs +++ b/rust/monovertex/src/metrics.rs @@ -53,7 +53,6 @@ const TRANSFORM_TIME: &str = "monovtx_transformer_time"; const ACK_TIME: &str = "monovtx_ack_time"; const SINK_TIME: &str = "monovtx_sink_time"; - #[derive(Clone)] pub(crate) struct MetricsState { pub source_client: SourceClient, @@ -204,10 +203,7 @@ static MONOVTX_METRICS: OnceLock = OnceLock::new(); // forward_metrics is a helper function used to fetch the // MonoVtxMetrics object pub(crate) fn forward_metrics() -> &'static MonoVtxMetrics { - MONOVTX_METRICS.get_or_init(|| { - let metrics = MonoVtxMetrics::new(); - metrics - }) + MONOVTX_METRICS.get_or_init(MonoVtxMetrics::new) } /// MONOVTX_METRICS_LABELS are used to store the common labels used in the metrics diff --git a/rust/servesink/src/lib.rs b/rust/servesink/src/lib.rs index b54ed8b580..3c384b657f 100644 --- a/rust/servesink/src/lib.rs +++ b/rust/servesink/src/lib.rs @@ -3,21 +3,12 @@ use std::error::Error; use numaflow::sink::{self, Response, SinkRequest}; use reqwest::Client; use tracing::{error, warn}; -use tracing_subscriber::prelude::*; const NUMAFLOW_CALLBACK_URL_HEADER: &str = "X-Numaflow-Callback-Url"; const NUMAFLOW_ID_HEADER: &str = "X-Numaflow-Id"; /// servesink is a Numaflow Sink which forwards the payload to the Numaflow serving URL. 
pub async fn servesink() -> Result<(), Box> { - tracing_subscriber::registry() - .with( - tracing_subscriber::EnvFilter::try_from_default_env() - .unwrap_or_else(|_| "servesink=debug".into()), - ) - .with(tracing_subscriber::fmt::layer().with_ansi(false)) - .init(); - sink::Server::new(ServeSink::new()).start().await } diff --git a/rust/serving/src/lib.rs b/rust/serving/src/lib.rs index 86265a5ed0..1838fdb77c 100644 --- a/rust/serving/src/lib.rs +++ b/rust/serving/src/lib.rs @@ -6,8 +6,6 @@ use crate::pipeline::min_pipeline_spec; use axum_server::tls_rustls::RustlsConfig; use std::net::SocketAddr; use tracing::info; -use tracing_subscriber::layer::SubscriberExt; -use tracing_subscriber::util::SubscriberInitExt; mod app; mod config; @@ -23,16 +21,6 @@ pub async fn serve() -> std::result::Result<(), Box = env::args().collect(); + // Set up the tracing subscriber. RUST_LOG can be used to set the log level. + // The default log level is `info`. The `axum::rejection=trace` enables showing + // rejections from built-in extractors at `TRACE` level. + tracing_subscriber::registry() + .with( + tracing_subscriber::EnvFilter::try_from_default_env() + // TODO: add a better default based on entry point invocation + // e.g., serving/monovertex might need a different default + .unwrap_or_else(|_| "info".into()), + ) + .with(tracing_subscriber::fmt::layer().with_ansi(false)) + .init(); + // Based on the argument, run the appropriate component. 
if args.contains(&"--serving".to_string()) { if let Err(e) = serving::serve().await { From 55230e84fd86f05bcac96dd4b42afe73aa1b2e4a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 30 Aug 2024 13:28:00 -0700 Subject: [PATCH 039/188] chore(deps): bump webpack from 5.93.0 to 5.94.0 in /ui (#2018) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- ui/yarn.lock | 27 +++++---------------------- 1 file changed, 5 insertions(+), 22 deletions(-) diff --git a/ui/yarn.lock b/ui/yarn.lock index eda56fac2e..36a04c8dd9 100644 --- a/ui/yarn.lock +++ b/ui/yarn.lock @@ -2640,22 +2640,6 @@ dependencies: "@types/ms" "*" -"@types/eslint-scope@^3.7.3": - version "3.7.7" - resolved "https://registry.yarnpkg.com/@types/eslint-scope/-/eslint-scope-3.7.7.tgz#3108bd5f18b0cdb277c867b3dd449c9ed7079ac5" - integrity sha512-MzMFlSLBqNF2gcHWO0G1vP/YQyfvrxZ0bF+u7mzUdZ1/xK4A4sru+nraZz5i3iEIk1l1uyicaDVTB4QbbEkAYg== - dependencies: - "@types/eslint" "*" - "@types/estree" "*" - -"@types/eslint@*": - version "9.6.0" - resolved "https://registry.yarnpkg.com/@types/eslint/-/eslint-9.6.0.tgz#51d4fe4d0316da9e9f2c80884f2c20ed5fb022ff" - integrity sha512-gi6WQJ7cHRgZxtkQEoyHMppPjq9Kxo5Tjn2prSKDSmZrCz8TZ3jSRCeTJm+WoM+oB0WG37bRqLzaaU3q7JypGg== - dependencies: - "@types/estree" "*" - "@types/json-schema" "*" - "@types/eslint@^7.29.0 || ^8.4.1": version "8.56.11" resolved "https://registry.yarnpkg.com/@types/eslint/-/eslint-8.56.11.tgz#e2ff61510a3b9454b3329fe7731e3b4c6f780041" @@ -5192,7 +5176,7 @@ encodeurl@~1.0.2: resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-1.0.2.tgz#ad3ff4c86ec2d029322f5a02c3a9a606c95b3f59" integrity sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w== -enhanced-resolve@^5.17.0: +enhanced-resolve@^5.17.1: version "5.17.1" resolved 
"https://registry.yarnpkg.com/enhanced-resolve/-/enhanced-resolve-5.17.1.tgz#67bfbbcc2f81d511be77d686a90267ef7f898a15" integrity sha512-LMHl3dXhTcfv8gM4kEzIUeTQ+7fpdA0l2tUf34BddXPkz2A5xJ5L/Pchd5BL6rdccM9QGvu0sWZzK1Z1t4wwyg== @@ -11419,11 +11403,10 @@ webpack-sources@^3.2.3: integrity sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w== webpack@^5.64.4: - version "5.93.0" - resolved "https://registry.yarnpkg.com/webpack/-/webpack-5.93.0.tgz#2e89ec7035579bdfba9760d26c63ac5c3462a5e5" - integrity sha512-Y0m5oEY1LRuwly578VqluorkXbvXKh7U3rLoQCEO04M97ScRr44afGVkI0FQFsXzysk5OgFAxjZAb9rsGQVihA== + version "5.94.0" + resolved "https://registry.yarnpkg.com/webpack/-/webpack-5.94.0.tgz#77a6089c716e7ab90c1c67574a28da518a20970f" + integrity sha512-KcsGn50VT+06JH/iunZJedYGUJS5FGjow8wb9c0v5n1Om8O1g4L6LjtfxwlXIATopoQu+vOXXa7gYisWxCoPyg== dependencies: - "@types/eslint-scope" "^3.7.3" "@types/estree" "^1.0.5" "@webassemblyjs/ast" "^1.12.1" "@webassemblyjs/wasm-edit" "^1.12.1" @@ -11432,7 +11415,7 @@ webpack@^5.64.4: acorn-import-attributes "^1.9.5" browserslist "^4.21.10" chrome-trace-event "^1.0.2" - enhanced-resolve "^5.17.0" + enhanced-resolve "^5.17.1" es-module-lexer "^1.2.1" eslint-scope "5.1.1" events "^3.2.0" From 40a3d2f5bd3ac57e075bc23b076c1e5df8436fc8 Mon Sep 17 00:00:00 2001 From: Sidhant Kohli Date: Fri, 30 Aug 2024 16:37:07 -0700 Subject: [PATCH 040/188] feat: allow configurable retryStrategy (#2010) Signed-off-by: Sidhant Kohli Signed-off-by: Vigith Maurice Co-authored-by: Vigith Maurice --- api/json-schema/schema.json | 33 + api/openapi-spec/swagger.json | 33 + .../numaflow.numaproj.io_monovertices.yaml | 15 + .../full/numaflow.numaproj.io_pipelines.yaml | 15 + .../full/numaflow.numaproj.io_vertices.yaml | 15 + config/install.yaml | 45 + config/namespace-install.yaml | 45 + docs/APIs.md | 218 +++ pkg/apis/numaflow/v1alpha1/const.go | 24 + pkg/apis/numaflow/v1alpha1/generated.pb.go | 1586 +++++++++++------ 
pkg/apis/numaflow/v1alpha1/generated.proto | 29 + .../numaflow/v1alpha1/openapi_generated.go | 67 +- pkg/apis/numaflow/v1alpha1/retry_strategy.go | 102 ++ .../numaflow/v1alpha1/retry_strategy_test.go | 115 ++ pkg/apis/numaflow/v1alpha1/sink.go | 3 + .../v1alpha1/zz_generated.deepcopy.go | 53 + pkg/metrics/metrics.go | 26 +- pkg/reconciler/pipeline/validate.go | 34 + pkg/reconciler/pipeline/validate_test.go | 122 ++ pkg/sinks/forward/forward.go | 238 ++- pkg/sinks/forward/options.go | 13 - rust/Cargo.lock | 1 + rust/monovertex/Cargo.toml | 1 + rust/monovertex/src/config.rs | 286 ++- rust/monovertex/src/forwarder.rs | 250 ++- rust/monovertex/src/metrics.rs | 24 +- rust/numaflow-models/src/models/backoff.rs | 38 + rust/numaflow-models/src/models/mod.rs | 4 + .../src/models/retry_strategy.rs | 38 + rust/numaflow-models/src/models/sink.rs | 3 + 30 files changed, 2752 insertions(+), 724 deletions(-) create mode 100644 pkg/apis/numaflow/v1alpha1/retry_strategy.go create mode 100644 pkg/apis/numaflow/v1alpha1/retry_strategy_test.go create mode 100644 rust/numaflow-models/src/models/backoff.rs create mode 100644 rust/numaflow-models/src/models/retry_strategy.rs diff --git a/api/json-schema/schema.json b/api/json-schema/schema.json index 584220c245..496b6a963f 100644 --- a/api/json-schema/schema.json +++ b/api/json-schema/schema.json @@ -17808,6 +17808,21 @@ }, "type": "object" }, + "io.numaproj.numaflow.v1alpha1.Backoff": { + "description": "Backoff defines parameters used to systematically configure the retry strategy.", + "properties": { + "interval": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration", + "description": "Interval sets the delay to wait before retry, after a failure occurs." 
+ }, + "steps": { + "description": "Steps defines the number of times to try writing to a sink including retries", + "format": "int64", + "type": "integer" + } + }, + "type": "object" + }, "io.numaproj.numaflow.v1alpha1.BasicAuth": { "description": "BasicAuth represents the basic authentication approach which contains a user name and a password.", "properties": { @@ -19752,6 +19767,20 @@ }, "type": "object" }, + "io.numaproj.numaflow.v1alpha1.RetryStrategy": { + "description": "RetryStrategy struct encapsulates the settings for retrying operations in the event of failures. It includes a BackOff strategy to manage the timing of retries and defines the action to take upon failure.", + "properties": { + "backoff": { + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.Backoff", + "description": "BackOff specifies the parameters for the backoff strategy, controlling how delays between retries should increase." + }, + "onFailure": { + "description": "OnFailure specifies the action to take when a retry fails. The default action is to retry.", + "type": "string" + } + }, + "type": "object" + }, "io.numaproj.numaflow.v1alpha1.SASL": { "properties": { "gssapi": { @@ -20064,6 +20093,10 @@ "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.Log", "description": "Log sink is used to write the data to the log." }, + "retryStrategy": { + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.RetryStrategy", + "description": "RetryStrategy struct encapsulates the settings for retrying operations in the event of failures." + }, "udsink": { "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.UDSink", "description": "UDSink sink is used to write the data to the user-defined sink." 
diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index a326712b6c..41d81a0efb 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -17812,6 +17812,21 @@ } } }, + "io.numaproj.numaflow.v1alpha1.Backoff": { + "description": "Backoff defines parameters used to systematically configure the retry strategy.", + "type": "object", + "properties": { + "interval": { + "description": "Interval sets the delay to wait before retry, after a failure occurs.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration" + }, + "steps": { + "description": "Steps defines the number of times to try writing to a sink including retries", + "type": "integer", + "format": "int64" + } + } + }, "io.numaproj.numaflow.v1alpha1.BasicAuth": { "description": "BasicAuth represents the basic authentication approach which contains a user name and a password.", "type": "object", @@ -19738,6 +19753,20 @@ } } }, + "io.numaproj.numaflow.v1alpha1.RetryStrategy": { + "description": "RetryStrategy struct encapsulates the settings for retrying operations in the event of failures. It includes a BackOff strategy to manage the timing of retries and defines the action to take upon failure.", + "type": "object", + "properties": { + "backoff": { + "description": "BackOff specifies the parameters for the backoff strategy, controlling how delays between retries should increase.", + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.Backoff" + }, + "onFailure": { + "description": "OnFailure specifies the action to take when a retry fails. 
The default action is to retry.", + "type": "string" + } + } + }, "io.numaproj.numaflow.v1alpha1.SASL": { "type": "object", "required": [ @@ -20051,6 +20080,10 @@ "description": "Log sink is used to write the data to the log.", "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.Log" }, + "retryStrategy": { + "description": "RetryStrategy struct encapsulates the settings for retrying operations in the event of failures.", + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.RetryStrategy" + }, "udsink": { "description": "UDSink sink is used to write the data to the user-defined sink.", "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.UDSink" diff --git a/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml b/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml index d456bbb249..c07e927f5b 100644 --- a/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml @@ -3615,6 +3615,21 @@ spec: type: object log: type: object + retryStrategy: + properties: + backoff: + properties: + interval: + default: 1ms + type: string + steps: + format: int32 + type: integer + type: object + onFailure: + default: retry + type: string + type: object udsink: properties: container: diff --git a/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml b/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml index 070ba0b033..4ddc954f61 100644 --- a/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml @@ -7571,6 +7571,21 @@ spec: type: object log: type: object + retryStrategy: + properties: + backoff: + properties: + interval: + default: 1ms + type: string + steps: + format: int32 + type: integer + type: object + onFailure: + default: retry + type: string + type: object udsink: properties: container: diff --git a/config/base/crds/full/numaflow.numaproj.io_vertices.yaml b/config/base/crds/full/numaflow.numaproj.io_vertices.yaml 
index 7973694b88..8f0d150360 100644 --- a/config/base/crds/full/numaflow.numaproj.io_vertices.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_vertices.yaml @@ -3158,6 +3158,21 @@ spec: type: object log: type: object + retryStrategy: + properties: + backoff: + properties: + interval: + default: 1ms + type: string + steps: + format: int32 + type: integer + type: object + onFailure: + default: retry + type: string + type: object udsink: properties: container: diff --git a/config/install.yaml b/config/install.yaml index a648c1413e..d2e76c26b3 100644 --- a/config/install.yaml +++ b/config/install.yaml @@ -6259,6 +6259,21 @@ spec: type: object log: type: object + retryStrategy: + properties: + backoff: + properties: + interval: + default: 1ms + type: string + steps: + format: int32 + type: integer + type: object + onFailure: + default: retry + type: string + type: object udsink: properties: container: @@ -15799,6 +15814,21 @@ spec: type: object log: type: object + retryStrategy: + properties: + backoff: + properties: + interval: + default: 1ms + type: string + steps: + format: int32 + type: integer + type: object + onFailure: + default: retry + type: string + type: object udsink: properties: container: @@ -21243,6 +21273,21 @@ spec: type: object log: type: object + retryStrategy: + properties: + backoff: + properties: + interval: + default: 1ms + type: string + steps: + format: int32 + type: integer + type: object + onFailure: + default: retry + type: string + type: object udsink: properties: container: diff --git a/config/namespace-install.yaml b/config/namespace-install.yaml index a922d0f7c5..13afdb3228 100644 --- a/config/namespace-install.yaml +++ b/config/namespace-install.yaml @@ -6259,6 +6259,21 @@ spec: type: object log: type: object + retryStrategy: + properties: + backoff: + properties: + interval: + default: 1ms + type: string + steps: + format: int32 + type: integer + type: object + onFailure: + default: retry + type: string + type: object udsink: 
properties: container: @@ -15799,6 +15814,21 @@ spec: type: object log: type: object + retryStrategy: + properties: + backoff: + properties: + interval: + default: 1ms + type: string + steps: + format: int32 + type: integer + type: object + onFailure: + default: retry + type: string + type: object udsink: properties: container: @@ -21243,6 +21273,21 @@ spec: type: object log: type: object + retryStrategy: + properties: + backoff: + properties: + interval: + default: 1ms + type: string + steps: + format: int32 + type: integer + type: object + onFailure: + default: retry + type: string + type: object udsink: properties: container: diff --git a/docs/APIs.md b/docs/APIs.md index b5ba81450d..3e631f3870 100644 --- a/docs/APIs.md +++ b/docs/APIs.md @@ -908,6 +908,94 @@ needs to add “Authorization: Bearer ” in the header +

+ +Backoff +

+ +

+ +(Appears on: +RetryStrategy) +

+ +

+ +

+ +Backoff defines parameters used to systematically configure the retry +strategy. +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +interval
+ +Kubernetes meta/v1.Duration +
+ +(Optional) +

+ +Interval sets the delay to wait before retry, after a failure occurs. +

+ +
+ +steps
uint32 +
+ +(Optional) +

+ +Steps defines the number of times to try writing to a sink including +retries +

+ +
+

BasicAuth @@ -6773,6 +6861,23 @@ etc.).

+

+ +OnFailureRetryStrategy (string alias) +

+ +

+ +

+ +(Appears on: +RetryStrategy) +

+ +

+ +

+

PBQStorage @@ -8126,6 +8231,97 @@ config +

+ +RetryStrategy +

+ +

+ +(Appears on: +Sink) +

+ +

+ +

+ +RetryStrategy struct encapsulates the settings for retrying operations +in the event of failures. It includes a BackOff strategy to manage the +timing of retries and defines the action to take upon failure. +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +backoff
+ Backoff +
+ +(Optional) +

+ +BackOff specifies the parameters for the backoff strategy, controlling +how delays between retries should increase. +

+ +
+ +onFailure
+ +OnFailureRetryStrategy +
+ +(Optional) +

+ +OnFailure specifies the action to take when a retry fails. The default +action is to retry. +

+ +
+

SASL @@ -9318,6 +9514,28 @@ it. + + + + +retryStrategy
+ RetryStrategy + + + + + +(Optional) +

+ +RetryStrategy struct encapsulates the settings for retrying operations +in the event of failures. +

+ + + + + diff --git a/pkg/apis/numaflow/v1alpha1/const.go b/pkg/apis/numaflow/v1alpha1/const.go index 8677101378..d0e9eb62f0 100644 --- a/pkg/apis/numaflow/v1alpha1/const.go +++ b/pkg/apis/numaflow/v1alpha1/const.go @@ -18,6 +18,7 @@ package v1alpha1 import ( "fmt" + "math" "time" ) @@ -243,6 +244,29 @@ const ( // Serving source DefaultServingTTL = 24 * time.Hour + + // Retry Strategy + + // DefaultRetryInterval specifies the default time interval between retry attempts. + // This value can be adjusted depending on the specific requirements + // for responsiveness and system load considerations. + DefaultRetryInterval = 1 * time.Millisecond + + // DefaultRetrySteps is defined to dictate how many times the platform should attempt to retry + // a write operation to a sink following a failure. The value is set to math.MaxInt32 - 1, + // effectively indicating an almost indefinite number of retries. This large default is chosen + // to ensure that the system will try persistently to carry out the operation unless explicitly + // configured otherwise. This approach can be useful in environments where loss of data + // due to intermittent failures is unacceptable. + DefaultRetrySteps = math.MaxInt32 - 1 + + // DefaultOnFailureRetryStrategy specifies the strategy to be used when the write to a sink fails and + // the retries count specified are exhausted. + // Setting this to 'OnFailureRetry' means the system is configured by default + // to retry the failed operation until successful completion. + // This strategy argues for robustness in operations, aiming + // to minimize the chances of data loss or failed deliveries in transient failure scenarios. 
+ DefaultOnFailureRetryStrategy = OnFailureRetry ) var ( diff --git a/pkg/apis/numaflow/v1alpha1/generated.pb.go b/pkg/apis/numaflow/v1alpha1/generated.pb.go index 3a29fb484b..3514c89361 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.pb.go +++ b/pkg/apis/numaflow/v1alpha1/generated.pb.go @@ -160,10 +160,38 @@ func (m *Authorization) XXX_DiscardUnknown() { var xxx_messageInfo_Authorization proto.InternalMessageInfo +func (m *Backoff) Reset() { *m = Backoff{} } +func (*Backoff) ProtoMessage() {} +func (*Backoff) Descriptor() ([]byte, []int) { + return fileDescriptor_9d0d1b17d3865563, []int{4} +} +func (m *Backoff) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Backoff) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Backoff) XXX_Merge(src proto.Message) { + xxx_messageInfo_Backoff.Merge(m, src) +} +func (m *Backoff) XXX_Size() int { + return m.Size() +} +func (m *Backoff) XXX_DiscardUnknown() { + xxx_messageInfo_Backoff.DiscardUnknown(m) +} + +var xxx_messageInfo_Backoff proto.InternalMessageInfo + func (m *BasicAuth) Reset() { *m = BasicAuth{} } func (*BasicAuth) ProtoMessage() {} func (*BasicAuth) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{4} + return fileDescriptor_9d0d1b17d3865563, []int{5} } func (m *BasicAuth) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -191,7 +219,7 @@ var xxx_messageInfo_BasicAuth proto.InternalMessageInfo func (m *Blackhole) Reset() { *m = Blackhole{} } func (*Blackhole) ProtoMessage() {} func (*Blackhole) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{5} + return fileDescriptor_9d0d1b17d3865563, []int{6} } func (m *Blackhole) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -219,7 +247,7 @@ var xxx_messageInfo_Blackhole proto.InternalMessageInfo func (m *BufferServiceConfig) Reset() { *m = 
BufferServiceConfig{} } func (*BufferServiceConfig) ProtoMessage() {} func (*BufferServiceConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{6} + return fileDescriptor_9d0d1b17d3865563, []int{7} } func (m *BufferServiceConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -247,7 +275,7 @@ var xxx_messageInfo_BufferServiceConfig proto.InternalMessageInfo func (m *CombinedEdge) Reset() { *m = CombinedEdge{} } func (*CombinedEdge) ProtoMessage() {} func (*CombinedEdge) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{7} + return fileDescriptor_9d0d1b17d3865563, []int{8} } func (m *CombinedEdge) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -275,7 +303,7 @@ var xxx_messageInfo_CombinedEdge proto.InternalMessageInfo func (m *Container) Reset() { *m = Container{} } func (*Container) ProtoMessage() {} func (*Container) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{8} + return fileDescriptor_9d0d1b17d3865563, []int{9} } func (m *Container) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -303,7 +331,7 @@ var xxx_messageInfo_Container proto.InternalMessageInfo func (m *ContainerTemplate) Reset() { *m = ContainerTemplate{} } func (*ContainerTemplate) ProtoMessage() {} func (*ContainerTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{9} + return fileDescriptor_9d0d1b17d3865563, []int{10} } func (m *ContainerTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -331,7 +359,7 @@ var xxx_messageInfo_ContainerTemplate proto.InternalMessageInfo func (m *DaemonTemplate) Reset() { *m = DaemonTemplate{} } func (*DaemonTemplate) ProtoMessage() {} func (*DaemonTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{10} + return fileDescriptor_9d0d1b17d3865563, []int{11} } func (m *DaemonTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -359,7 +387,7 @@ var 
xxx_messageInfo_DaemonTemplate proto.InternalMessageInfo func (m *Edge) Reset() { *m = Edge{} } func (*Edge) ProtoMessage() {} func (*Edge) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{11} + return fileDescriptor_9d0d1b17d3865563, []int{12} } func (m *Edge) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -387,7 +415,7 @@ var xxx_messageInfo_Edge proto.InternalMessageInfo func (m *FixedWindow) Reset() { *m = FixedWindow{} } func (*FixedWindow) ProtoMessage() {} func (*FixedWindow) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{12} + return fileDescriptor_9d0d1b17d3865563, []int{13} } func (m *FixedWindow) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -415,7 +443,7 @@ var xxx_messageInfo_FixedWindow proto.InternalMessageInfo func (m *ForwardConditions) Reset() { *m = ForwardConditions{} } func (*ForwardConditions) ProtoMessage() {} func (*ForwardConditions) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{13} + return fileDescriptor_9d0d1b17d3865563, []int{14} } func (m *ForwardConditions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -443,7 +471,7 @@ var xxx_messageInfo_ForwardConditions proto.InternalMessageInfo func (m *Function) Reset() { *m = Function{} } func (*Function) ProtoMessage() {} func (*Function) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{14} + return fileDescriptor_9d0d1b17d3865563, []int{15} } func (m *Function) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -471,7 +499,7 @@ var xxx_messageInfo_Function proto.InternalMessageInfo func (m *GSSAPI) Reset() { *m = GSSAPI{} } func (*GSSAPI) ProtoMessage() {} func (*GSSAPI) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{15} + return fileDescriptor_9d0d1b17d3865563, []int{16} } func (m *GSSAPI) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -499,7 +527,7 @@ var xxx_messageInfo_GSSAPI 
proto.InternalMessageInfo func (m *GeneratorSource) Reset() { *m = GeneratorSource{} } func (*GeneratorSource) ProtoMessage() {} func (*GeneratorSource) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{16} + return fileDescriptor_9d0d1b17d3865563, []int{17} } func (m *GeneratorSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -527,7 +555,7 @@ var xxx_messageInfo_GeneratorSource proto.InternalMessageInfo func (m *GetDaemonDeploymentReq) Reset() { *m = GetDaemonDeploymentReq{} } func (*GetDaemonDeploymentReq) ProtoMessage() {} func (*GetDaemonDeploymentReq) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{17} + return fileDescriptor_9d0d1b17d3865563, []int{18} } func (m *GetDaemonDeploymentReq) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -555,7 +583,7 @@ var xxx_messageInfo_GetDaemonDeploymentReq proto.InternalMessageInfo func (m *GetJetStreamServiceSpecReq) Reset() { *m = GetJetStreamServiceSpecReq{} } func (*GetJetStreamServiceSpecReq) ProtoMessage() {} func (*GetJetStreamServiceSpecReq) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{18} + return fileDescriptor_9d0d1b17d3865563, []int{19} } func (m *GetJetStreamServiceSpecReq) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -583,7 +611,7 @@ var xxx_messageInfo_GetJetStreamServiceSpecReq proto.InternalMessageInfo func (m *GetJetStreamStatefulSetSpecReq) Reset() { *m = GetJetStreamStatefulSetSpecReq{} } func (*GetJetStreamStatefulSetSpecReq) ProtoMessage() {} func (*GetJetStreamStatefulSetSpecReq) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{19} + return fileDescriptor_9d0d1b17d3865563, []int{20} } func (m *GetJetStreamStatefulSetSpecReq) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -611,7 +639,7 @@ var xxx_messageInfo_GetJetStreamStatefulSetSpecReq proto.InternalMessageInfo func (m *GetMonoVertexDaemonDeploymentReq) Reset() { *m = 
GetMonoVertexDaemonDeploymentReq{} } func (*GetMonoVertexDaemonDeploymentReq) ProtoMessage() {} func (*GetMonoVertexDaemonDeploymentReq) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{20} + return fileDescriptor_9d0d1b17d3865563, []int{21} } func (m *GetMonoVertexDaemonDeploymentReq) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -639,7 +667,7 @@ var xxx_messageInfo_GetMonoVertexDaemonDeploymentReq proto.InternalMessageInfo func (m *GetMonoVertexPodSpecReq) Reset() { *m = GetMonoVertexPodSpecReq{} } func (*GetMonoVertexPodSpecReq) ProtoMessage() {} func (*GetMonoVertexPodSpecReq) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{21} + return fileDescriptor_9d0d1b17d3865563, []int{22} } func (m *GetMonoVertexPodSpecReq) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -667,7 +695,7 @@ var xxx_messageInfo_GetMonoVertexPodSpecReq proto.InternalMessageInfo func (m *GetRedisServiceSpecReq) Reset() { *m = GetRedisServiceSpecReq{} } func (*GetRedisServiceSpecReq) ProtoMessage() {} func (*GetRedisServiceSpecReq) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{22} + return fileDescriptor_9d0d1b17d3865563, []int{23} } func (m *GetRedisServiceSpecReq) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -695,7 +723,7 @@ var xxx_messageInfo_GetRedisServiceSpecReq proto.InternalMessageInfo func (m *GetRedisStatefulSetSpecReq) Reset() { *m = GetRedisStatefulSetSpecReq{} } func (*GetRedisStatefulSetSpecReq) ProtoMessage() {} func (*GetRedisStatefulSetSpecReq) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{23} + return fileDescriptor_9d0d1b17d3865563, []int{24} } func (m *GetRedisStatefulSetSpecReq) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -723,7 +751,7 @@ var xxx_messageInfo_GetRedisStatefulSetSpecReq proto.InternalMessageInfo func (m *GetSideInputDeploymentReq) Reset() { *m = GetSideInputDeploymentReq{} } func 
(*GetSideInputDeploymentReq) ProtoMessage() {} func (*GetSideInputDeploymentReq) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{24} + return fileDescriptor_9d0d1b17d3865563, []int{25} } func (m *GetSideInputDeploymentReq) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -751,7 +779,7 @@ var xxx_messageInfo_GetSideInputDeploymentReq proto.InternalMessageInfo func (m *GetVertexPodSpecReq) Reset() { *m = GetVertexPodSpecReq{} } func (*GetVertexPodSpecReq) ProtoMessage() {} func (*GetVertexPodSpecReq) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{25} + return fileDescriptor_9d0d1b17d3865563, []int{26} } func (m *GetVertexPodSpecReq) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -779,7 +807,7 @@ var xxx_messageInfo_GetVertexPodSpecReq proto.InternalMessageInfo func (m *GroupBy) Reset() { *m = GroupBy{} } func (*GroupBy) ProtoMessage() {} func (*GroupBy) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{26} + return fileDescriptor_9d0d1b17d3865563, []int{27} } func (m *GroupBy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -807,7 +835,7 @@ var xxx_messageInfo_GroupBy proto.InternalMessageInfo func (m *HTTPSource) Reset() { *m = HTTPSource{} } func (*HTTPSource) ProtoMessage() {} func (*HTTPSource) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{27} + return fileDescriptor_9d0d1b17d3865563, []int{28} } func (m *HTTPSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -835,7 +863,7 @@ var xxx_messageInfo_HTTPSource proto.InternalMessageInfo func (m *IdleSource) Reset() { *m = IdleSource{} } func (*IdleSource) ProtoMessage() {} func (*IdleSource) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{28} + return fileDescriptor_9d0d1b17d3865563, []int{29} } func (m *IdleSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -863,7 +891,7 @@ var xxx_messageInfo_IdleSource 
proto.InternalMessageInfo func (m *InterStepBufferService) Reset() { *m = InterStepBufferService{} } func (*InterStepBufferService) ProtoMessage() {} func (*InterStepBufferService) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{29} + return fileDescriptor_9d0d1b17d3865563, []int{30} } func (m *InterStepBufferService) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -891,7 +919,7 @@ var xxx_messageInfo_InterStepBufferService proto.InternalMessageInfo func (m *InterStepBufferServiceList) Reset() { *m = InterStepBufferServiceList{} } func (*InterStepBufferServiceList) ProtoMessage() {} func (*InterStepBufferServiceList) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{30} + return fileDescriptor_9d0d1b17d3865563, []int{31} } func (m *InterStepBufferServiceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -919,7 +947,7 @@ var xxx_messageInfo_InterStepBufferServiceList proto.InternalMessageInfo func (m *InterStepBufferServiceSpec) Reset() { *m = InterStepBufferServiceSpec{} } func (*InterStepBufferServiceSpec) ProtoMessage() {} func (*InterStepBufferServiceSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{31} + return fileDescriptor_9d0d1b17d3865563, []int{32} } func (m *InterStepBufferServiceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -947,7 +975,7 @@ var xxx_messageInfo_InterStepBufferServiceSpec proto.InternalMessageInfo func (m *InterStepBufferServiceStatus) Reset() { *m = InterStepBufferServiceStatus{} } func (*InterStepBufferServiceStatus) ProtoMessage() {} func (*InterStepBufferServiceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{32} + return fileDescriptor_9d0d1b17d3865563, []int{33} } func (m *InterStepBufferServiceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -975,7 +1003,7 @@ var xxx_messageInfo_InterStepBufferServiceStatus proto.InternalMessageInfo func (m 
*JetStreamBufferService) Reset() { *m = JetStreamBufferService{} } func (*JetStreamBufferService) ProtoMessage() {} func (*JetStreamBufferService) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{33} + return fileDescriptor_9d0d1b17d3865563, []int{34} } func (m *JetStreamBufferService) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1003,7 +1031,7 @@ var xxx_messageInfo_JetStreamBufferService proto.InternalMessageInfo func (m *JetStreamConfig) Reset() { *m = JetStreamConfig{} } func (*JetStreamConfig) ProtoMessage() {} func (*JetStreamConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{34} + return fileDescriptor_9d0d1b17d3865563, []int{35} } func (m *JetStreamConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1031,7 +1059,7 @@ var xxx_messageInfo_JetStreamConfig proto.InternalMessageInfo func (m *JetStreamSource) Reset() { *m = JetStreamSource{} } func (*JetStreamSource) ProtoMessage() {} func (*JetStreamSource) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{35} + return fileDescriptor_9d0d1b17d3865563, []int{36} } func (m *JetStreamSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1059,7 +1087,7 @@ var xxx_messageInfo_JetStreamSource proto.InternalMessageInfo func (m *JobTemplate) Reset() { *m = JobTemplate{} } func (*JobTemplate) ProtoMessage() {} func (*JobTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{36} + return fileDescriptor_9d0d1b17d3865563, []int{37} } func (m *JobTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1087,7 +1115,7 @@ var xxx_messageInfo_JobTemplate proto.InternalMessageInfo func (m *KafkaSink) Reset() { *m = KafkaSink{} } func (*KafkaSink) ProtoMessage() {} func (*KafkaSink) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{37} + return fileDescriptor_9d0d1b17d3865563, []int{38} } func (m *KafkaSink) XXX_Unmarshal(b 
[]byte) error { return m.Unmarshal(b) @@ -1115,7 +1143,7 @@ var xxx_messageInfo_KafkaSink proto.InternalMessageInfo func (m *KafkaSource) Reset() { *m = KafkaSource{} } func (*KafkaSource) ProtoMessage() {} func (*KafkaSource) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{38} + return fileDescriptor_9d0d1b17d3865563, []int{39} } func (m *KafkaSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1143,7 +1171,7 @@ var xxx_messageInfo_KafkaSource proto.InternalMessageInfo func (m *Lifecycle) Reset() { *m = Lifecycle{} } func (*Lifecycle) ProtoMessage() {} func (*Lifecycle) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{39} + return fileDescriptor_9d0d1b17d3865563, []int{40} } func (m *Lifecycle) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1171,7 +1199,7 @@ var xxx_messageInfo_Lifecycle proto.InternalMessageInfo func (m *Log) Reset() { *m = Log{} } func (*Log) ProtoMessage() {} func (*Log) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{40} + return fileDescriptor_9d0d1b17d3865563, []int{41} } func (m *Log) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1199,7 +1227,7 @@ var xxx_messageInfo_Log proto.InternalMessageInfo func (m *Metadata) Reset() { *m = Metadata{} } func (*Metadata) ProtoMessage() {} func (*Metadata) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{41} + return fileDescriptor_9d0d1b17d3865563, []int{42} } func (m *Metadata) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1227,7 +1255,7 @@ var xxx_messageInfo_Metadata proto.InternalMessageInfo func (m *MonoVertex) Reset() { *m = MonoVertex{} } func (*MonoVertex) ProtoMessage() {} func (*MonoVertex) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{42} + return fileDescriptor_9d0d1b17d3865563, []int{43} } func (m *MonoVertex) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1255,7 +1283,7 @@ var 
xxx_messageInfo_MonoVertex proto.InternalMessageInfo func (m *MonoVertexLimits) Reset() { *m = MonoVertexLimits{} } func (*MonoVertexLimits) ProtoMessage() {} func (*MonoVertexLimits) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{43} + return fileDescriptor_9d0d1b17d3865563, []int{44} } func (m *MonoVertexLimits) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1283,7 +1311,7 @@ var xxx_messageInfo_MonoVertexLimits proto.InternalMessageInfo func (m *MonoVertexList) Reset() { *m = MonoVertexList{} } func (*MonoVertexList) ProtoMessage() {} func (*MonoVertexList) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{44} + return fileDescriptor_9d0d1b17d3865563, []int{45} } func (m *MonoVertexList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1311,7 +1339,7 @@ var xxx_messageInfo_MonoVertexList proto.InternalMessageInfo func (m *MonoVertexSpec) Reset() { *m = MonoVertexSpec{} } func (*MonoVertexSpec) ProtoMessage() {} func (*MonoVertexSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{45} + return fileDescriptor_9d0d1b17d3865563, []int{46} } func (m *MonoVertexSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1339,7 +1367,7 @@ var xxx_messageInfo_MonoVertexSpec proto.InternalMessageInfo func (m *MonoVertexStatus) Reset() { *m = MonoVertexStatus{} } func (*MonoVertexStatus) ProtoMessage() {} func (*MonoVertexStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{46} + return fileDescriptor_9d0d1b17d3865563, []int{47} } func (m *MonoVertexStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1367,7 +1395,7 @@ var xxx_messageInfo_MonoVertexStatus proto.InternalMessageInfo func (m *NativeRedis) Reset() { *m = NativeRedis{} } func (*NativeRedis) ProtoMessage() {} func (*NativeRedis) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{47} + return 
fileDescriptor_9d0d1b17d3865563, []int{48} } func (m *NativeRedis) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1395,7 +1423,7 @@ var xxx_messageInfo_NativeRedis proto.InternalMessageInfo func (m *NatsAuth) Reset() { *m = NatsAuth{} } func (*NatsAuth) ProtoMessage() {} func (*NatsAuth) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{48} + return fileDescriptor_9d0d1b17d3865563, []int{49} } func (m *NatsAuth) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1423,7 +1451,7 @@ var xxx_messageInfo_NatsAuth proto.InternalMessageInfo func (m *NatsSource) Reset() { *m = NatsSource{} } func (*NatsSource) ProtoMessage() {} func (*NatsSource) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{49} + return fileDescriptor_9d0d1b17d3865563, []int{50} } func (m *NatsSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1451,7 +1479,7 @@ var xxx_messageInfo_NatsSource proto.InternalMessageInfo func (m *NoStore) Reset() { *m = NoStore{} } func (*NoStore) ProtoMessage() {} func (*NoStore) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{50} + return fileDescriptor_9d0d1b17d3865563, []int{51} } func (m *NoStore) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1479,7 +1507,7 @@ var xxx_messageInfo_NoStore proto.InternalMessageInfo func (m *PBQStorage) Reset() { *m = PBQStorage{} } func (*PBQStorage) ProtoMessage() {} func (*PBQStorage) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{51} + return fileDescriptor_9d0d1b17d3865563, []int{52} } func (m *PBQStorage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1507,7 +1535,7 @@ var xxx_messageInfo_PBQStorage proto.InternalMessageInfo func (m *PersistenceStrategy) Reset() { *m = PersistenceStrategy{} } func (*PersistenceStrategy) ProtoMessage() {} func (*PersistenceStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{52} + return 
fileDescriptor_9d0d1b17d3865563, []int{53} } func (m *PersistenceStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1535,7 +1563,7 @@ var xxx_messageInfo_PersistenceStrategy proto.InternalMessageInfo func (m *Pipeline) Reset() { *m = Pipeline{} } func (*Pipeline) ProtoMessage() {} func (*Pipeline) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{53} + return fileDescriptor_9d0d1b17d3865563, []int{54} } func (m *Pipeline) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1563,7 +1591,7 @@ var xxx_messageInfo_Pipeline proto.InternalMessageInfo func (m *PipelineLimits) Reset() { *m = PipelineLimits{} } func (*PipelineLimits) ProtoMessage() {} func (*PipelineLimits) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{54} + return fileDescriptor_9d0d1b17d3865563, []int{55} } func (m *PipelineLimits) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1591,7 +1619,7 @@ var xxx_messageInfo_PipelineLimits proto.InternalMessageInfo func (m *PipelineList) Reset() { *m = PipelineList{} } func (*PipelineList) ProtoMessage() {} func (*PipelineList) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{55} + return fileDescriptor_9d0d1b17d3865563, []int{56} } func (m *PipelineList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1619,7 +1647,7 @@ var xxx_messageInfo_PipelineList proto.InternalMessageInfo func (m *PipelineSpec) Reset() { *m = PipelineSpec{} } func (*PipelineSpec) ProtoMessage() {} func (*PipelineSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{56} + return fileDescriptor_9d0d1b17d3865563, []int{57} } func (m *PipelineSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1647,7 +1675,7 @@ var xxx_messageInfo_PipelineSpec proto.InternalMessageInfo func (m *PipelineStatus) Reset() { *m = PipelineStatus{} } func (*PipelineStatus) ProtoMessage() {} func (*PipelineStatus) Descriptor() ([]byte, []int) { 
- return fileDescriptor_9d0d1b17d3865563, []int{57} + return fileDescriptor_9d0d1b17d3865563, []int{58} } func (m *PipelineStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1675,7 +1703,7 @@ var xxx_messageInfo_PipelineStatus proto.InternalMessageInfo func (m *RedisBufferService) Reset() { *m = RedisBufferService{} } func (*RedisBufferService) ProtoMessage() {} func (*RedisBufferService) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{58} + return fileDescriptor_9d0d1b17d3865563, []int{59} } func (m *RedisBufferService) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1703,7 +1731,7 @@ var xxx_messageInfo_RedisBufferService proto.InternalMessageInfo func (m *RedisConfig) Reset() { *m = RedisConfig{} } func (*RedisConfig) ProtoMessage() {} func (*RedisConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{59} + return fileDescriptor_9d0d1b17d3865563, []int{60} } func (m *RedisConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1731,7 +1759,7 @@ var xxx_messageInfo_RedisConfig proto.InternalMessageInfo func (m *RedisSettings) Reset() { *m = RedisSettings{} } func (*RedisSettings) ProtoMessage() {} func (*RedisSettings) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{60} + return fileDescriptor_9d0d1b17d3865563, []int{61} } func (m *RedisSettings) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1756,10 +1784,38 @@ func (m *RedisSettings) XXX_DiscardUnknown() { var xxx_messageInfo_RedisSettings proto.InternalMessageInfo +func (m *RetryStrategy) Reset() { *m = RetryStrategy{} } +func (*RetryStrategy) ProtoMessage() {} +func (*RetryStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_9d0d1b17d3865563, []int{62} +} +func (m *RetryStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RetryStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RetryStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_RetryStrategy.Merge(m, src) +} +func (m *RetryStrategy) XXX_Size() int { + return m.Size() +} +func (m *RetryStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_RetryStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_RetryStrategy proto.InternalMessageInfo + func (m *SASL) Reset() { *m = SASL{} } func (*SASL) ProtoMessage() {} func (*SASL) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{61} + return fileDescriptor_9d0d1b17d3865563, []int{63} } func (m *SASL) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1787,7 +1843,7 @@ var xxx_messageInfo_SASL proto.InternalMessageInfo func (m *SASLPlain) Reset() { *m = SASLPlain{} } func (*SASLPlain) ProtoMessage() {} func (*SASLPlain) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{62} + return fileDescriptor_9d0d1b17d3865563, []int{64} } func (m *SASLPlain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1815,7 +1871,7 @@ var xxx_messageInfo_SASLPlain proto.InternalMessageInfo func (m *Scale) Reset() { *m = Scale{} } func (*Scale) ProtoMessage() {} func (*Scale) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{63} + return fileDescriptor_9d0d1b17d3865563, []int{65} } func (m *Scale) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1843,7 +1899,7 @@ var xxx_messageInfo_Scale proto.InternalMessageInfo func (m *ServingSource) Reset() { *m = ServingSource{} } func (*ServingSource) ProtoMessage() {} func (*ServingSource) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{64} + return fileDescriptor_9d0d1b17d3865563, []int{66} } func (m *ServingSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1871,7 +1927,7 @@ var xxx_messageInfo_ServingSource proto.InternalMessageInfo func (m *ServingStore) Reset() { *m = 
ServingStore{} } func (*ServingStore) ProtoMessage() {} func (*ServingStore) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{65} + return fileDescriptor_9d0d1b17d3865563, []int{67} } func (m *ServingStore) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1899,7 +1955,7 @@ var xxx_messageInfo_ServingStore proto.InternalMessageInfo func (m *SessionWindow) Reset() { *m = SessionWindow{} } func (*SessionWindow) ProtoMessage() {} func (*SessionWindow) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{66} + return fileDescriptor_9d0d1b17d3865563, []int{68} } func (m *SessionWindow) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1927,7 +1983,7 @@ var xxx_messageInfo_SessionWindow proto.InternalMessageInfo func (m *SideInput) Reset() { *m = SideInput{} } func (*SideInput) ProtoMessage() {} func (*SideInput) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{67} + return fileDescriptor_9d0d1b17d3865563, []int{69} } func (m *SideInput) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1955,7 +2011,7 @@ var xxx_messageInfo_SideInput proto.InternalMessageInfo func (m *SideInputTrigger) Reset() { *m = SideInputTrigger{} } func (*SideInputTrigger) ProtoMessage() {} func (*SideInputTrigger) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{68} + return fileDescriptor_9d0d1b17d3865563, []int{70} } func (m *SideInputTrigger) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1983,7 +2039,7 @@ var xxx_messageInfo_SideInputTrigger proto.InternalMessageInfo func (m *SideInputsManagerTemplate) Reset() { *m = SideInputsManagerTemplate{} } func (*SideInputsManagerTemplate) ProtoMessage() {} func (*SideInputsManagerTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{69} + return fileDescriptor_9d0d1b17d3865563, []int{71} } func (m *SideInputsManagerTemplate) XXX_Unmarshal(b []byte) error { return 
m.Unmarshal(b) @@ -2011,7 +2067,7 @@ var xxx_messageInfo_SideInputsManagerTemplate proto.InternalMessageInfo func (m *Sink) Reset() { *m = Sink{} } func (*Sink) ProtoMessage() {} func (*Sink) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{70} + return fileDescriptor_9d0d1b17d3865563, []int{72} } func (m *Sink) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2039,7 +2095,7 @@ var xxx_messageInfo_Sink proto.InternalMessageInfo func (m *SlidingWindow) Reset() { *m = SlidingWindow{} } func (*SlidingWindow) ProtoMessage() {} func (*SlidingWindow) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{71} + return fileDescriptor_9d0d1b17d3865563, []int{73} } func (m *SlidingWindow) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2067,7 +2123,7 @@ var xxx_messageInfo_SlidingWindow proto.InternalMessageInfo func (m *Source) Reset() { *m = Source{} } func (*Source) ProtoMessage() {} func (*Source) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{72} + return fileDescriptor_9d0d1b17d3865563, []int{74} } func (m *Source) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2095,7 +2151,7 @@ var xxx_messageInfo_Source proto.InternalMessageInfo func (m *Status) Reset() { *m = Status{} } func (*Status) ProtoMessage() {} func (*Status) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{73} + return fileDescriptor_9d0d1b17d3865563, []int{75} } func (m *Status) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2123,7 +2179,7 @@ var xxx_messageInfo_Status proto.InternalMessageInfo func (m *TLS) Reset() { *m = TLS{} } func (*TLS) ProtoMessage() {} func (*TLS) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{74} + return fileDescriptor_9d0d1b17d3865563, []int{76} } func (m *TLS) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2151,7 +2207,7 @@ var xxx_messageInfo_TLS proto.InternalMessageInfo func (m 
*TagConditions) Reset() { *m = TagConditions{} } func (*TagConditions) ProtoMessage() {} func (*TagConditions) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{75} + return fileDescriptor_9d0d1b17d3865563, []int{77} } func (m *TagConditions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2179,7 +2235,7 @@ var xxx_messageInfo_TagConditions proto.InternalMessageInfo func (m *Templates) Reset() { *m = Templates{} } func (*Templates) ProtoMessage() {} func (*Templates) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{76} + return fileDescriptor_9d0d1b17d3865563, []int{78} } func (m *Templates) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2207,7 +2263,7 @@ var xxx_messageInfo_Templates proto.InternalMessageInfo func (m *Transformer) Reset() { *m = Transformer{} } func (*Transformer) ProtoMessage() {} func (*Transformer) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{77} + return fileDescriptor_9d0d1b17d3865563, []int{79} } func (m *Transformer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2235,7 +2291,7 @@ var xxx_messageInfo_Transformer proto.InternalMessageInfo func (m *UDF) Reset() { *m = UDF{} } func (*UDF) ProtoMessage() {} func (*UDF) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{78} + return fileDescriptor_9d0d1b17d3865563, []int{80} } func (m *UDF) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2263,7 +2319,7 @@ var xxx_messageInfo_UDF proto.InternalMessageInfo func (m *UDSink) Reset() { *m = UDSink{} } func (*UDSink) ProtoMessage() {} func (*UDSink) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{79} + return fileDescriptor_9d0d1b17d3865563, []int{81} } func (m *UDSink) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2291,7 +2347,7 @@ var xxx_messageInfo_UDSink proto.InternalMessageInfo func (m *UDSource) Reset() { *m = UDSource{} } func (*UDSource) 
ProtoMessage() {} func (*UDSource) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{80} + return fileDescriptor_9d0d1b17d3865563, []int{82} } func (m *UDSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2319,7 +2375,7 @@ var xxx_messageInfo_UDSource proto.InternalMessageInfo func (m *UDTransformer) Reset() { *m = UDTransformer{} } func (*UDTransformer) ProtoMessage() {} func (*UDTransformer) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{81} + return fileDescriptor_9d0d1b17d3865563, []int{83} } func (m *UDTransformer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2347,7 +2403,7 @@ var xxx_messageInfo_UDTransformer proto.InternalMessageInfo func (m *Vertex) Reset() { *m = Vertex{} } func (*Vertex) ProtoMessage() {} func (*Vertex) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{82} + return fileDescriptor_9d0d1b17d3865563, []int{84} } func (m *Vertex) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2375,7 +2431,7 @@ var xxx_messageInfo_Vertex proto.InternalMessageInfo func (m *VertexInstance) Reset() { *m = VertexInstance{} } func (*VertexInstance) ProtoMessage() {} func (*VertexInstance) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{83} + return fileDescriptor_9d0d1b17d3865563, []int{85} } func (m *VertexInstance) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2403,7 +2459,7 @@ var xxx_messageInfo_VertexInstance proto.InternalMessageInfo func (m *VertexLimits) Reset() { *m = VertexLimits{} } func (*VertexLimits) ProtoMessage() {} func (*VertexLimits) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{84} + return fileDescriptor_9d0d1b17d3865563, []int{86} } func (m *VertexLimits) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2431,7 +2487,7 @@ var xxx_messageInfo_VertexLimits proto.InternalMessageInfo func (m *VertexList) Reset() { *m = VertexList{} } func 
(*VertexList) ProtoMessage() {} func (*VertexList) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{85} + return fileDescriptor_9d0d1b17d3865563, []int{87} } func (m *VertexList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2459,7 +2515,7 @@ var xxx_messageInfo_VertexList proto.InternalMessageInfo func (m *VertexSpec) Reset() { *m = VertexSpec{} } func (*VertexSpec) ProtoMessage() {} func (*VertexSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{86} + return fileDescriptor_9d0d1b17d3865563, []int{88} } func (m *VertexSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2487,7 +2543,7 @@ var xxx_messageInfo_VertexSpec proto.InternalMessageInfo func (m *VertexStatus) Reset() { *m = VertexStatus{} } func (*VertexStatus) ProtoMessage() {} func (*VertexStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{87} + return fileDescriptor_9d0d1b17d3865563, []int{89} } func (m *VertexStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2515,7 +2571,7 @@ var xxx_messageInfo_VertexStatus proto.InternalMessageInfo func (m *VertexTemplate) Reset() { *m = VertexTemplate{} } func (*VertexTemplate) ProtoMessage() {} func (*VertexTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{88} + return fileDescriptor_9d0d1b17d3865563, []int{90} } func (m *VertexTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2543,7 +2599,7 @@ var xxx_messageInfo_VertexTemplate proto.InternalMessageInfo func (m *Watermark) Reset() { *m = Watermark{} } func (*Watermark) ProtoMessage() {} func (*Watermark) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{89} + return fileDescriptor_9d0d1b17d3865563, []int{91} } func (m *Watermark) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2571,7 +2627,7 @@ var xxx_messageInfo_Watermark proto.InternalMessageInfo func (m *Window) Reset() { *m = 
Window{} } func (*Window) ProtoMessage() {} func (*Window) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{90} + return fileDescriptor_9d0d1b17d3865563, []int{92} } func (m *Window) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2602,6 +2658,7 @@ func init() { proto.RegisterType((*AbstractSink)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.AbstractSink") proto.RegisterType((*AbstractVertex)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.AbstractVertex") proto.RegisterType((*Authorization)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.Authorization") + proto.RegisterType((*Backoff)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.Backoff") proto.RegisterType((*BasicAuth)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.BasicAuth") proto.RegisterType((*Blackhole)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.Blackhole") proto.RegisterType((*BufferServiceConfig)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.BufferServiceConfig") @@ -2666,6 +2723,7 @@ func init() { proto.RegisterType((*RedisBufferService)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.RedisBufferService") proto.RegisterType((*RedisConfig)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.RedisConfig") proto.RegisterType((*RedisSettings)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.RedisSettings") + proto.RegisterType((*RetryStrategy)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.RetryStrategy") proto.RegisterType((*SASL)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.SASL") proto.RegisterType((*SASLPlain)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.SASLPlain") proto.RegisterType((*Scale)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.Scale") @@ -2704,475 +2762,483 @@ func init() { } var fileDescriptor_9d0d1b17d3865563 = []byte{ - // 7488 
bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x5b, 0x6c, 0x1d, 0xd7, - 0x79, 0xae, 0xf7, 0x8d, 0xdc, 0xfb, 0xdf, 0x24, 0x45, 0x2d, 0xc9, 0x32, 0x25, 0xcb, 0xa2, 0x32, - 0x8e, 0x7d, 0x94, 0x93, 0x84, 0x3c, 0xe6, 0xf1, 0x2d, 0x57, 0x9b, 0x9b, 0x14, 0x29, 0x4a, 0xa4, - 0xc4, 0xfc, 0x9b, 0x94, 0x9d, 0xf8, 0x24, 0x3e, 0xc3, 0x99, 0xc5, 0xcd, 0x31, 0x67, 0xcf, 0x6c, - 0xcf, 0xcc, 0xa6, 0x44, 0xe7, 0x1c, 0xe4, 0xf6, 0x60, 0x1f, 0x1c, 0x1c, 0x9c, 0x22, 0x4f, 0x01, - 0x8a, 0xb4, 0x68, 0x51, 0x20, 0x0f, 0x41, 0xfa, 0x50, 0xd4, 0x7d, 0x28, 0xd0, 0x4b, 0x8a, 0xa2, - 0x4d, 0x8a, 0x5e, 0xf2, 0x50, 0xa0, 0xee, 0x0b, 0xd1, 0xb0, 0xe8, 0x43, 0x0b, 0x34, 0x08, 0x1a, - 0xa0, 0x4d, 0x84, 0x00, 0x29, 0xd6, 0x6d, 0x6e, 0x7b, 0xb6, 0x44, 0xee, 0x21, 0x65, 0xb9, 0xf5, - 0xdb, 0xcc, 0x5a, 0xff, 0xfa, 0xfe, 0x35, 0xff, 0xba, 0xfd, 0xeb, 0x5f, 0xff, 0xfa, 0x07, 0x16, - 0x5b, 0x56, 0xb0, 0xd5, 0xdd, 0x98, 0x32, 0xdc, 0xf6, 0xb4, 0xd3, 0x6d, 0xeb, 0x1d, 0xcf, 0x7d, - 0x8d, 0x3f, 0x6c, 0xda, 0xee, 0xad, 0xe9, 0xce, 0x76, 0x6b, 0x5a, 0xef, 0x58, 0x7e, 0x94, 0xb2, - 0xf3, 0x94, 0x6e, 0x77, 0xb6, 0xf4, 0xa7, 0xa6, 0x5b, 0xd4, 0xa1, 0x9e, 0x1e, 0x50, 0x73, 0xaa, - 0xe3, 0xb9, 0x81, 0x4b, 0x9e, 0x8b, 0x80, 0xa6, 0x14, 0xd0, 0x94, 0x2a, 0x36, 0xd5, 0xd9, 0x6e, - 0x4d, 0x31, 0xa0, 0x28, 0x45, 0x01, 0x9d, 0xfb, 0x68, 0xac, 0x06, 0x2d, 0xb7, 0xe5, 0x4e, 0x73, - 0xbc, 0x8d, 0xee, 0x26, 0x7f, 0xe3, 0x2f, 0xfc, 0x49, 0xf0, 0x39, 0xa7, 0x6d, 0x3f, 0xef, 0x4f, - 0x59, 0x2e, 0xab, 0xd6, 0xb4, 0xe1, 0x7a, 0x74, 0x7a, 0xa7, 0xa7, 0x2e, 0xe7, 0x9e, 0x8e, 0x68, - 0xda, 0xba, 0xb1, 0x65, 0x39, 0xd4, 0xdb, 0x55, 0xdf, 0x32, 0xed, 0x51, 0xdf, 0xed, 0x7a, 0x06, - 0x3d, 0x54, 0x29, 0x7f, 0xba, 0x4d, 0x03, 0x3d, 0x8b, 0xd7, 0x74, 0xbf, 0x52, 0x5e, 0xd7, 0x09, - 0xac, 0x76, 0x2f, 0x9b, 0x67, 0xef, 0x55, 0xc0, 0x37, 0xb6, 0x68, 0x5b, 0x4f, 0x97, 0xd3, 0xbe, - 0x0b, 0x70, 0x6a, 0x76, 0xc3, 0x0f, 0x3c, 0xdd, 0x08, 0x56, 0x5d, 0x73, 0x8d, 0xb6, 0x3b, 0xb6, 
- 0x1e, 0x50, 0xb2, 0x0d, 0x55, 0x56, 0x37, 0x53, 0x0f, 0xf4, 0x89, 0xc2, 0xc5, 0xc2, 0xa5, 0xfa, - 0xcc, 0xec, 0xd4, 0x80, 0x6d, 0x31, 0xb5, 0x22, 0x81, 0x1a, 0x23, 0xfb, 0x7b, 0x93, 0x55, 0xf5, - 0x86, 0x21, 0x03, 0xf2, 0x8d, 0x02, 0x8c, 0x38, 0xae, 0x49, 0x9b, 0xd4, 0xa6, 0x46, 0xe0, 0x7a, - 0x13, 0xc5, 0x8b, 0xa5, 0x4b, 0xf5, 0x99, 0x2f, 0x0c, 0xcc, 0x31, 0xe3, 0x8b, 0xa6, 0xae, 0xc7, - 0x18, 0x5c, 0x76, 0x02, 0x6f, 0xb7, 0x71, 0xfa, 0x7b, 0x7b, 0x93, 0x0f, 0xed, 0xef, 0x4d, 0x8e, - 0xc4, 0xb3, 0x30, 0x51, 0x13, 0xb2, 0x0e, 0xf5, 0xc0, 0xb5, 0x99, 0xc8, 0x2c, 0xd7, 0xf1, 0x27, - 0x4a, 0xbc, 0x62, 0x17, 0xa6, 0x84, 0xb4, 0x19, 0xfb, 0x29, 0xd6, 0x5d, 0xa6, 0x76, 0x9e, 0x9a, - 0x5a, 0x0b, 0xc9, 0x1a, 0xa7, 0x24, 0x70, 0x3d, 0x4a, 0xf3, 0x31, 0x8e, 0x43, 0x28, 0x9c, 0xf0, - 0xa9, 0xd1, 0xf5, 0xac, 0x60, 0x77, 0xce, 0x75, 0x02, 0x7a, 0x3b, 0x98, 0x28, 0x73, 0x29, 0x3f, - 0x99, 0x05, 0xbd, 0xea, 0x9a, 0xcd, 0x24, 0x75, 0xe3, 0xd4, 0xfe, 0xde, 0xe4, 0x89, 0x54, 0x22, - 0xa6, 0x31, 0x89, 0x03, 0xe3, 0x56, 0x5b, 0x6f, 0xd1, 0xd5, 0xae, 0x6d, 0x37, 0xa9, 0xe1, 0xd1, - 0xc0, 0x9f, 0xa8, 0xf0, 0x4f, 0xb8, 0x94, 0xc5, 0x67, 0xd9, 0x35, 0x74, 0xfb, 0xc6, 0xc6, 0x6b, - 0xd4, 0x08, 0x90, 0x6e, 0x52, 0x8f, 0x3a, 0x06, 0x6d, 0x4c, 0xc8, 0x8f, 0x19, 0x5f, 0x4a, 0x21, - 0x61, 0x0f, 0x36, 0x59, 0x84, 0x93, 0x1d, 0xcf, 0x72, 0x79, 0x15, 0x6c, 0xdd, 0xf7, 0xaf, 0xeb, - 0x6d, 0x3a, 0x31, 0x74, 0xb1, 0x70, 0xa9, 0xd6, 0x38, 0x2b, 0x61, 0x4e, 0xae, 0xa6, 0x09, 0xb0, - 0xb7, 0x0c, 0xb9, 0x04, 0x55, 0x95, 0x38, 0x31, 0x7c, 0xb1, 0x70, 0xa9, 0x22, 0xfa, 0x8e, 0x2a, - 0x8b, 0x61, 0x2e, 0x59, 0x80, 0xaa, 0xbe, 0xb9, 0x69, 0x39, 0x8c, 0xb2, 0xca, 0x45, 0x78, 0x3e, - 0xeb, 0xd3, 0x66, 0x25, 0x8d, 0xc0, 0x51, 0x6f, 0x18, 0x96, 0x25, 0x57, 0x81, 0xf8, 0xd4, 0xdb, - 0xb1, 0x0c, 0x3a, 0x6b, 0x18, 0x6e, 0xd7, 0x09, 0x78, 0xdd, 0x6b, 0xbc, 0xee, 0xe7, 0x64, 0xdd, - 0x49, 0xb3, 0x87, 0x02, 0x33, 0x4a, 0x91, 0x17, 0x61, 0x5c, 0x0e, 0xbb, 0x48, 0x0a, 0xc0, 0x91, - 0x4e, 0x33, 0x41, 0x62, 0x2a, 0x0f, 
0x7b, 0xa8, 0x89, 0x09, 0xe7, 0xf5, 0x6e, 0xe0, 0xb6, 0x19, - 0x64, 0x92, 0xe9, 0x9a, 0xbb, 0x4d, 0x9d, 0x89, 0xfa, 0xc5, 0xc2, 0xa5, 0x6a, 0xe3, 0xe2, 0xfe, - 0xde, 0xe4, 0xf9, 0xd9, 0xbb, 0xd0, 0xe1, 0x5d, 0x51, 0xc8, 0x0d, 0xa8, 0x99, 0x8e, 0xbf, 0xea, - 0xda, 0x96, 0xb1, 0x3b, 0x31, 0xc2, 0x2b, 0xf8, 0x94, 0xfc, 0xd4, 0xda, 0xfc, 0xf5, 0xa6, 0xc8, - 0xb8, 0xb3, 0x37, 0x79, 0xbe, 0x77, 0x76, 0x9c, 0x0a, 0xf3, 0x31, 0xc2, 0x20, 0x2b, 0x1c, 0x70, - 0xce, 0x75, 0x36, 0xad, 0xd6, 0xc4, 0x28, 0x6f, 0x8d, 0x8b, 0x7d, 0x3a, 0xf4, 0xfc, 0xf5, 0xa6, - 0xa0, 0x6b, 0x8c, 0x4a, 0x76, 0xe2, 0x15, 0x23, 0x04, 0x62, 0xc2, 0x98, 0x9a, 0x57, 0xe7, 0x6c, - 0xdd, 0x6a, 0xfb, 0x13, 0x63, 0xbc, 0xf3, 0x7e, 0xb0, 0x0f, 0x26, 0xc6, 0x89, 0x1b, 0x67, 0xe4, - 0xa7, 0x8c, 0x25, 0x92, 0x7d, 0x4c, 0x61, 0x9e, 0x7b, 0x01, 0x4e, 0xf6, 0xcc, 0x0d, 0x64, 0x1c, - 0x4a, 0xdb, 0x74, 0x97, 0x4f, 0x7d, 0x35, 0x64, 0x8f, 0xe4, 0x34, 0x54, 0x76, 0x74, 0xbb, 0x4b, - 0x27, 0x8a, 0x3c, 0x4d, 0xbc, 0x7c, 0xbc, 0xf8, 0x7c, 0x41, 0xfb, 0xf5, 0x12, 0x8c, 0xa8, 0x19, - 0xa7, 0x69, 0x39, 0xdb, 0xe4, 0x25, 0x28, 0xd9, 0x6e, 0x4b, 0xce, 0x9b, 0x9f, 0x1c, 0x78, 0x16, - 0x5b, 0x76, 0x5b, 0x8d, 0xe1, 0xfd, 0xbd, 0xc9, 0xd2, 0xb2, 0xdb, 0x42, 0x86, 0x48, 0x0c, 0xa8, - 0x6c, 0xeb, 0x9b, 0xdb, 0x3a, 0xaf, 0x43, 0x7d, 0xa6, 0x31, 0x30, 0xf4, 0x35, 0x86, 0xc2, 0xea, - 0xda, 0xa8, 0xed, 0xef, 0x4d, 0x56, 0xf8, 0x2b, 0x0a, 0x6c, 0xe2, 0x42, 0x6d, 0xc3, 0xd6, 0x8d, - 0xed, 0x2d, 0xd7, 0xa6, 0x13, 0xa5, 0x9c, 0x8c, 0x1a, 0x0a, 0x49, 0x34, 0x73, 0xf8, 0x8a, 0x11, - 0x0f, 0x62, 0xc0, 0x50, 0xd7, 0xf4, 0x2d, 0x67, 0x5b, 0xce, 0x81, 0x2f, 0x0c, 0xcc, 0x6d, 0x7d, - 0x9e, 0x7f, 0x13, 0xec, 0xef, 0x4d, 0x0e, 0x89, 0x67, 0x94, 0xd0, 0xda, 0x8f, 0xea, 0x30, 0xa6, - 0x1a, 0xe9, 0x26, 0xf5, 0x02, 0x7a, 0x9b, 0x5c, 0x84, 0xb2, 0xc3, 0x86, 0x26, 0x6f, 0xe4, 0xc6, - 0x88, 0xec, 0x2e, 0x65, 0x3e, 0x24, 0x79, 0x0e, 0xab, 0x99, 0xe8, 0x2a, 0x52, 0xe0, 0x83, 0xd7, - 0xac, 0xc9, 0x61, 0x44, 0xcd, 0xc4, 0x33, 0x4a, 0x68, 0xf2, 0x0a, 0x94, 
0xf9, 0xc7, 0x0b, 0x51, - 0x7f, 0x6a, 0x70, 0x16, 0xec, 0xd3, 0xab, 0xec, 0x0b, 0xf8, 0x87, 0x73, 0x50, 0xd6, 0x15, 0xbb, - 0xe6, 0xa6, 0x14, 0xec, 0x27, 0x73, 0x08, 0x76, 0x41, 0x74, 0xc5, 0xf5, 0xf9, 0x05, 0x64, 0x88, - 0xe4, 0xff, 0x17, 0xe0, 0xa4, 0xe1, 0x3a, 0x81, 0xce, 0x54, 0x0d, 0xb5, 0xc8, 0x4e, 0x54, 0x38, - 0x9f, 0xab, 0x03, 0xf3, 0x99, 0x4b, 0x23, 0x36, 0x1e, 0x66, 0x6b, 0x46, 0x4f, 0x32, 0xf6, 0xf2, - 0x26, 0xbf, 0x5c, 0x80, 0x87, 0xd9, 0x5c, 0xde, 0x43, 0xcc, 0x57, 0xa0, 0xa3, 0xad, 0xd5, 0xd9, - 0xfd, 0xbd, 0xc9, 0x87, 0x97, 0xb2, 0x98, 0x61, 0x76, 0x1d, 0x58, 0xed, 0x4e, 0xe9, 0xbd, 0x6a, - 0x09, 0x5f, 0xdd, 0xea, 0x33, 0xcb, 0x47, 0xa9, 0xea, 0x34, 0x1e, 0x95, 0x5d, 0x39, 0x4b, 0xb3, - 0xc3, 0xac, 0x5a, 0x90, 0xcb, 0x30, 0xbc, 0xe3, 0xda, 0xdd, 0x36, 0xf5, 0x27, 0xaa, 0x7c, 0x8a, - 0x3d, 0x97, 0x35, 0xc5, 0xde, 0xe4, 0x24, 0x8d, 0x13, 0x12, 0x7e, 0x58, 0xbc, 0xfb, 0xa8, 0xca, - 0x12, 0x0b, 0x86, 0x6c, 0xab, 0x6d, 0x05, 0x3e, 0x5f, 0x38, 0xeb, 0x33, 0x97, 0x07, 0xfe, 0x2c, - 0x31, 0x44, 0x97, 0x39, 0x98, 0x18, 0x35, 0xe2, 0x19, 0x25, 0x03, 0x36, 0x15, 0xfa, 0x86, 0x6e, - 0x8b, 0x85, 0xb5, 0x3e, 0xf3, 0xe9, 0xc1, 0x87, 0x0d, 0x43, 0x69, 0x8c, 0xca, 0x6f, 0xaa, 0xf0, - 0x57, 0x14, 0xd8, 0xe4, 0xf3, 0x30, 0x96, 0x68, 0x4d, 0x7f, 0xa2, 0xce, 0xa5, 0xf3, 0x58, 0x96, - 0x74, 0x42, 0xaa, 0x68, 0xe5, 0x49, 0xf4, 0x10, 0x1f, 0x53, 0x60, 0xe4, 0x1a, 0x54, 0x7d, 0xcb, - 0xa4, 0x86, 0xee, 0xf9, 0x13, 0x23, 0x07, 0x01, 0x1e, 0x97, 0xc0, 0xd5, 0xa6, 0x2c, 0x86, 0x21, - 0x00, 0x99, 0x02, 0xe8, 0xe8, 0x5e, 0x60, 0x09, 0x45, 0x75, 0x94, 0x2b, 0x4d, 0x63, 0xfb, 0x7b, - 0x93, 0xb0, 0x1a, 0xa6, 0x62, 0x8c, 0x82, 0xd1, 0xb3, 0xb2, 0x4b, 0x4e, 0xa7, 0x1b, 0x88, 0x85, - 0xb5, 0x26, 0xe8, 0x9b, 0x61, 0x2a, 0xc6, 0x28, 0xc8, 0x77, 0x0a, 0xf0, 0x68, 0xf4, 0xda, 0x3b, - 0xc8, 0x4e, 0x1c, 0xf9, 0x20, 0x9b, 0xdc, 0xdf, 0x9b, 0x7c, 0xb4, 0xd9, 0x9f, 0x25, 0xde, 0xad, - 0x3e, 0xda, 0x4b, 0x30, 0x3a, 0xdb, 0x0d, 0xb6, 0x5c, 0xcf, 0x7a, 0x83, 0x2b, 0xdd, 0x64, 0x01, - 0x2a, 0x01, 
0x57, 0x9e, 0xc4, 0xba, 0xfc, 0x44, 0x96, 0xa8, 0x85, 0x22, 0x7b, 0x8d, 0xee, 0x2a, - 0x6d, 0x40, 0xac, 0x8f, 0x42, 0x99, 0x12, 0xc5, 0xb5, 0x5f, 0x2b, 0x40, 0xad, 0xa1, 0xfb, 0x96, - 0xc1, 0xe0, 0xc9, 0x1c, 0x94, 0xbb, 0x3e, 0xf5, 0x0e, 0x07, 0xca, 0x67, 0xe9, 0x75, 0x9f, 0x7a, - 0xc8, 0x0b, 0x93, 0x1b, 0x50, 0xed, 0xe8, 0xbe, 0x7f, 0xcb, 0xf5, 0x4c, 0xb9, 0xd2, 0x1c, 0x10, - 0x48, 0x68, 0xc5, 0xb2, 0x28, 0x86, 0x20, 0x5a, 0x1d, 0xa2, 0xa5, 0x56, 0xfb, 0x49, 0x01, 0x4e, - 0x35, 0xba, 0x9b, 0x9b, 0xd4, 0x93, 0x4a, 0xa0, 0x54, 0xaf, 0x28, 0x54, 0x3c, 0x6a, 0x5a, 0xbe, - 0xac, 0xfb, 0xfc, 0xc0, 0x4d, 0x87, 0x0c, 0x45, 0x6a, 0x73, 0x5c, 0x5e, 0x3c, 0x01, 0x05, 0x3a, - 0xe9, 0x42, 0xed, 0x35, 0x1a, 0xf8, 0x81, 0x47, 0xf5, 0xb6, 0xfc, 0xba, 0x2b, 0x03, 0xb3, 0xba, - 0x4a, 0x83, 0x26, 0x47, 0x8a, 0x2b, 0x8f, 0x61, 0x22, 0x46, 0x9c, 0xb4, 0xef, 0x56, 0x60, 0x64, - 0xce, 0x6d, 0x6f, 0x58, 0x0e, 0x35, 0x2f, 0x9b, 0x2d, 0x4a, 0x5e, 0x85, 0x32, 0x35, 0x5b, 0x54, - 0x7e, 0xed, 0xe0, 0xeb, 0x2c, 0x03, 0x8b, 0xb4, 0x05, 0xf6, 0x86, 0x1c, 0x98, 0x2c, 0xc3, 0xd8, - 0xa6, 0xe7, 0xb6, 0xc5, 0xd4, 0xb5, 0xb6, 0xdb, 0x91, 0xaa, 0x62, 0xe3, 0x83, 0x6a, 0x3a, 0x58, - 0x48, 0xe4, 0xde, 0xd9, 0x9b, 0x84, 0xe8, 0x0d, 0x53, 0x65, 0xc9, 0xcb, 0x30, 0x11, 0xa5, 0x84, - 0x63, 0x78, 0x8e, 0x69, 0xef, 0x5c, 0x55, 0xa8, 0x34, 0xce, 0xef, 0xef, 0x4d, 0x4e, 0x2c, 0xf4, - 0xa1, 0xc1, 0xbe, 0xa5, 0xc9, 0x9b, 0x05, 0x18, 0x8f, 0x32, 0xc5, 0xbc, 0x2a, 0x35, 0x84, 0x23, - 0x9a, 0xb0, 0xf9, 0x36, 0x67, 0x21, 0xc5, 0x02, 0x7b, 0x98, 0x92, 0x05, 0x18, 0x09, 0xdc, 0x98, - 0xbc, 0x2a, 0x5c, 0x5e, 0x9a, 0xda, 0x97, 0xaf, 0xb9, 0x7d, 0xa5, 0x95, 0x28, 0x47, 0x10, 0xce, - 0xa8, 0xf7, 0x94, 0xa4, 0x86, 0xb8, 0xa4, 0xce, 0xed, 0xef, 0x4d, 0x9e, 0x59, 0xcb, 0xa4, 0xc0, - 0x3e, 0x25, 0xc9, 0x57, 0x0a, 0x30, 0xa6, 0xb2, 0xa4, 0x8c, 0x86, 0x8f, 0x52, 0x46, 0x84, 0xf5, - 0x88, 0xb5, 0x04, 0x03, 0x4c, 0x31, 0xd4, 0x7e, 0x5a, 0x86, 0x5a, 0x38, 0xb3, 0x91, 0xc7, 0xa1, - 0xc2, 0x77, 0xdc, 0x52, 0x61, 0x0d, 0x97, 0x2c, 
0xbe, 0x31, 0x47, 0x91, 0x47, 0x9e, 0x80, 0x61, - 0xc3, 0x6d, 0xb7, 0x75, 0xc7, 0xe4, 0x56, 0x94, 0x5a, 0xa3, 0xce, 0x56, 0xea, 0x39, 0x91, 0x84, - 0x2a, 0x8f, 0x9c, 0x87, 0xb2, 0xee, 0xb5, 0x84, 0x41, 0xa3, 0x26, 0xe6, 0xa3, 0x59, 0xaf, 0xe5, - 0x23, 0x4f, 0x25, 0x1f, 0x83, 0x12, 0x75, 0x76, 0x26, 0xca, 0xfd, 0x55, 0x81, 0xcb, 0xce, 0xce, - 0x4d, 0xdd, 0x6b, 0xd4, 0x65, 0x1d, 0x4a, 0x97, 0x9d, 0x1d, 0x64, 0x65, 0xc8, 0x32, 0x0c, 0x53, - 0x67, 0x87, 0xb5, 0xbd, 0xb4, 0x34, 0x7c, 0xa0, 0x4f, 0x71, 0x46, 0x22, 0xb5, 0xe2, 0x50, 0xa1, - 0x90, 0xc9, 0xa8, 0x20, 0xc8, 0x67, 0x61, 0x44, 0xe8, 0x16, 0x2b, 0xac, 0x4d, 0xfc, 0x89, 0x21, - 0x0e, 0x39, 0xd9, 0x5f, 0x39, 0xe1, 0x74, 0x91, 0x65, 0x27, 0x96, 0xe8, 0x63, 0x02, 0x8a, 0x7c, - 0x16, 0x6a, 0x6a, 0x23, 0xa8, 0x5a, 0x36, 0xd3, 0x28, 0xa2, 0x76, 0x8f, 0x48, 0x5f, 0xef, 0x5a, - 0x1e, 0x6d, 0x53, 0x27, 0xf0, 0x1b, 0x27, 0xd5, 0x36, 0x59, 0xe5, 0xfa, 0x18, 0xa1, 0x91, 0x8d, - 0x5e, 0xeb, 0x8e, 0x30, 0x4d, 0x3c, 0xde, 0x67, 0x56, 0x1f, 0xc0, 0xb4, 0xf3, 0x05, 0x38, 0x11, - 0x9a, 0x5f, 0xe4, 0x0e, 0x5e, 0x18, 0x2b, 0x9e, 0x66, 0xc5, 0x97, 0x92, 0x59, 0x77, 0xf6, 0x26, - 0x1f, 0xcb, 0xd8, 0xc3, 0x47, 0x04, 0x98, 0x06, 0xd3, 0xfe, 0xa0, 0x04, 0xbd, 0x6a, 0x77, 0x52, - 0x68, 0x85, 0xa3, 0x16, 0x5a, 0xfa, 0x83, 0xc4, 0xf4, 0xf9, 0xbc, 0x2c, 0x96, 0xff, 0xa3, 0xb2, - 0x1a, 0xa6, 0x74, 0xd4, 0x0d, 0xf3, 0xa0, 0x8c, 0x1d, 0xed, 0xad, 0x32, 0x8c, 0xcd, 0xeb, 0xb4, - 0xed, 0x3a, 0xf7, 0xdc, 0x84, 0x14, 0x1e, 0x88, 0x4d, 0xc8, 0x25, 0xa8, 0x7a, 0xb4, 0x63, 0x5b, - 0x86, 0xee, 0xf3, 0xa6, 0x97, 0x46, 0x3f, 0x94, 0x69, 0x18, 0xe6, 0xf6, 0xd9, 0x7c, 0x96, 0x1e, - 0xc8, 0xcd, 0x67, 0xf9, 0xdd, 0xdf, 0x7c, 0x6a, 0x5f, 0x29, 0x02, 0x57, 0x54, 0xc8, 0x45, 0x28, - 0xb3, 0x45, 0x38, 0x6d, 0xf2, 0xe0, 0x1d, 0x87, 0xe7, 0x90, 0x73, 0x50, 0x0c, 0x5c, 0x39, 0xf2, - 0x40, 0xe6, 0x17, 0xd7, 0x5c, 0x2c, 0x06, 0x2e, 0x79, 0x03, 0xc0, 0x70, 0x1d, 0xd3, 0x52, 0xb6, - 0xf0, 0x7c, 0x1f, 0xb6, 0xe0, 0x7a, 0xb7, 0x74, 0xcf, 0x9c, 0x0b, 0x11, 0xc5, 0xf6, 
0x23, 0x7a, - 0xc7, 0x18, 0x37, 0xf2, 0x02, 0x0c, 0xb9, 0xce, 0x42, 0xd7, 0xb6, 0xb9, 0x40, 0x6b, 0x8d, 0xff, - 0xc2, 0xf6, 0x84, 0x37, 0x78, 0xca, 0x9d, 0xbd, 0xc9, 0xb3, 0x42, 0xbf, 0x65, 0x6f, 0x2f, 0x79, - 0x56, 0x60, 0x39, 0xad, 0x66, 0xe0, 0xe9, 0x01, 0x6d, 0xed, 0xa2, 0x2c, 0xa6, 0x7d, 0xbd, 0x00, - 0xf5, 0x05, 0xeb, 0x36, 0x35, 0x5f, 0xb2, 0x1c, 0xd3, 0xbd, 0x45, 0x10, 0x86, 0x6c, 0xea, 0xb4, - 0x82, 0x2d, 0xd9, 0xfb, 0xa7, 0x62, 0x63, 0x2d, 0x3c, 0x42, 0x89, 0xea, 0xdf, 0xa6, 0x81, 0xce, - 0x46, 0xdf, 0x7c, 0x57, 0x1a, 0xf9, 0xc5, 0xa6, 0x94, 0x23, 0xa0, 0x44, 0x22, 0xd3, 0x50, 0x13, - 0xda, 0xa7, 0xe5, 0xb4, 0xb8, 0x0c, 0xab, 0xd1, 0xa4, 0xd7, 0x54, 0x19, 0x18, 0xd1, 0x68, 0xbb, - 0x70, 0xb2, 0x47, 0x0c, 0xc4, 0x84, 0x72, 0xa0, 0xb7, 0xd4, 0xfc, 0xba, 0x30, 0xb0, 0x80, 0xd7, - 0xf4, 0x56, 0x4c, 0xb8, 0x7c, 0x8d, 0x5f, 0xd3, 0xd9, 0x1a, 0xcf, 0xd0, 0xb5, 0x9f, 0x17, 0xa0, - 0xba, 0xd0, 0x75, 0x0c, 0xbe, 0x37, 0xba, 0xb7, 0x29, 0x4c, 0x29, 0x0c, 0xc5, 0x4c, 0x85, 0xa1, - 0x0b, 0x43, 0xdb, 0xb7, 0x42, 0x85, 0xa2, 0x3e, 0xb3, 0x32, 0x78, 0xaf, 0x90, 0x55, 0x9a, 0xba, - 0xc6, 0xf1, 0xc4, 0x49, 0xcd, 0x98, 0xac, 0xd0, 0xd0, 0xb5, 0x97, 0x38, 0x53, 0xc9, 0xec, 0xdc, - 0xc7, 0xa0, 0x1e, 0x23, 0x3b, 0x94, 0xd1, 0xf6, 0x77, 0xca, 0x30, 0xb4, 0xd8, 0x6c, 0xce, 0xae, - 0x2e, 0x91, 0x67, 0xa0, 0x2e, 0x8d, 0xf8, 0xd7, 0x23, 0x19, 0x84, 0x67, 0x38, 0xcd, 0x28, 0x0b, - 0xe3, 0x74, 0x4c, 0x1d, 0xf3, 0xa8, 0x6e, 0xb7, 0xe5, 0x60, 0x09, 0xd5, 0x31, 0x64, 0x89, 0x28, - 0xf2, 0x88, 0x0e, 0x63, 0x6c, 0x87, 0xc7, 0x44, 0x28, 0x76, 0x6f, 0x72, 0xd8, 0x1c, 0x70, 0x7f, - 0xc7, 0x95, 0xc4, 0xf5, 0x04, 0x00, 0xa6, 0x00, 0xc9, 0xf3, 0x50, 0xd5, 0xbb, 0xc1, 0x16, 0x57, - 0xa0, 0xc5, 0xd8, 0x38, 0xcf, 0xcf, 0x38, 0x64, 0xda, 0x9d, 0xbd, 0xc9, 0x91, 0x6b, 0xd8, 0x78, - 0x46, 0xbd, 0x63, 0x48, 0xcd, 0x2a, 0xa7, 0x76, 0x8c, 0xb2, 0x72, 0x95, 0x43, 0x57, 0x6e, 0x35, - 0x01, 0x80, 0x29, 0x40, 0xf2, 0x0a, 0x8c, 0x6c, 0xd3, 0xdd, 0x40, 0xdf, 0x90, 0x0c, 0x86, 0x0e, - 0xc3, 0x60, 0x9c, 0xa9, 
0x70, 0xd7, 0x62, 0xc5, 0x31, 0x01, 0x46, 0x7c, 0x38, 0xbd, 0x4d, 0xbd, - 0x0d, 0xea, 0xb9, 0x72, 0xf7, 0x29, 0x99, 0x0c, 0x1f, 0x86, 0xc9, 0xc4, 0xfe, 0xde, 0xe4, 0xe9, - 0x6b, 0x19, 0x30, 0x98, 0x09, 0xae, 0xfd, 0xac, 0x08, 0x27, 0x16, 0xc5, 0x29, 0xaa, 0xeb, 0x89, - 0x45, 0x98, 0x9c, 0x85, 0x92, 0xd7, 0xe9, 0xf2, 0x9e, 0x53, 0x12, 0x76, 0x52, 0x5c, 0x5d, 0x47, - 0x96, 0x46, 0x5e, 0x86, 0xaa, 0x29, 0xa7, 0x0c, 0xb9, 0xf9, 0x3d, 0xec, 0x44, 0xc3, 0x17, 0x41, - 0xf5, 0x86, 0x21, 0x1a, 0xd3, 0xf4, 0xdb, 0x7e, 0xab, 0x69, 0xbd, 0x41, 0xe5, 0x7e, 0x90, 0x6b, - 0xfa, 0x2b, 0x22, 0x09, 0x55, 0x1e, 0x5b, 0x55, 0xb7, 0xe9, 0xae, 0xd8, 0x0d, 0x95, 0xa3, 0x55, - 0xf5, 0x9a, 0x4c, 0xc3, 0x30, 0x97, 0x4c, 0xaa, 0xc1, 0xc2, 0x7a, 0x41, 0x59, 0xec, 0xe4, 0x6f, - 0xb2, 0x04, 0x39, 0x6e, 0xd8, 0x94, 0xf9, 0x9a, 0x15, 0x04, 0xd4, 0x93, 0xcd, 0x38, 0xd0, 0x94, - 0x79, 0x95, 0x23, 0xa0, 0x44, 0x22, 0x1f, 0x86, 0x1a, 0x07, 0x6f, 0xd8, 0xee, 0x06, 0x6f, 0xb8, - 0x9a, 0xd8, 0xd3, 0xdf, 0x54, 0x89, 0x18, 0xe5, 0x6b, 0xbf, 0x28, 0xc2, 0x99, 0x45, 0x1a, 0x08, - 0xad, 0x66, 0x9e, 0x76, 0x6c, 0x77, 0x97, 0xa9, 0x96, 0x48, 0x5f, 0x27, 0x2f, 0x02, 0x58, 0xfe, - 0x46, 0x73, 0xc7, 0xe0, 0xe3, 0x40, 0x8c, 0xe1, 0x8b, 0x72, 0x48, 0xc2, 0x52, 0xb3, 0x21, 0x73, - 0xee, 0x24, 0xde, 0x30, 0x56, 0x26, 0xda, 0x5e, 0x15, 0xef, 0xb2, 0xbd, 0x6a, 0x02, 0x74, 0x22, - 0x05, 0xb5, 0xc4, 0x29, 0xff, 0xbb, 0x62, 0x73, 0x18, 0xdd, 0x34, 0x06, 0x93, 0x47, 0x65, 0x74, - 0x60, 0xdc, 0xa4, 0x9b, 0x7a, 0xd7, 0x0e, 0x42, 0xa5, 0x5a, 0x0e, 0xe2, 0x83, 0xeb, 0xe5, 0xe1, - 0x09, 0xef, 0x7c, 0x0a, 0x09, 0x7b, 0xb0, 0xb5, 0xdf, 0x2d, 0xc1, 0xb9, 0x45, 0x1a, 0x84, 0x16, - 0x17, 0x39, 0x3b, 0x36, 0x3b, 0xd4, 0x60, 0xad, 0xf0, 0x66, 0x01, 0x86, 0x6c, 0x7d, 0x83, 0xda, - 0x6c, 0xf5, 0x62, 0x5f, 0xf3, 0xea, 0xc0, 0x0b, 0x41, 0x7f, 0x2e, 0x53, 0xcb, 0x9c, 0x43, 0x6a, - 0x69, 0x10, 0x89, 0x28, 0xd9, 0xb3, 0x49, 0xdd, 0xb0, 0xbb, 0x7e, 0x40, 0xbd, 0x55, 0xd7, 0x0b, - 0xa4, 0x3e, 0x19, 0x4e, 0xea, 0x73, 0x51, 0x16, 0xc6, 0xe9, 
0xc8, 0x0c, 0x80, 0x61, 0x5b, 0xd4, - 0x09, 0x78, 0x29, 0x31, 0xae, 0x88, 0x6a, 0xdf, 0xb9, 0x30, 0x07, 0x63, 0x54, 0x8c, 0x55, 0xdb, - 0x75, 0xac, 0xc0, 0x15, 0xac, 0xca, 0x49, 0x56, 0x2b, 0x51, 0x16, 0xc6, 0xe9, 0x78, 0x31, 0x1a, - 0x78, 0x96, 0xe1, 0xf3, 0x62, 0x95, 0x54, 0xb1, 0x28, 0x0b, 0xe3, 0x74, 0x6c, 0xcd, 0x8b, 0x7d, - 0xff, 0xa1, 0xd6, 0xbc, 0x6f, 0xd7, 0xe0, 0x42, 0x42, 0xac, 0x81, 0x1e, 0xd0, 0xcd, 0xae, 0xdd, - 0xa4, 0x81, 0x6a, 0xc0, 0x01, 0xd7, 0xc2, 0xff, 0x1b, 0xb5, 0xbb, 0xf0, 0xdd, 0x30, 0x8e, 0xa6, - 0xdd, 0x7b, 0x2a, 0x78, 0xa0, 0xb6, 0x9f, 0x86, 0x9a, 0xa3, 0x07, 0x3e, 0x1f, 0xb8, 0x72, 0x8c, - 0x86, 0x6a, 0xd8, 0x75, 0x95, 0x81, 0x11, 0x0d, 0x59, 0x85, 0xd3, 0x52, 0xc4, 0x97, 0x6f, 0x77, - 0x5c, 0x2f, 0xa0, 0x9e, 0x28, 0x2b, 0x97, 0x53, 0x59, 0xf6, 0xf4, 0x4a, 0x06, 0x0d, 0x66, 0x96, - 0x24, 0x2b, 0x70, 0xca, 0x10, 0xe7, 0xd9, 0xd4, 0x76, 0x75, 0x53, 0x01, 0x0a, 0x03, 0x57, 0xb8, - 0x35, 0x9a, 0xeb, 0x25, 0xc1, 0xac, 0x72, 0xe9, 0xde, 0x3c, 0x34, 0x50, 0x6f, 0x1e, 0x1e, 0xa4, - 0x37, 0x57, 0x07, 0xeb, 0xcd, 0xb5, 0x83, 0xf5, 0x66, 0x26, 0x79, 0xd6, 0x8f, 0xa8, 0xc7, 0xd4, - 0x13, 0xb1, 0xc2, 0xc6, 0xdc, 0x25, 0x42, 0xc9, 0x37, 0x33, 0x68, 0x30, 0xb3, 0x24, 0xd9, 0x80, - 0x73, 0x22, 0xfd, 0xb2, 0x63, 0x78, 0xbb, 0x1d, 0xb6, 0xf0, 0xc4, 0x70, 0xeb, 0x09, 0x0b, 0xe3, - 0xb9, 0x66, 0x5f, 0x4a, 0xbc, 0x0b, 0x0a, 0xf9, 0x04, 0x8c, 0x8a, 0x56, 0x5a, 0xd1, 0x3b, 0x1c, - 0x56, 0x38, 0x4f, 0x3c, 0x2c, 0x61, 0x47, 0xe7, 0xe2, 0x99, 0x98, 0xa4, 0x25, 0xb3, 0x70, 0xa2, - 0xb3, 0x63, 0xb0, 0xc7, 0xa5, 0xcd, 0xeb, 0x94, 0x9a, 0xd4, 0xe4, 0xa7, 0x35, 0xb5, 0xc6, 0x23, - 0xca, 0xd0, 0xb1, 0x9a, 0xcc, 0xc6, 0x34, 0x3d, 0x79, 0x1e, 0x46, 0xfc, 0x40, 0xf7, 0x02, 0x69, - 0xd6, 0x9b, 0x18, 0x13, 0xce, 0x25, 0xca, 0xea, 0xd5, 0x8c, 0xe5, 0x61, 0x82, 0x32, 0x73, 0xbd, - 0x38, 0x71, 0x7c, 0xeb, 0x45, 0x9e, 0xd9, 0xea, 0x4f, 0x8b, 0x70, 0x71, 0x91, 0x06, 0x2b, 0xae, - 0x23, 0x8d, 0xa2, 0x59, 0xcb, 0xfe, 0x81, 0x6c, 0xa2, 0xc9, 0x45, 0xbb, 0x78, 0xa4, 0x8b, 0x76, - 
0xe9, 0x88, 0x16, 0xed, 0xf2, 0x31, 0x2e, 0xda, 0xbf, 0x5f, 0x84, 0x47, 0x12, 0x92, 0x5c, 0x75, - 0x4d, 0x35, 0xe1, 0xbf, 0x2f, 0xc0, 0x03, 0x08, 0xf0, 0x8e, 0xd0, 0x3b, 0xf9, 0xb1, 0x56, 0x4a, - 0xe3, 0xf9, 0x5a, 0x5a, 0xe3, 0x79, 0x25, 0xcf, 0xca, 0x97, 0xc1, 0xe1, 0x40, 0x2b, 0xde, 0x55, - 0x20, 0x9e, 0x3c, 0x84, 0x13, 0xa6, 0x9f, 0x98, 0xd2, 0x13, 0x7a, 0xaf, 0x61, 0x0f, 0x05, 0x66, - 0x94, 0x22, 0x4d, 0x78, 0xd8, 0xa7, 0x4e, 0x60, 0x39, 0xd4, 0x4e, 0xc2, 0x09, 0x6d, 0xe8, 0x31, - 0x09, 0xf7, 0x70, 0x33, 0x8b, 0x08, 0xb3, 0xcb, 0xe6, 0x99, 0x07, 0xfe, 0x1c, 0xb8, 0xca, 0x29, - 0x44, 0x73, 0x64, 0x1a, 0xcb, 0x9b, 0x69, 0x8d, 0xe5, 0xd5, 0xfc, 0xed, 0x36, 0x98, 0xb6, 0x32, - 0x03, 0xc0, 0x5b, 0x21, 0xae, 0xae, 0x84, 0x8b, 0x34, 0x86, 0x39, 0x18, 0xa3, 0x62, 0x0b, 0x90, - 0x92, 0x73, 0x5c, 0x53, 0x09, 0x17, 0xa0, 0x66, 0x3c, 0x13, 0x93, 0xb4, 0x7d, 0xb5, 0x9d, 0xca, - 0xc0, 0xda, 0xce, 0x55, 0x20, 0x09, 0xc3, 0xa3, 0xc0, 0x1b, 0x4a, 0x3a, 0x4f, 0x2e, 0xf5, 0x50, - 0x60, 0x46, 0xa9, 0x3e, 0x5d, 0x79, 0xf8, 0x68, 0xbb, 0x72, 0x75, 0xf0, 0xae, 0x4c, 0x5e, 0x85, - 0xb3, 0x9c, 0x95, 0x94, 0x4f, 0x12, 0x58, 0xe8, 0x3d, 0x1f, 0x90, 0xc0, 0x67, 0xb1, 0x1f, 0x21, - 0xf6, 0xc7, 0x60, 0xed, 0x63, 0x78, 0xd4, 0x64, 0xcc, 0x75, 0xbb, 0xbf, 0x4e, 0x34, 0x97, 0x41, - 0x83, 0x99, 0x25, 0x59, 0x17, 0x0b, 0x58, 0x37, 0xd4, 0x37, 0x6c, 0x6a, 0x4a, 0xe7, 0xd1, 0xb0, - 0x8b, 0xad, 0x2d, 0x37, 0x65, 0x0e, 0xc6, 0xa8, 0xb2, 0xd4, 0x94, 0x91, 0x43, 0xaa, 0x29, 0x8b, - 0xdc, 0x4a, 0xbf, 0x99, 0xd0, 0x86, 0xa4, 0xae, 0x13, 0xba, 0x03, 0xcf, 0xa5, 0x09, 0xb0, 0xb7, - 0x0c, 0xd7, 0x12, 0x0d, 0xcf, 0xea, 0x04, 0x7e, 0x12, 0x6b, 0x2c, 0xa5, 0x25, 0x66, 0xd0, 0x60, - 0x66, 0x49, 0xa6, 0x9f, 0x6f, 0x51, 0xdd, 0x0e, 0xb6, 0x92, 0x80, 0x27, 0x92, 0xfa, 0xf9, 0x95, - 0x5e, 0x12, 0xcc, 0x2a, 0x97, 0xb9, 0x20, 0x8d, 0x3f, 0x98, 0x6a, 0xd5, 0x57, 0x4b, 0x70, 0x76, - 0x91, 0x06, 0xa1, 0x5f, 0xcd, 0xfb, 0x66, 0x94, 0x77, 0xc1, 0x8c, 0xf2, 0xad, 0x0a, 0x9c, 0x5a, - 0xa4, 0x41, 0x8f, 0x36, 0xf6, 0x9f, 
0x54, 0xfc, 0x2b, 0x70, 0x2a, 0x72, 0xe5, 0x6a, 0x06, 0xae, - 0x27, 0xd6, 0xf2, 0xd4, 0x6e, 0xb9, 0xd9, 0x4b, 0x82, 0x59, 0xe5, 0xc8, 0x67, 0xe1, 0x11, 0xbe, - 0xd4, 0x3b, 0x2d, 0x61, 0x9f, 0x15, 0xc6, 0x84, 0xd8, 0x65, 0x84, 0x49, 0x09, 0xf9, 0x48, 0x33, - 0x9b, 0x0c, 0xfb, 0x95, 0x27, 0x5f, 0x82, 0x91, 0x8e, 0xd5, 0xa1, 0xb6, 0xe5, 0x70, 0xfd, 0x2c, - 0xb7, 0x4b, 0xc8, 0x6a, 0x0c, 0x2c, 0xda, 0xc0, 0xc5, 0x53, 0x31, 0xc1, 0x30, 0xb3, 0xa7, 0x56, - 0x8f, 0xb1, 0xa7, 0xfe, 0x4b, 0x11, 0x86, 0x17, 0x3d, 0xb7, 0xdb, 0x69, 0xec, 0x92, 0x16, 0x0c, - 0xdd, 0xe2, 0x87, 0x67, 0xf2, 0x68, 0x6a, 0x70, 0x77, 0x68, 0x71, 0x06, 0x17, 0xa9, 0x44, 0xe2, - 0x1d, 0x25, 0x3c, 0xeb, 0xc4, 0xdb, 0x74, 0x97, 0x9a, 0xf2, 0x0c, 0x2d, 0xec, 0xc4, 0xd7, 0x58, - 0x22, 0x8a, 0x3c, 0xd2, 0x86, 0x13, 0xba, 0x6d, 0xbb, 0xb7, 0xa8, 0xb9, 0xac, 0x07, 0xd4, 0xa1, - 0xbe, 0x3a, 0x92, 0x3c, 0xac, 0x59, 0x9a, 0x9f, 0xeb, 0xcf, 0x26, 0xa1, 0x30, 0x8d, 0x4d, 0x5e, - 0x83, 0x61, 0x3f, 0x70, 0x3d, 0xa5, 0x6c, 0xd5, 0x67, 0xe6, 0x06, 0x6f, 0xf4, 0xc6, 0x67, 0x9a, - 0x02, 0x4a, 0xd8, 0xec, 0xe5, 0x0b, 0x2a, 0x06, 0xda, 0x37, 0x0b, 0x00, 0x57, 0xd6, 0xd6, 0x56, - 0xe5, 0xf1, 0x82, 0x09, 0x65, 0xbd, 0x1b, 0x1e, 0x54, 0x0e, 0x7e, 0x20, 0x98, 0xf0, 0x87, 0x94, - 0x67, 0x78, 0xdd, 0x60, 0x0b, 0x39, 0x3a, 0xf9, 0x10, 0x0c, 0x4b, 0x05, 0x59, 0x8a, 0x3d, 0x74, - 0x2d, 0x90, 0x4a, 0x34, 0xaa, 0x7c, 0xed, 0xb7, 0x8a, 0x00, 0x4b, 0xa6, 0x4d, 0x9b, 0xca, 0x83, - 0xbd, 0x16, 0x6c, 0x79, 0xd4, 0xdf, 0x72, 0x6d, 0x73, 0xc0, 0xd3, 0x54, 0x6e, 0xf3, 0x5f, 0x53, - 0x20, 0x18, 0xe1, 0x11, 0x13, 0x46, 0xfc, 0x80, 0x76, 0x96, 0x9c, 0x80, 0x7a, 0x3b, 0xba, 0x3d, - 0xe0, 0x21, 0xca, 0xb8, 0xb0, 0x8b, 0x44, 0x38, 0x98, 0x40, 0x25, 0x3a, 0xd4, 0x2d, 0xc7, 0x10, - 0x03, 0xa4, 0xb1, 0x3b, 0x60, 0x47, 0x3a, 0xc1, 0x76, 0x1c, 0x4b, 0x11, 0x0c, 0xc6, 0x31, 0xb5, - 0x1f, 0x17, 0xe1, 0x0c, 0xe7, 0xc7, 0xaa, 0x91, 0xf0, 0xc7, 0x24, 0xff, 0xb3, 0xe7, 0xb6, 0xdd, - 0x7f, 0x3b, 0x18, 0x6b, 0x71, 0x59, 0x6b, 0x85, 0x06, 0x7a, 0xa4, 0xcf, 
0x45, 0x69, 0xb1, 0x2b, - 0x76, 0x5d, 0x28, 0xfb, 0x6c, 0xbe, 0x12, 0xd2, 0x6b, 0x0e, 0xdc, 0x85, 0xb2, 0x3f, 0x80, 0xcf, - 0x5e, 0xe1, 0xa9, 0x31, 0x9f, 0xb5, 0x38, 0x3b, 0xf2, 0xbf, 0x61, 0xc8, 0x0f, 0xf4, 0xa0, 0xab, - 0x86, 0xe6, 0xfa, 0x51, 0x33, 0xe6, 0xe0, 0xd1, 0x3c, 0x22, 0xde, 0x51, 0x32, 0xd5, 0x7e, 0x5c, - 0x80, 0x73, 0xd9, 0x05, 0x97, 0x2d, 0x3f, 0x20, 0xff, 0xa3, 0x47, 0xec, 0x07, 0x6c, 0x71, 0x56, - 0x9a, 0x0b, 0x3d, 0x74, 0xc8, 0x56, 0x29, 0x31, 0x91, 0x07, 0x50, 0xb1, 0x02, 0xda, 0x56, 0xfb, - 0xcb, 0x1b, 0x47, 0xfc, 0xe9, 0xb1, 0xa5, 0x9d, 0x71, 0x41, 0xc1, 0x4c, 0x7b, 0xab, 0xd8, 0xef, - 0x93, 0xf9, 0xf2, 0x61, 0x27, 0x7d, 0x7e, 0xaf, 0xe5, 0xf3, 0xf9, 0x4d, 0x56, 0xa8, 0xd7, 0xf5, - 0xf7, 0x7f, 0xf5, 0xba, 0xfe, 0xde, 0xc8, 0xef, 0xfa, 0x9b, 0x12, 0x43, 0x5f, 0x0f, 0xe0, 0x77, - 0x4a, 0x70, 0xfe, 0x6e, 0xdd, 0x86, 0xad, 0x67, 0xb2, 0x77, 0xe6, 0x5d, 0xcf, 0xee, 0xde, 0x0f, - 0xc9, 0x0c, 0x54, 0x3a, 0x5b, 0xba, 0xaf, 0x94, 0x32, 0xb5, 0x61, 0xa9, 0xac, 0xb2, 0xc4, 0x3b, - 0x6c, 0xd2, 0xe0, 0xca, 0x1c, 0x7f, 0x45, 0x41, 0xca, 0xa6, 0xe3, 0x36, 0xf5, 0xfd, 0xc8, 0x26, - 0x10, 0x4e, 0xc7, 0x2b, 0x22, 0x19, 0x55, 0x3e, 0x09, 0x60, 0x48, 0x98, 0x98, 0xe5, 0xca, 0x34, - 0xb8, 0x23, 0x57, 0x86, 0x9b, 0x78, 0xf4, 0x51, 0xf2, 0xb4, 0x42, 0xf2, 0x22, 0x53, 0x50, 0x0e, - 0x22, 0xa7, 0x5d, 0xb5, 0x35, 0x2f, 0x67, 0xe8, 0xa7, 0x9c, 0x8e, 0x6d, 0xec, 0xdd, 0x0d, 0x6e, - 0x54, 0x37, 0xe5, 0xf9, 0xb9, 0xe5, 0x3a, 0x5c, 0x21, 0x2b, 0x45, 0x1b, 0xfb, 0x1b, 0x3d, 0x14, - 0x98, 0x51, 0x4a, 0xfb, 0xab, 0x2a, 0x9c, 0xc9, 0xee, 0x0f, 0x4c, 0x6e, 0x3b, 0xd4, 0xf3, 0x19, - 0x76, 0x21, 0x29, 0xb7, 0x9b, 0x22, 0x19, 0x55, 0xfe, 0x7b, 0xda, 0xe1, 0xec, 0x5b, 0x05, 0x38, - 0xeb, 0xc9, 0x33, 0xa2, 0xfb, 0xe1, 0x74, 0xf6, 0x98, 0x30, 0x67, 0xf4, 0x61, 0x88, 0xfd, 0xeb, - 0x42, 0x7e, 0xa3, 0x00, 0x13, 0xed, 0x94, 0x9d, 0xe3, 0x18, 0x2f, 0x8c, 0x71, 0xaf, 0xf8, 0x95, - 0x3e, 0xfc, 0xb0, 0x6f, 0x4d, 0xc8, 0x97, 0xa0, 0xde, 0x61, 0xfd, 0xc2, 0x0f, 0xa8, 0x63, 0xa8, - 0x3b, 0x63, 
0x83, 0x8f, 0xa4, 0xd5, 0x08, 0x4b, 0xb9, 0xa2, 0x09, 0xfd, 0x20, 0x96, 0x81, 0x71, - 0x8e, 0x0f, 0xf8, 0x0d, 0xb1, 0x4b, 0x50, 0xf5, 0x69, 0x10, 0x58, 0x4e, 0x4b, 0xec, 0x37, 0x6a, - 0x62, 0xac, 0x34, 0x65, 0x1a, 0x86, 0xb9, 0xe4, 0xc3, 0x50, 0xe3, 0x47, 0x4e, 0xb3, 0x5e, 0xcb, - 0x9f, 0xa8, 0x71, 0x77, 0xb1, 0x51, 0xe1, 0x00, 0x27, 0x13, 0x31, 0xca, 0x27, 0x4f, 0xc3, 0xc8, - 0x06, 0x1f, 0xbe, 0xf2, 0xd2, 0xb0, 0xb0, 0x71, 0x71, 0x6d, 0xad, 0x11, 0x4b, 0xc7, 0x04, 0x15, - 0x99, 0x01, 0xa0, 0xe1, 0xb9, 0x5c, 0xda, 0x9e, 0x15, 0x9d, 0xd8, 0x61, 0x8c, 0x8a, 0x3c, 0x06, - 0xa5, 0xc0, 0xf6, 0xb9, 0x0d, 0xab, 0x1a, 0x6d, 0x41, 0xd7, 0x96, 0x9b, 0xc8, 0xd2, 0xb5, 0x5f, - 0x14, 0xe0, 0x44, 0xea, 0x72, 0x09, 0x2b, 0xd2, 0xf5, 0x6c, 0x39, 0x8d, 0x84, 0x45, 0xd6, 0x71, - 0x19, 0x59, 0x3a, 0x79, 0x55, 0xaa, 0xe5, 0xc5, 0x9c, 0xf1, 0x11, 0xae, 0xeb, 0x81, 0xcf, 0xf4, - 0xf0, 0x1e, 0x8d, 0x9c, 0x1f, 0xf3, 0x45, 0xf5, 0x91, 0xeb, 0x40, 0xec, 0x98, 0x2f, 0xca, 0xc3, - 0x04, 0x65, 0xca, 0xe0, 0x57, 0x3e, 0x88, 0xc1, 0x4f, 0xfb, 0x7a, 0x31, 0x26, 0x01, 0xa9, 0xd9, - 0xdf, 0x43, 0x02, 0x4f, 0xb2, 0x05, 0x34, 0x5c, 0xdc, 0x6b, 0xf1, 0xf5, 0x8f, 0x2f, 0xc6, 0x32, - 0x97, 0xbc, 0x24, 0x64, 0x5f, 0xca, 0x79, 0x0b, 0x75, 0x6d, 0xb9, 0x29, 0xbc, 0xab, 0x54, 0xab, - 0x85, 0x4d, 0x50, 0x3e, 0xa6, 0x26, 0xd0, 0xfe, 0xac, 0x04, 0xf5, 0xab, 0xee, 0xc6, 0x7b, 0xc4, - 0x83, 0x3a, 0x7b, 0x99, 0x2a, 0xbe, 0x8b, 0xcb, 0xd4, 0x3a, 0x3c, 0x12, 0x04, 0x76, 0x93, 0x1a, - 0xae, 0x63, 0xfa, 0xb3, 0x9b, 0x01, 0xf5, 0x16, 0x2c, 0xc7, 0xf2, 0xb7, 0xa8, 0x29, 0x8f, 0x93, - 0x1e, 0xdd, 0xdf, 0x9b, 0x7c, 0x64, 0x6d, 0x6d, 0x39, 0x8b, 0x04, 0xfb, 0x95, 0xe5, 0xd3, 0x86, - 0x6e, 0x6c, 0xbb, 0x9b, 0x9b, 0xfc, 0xa6, 0x8c, 0xf4, 0xb9, 0x11, 0xd3, 0x46, 0x2c, 0x1d, 0x13, - 0x54, 0xda, 0xdb, 0x45, 0xa8, 0x85, 0x37, 0xdf, 0xc9, 0x13, 0x30, 0xbc, 0xe1, 0xb9, 0xdb, 0xd4, - 0x13, 0x27, 0x77, 0xf2, 0xa6, 0x4c, 0x43, 0x24, 0xa1, 0xca, 0x23, 0x8f, 0x43, 0x25, 0x70, 0x3b, - 0x96, 0x91, 0x36, 0xa8, 0xad, 0xb1, 0x44, 0x14, 
0x79, 0xc7, 0xd7, 0xc1, 0x9f, 0x4c, 0xa8, 0x76, - 0xb5, 0xbe, 0xca, 0xd8, 0x2b, 0x50, 0xf6, 0x75, 0xdf, 0x96, 0xeb, 0x69, 0x8e, 0x4b, 0xe4, 0xb3, - 0xcd, 0x65, 0x79, 0x89, 0x7c, 0xb6, 0xb9, 0x8c, 0x1c, 0x54, 0xfb, 0x69, 0x11, 0xea, 0x42, 0x6e, - 0x62, 0x56, 0x38, 0x4a, 0xc9, 0xbd, 0xc0, 0x5d, 0x29, 0xfc, 0x6e, 0x9b, 0x7a, 0xdc, 0xcc, 0x24, - 0x27, 0xb9, 0xf8, 0xf9, 0x40, 0x94, 0x19, 0xba, 0x53, 0x44, 0x49, 0x4a, 0xf4, 0xe5, 0x63, 0x14, - 0x7d, 0xe5, 0x40, 0xa2, 0x1f, 0x3a, 0x0e, 0xd1, 0xbf, 0x59, 0x84, 0xda, 0xb2, 0xb5, 0x49, 0x8d, - 0x5d, 0xc3, 0xe6, 0x77, 0x02, 0x4d, 0x6a, 0xd3, 0x80, 0x2e, 0x7a, 0xba, 0x41, 0x57, 0xa9, 0x67, - 0xf1, 0xc8, 0x30, 0x6c, 0x7c, 0xf0, 0x19, 0x48, 0xde, 0x09, 0x9c, 0xef, 0x43, 0x83, 0x7d, 0x4b, - 0x93, 0x25, 0x18, 0x31, 0xa9, 0x6f, 0x79, 0xd4, 0x5c, 0x8d, 0x6d, 0x54, 0x9e, 0x50, 0x4b, 0xcd, - 0x7c, 0x2c, 0xef, 0xce, 0xde, 0xe4, 0xa8, 0x32, 0x50, 0x8a, 0x1d, 0x4b, 0xa2, 0x28, 0x1b, 0xf2, - 0x1d, 0xbd, 0xeb, 0x67, 0xd5, 0x31, 0x36, 0xe4, 0x57, 0xb3, 0x49, 0xb0, 0x5f, 0x59, 0xad, 0x02, - 0xa5, 0x65, 0xb7, 0xa5, 0xbd, 0x55, 0x82, 0x30, 0x84, 0x10, 0xf9, 0x3f, 0x05, 0xa8, 0xeb, 0x8e, - 0xe3, 0x06, 0x32, 0x3c, 0x8f, 0x38, 0x81, 0xc7, 0xdc, 0x91, 0x8a, 0xa6, 0x66, 0x23, 0x50, 0x71, - 0x78, 0x1b, 0x1e, 0x28, 0xc7, 0x72, 0x30, 0xce, 0x9b, 0x74, 0x53, 0xe7, 0xc9, 0x2b, 0xf9, 0x6b, - 0x71, 0x80, 0xd3, 0xe3, 0x73, 0x9f, 0x86, 0xf1, 0x74, 0x65, 0x0f, 0x73, 0x1c, 0x94, 0xeb, 0x60, - 0xbe, 0x08, 0x10, 0xf9, 0x94, 0xdc, 0x07, 0x23, 0x96, 0x95, 0x30, 0x62, 0x2d, 0x0e, 0x2e, 0xe0, - 0xb0, 0xd2, 0x7d, 0x0d, 0x57, 0xaf, 0xa7, 0x0c, 0x57, 0x4b, 0x47, 0xc1, 0xec, 0xee, 0xc6, 0xaa, - 0xdf, 0x2c, 0xc0, 0x78, 0x44, 0x2c, 0x6f, 0xc8, 0x3e, 0x07, 0xa3, 0x1e, 0xd5, 0xcd, 0x86, 0x1e, - 0x18, 0x5b, 0xdc, 0xd5, 0xbb, 0xc0, 0x7d, 0xb3, 0x4f, 0xee, 0xef, 0x4d, 0x8e, 0x62, 0x3c, 0x03, - 0x93, 0x74, 0x44, 0x87, 0x3a, 0x4b, 0x58, 0xb3, 0xda, 0xd4, 0xed, 0x06, 0x03, 0x5a, 0x4d, 0xf9, - 0x86, 0x05, 0x23, 0x18, 0x8c, 0x63, 0x6a, 0xef, 0x14, 0x60, 0x2c, 0x5e, 0xe1, 0x63, 
0xb7, 0xa8, - 0x6d, 0x25, 0x2d, 0x6a, 0x73, 0x47, 0xd0, 0x26, 0x7d, 0xac, 0x68, 0x3f, 0xab, 0xc6, 0x3f, 0x8d, - 0x5b, 0xce, 0xe2, 0xc6, 0x82, 0xc2, 0x5d, 0x8d, 0x05, 0xef, 0xfd, 0xa8, 0x31, 0xfd, 0xb4, 0xdc, - 0xf2, 0x03, 0xac, 0xe5, 0xbe, 0x9b, 0xa1, 0x67, 0x62, 0xe1, 0x53, 0x86, 0x72, 0x84, 0x4f, 0x69, - 0x87, 0xe1, 0x53, 0x86, 0x8f, 0x6c, 0xd2, 0x39, 0x48, 0x08, 0x95, 0xea, 0x7d, 0x0d, 0xa1, 0x52, - 0x3b, 0xae, 0x10, 0x2a, 0x90, 0x37, 0x84, 0xca, 0xd7, 0x0a, 0x30, 0x66, 0x26, 0x6e, 0xcc, 0x72, - 0xdb, 0x42, 0x9e, 0xa5, 0x26, 0x79, 0x01, 0x57, 0x5c, 0x99, 0x4a, 0xa6, 0x61, 0x8a, 0xa5, 0xf6, - 0xdb, 0x95, 0xf8, 0x3a, 0x70, 0xbf, 0x4d, 0xd5, 0xcf, 0x26, 0x4d, 0xd5, 0x17, 0xd3, 0xa6, 0xea, - 0x13, 0x31, 0x2f, 0xd2, 0xb8, 0xb9, 0xfa, 0x23, 0xb1, 0xe9, 0x91, 0xcd, 0x49, 0xa3, 0x91, 0xa4, - 0x33, 0xa6, 0xc8, 0x8f, 0x40, 0xd5, 0x57, 0xc1, 0x1e, 0xc5, 0xc6, 0x26, 0x6a, 0x17, 0x15, 0x88, - 0x31, 0xa4, 0x60, 0x9a, 0xb8, 0x47, 0x75, 0xdf, 0x75, 0xd2, 0x9a, 0x38, 0xf2, 0x54, 0x94, 0xb9, - 0x71, 0x93, 0xf9, 0xd0, 0x3d, 0x4c, 0xe6, 0x3a, 0xd4, 0x6d, 0xdd, 0x0f, 0xd6, 0x3b, 0xa6, 0x1e, - 0x50, 0x53, 0x8e, 0xb7, 0xff, 0x7a, 0xb0, 0xb5, 0x8a, 0xad, 0x7f, 0x91, 0x42, 0xb8, 0x1c, 0xc1, - 0x60, 0x1c, 0x93, 0x98, 0x30, 0xc2, 0x5e, 0xf9, 0x68, 0x30, 0x67, 0x55, 0x08, 0x80, 0xc3, 0xf0, - 0x08, 0x2d, 0x3d, 0xcb, 0x31, 0x1c, 0x4c, 0xa0, 0xf6, 0xb1, 0xaa, 0xd7, 0x06, 0xb1, 0xaa, 0x93, - 0x4f, 0x08, 0x65, 0x63, 0x57, 0x35, 0x18, 0xb7, 0xc6, 0x8d, 0x46, 0x5e, 0x85, 0x18, 0xcf, 0xc4, - 0x24, 0xad, 0xf6, 0xb5, 0x1a, 0xd4, 0xaf, 0xeb, 0x81, 0xb5, 0x43, 0xf9, 0x11, 0xd0, 0xf1, 0xd8, - 0xe1, 0x7f, 0xa5, 0x00, 0x67, 0x92, 0x7e, 0x7e, 0xc7, 0x68, 0x8c, 0xe7, 0x51, 0x43, 0x30, 0x93, - 0x1b, 0xf6, 0xa9, 0x05, 0x37, 0xcb, 0xf7, 0xb8, 0x0d, 0x1e, 0xb7, 0x59, 0xbe, 0xd9, 0x8f, 0x21, - 0xf6, 0xaf, 0xcb, 0x7b, 0xc5, 0x2c, 0xff, 0x60, 0x47, 0x75, 0x4b, 0x1d, 0x1a, 0x0c, 0x3f, 0x30, - 0x87, 0x06, 0xd5, 0x07, 0x42, 0x53, 0xeb, 0xc4, 0x0e, 0x0d, 0x6a, 0x39, 0x9d, 0x57, 0xa4, 0x6b, - 0xbc, 0x40, 0xeb, 0x77, 
0xf8, 0xc0, 0x6f, 0xb5, 0x2b, 0x63, 0x2e, 0x53, 0x70, 0x36, 0x74, 0xdf, - 0x32, 0xe4, 0x9a, 0x99, 0x23, 0x8a, 0xa5, 0x0a, 0xf7, 0x25, 0xce, 0xb8, 0xf9, 0x2b, 0x0a, 0xec, - 0x28, 0xac, 0x58, 0x31, 0x57, 0x58, 0x31, 0x32, 0x07, 0x65, 0x87, 0x6d, 0xbd, 0x4b, 0x87, 0x0e, - 0x24, 0x76, 0xfd, 0x1a, 0xdd, 0x45, 0x5e, 0x58, 0x7b, 0xbb, 0x08, 0xc0, 0x3e, 0xff, 0x60, 0xe6, - 0xfb, 0x0f, 0xc1, 0xb0, 0xdf, 0xe5, 0x1b, 0x6d, 0xb9, 0xda, 0x47, 0x1e, 0x3f, 0x22, 0x19, 0x55, - 0x3e, 0x79, 0x1c, 0x2a, 0xaf, 0x77, 0x69, 0x57, 0x9d, 0x45, 0x87, 0xba, 0xde, 0x67, 0x58, 0x22, - 0x8a, 0xbc, 0xe3, 0x33, 0xc5, 0x29, 0x33, 0x7f, 0xe5, 0xb8, 0xcc, 0xfc, 0x35, 0x18, 0xbe, 0xee, - 0x72, 0x07, 0x42, 0xed, 0x9f, 0x8a, 0x00, 0x91, 0x83, 0x16, 0xf9, 0x66, 0x01, 0x1e, 0x0e, 0x07, - 0x5c, 0x20, 0x54, 0x76, 0x1e, 0x38, 0x36, 0xb7, 0xc9, 0x3f, 0x6b, 0xb0, 0xf3, 0x19, 0x68, 0x35, - 0x8b, 0x1d, 0x66, 0xd7, 0x82, 0x20, 0x54, 0x69, 0xbb, 0x13, 0xec, 0xce, 0x5b, 0x9e, 0xec, 0x81, - 0x99, 0x7e, 0x80, 0x97, 0x25, 0x8d, 0x28, 0x2a, 0xf7, 0x95, 0x7c, 0x10, 0xa9, 0x1c, 0x0c, 0x71, - 0xc8, 0x16, 0x54, 0x1d, 0xf7, 0x55, 0x9f, 0x89, 0x43, 0x76, 0xc7, 0x17, 0x07, 0x17, 0xb9, 0x10, - 0xab, 0x30, 0x11, 0xcb, 0x17, 0x1c, 0x76, 0xa4, 0xb0, 0xbf, 0x51, 0x84, 0x53, 0x19, 0x72, 0x20, - 0x2f, 0xc2, 0xb8, 0xf4, 0x85, 0x8b, 0x22, 0x28, 0x17, 0xa2, 0x08, 0xca, 0xcd, 0x54, 0x1e, 0xf6, - 0x50, 0x93, 0x57, 0x01, 0x74, 0xc3, 0xa0, 0xbe, 0xbf, 0xe2, 0x9a, 0x4a, 0x99, 0x7d, 0x61, 0x7f, - 0x6f, 0x12, 0x66, 0xc3, 0xd4, 0x3b, 0x7b, 0x93, 0x1f, 0xcd, 0x72, 0x6f, 0x4d, 0xc9, 0x39, 0x2a, - 0x80, 0x31, 0x48, 0xf2, 0x05, 0x00, 0xb1, 0x6f, 0x0b, 0x6f, 0xe0, 0xdf, 0xc3, 0xd8, 0x31, 0xa5, - 0x62, 0x1d, 0x4d, 0x7d, 0xa6, 0xab, 0x3b, 0x81, 0x15, 0xec, 0x8a, 0x80, 0x27, 0x37, 0x43, 0x14, - 0x8c, 0x21, 0x6a, 0x7f, 0x52, 0x84, 0xaa, 0x32, 0xb3, 0xde, 0x07, 0xdb, 0x5a, 0x2b, 0x61, 0x5b, - 0x3b, 0x22, 0x87, 0xd6, 0x2c, 0xcb, 0x9a, 0x9b, 0xb2, 0xac, 0x2d, 0xe6, 0x67, 0x75, 0x77, 0xbb, - 0xda, 0x77, 0x8a, 0x30, 0xa6, 0x48, 0xf3, 0x5a, 0xd5, 0x3e, 
0x05, 0x27, 0xc4, 0x41, 0xf4, 0x8a, - 0x7e, 0x5b, 0xc4, 0x7e, 0xe1, 0x02, 0x2b, 0x0b, 0x1f, 0xd2, 0x46, 0x32, 0x0b, 0xd3, 0xb4, 0xac, - 0x5b, 0x8b, 0xa4, 0x75, 0xb6, 0x09, 0x11, 0x47, 0x57, 0x62, 0xb3, 0xc4, 0xbb, 0x75, 0x23, 0x95, - 0x87, 0x3d, 0xd4, 0x69, 0xb3, 0x5e, 0xf9, 0x18, 0xcc, 0x7a, 0x7f, 0x5d, 0x80, 0x91, 0x48, 0x5e, - 0xc7, 0x6e, 0xd4, 0xdb, 0x4c, 0x1a, 0xf5, 0x66, 0x73, 0x77, 0x87, 0x3e, 0x26, 0xbd, 0xff, 0x37, - 0x0c, 0x09, 0xbf, 0x6a, 0xb2, 0x01, 0xe7, 0xac, 0x4c, 0xef, 0xb0, 0xd8, 0x6c, 0x13, 0x5e, 0x14, - 0x5e, 0xea, 0x4b, 0x89, 0x77, 0x41, 0x21, 0x5d, 0xa8, 0xee, 0x50, 0x2f, 0xb0, 0x0c, 0xaa, 0xbe, - 0x6f, 0x31, 0xb7, 0x4a, 0x26, 0x0d, 0x97, 0xa1, 0x4c, 0x6f, 0x4a, 0x06, 0x18, 0xb2, 0x22, 0x1b, - 0x50, 0xa1, 0x66, 0x8b, 0xaa, 0x68, 0x3c, 0x39, 0x63, 0x5d, 0x86, 0xf2, 0x64, 0x6f, 0x3e, 0x0a, - 0x68, 0xe2, 0x43, 0xcd, 0x56, 0x07, 0x53, 0xb2, 0x1f, 0x0e, 0xae, 0x60, 0x85, 0x47, 0x5c, 0xd1, - 0x45, 0xfd, 0x30, 0x09, 0x23, 0x3e, 0x64, 0x3b, 0xb4, 0x90, 0x55, 0x8e, 0x68, 0xf2, 0xb8, 0x8b, - 0x7d, 0xcc, 0x87, 0xda, 0x2d, 0x3d, 0xa0, 0x5e, 0x5b, 0xf7, 0xb6, 0xe5, 0x6e, 0x63, 0xf0, 0x2f, - 0x7c, 0x49, 0x21, 0x45, 0x5f, 0x18, 0x26, 0x61, 0xc4, 0x87, 0xb8, 0x50, 0x0b, 0xa4, 0xfa, 0xac, - 0xcc, 0x80, 0x83, 0x33, 0x55, 0x8a, 0xb8, 0x2f, 0xfd, 0xab, 0xd5, 0x2b, 0x46, 0x3c, 0xc8, 0x4e, - 0x22, 0x0e, 0xb0, 0x88, 0xfe, 0xdc, 0xc8, 0x61, 0x4e, 0x96, 0x50, 0xd1, 0x72, 0x93, 0x1d, 0x4f, - 0x58, 0x7b, 0xbb, 0x12, 0x4d, 0xcb, 0xf7, 0xdb, 0xc8, 0xf5, 0x74, 0xd2, 0xc8, 0x75, 0x21, 0x6d, - 0xe4, 0x4a, 0x9d, 0x6f, 0x1e, 0xde, 0x23, 0x33, 0x65, 0x5e, 0x2a, 0x1f, 0x83, 0x79, 0xe9, 0x29, - 0xa8, 0xef, 0xf0, 0x99, 0x40, 0x84, 0xf6, 0xa9, 0xf0, 0x65, 0x84, 0xcf, 0xec, 0x37, 0xa3, 0x64, - 0x8c, 0xd3, 0xb0, 0x22, 0xf2, 0xcf, 0x07, 0x61, 0x6c, 0x54, 0x59, 0xa4, 0x19, 0x25, 0x63, 0x9c, - 0x86, 0x3b, 0x73, 0x59, 0xce, 0xb6, 0x28, 0x30, 0xcc, 0x0b, 0x08, 0x67, 0x2e, 0x95, 0x88, 0x51, - 0x3e, 0xb9, 0x04, 0xd5, 0xae, 0xb9, 0x29, 0x68, 0xab, 0x9c, 0x96, 0x6b, 0x98, 0xeb, 0xf3, 0x0b, - 
0x32, 0xd4, 0x90, 0xca, 0x65, 0x35, 0x69, 0xeb, 0x1d, 0x95, 0xc1, 0xf7, 0x86, 0xb2, 0x26, 0x2b, - 0x51, 0x32, 0xc6, 0x69, 0xc8, 0xc7, 0x61, 0xcc, 0xa3, 0x66, 0xd7, 0xa0, 0x61, 0x29, 0x61, 0x9d, - 0x22, 0xe2, 0x17, 0x0f, 0xf1, 0x1c, 0x4c, 0x51, 0xf6, 0x31, 0x92, 0xd5, 0x07, 0x72, 0x3d, 0xfd, - 0x51, 0x01, 0x48, 0xaf, 0xf3, 0x33, 0xd9, 0x82, 0x21, 0x87, 0x5b, 0xbf, 0x72, 0x47, 0x53, 0x8e, - 0x19, 0xd1, 0xc4, 0xb4, 0x24, 0x13, 0x24, 0x3e, 0x71, 0xa0, 0x4a, 0x6f, 0x07, 0xd4, 0x73, 0xc2, - 0xcb, 0x10, 0x47, 0x13, 0xb9, 0x59, 0xec, 0x06, 0x24, 0x32, 0x86, 0x3c, 0xb4, 0x9f, 0x14, 0xa1, - 0x1e, 0xa3, 0xbb, 0xd7, 0xa6, 0x92, 0xdf, 0xc7, 0x16, 0x46, 0xa7, 0x75, 0xcf, 0x96, 0x23, 0x2c, - 0x76, 0x1f, 0x5b, 0x66, 0xe1, 0x32, 0xc6, 0xe9, 0xc8, 0x0c, 0x40, 0x5b, 0xf7, 0x03, 0xea, 0xf1, - 0xd5, 0x37, 0x75, 0x0b, 0x7a, 0x25, 0xcc, 0xc1, 0x18, 0x15, 0xb9, 0x28, 0x63, 0x6f, 0x97, 0x93, - 0x51, 0xeb, 0xfa, 0x04, 0xd6, 0xae, 0x1c, 0x41, 0x60, 0x6d, 0xd2, 0x82, 0x71, 0x55, 0x6b, 0x95, - 0x7b, 0xb8, 0x98, 0x66, 0x62, 0xff, 0x92, 0x82, 0xc0, 0x1e, 0x50, 0xed, 0xed, 0x02, 0x8c, 0x26, - 0x4c, 0x1e, 0x22, 0xde, 0x9c, 0x72, 0xdd, 0x4f, 0xc4, 0x9b, 0x8b, 0x79, 0xdc, 0x3f, 0x09, 0x43, - 0x42, 0x40, 0x69, 0x8f, 0x3c, 0x21, 0x42, 0x94, 0xb9, 0x6c, 0x2e, 0x93, 0x46, 0xd5, 0xf4, 0x5c, - 0x26, 0xad, 0xae, 0xa8, 0xf2, 0x85, 0xad, 0x5e, 0xd4, 0xae, 0xd7, 0x56, 0x2f, 0xd2, 0x31, 0xa4, - 0xd0, 0xfe, 0xad, 0x04, 0xdc, 0x7f, 0x85, 0x3c, 0x07, 0xb5, 0x36, 0x35, 0xb6, 0x74, 0xc7, 0xf2, - 0x55, 0xbc, 0x49, 0xb6, 0xbb, 0xad, 0xad, 0xa8, 0xc4, 0x3b, 0x0c, 0x60, 0xb6, 0xb9, 0xcc, 0x5d, - 0xc4, 0x23, 0x5a, 0x62, 0xc0, 0x50, 0xcb, 0xf7, 0xf5, 0x8e, 0x95, 0xfb, 0xf8, 0x54, 0xc4, 0xf7, - 0x13, 0x83, 0x48, 0x3c, 0xa3, 0x84, 0x26, 0x06, 0x54, 0x3a, 0xb6, 0x6e, 0x39, 0xb9, 0x7f, 0x70, - 0xc2, 0xbe, 0x60, 0x95, 0x21, 0x09, 0x93, 0x0e, 0x7f, 0x44, 0x81, 0x4d, 0xba, 0x50, 0xf7, 0x0d, - 0x4f, 0x6f, 0xfb, 0x5b, 0xfa, 0xcc, 0x33, 0xcf, 0xe6, 0x56, 0x92, 0x22, 0x56, 0x62, 0xce, 0x9e, - 0xc3, 0xd9, 0x95, 0xe6, 0x95, 0xd9, 
0x99, 0x67, 0x9e, 0xc5, 0x38, 0x9f, 0x38, 0xdb, 0x67, 0x9e, - 0x9a, 0x91, 0xfd, 0xfe, 0xc8, 0xd9, 0x3e, 0xf3, 0xd4, 0x0c, 0xc6, 0xf9, 0x68, 0xff, 0x5a, 0x80, - 0x5a, 0x48, 0x4b, 0xd6, 0x01, 0xd8, 0x08, 0x94, 0x11, 0xf9, 0x0e, 0x15, 0x1d, 0x9f, 0xef, 0x8a, - 0xd7, 0xc3, 0xc2, 0x18, 0x03, 0xca, 0x08, 0x59, 0x58, 0x3c, 0xea, 0x90, 0x85, 0xd3, 0x50, 0xdb, - 0xd2, 0x1d, 0xd3, 0xdf, 0xd2, 0xb7, 0xc5, 0x44, 0x14, 0x0b, 0xe2, 0x79, 0x45, 0x65, 0x60, 0x44, - 0xa3, 0xfd, 0xd1, 0x10, 0x88, 0x33, 0x4f, 0x36, 0x54, 0x4c, 0xcb, 0x17, 0x4e, 0xb7, 0x05, 0x5e, - 0x32, 0x1c, 0x2a, 0xf3, 0x32, 0x1d, 0x43, 0x0a, 0x72, 0x16, 0x4a, 0x6d, 0xcb, 0x91, 0x27, 0x1e, - 0xdc, 0xe0, 0xb5, 0x62, 0x39, 0xc8, 0xd2, 0x78, 0x96, 0x7e, 0x5b, 0xfa, 0x4b, 0x89, 0x2c, 0xfd, - 0x36, 0xb2, 0x34, 0xb6, 0x05, 0xb5, 0x5d, 0x77, 0x7b, 0x43, 0x37, 0xb6, 0x95, 0x5b, 0x55, 0x99, - 0x2f, 0x84, 0x7c, 0x0b, 0xba, 0x9c, 0xcc, 0xc2, 0x34, 0x2d, 0x59, 0x87, 0x47, 0xde, 0xa0, 0x9e, - 0x2b, 0x47, 0x79, 0xd3, 0xa6, 0xb4, 0xa3, 0x60, 0x84, 0x0a, 0xc1, 0xbd, 0xb3, 0x3e, 0x97, 0x4d, - 0x82, 0xfd, 0xca, 0x72, 0x3f, 0x4f, 0xdd, 0x6b, 0xd1, 0x60, 0xd5, 0x73, 0x0d, 0xea, 0xfb, 0x96, - 0xd3, 0x52, 0xb0, 0x43, 0x11, 0xec, 0x5a, 0x36, 0x09, 0xf6, 0x2b, 0x4b, 0x5e, 0x86, 0x09, 0x91, - 0x25, 0x16, 0xdb, 0xd9, 0x1d, 0xdd, 0xb2, 0xf5, 0x0d, 0xcb, 0x56, 0xff, 0x05, 0x1b, 0x15, 0xe7, - 0x0a, 0x6b, 0x7d, 0x68, 0xb0, 0x6f, 0x69, 0x72, 0x15, 0xc6, 0xd5, 0xa9, 0xd2, 0x2a, 0xf5, 0x9a, - 0xe1, 0x39, 0xf8, 0x68, 0xe3, 0x02, 0xdb, 0xef, 0xcd, 0xd3, 0x8e, 0x47, 0x0d, 0xae, 0x75, 0xa5, - 0xa8, 0xb0, 0xa7, 0x1c, 0x41, 0x38, 0xc3, 0x0f, 0xbb, 0xd7, 0x3b, 0x73, 0xae, 0x6b, 0x9b, 0xee, - 0x2d, 0x47, 0x7d, 0xbb, 0x50, 0x6c, 0xf8, 0x41, 0x52, 0x33, 0x93, 0x02, 0xfb, 0x94, 0x64, 0x5f, - 0xce, 0x73, 0xe6, 0xdd, 0x5b, 0x4e, 0x1a, 0x15, 0xa2, 0x2f, 0x6f, 0xf6, 0xa1, 0xc1, 0xbe, 0xa5, - 0xc9, 0x02, 0x90, 0xf4, 0x17, 0xac, 0x77, 0xb8, 0x32, 0x34, 0xda, 0x38, 0x23, 0x82, 0x6b, 0xa4, - 0x73, 0x31, 0xa3, 0x04, 0x59, 0x86, 0xd3, 0xe9, 0x54, 0xc6, 0x8e, 0x7b, 
0xd8, 0x8f, 0x8a, 0xb0, - 0x9a, 0x98, 0x91, 0x8f, 0x99, 0xa5, 0xb4, 0x3f, 0x2e, 0xc2, 0x68, 0xe2, 0x36, 0xf6, 0x03, 0x77, - 0xeb, 0x95, 0x69, 0xa0, 0x6d, 0xbf, 0xb5, 0x34, 0x7f, 0x85, 0xea, 0x26, 0xf5, 0xae, 0x51, 0x75, - 0x73, 0x9e, 0x4f, 0x2a, 0x2b, 0x89, 0x1c, 0x4c, 0x51, 0x92, 0x4d, 0xa8, 0x08, 0x7b, 0x6a, 0xde, - 0xff, 0x2c, 0x28, 0x19, 0x71, 0xa3, 0x2a, 0x5f, 0x72, 0x84, 0x49, 0x55, 0xc0, 0x6b, 0x01, 0x8c, - 0xc4, 0x29, 0xd8, 0x44, 0x12, 0x29, 0x6b, 0xc3, 0x09, 0x45, 0x6d, 0x09, 0x4a, 0x41, 0x30, 0xe8, - 0x7d, 0x5a, 0x61, 0x9f, 0x5f, 0x5b, 0x46, 0x86, 0xa1, 0x6d, 0xb2, 0xb6, 0xf3, 0x7d, 0xcb, 0x75, - 0x64, 0x70, 0xe5, 0x75, 0x18, 0x0e, 0xa4, 0x89, 0x6a, 0xb0, 0xfb, 0xc0, 0xdc, 0x5c, 0xac, 0xcc, - 0x53, 0x0a, 0x4b, 0xfb, 0x9b, 0x22, 0xd4, 0xc2, 0xed, 0xe4, 0x01, 0x82, 0x16, 0xbb, 0x50, 0x0b, - 0x9d, 0x75, 0x72, 0xff, 0x33, 0x2d, 0xf2, 0x21, 0xe1, 0x3b, 0xa0, 0xf0, 0x15, 0x23, 0x1e, 0x71, - 0x47, 0xa0, 0x52, 0x0e, 0x47, 0xa0, 0x0e, 0x0c, 0x07, 0x9e, 0xd5, 0x6a, 0x49, 0xdd, 0x36, 0x8f, - 0x27, 0x50, 0x28, 0xae, 0x35, 0x01, 0x28, 0x25, 0x2b, 0x5e, 0x50, 0xb1, 0xd1, 0x5e, 0x83, 0xf1, - 0x34, 0x25, 0x57, 0xfc, 0x8c, 0x2d, 0x6a, 0x76, 0x6d, 0x25, 0xe3, 0x48, 0xf1, 0x93, 0xe9, 0x18, - 0x52, 0xb0, 0xcd, 0x1f, 0x6b, 0xa6, 0x37, 0x5c, 0x47, 0x6d, 0xab, 0xb9, 0x0e, 0xbd, 0x26, 0xd3, - 0x30, 0xcc, 0xd5, 0xfe, 0xb1, 0x04, 0x67, 0x23, 0xa3, 0xc0, 0x8a, 0xee, 0xe8, 0xad, 0x03, 0xfc, - 0x28, 0xeb, 0xfd, 0x1b, 0x16, 0x87, 0x8d, 0x3c, 0x5f, 0x7a, 0x00, 0x22, 0xcf, 0xff, 0xb4, 0x00, - 0xdc, 0xb1, 0x90, 0x7c, 0x09, 0x46, 0xf4, 0xd8, 0x3f, 0x12, 0x65, 0x73, 0x5e, 0xce, 0xdd, 0x9c, - 0xdc, 0x7f, 0x31, 0x74, 0x94, 0x89, 0xa7, 0x62, 0x82, 0x21, 0x71, 0xa1, 0xba, 0xa9, 0xdb, 0x36, - 0xd3, 0x85, 0x72, 0x1f, 0x72, 0x24, 0x98, 0xf3, 0x6e, 0xbe, 0x20, 0xa1, 0x31, 0x64, 0xa2, 0xfd, - 0x43, 0x01, 0x46, 0x9b, 0xb6, 0x65, 0x5a, 0x4e, 0xeb, 0x18, 0x43, 0xce, 0xdf, 0x80, 0x8a, 0x6f, - 0x5b, 0x26, 0x1d, 0x70, 0x1e, 0x17, 0x2b, 0x08, 0x03, 0x40, 0x81, 0x93, 0x8c, 0x61, 0x5f, 0x3a, - 0x40, 0x0c, 
0xfb, 0x9f, 0x0f, 0x81, 0x74, 0x4e, 0x25, 0x5d, 0xa8, 0xb5, 0x54, 0x68, 0x6c, 0xf9, - 0x8d, 0x57, 0x72, 0x84, 0x55, 0x4b, 0x04, 0xd9, 0x16, 0xb3, 0x6e, 0x98, 0x88, 0x11, 0x27, 0x42, - 0x93, 0xbf, 0xc5, 0x9c, 0xcf, 0xf9, 0x5b, 0x4c, 0xc1, 0xae, 0xf7, 0xc7, 0x98, 0x3a, 0x94, 0xb7, - 0x82, 0xa0, 0x23, 0xc7, 0xd5, 0xe0, 0xde, 0xc7, 0x51, 0x64, 0x0f, 0xa1, 0x8d, 0xb0, 0x77, 0xe4, - 0xd0, 0x8c, 0x85, 0xa3, 0x87, 0x7f, 0x63, 0x9a, 0xcb, 0x75, 0xd0, 0x1d, 0x67, 0xc1, 0xde, 0x91, - 0x43, 0x93, 0x2f, 0x42, 0x3d, 0xf0, 0x74, 0xc7, 0xdf, 0x74, 0xbd, 0x36, 0xf5, 0xe4, 0xee, 0x70, - 0x21, 0xc7, 0x9f, 0x21, 0xd7, 0x22, 0x34, 0x71, 0x82, 0x96, 0x48, 0xc2, 0x38, 0x37, 0xb2, 0x0d, - 0xd5, 0xae, 0x29, 0x2a, 0x26, 0xcd, 0x26, 0xb3, 0x79, 0x7e, 0xf6, 0x19, 0x3b, 0xc6, 0x56, 0x6f, - 0x18, 0x32, 0x48, 0xfe, 0x78, 0x6c, 0xf8, 0xa8, 0x7e, 0x3c, 0x16, 0xef, 0x8d, 0x59, 0x61, 0x07, - 0x48, 0x5b, 0x6a, 0x94, 0x4e, 0x4b, 0x7a, 0xe1, 0x2c, 0xe4, 0x56, 0xf6, 0x04, 0xcb, 0x7a, 0xa8, - 0x95, 0x3a, 0x2d, 0x54, 0x3c, 0xb4, 0x36, 0x48, 0xeb, 0x36, 0x31, 0x12, 0xbf, 0xe7, 0x10, 0x77, - 0x61, 0xa6, 0x0f, 0x36, 0x1f, 0x84, 0xff, 0x89, 0x88, 0x85, 0x07, 0xce, 0xfc, 0x0f, 0x87, 0xf6, - 0xb7, 0x45, 0x28, 0xad, 0x2d, 0x37, 0x45, 0xc8, 0x3f, 0xfe, 0xef, 0x1b, 0xda, 0xdc, 0xb6, 0x3a, - 0x37, 0xa9, 0x67, 0x6d, 0xee, 0xca, 0x4d, 0x6f, 0x2c, 0xe4, 0x5f, 0x9a, 0x02, 0x33, 0x4a, 0x91, - 0x57, 0x60, 0xc4, 0xd0, 0xe7, 0xa8, 0x17, 0x0c, 0xb2, 0xa5, 0xe7, 0x97, 0xfe, 0xe6, 0x66, 0xa3, - 0xe2, 0x98, 0x00, 0x23, 0xeb, 0x00, 0x46, 0x04, 0x5d, 0x3a, 0xb4, 0x21, 0x22, 0x06, 0x1c, 0x03, - 0x22, 0x08, 0xb5, 0x6d, 0x46, 0xca, 0x51, 0xcb, 0x87, 0x41, 0xe5, 0x3d, 0xe7, 0x9a, 0x2a, 0x8b, - 0x11, 0x8c, 0xe6, 0xc0, 0x68, 0xe2, 0x9f, 0x1d, 0xe4, 0x63, 0x50, 0x75, 0x3b, 0xb1, 0xe9, 0xb4, - 0xc6, 0xfd, 0xfd, 0xaa, 0x37, 0x64, 0xda, 0x9d, 0xbd, 0xc9, 0xd1, 0x65, 0xb7, 0x65, 0x19, 0x2a, - 0x01, 0x43, 0x72, 0xa2, 0xc1, 0x10, 0xbf, 0xa9, 0xa3, 0xfe, 0xd8, 0xc1, 0xd7, 0x0e, 0x1e, 0x54, - 0xdf, 0x47, 0x99, 0xa3, 0x7d, 0xb9, 0x0c, 0xd1, 
0x99, 0x10, 0xf1, 0x61, 0x48, 0x78, 0x22, 0xcb, - 0x99, 0xfb, 0x58, 0x9d, 0x9e, 0x25, 0x2b, 0xd2, 0x82, 0xd2, 0x6b, 0xee, 0x46, 0xee, 0x89, 0x3b, - 0x76, 0x45, 0x57, 0x58, 0xa9, 0x62, 0x09, 0xc8, 0x38, 0x90, 0x5f, 0x2d, 0xc0, 0x49, 0x3f, 0xad, - 0x74, 0xca, 0xee, 0x80, 0xf9, 0xb5, 0xeb, 0xb4, 0x1a, 0x2b, 0x1d, 0x33, 0xfb, 0x65, 0x63, 0x6f, - 0x5d, 0x98, 0xfc, 0xc5, 0x61, 0x8d, 0xec, 0x4e, 0x8b, 0x39, 0xff, 0x33, 0x97, 0x94, 0x7f, 0x32, - 0x0d, 0x25, 0x2b, 0xed, 0xab, 0x45, 0xa8, 0xc7, 0x66, 0xeb, 0xdc, 0x3f, 0x82, 0xb9, 0x9d, 0xfa, - 0x11, 0xcc, 0xea, 0xe0, 0x67, 0x97, 0x51, 0xad, 0x8e, 0xfb, 0x5f, 0x30, 0xdf, 0x2f, 0x42, 0x69, - 0x7d, 0x7e, 0x21, 0xb9, 0x5d, 0x2c, 0xdc, 0x87, 0xed, 0xe2, 0x16, 0x0c, 0x6f, 0x74, 0x2d, 0x3b, - 0xb0, 0x9c, 0xdc, 0x41, 0x04, 0xd4, 0x7f, 0x73, 0xe4, 0x5d, 0x5c, 0x81, 0x8a, 0x0a, 0x9e, 0xb4, - 0x60, 0xb8, 0x25, 0xa2, 0xb8, 0xe5, 0xf6, 0xe8, 0x92, 0xd1, 0xe0, 0x04, 0x23, 0xf9, 0x82, 0x0a, - 0x5d, 0xdb, 0x05, 0xf9, 0xe7, 0xed, 0xfb, 0x2e, 0x4d, 0xed, 0x8b, 0x10, 0x6a, 0x01, 0xf7, 0x9f, - 0xf9, 0x3f, 0x17, 0x20, 0xa9, 0xf8, 0xdc, 0xff, 0xde, 0xb4, 0x9d, 0xee, 0x4d, 0xf3, 0x47, 0x31, - 0xf8, 0xb2, 0x3b, 0x94, 0xf6, 0x87, 0x45, 0x18, 0xba, 0x6f, 0x17, 0x3f, 0x69, 0xc2, 0x39, 0x6d, - 0x2e, 0xe7, 0xc4, 0xd8, 0xd7, 0x35, 0xad, 0x9d, 0x72, 0x4d, 0xcb, 0xfb, 0xa7, 0xcf, 0x7b, 0x38, - 0xa6, 0xfd, 0x65, 0x01, 0xe4, 0xb4, 0xbc, 0xe4, 0xf8, 0x81, 0xee, 0x18, 0xfc, 0x87, 0xf3, 0x72, - 0x0d, 0xc8, 0xeb, 0x01, 0x21, 0xbd, 0x84, 0xc4, 0xb2, 0xcf, 0x9f, 0xd5, 0x9c, 0x4f, 0x3e, 0x02, - 0xd5, 0x2d, 0xd7, 0x0f, 0xf8, 0x3c, 0x5f, 0x4c, 0xda, 0x75, 0xae, 0xc8, 0x74, 0x0c, 0x29, 0xd2, - 0x27, 0x85, 0x95, 0xfe, 0x27, 0x85, 0xda, 0xb7, 0x8b, 0x30, 0xf2, 0x5e, 0xb9, 0xbd, 0x9a, 0xe5, - 0xca, 0x57, 0xca, 0xe9, 0xca, 0x57, 0x3e, 0x8c, 0x2b, 0x9f, 0xf6, 0x83, 0x02, 0xc0, 0x7d, 0xbb, - 0x3a, 0x6b, 0x26, 0xbd, 0xec, 0x72, 0xf7, 0xab, 0x6c, 0x1f, 0xbb, 0xdf, 0xab, 0xa8, 0x4f, 0xe2, - 0x1e, 0x76, 0x6f, 0x16, 0x60, 0x4c, 0x4f, 0x78, 0xad, 0xe5, 0x56, 0x2d, 0x53, 0x4e, 
0x70, 0xe1, - 0x35, 0xc1, 0x64, 0x3a, 0xa6, 0xd8, 0x92, 0xe7, 0xa3, 0xb0, 0xad, 0xd7, 0xa3, 0x6e, 0xdf, 0x13, - 0x6f, 0x95, 0xab, 0x39, 0x09, 0xca, 0x7b, 0x78, 0x09, 0x96, 0x8e, 0xc4, 0x4b, 0x30, 0x7e, 0xff, - 0xa9, 0x7c, 0xd7, 0xfb, 0x4f, 0x3b, 0x50, 0xdb, 0xf4, 0xdc, 0x36, 0x77, 0xc4, 0x93, 0xff, 0x08, - 0xbd, 0x9c, 0x63, 0x4d, 0x89, 0xfe, 0x8e, 0x1d, 0xd9, 0x78, 0x16, 0x14, 0x3e, 0x46, 0xac, 0xb8, - 0x41, 0xda, 0x15, 0x5c, 0x87, 0x8e, 0x92, 0x6b, 0x38, 0x97, 0xac, 0x09, 0x74, 0x54, 0x6c, 0x92, - 0xce, 0x77, 0xc3, 0xf7, 0xc7, 0xf9, 0x4e, 0xfb, 0x7e, 0x59, 0x4d, 0x60, 0x0f, 0x5c, 0x84, 0xc0, - 0xf7, 0xfe, 0x95, 0xcb, 0xf4, 0x7d, 0xc8, 0xe1, 0xfb, 0x78, 0x1f, 0xb2, 0x7a, 0x34, 0xf7, 0x21, - 0x6b, 0x87, 0xb8, 0x0f, 0xb9, 0x57, 0x82, 0xd4, 0xa6, 0xeb, 0xfd, 0xa3, 0x8d, 0xff, 0x50, 0x47, - 0x1b, 0x6f, 0x15, 0x21, 0x9a, 0x45, 0x0e, 0xe9, 0xfa, 0xf1, 0x32, 0x54, 0xdb, 0xfa, 0xed, 0x79, - 0x6a, 0xeb, 0xbb, 0x79, 0xfe, 0x0a, 0xb9, 0x22, 0x31, 0x30, 0x44, 0x23, 0x3e, 0x80, 0x15, 0x46, - 0x66, 0xce, 0x6d, 0xaa, 0x8e, 0x82, 0x3c, 0x0b, 0x63, 0x58, 0xf4, 0x8e, 0x31, 0x36, 0xda, 0x5f, - 0x14, 0x41, 0x86, 0xf0, 0x26, 0x14, 0x2a, 0x9b, 0xd6, 0x6d, 0x6a, 0xe6, 0x76, 0x83, 0x8c, 0xfd, - 0xab, 0x57, 0xd8, 0xe2, 0x79, 0x02, 0x0a, 0x74, 0x6e, 0x64, 0x15, 0x67, 0x2b, 0x52, 0x7e, 0x39, - 0x8c, 0xac, 0xf1, 0x33, 0x1a, 0x69, 0x64, 0x15, 0x49, 0xa8, 0x78, 0x08, 0x9b, 0x2e, 0x3f, 0xe0, - 0x96, 0x22, 0xcd, 0x63, 0xd3, 0x8d, 0x1d, 0x94, 0x2b, 0x9b, 0xae, 0x2f, 0x2e, 0x44, 0x4b, 0x1e, - 0x8d, 0xcf, 0x7f, 0xef, 0x87, 0x17, 0x1e, 0xfa, 0xc1, 0x0f, 0x2f, 0x3c, 0xf4, 0xce, 0x0f, 0x2f, - 0x3c, 0xf4, 0xe5, 0xfd, 0x0b, 0x85, 0xef, 0xed, 0x5f, 0x28, 0xfc, 0x60, 0xff, 0x42, 0xe1, 0x9d, - 0xfd, 0x0b, 0x85, 0xbf, 0xdb, 0xbf, 0x50, 0xf8, 0xa5, 0xbf, 0xbf, 0xf0, 0xd0, 0xe7, 0x9e, 0x8b, - 0xaa, 0x30, 0xad, 0xaa, 0x30, 0xad, 0x18, 0x4e, 0x77, 0xb6, 0x5b, 0xd3, 0xac, 0x0a, 0x51, 0x8a, - 0xaa, 0xc2, 0xbf, 0x07, 0x00, 0x00, 0xff, 0xff, 0xf1, 0x80, 0x4f, 0xc1, 0x9f, 0x93, 0x00, 0x00, + // 7611 bytes of a 
gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x5d, 0x6c, 0x24, 0xd7, + 0x75, 0xa6, 0xfa, 0x8f, 0xec, 0x3e, 0x4d, 0x72, 0x38, 0x77, 0x46, 0x23, 0xce, 0x68, 0x34, 0x1c, + 0x97, 0x2c, 0x79, 0xbc, 0xb6, 0xc9, 0x15, 0x57, 0x7f, 0xfe, 0x95, 0xd8, 0xe4, 0x90, 0xc3, 0x19, + 0x72, 0x86, 0x3e, 0x4d, 0x8e, 0x64, 0x6b, 0x6d, 0x6d, 0xb1, 0xea, 0xb2, 0x59, 0x62, 0x75, 0x55, + 0xab, 0xaa, 0x9a, 0x33, 0x94, 0x77, 0xe1, 0x1f, 0x2d, 0x20, 0x2d, 0x16, 0x8b, 0x5d, 0xf8, 0xc9, + 0xc0, 0xc2, 0xbb, 0xd8, 0xc5, 0x02, 0x7e, 0x30, 0xbc, 0x0f, 0x8b, 0xd5, 0x3e, 0x2c, 0x90, 0x38, + 0x0e, 0x82, 0xc4, 0x0e, 0xf2, 0xe3, 0x87, 0x00, 0x51, 0x5e, 0x88, 0x98, 0x41, 0x1e, 0x12, 0x20, + 0x86, 0x11, 0x03, 0x89, 0x3d, 0x30, 0xe2, 0xe0, 0xfe, 0xd5, 0x5f, 0x57, 0xcf, 0x90, 0x5d, 0xe4, + 0x68, 0x94, 0xe8, 0xad, 0xea, 0xdc, 0x73, 0xbf, 0x73, 0xef, 0xa9, 0xfb, 0x73, 0xee, 0xb9, 0xe7, + 0xde, 0x82, 0xc5, 0x96, 0x15, 0x6c, 0x75, 0x37, 0xa6, 0x0c, 0xb7, 0x3d, 0xed, 0x74, 0xdb, 0x7a, + 0xc7, 0x73, 0x5f, 0xe3, 0x0f, 0x9b, 0xb6, 0x7b, 0x6b, 0xba, 0xb3, 0xdd, 0x9a, 0xd6, 0x3b, 0x96, + 0x1f, 0x51, 0x76, 0x9e, 0xd2, 0xed, 0xce, 0x96, 0xfe, 0xd4, 0x74, 0x8b, 0x3a, 0xd4, 0xd3, 0x03, + 0x6a, 0x4e, 0x75, 0x3c, 0x37, 0x70, 0xc9, 0x73, 0x11, 0xd0, 0x94, 0x02, 0x9a, 0x52, 0xd9, 0xa6, + 0x3a, 0xdb, 0xad, 0x29, 0x06, 0x14, 0x51, 0x14, 0xd0, 0xb9, 0x4f, 0xc4, 0x4a, 0xd0, 0x72, 0x5b, + 0xee, 0x34, 0xc7, 0xdb, 0xe8, 0x6e, 0xf2, 0x37, 0xfe, 0xc2, 0x9f, 0x84, 0x9c, 0x73, 0xda, 0xf6, + 0xf3, 0xfe, 0x94, 0xe5, 0xb2, 0x62, 0x4d, 0x1b, 0xae, 0x47, 0xa7, 0x77, 0x7a, 0xca, 0x72, 0xee, + 0xe9, 0x88, 0xa7, 0xad, 0x1b, 0x5b, 0x96, 0x43, 0xbd, 0x5d, 0x55, 0x97, 0x69, 0x8f, 0xfa, 0x6e, + 0xd7, 0x33, 0xe8, 0xa1, 0x72, 0xf9, 0xd3, 0x6d, 0x1a, 0xe8, 0x59, 0xb2, 0xa6, 0xfb, 0xe5, 0xf2, + 0xba, 0x4e, 0x60, 0xb5, 0x7b, 0xc5, 0x3c, 0x7b, 0xaf, 0x0c, 0xbe, 0xb1, 0x45, 0xdb, 0x7a, 0x3a, + 0x9f, 0xf6, 0x03, 0x80, 0x53, 0xb3, 0x1b, 0x7e, 0xe0, 0xe9, 0x46, 0xb0, 0xea, 0x9a, 0x6b, 0xb4, + 0xdd, 
0xb1, 0xf5, 0x80, 0x92, 0x6d, 0xa8, 0xb2, 0xb2, 0x99, 0x7a, 0xa0, 0x4f, 0x14, 0x2e, 0x16, + 0x2e, 0xd5, 0x67, 0x66, 0xa7, 0x06, 0xfc, 0x16, 0x53, 0x2b, 0x12, 0xa8, 0x31, 0xb2, 0xbf, 0x37, + 0x59, 0x55, 0x6f, 0x18, 0x0a, 0x20, 0xdf, 0x2a, 0xc0, 0x88, 0xe3, 0x9a, 0xb4, 0x49, 0x6d, 0x6a, + 0x04, 0xae, 0x37, 0x51, 0xbc, 0x58, 0xba, 0x54, 0x9f, 0xf9, 0xf2, 0xc0, 0x12, 0x33, 0x6a, 0x34, + 0x75, 0x3d, 0x26, 0xe0, 0xb2, 0x13, 0x78, 0xbb, 0x8d, 0xd3, 0x3f, 0xdc, 0x9b, 0x7c, 0x68, 0x7f, + 0x6f, 0x72, 0x24, 0x9e, 0x84, 0x89, 0x92, 0x90, 0x75, 0xa8, 0x07, 0xae, 0xcd, 0x54, 0x66, 0xb9, + 0x8e, 0x3f, 0x51, 0xe2, 0x05, 0xbb, 0x30, 0x25, 0xb4, 0xcd, 0xc4, 0x4f, 0xb1, 0xe6, 0x32, 0xb5, + 0xf3, 0xd4, 0xd4, 0x5a, 0xc8, 0xd6, 0x38, 0x25, 0x81, 0xeb, 0x11, 0xcd, 0xc7, 0x38, 0x0e, 0xa1, + 0x70, 0xc2, 0xa7, 0x46, 0xd7, 0xb3, 0x82, 0xdd, 0x39, 0xd7, 0x09, 0xe8, 0xed, 0x60, 0xa2, 0xcc, + 0xb5, 0xfc, 0x64, 0x16, 0xf4, 0xaa, 0x6b, 0x36, 0x93, 0xdc, 0x8d, 0x53, 0xfb, 0x7b, 0x93, 0x27, + 0x52, 0x44, 0x4c, 0x63, 0x12, 0x07, 0xc6, 0xad, 0xb6, 0xde, 0xa2, 0xab, 0x5d, 0xdb, 0x6e, 0x52, + 0xc3, 0xa3, 0x81, 0x3f, 0x51, 0xe1, 0x55, 0xb8, 0x94, 0x25, 0x67, 0xd9, 0x35, 0x74, 0xfb, 0xc6, + 0xc6, 0x6b, 0xd4, 0x08, 0x90, 0x6e, 0x52, 0x8f, 0x3a, 0x06, 0x6d, 0x4c, 0xc8, 0xca, 0x8c, 0x2f, + 0xa5, 0x90, 0xb0, 0x07, 0x9b, 0x2c, 0xc2, 0xc9, 0x8e, 0x67, 0xb9, 0xbc, 0x08, 0xb6, 0xee, 0xfb, + 0xd7, 0xf5, 0x36, 0x9d, 0x18, 0xba, 0x58, 0xb8, 0x54, 0x6b, 0x9c, 0x95, 0x30, 0x27, 0x57, 0xd3, + 0x0c, 0xd8, 0x9b, 0x87, 0x5c, 0x82, 0xaa, 0x22, 0x4e, 0x0c, 0x5f, 0x2c, 0x5c, 0xaa, 0x88, 0xb6, + 0xa3, 0xf2, 0x62, 0x98, 0x4a, 0x16, 0xa0, 0xaa, 0x6f, 0x6e, 0x5a, 0x0e, 0xe3, 0xac, 0x72, 0x15, + 0x9e, 0xcf, 0xaa, 0xda, 0xac, 0xe4, 0x11, 0x38, 0xea, 0x0d, 0xc3, 0xbc, 0xe4, 0x2a, 0x10, 0x9f, + 0x7a, 0x3b, 0x96, 0x41, 0x67, 0x0d, 0xc3, 0xed, 0x3a, 0x01, 0x2f, 0x7b, 0x8d, 0x97, 0xfd, 0x9c, + 0x2c, 0x3b, 0x69, 0xf6, 0x70, 0x60, 0x46, 0x2e, 0xf2, 0x22, 0x8c, 0xcb, 0x6e, 0x17, 0x69, 0x01, + 0x38, 0xd2, 0x69, 0xa6, 0x48, 0x4c, 0xa5, 
0x61, 0x0f, 0x37, 0x31, 0xe1, 0xbc, 0xde, 0x0d, 0xdc, + 0x36, 0x83, 0x4c, 0x0a, 0x5d, 0x73, 0xb7, 0xa9, 0x33, 0x51, 0xbf, 0x58, 0xb8, 0x54, 0x6d, 0x5c, + 0xdc, 0xdf, 0x9b, 0x3c, 0x3f, 0x7b, 0x17, 0x3e, 0xbc, 0x2b, 0x0a, 0xb9, 0x01, 0x35, 0xd3, 0xf1, + 0x57, 0x5d, 0xdb, 0x32, 0x76, 0x27, 0x46, 0x78, 0x01, 0x9f, 0x92, 0x55, 0xad, 0xcd, 0x5f, 0x6f, + 0x8a, 0x84, 0x3b, 0x7b, 0x93, 0xe7, 0x7b, 0x47, 0xc7, 0xa9, 0x30, 0x1d, 0x23, 0x0c, 0xb2, 0xc2, + 0x01, 0xe7, 0x5c, 0x67, 0xd3, 0x6a, 0x4d, 0x8c, 0xf2, 0xaf, 0x71, 0xb1, 0x4f, 0x83, 0x9e, 0xbf, + 0xde, 0x14, 0x7c, 0x8d, 0x51, 0x29, 0x4e, 0xbc, 0x62, 0x84, 0x40, 0x4c, 0x18, 0x53, 0xe3, 0xea, + 0x9c, 0xad, 0x5b, 0x6d, 0x7f, 0x62, 0x8c, 0x37, 0xde, 0x0f, 0xf7, 0xc1, 0xc4, 0x38, 0x73, 0xe3, + 0x8c, 0xac, 0xca, 0x58, 0x82, 0xec, 0x63, 0x0a, 0xf3, 0xdc, 0x0b, 0x70, 0xb2, 0x67, 0x6c, 0x20, + 0xe3, 0x50, 0xda, 0xa6, 0xbb, 0x7c, 0xe8, 0xab, 0x21, 0x7b, 0x24, 0xa7, 0xa1, 0xb2, 0xa3, 0xdb, + 0x5d, 0x3a, 0x51, 0xe4, 0x34, 0xf1, 0xf2, 0xa9, 0xe2, 0xf3, 0x05, 0xed, 0x7f, 0x96, 0x60, 0x44, + 0x8d, 0x38, 0x4d, 0xcb, 0xd9, 0x26, 0x2f, 0x41, 0xc9, 0x76, 0x5b, 0x72, 0xdc, 0xfc, 0xcc, 0xc0, + 0xa3, 0xd8, 0xb2, 0xdb, 0x6a, 0x0c, 0xef, 0xef, 0x4d, 0x96, 0x96, 0xdd, 0x16, 0x32, 0x44, 0x62, + 0x40, 0x65, 0x5b, 0xdf, 0xdc, 0xd6, 0x79, 0x19, 0xea, 0x33, 0x8d, 0x81, 0xa1, 0xaf, 0x31, 0x14, + 0x56, 0xd6, 0x46, 0x6d, 0x7f, 0x6f, 0xb2, 0xc2, 0x5f, 0x51, 0x60, 0x13, 0x17, 0x6a, 0x1b, 0xb6, + 0x6e, 0x6c, 0x6f, 0xb9, 0x36, 0x9d, 0x28, 0xe5, 0x14, 0xd4, 0x50, 0x48, 0xe2, 0x33, 0x87, 0xaf, + 0x18, 0xc9, 0x20, 0x06, 0x0c, 0x75, 0x4d, 0xdf, 0x72, 0xb6, 0xe5, 0x18, 0xf8, 0xc2, 0xc0, 0xd2, + 0xd6, 0xe7, 0x79, 0x9d, 0x60, 0x7f, 0x6f, 0x72, 0x48, 0x3c, 0xa3, 0x84, 0xd6, 0x7e, 0x5a, 0x87, + 0x31, 0xf5, 0x91, 0x6e, 0x52, 0x2f, 0xa0, 0xb7, 0xc9, 0x45, 0x28, 0x3b, 0xac, 0x6b, 0xf2, 0x8f, + 0xdc, 0x18, 0x91, 0xcd, 0xa5, 0xcc, 0xbb, 0x24, 0x4f, 0x61, 0x25, 0x13, 0x4d, 0x45, 0x2a, 0x7c, + 0xf0, 0x92, 0x35, 0x39, 0x8c, 0x28, 0x99, 0x78, 0x46, 0x09, 0x4d, 0x5e, 0x81, 
0x32, 0xaf, 0xbc, + 0x50, 0xf5, 0x67, 0x07, 0x17, 0xc1, 0xaa, 0x5e, 0x65, 0x35, 0xe0, 0x15, 0xe7, 0xa0, 0xac, 0x29, + 0x76, 0xcd, 0x4d, 0xa9, 0xd8, 0xcf, 0xe4, 0x50, 0xec, 0x82, 0x68, 0x8a, 0xeb, 0xf3, 0x0b, 0xc8, + 0x10, 0xc9, 0x7f, 0x2e, 0xc0, 0x49, 0xc3, 0x75, 0x02, 0x9d, 0x99, 0x1a, 0x6a, 0x92, 0x9d, 0xa8, + 0x70, 0x39, 0x57, 0x07, 0x96, 0x33, 0x97, 0x46, 0x6c, 0x3c, 0xcc, 0xe6, 0x8c, 0x1e, 0x32, 0xf6, + 0xca, 0x26, 0xff, 0xb5, 0x00, 0x0f, 0xb3, 0xb1, 0xbc, 0x87, 0x99, 0xcf, 0x40, 0x47, 0x5b, 0xaa, + 0xb3, 0xfb, 0x7b, 0x93, 0x0f, 0x2f, 0x65, 0x09, 0xc3, 0xec, 0x32, 0xb0, 0xd2, 0x9d, 0xd2, 0x7b, + 0xcd, 0x12, 0x3e, 0xbb, 0xd5, 0x67, 0x96, 0x8f, 0xd2, 0xd4, 0x69, 0x3c, 0x2a, 0x9b, 0x72, 0x96, + 0x65, 0x87, 0x59, 0xa5, 0x20, 0x97, 0x61, 0x78, 0xc7, 0xb5, 0xbb, 0x6d, 0xea, 0x4f, 0x54, 0xf9, + 0x10, 0x7b, 0x2e, 0x6b, 0x88, 0xbd, 0xc9, 0x59, 0x1a, 0x27, 0x24, 0xfc, 0xb0, 0x78, 0xf7, 0x51, + 0xe5, 0x25, 0x16, 0x0c, 0xd9, 0x56, 0xdb, 0x0a, 0x7c, 0x3e, 0x71, 0xd6, 0x67, 0x2e, 0x0f, 0x5c, + 0x2d, 0xd1, 0x45, 0x97, 0x39, 0x98, 0xe8, 0x35, 0xe2, 0x19, 0xa5, 0x00, 0x36, 0x14, 0xfa, 0x86, + 0x6e, 0x8b, 0x89, 0xb5, 0x3e, 0xf3, 0xb9, 0xc1, 0xbb, 0x0d, 0x43, 0x69, 0x8c, 0xca, 0x3a, 0x55, + 0xf8, 0x2b, 0x0a, 0x6c, 0xf2, 0x25, 0x18, 0x4b, 0x7c, 0x4d, 0x7f, 0xa2, 0xce, 0xb5, 0xf3, 0x58, + 0x96, 0x76, 0x42, 0xae, 0x68, 0xe6, 0x49, 0xb4, 0x10, 0x1f, 0x53, 0x60, 0xe4, 0x1a, 0x54, 0x7d, + 0xcb, 0xa4, 0x86, 0xee, 0xf9, 0x13, 0x23, 0x07, 0x01, 0x1e, 0x97, 0xc0, 0xd5, 0xa6, 0xcc, 0x86, + 0x21, 0x00, 0x99, 0x02, 0xe8, 0xe8, 0x5e, 0x60, 0x09, 0x43, 0x75, 0x94, 0x1b, 0x4d, 0x63, 0xfb, + 0x7b, 0x93, 0xb0, 0x1a, 0x52, 0x31, 0xc6, 0xc1, 0xf8, 0x59, 0xde, 0x25, 0xa7, 0xd3, 0x0d, 0xc4, + 0xc4, 0x5a, 0x13, 0xfc, 0xcd, 0x90, 0x8a, 0x31, 0x0e, 0xf2, 0xbd, 0x02, 0x3c, 0x1a, 0xbd, 0xf6, + 0x76, 0xb2, 0x13, 0x47, 0xde, 0xc9, 0x26, 0xf7, 0xf7, 0x26, 0x1f, 0x6d, 0xf6, 0x17, 0x89, 0x77, + 0x2b, 0x8f, 0xf6, 0x12, 0x8c, 0xce, 0x76, 0x83, 0x2d, 0xd7, 0xb3, 0xde, 0xe0, 0x46, 0x37, 0x59, + 0x80, 0x4a, 0xc0, 
0x8d, 0x27, 0x31, 0x2f, 0x3f, 0x91, 0xa5, 0x6a, 0x61, 0xc8, 0x5e, 0xa3, 0xbb, + 0xca, 0x1a, 0x10, 0xf3, 0xa3, 0x30, 0xa6, 0x44, 0x76, 0xed, 0xdf, 0x17, 0x60, 0xb8, 0xa1, 0x1b, + 0xdb, 0xee, 0xe6, 0x26, 0x79, 0x19, 0xaa, 0x96, 0x13, 0x50, 0x6f, 0x47, 0xb7, 0x25, 0xec, 0x54, + 0x0c, 0x36, 0x5c, 0x89, 0x45, 0xf5, 0x66, 0x6b, 0x1e, 0x26, 0x68, 0xbe, 0x2b, 0xd7, 0x0a, 0xdc, + 0x1e, 0x5d, 0x92, 0x18, 0x18, 0xa2, 0x91, 0x49, 0xa8, 0xf8, 0x01, 0xed, 0xf8, 0x7c, 0xe6, 0x19, + 0x15, 0xc5, 0x68, 0x32, 0x02, 0x0a, 0xba, 0xf6, 0x3f, 0x0a, 0x50, 0x6b, 0xe8, 0xbe, 0x65, 0xb0, + 0x5a, 0x92, 0x39, 0x28, 0x77, 0x7d, 0xea, 0x1d, 0xae, 0x6e, 0x7c, 0xb2, 0x58, 0xf7, 0xa9, 0x87, + 0x3c, 0x33, 0xb9, 0x01, 0xd5, 0x8e, 0xee, 0xfb, 0xb7, 0x5c, 0xcf, 0x94, 0x13, 0xde, 0x01, 0x81, + 0x84, 0x71, 0x2e, 0xb3, 0x62, 0x08, 0xa2, 0xd5, 0x21, 0x9a, 0xf1, 0xb5, 0x9f, 0x17, 0xe0, 0x54, + 0xa3, 0xbb, 0xb9, 0x49, 0x3d, 0x69, 0x8b, 0x4a, 0x2b, 0x8f, 0x42, 0xc5, 0xa3, 0xa6, 0xe5, 0xcb, + 0xb2, 0xcf, 0x0f, 0xdc, 0x82, 0x90, 0xa1, 0x48, 0xa3, 0x92, 0xeb, 0x8b, 0x13, 0x50, 0xa0, 0x93, + 0x2e, 0xd4, 0x5e, 0xa3, 0x81, 0x1f, 0x78, 0x54, 0x6f, 0xcb, 0xda, 0x5d, 0x19, 0x58, 0xd4, 0x55, + 0x1a, 0x34, 0x39, 0x52, 0xdc, 0x86, 0x0d, 0x89, 0x18, 0x49, 0xd2, 0x7e, 0x50, 0x81, 0x91, 0x39, + 0xb7, 0xbd, 0x61, 0x39, 0xd4, 0xbc, 0x6c, 0xb6, 0x28, 0x79, 0x15, 0xca, 0xd4, 0x6c, 0x51, 0x59, + 0xdb, 0xc1, 0xa7, 0x7b, 0x06, 0x16, 0x19, 0x2d, 0xec, 0x0d, 0x39, 0x30, 0x59, 0x86, 0xb1, 0x4d, + 0xcf, 0x6d, 0x8b, 0x11, 0x74, 0x6d, 0xb7, 0x23, 0x2d, 0xd6, 0xc6, 0x87, 0xd5, 0xa8, 0xb4, 0x90, + 0x48, 0xbd, 0xb3, 0x37, 0x09, 0xd1, 0x1b, 0xa6, 0xf2, 0x92, 0x97, 0x61, 0x22, 0xa2, 0x84, 0x43, + 0xc9, 0x1c, 0x5b, 0x44, 0x70, 0x8b, 0xa5, 0xd2, 0x38, 0xbf, 0xbf, 0x37, 0x39, 0xb1, 0xd0, 0x87, + 0x07, 0xfb, 0xe6, 0x26, 0x6f, 0x15, 0x60, 0x3c, 0x4a, 0x14, 0xc3, 0xbb, 0x34, 0x54, 0x8e, 0x68, + 0xde, 0xe0, 0xab, 0xad, 0x85, 0x94, 0x08, 0xec, 0x11, 0x4a, 0x16, 0x60, 0x24, 0x70, 0x63, 0xfa, + 0xaa, 0x70, 0x7d, 0x69, 0xca, 0x3d, 0xb0, 0xe6, 0xf6, 
0xd5, 0x56, 0x22, 0x1f, 0x41, 0x38, 0xa3, + 0xde, 0x53, 0x9a, 0x1a, 0xe2, 0x9a, 0x3a, 0xb7, 0xbf, 0x37, 0x79, 0x66, 0x2d, 0x93, 0x03, 0xfb, + 0xe4, 0x24, 0x5f, 0x2f, 0xc0, 0x98, 0x4a, 0x92, 0x3a, 0x1a, 0x3e, 0x4a, 0x1d, 0x11, 0xd6, 0x22, + 0xd6, 0x12, 0x02, 0x30, 0x25, 0x50, 0xfb, 0x45, 0x19, 0x6a, 0xe1, 0x00, 0x4b, 0x1e, 0x87, 0x0a, + 0x5f, 0xf8, 0x4b, 0xbb, 0x39, 0x9c, 0x39, 0xb9, 0x7f, 0x00, 0x45, 0x1a, 0x79, 0x02, 0x86, 0x0d, + 0xb7, 0xdd, 0xd6, 0x1d, 0x93, 0x3b, 0x73, 0x6a, 0x8d, 0x3a, 0x33, 0x18, 0xe6, 0x04, 0x09, 0x55, + 0x1a, 0x39, 0x0f, 0x65, 0xdd, 0x6b, 0x09, 0xbf, 0x4a, 0x4d, 0x8c, 0x47, 0xb3, 0x5e, 0xcb, 0x47, + 0x4e, 0x25, 0x9f, 0x84, 0x12, 0x75, 0x76, 0x26, 0xca, 0xfd, 0x2d, 0x92, 0xcb, 0xce, 0xce, 0x4d, + 0xdd, 0x6b, 0xd4, 0x65, 0x19, 0x4a, 0x97, 0x9d, 0x1d, 0x64, 0x79, 0xc8, 0x32, 0x0c, 0x53, 0x67, + 0x87, 0x7d, 0x7b, 0xe9, 0xf0, 0xf8, 0x50, 0x9f, 0xec, 0x8c, 0x45, 0x1a, 0xe7, 0xa1, 0x5d, 0x23, + 0xc9, 0xa8, 0x20, 0xc8, 0x17, 0x60, 0x44, 0x98, 0x38, 0x2b, 0xec, 0x9b, 0xf8, 0x13, 0x43, 0x1c, + 0x72, 0xb2, 0xbf, 0x8d, 0xc4, 0xf9, 0x22, 0x07, 0x53, 0x8c, 0xe8, 0x63, 0x02, 0x8a, 0x7c, 0x01, + 0x6a, 0x6a, 0x3d, 0xaa, 0xbe, 0x6c, 0xa6, 0x6f, 0x46, 0x2d, 0x62, 0x91, 0xbe, 0xde, 0xb5, 0x3c, + 0xda, 0xa6, 0x4e, 0xe0, 0x37, 0x4e, 0xaa, 0xd5, 0xba, 0x4a, 0xf5, 0x31, 0x42, 0x23, 0x1b, 0xbd, + 0x4e, 0x26, 0xe1, 0x21, 0x79, 0xbc, 0xcf, 0xa8, 0x3e, 0x80, 0x87, 0xe9, 0xcb, 0x70, 0x22, 0xf4, + 0x02, 0x49, 0x47, 0x82, 0xf0, 0x99, 0x3c, 0xcd, 0xb2, 0x2f, 0x25, 0x93, 0xee, 0xec, 0x4d, 0x3e, + 0x96, 0xe1, 0x4a, 0x88, 0x18, 0x30, 0x0d, 0xa6, 0x7d, 0xbf, 0x04, 0xbd, 0xd6, 0x7f, 0x52, 0x69, + 0x85, 0xa3, 0x56, 0x5a, 0xba, 0x42, 0x62, 0xf8, 0x7c, 0x5e, 0x66, 0xcb, 0x5f, 0xa9, 0xac, 0x0f, + 0x53, 0x3a, 0xea, 0x0f, 0xf3, 0xa0, 0xf4, 0x1d, 0xed, 0xed, 0x32, 0x8c, 0xcd, 0xeb, 0xb4, 0xed, + 0x3a, 0xf7, 0x5c, 0x0b, 0x15, 0x1e, 0x88, 0xb5, 0xd0, 0x25, 0xa8, 0x7a, 0xb4, 0x63, 0x5b, 0x86, + 0x2e, 0x8c, 0x2f, 0xe9, 0x7b, 0x44, 0x49, 0xc3, 0x30, 0xb5, 0xcf, 0x1a, 0xb8, 0xf4, 0x40, 
0xae, + 0x81, 0xcb, 0xef, 0xfd, 0x1a, 0x58, 0xfb, 0x7a, 0x11, 0xb8, 0xa1, 0x42, 0x2e, 0x42, 0x99, 0x4d, + 0xc2, 0x69, 0xcf, 0x0b, 0x6f, 0x38, 0x3c, 0x85, 0x9c, 0x83, 0x62, 0xe0, 0xca, 0x9e, 0x07, 0x32, + 0xbd, 0xb8, 0xe6, 0x62, 0x31, 0x70, 0xc9, 0x1b, 0x00, 0x86, 0xeb, 0x98, 0x96, 0x72, 0xc9, 0xe7, + 0xab, 0xd8, 0x82, 0xeb, 0xdd, 0xd2, 0x3d, 0x73, 0x2e, 0x44, 0x14, 0xab, 0xa0, 0xe8, 0x1d, 0x63, + 0xd2, 0xc8, 0x0b, 0x30, 0xe4, 0x3a, 0x0b, 0x5d, 0xdb, 0xe6, 0x0a, 0xad, 0x35, 0x3e, 0xc2, 0x96, + 0xa6, 0x37, 0x38, 0xe5, 0xce, 0xde, 0xe4, 0x59, 0x61, 0xdf, 0xb2, 0xb7, 0x97, 0x3c, 0x2b, 0xb0, + 0x9c, 0x56, 0x33, 0xf0, 0xf4, 0x80, 0xb6, 0x76, 0x51, 0x66, 0xd3, 0xbe, 0x59, 0x80, 0xfa, 0x82, + 0x75, 0x9b, 0x9a, 0x2f, 0x59, 0x8e, 0xe9, 0xde, 0x22, 0x08, 0x43, 0x36, 0x75, 0x5a, 0xc1, 0xd6, + 0x80, 0xeb, 0x07, 0xb1, 0x36, 0xe6, 0x08, 0x28, 0x91, 0xc8, 0x34, 0xd4, 0x84, 0xf5, 0x69, 0x39, + 0x2d, 0xae, 0xc3, 0x6a, 0x34, 0xe8, 0x35, 0x55, 0x02, 0x46, 0x3c, 0xda, 0x2e, 0x9c, 0xec, 0x51, + 0x03, 0x31, 0xa1, 0x1c, 0xe8, 0x2d, 0x35, 0xbe, 0x2e, 0x0c, 0xac, 0xe0, 0x35, 0xbd, 0x15, 0x53, + 0x2e, 0x9f, 0xe3, 0xd7, 0x74, 0x36, 0xc7, 0x33, 0x74, 0xed, 0x57, 0x05, 0xa8, 0x2e, 0x74, 0x1d, + 0x83, 0x2f, 0xd1, 0xee, 0xed, 0x91, 0x53, 0x06, 0x43, 0x31, 0xd3, 0x60, 0xe8, 0xc2, 0xd0, 0xf6, + 0xad, 0xd0, 0xa0, 0xa8, 0xcf, 0xac, 0x0c, 0xde, 0x2a, 0x64, 0x91, 0xa6, 0xae, 0x71, 0x3c, 0xb1, + 0x61, 0x34, 0x26, 0x0b, 0x34, 0x74, 0xed, 0x25, 0x2e, 0x54, 0x0a, 0x3b, 0xf7, 0x49, 0xa8, 0xc7, + 0xd8, 0x0e, 0xe5, 0x3b, 0xfe, 0x7f, 0x65, 0x18, 0x5a, 0x6c, 0x36, 0x67, 0x57, 0x97, 0xc8, 0x33, + 0x50, 0x97, 0x7b, 0x09, 0xd7, 0x23, 0x1d, 0x84, 0x5b, 0x49, 0xcd, 0x28, 0x09, 0xe3, 0x7c, 0xcc, + 0x1c, 0xf3, 0xa8, 0x6e, 0xb7, 0x65, 0x67, 0x09, 0xcd, 0x31, 0x64, 0x44, 0x14, 0x69, 0x44, 0x87, + 0x31, 0xb6, 0xc2, 0x63, 0x2a, 0x14, 0xab, 0x37, 0xd9, 0x6d, 0x0e, 0xb8, 0xbe, 0xe3, 0x46, 0xe2, + 0x7a, 0x02, 0x00, 0x53, 0x80, 0xe4, 0x79, 0xa8, 0xea, 0xdd, 0x60, 0x8b, 0x1b, 0xd0, 0xa2, 0x6f, + 0x9c, 0xe7, 0x5b, 0x2d, 0x92, 
0x76, 0x67, 0x6f, 0x72, 0xe4, 0x1a, 0x36, 0x9e, 0x51, 0xef, 0x18, + 0x72, 0xb3, 0xc2, 0xa9, 0x15, 0xa3, 0x2c, 0x5c, 0xe5, 0xd0, 0x85, 0x5b, 0x4d, 0x00, 0x60, 0x0a, + 0x90, 0xbc, 0x02, 0x23, 0xdb, 0x74, 0x37, 0xd0, 0x37, 0xa4, 0x80, 0xa1, 0xc3, 0x08, 0x18, 0x67, + 0x26, 0xdc, 0xb5, 0x58, 0x76, 0x4c, 0x80, 0x11, 0x1f, 0x4e, 0x6f, 0x53, 0x6f, 0x83, 0x7a, 0xae, + 0x5c, 0x7d, 0x4a, 0x21, 0xc3, 0x87, 0x11, 0x32, 0xb1, 0xbf, 0x37, 0x79, 0xfa, 0x5a, 0x06, 0x0c, + 0x66, 0x82, 0x6b, 0xbf, 0x2c, 0xc2, 0x89, 0x45, 0xb1, 0x99, 0xeb, 0x7a, 0x62, 0x12, 0x26, 0x67, + 0xa1, 0xe4, 0x75, 0xba, 0xbc, 0xe5, 0x94, 0x84, 0xbb, 0x16, 0x57, 0xd7, 0x91, 0xd1, 0xc8, 0xcb, + 0x50, 0x35, 0xe5, 0x90, 0x21, 0x17, 0xbf, 0x03, 0x39, 0x2a, 0xd4, 0x1b, 0x86, 0x68, 0xcc, 0xd2, + 0x6f, 0xfb, 0xad, 0xa6, 0xf5, 0x06, 0x95, 0xeb, 0x41, 0x6e, 0xe9, 0xaf, 0x08, 0x12, 0xaa, 0x34, + 0x36, 0xab, 0x6e, 0xd3, 0x5d, 0xb1, 0x1a, 0x2a, 0x47, 0xb3, 0xea, 0x35, 0x49, 0xc3, 0x30, 0x95, + 0x4c, 0xaa, 0xce, 0xc2, 0x5a, 0x41, 0x59, 0xac, 0xe4, 0x6f, 0x32, 0x82, 0xec, 0x37, 0x6c, 0xc8, + 0x7c, 0xcd, 0x0a, 0x02, 0xea, 0xc9, 0xcf, 0x38, 0xd0, 0x90, 0x79, 0x95, 0x23, 0xa0, 0x44, 0x22, + 0x1f, 0x83, 0x1a, 0x07, 0x6f, 0xd8, 0xee, 0x06, 0xff, 0x70, 0x35, 0xb1, 0xa6, 0xbf, 0xa9, 0x88, + 0x18, 0xa5, 0x6b, 0xbf, 0x2e, 0xc2, 0x99, 0x45, 0x1a, 0x08, 0xab, 0x66, 0x9e, 0x76, 0x6c, 0x77, + 0x97, 0x99, 0x96, 0x48, 0x5f, 0x27, 0x2f, 0x02, 0x58, 0xfe, 0x46, 0x73, 0xc7, 0xe0, 0xfd, 0x40, + 0xf4, 0xe1, 0x8b, 0xb2, 0x4b, 0xc2, 0x52, 0xb3, 0x21, 0x53, 0xee, 0x24, 0xde, 0x30, 0x96, 0x27, + 0x5a, 0x5e, 0x15, 0xef, 0xb2, 0xbc, 0x6a, 0x02, 0x74, 0x22, 0x03, 0xb5, 0xc4, 0x39, 0xff, 0x95, + 0x12, 0x73, 0x18, 0xdb, 0x34, 0x06, 0x93, 0xc7, 0x64, 0x74, 0x60, 0xdc, 0xa4, 0x9b, 0x7a, 0xd7, + 0x0e, 0x42, 0xa3, 0x5a, 0x76, 0xe2, 0x83, 0xdb, 0xe5, 0xe1, 0x46, 0xf3, 0x7c, 0x0a, 0x09, 0x7b, + 0xb0, 0xb5, 0xff, 0x5f, 0x82, 0x73, 0x8b, 0x34, 0x08, 0x3d, 0x2e, 0x72, 0x74, 0x6c, 0x76, 0xa8, + 0xc1, 0xbe, 0xc2, 0x5b, 0x05, 0x18, 0xb2, 0xf5, 0x0d, 0x6a, 0xb3, 
0xd9, 0x8b, 0xd5, 0xe6, 0xd5, + 0x81, 0x27, 0x82, 0xfe, 0x52, 0xa6, 0x96, 0xb9, 0x84, 0xd4, 0xd4, 0x20, 0x88, 0x28, 0xc5, 0xb3, + 0x41, 0xdd, 0xb0, 0xbb, 0x7e, 0x40, 0xbd, 0x55, 0xd7, 0x0b, 0xa4, 0x3d, 0x19, 0x0e, 0xea, 0x73, + 0x51, 0x12, 0xc6, 0xf9, 0xc8, 0x0c, 0x80, 0x61, 0x5b, 0xd4, 0x09, 0x78, 0x2e, 0xd1, 0xaf, 0x88, + 0xfa, 0xbe, 0x73, 0x61, 0x0a, 0xc6, 0xb8, 0x98, 0xa8, 0xb6, 0xeb, 0x58, 0x81, 0x2b, 0x44, 0x95, + 0x93, 0xa2, 0x56, 0xa2, 0x24, 0x8c, 0xf3, 0xf1, 0x6c, 0x34, 0xf0, 0x2c, 0xc3, 0xe7, 0xd9, 0x2a, + 0xa9, 0x6c, 0x51, 0x12, 0xc6, 0xf9, 0xd8, 0x9c, 0x17, 0xab, 0xff, 0xa1, 0xe6, 0xbc, 0xef, 0xd6, + 0xe0, 0x42, 0x42, 0xad, 0x81, 0x1e, 0xd0, 0xcd, 0xae, 0xdd, 0xa4, 0x81, 0xfa, 0x80, 0x03, 0xce, + 0x85, 0xff, 0x31, 0xfa, 0xee, 0x22, 0x84, 0xc4, 0x38, 0x9a, 0xef, 0xde, 0x53, 0xc0, 0x03, 0x7d, + 0xfb, 0x69, 0xa8, 0x39, 0x7a, 0xe0, 0xf3, 0x8e, 0x2b, 0xfb, 0x68, 0x68, 0x86, 0x5d, 0x57, 0x09, + 0x18, 0xf1, 0x90, 0x55, 0x38, 0x2d, 0x55, 0x7c, 0xf9, 0x76, 0xc7, 0xf5, 0x02, 0xea, 0x89, 0xbc, + 0x72, 0x3a, 0x95, 0x79, 0x4f, 0xaf, 0x64, 0xf0, 0x60, 0x66, 0x4e, 0xb2, 0x02, 0xa7, 0x0c, 0xb1, + 0xad, 0x4e, 0x6d, 0x57, 0x37, 0x15, 0xa0, 0x70, 0x70, 0x85, 0x4b, 0xa3, 0xb9, 0x5e, 0x16, 0xcc, + 0xca, 0x97, 0x6e, 0xcd, 0x43, 0x03, 0xb5, 0xe6, 0xe1, 0x41, 0x5a, 0x73, 0x75, 0xb0, 0xd6, 0x5c, + 0x3b, 0x58, 0x6b, 0x66, 0x9a, 0x67, 0xed, 0x88, 0x7a, 0xcc, 0x3c, 0x11, 0x33, 0x6c, 0x2c, 0x6a, + 0x23, 0xd4, 0x7c, 0x33, 0x83, 0x07, 0x33, 0x73, 0x92, 0x0d, 0x38, 0x27, 0xe8, 0x97, 0x1d, 0xc3, + 0xdb, 0xed, 0xb0, 0x89, 0x27, 0x86, 0x5b, 0x4f, 0x78, 0x18, 0xcf, 0x35, 0xfb, 0x72, 0xe2, 0x5d, + 0x50, 0xc8, 0xa7, 0x61, 0x54, 0x7c, 0xa5, 0x15, 0xbd, 0xc3, 0x61, 0x45, 0x0c, 0xc7, 0xc3, 0x12, + 0x76, 0x74, 0x2e, 0x9e, 0x88, 0x49, 0x5e, 0x32, 0x0b, 0x27, 0x3a, 0x3b, 0x06, 0x7b, 0x5c, 0xda, + 0xbc, 0x4e, 0xa9, 0x49, 0x4d, 0xbe, 0x69, 0x54, 0x6b, 0x3c, 0xa2, 0x1c, 0x1d, 0xab, 0xc9, 0x64, + 0x4c, 0xf3, 0x93, 0xe7, 0x61, 0xc4, 0x0f, 0x74, 0x2f, 0x90, 0x6e, 0xbd, 0x89, 0x31, 0x11, 0xe3, + 0xa2, 
0xbc, 0x5e, 0xcd, 0x58, 0x1a, 0x26, 0x38, 0x33, 0xe7, 0x8b, 0x13, 0xc7, 0x37, 0x5f, 0xe4, + 0x19, 0xad, 0x7e, 0xaf, 0x08, 0x17, 0x17, 0x69, 0xb0, 0xe2, 0x3a, 0xd2, 0x29, 0x9a, 0x35, 0xed, + 0x1f, 0xc8, 0x27, 0x9a, 0x9c, 0xb4, 0x8b, 0x47, 0x3a, 0x69, 0x97, 0x8e, 0x68, 0xd2, 0x2e, 0x1f, + 0xe3, 0xa4, 0xfd, 0x9b, 0x45, 0x78, 0x24, 0xa1, 0xc9, 0x55, 0xd7, 0x54, 0x03, 0xfe, 0x07, 0x0a, + 0x3c, 0x80, 0x02, 0xef, 0x08, 0xbb, 0x93, 0x6f, 0x6b, 0xa5, 0x2c, 0x9e, 0x37, 0xd3, 0x16, 0xcf, + 0x2b, 0x79, 0x66, 0xbe, 0x0c, 0x09, 0x07, 0x9a, 0xf1, 0xae, 0x02, 0xf1, 0xe4, 0x26, 0x9c, 0x70, + 0xfd, 0xc4, 0x8c, 0x9e, 0x30, 0x88, 0x0e, 0x7b, 0x38, 0x30, 0x23, 0x17, 0x69, 0xc2, 0xc3, 0x3e, + 0x75, 0x02, 0xcb, 0xa1, 0x76, 0x12, 0x4e, 0x58, 0x43, 0x8f, 0x49, 0xb8, 0x87, 0x9b, 0x59, 0x4c, + 0x98, 0x9d, 0x37, 0xcf, 0x38, 0xf0, 0x07, 0xc0, 0x4d, 0x4e, 0xa1, 0x9a, 0x23, 0xb3, 0x58, 0xde, + 0x4a, 0x5b, 0x2c, 0xaf, 0xe6, 0xff, 0x6e, 0x83, 0x59, 0x2b, 0x33, 0x00, 0xfc, 0x2b, 0xc4, 0xcd, + 0x95, 0x70, 0x92, 0xc6, 0x30, 0x05, 0x63, 0x5c, 0x6c, 0x02, 0x52, 0x7a, 0x8e, 0x5b, 0x2a, 0xe1, + 0x04, 0xd4, 0x8c, 0x27, 0x62, 0x92, 0xb7, 0xaf, 0xb5, 0x53, 0x19, 0xd8, 0xda, 0xb9, 0x0a, 0x24, + 0xe1, 0x78, 0x14, 0x78, 0x43, 0xc9, 0x18, 0xce, 0xa5, 0x1e, 0x0e, 0xcc, 0xc8, 0xd5, 0xa7, 0x29, + 0x0f, 0x1f, 0x6d, 0x53, 0xae, 0x0e, 0xde, 0x94, 0xc9, 0xab, 0x70, 0x96, 0x8b, 0x92, 0xfa, 0x49, + 0x02, 0x0b, 0xbb, 0xe7, 0x43, 0x12, 0xf8, 0x2c, 0xf6, 0x63, 0xc4, 0xfe, 0x18, 0xec, 0xfb, 0x18, + 0x1e, 0x35, 0x99, 0x70, 0xdd, 0xee, 0x6f, 0x13, 0xcd, 0x65, 0xf0, 0x60, 0x66, 0x4e, 0xd6, 0xc4, + 0x02, 0xd6, 0x0c, 0xf5, 0x0d, 0x9b, 0x9a, 0x32, 0x86, 0x35, 0x6c, 0x62, 0x6b, 0xcb, 0x4d, 0x99, + 0x82, 0x31, 0xae, 0x2c, 0x33, 0x65, 0xe4, 0x90, 0x66, 0xca, 0x22, 0xf7, 0xd2, 0x6f, 0x26, 0xac, + 0x21, 0x69, 0xeb, 0x84, 0x51, 0xc9, 0x73, 0x69, 0x06, 0xec, 0xcd, 0xc3, 0xad, 0x44, 0xc3, 0xb3, + 0x3a, 0x81, 0x9f, 0xc4, 0x1a, 0x4b, 0x59, 0x89, 0x19, 0x3c, 0x98, 0x99, 0x93, 0xd9, 0xe7, 0x5b, + 0x54, 0xb7, 0x83, 0xad, 0x24, 0xe0, 0x89, 
0xa4, 0x7d, 0x7e, 0xa5, 0x97, 0x05, 0xb3, 0xf2, 0x65, + 0x4e, 0x48, 0xe3, 0x0f, 0xa6, 0x59, 0xf5, 0x8d, 0x12, 0x9c, 0x5d, 0xa4, 0x41, 0x18, 0xde, 0xf3, + 0x81, 0x1b, 0xe5, 0x3d, 0x70, 0xa3, 0x7c, 0xa7, 0x02, 0xa7, 0x16, 0x69, 0xd0, 0x63, 0x8d, 0xfd, + 0x33, 0x55, 0xff, 0x0a, 0x9c, 0x8a, 0x22, 0xca, 0x9a, 0x81, 0xeb, 0x89, 0xb9, 0x3c, 0xb5, 0x5a, + 0x6e, 0xf6, 0xb2, 0x60, 0x56, 0x3e, 0xf2, 0x05, 0x78, 0x84, 0x4f, 0xf5, 0x4e, 0x4b, 0xf8, 0x67, + 0x85, 0x33, 0x21, 0x76, 0x26, 0x62, 0x52, 0x42, 0x3e, 0xd2, 0xcc, 0x66, 0xc3, 0x7e, 0xf9, 0xc9, + 0x57, 0x61, 0xa4, 0x63, 0x75, 0xa8, 0x6d, 0x39, 0xdc, 0x3e, 0xcb, 0x1d, 0x12, 0xb2, 0x1a, 0x03, + 0x8b, 0x16, 0x70, 0x71, 0x2a, 0x26, 0x04, 0x66, 0xb6, 0xd4, 0xea, 0x31, 0xb6, 0xd4, 0xbf, 0x2d, + 0xc2, 0xf0, 0xa2, 0xe7, 0x76, 0x3b, 0x8d, 0x5d, 0xd2, 0x82, 0xa1, 0x5b, 0x7c, 0xf3, 0x4c, 0x6e, + 0x4d, 0x0d, 0x1e, 0x95, 0x2d, 0xf6, 0xe0, 0x22, 0x93, 0x48, 0xbc, 0xa3, 0x84, 0x67, 0x8d, 0x78, + 0x9b, 0xee, 0x52, 0x53, 0xee, 0xa1, 0x85, 0x8d, 0xf8, 0x1a, 0x23, 0xa2, 0x48, 0x23, 0x6d, 0x38, + 0xa1, 0xdb, 0xb6, 0x7b, 0x8b, 0x9a, 0xcb, 0x7a, 0x40, 0x1d, 0xea, 0xab, 0x2d, 0xc9, 0xc3, 0xba, + 0xa5, 0xf9, 0xbe, 0xfe, 0x6c, 0x12, 0x0a, 0xd3, 0xd8, 0xe4, 0x35, 0x18, 0xf6, 0x03, 0xd7, 0x53, + 0xc6, 0x56, 0x7d, 0x66, 0x6e, 0xf0, 0x8f, 0xde, 0xf8, 0x7c, 0x53, 0x40, 0x09, 0x9f, 0xbd, 0x7c, + 0x41, 0x25, 0x40, 0xfb, 0x76, 0x01, 0xe0, 0xca, 0xda, 0xda, 0xaa, 0xdc, 0x5e, 0x30, 0xa1, 0xac, + 0x77, 0xc3, 0x8d, 0xca, 0xc1, 0x37, 0x04, 0x13, 0x61, 0x99, 0x72, 0x0f, 0xaf, 0x1b, 0x6c, 0x21, + 0x47, 0x27, 0x1f, 0x85, 0x61, 0x69, 0x20, 0x4b, 0xb5, 0x87, 0xa1, 0x05, 0xd2, 0x88, 0x46, 0x95, + 0xae, 0xfd, 0x9f, 0x22, 0xc0, 0x92, 0x69, 0xd3, 0xa6, 0x0a, 0xa4, 0xaf, 0x05, 0x5b, 0x1e, 0xf5, + 0xb7, 0x5c, 0xdb, 0x1c, 0x70, 0x37, 0x95, 0xfb, 0xfc, 0xd7, 0x14, 0x08, 0x46, 0x78, 0xc4, 0x84, + 0x11, 0x3f, 0xa0, 0x1d, 0x15, 0xa9, 0x39, 0xe0, 0x26, 0xca, 0xb8, 0xf0, 0x8b, 0x44, 0x38, 0x98, + 0x40, 0x25, 0x3a, 0xd4, 0x2d, 0xc7, 0x10, 0x1d, 0xa4, 0xb1, 0x3b, 0x60, 0x43, 
0x3a, 0xc1, 0x56, + 0x1c, 0x4b, 0x11, 0x0c, 0xc6, 0x31, 0xb5, 0x9f, 0x15, 0xe1, 0x0c, 0x97, 0xc7, 0x8a, 0x91, 0x88, + 0xc7, 0x24, 0xff, 0xa6, 0xe7, 0xd0, 0xdf, 0xbf, 0x3c, 0x98, 0x68, 0x71, 0x66, 0x6c, 0x85, 0x06, + 0x7a, 0x64, 0xcf, 0x45, 0xb4, 0xd8, 0x49, 0xbf, 0x2e, 0x94, 0x7d, 0x36, 0x5e, 0x09, 0xed, 0x35, + 0x07, 0x6e, 0x42, 0xd9, 0x15, 0xe0, 0xa3, 0x57, 0xb8, 0x6b, 0xcc, 0x47, 0x2d, 0x2e, 0x8e, 0xfc, + 0x3b, 0x18, 0xf2, 0x03, 0x3d, 0xe8, 0xaa, 0xae, 0xb9, 0x7e, 0xd4, 0x82, 0x39, 0x78, 0x34, 0x8e, + 0x88, 0x77, 0x94, 0x42, 0xb5, 0x9f, 0x15, 0xe0, 0x5c, 0x76, 0xc6, 0x65, 0xcb, 0x0f, 0xc8, 0xbf, + 0xee, 0x51, 0xfb, 0x01, 0xbf, 0x38, 0xcb, 0xcd, 0x95, 0x1e, 0xc6, 0x85, 0x2b, 0x4a, 0x4c, 0xe5, + 0x01, 0x54, 0xac, 0x80, 0xb6, 0xd5, 0xfa, 0xf2, 0xc6, 0x11, 0x57, 0x3d, 0x36, 0xb5, 0x33, 0x29, + 0x28, 0x84, 0x69, 0x6f, 0x17, 0xfb, 0x55, 0x99, 0x4f, 0x1f, 0x76, 0x32, 0xe6, 0xf7, 0x5a, 0xbe, + 0x98, 0xdf, 0x64, 0x81, 0x7a, 0x43, 0x7f, 0xff, 0x6d, 0x6f, 0xe8, 0xef, 0x8d, 0xfc, 0xa1, 0xbf, + 0x29, 0x35, 0xf4, 0x8d, 0x00, 0x7e, 0xb7, 0x04, 0xe7, 0xef, 0xd6, 0x6c, 0xd8, 0x7c, 0x26, 0x5b, + 0x67, 0xde, 0xf9, 0xec, 0xee, 0xed, 0x90, 0xcc, 0x40, 0xa5, 0xb3, 0xa5, 0xfb, 0xca, 0x28, 0x53, + 0x0b, 0x96, 0xca, 0x2a, 0x23, 0xde, 0x61, 0x83, 0x06, 0x37, 0xe6, 0xf8, 0x2b, 0x0a, 0x56, 0x36, + 0x1c, 0xb7, 0xa9, 0xef, 0x47, 0x3e, 0x81, 0x70, 0x38, 0x5e, 0x11, 0x64, 0x54, 0xe9, 0x24, 0x80, + 0x21, 0xe1, 0x62, 0x96, 0x33, 0xd3, 0xe0, 0x81, 0x5c, 0x19, 0x61, 0xe2, 0x51, 0xa5, 0xe4, 0x6e, + 0x85, 0x94, 0x45, 0xa6, 0xa0, 0x1c, 0x44, 0x41, 0xbb, 0x6a, 0x69, 0x5e, 0xce, 0xb0, 0x4f, 0x39, + 0x1f, 0x5b, 0xd8, 0xbb, 0x1b, 0xdc, 0xa9, 0x6e, 0xca, 0xfd, 0x73, 0xcb, 0x75, 0xb8, 0x41, 0x56, + 0x8a, 0x16, 0xf6, 0x37, 0x7a, 0x38, 0x30, 0x23, 0x97, 0xf6, 0xc7, 0x55, 0x38, 0x93, 0xdd, 0x1e, + 0x98, 0xde, 0x76, 0xa8, 0xe7, 0x33, 0xec, 0x42, 0x52, 0x6f, 0x37, 0x05, 0x19, 0x55, 0xfa, 0xfb, + 0x3a, 0xe0, 0xec, 0x3b, 0x05, 0x38, 0xeb, 0xc9, 0x3d, 0xa2, 0xfb, 0x11, 0x74, 0xf6, 0x98, 0x70, + 0x67, 0xf4, 0x11, 
0x88, 0xfd, 0xcb, 0x42, 0xfe, 0x57, 0x01, 0x26, 0xda, 0x29, 0x3f, 0xc7, 0x31, + 0x9e, 0x5b, 0xe3, 0x51, 0xf1, 0x2b, 0x7d, 0xe4, 0x61, 0xdf, 0x92, 0x90, 0xaf, 0x42, 0xbd, 0xc3, + 0xda, 0x85, 0x1f, 0x50, 0xc7, 0x50, 0x47, 0xd7, 0x06, 0xef, 0x49, 0xab, 0x11, 0x96, 0x0a, 0x45, + 0x13, 0xf6, 0x41, 0x2c, 0x01, 0xe3, 0x12, 0x1f, 0xf0, 0x83, 0x6a, 0x97, 0xa0, 0xea, 0xd3, 0x20, + 0xb0, 0x9c, 0x96, 0x58, 0x6f, 0xd4, 0x44, 0x5f, 0x69, 0x4a, 0x1a, 0x86, 0xa9, 0xe4, 0x63, 0x50, + 0xe3, 0x5b, 0x4e, 0xb3, 0x5e, 0xcb, 0x9f, 0xa8, 0xf1, 0x70, 0xb1, 0x51, 0x11, 0x00, 0x27, 0x89, + 0x18, 0xa5, 0x93, 0xa7, 0x61, 0x64, 0x83, 0x77, 0x5f, 0x79, 0x76, 0x59, 0xf8, 0xb8, 0xb8, 0xb5, + 0xd6, 0x88, 0xd1, 0x31, 0xc1, 0x45, 0x66, 0x00, 0x68, 0xb8, 0x2f, 0x97, 0xf6, 0x67, 0x45, 0x3b, + 0x76, 0x18, 0xe3, 0x22, 0x8f, 0x41, 0x29, 0xb0, 0x7d, 0xee, 0xc3, 0xaa, 0x46, 0x4b, 0xd0, 0xb5, + 0xe5, 0x26, 0x32, 0xba, 0xf6, 0xeb, 0x02, 0x9c, 0x48, 0x1d, 0x2e, 0x61, 0x59, 0xba, 0x9e, 0x2d, + 0x87, 0x91, 0x30, 0xcb, 0x3a, 0x2e, 0x23, 0xa3, 0x93, 0x57, 0xa5, 0x59, 0x5e, 0xcc, 0x79, 0x4d, + 0xc3, 0x75, 0x3d, 0xf0, 0x99, 0x1d, 0xde, 0x63, 0x91, 0xf3, 0x6d, 0xbe, 0xa8, 0x3c, 0x72, 0x1e, + 0x88, 0x6d, 0xf3, 0x45, 0x69, 0x98, 0xe0, 0x4c, 0x39, 0xfc, 0xca, 0x07, 0x71, 0xf8, 0x69, 0xdf, + 0x2c, 0xc6, 0x34, 0x20, 0x2d, 0xfb, 0x7b, 0x68, 0xe0, 0x49, 0x36, 0x81, 0x86, 0x93, 0x7b, 0x2d, + 0x3e, 0xff, 0xf1, 0xc9, 0x58, 0xa6, 0x92, 0x97, 0x84, 0xee, 0x4b, 0x39, 0x0f, 0xc3, 0xae, 0x2d, + 0x37, 0x45, 0x74, 0x95, 0xfa, 0x6a, 0xe1, 0x27, 0x28, 0x1f, 0xd3, 0x27, 0xd0, 0x7e, 0xbf, 0x04, + 0xf5, 0xab, 0xee, 0xc6, 0xfb, 0x24, 0x82, 0x3a, 0x7b, 0x9a, 0x2a, 0xbe, 0x87, 0xd3, 0xd4, 0x3a, + 0x3c, 0x12, 0x04, 0x76, 0x93, 0x1a, 0xae, 0x63, 0xfa, 0xb3, 0x9b, 0x01, 0xf5, 0x16, 0x2c, 0xc7, + 0xf2, 0xb7, 0xa8, 0x29, 0xb7, 0x93, 0x1e, 0xdd, 0xdf, 0x9b, 0x7c, 0x64, 0x6d, 0x6d, 0x39, 0x8b, + 0x05, 0xfb, 0xe5, 0xe5, 0xc3, 0x86, 0x38, 0x09, 0xc8, 0x4f, 0xca, 0xc8, 0x98, 0x1b, 0x31, 0x6c, + 0xc4, 0xe8, 0x98, 0xe0, 0xd2, 0xde, 0x29, 0x42, 0x2d, 
0x3c, 0x80, 0x4f, 0x9e, 0x80, 0xe1, 0x0d, + 0xcf, 0xdd, 0xa6, 0x9e, 0xd8, 0xb9, 0x93, 0x27, 0x65, 0x1a, 0x82, 0x84, 0x2a, 0x8d, 0x3c, 0x0e, + 0x95, 0xc0, 0xed, 0x58, 0x46, 0xda, 0xa1, 0xb6, 0xc6, 0x88, 0x28, 0xd2, 0x8e, 0xaf, 0x81, 0x3f, + 0x99, 0x30, 0xed, 0x6a, 0x7d, 0x8d, 0xb1, 0x57, 0xa0, 0xec, 0xeb, 0xbe, 0x2d, 0xe7, 0xd3, 0x1c, + 0x67, 0xd9, 0x67, 0x9b, 0xcb, 0xf2, 0x2c, 0xfb, 0x6c, 0x73, 0x19, 0x39, 0xa8, 0xf6, 0x8b, 0x22, + 0xd4, 0x85, 0xde, 0xc4, 0xa8, 0x70, 0x94, 0x9a, 0x7b, 0x81, 0x87, 0x52, 0xf8, 0xdd, 0x36, 0xf5, + 0xb8, 0x9b, 0x49, 0x0e, 0x72, 0xf1, 0xfd, 0x81, 0x28, 0x31, 0x0c, 0xa7, 0x88, 0x48, 0x4a, 0xf5, + 0xe5, 0x63, 0x54, 0x7d, 0xe5, 0x40, 0xaa, 0x1f, 0x3a, 0x0e, 0xd5, 0xbf, 0x55, 0x84, 0xda, 0xb2, + 0xb5, 0x49, 0x8d, 0x5d, 0xc3, 0xe6, 0x67, 0x02, 0x4d, 0x6a, 0xd3, 0x80, 0x2e, 0x7a, 0xba, 0x41, + 0x57, 0xa9, 0x67, 0xf1, 0x0b, 0x6a, 0x58, 0xff, 0xe0, 0x23, 0x90, 0x3c, 0x13, 0x38, 0xdf, 0x87, + 0x07, 0xfb, 0xe6, 0x26, 0x4b, 0x30, 0x62, 0x52, 0xdf, 0xf2, 0xa8, 0xb9, 0x1a, 0x5b, 0xa8, 0x3c, + 0xa1, 0xa6, 0x9a, 0xf9, 0x58, 0xda, 0x9d, 0xbd, 0xc9, 0x51, 0xe5, 0xa0, 0x14, 0x2b, 0x96, 0x44, + 0x56, 0xd6, 0xe5, 0x3b, 0x7a, 0xd7, 0xcf, 0x2a, 0x63, 0xac, 0xcb, 0xaf, 0x66, 0xb3, 0x60, 0xbf, + 0xbc, 0x5a, 0x05, 0x4a, 0xcb, 0x6e, 0x4b, 0x7b, 0xbb, 0x04, 0xe1, 0x4d, 0x46, 0xe4, 0x3f, 0x14, + 0xa0, 0xae, 0x3b, 0x8e, 0x1b, 0xc8, 0x5b, 0x82, 0xc4, 0x0e, 0x3c, 0xe6, 0xbe, 0x30, 0x69, 0x6a, + 0x36, 0x02, 0x15, 0x9b, 0xb7, 0xe1, 0x86, 0x72, 0x2c, 0x05, 0xe3, 0xb2, 0x49, 0x37, 0xb5, 0x9f, + 0xbc, 0x92, 0xbf, 0x14, 0x07, 0xd8, 0x3d, 0x3e, 0xf7, 0x39, 0x18, 0x4f, 0x17, 0xf6, 0x30, 0xdb, + 0x41, 0xb9, 0x36, 0xe6, 0x8b, 0x00, 0x51, 0x4c, 0xc9, 0x7d, 0x70, 0x62, 0x59, 0x09, 0x27, 0xd6, + 0xe2, 0xe0, 0x0a, 0x0e, 0x0b, 0xdd, 0xd7, 0x71, 0xf5, 0x7a, 0xca, 0x71, 0xb5, 0x74, 0x14, 0xc2, + 0xee, 0xee, 0xac, 0xfa, 0xdf, 0x05, 0x18, 0x8f, 0x98, 0xe5, 0x09, 0xd9, 0xe7, 0x60, 0xd4, 0xa3, + 0xba, 0xd9, 0xd0, 0x03, 0x63, 0x8b, 0x87, 0x7a, 0x17, 0x78, 0x6c, 0xf6, 0xc9, 0xfd, 0xbd, 
0xc9, + 0x51, 0x8c, 0x27, 0x60, 0x92, 0x8f, 0xe8, 0x50, 0x67, 0x84, 0x35, 0xab, 0x4d, 0xdd, 0x6e, 0x30, + 0xa0, 0xd7, 0x94, 0x2f, 0x58, 0x30, 0x82, 0xc1, 0x38, 0xa6, 0xf6, 0x6e, 0x01, 0xc6, 0xe2, 0x05, + 0x3e, 0x76, 0x8f, 0xda, 0x56, 0xd2, 0xa3, 0x36, 0x77, 0x04, 0xdf, 0xa4, 0x8f, 0x17, 0xed, 0x97, + 0xd5, 0x78, 0xd5, 0xb8, 0xe7, 0x2c, 0xee, 0x2c, 0x28, 0xdc, 0xd5, 0x59, 0xf0, 0xfe, 0xbf, 0xbc, + 0xa6, 0x9f, 0x95, 0x5b, 0x7e, 0x80, 0xad, 0xdc, 0xf7, 0xf2, 0x06, 0x9c, 0xd8, 0x2d, 0x2e, 0x43, + 0x39, 0x6e, 0x71, 0x69, 0x87, 0xb7, 0xb8, 0x0c, 0x1f, 0xd9, 0xa0, 0x73, 0x90, 0x9b, 0x5c, 0xaa, + 0xf7, 0xf5, 0x26, 0x97, 0xda, 0x71, 0xdd, 0xe4, 0x02, 0x79, 0x6f, 0x72, 0x79, 0xb3, 0x00, 0x63, + 0x66, 0xe2, 0xc4, 0x2c, 0xf7, 0x2d, 0xe4, 0x99, 0x6a, 0x92, 0x07, 0x70, 0xc5, 0x91, 0xa9, 0x24, + 0x0d, 0x53, 0x22, 0xb5, 0xff, 0x5b, 0x89, 0xcf, 0x03, 0xf7, 0xdb, 0x55, 0xfd, 0x6c, 0xd2, 0x55, + 0x7d, 0x31, 0xed, 0xaa, 0x3e, 0x11, 0x8b, 0x22, 0x8d, 0xbb, 0xab, 0x3f, 0x1e, 0x1b, 0x1e, 0x4b, + 0xfc, 0xe6, 0x94, 0x50, 0xd3, 0x19, 0x43, 0xe4, 0xc7, 0xa1, 0xea, 0xab, 0x3b, 0x27, 0xc5, 0xc2, + 0x26, 0xfa, 0x2e, 0xea, 0x3e, 0xc8, 0x90, 0x83, 0x59, 0xe2, 0x1e, 0xd5, 0x7d, 0xd7, 0x49, 0x5b, + 0xe2, 0xc8, 0xa9, 0x28, 0x53, 0xe3, 0x2e, 0xf3, 0xa1, 0x7b, 0xb8, 0xcc, 0x75, 0xa8, 0xdb, 0xba, + 0x1f, 0xac, 0x77, 0x4c, 0x3d, 0xa0, 0xa6, 0xec, 0x6f, 0xff, 0xe2, 0x60, 0x73, 0x15, 0x9b, 0xff, + 0x22, 0x83, 0x70, 0x39, 0x82, 0xc1, 0x38, 0x26, 0x31, 0x61, 0x84, 0xbd, 0xf2, 0xde, 0x60, 0xce, + 0xaa, 0x2b, 0x00, 0x0e, 0x23, 0x23, 0xf4, 0xf4, 0x2c, 0xc7, 0x70, 0x30, 0x81, 0xda, 0xc7, 0xab, + 0x5e, 0x1b, 0xc4, 0xab, 0x4e, 0x3e, 0x2d, 0x8c, 0x8d, 0x5d, 0xf5, 0xc1, 0xb8, 0x37, 0x6e, 0x34, + 0x8a, 0x2a, 0xc4, 0x78, 0x22, 0x26, 0x79, 0xb5, 0x37, 0x6b, 0x50, 0xbf, 0xae, 0x07, 0xd6, 0x0e, + 0xe5, 0x5b, 0x40, 0xc7, 0xe3, 0x87, 0xff, 0x6f, 0x05, 0x38, 0x93, 0x8c, 0xf3, 0x3b, 0x46, 0x67, + 0x3c, 0xbf, 0x35, 0x04, 0x33, 0xa5, 0x61, 0x9f, 0x52, 0x70, 0xb7, 0x7c, 0x4f, 0xd8, 0xe0, 0x71, + 0xbb, 0xe5, 0x9b, 0xfd, 0x04, 
0x62, 0xff, 0xb2, 0xbc, 0x5f, 0xdc, 0xf2, 0x0f, 0xf6, 0xe5, 0x72, + 0xa9, 0x4d, 0x83, 0xe1, 0x07, 0x66, 0xd3, 0xa0, 0xfa, 0x40, 0x58, 0x6a, 0x9d, 0xd8, 0xa6, 0x41, + 0x2d, 0x67, 0xf0, 0x8a, 0x0c, 0x8d, 0x17, 0x68, 0xfd, 0x36, 0x1f, 0xf8, 0xa9, 0x76, 0xe5, 0xcc, + 0x65, 0x06, 0xce, 0x86, 0xee, 0x5b, 0x86, 0x9c, 0x33, 0x73, 0x5c, 0xa6, 0xa9, 0xae, 0xfb, 0x12, + 0x7b, 0xdc, 0xfc, 0x15, 0x05, 0x76, 0x74, 0xbb, 0x59, 0x31, 0xd7, 0xed, 0x66, 0x64, 0x0e, 0xca, + 0x0e, 0x5b, 0x7a, 0x97, 0x0e, 0x7d, 0x91, 0xd8, 0xf5, 0x6b, 0x74, 0x17, 0x79, 0x66, 0xed, 0x9d, + 0x22, 0x00, 0xab, 0xfe, 0xc1, 0xdc, 0xf7, 0x1f, 0x85, 0x61, 0xbf, 0xcb, 0x17, 0xda, 0x72, 0xb6, + 0x8f, 0x22, 0x7e, 0x04, 0x19, 0x55, 0x3a, 0x79, 0x1c, 0x2a, 0xaf, 0x77, 0x69, 0x57, 0xed, 0x45, + 0x87, 0xb6, 0xde, 0xe7, 0x19, 0x11, 0x45, 0xda, 0xf1, 0xb9, 0xe2, 0x94, 0x9b, 0xbf, 0x72, 0x5c, + 0x6e, 0xfe, 0x1a, 0x0c, 0x5f, 0x77, 0x79, 0x00, 0xa1, 0xf6, 0xd7, 0x45, 0x80, 0x28, 0x40, 0x8b, + 0x7c, 0xbb, 0x00, 0x0f, 0x87, 0x1d, 0x2e, 0x10, 0x26, 0x3b, 0xbf, 0xbf, 0x36, 0xb7, 0xcb, 0x3f, + 0xab, 0xb3, 0xf3, 0x11, 0x68, 0x35, 0x4b, 0x1c, 0x66, 0x97, 0x82, 0x20, 0x54, 0x69, 0xbb, 0x13, + 0xec, 0xce, 0x5b, 0x9e, 0x6c, 0x81, 0x99, 0x71, 0x80, 0x97, 0x25, 0x8f, 0xc8, 0x2a, 0xd7, 0x95, + 0xbc, 0x13, 0xa9, 0x14, 0x0c, 0x71, 0xc8, 0x16, 0x54, 0x1d, 0xf7, 0x55, 0x9f, 0xa9, 0x43, 0x36, + 0xc7, 0x17, 0x07, 0x57, 0xb9, 0x50, 0xab, 0x70, 0x11, 0xcb, 0x17, 0x1c, 0x76, 0xa4, 0xb2, 0xbf, + 0x55, 0x84, 0x53, 0x19, 0x7a, 0x20, 0x2f, 0xc2, 0xb8, 0x8c, 0x85, 0x8b, 0x2e, 0x72, 0x2e, 0x44, + 0x17, 0x39, 0x37, 0x53, 0x69, 0xd8, 0xc3, 0x4d, 0x5e, 0x05, 0xd0, 0x0d, 0x83, 0xfa, 0xfe, 0x8a, + 0x6b, 0x2a, 0x63, 0xf6, 0x85, 0xfd, 0xbd, 0x49, 0x98, 0x0d, 0xa9, 0x77, 0xf6, 0x26, 0x3f, 0x91, + 0x15, 0xde, 0x9a, 0xd2, 0x73, 0x94, 0x01, 0x63, 0x90, 0xe4, 0xcb, 0x00, 0x62, 0xdd, 0x16, 0x9e, + 0xc0, 0xbf, 0x87, 0xb3, 0x63, 0x4a, 0xdd, 0x75, 0x34, 0xf5, 0xf9, 0xae, 0xee, 0x04, 0x56, 0xb0, + 0x2b, 0x2e, 0x3c, 0xb9, 0x19, 0xa2, 0x60, 0x0c, 0x51, 0xfb, 0xdd, 
0x22, 0x54, 0x95, 0x9b, 0xf5, + 0x3e, 0xf8, 0xd6, 0x5a, 0x09, 0xdf, 0xda, 0x11, 0x05, 0xb4, 0x66, 0x79, 0xd6, 0xdc, 0x94, 0x67, + 0x6d, 0x31, 0xbf, 0xa8, 0xbb, 0xfb, 0xd5, 0xbe, 0x57, 0x84, 0x31, 0xc5, 0x9a, 0xd7, 0xab, 0xf6, + 0x59, 0x38, 0x21, 0x36, 0xa2, 0x57, 0xf4, 0xdb, 0xe2, 0xee, 0x17, 0xae, 0xb0, 0xb2, 0x88, 0x21, + 0x6d, 0x24, 0x93, 0x30, 0xcd, 0xcb, 0x9a, 0xb5, 0x20, 0xad, 0xb3, 0x45, 0x88, 0xd8, 0xba, 0x12, + 0x8b, 0x25, 0xde, 0xac, 0x1b, 0xa9, 0x34, 0xec, 0xe1, 0x4e, 0xbb, 0xf5, 0xca, 0xc7, 0xe0, 0xd6, + 0xfb, 0x93, 0x02, 0x8c, 0x44, 0xfa, 0x3a, 0x76, 0xa7, 0xde, 0x66, 0xd2, 0xa9, 0x37, 0x9b, 0xbb, + 0x39, 0xf4, 0x71, 0xe9, 0xfd, 0xa7, 0x61, 0x48, 0xc4, 0x55, 0x93, 0x0d, 0x38, 0x67, 0x65, 0x46, + 0x87, 0xc5, 0x46, 0x9b, 0xf0, 0xa0, 0xf0, 0x52, 0x5f, 0x4e, 0xbc, 0x0b, 0x0a, 0xe9, 0x42, 0x75, + 0x87, 0x7a, 0x81, 0x65, 0x50, 0x55, 0xbf, 0xc5, 0xdc, 0x26, 0x99, 0x74, 0x5c, 0x86, 0x3a, 0xbd, + 0x29, 0x05, 0x60, 0x28, 0x8a, 0x6c, 0x40, 0x85, 0x9a, 0x2d, 0xaa, 0x6e, 0xe3, 0xc9, 0x79, 0xd7, + 0x65, 0xa8, 0x4f, 0xf6, 0xe6, 0xa3, 0x80, 0x26, 0x3e, 0xd4, 0x6c, 0xb5, 0x31, 0x25, 0xdb, 0xe1, + 0xe0, 0x06, 0x56, 0xb8, 0xc5, 0x15, 0x1d, 0xd4, 0x0f, 0x49, 0x18, 0xc9, 0x21, 0xdb, 0xa1, 0x87, + 0xac, 0x72, 0x44, 0x83, 0xc7, 0x5d, 0xfc, 0x63, 0x3e, 0xd4, 0x6e, 0xe9, 0x01, 0xf5, 0xda, 0xba, + 0xb7, 0x2d, 0x57, 0x1b, 0x83, 0xd7, 0xf0, 0x25, 0x85, 0x14, 0xd5, 0x30, 0x24, 0x61, 0x24, 0x87, + 0xb8, 0x50, 0x0b, 0xa4, 0xf9, 0xac, 0xdc, 0x80, 0x83, 0x0b, 0x55, 0x86, 0xb8, 0x2f, 0xe3, 0xab, + 0xd5, 0x2b, 0x46, 0x32, 0xc8, 0x4e, 0xe2, 0x3a, 0x62, 0x71, 0x09, 0x75, 0x23, 0x87, 0x3b, 0x59, + 0x42, 0x45, 0xd3, 0x4d, 0xf6, 0xb5, 0xc6, 0xda, 0x3b, 0x95, 0x68, 0x58, 0xbe, 0xdf, 0x4e, 0xae, + 0xa7, 0x93, 0x4e, 0xae, 0x0b, 0x69, 0x27, 0x57, 0x6a, 0x7f, 0xf3, 0xf0, 0x11, 0x99, 0x29, 0xf7, + 0x52, 0xf9, 0x18, 0xdc, 0x4b, 0x4f, 0x41, 0x7d, 0x87, 0x8f, 0x04, 0xe2, 0x6a, 0x9f, 0x0a, 0x9f, + 0x46, 0xf8, 0xc8, 0x7e, 0x33, 0x22, 0x63, 0x9c, 0x87, 0x65, 0x91, 0x3f, 0x60, 0x08, 0xef, 0x46, + 0x95, 
0x59, 0x9a, 0x11, 0x19, 0xe3, 0x3c, 0x3c, 0x98, 0xcb, 0x72, 0xb6, 0x45, 0x86, 0x61, 0x9e, + 0x41, 0x04, 0x73, 0x29, 0x22, 0x46, 0xe9, 0xe4, 0x12, 0x54, 0xbb, 0xe6, 0xa6, 0xe0, 0xad, 0x72, + 0x5e, 0x6e, 0x61, 0xae, 0xcf, 0x2f, 0xc8, 0xab, 0x86, 0x54, 0x2a, 0x2b, 0x49, 0x5b, 0xef, 0xa8, + 0x04, 0xbe, 0x36, 0x94, 0x25, 0x59, 0x89, 0xc8, 0x18, 0xe7, 0x21, 0x9f, 0x82, 0x31, 0x8f, 0x9a, + 0x5d, 0x83, 0x86, 0xb9, 0x84, 0x77, 0x8a, 0x88, 0x3f, 0x4d, 0xc4, 0x53, 0x30, 0xc5, 0xd9, 0xc7, + 0x49, 0x56, 0x1f, 0x28, 0xf4, 0xf4, 0xa7, 0x05, 0x20, 0xbd, 0xc1, 0xcf, 0x64, 0x0b, 0x86, 0x1c, + 0xee, 0xfd, 0xca, 0x7d, 0x9b, 0x72, 0xcc, 0x89, 0x26, 0x86, 0x25, 0x49, 0x90, 0xf8, 0xc4, 0x81, + 0x2a, 0xbd, 0x1d, 0x50, 0xcf, 0x09, 0x0f, 0x43, 0x1c, 0xcd, 0xcd, 0xcd, 0x62, 0x35, 0x20, 0x91, + 0x31, 0x94, 0xa1, 0xfd, 0xbc, 0x08, 0xf5, 0x18, 0xdf, 0xbd, 0x16, 0x95, 0xfc, 0x3c, 0xb6, 0x70, + 0x3a, 0xad, 0x7b, 0xb6, 0xec, 0x61, 0xb1, 0xf3, 0xd8, 0x32, 0x09, 0x97, 0x31, 0xce, 0x47, 0x66, + 0x00, 0xda, 0xba, 0x1f, 0x50, 0x8f, 0xcf, 0xbe, 0xa9, 0x53, 0xd0, 0x2b, 0x61, 0x0a, 0xc6, 0xb8, + 0xc8, 0x45, 0x79, 0xf7, 0x76, 0x39, 0x79, 0x6b, 0x5d, 0x9f, 0x8b, 0xb5, 0x2b, 0x47, 0x70, 0xb1, + 0x36, 0x69, 0xc1, 0xb8, 0x2a, 0xb5, 0x4a, 0x3d, 0xdc, 0x9d, 0x66, 0x62, 0xfd, 0x92, 0x82, 0xc0, + 0x1e, 0x50, 0xed, 0x9d, 0x02, 0x8c, 0x26, 0x5c, 0x1e, 0xe2, 0xbe, 0x39, 0x15, 0xba, 0x9f, 0xb8, + 0x6f, 0x2e, 0x16, 0x71, 0xff, 0x24, 0x0c, 0x09, 0x05, 0xa5, 0x23, 0xf2, 0x84, 0x0a, 0x51, 0xa6, + 0xb2, 0xb1, 0x4c, 0x3a, 0x55, 0xd3, 0x63, 0x99, 0xf4, 0xba, 0xa2, 0x4a, 0x17, 0xbe, 0x7a, 0x51, + 0xba, 0x5e, 0x5f, 0xbd, 0xa0, 0x63, 0xc8, 0xa1, 0x7d, 0x9f, 0x97, 0x3b, 0xf0, 0x76, 0xc3, 0xb5, + 0x5c, 0x0b, 0x86, 0x65, 0x14, 0x96, 0xec, 0x1a, 0x2f, 0xe6, 0xf0, 0xc3, 0x70, 0x1c, 0x19, 0x6f, + 0xa4, 0x1b, 0xdb, 0x37, 0x36, 0x37, 0x51, 0xa1, 0x93, 0xcb, 0x50, 0x73, 0x9d, 0x05, 0xdd, 0xb2, + 0xbb, 0x9e, 0x1a, 0xd9, 0x3f, 0xc2, 0xc6, 0xaa, 0x1b, 0x8a, 0x78, 0x67, 0x6f, 0xf2, 0x4c, 0xf8, + 0x92, 0x28, 0x24, 0x46, 0x39, 0xb5, 0xbf, 
0x2f, 0x01, 0x8f, 0xc0, 0x21, 0xcf, 0x41, 0xad, 0x4d, + 0x8d, 0x2d, 0xdd, 0xb1, 0x7c, 0x75, 0x63, 0x26, 0x5b, 0x9f, 0xd7, 0x56, 0x14, 0xf1, 0x0e, 0x53, + 0xc1, 0x6c, 0x73, 0x99, 0x07, 0xb9, 0x47, 0xbc, 0xc4, 0x80, 0xa1, 0x96, 0xef, 0xeb, 0x1d, 0x2b, + 0xf7, 0x06, 0xb0, 0xb8, 0xa1, 0x50, 0x0c, 0x03, 0xe2, 0x19, 0x25, 0x34, 0x31, 0xa0, 0xd2, 0xb1, + 0x75, 0xcb, 0xc9, 0xfd, 0xa7, 0x18, 0x56, 0x83, 0x55, 0x86, 0x24, 0x9c, 0x52, 0xfc, 0x11, 0x05, + 0x36, 0xe9, 0x42, 0xdd, 0x37, 0x3c, 0xbd, 0xed, 0x6f, 0xe9, 0x33, 0xcf, 0x3c, 0x9b, 0xdb, 0xcc, + 0x8b, 0x44, 0x89, 0x59, 0x67, 0x0e, 0x67, 0x57, 0x9a, 0x57, 0x66, 0x67, 0x9e, 0x79, 0x16, 0xe3, + 0x72, 0xe2, 0x62, 0x9f, 0x79, 0x6a, 0x46, 0xf6, 0xdc, 0x23, 0x17, 0xfb, 0xcc, 0x53, 0x33, 0x18, + 0x97, 0xa3, 0xfd, 0x5d, 0x01, 0x6a, 0x21, 0x2f, 0x59, 0x07, 0x60, 0x63, 0x88, 0xbc, 0x53, 0xf0, + 0x50, 0xf7, 0xfb, 0xf3, 0x75, 0xfd, 0x7a, 0x98, 0x19, 0x63, 0x40, 0x19, 0x97, 0x2e, 0x16, 0x8f, + 0xfa, 0xd2, 0xc5, 0x69, 0xa8, 0x6d, 0xe9, 0x8e, 0xe9, 0x6f, 0xe9, 0xdb, 0x62, 0x28, 0x8d, 0x5d, + 0x43, 0x7a, 0x45, 0x25, 0x60, 0xc4, 0xa3, 0xfd, 0xf6, 0x10, 0x88, 0x5d, 0x5b, 0xd6, 0xd9, 0x4d, + 0xcb, 0x17, 0x61, 0xc3, 0x05, 0x9e, 0x33, 0xec, 0xec, 0xf3, 0x92, 0x8e, 0x21, 0x07, 0x39, 0x0b, + 0xa5, 0xb6, 0xe5, 0xc8, 0x3d, 0x1b, 0xee, 0xb2, 0x5b, 0xb1, 0x1c, 0x64, 0x34, 0x9e, 0xa4, 0xdf, + 0x96, 0x11, 0x5f, 0x22, 0x49, 0xbf, 0x8d, 0x8c, 0xc6, 0x16, 0xd1, 0xb6, 0xeb, 0x6e, 0xb3, 0x6e, + 0xab, 0x02, 0xc3, 0xca, 0x7c, 0x2a, 0xe7, 0x8b, 0xe8, 0xe5, 0x64, 0x12, 0xa6, 0x79, 0xc9, 0x3a, + 0x3c, 0xf2, 0x06, 0xf5, 0x5c, 0x39, 0x4e, 0x35, 0x6d, 0x4a, 0x3b, 0x0a, 0x46, 0x18, 0x41, 0x3c, + 0xbe, 0xec, 0x8b, 0xd9, 0x2c, 0xd8, 0x2f, 0x2f, 0x8f, 0x54, 0xd5, 0xbd, 0x16, 0x0d, 0x56, 0x3d, + 0xd7, 0xa0, 0xbe, 0x6f, 0x39, 0x2d, 0x05, 0x3b, 0x14, 0xc1, 0xae, 0x65, 0xb3, 0x60, 0xbf, 0xbc, + 0xe4, 0x65, 0x98, 0x10, 0x49, 0xc2, 0x5c, 0x98, 0xdd, 0xd1, 0x2d, 0x5b, 0xdf, 0xb0, 0x6c, 0xf5, + 0x83, 0xb5, 0x51, 0xb1, 0x33, 0xb2, 0xd6, 0x87, 0x07, 0xfb, 0xe6, 0x26, 0x57, 
0x61, 0x5c, 0xed, + 0x8b, 0xad, 0x52, 0xaf, 0x19, 0xee, 0xe4, 0x8f, 0x36, 0x2e, 0xb0, 0x15, 0xeb, 0x3c, 0xed, 0x78, + 0xd4, 0xe0, 0x76, 0x63, 0x8a, 0x0b, 0x7b, 0xf2, 0x11, 0x84, 0x33, 0x7c, 0xbb, 0x7e, 0xbd, 0x33, + 0xe7, 0xba, 0xb6, 0xe9, 0xde, 0x72, 0x54, 0xdd, 0x85, 0x69, 0xc6, 0xb7, 0xc2, 0x9a, 0x99, 0x1c, + 0xd8, 0x27, 0x27, 0xab, 0x39, 0x4f, 0x99, 0x77, 0x6f, 0x39, 0x69, 0x54, 0x88, 0x6a, 0xde, 0xec, + 0xc3, 0x83, 0x7d, 0x73, 0x93, 0x05, 0x20, 0xe9, 0x1a, 0xac, 0x77, 0xb8, 0x39, 0x37, 0xda, 0x38, + 0x23, 0xae, 0x07, 0x49, 0xa7, 0x62, 0x46, 0x0e, 0xb2, 0x0c, 0xa7, 0xd3, 0x54, 0x26, 0x8e, 0x9f, + 0x11, 0x18, 0x15, 0x17, 0x83, 0x62, 0x46, 0x3a, 0x66, 0xe6, 0xd2, 0x7e, 0xa7, 0x08, 0xa3, 0x89, + 0xf3, 0xe4, 0x0f, 0xdc, 0xb9, 0x5d, 0x66, 0x43, 0xb7, 0xfd, 0xd6, 0xd2, 0xfc, 0x15, 0xaa, 0x9b, + 0xd4, 0xbb, 0x46, 0xd5, 0xd9, 0x7f, 0x3e, 0xa8, 0xac, 0x24, 0x52, 0x30, 0xc5, 0x49, 0x36, 0xa1, + 0x22, 0x3c, 0xc2, 0x79, 0xff, 0x14, 0xa1, 0x74, 0xc4, 0xdd, 0xc2, 0xf2, 0xf7, 0x2a, 0xae, 0x47, + 0x51, 0xc0, 0x6b, 0x01, 0x8c, 0xc4, 0x39, 0xd8, 0x40, 0x12, 0x99, 0x9b, 0xc3, 0x09, 0x53, 0x73, + 0x09, 0x4a, 0x41, 0x30, 0xe8, 0x89, 0x60, 0xb1, 0xc3, 0xb0, 0xb6, 0x8c, 0x0c, 0x43, 0xdb, 0x64, + 0xdf, 0xce, 0xf7, 0x2d, 0xd7, 0x91, 0xd7, 0x43, 0xaf, 0xc3, 0x70, 0x20, 0x9d, 0x6c, 0x83, 0x9d, + 0x68, 0xe6, 0x36, 0x8a, 0x72, 0xb0, 0x29, 0x2c, 0xed, 0x4f, 0x8b, 0x50, 0x0b, 0x17, 0xc4, 0x07, + 0xb8, 0x76, 0xd9, 0x85, 0x5a, 0x18, 0x6e, 0x94, 0xfb, 0xe7, 0x73, 0x51, 0x14, 0x0c, 0x5f, 0xc3, + 0x85, 0xaf, 0x18, 0xc9, 0x88, 0x87, 0x32, 0x95, 0x72, 0x84, 0x32, 0x75, 0x60, 0x38, 0xf0, 0xac, + 0x56, 0x4b, 0x5a, 0xe7, 0x79, 0x62, 0x99, 0x42, 0x75, 0xad, 0x09, 0x40, 0xa9, 0x59, 0xf1, 0x82, + 0x4a, 0x8c, 0xf6, 0x1a, 0x8c, 0xa7, 0x39, 0xb9, 0xe9, 0x6a, 0x6c, 0x51, 0xb3, 0x6b, 0x2b, 0x1d, + 0x47, 0xa6, 0xab, 0xa4, 0x63, 0xc8, 0xc1, 0x96, 0xaf, 0xec, 0x33, 0xbd, 0xe1, 0x3a, 0xca, 0x7c, + 0xe4, 0xab, 0x80, 0x35, 0x49, 0xc3, 0x30, 0x55, 0xfb, 0xab, 0x12, 0x9c, 0x8d, 0xdc, 0x1a, 0x2b, + 0xba, 0xa3, 0xb7, 
0x0e, 0xf0, 0xc7, 0xb1, 0x0f, 0xce, 0x88, 0x1c, 0xf6, 0xee, 0xfc, 0xd2, 0x03, + 0x70, 0x77, 0xfe, 0x3f, 0x14, 0x81, 0x87, 0x46, 0x92, 0xaf, 0xc2, 0x88, 0x1e, 0xfb, 0xd9, 0xa4, + 0xfc, 0x9c, 0x97, 0x73, 0x7f, 0x4e, 0x1e, 0x81, 0x19, 0x86, 0xfa, 0xc4, 0xa9, 0x98, 0x10, 0x48, + 0x5c, 0xa8, 0x6e, 0xea, 0xb6, 0xcd, 0x6c, 0xa1, 0xdc, 0xdb, 0x34, 0x09, 0xe1, 0xbc, 0x99, 0x2f, + 0x48, 0x68, 0x0c, 0x85, 0x90, 0x37, 0x0b, 0x30, 0xea, 0xc5, 0x97, 0x49, 0xf2, 0x83, 0xe4, 0xd9, + 0xc4, 0x8f, 0xa1, 0xc5, 0x03, 0x8b, 0xe2, 0x6b, 0xb1, 0xa4, 0x4c, 0xed, 0x2f, 0x0b, 0x30, 0xda, + 0xb4, 0x2d, 0xd3, 0x72, 0x5a, 0xc7, 0x78, 0x75, 0xff, 0x0d, 0xa8, 0xf8, 0xb6, 0x65, 0xd2, 0x01, + 0x67, 0x13, 0x31, 0x8f, 0x31, 0x00, 0x14, 0x38, 0xc9, 0x7f, 0x01, 0x94, 0x0e, 0xf0, 0x2f, 0x80, + 0x5f, 0x0d, 0x81, 0x0c, 0xf2, 0x25, 0x5d, 0xa8, 0xb5, 0xd4, 0x15, 0xe3, 0xb2, 0x8e, 0x57, 0x72, + 0x5c, 0x4f, 0x97, 0xb8, 0xac, 0x5c, 0x8c, 0xfd, 0x21, 0x11, 0x23, 0x49, 0x84, 0x26, 0xff, 0x72, + 0x3a, 0x9f, 0xf3, 0x2f, 0xa7, 0x42, 0x5c, 0xef, 0x7f, 0x4e, 0x75, 0x28, 0x6f, 0x05, 0x41, 0x47, + 0x36, 0xa6, 0xc1, 0xa3, 0xb8, 0xa3, 0x1b, 0x52, 0x84, 0x4d, 0xc4, 0xde, 0x91, 0x43, 0x33, 0x11, + 0x8e, 0x1e, 0xfe, 0xd5, 0x6a, 0x2e, 0x57, 0xc0, 0x40, 0x5c, 0x04, 0x7b, 0x47, 0x0e, 0x4d, 0xbe, + 0x02, 0xf5, 0xc0, 0xd3, 0x1d, 0x7f, 0xd3, 0xf5, 0xda, 0xd4, 0x93, 0x6b, 0xd4, 0x85, 0x1c, 0x3f, + 0xfa, 0x5c, 0x8b, 0xd0, 0xc4, 0x4e, 0x64, 0x82, 0x84, 0x71, 0x69, 0x64, 0x1b, 0xaa, 0x5d, 0x53, + 0x14, 0x4c, 0xba, 0x9f, 0x66, 0xf3, 0xfc, 0xbb, 0x35, 0x16, 0x0e, 0xa0, 0xde, 0x30, 0x14, 0x90, + 0xfc, 0x81, 0xdb, 0xf0, 0x51, 0xfd, 0xc0, 0x2d, 0xde, 0x1a, 0xb3, 0xae, 0x6f, 0x20, 0x6d, 0x69, + 0xd7, 0x3a, 0x2d, 0x19, 0xcd, 0xb4, 0x90, 0xdb, 0xe4, 0x14, 0x22, 0xeb, 0xa1, 0x6d, 0xec, 0xb4, + 0x50, 0xc9, 0xd0, 0xda, 0x20, 0x77, 0x09, 0x88, 0x91, 0xf8, 0xcd, 0x89, 0x38, 0x53, 0x34, 0x7d, + 0xb0, 0xf1, 0x20, 0xfc, 0xdf, 0x46, 0xec, 0x9a, 0xe5, 0xcc, 0xff, 0x99, 0x68, 0x7f, 0x56, 0x84, + 0xd2, 0xda, 0x72, 0x53, 0x5c, 0x9d, 0xc8, 0xff, 0x21, 
0x44, 0x9b, 0xdb, 0x56, 0xe7, 0x26, 0xf5, + 0xac, 0xcd, 0x5d, 0xb9, 0xf4, 0x8e, 0x5d, 0x9d, 0x98, 0xe6, 0xc0, 0x8c, 0x5c, 0xe4, 0x15, 0x18, + 0x31, 0xf4, 0x39, 0xea, 0x05, 0x83, 0x38, 0x16, 0xf8, 0xe1, 0xc9, 0xb9, 0xd9, 0x28, 0x3b, 0x26, + 0xc0, 0xc8, 0x3a, 0x80, 0x11, 0x41, 0x97, 0x0e, 0xed, 0x0e, 0x89, 0x01, 0xc7, 0x80, 0x08, 0x42, + 0x6d, 0x9b, 0xb1, 0x72, 0xd4, 0xf2, 0x61, 0x50, 0x79, 0xcb, 0xb9, 0xa6, 0xf2, 0x62, 0x04, 0xa3, + 0x39, 0x30, 0x9a, 0xf8, 0xf7, 0x09, 0xf9, 0x24, 0x54, 0xdd, 0x4e, 0x6c, 0x38, 0xad, 0xf1, 0xb8, + 0xc9, 0xea, 0x0d, 0x49, 0xbb, 0xb3, 0x37, 0x39, 0xba, 0xec, 0xb6, 0x2c, 0x43, 0x11, 0x30, 0x64, + 0x27, 0x1a, 0x0c, 0xf1, 0x13, 0x4f, 0xea, 0xcf, 0x27, 0x7c, 0xee, 0xe0, 0x3f, 0x27, 0xf0, 0x51, + 0xa6, 0x68, 0x5f, 0x2b, 0x43, 0xb4, 0xb7, 0x46, 0x7c, 0x18, 0x12, 0x11, 0xdd, 0x72, 0xe4, 0x3e, + 0xd6, 0xe0, 0x71, 0x29, 0x8a, 0xb4, 0xa0, 0xf4, 0x9a, 0xbb, 0x91, 0x7b, 0xe0, 0x8e, 0x1d, 0x75, + 0x16, 0xbe, 0xb2, 0x18, 0x01, 0x99, 0x04, 0xf2, 0xdf, 0x0b, 0x70, 0xd2, 0x4f, 0x9b, 0xbe, 0xb2, + 0x39, 0x60, 0x7e, 0x1b, 0x3f, 0x6d, 0x4c, 0xcb, 0x00, 0xd7, 0x7e, 0xc9, 0xd8, 0x5b, 0x16, 0xa6, + 0x7f, 0xb1, 0xe9, 0x25, 0x9b, 0xd3, 0x62, 0xce, 0xff, 0xf5, 0x25, 0xf5, 0x9f, 0xa4, 0xa1, 0x14, + 0xa5, 0x7d, 0xa3, 0x08, 0xf5, 0xd8, 0x68, 0x9d, 0xfb, 0x87, 0x3a, 0xb7, 0x53, 0x3f, 0xd4, 0x59, + 0x1d, 0x7c, 0x0f, 0x38, 0x2a, 0xd5, 0x71, 0xff, 0x53, 0xe7, 0x47, 0x45, 0x28, 0xad, 0xcf, 0x2f, + 0x24, 0x17, 0xad, 0x85, 0xfb, 0xb0, 0x68, 0xdd, 0x82, 0xe1, 0x8d, 0xae, 0x65, 0x07, 0x96, 0x93, + 0xfb, 0x32, 0x06, 0xf5, 0xff, 0x21, 0xb9, 0xc7, 0x20, 0x50, 0x51, 0xc1, 0x93, 0x16, 0x0c, 0xb7, + 0xc4, 0x6d, 0x78, 0xb9, 0x23, 0xe3, 0xe4, 0xad, 0x7a, 0x42, 0x90, 0x7c, 0x41, 0x85, 0xae, 0xed, + 0x82, 0xfc, 0x91, 0xfa, 0x7d, 0xd7, 0xa6, 0xf6, 0x15, 0x08, 0xad, 0x80, 0xfb, 0x2f, 0xfc, 0x6f, + 0x0a, 0x90, 0x34, 0x7c, 0xee, 0x7f, 0x6b, 0xda, 0x4e, 0xb7, 0xa6, 0xf9, 0xa3, 0xe8, 0x7c, 0xd9, + 0x0d, 0x4a, 0xfb, 0xad, 0x22, 0x0c, 0xdd, 0xb7, 0x03, 0xb4, 0x34, 0x11, 0xe4, 0x37, 0x97, 
0x73, + 0x60, 0xec, 0x1b, 0xe2, 0xd7, 0x4e, 0x85, 0xf8, 0xe5, 0xfd, 0x63, 0xea, 0x3d, 0x02, 0xfc, 0xfe, + 0xa8, 0x00, 0x72, 0x58, 0x5e, 0x72, 0xfc, 0x40, 0x77, 0x0c, 0x4a, 0x8c, 0x70, 0x0e, 0xc8, 0x1b, + 0x49, 0x22, 0xa3, 0xad, 0xc4, 0xb4, 0xcf, 0x9f, 0xd5, 0x98, 0x4f, 0x3e, 0x0e, 0xd5, 0x2d, 0xd7, + 0x0f, 0xf8, 0x38, 0x5f, 0x4c, 0x7a, 0x97, 0xae, 0x48, 0x3a, 0x86, 0x1c, 0xe9, 0x1d, 0xd7, 0x4a, + 0xff, 0x1d, 0x57, 0xed, 0xbb, 0x45, 0x18, 0x79, 0xbf, 0x9c, 0x02, 0xce, 0x0a, 0x89, 0x2c, 0xe5, + 0x0c, 0x89, 0x2c, 0x1f, 0x26, 0x24, 0x52, 0xfb, 0x71, 0x01, 0xe0, 0xbe, 0x1d, 0x41, 0x36, 0x93, + 0xd1, 0x8a, 0xb9, 0xdb, 0x55, 0x76, 0xac, 0xe2, 0x6f, 0x54, 0x54, 0x95, 0x78, 0xa4, 0xe2, 0x5b, + 0x05, 0x18, 0xd3, 0x13, 0xd1, 0x7f, 0xb9, 0x4d, 0xcb, 0x54, 0x30, 0x61, 0x78, 0xdc, 0x32, 0x49, + 0xc7, 0x94, 0x58, 0xf2, 0x7c, 0x74, 0xfd, 0xed, 0xf5, 0xa8, 0xd9, 0xf7, 0xdc, 0x5b, 0xcb, 0xcd, + 0x9c, 0x04, 0xe7, 0x3d, 0xa2, 0x2d, 0x4b, 0x47, 0x12, 0x6d, 0x19, 0x3f, 0x47, 0x56, 0xbe, 0xeb, + 0x39, 0xb2, 0x1d, 0xa8, 0x6d, 0x7a, 0x6e, 0x9b, 0x07, 0x34, 0xca, 0x7f, 0xad, 0x5e, 0xce, 0x31, + 0xa7, 0x44, 0x7f, 0x19, 0x8f, 0x7c, 0x3c, 0x0b, 0x0a, 0x1f, 0x23, 0x51, 0xdc, 0x2d, 0xee, 0x0a, + 0xa9, 0x43, 0x47, 0x29, 0x35, 0x1c, 0x4b, 0xd6, 0x04, 0x3a, 0x2a, 0x31, 0xc9, 0x20, 0xc6, 0xe1, + 0xfb, 0x13, 0xc4, 0xa8, 0xfd, 0xa8, 0xac, 0x06, 0xb0, 0x07, 0xee, 0xa6, 0xc5, 0xf7, 0xff, 0xd1, + 0xd5, 0xf4, 0xb9, 0xd2, 0xe1, 0xfb, 0x78, 0xae, 0xb4, 0x7a, 0x34, 0xe7, 0x4a, 0x6b, 0x87, 0x38, + 0x57, 0xba, 0x57, 0x82, 0xd4, 0xa2, 0xeb, 0x83, 0x0d, 0x96, 0x7f, 0x52, 0x1b, 0x2c, 0x6f, 0x17, + 0x21, 0x1a, 0x45, 0x0e, 0x19, 0x80, 0xf2, 0x32, 0x54, 0xdb, 0xfa, 0xed, 0x79, 0x6a, 0xeb, 0xbb, + 0x79, 0xfe, 0xae, 0xb9, 0x22, 0x31, 0x30, 0x44, 0x23, 0x3e, 0x80, 0x15, 0xde, 0x70, 0x9d, 0xdb, + 0x55, 0x1d, 0x5d, 0x96, 0x2d, 0x9c, 0x61, 0xd1, 0x3b, 0xc6, 0xc4, 0x68, 0x7f, 0x58, 0x04, 0x79, + 0x15, 0x3a, 0xa1, 0x50, 0xd9, 0xb4, 0x6e, 0x53, 0x33, 0x77, 0x38, 0x69, 0xec, 0x9f, 0xc7, 0xc2, + 0x17, 0xcf, 0x09, 0x28, 0xd0, 
0xb9, 0x93, 0x55, 0xec, 0xad, 0x48, 0xfd, 0xe5, 0x70, 0xb2, 0xc6, + 0xf7, 0x68, 0xa4, 0x93, 0x55, 0x90, 0x50, 0xc9, 0x10, 0x3e, 0x5d, 0xbe, 0xcd, 0x9e, 0x7b, 0x2b, + 0x29, 0xb1, 0x5d, 0xaf, 0x7c, 0xba, 0xbe, 0x38, 0x58, 0x2e, 0x65, 0x34, 0xbe, 0xf4, 0xc3, 0x9f, + 0x5c, 0x78, 0xe8, 0xc7, 0x3f, 0xb9, 0xf0, 0xd0, 0xbb, 0x3f, 0xb9, 0xf0, 0xd0, 0xd7, 0xf6, 0x2f, + 0x14, 0x7e, 0xb8, 0x7f, 0xa1, 0xf0, 0xe3, 0xfd, 0x0b, 0x85, 0x77, 0xf7, 0x2f, 0x14, 0xfe, 0x7c, + 0xff, 0x42, 0xe1, 0xbf, 0xfc, 0xc5, 0x85, 0x87, 0xbe, 0xf8, 0x5c, 0x54, 0x84, 0x69, 0x55, 0x84, + 0x69, 0x25, 0x70, 0xba, 0xb3, 0xdd, 0x9a, 0x66, 0x45, 0x88, 0x28, 0xaa, 0x08, 0xff, 0x18, 0x00, + 0x00, 0xff, 0xff, 0xa1, 0xe2, 0x38, 0xfd, 0x6e, 0x95, 0x00, 0x00, } func (m *AbstractPodTemplate) Marshal() (dAtA []byte, err error) { @@ -3643,6 +3709,46 @@ func (m *Authorization) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *Backoff) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Backoff) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Backoff) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Steps != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Steps)) + i-- + dAtA[i] = 0x10 + } + if m.Interval != nil { + { + size, err := m.Interval.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *BasicAuth) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -7304,6 +7410,48 @@ func (m *RedisSettings) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *RetryStrategy) Marshal() (dAtA []byte, 
err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RetryStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RetryStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.OnFailure != nil { + i -= len(*m.OnFailure) + copy(dAtA[i:], *m.OnFailure) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.OnFailure))) + i-- + dAtA[i] = 0x12 + } + if m.BackOff != nil { + { + size, err := m.BackOff.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *SASL) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -7840,6 +7988,16 @@ func (m *Sink) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + { + size, err := m.RetryStrategy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a if m.Fallback != nil { { size, err := m.Fallback.MarshalToSizedBuffer(dAtA[:i]) @@ -9198,6 +9356,22 @@ func (m *Authorization) Size() (n int) { return n } +func (m *Backoff) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Interval != nil { + l = m.Interval.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Steps != nil { + n += 1 + sovGenerated(uint64(*m.Steps)) + } + return n +} + func (m *BasicAuth) Size() (n int) { if m == nil { return 0 @@ -10544,6 +10718,23 @@ func (m *RedisSettings) Size() (n int) { return n } +func (m *RetryStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BackOff != nil { + l = m.BackOff.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if 
m.OnFailure != nil { + l = len(*m.OnFailure) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *SASL) Size() (n int) { if m == nil { return 0 @@ -10757,6 +10948,8 @@ func (m *Sink) Size() (n int) { l = m.Fallback.Size() n += 1 + l + sovGenerated(uint64(l)) } + l = m.RetryStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -11289,6 +11482,17 @@ func (this *Authorization) String() string { }, "") return s } +func (this *Backoff) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Backoff{`, + `Interval:` + strings.Replace(fmt.Sprintf("%v", this.Interval), "Duration", "v11.Duration", 1) + `,`, + `Steps:` + valueToStringGenerated(this.Steps) + `,`, + `}`, + }, "") + return s +} func (this *BasicAuth) String() string { if this == nil { return "nil" @@ -12238,6 +12442,17 @@ func (this *RedisSettings) String() string { }, "") return s } +func (this *RetryStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RetryStrategy{`, + `BackOff:` + strings.Replace(this.BackOff.String(), "Backoff", "Backoff", 1) + `,`, + `OnFailure:` + valueToStringGenerated(this.OnFailure) + `,`, + `}`, + }, "") + return s +} func (this *SASL) String() string { if this == nil { return "nil" @@ -12367,6 +12582,7 @@ func (this *Sink) String() string { s := strings.Join([]string{`&Sink{`, `AbstractSink:` + strings.Replace(strings.Replace(this.AbstractSink.String(), "AbstractSink", "AbstractSink", 1), `&`, ``, 1) + `,`, `Fallback:` + strings.Replace(this.Fallback.String(), "AbstractSink", "AbstractSink", 1) + `,`, + `RetryStrategy:` + strings.Replace(strings.Replace(this.RetryStrategy.String(), "RetryStrategy", "RetryStrategy", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -14085,6 +14301,112 @@ func (m *Authorization) Unmarshal(dAtA []byte) error { } return nil } +func (m *Backoff) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Backoff: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Backoff: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Interval", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Interval == nil { + m.Interval = &v11.Duration{} + } + if err := m.Interval.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Steps", wireType) + } + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Steps = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} 
func (m *BasicAuth) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -26019,6 +26341,125 @@ func (m *RedisSettings) Unmarshal(dAtA []byte) error { } return nil } +func (m *RetryStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RetryStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RetryStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BackOff", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BackOff == nil { + m.BackOff = &Backoff{} + } + if err := m.BackOff.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OnFailure", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := OnFailureRetryStrategy(dAtA[iNdEx:postIndex]) + m.OnFailure = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *SASL) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -27614,6 +28055,39 @@ func (m *Sink) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RetryStrategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RetryStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/pkg/apis/numaflow/v1alpha1/generated.proto b/pkg/apis/numaflow/v1alpha1/generated.proto index 8c5a6db0f2..9c513d6fde 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.proto +++ b/pkg/apis/numaflow/v1alpha1/generated.proto @@ -209,6 +209,18 @@ message Authorization { optional k8s.io.api.core.v1.SecretKeySelector token = 1; } +// Backoff defines parameters used to systematically 
configure the retry strategy. +message Backoff { + // Interval sets the delay to wait before retry, after a failure occurs. + // +kubebuilder:default="1ms" + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration interval = 1; + + // Steps defines the number of times to try writing to a sink including retries + // +optional + optional uint32 steps = 2; +} + // BasicAuth represents the basic authentication approach which contains a user name and a password. message BasicAuth { // Secret for auth user @@ -1248,6 +1260,19 @@ message RedisSettings { optional string sentinel = 4; } +// RetryStrategy struct encapsulates the settings for retrying operations in the event of failures. +// It includes a BackOff strategy to manage the timing of retries and defines the action to take upon failure. +message RetryStrategy { + // BackOff specifies the parameters for the backoff strategy, controlling how delays between retries should increase. + // +optional + optional Backoff backoff = 1; + + // OnFailure specifies the action to take when a retry fails. The default action is to retry. + // +optional + // +kubebuilder:default="retry" + optional string onFailure = 2; +} + message SASL { // SASL mechanism to use optional string mechanism = 1; @@ -1418,6 +1443,10 @@ message Sink { // initiated if the ud-sink response field sets it. // +optional optional AbstractSink fallback = 2; + + // RetryStrategy struct encapsulates the settings for retrying operations in the event of failures. 
+ // +optional + optional RetryStrategy retryStrategy = 3; } // SlidingWindow describes a sliding window diff --git a/pkg/apis/numaflow/v1alpha1/openapi_generated.go b/pkg/apis/numaflow/v1alpha1/openapi_generated.go index 8ed0f228c0..79aab81c33 100644 --- a/pkg/apis/numaflow/v1alpha1/openapi_generated.go +++ b/pkg/apis/numaflow/v1alpha1/openapi_generated.go @@ -34,6 +34,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.AbstractSink": schema_pkg_apis_numaflow_v1alpha1_AbstractSink(ref), "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.AbstractVertex": schema_pkg_apis_numaflow_v1alpha1_AbstractVertex(ref), "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Authorization": schema_pkg_apis_numaflow_v1alpha1_Authorization(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Backoff": schema_pkg_apis_numaflow_v1alpha1_Backoff(ref), "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.BasicAuth": schema_pkg_apis_numaflow_v1alpha1_BasicAuth(ref), "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Blackhole": schema_pkg_apis_numaflow_v1alpha1_Blackhole(ref), "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.BufferServiceConfig": schema_pkg_apis_numaflow_v1alpha1_BufferServiceConfig(ref), @@ -91,6 +92,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.RedisBufferService": schema_pkg_apis_numaflow_v1alpha1_RedisBufferService(ref), "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.RedisConfig": schema_pkg_apis_numaflow_v1alpha1_RedisConfig(ref), "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.RedisSettings": schema_pkg_apis_numaflow_v1alpha1_RedisSettings(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.RetryStrategy": schema_pkg_apis_numaflow_v1alpha1_RetryStrategy(ref), 
"github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.SASL": schema_pkg_apis_numaflow_v1alpha1_SASL(ref), "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.SASLPlain": schema_pkg_apis_numaflow_v1alpha1_SASLPlain(ref), "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Scale": schema_pkg_apis_numaflow_v1alpha1_Scale(ref), @@ -608,6 +610,34 @@ func schema_pkg_apis_numaflow_v1alpha1_Authorization(ref common.ReferenceCallbac } } +func schema_pkg_apis_numaflow_v1alpha1_Backoff(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Backoff defines parameters used to systematically configure the retry strategy.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "interval": { + SchemaProps: spec.SchemaProps{ + Description: "Interval sets the delay to wait before retry, after a failure occurs.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, + "steps": { + SchemaProps: spec.SchemaProps{ + Description: "Steps defines the number of times to try writing to a sink including retries", + Type: []string{"integer"}, + Format: "int64", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, + } +} + func schema_pkg_apis_numaflow_v1alpha1_BasicAuth(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -4202,6 +4232,34 @@ func schema_pkg_apis_numaflow_v1alpha1_RedisSettings(ref common.ReferenceCallbac } } +func schema_pkg_apis_numaflow_v1alpha1_RetryStrategy(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "RetryStrategy struct encapsulates the settings for retrying operations in the event of failures. 
It includes a BackOff strategy to manage the timing of retries and defines the action to take upon failure.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "backoff": { + SchemaProps: spec.SchemaProps{ + Description: "BackOff specifies the parameters for the backoff strategy, controlling how delays between retries should increase.", + Ref: ref("github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Backoff"), + }, + }, + "onFailure": { + SchemaProps: spec.SchemaProps{ + Description: "OnFailure specifies the action to take when a retry fails. The default action is to retry.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Backoff"}, + } +} + func schema_pkg_apis_numaflow_v1alpha1_SASL(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -4748,11 +4806,18 @@ func schema_pkg_apis_numaflow_v1alpha1_Sink(ref common.ReferenceCallback) common Ref: ref("github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.AbstractSink"), }, }, + "retryStrategy": { + SchemaProps: spec.SchemaProps{ + Description: "RetryStrategy struct encapsulates the settings for retrying operations in the event of failures.", + Default: map[string]interface{}{}, + Ref: ref("github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.RetryStrategy"), + }, + }, }, }, }, Dependencies: []string{ - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.AbstractSink", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Blackhole", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.KafkaSink", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Log", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.UDSink"}, + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.AbstractSink", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Blackhole", 
"github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.KafkaSink", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Log", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.RetryStrategy", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.UDSink"}, } } diff --git a/pkg/apis/numaflow/v1alpha1/retry_strategy.go b/pkg/apis/numaflow/v1alpha1/retry_strategy.go new file mode 100644 index 0000000000..12c9daab4b --- /dev/null +++ b/pkg/apis/numaflow/v1alpha1/retry_strategy.go @@ -0,0 +1,102 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" +) + +type OnFailureRetryStrategy string + +// Constants representing the possible actions that can be taken when a failure occurs during an operation. +const ( + OnFailureRetry OnFailureRetryStrategy = "retry" // Retry the operation. + OnFailureFallback OnFailureRetryStrategy = "fallback" // Reroute the operation to a fallback mechanism. + OnFailureDrop OnFailureRetryStrategy = "drop" // Drop the operation and perform no further action. +) + +// RetryStrategy struct encapsulates the settings for retrying operations in the event of failures. +// It includes a BackOff strategy to manage the timing of retries and defines the action to take upon failure. 
+type RetryStrategy struct { + // BackOff specifies the parameters for the backoff strategy, controlling how delays between retries should increase. + // +optional + BackOff *Backoff `json:"backoff,omitempty" protobuf:"bytes,1,opt,name=backoff"` + // OnFailure specifies the action to take when a retry fails. The default action is to retry. + // +optional + // +kubebuilder:default="retry" + OnFailure *OnFailureRetryStrategy `json:"onFailure,omitempty" protobuf:"bytes,2,opt,name=onFailure"` +} + +// Backoff defines parameters used to systematically configure the retry strategy. +type Backoff struct { + // Interval sets the delay to wait before retry, after a failure occurs. + // +kubebuilder:default="1ms" + // +optional + Interval *metav1.Duration `json:"interval,omitempty" protobuf:"bytes,1,opt,name=interval"` + // Steps defines the number of times to try writing to a sink including retries + // +optional + Steps *uint32 `json:"steps,omitempty" protobuf:"bytes,2,opt,name=steps"` + // TODO(Retry): Enable after we add support for exponential backoff + //// +optional + //Cap *metav1.Duration `json:"cap,omitempty" protobuf:"bytes,3,opt,name=cap"` + //// +optional + //Factor *floatstr `json:"factor,omitempty" protobuf:"bytes,2,opt,name=factor"` + //// +optional + //Jitter *floatstr `json:"jitter,omitempty" protobuf:"bytes,3,opt,name=jitter"` +} + +// GetBackoff constructs a wait.Backoff configuration using default values and optionally overrides +// these defaults with custom settings specified in the RetryStrategy. +func (r RetryStrategy) GetBackoff() wait.Backoff { + // Initialize the Backoff structure with default values. + wt := wait.Backoff{ + Duration: DefaultRetryInterval, + Steps: DefaultRetrySteps, + } + + // If a custom back-off configuration is present, check and substitute the respective parts. + if r.BackOff != nil { + // If a custom Interval is specified, override the default Duration. 
+ if r.BackOff.Interval != nil { + wt.Duration = r.BackOff.Interval.Duration + } + // If custom Steps are specified, override the default Steps. + if r.BackOff.Steps != nil { + wt.Steps = int(*r.BackOff.Steps) + } + } + + // Returns the fully configured Backoff structure, which is either default or overridden by custom settings. + return wt +} + +// GetOnFailureRetryStrategy retrieves the currently set strategy for handling failures upon retrying. +// This method uses a default strategy which can be overridden by a custom strategy defined in RetryStrategy. +func (r RetryStrategy) GetOnFailureRetryStrategy() OnFailureRetryStrategy { + // If the OnFailure is not defined initialize with the Default value + if r.OnFailure == nil { + return DefaultOnFailureRetryStrategy + } + switch *r.OnFailure { + case OnFailureRetry, OnFailureFallback, OnFailureDrop: + // If a custom on-failure behavior is specified + return *r.OnFailure + default: + return DefaultOnFailureRetryStrategy + } +} diff --git a/pkg/apis/numaflow/v1alpha1/retry_strategy_test.go b/pkg/apis/numaflow/v1alpha1/retry_strategy_test.go new file mode 100644 index 0000000000..e8968b9d36 --- /dev/null +++ b/pkg/apis/numaflow/v1alpha1/retry_strategy_test.go @@ -0,0 +1,115 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "testing" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" +) + +func TestGetBackoff(t *testing.T) { + steps := uint32(10) + tests := []struct { + name string + strategy RetryStrategy + expectedBackoff wait.Backoff + steps uint32 + }{ + { + name: "default backoff", + strategy: RetryStrategy{}, + expectedBackoff: wait.Backoff{ + Duration: DefaultRetryInterval, + Steps: DefaultRetrySteps, + }, + }, + { + name: "custom backoff", + strategy: RetryStrategy{ + BackOff: &Backoff{ + Interval: &metav1.Duration{Duration: 10 * time.Second}, + Steps: &steps, + }, + }, + expectedBackoff: wait.Backoff{ + Duration: 10 * time.Second, + Steps: 10, + }, + }, + { + name: "custom backoff - 2", + strategy: RetryStrategy{ + BackOff: &Backoff{ + Interval: &metav1.Duration{Duration: 10 * time.Second}, + }, + }, + expectedBackoff: wait.Backoff{ + Duration: 10 * time.Second, + Steps: DefaultRetrySteps, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := tt.strategy.GetBackoff() + if got.Duration != tt.expectedBackoff.Duration || got.Steps != tt.expectedBackoff.Steps { + t.Errorf("GetBackoff() = %v, want %v", got, tt.expectedBackoff) + } + }) + } +} + +func TestGetOnFailureRetryStrategy(t *testing.T) { + tests := []struct { + name string + strategy RetryStrategy + expectedOnFailure OnFailureRetryStrategy + }{ + { + name: "default strategy", + strategy: RetryStrategy{}, + expectedOnFailure: DefaultOnFailureRetryStrategy, + }, + { + name: "custom strategy", + strategy: RetryStrategy{ + OnFailure: func() *OnFailureRetryStrategy { s := OnFailureDrop; return &s }(), + }, + expectedOnFailure: OnFailureDrop, + }, + { + name: "incorrect strategy - use default", + strategy: RetryStrategy{ + OnFailure: func() *OnFailureRetryStrategy { s := "xxxx"; return (*OnFailureRetryStrategy)(&s) }(), + }, + expectedOnFailure: DefaultOnFailureRetryStrategy, + }, + } + + for _, tt := range 
tests { + t.Run(tt.name, func(t *testing.T) { + got := tt.strategy.GetOnFailureRetryStrategy() + if got != tt.expectedOnFailure { + t.Errorf("GetOnFailureRetryStrategy() = %v, want %v", got, tt.expectedOnFailure) + } + }) + } +} diff --git a/pkg/apis/numaflow/v1alpha1/sink.go b/pkg/apis/numaflow/v1alpha1/sink.go index 8530259214..d596a079e6 100644 --- a/pkg/apis/numaflow/v1alpha1/sink.go +++ b/pkg/apis/numaflow/v1alpha1/sink.go @@ -27,6 +27,9 @@ type Sink struct { // initiated if the ud-sink response field sets it. // +optional Fallback *AbstractSink `json:"fallback,omitempty" protobuf:"bytes,2,opt,name=fallback"` + // RetryStrategy struct encapsulates the settings for retrying operations in the event of failures. + // +optional + RetryStrategy RetryStrategy `json:"retryStrategy,omitempty" protobuf:"bytes,3,opt,name=retryStrategy"` } type AbstractSink struct { diff --git a/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go index a5ee6d9b6f..6033c0302f 100644 --- a/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go @@ -245,6 +245,32 @@ func (in *Authorization) DeepCopy() *Authorization { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Backoff) DeepCopyInto(out *Backoff) { + *out = *in + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(metav1.Duration) + **out = **in + } + if in.Steps != nil { + in, out := &in.Steps, &out.Steps + *out = new(uint32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Backoff. +func (in *Backoff) DeepCopy() *Backoff { + if in == nil { + return nil + } + out := new(Backoff) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *BasicAuth) DeepCopyInto(out *BasicAuth) { *out = *in @@ -1957,6 +1983,32 @@ func (in *RedisSettings) DeepCopy() *RedisSettings { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetryStrategy) DeepCopyInto(out *RetryStrategy) { + *out = *in + if in.BackOff != nil { + in, out := &in.BackOff, &out.BackOff + *out = new(Backoff) + (*in).DeepCopyInto(*out) + } + if in.OnFailure != nil { + in, out := &in.OnFailure, &out.OnFailure + *out = new(OnFailureRetryStrategy) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetryStrategy. +func (in *RetryStrategy) DeepCopy() *RetryStrategy { + if in == nil { + return nil + } + out := new(RetryStrategy) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SASL) DeepCopyInto(out *SASL) { *out = *in @@ -2263,6 +2315,7 @@ func (in *Sink) DeepCopyInto(out *Sink) { *out = new(AbstractSink) (*in).DeepCopyInto(*out) } + in.RetryStrategy.DeepCopyInto(&out.RetryStrategy) return } diff --git a/pkg/metrics/metrics.go b/pkg/metrics/metrics.go index f4e394004e..95ce17b984 100644 --- a/pkg/metrics/metrics.go +++ b/pkg/metrics/metrics.go @@ -86,7 +86,7 @@ var ( Help: "Total number of bytes written", }, []string{LabelVertex, LabelPipeline, LabelVertexType, LabelVertexReplicaIndex, LabelPartitionName}) - // WriteMessagesError is used to indicate the number of errors messages written + // WriteMessagesError is used to indicate the number of errors encountered while writing messages WriteMessagesError = promauto.NewCounterVec(prometheus.CounterOpts{ Subsystem: "forwarder", Name: "write_error_total", @@ -298,3 +298,27 @@ var ( Help: "Total number of ctrl Messages sent", }, []string{LabelVertex, LabelPipeline, LabelVertexType, LabelVertexReplicaIndex, 
LabelPartitionName}) ) + +// Sink forwarder metrics +var ( + // FbSinkWriteMessagesCount is used to indicate the number of messages written to a fallback sink + FbSinkWriteMessagesCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Subsystem: "forwarder", + Name: "fbsink_write_total", + Help: "Total number of Messages written to a fallback sink", + }, []string{LabelVertex, LabelPipeline, LabelVertexType, LabelVertexReplicaIndex, LabelPartitionName}) + + // FbSinkWriteBytesCount is used to indicate the number of bytes written to a fallback sink + FbSinkWriteBytesCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Subsystem: "forwarder", + Name: "fbsink_write_bytes_total", + Help: "Total number of bytes written to a fallback sink", + }, []string{LabelVertex, LabelPipeline, LabelVertexType, LabelVertexReplicaIndex, LabelPartitionName}) + + // FbSinkWriteMessagesError is used to indicate the number of errors while writing to a fallback sink + FbSinkWriteMessagesError = promauto.NewCounterVec(prometheus.CounterOpts{ + Subsystem: "forwarder", + Name: "fbsink_write_error_total", + Help: "Total number of Write Errors while writing to a fallback sink", + }, []string{LabelVertex, LabelPipeline, LabelVertexType, LabelVertexReplicaIndex, LabelPartitionName}) +) diff --git a/pkg/reconciler/pipeline/validate.go b/pkg/reconciler/pipeline/validate.go index 0fc443b6b5..2a98c2e665 100644 --- a/pkg/reconciler/pipeline/validate.go +++ b/pkg/reconciler/pipeline/validate.go @@ -272,6 +272,10 @@ func validateVertex(v dfv1.AbstractVertex) error { if v.UDF != nil { return validateUDF(*v.UDF) } + + if v.Sink != nil { + return validateSink(*v.Sink) + } return nil } @@ -553,3 +557,33 @@ func buildVisitedMap(vtxName string, visited map[string]struct{}, pl *dfv1.Pipel } } + +// validateSink initiates the validation of the sink spec for a pipeline +func validateSink(sink dfv1.Sink) error { + // check the sink's retry strategy validity. 
+ if ok := HasValidSinkRetryStrategy(sink); !ok { + return fmt.Errorf("given OnFailure strategy is fallback but fallback sink is not provided") + } + return nil +} + +// HasValidSinkRetryStrategy checks if the provided RetryStrategy is valid based on the sink's configuration. +// This validation ensures that the retry strategy is compatible with the sink's current setup +func HasValidSinkRetryStrategy(s dfv1.Sink) bool { + // If the OnFailure strategy is set to fallback, but no fallback sink is provided in the Sink struct, + // we return an error + if s.RetryStrategy.OnFailure != nil && *s.RetryStrategy.OnFailure == dfv1.OnFailureFallback && !hasValidFallbackSink(&s) { + return false + } + // If steps are provided in the strategy they cannot be 0, as we do not allow no tries for writing + if s.RetryStrategy.BackOff != nil && s.RetryStrategy.BackOff.Steps != nil && *s.RetryStrategy.BackOff.Steps == 0 { + return false + } + // If no errors are found, the function returns true indicating the validation passed. + return true +} + +// hasValidFallbackSink checks if the Sink vertex has a valid fallback sink configured +func hasValidFallbackSink(s *dfv1.Sink) bool { + return s.Fallback != nil && s.Fallback.UDSink != nil +} diff --git a/pkg/reconciler/pipeline/validate_test.go b/pkg/reconciler/pipeline/validate_test.go index 486b36c9be..f116e04825 100644 --- a/pkg/reconciler/pipeline/validate_test.go +++ b/pkg/reconciler/pipeline/validate_test.go @@ -1056,3 +1056,125 @@ func Test_validateIdleSource(t *testing.T) { assert.Error(t, err) assert.Contains(t, err.Error(), `invalid idle source watermark config, threshold should be greater than or equal to incrementBy`) } + +// TestValidateSink tests the validateSink function with different sink configurations. 
+func TestValidateSink(t *testing.T) { + onFailFallback := dfv1.OnFailureFallback + tests := []struct { + name string + sink dfv1.Sink + expectedError bool + }{ + { + name: "Valid configuration without needing fallback", + sink: dfv1.Sink{ + RetryStrategy: dfv1.RetryStrategy{OnFailure: nil}, + }, + expectedError: false, + }, + { + name: "Valid configuration with valid fallback", + sink: dfv1.Sink{ + RetryStrategy: dfv1.RetryStrategy{OnFailure: &onFailFallback}, + // represents a valid fallback sink + Fallback: &dfv1.AbstractSink{ + UDSink: &dfv1.UDSink{}, + }, + }, + expectedError: false, + }, + { + name: "Valid configuration with invalid fallback - no UDSink", + sink: dfv1.Sink{ + RetryStrategy: dfv1.RetryStrategy{OnFailure: &onFailFallback}, + Fallback: &dfv1.AbstractSink{}, // represents a valid fallback sink + }, + expectedError: true, + }, + { + name: "Invalid configuration, fallback needed but not provided", + sink: dfv1.Sink{ + RetryStrategy: dfv1.RetryStrategy{OnFailure: &onFailFallback}, + Fallback: nil, + }, + expectedError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Run the validation function + err := validateSink(tt.sink) + // Check if an error was expected or not + if (err != nil) != tt.expectedError { + t.Errorf("%s: validateSink() error = %v, wantErr %v", tt.name, err, tt.expectedError) + } + }) + } +} + +func TestIsValidSinkRetryStrategy(t *testing.T) { + zeroSteps := uint32(0) + tests := []struct { + name string + sink dfv1.Sink + strategy dfv1.RetryStrategy + wantErr bool + }{ + { + name: "valid strategy with fallback configured", + sink: dfv1.Sink{Fallback: &dfv1.AbstractSink{ + UDSink: &dfv1.UDSink{}, + }}, + strategy: dfv1.RetryStrategy{ + OnFailure: func() *dfv1.OnFailureRetryStrategy { str := dfv1.OnFailureFallback; return &str }(), + }, + wantErr: false, + }, + { + name: "invalid valid strategy with fallback not configured properly", + sink: dfv1.Sink{Fallback: &dfv1.AbstractSink{}}, + strategy: 
dfv1.RetryStrategy{ + OnFailure: func() *dfv1.OnFailureRetryStrategy { str := dfv1.OnFailureFallback; return &str }(), + }, + wantErr: true, + }, + { + name: "invalid strategy with no fallback configured", + sink: dfv1.Sink{}, + strategy: dfv1.RetryStrategy{ + OnFailure: func() *dfv1.OnFailureRetryStrategy { str := dfv1.OnFailureFallback; return &str }(), + }, + wantErr: true, + }, + { + name: "valid strategy with drop and no fallback needed", + sink: dfv1.Sink{}, + strategy: dfv1.RetryStrategy{ + OnFailure: func() *dfv1.OnFailureRetryStrategy { str := dfv1.OnFailureDrop; return &str }(), + }, + wantErr: false, + }, + { + name: "invalid strategy with 0 steps", + sink: dfv1.Sink{}, + strategy: dfv1.RetryStrategy{ + BackOff: &dfv1.Backoff{ + Steps: &zeroSteps, + }, + OnFailure: func() *dfv1.OnFailureRetryStrategy { str := dfv1.OnFailureDrop; return &str }(), + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.sink.RetryStrategy = tt.strategy + ok := HasValidSinkRetryStrategy(tt.sink) + if (!ok) != tt.wantErr { + t.Errorf("isValidSinkRetryStrategy() got = %v, want %v", ok, tt.wantErr) + } + }) + } +} diff --git a/pkg/sinks/forward/forward.go b/pkg/sinks/forward/forward.go index acddb40567..ad53b5fa51 100644 --- a/pkg/sinks/forward/forward.go +++ b/pkg/sinks/forward/forward.go @@ -54,6 +54,7 @@ type DataForward struct { vertexName string pipelineName string vertexReplica int32 + sinkRetryStrategy dfv1.RetryStrategy // idleManager manages the idle watermark status. idleManager wmb.IdleManager // wmbChecker checks if the idle watermark is valid. @@ -98,6 +99,10 @@ func NewDataForward( }, opts: *dOpts, } + // add the sink retry strategy to the forward + if vertexInstance.Vertex.Spec.Sink != nil { + df.sinkRetryStrategy = vertexInstance.Vertex.Spec.Sink.RetryStrategy + } // Add logger from parent ctx to child context. 
df.ctx = logging.WithLogger(ctx, dOpts.logger) @@ -357,82 +362,184 @@ func (df *DataForward) ackFromBuffer(ctx context.Context, offsets []isb.Offset) return ctxClosedErr } -// writeToSink forwards an array of messages to a sink and it is a blocking call it keeps retrying until shutdown has been initiated. -func (df *DataForward) writeToSink(ctx context.Context, sinkWriter sinker.SinkWriter, messages []isb.Message, isFbSinkWriter bool) ([]isb.Offset, []isb.Message, error) { +// writeToSink forwards an array of messages to a sink and it is a blocking call it keeps retrying +// until shutdown has been initiated. The function also evaluates whether to use a fallback sink based +// on the error and configuration. +func (df *DataForward) writeToSink(ctx context.Context, sinkWriter sinker.SinkWriter, messagesToTry []isb.Message, isFbSinkWriter bool) ([]isb.Offset, []isb.Message, error) { var ( - err error - writeCount int - writeBytes float64 + err error + writeCount int + writeBytes float64 + fallbackMessages []isb.Message ) - writeOffsets := make([]isb.Offset, 0, len(messages)) - var fallbackMessages []isb.Message + // slice to store the successful offsets returned by the sink + writeOffsets := make([]isb.Offset, 0, len(messagesToTry)) + + // extract the backOff conditions and failStrategy for the retry logic, + // when the isFbSinkWriter is true, we use an infinite retry + backoffCond, failStrategy := df.getBackOffConditions(isFbSinkWriter) + // The loop will continue trying to write messages until they are all processed + // or an unrecoverable error occurs. for { - _writeOffsets, errs := sinkWriter.Write(ctx, messages) - // Note: this is an unwanted memory allocation during a happy path. We want only minimal allocation since using failedMessages is an unlikely path. 
- var failedMessages []isb.Message - needRetry := false - for idx, msg := range messages { - - if err = errs[idx]; err != nil { - // if we are asked to write to fallback sink, check if the fallback sink is configured, - // and we are not already in the fallback sink write path. - if errors.Is(err, &udsink.WriteToFallbackErr) && df.opts.fbSinkWriter != nil && !isFbSinkWriter { - fallbackMessages = append(fallbackMessages, msg) - continue - } + err = wait.ExponentialBackoffWithContext(ctx, backoffCond, func(_ context.Context) (done bool, err error) { + // Note: this is an unwanted memory allocation during a happy path. We want only minimal allocation + // since using failedMessages is an unlikely path. + var failedMessages []isb.Message + needRetry := false + _writeOffsets, errs := sinkWriter.Write(ctx, messagesToTry) + for idx, msg := range messagesToTry { + if err = errs[idx]; err != nil { + // if we are asked to write to fallback sink, check if the fallback sink is configured, + // and we are not already in the fallback sink write path. 
+ if errors.Is(err, &udsink.WriteToFallbackErr) && df.opts.fbSinkWriter != nil && !isFbSinkWriter { + fallbackMessages = append(fallbackMessages, msg) + continue + } - // if we are asked to write to fallback but no fallback sink is configured, we will retry the messages to the same sink - if errors.Is(err, &udsink.WriteToFallbackErr) && df.opts.fbSinkWriter == nil { - df.opts.logger.Error("Asked to write to fallback but no fallback sink is configured, retrying the message to the same sink") - } + // if we are asked to write to fallback but no fallback sink is configured, we will retry the messages to the same sink + if errors.Is(err, &udsink.WriteToFallbackErr) && df.opts.fbSinkWriter == nil { + df.opts.logger.Error("Asked to write to fallback but no fallback sink is configured, retrying the message to the same sink") + } - // if we are asked to write to fallback sink inside the fallback sink, we will retry the messages to the fallback sink - if errors.Is(err, &udsink.WriteToFallbackErr) && isFbSinkWriter { - df.opts.logger.Error("Asked to write to fallback sink inside the fallback sink, retrying the message to fallback sink") - } + // if we are asked to write to fallback sink inside the fallback sink, we will retry the messages to the fallback sink + if errors.Is(err, &udsink.WriteToFallbackErr) && isFbSinkWriter { + df.opts.logger.Error("Asked to write to fallback sink inside the fallback sink, retrying the message to fallback sink") + } - needRetry = true - // we retry only failed messages - failedMessages = append(failedMessages, msg) - metrics.WriteMessagesError.With(map[string]string{metrics.LabelVertex: df.vertexName, metrics.LabelPipeline: df.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeSink), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), metrics.LabelPartitionName: sinkWriter.GetName()}).Inc() - // a shutdown can break the blocking loop caused due to InternalErr - if ok, _ := df.IsShuttingDown(); ok { - 
metrics.PlatformError.With(map[string]string{metrics.LabelVertex: df.vertexName, metrics.LabelPipeline: df.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeSink), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica))}).Inc() - return nil, nil, fmt.Errorf("writeToSink failed, Stop called while stuck on an internal error with failed messages:%d, %v", len(failedMessages), errs) - } - } else { - writeCount++ - writeBytes += float64(len(msg.Payload)) - // we support write offsets only for jetstream - if _writeOffsets != nil { - writeOffsets = append(writeOffsets, _writeOffsets[idx]) + needRetry = true + + // TODO(Retry-Sink) : Propagate the retry-count? + // we retry only failed message + failedMessages = append(failedMessages, msg) + + // increment the error metric + df.incrementErrorMetric(sinkWriter.GetName(), isFbSinkWriter) + + // a shutdown can break the blocking loop caused due to InternalErr + if ok, _ := df.IsShuttingDown(); ok { + metrics.PlatformError.With(map[string]string{metrics.LabelVertex: df.vertexName, metrics.LabelPipeline: df.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeSink), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica))}).Inc() + return true, fmt.Errorf("writeToSink failed, Stop called while stuck on an internal error with failed messages:%d, %v", len(failedMessages), errs) + } + } else { + writeCount++ + writeBytes += float64(len(msg.Payload)) + // we support write offsets only for jetstream + if _writeOffsets != nil { + writeOffsets = append(writeOffsets, _writeOffsets[idx]) + } } } + // set messages to failedMessages, in case of success this should be empty + // While checking for retry we see the length of the messages left + messagesToTry = failedMessages + if needRetry { + df.opts.logger.Errorw("Retrying failed messages", + zap.Any("errors", errorArrayToMap(errs)), + zap.String(metrics.LabelPipeline, df.pipelineName), + zap.String(metrics.LabelVertex, df.vertexName), + 
zap.String(metrics.LabelPartitionName, sinkWriter.GetName()), + ) + return false, nil + } + return true, nil + }) + // If we exited out of the loop and it was due to a forced shutdown we should exit + // TODO(Retry-Sink): Check for ctx done separately? That should be covered in shutdown + if ok, _ := df.IsShuttingDown(); err != nil && ok { + return nil, nil, err } - - if needRetry { - df.opts.logger.Errorw("Retrying failed messages", - zap.Any("errors", errorArrayToMap(errs)), - zap.String(metrics.LabelPipeline, df.pipelineName), - zap.String(metrics.LabelVertex, df.vertexName), - zap.String(metrics.LabelPartitionName, sinkWriter.GetName()), - ) - // set messages to failed for the retry - messages = failedMessages - // TODO: implement retry with backoff etc. - time.Sleep(df.opts.retryInterval) - } else { + // Check what actions are required once the writing loop is completed + // Break if no further action is required + if !df.handlePostRetryFailures(&messagesToTry, failStrategy, &fallbackMessages, sinkWriter) { break } } - - metrics.WriteMessagesCount.With(map[string]string{metrics.LabelVertex: df.vertexName, metrics.LabelPipeline: df.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeSink), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), metrics.LabelPartitionName: sinkWriter.GetName()}).Add(float64(writeCount)) - metrics.WriteBytesCount.With(map[string]string{metrics.LabelVertex: df.vertexName, metrics.LabelPipeline: df.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeSink), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), metrics.LabelPartitionName: sinkWriter.GetName()}).Add(writeBytes) + // update the write metrics for sink + df.updateSinkWriteMetrics(writeCount, writeBytes, sinkWriter.GetName(), isFbSinkWriter) return writeOffsets, fallbackMessages, nil } +// handlePostRetryFailures deals with the scenarios after retries are exhausted. 
+// It returns true if we need to continue retrying else returns false when no further writes are required +func (df *DataForward) handlePostRetryFailures(messagesToTry *[]isb.Message, failStrategy dfv1.OnFailureRetryStrategy, fallbackMessages *[]isb.Message, + sinkWriter sinker.SinkWriter) bool { + + // Check if we still have messages left to be processed + if len(*messagesToTry) > 0 { + + df.opts.logger.Infof("Retries exhausted in sink, messagesLeft %d, Next strategy %s", + len(*messagesToTry), failStrategy) + + // Check what is the failure strategy to be followed after retry exhaustion + switch failStrategy { + case dfv1.OnFailureRetry: + // If on failure, we keep on retrying then lets continue the loop and try all again + return true + case dfv1.OnFailureFallback: + // If onFail we have to divert messages to fallback, lets add all failed messages to fallback slice + *fallbackMessages = append(*fallbackMessages, *messagesToTry...) + case dfv1.OnFailureDrop: + // If on fail we want to Drop in that case lets not retry further + df.opts.logger.Info("Dropping the failed messages after retry in the Sink") + // Update the drop metric count with the messages left + metrics.DropMessagesCount.With(map[string]string{ + metrics.LabelVertex: df.vertexName, + metrics.LabelPipeline: df.pipelineName, + metrics.LabelVertexType: string(dfv1.VertexTypeSink), + metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), + metrics.LabelPartitionName: sinkWriter.GetName(), + metrics.LabelReason: "retries exhausted in the Sink", + }).Add(float64(len(*messagesToTry))) + } + } + return false +} + +// updateSinkWriteMetrics updates metrics related to data writes to a sink. +// Metrics are updated based on whether the operation involves the primary or fallback sink. 
+func (df *DataForward) updateSinkWriteMetrics(writeCount int, writeBytes float64, sinkWriterName string, isFallback bool) { + // Define labels to keep track of the data related to the specific operation + labels := map[string]string{ + metrics.LabelVertex: df.vertexName, + metrics.LabelPipeline: df.pipelineName, + metrics.LabelVertexType: string(dfv1.VertexTypeSink), + metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), + metrics.LabelPartitionName: sinkWriterName, + } + + // Increment the metrics for message count and bytes + metrics.WriteMessagesCount.With(labels).Add(float64(writeCount)) + metrics.WriteBytesCount.With(labels).Add(writeBytes) + + // if this is for Fallback Sink, increment specific metrics as well + if isFallback { + metrics.FbSinkWriteMessagesCount.With(labels).Add(float64(writeCount)) + metrics.FbSinkWriteBytesCount.With(labels).Add(writeBytes) + } +} + +// incrementErrorMetric updates the appropriate error metric based on whether the operation involves a fallback sink. 
+func (df *DataForward) incrementErrorMetric(sinkWriter string, isFbSinkWriter bool) { + // Define labels to keep track of the data related to the specific operation + labels := map[string]string{ + metrics.LabelVertex: df.vertexName, + metrics.LabelPipeline: df.pipelineName, + metrics.LabelVertexType: string(dfv1.VertexTypeSink), + metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), + metrics.LabelPartitionName: sinkWriter, + } + + // Increment the selected metric and attach labels to provide detailed context: + metrics.WriteMessagesError.With(labels).Inc() + + // Increment fallback specific metric if fallback mode + if isFbSinkWriter { + metrics.FbSinkWriteMessagesError.With(labels).Inc() + } +} + // errorArrayToMap summarizes an error array to map func errorArrayToMap(errs []error) map[string]int64 { result := make(map[string]int64) @@ -443,3 +550,16 @@ func errorArrayToMap(errs []error) map[string]int64 { } return result } + +// getBackOffConditions configures the retry backoff strategy based on whether its a fallbackSink or primary sink. +func (df *DataForward) getBackOffConditions(isFallbackSink bool) (wait.Backoff, dfv1.OnFailureRetryStrategy) { + // If we want for isFallbackSink we will return an infinite retry which will keep retrying post exhaustion till it succeeds + if isFallbackSink { + return wait.Backoff{ + Duration: dfv1.DefaultRetryInterval, + Steps: dfv1.DefaultRetrySteps, + }, dfv1.OnFailureRetry + } + // Initial interval duration and number of retries are taken from DataForward settings. + return df.sinkRetryStrategy.GetBackoff(), df.sinkRetryStrategy.GetOnFailureRetryStrategy() +} diff --git a/pkg/sinks/forward/options.go b/pkg/sinks/forward/options.go index f18dbe2620..afb6d1f37b 100644 --- a/pkg/sinks/forward/options.go +++ b/pkg/sinks/forward/options.go @@ -17,8 +17,6 @@ limitations under the License. 
package forward import ( - "time" - "go.uber.org/zap" dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" @@ -33,8 +31,6 @@ type options struct { readBatchSize int64 // sinkConcurrency sets the concurrency for concurrent processing sinkConcurrency int - // retryInterval is the time.Duration to sleep before retrying - retryInterval time.Duration // fbSinkWriter is the writer for the fallback sink fbSinkWriter sinker.SinkWriter // logger is used to pass the logger variable @@ -49,7 +45,6 @@ func DefaultOptions() *options { return &options{ readBatchSize: dfv1.DefaultReadBatchSize, sinkConcurrency: dfv1.DefaultReadBatchSize, - retryInterval: time.Millisecond, logger: logging.NewLogger(), } } @@ -70,14 +65,6 @@ func WithSinkConcurrency(f int) Option { } } -// WithRetryInterval sets the retry interval -func WithRetryInterval(f time.Duration) Option { - return func(o *options) error { - o.retryInterval = time.Duration(f) - return nil - } -} - // WithLogger is used to return logger information func WithLogger(l *zap.SugaredLogger) Option { return func(o *options) error { diff --git a/rust/Cargo.lock b/rust/Cargo.lock index 4e3ba1085b..6b34934210 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -1473,6 +1473,7 @@ dependencies = [ "bytes", "chrono", "hyper-util", + "kube", "numaflow 0.1.0 (git+https://github.com/numaproj/numaflow-rs.git?branch=main)", "numaflow-models", "once_cell", diff --git a/rust/monovertex/Cargo.toml b/rust/monovertex/Cargo.toml index 00c51e6e9a..90ddc5cbec 100644 --- a/rust/monovertex/Cargo.toml +++ b/rust/monovertex/Cargo.toml @@ -33,6 +33,7 @@ pep440_rs = "0.6.6" backoff = { path = "../backoff" } parking_lot = "0.12.3" prometheus-client = "0.22.3" +kube = "0.93.1" [dev-dependencies] tempfile = "3.11.0" diff --git a/rust/monovertex/src/config.rs b/rust/monovertex/src/config.rs index 00f966b9bc..d1450500be 100644 --- a/rust/monovertex/src/config.rs +++ b/rust/monovertex/src/config.rs @@ -4,7 +4,7 @@ use std::sync::OnceLock; use 
base64::prelude::BASE64_STANDARD; use base64::Engine; -use numaflow_models::models::MonoVertex; +use numaflow_models::models::{Backoff, MonoVertex, RetryStrategy}; use crate::error::Error; @@ -17,8 +17,9 @@ const DEFAULT_LAG_CHECK_INTERVAL_IN_SECS: u16 = 5; const DEFAULT_LAG_REFRESH_INTERVAL_IN_SECS: u16 = 3; const DEFAULT_BATCH_SIZE: u64 = 500; const DEFAULT_TIMEOUT_IN_MS: u32 = 1000; -const DEFAULT_MAX_SINK_RETRY_ATTEMPTS: u16 = 10; +const DEFAULT_MAX_SINK_RETRY_ATTEMPTS: u16 = u16::MAX; const DEFAULT_SINK_RETRY_INTERVAL_IN_MS: u32 = 1; +const DEFAULT_SINK_RETRY_ON_FAIL_STRATEGY: &str = "retry"; pub fn config() -> &'static Settings { static CONF: OnceLock = OnceLock::new(); @@ -43,10 +44,22 @@ pub struct Settings { pub lag_refresh_interval_in_secs: u16, pub sink_max_retry_attempts: u16, pub sink_retry_interval_in_ms: u32, + pub sink_retry_on_fail_strategy: String, + pub sink_default_retry_strategy: RetryStrategy, } impl Default for Settings { fn default() -> Self { + // Create a default retry strategy from defined constants + let default_retry_strategy = RetryStrategy { + backoff: Option::from(Box::from(Backoff { + interval: Option::from(kube::core::Duration::from( + std::time::Duration::from_millis(DEFAULT_SINK_RETRY_INTERVAL_IN_MS as u64), + )), + steps: Option::from(DEFAULT_MAX_SINK_RETRY_ATTEMPTS as i64), + })), + on_failure: Option::from(DEFAULT_SINK_RETRY_ON_FAIL_STRATEGY.to_string()), + }; Self { mono_vertex_name: "default".to_string(), replica: 0, @@ -60,6 +73,8 @@ impl Default for Settings { lag_refresh_interval_in_secs: DEFAULT_LAG_REFRESH_INTERVAL_IN_SECS, sink_max_retry_attempts: DEFAULT_MAX_SINK_RETRY_ATTEMPTS, sink_retry_interval_in_ms: DEFAULT_SINK_RETRY_INTERVAL_IN_MS, + sink_retry_on_fail_strategy: DEFAULT_SINK_RETRY_ON_FAIL_STRATEGY.to_string(), + sink_default_retry_strategy: default_retry_strategy, } } } @@ -113,9 +128,56 @@ impl Settings { settings.is_fallback_enabled = mono_vertex_obj .spec .sink + .as_deref() .ok_or(Error::ConfigError("Sink 
not found".to_string()))? .fallback .is_some(); + + if let Some(retry_strategy) = mono_vertex_obj + .spec + .sink + .expect("sink should not be empty") + .retry_strategy + { + if let Some(sink_backoff) = retry_strategy.clone().backoff { + // Set the max retry attempts and retry interval using direct reference + settings.sink_retry_interval_in_ms = sink_backoff + .clone() + .interval + .map(|x| std::time::Duration::from(x).as_millis() as u32) + .unwrap_or(DEFAULT_SINK_RETRY_INTERVAL_IN_MS); + + settings.sink_max_retry_attempts = sink_backoff + .clone() + .steps + .map(|x| x as u16) + .unwrap_or(DEFAULT_MAX_SINK_RETRY_ATTEMPTS); + + // We do not allow 0 attempts to write to sink + if settings.sink_max_retry_attempts == 0 { + return Err(Error::ConfigError( + "Retry Strategy given with 0 retry attempts".to_string(), + )); + } + } + + // Set the retry strategy using a direct reference whenever possible + settings.sink_retry_on_fail_strategy = retry_strategy + .on_failure + .clone() + .unwrap_or_else(|| DEFAULT_SINK_RETRY_ON_FAIL_STRATEGY.to_string()); + + // check if the sink retry strategy is set to fallback and there is no fallback sink configured + // then we should return an error + if settings.sink_retry_on_fail_strategy == "fallback" + && !settings.is_fallback_enabled + { + return Err(Error::ConfigError( + "Retry Strategy given as fallback but Fallback sink not configured" + .to_string(), + )); + } + } } settings.grpc_max_message_size = env::var(ENV_GRPC_MAX_MESSAGE_SIZE) @@ -136,31 +198,213 @@ impl Settings { #[cfg(test)] mod tests { - use std::env; - use super::*; + use serde_json::json; + use std::env; #[test] - fn test_settings_load() { - // Set up environment variables - unsafe { - env::set_var(ENV_MONO_VERTEX_OBJ, 
"eyJtZXRhZGF0YSI6eyJuYW1lIjoic2ltcGxlLW1vbm8tdmVydGV4IiwibmFtZXNwYWNlIjoiZGVmYXVsdCIsImNyZWF0aW9uVGltZXN0YW1wIjpudWxsfSwic3BlYyI6eyJyZXBsaWNhcyI6MCwic291cmNlIjp7InRyYW5zZm9ybWVyIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6InF1YXkuaW8vbnVtYWlvL251bWFmbG93LXJzL21hcHQtZXZlbnQtdGltZS1maWx0ZXI6c3RhYmxlIiwicmVzb3VyY2VzIjp7fX0sImJ1aWx0aW4iOm51bGx9LCJ1ZHNvdXJjZSI6eyJjb250YWluZXIiOnsiaW1hZ2UiOiJkb2NrZXIuaW50dWl0LmNvbS9wZXJzb25hbC95aGwwMS9zaW1wbGUtc291cmNlOnN0YWJsZSIsInJlc291cmNlcyI6e319fX0sInNpbmsiOnsidWRzaW5rIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6ImRvY2tlci5pbnR1aXQuY29tL3BlcnNvbmFsL3lobDAxL2JsYWNraG9sZS1zaW5rOnN0YWJsZSIsInJlc291cmNlcyI6e319fX0sImxpbWl0cyI6eyJyZWFkQmF0Y2hTaXplIjo1MDAsInJlYWRUaW1lb3V0IjoiMXMifSwic2NhbGUiOnt9fSwic3RhdHVzIjp7InJlcGxpY2FzIjowLCJsYXN0VXBkYXRlZCI6bnVsbCwibGFzdFNjYWxlZEF0IjpudWxsfX0="); - env::set_var(ENV_GRPC_MAX_MESSAGE_SIZE, "128000000"); - }; + fn test_settings_load_combined() { + // Define all JSON test configurations in separate scopes to use them distinctively + { + let json_data = json!({ + "metadata": { + "name": "simple-mono-vertex", + "namespace": "default", + "creationTimestamp": null + }, + "spec": { + "replicas": 0, + "source": { + "transformer": { + "container": { + "image": "xxxxxxx", + "resources": {} + }, + "builtin": null + }, + "udsource": { + "container": { + "image": "xxxxxxx", + "resources": {} + } + } + }, + "sink": { + "udsink": { + "container": { + "image": "xxxxxx", + "resources": {} + } + } + }, + "limits": { + "readBatchSize": 500, + "readTimeout": "1s" + }, + "scale": {}, + "status": { + "replicas": 0, + "lastUpdated": null, + "lastScaledAt": null + } + } + }); + let json_str = json_data.to_string(); + let encoded_json = BASE64_STANDARD.encode(json_str); + env::set_var(ENV_MONO_VERTEX_OBJ, encoded_json); - // Load settings - let settings = Settings::load().unwrap(); + // Execute and verify + let settings = Settings::load().unwrap(); + assert_eq!(settings.mono_vertex_name, "simple-mono-vertex"); + env::remove_var(ENV_MONO_VERTEX_OBJ); + 
} - // Verify settings - assert_eq!(settings.mono_vertex_name, "simple-mono-vertex"); - assert_eq!(settings.batch_size, 500); - assert_eq!(settings.timeout_in_ms, 1000); - assert_eq!(settings.grpc_max_message_size, 128000000); + { + // Test Retry Strategy Load + let json_data = json!({ + "metadata": { + "name": "simple-mono-vertex", + "namespace": "default", + "creationTimestamp": null + }, + "spec": { + "replicas": 0, + "source": { + "udsource": { + "container": { + "image": "xxxxxxx", + "resources": {} + } + } + }, + "sink": { + "udsink": { + "container": { + "image": "xxxxxx", + "resources": {} + } + }, + "retryStrategy": { + "backoff": { + "interval": "1s", + "steps": 5 + }, + }, + }, + "limits": { + "readBatchSize": 500, + "readTimeout": "1s" + }, + } + }); + let json_str = json_data.to_string(); + let encoded_json = BASE64_STANDARD.encode(json_str); + env::set_var(ENV_MONO_VERTEX_OBJ, encoded_json); - // Clean up environment variables - unsafe { + // Execute and verify + let settings = Settings::load().unwrap(); + assert_eq!(settings.sink_retry_on_fail_strategy, "retry"); + assert_eq!(settings.sink_max_retry_attempts, 5); + assert_eq!(settings.sink_retry_interval_in_ms, 1000); env::remove_var(ENV_MONO_VERTEX_OBJ); - env::remove_var(ENV_GRPC_MAX_MESSAGE_SIZE); - }; + } + + { + // Test Error Case: Retry Strategy Fallback without Fallback Sink + let json_data = json!({ + "metadata": { + "name": "simple-mono-vertex", + "namespace": "default", + "creationTimestamp": null + }, + "spec": { + "replicas": 0, + "source": { + "udsource": { + "container": { + "image": "xxxxxxx", + "resources": {} + } + } + }, + "sink": { + "udsink": { + "container": { + "image": "xxxxxx", + "resources": {} + } + }, + "retryStrategy": { + "backoff": { + "interval": "1s", + "steps": 5 + }, + "onFailure": "fallback" + }, + }, + "limits": { + "readBatchSize": 500, + "readTimeout": "1s" + }, + } + }); + let json_str = json_data.to_string(); + let encoded_json = 
BASE64_STANDARD.encode(json_str); + env::set_var(ENV_MONO_VERTEX_OBJ, encoded_json); + + // Execute and verify + assert!(Settings::load().is_err()); + env::remove_var(ENV_MONO_VERTEX_OBJ); + } + + { + // Test Error Case: Retry Strategy with 0 Retry Attempts + let json_data = json!({ + "metadata": { + "name": "simple-mono-vertex", + "namespace": "default", + "creationTimestamp": null + }, + "spec": { + "replicas": 0, + "source": { + "udsource": { + "container": { + "image": "xxxxxxx", + "resources": {} + } + } + }, + "sink": { + "udsink": { + "container": { + "image": "xxxxxx", + "resources": {} + } + }, + "retryStrategy": { + "backoff": { + "interval": "1s", + "steps": 0 + }, + "onFailure": "retry" + }, + }, + "limits": { + "readBatchSize": 500, + "readTimeout": "1s" + }, + } + }); + let json_str = json_data.to_string(); + let encoded_json = BASE64_STANDARD.encode(json_str); + env::set_var(ENV_MONO_VERTEX_OBJ, encoded_json); + + // Execute and verify + assert!(Settings::load().is_err()); + env::remove_var(ENV_MONO_VERTEX_OBJ); + } + // General cleanup + env::remove_var(ENV_GRPC_MAX_MESSAGE_SIZE); } } diff --git a/rust/monovertex/src/forwarder.rs b/rust/monovertex/src/forwarder.rs index 8efe16c844..0f55268576 100644 --- a/rust/monovertex/src/forwarder.rs +++ b/rust/monovertex/src/forwarder.rs @@ -1,3 +1,5 @@ +use std::collections::HashMap; + use crate::config::config; use crate::error::{Error, Result}; use crate::message::{Message, Offset}; @@ -7,7 +9,6 @@ use crate::sink::{proto, SinkClient}; use crate::source::SourceClient; use crate::transformer::TransformerClient; use chrono::Utc; -use std::collections::HashMap; use tokio::task::JoinSet; use tokio::time::sleep; use tokio_util::sync::CancellationToken; @@ -222,67 +223,43 @@ impl Forwarder { // we will overwrite this vec with failed messages and will keep retrying. 
let mut messages_to_send = messages; - while attempts <= config().sink_max_retry_attempts { - let start_time = tokio::time::Instant::now(); - match self.sink_client.sink_fn(messages_to_send.clone()).await { - Ok(response) => { - debug!( - attempts = attempts, - "Sink latency - {}ms", - start_time.elapsed().as_millis() - ); - attempts += 1; - - // create a map of id to result, since there is no strict requirement - // for the udsink to return the results in the same order as the requests - let result_map: HashMap<_, _> = response - .results - .iter() - .map(|result| (result.id.clone(), result)) - .collect(); - - error_map.clear(); - // drain all the messages that were successfully written - // and keep only the failed messages to send again - // construct the error map for the failed messages - messages_to_send.retain(|msg| { - if let Some(result) = result_map.get(&msg.id) { - return if result.status == proto::Status::Success as i32 { - false - } else if result.status == proto::Status::Fallback as i32 { - fallback_msgs.push(msg.clone()); // add to fallback messages - false - } else { - *error_map.entry(result.err_msg.clone()).or_insert(0) += 1; - true - }; - } - false - }); - - // if all messages are successfully written, break the loop - if messages_to_send.is_empty() { - break; - } - - warn!( - "Retry attempt {} due to retryable error. Errors: {:?}", - attempts, error_map - ); - sleep(tokio::time::Duration::from_millis( - config().sink_retry_interval_in_ms as u64, - )) + // only breaks out of this loop based on the retry strategy unless all the messages have been written to sink + // successfully. + loop { + while attempts < config().sink_max_retry_attempts { + let status = self + .write_to_sink_once(&mut error_map, &mut fallback_msgs, &mut messages_to_send) .await; + match status { + Ok(true) => break, + Ok(false) => { + attempts += 1; + warn!( + "Retry attempt {} due to retryable error. 
Errors: {:?}", + attempts, error_map + ); + } + Err(e) => Err(e)?, } - Err(e) => return Err(e), } - } - if !messages_to_send.is_empty() { - return Err(Error::SinkError(format!( - "Failed to sink messages after {} attempts. Errors: {:?}", - attempts, error_map - ))); + // If after the retries we still have messages to process, handle the post retry failures + let need_retry = self.handle_sink_post_retry( + &mut attempts, + &mut error_map, + &mut fallback_msgs, + &mut messages_to_send, + ); + match need_retry { + // if we are done with the messages, break the loop + Ok(false) => break, + // if we need to retry, reset the attempts and error_map + Ok(true) => { + attempts = 0; + error_map.clear(); + } + Err(e) => Err(e)?, + } } // If there are fallback messages, write them to the fallback sink @@ -294,6 +271,9 @@ impl Forwarder { .sink_time .get_or_create(&self.common_labels) .observe(start_time_e2e.elapsed().as_micros() as f64); + + // update the metric for number of messages written to the sink + // this included primary and fallback sink forward_metrics() .sink_write_total .get_or_create(&self.common_labels) @@ -301,6 +281,122 @@ impl Forwarder { Ok(()) } + /// Handles the post retry failures based on the configured strategy, + /// returns true if we need to retry, else false. 
+ fn handle_sink_post_retry( + &mut self, + attempts: &mut u16, + error_map: &mut HashMap, + fallback_msgs: &mut Vec, + messages_to_send: &mut Vec, + ) -> Result { + // if we are done with the messages, break the loop + if messages_to_send.is_empty() { + return Ok(false); + } + // check what is the failure strategy in the config + let strategy = config().sink_retry_on_fail_strategy.clone(); + match strategy.as_str() { + // if we need to retry, return true + "retry" => { + warn!( + "Using onFailure Retry, Retry attempts {} completed", + attempts + ); + return Ok(true); + } + // if we need to drop the messages, log and return false + "drop" => { + // log that we are dropping the messages as requested + warn!( + "Dropping messages after {} attempts. Errors: {:?}", + attempts, error_map + ); + // update the metrics + forward_metrics() + .dropped_total + .get_or_create(&self.common_labels) + .inc_by(messages_to_send.len() as u64); + } + // if we need to move the messages to the fallback, return false + "fallback" => { + // log that we are moving the messages to the fallback as requested + warn!( + "Moving messages to fallback after {} attempts. Errors: {:?}", + attempts, error_map + ); + // move the messages to the fallback messages + fallback_msgs.append(messages_to_send); + } + // if the strategy is invalid, return an error + _ => { + return Err(Error::SinkError(format!( + "Invalid sink retry on fail strategy: {}", + strategy + ))); + } + } + // if we are done with the messages, break the loop + Ok(false) + } + + /// Writes to sink once and will return true if successful, else false. Please note that it + /// mutates is incoming fields. 
+ async fn write_to_sink_once( + &mut self, + error_map: &mut HashMap, + fallback_msgs: &mut Vec, + messages_to_send: &mut Vec, + ) -> Result { + let start_time = tokio::time::Instant::now(); + match self.sink_client.sink_fn(messages_to_send.clone()).await { + Ok(response) => { + debug!("Sink latency - {}ms", start_time.elapsed().as_millis()); + + // create a map of id to result, since there is no strict requirement + // for the udsink to return the results in the same order as the requests + let result_map: HashMap<_, _> = response + .results + .iter() + .map(|result| (result.id.clone(), result)) + .collect(); + + error_map.clear(); + // drain all the messages that were successfully written + // and keep only the failed messages to send again + // construct the error map for the failed messages + messages_to_send.retain(|msg| { + if let Some(result) = result_map.get(&msg.id) { + return if result.status == proto::Status::Success as i32 { + false + } else if result.status == proto::Status::Fallback as i32 { + fallback_msgs.push(msg.clone()); // add to fallback messages + false + } else { + *error_map.entry(result.err_msg.clone()).or_insert(0) += 1; + true + }; + } + false + }); + + // if all messages are successfully written, break the loop + if messages_to_send.is_empty() { + return Ok(true); + } + + sleep(tokio::time::Duration::from_millis( + config().sink_retry_interval_in_ms as u64, + )) + .await; + + // we need to retry + return Ok(false); + } + Err(e) => return Err(e), + } + } + // Writes the fallback messages to the fallback sink async fn handle_fallback_messages(&mut self, fallback_msgs: Vec) -> Result<()> { if self.fallback_client.is_none() { @@ -316,8 +412,17 @@ impl Forwarder { // start with the original set of message to be sent. // we will overwrite this vec with failed messages and will keep retrying. 
let mut messages_to_send = fallback_msgs; + let fb_msg_count = messages_to_send.len() as u64; + + let default_retry = config() + .sink_default_retry_strategy + .clone() + .backoff + .unwrap(); + let max_attempts = default_retry.steps.unwrap(); + let sleep_interval = default_retry.interval.unwrap(); - while attempts <= config().sink_max_retry_attempts { + while attempts < max_attempts { let start_time = tokio::time::Instant::now(); match fallback_client.sink_fn(messages_to_send.clone()).await { Ok(fb_response) => { @@ -375,22 +480,22 @@ impl Forwarder { "Retry attempt {} due to retryable error. Errors: {:?}", attempts, fallback_error_map ); - sleep(tokio::time::Duration::from_millis( - config().sink_retry_interval_in_ms as u64, - )) - .await; + sleep(tokio::time::Duration::from(sleep_interval)).await; } Err(e) => return Err(e), } } - if !messages_to_send.is_empty() { return Err(Error::SinkError(format!( "Failed to write messages to fallback sink after {} attempts. Errors: {:?}", attempts, fallback_error_map ))); } - + // increment the metric for the fallback sink write + forward_metrics() + .fbsink_write_total + .get_or_create(&self.common_labels) + .inc_by(fb_msg_count); Ok(()) } @@ -420,17 +525,18 @@ impl Forwarder { mod tests { use std::collections::HashSet; - use crate::error::Result; - use crate::forwarder::ForwarderBuilder; - use crate::sink::{SinkClient, SinkConfig}; - use crate::source::{SourceClient, SourceConfig}; - use crate::transformer::{TransformerClient, TransformerConfig}; use chrono::Utc; use numaflow::source::{Message, Offset, SourceReadRequest}; use numaflow::{sink, source, sourcetransform}; use tokio::sync::mpsc::Sender; use tokio_util::sync::CancellationToken; + use crate::error::Result; + use crate::forwarder::ForwarderBuilder; + use crate::sink::{SinkClient, SinkConfig}; + use crate::source::{SourceClient, SourceConfig}; + use crate::transformer::{TransformerClient, TransformerConfig}; + struct SimpleSource { yet_to_be_acked: 
std::sync::RwLock>, } @@ -768,13 +874,13 @@ mod tests { let forwarder_handle = tokio::spawn(async move { forwarder.start().await?; - Ok(()) + Result::<()>::Ok(()) }); // Set a timeout for the forwarder let timeout_duration = tokio::time::Duration::from_secs(1); + // The future should not complete as we should be retrying let result = tokio::time::timeout(timeout_duration, forwarder_handle).await; - let result: Result<()> = result.expect("forwarder_handle timed out").unwrap(); assert!(result.is_err()); // stop the servers diff --git a/rust/monovertex/src/metrics.rs b/rust/monovertex/src/metrics.rs index dc5dfd6dc9..f6f5519765 100644 --- a/rust/monovertex/src/metrics.rs +++ b/rust/monovertex/src/metrics.rs @@ -44,8 +44,12 @@ const READ_TOTAL: &str = "monovtx_read"; const READ_BYTES_TOTAL: &str = "monovtx_read_bytes"; const ACK_TOTAL: &str = "monovtx_ack"; const SINK_WRITE_TOTAL: &str = "monovtx_sink_write"; +const DROPPED_TOTAL: &str = "monovtx_dropped"; +const FALLBACK_SINK_WRITE_TOTAL: &str = "monovtx_fallback_sink_write"; + // pending as gauge const SOURCE_PENDING: &str = "monovtx_pending"; + // processing times as timers const E2E_TIME: &str = "monovtx_processing_time"; const READ_TIME: &str = "monovtx_read_time"; @@ -100,8 +104,12 @@ pub struct MonoVtxMetrics { pub read_bytes_total: Family, Counter>, pub ack_total: Family, Counter>, pub sink_write_total: Family, Counter>, + pub dropped_total: Family, Counter>, + pub fbsink_write_total: Family, Counter>, + // gauge pub source_pending: Family, Gauge>, + // timers pub e2e_time: Family, Histogram>, pub read_time: Family, Histogram>, @@ -118,6 +126,8 @@ impl MonoVtxMetrics { read_bytes_total: Family::, Counter>::default(), ack_total: Family::, Counter>::default(), sink_write_total: Family::, Counter>::default(), + dropped_total: Family::, Counter>::default(), + fbsink_write_total: Family::, Counter>::default(), // gauge source_pending: Family::, Gauge>::default(), // timers @@ -160,6 +170,19 @@ impl MonoVtxMetrics { 
"A Counter to keep track of the total number of bytes read from the source", metrics.read_bytes_total.clone(), ); + + registry.register( + DROPPED_TOTAL, + "A Counter to keep track of the total number of messages dropped by the monovtx", + metrics.dropped_total.clone(), + ); + + registry.register( + FALLBACK_SINK_WRITE_TOTAL, + "A Counter to keep track of the total number of messages written to the fallback sink", + metrics.fbsink_write_total.clone(), + ); + // gauges registry.register( SOURCE_PENDING, @@ -192,7 +215,6 @@ impl MonoVtxMetrics { "A Histogram to keep track of the total time taken to Write to the Sink, in microseconds", metrics.sink_time.clone(), ); - metrics } } diff --git a/rust/numaflow-models/src/models/backoff.rs b/rust/numaflow-models/src/models/backoff.rs new file mode 100644 index 0000000000..7d2baddcd7 --- /dev/null +++ b/rust/numaflow-models/src/models/backoff.rs @@ -0,0 +1,38 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. + +/// Backoff : Backoff defines parameters used to systematically configure the retry strategy. 
+ +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Backoff { + #[serde(rename = "interval", skip_serializing_if = "Option::is_none")] + pub interval: Option, + /// Steps defines the number of times to try writing to a sink including retries + #[serde(rename = "steps", skip_serializing_if = "Option::is_none")] + pub steps: Option, +} + +impl Backoff { + /// Backoff defines parameters used to systematically configure the retry strategy. + pub fn new() -> Backoff { + Backoff { + interval: None, + steps: None, + } + } +} diff --git a/rust/numaflow-models/src/models/mod.rs b/rust/numaflow-models/src/models/mod.rs index 648964f907..bbb11ca261 100644 --- a/rust/numaflow-models/src/models/mod.rs +++ b/rust/numaflow-models/src/models/mod.rs @@ -6,6 +6,8 @@ pub mod abstract_vertex; pub use self::abstract_vertex::AbstractVertex; pub mod authorization; pub use self::authorization::Authorization; +pub mod backoff; +pub use self::backoff::Backoff; pub mod basic_auth; pub use self::basic_auth::BasicAuth; pub mod blackhole; @@ -124,6 +126,8 @@ pub mod redis_config; pub use self::redis_config::RedisConfig; pub mod redis_settings; pub use self::redis_settings::RedisSettings; +pub mod retry_strategy; +pub use self::retry_strategy::RetryStrategy; pub mod sasl; pub use self::sasl::Sasl; pub mod sasl_plain; diff --git a/rust/numaflow-models/src/models/retry_strategy.rs b/rust/numaflow-models/src/models/retry_strategy.rs new file mode 100644 index 0000000000..0b1a52a654 --- /dev/null +++ b/rust/numaflow-models/src/models/retry_strategy.rs @@ -0,0 +1,38 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. + +/// RetryStrategy : RetryStrategy struct encapsulates the settings for retrying operations in the event of failures. It includes a BackOff strategy to manage the timing of retries and defines the action to take upon failure. + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct RetryStrategy { + #[serde(rename = "backoff", skip_serializing_if = "Option::is_none")] + pub backoff: Option>, + /// OnFailure specifies the action to take when a retry fails. The default action is to retry. + #[serde(rename = "onFailure", skip_serializing_if = "Option::is_none")] + pub on_failure: Option, +} + +impl RetryStrategy { + /// RetryStrategy struct encapsulates the settings for retrying operations in the event of failures. It includes a BackOff strategy to manage the timing of retries and defines the action to take upon failure. 
+ pub fn new() -> RetryStrategy { + RetryStrategy { + backoff: None, + on_failure: None, + } + } +} diff --git a/rust/numaflow-models/src/models/sink.rs b/rust/numaflow-models/src/models/sink.rs index aa3402f42e..91f18340eb 100644 --- a/rust/numaflow-models/src/models/sink.rs +++ b/rust/numaflow-models/src/models/sink.rs @@ -26,6 +26,8 @@ pub struct Sink { pub kafka: Option>, #[serde(rename = "log", skip_serializing_if = "Option::is_none")] pub log: Option>, + #[serde(rename = "retryStrategy", skip_serializing_if = "Option::is_none")] + pub retry_strategy: Option>, #[serde(rename = "udsink", skip_serializing_if = "Option::is_none")] pub udsink: Option>, } @@ -37,6 +39,7 @@ impl Sink { fallback: None, kafka: None, log: None, + retry_strategy: None, udsink: None, } } From 97ccc7fb5b312c13b4e23c5ff5b4b33053d81ad4 Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Fri, 30 Aug 2024 19:48:21 -0700 Subject: [PATCH 041/188] chore: placeholders for rolling update (#2019) --- api/json-schema/schema.json | 36 + api/openapi-spec/swagger.json | 36 + .../numaflow.numaproj.io_monovertices.yaml | 10 + .../full/numaflow.numaproj.io_vertices.yaml | 10 + config/install.yaml | 20 + config/namespace-install.yaml | 20 + docs/APIs.md | 152 +++ pkg/apis/numaflow/v1alpha1/generated.pb.go | 1213 ++++++++++------- pkg/apis/numaflow/v1alpha1/generated.proto | 24 + .../numaflow/v1alpha1/mono_vertex_types.go | 8 + .../numaflow/v1alpha1/openapi_generated.go | 56 + pkg/apis/numaflow/v1alpha1/vertex_types.go | 8 + .../src/models/mono_vertex_status.rs | 16 + .../src/models/vertex_status.rs | 16 + 14 files changed, 1149 insertions(+), 476 deletions(-) diff --git a/api/json-schema/schema.json b/api/json-schema/schema.json index 496b6a963f..e594c79167 100644 --- a/api/json-schema/schema.json +++ b/api/json-schema/schema.json @@ -19292,6 +19292,15 @@ "x-kubernetes-patch-merge-key": "type", "x-kubernetes-patch-strategy": "merge" }, + "currentHash": { + "description": "If not empty, indicates the version of 
the MonoVertex used to generate Pods in the sequence [0,currentReplicas).", + "type": "string" + }, + "currentReplicas": { + "description": "The number of Pods created by the controller from the MonoVertex version indicated by currentHash.", + "format": "int64", + "type": "integer" + }, "lastScaledAt": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", "description": "Time of last scaling operation." @@ -19325,6 +19334,15 @@ }, "selector": { "type": "string" + }, + "updateHash": { + "description": "If not empty, indicates the version of the MonoVertx used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", + "type": "string" + }, + "updatedReplicas": { + "description": "The number of Pods created by the controller from the MonoVertex version indicated by updateHash.", + "format": "int64", + "type": "integer" } }, "type": "object" @@ -20580,6 +20598,15 @@ "x-kubernetes-patch-merge-key": "type", "x-kubernetes-patch-strategy": "merge" }, + "currentHash": { + "description": "If not empty, indicates the version of the Vertex used to generate Pods in the sequence [0,currentReplicas).", + "type": "string" + }, + "currentReplicas": { + "description": "The number of Pods created by the controller from the Vertex version indicated by currentHash.", + "format": "int64", + "type": "integer" + }, "lastScaledAt": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", "description": "Time of last scaling operation." 
@@ -20610,6 +20637,15 @@ }, "selector": { "type": "string" + }, + "updateHash": { + "description": "If not empty, indicates the version of the Vertx used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", + "type": "string" + }, + "updatedReplicas": { + "description": "The number of Pods created by the controller from the Vertex version indicated by updateHash.", + "format": "int64", + "type": "integer" } }, "type": "object" diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 41d81a0efb..8f47047db6 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -19288,6 +19288,15 @@ "x-kubernetes-patch-merge-key": "type", "x-kubernetes-patch-strategy": "merge" }, + "currentHash": { + "description": "If not empty, indicates the version of the MonoVertex used to generate Pods in the sequence [0,currentReplicas).", + "type": "string" + }, + "currentReplicas": { + "description": "The number of Pods created by the controller from the MonoVertex version indicated by currentHash.", + "type": "integer", + "format": "int64" + }, "lastScaledAt": { "description": "Time of last scaling operation.", "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" @@ -19321,6 +19330,15 @@ }, "selector": { "type": "string" + }, + "updateHash": { + "description": "If not empty, indicates the version of the MonoVertx used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", + "type": "string" + }, + "updatedReplicas": { + "description": "The number of Pods created by the controller from the MonoVertex version indicated by updateHash.", + "type": "integer", + "format": "int64" } } }, @@ -20558,6 +20576,15 @@ "x-kubernetes-patch-merge-key": "type", "x-kubernetes-patch-strategy": "merge" }, + "currentHash": { + "description": "If not empty, indicates the version of the Vertex used to generate Pods in the sequence [0,currentReplicas).", + "type": "string" + }, + "currentReplicas": { + "description": 
"The number of Pods created by the controller from the Vertex version indicated by currentHash.", + "type": "integer", + "format": "int64" + }, "lastScaledAt": { "description": "Time of last scaling operation.", "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" @@ -20588,6 +20615,15 @@ }, "selector": { "type": "string" + }, + "updateHash": { + "description": "If not empty, indicates the version of the Vertx used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", + "type": "string" + }, + "updatedReplicas": { + "description": "The number of Pods created by the controller from the Vertex version indicated by updateHash.", + "type": "integer", + "format": "int64" } } }, diff --git a/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml b/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml index c07e927f5b..8e503f47d6 100644 --- a/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml @@ -5549,6 +5549,11 @@ spec: - type type: object type: array + currentHash: + type: string + currentReplicas: + format: int32 + type: integer lastScaledAt: format: date-time type: string @@ -5579,6 +5584,11 @@ spec: type: integer selector: type: string + updateHash: + type: string + updatedReplicas: + format: int32 + type: integer type: object required: - spec diff --git a/config/base/crds/full/numaflow.numaproj.io_vertices.yaml b/config/base/crds/full/numaflow.numaproj.io_vertices.yaml index 8f0d150360..e7bb52a8d8 100644 --- a/config/base/crds/full/numaflow.numaproj.io_vertices.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_vertices.yaml @@ -5484,6 +5484,11 @@ spec: - type type: object type: array + currentHash: + type: string + currentReplicas: + format: int32 + type: integer lastScaledAt: format: date-time type: string @@ -5508,6 +5513,11 @@ spec: type: integer selector: type: string + updateHash: + type: string + updatedReplicas: + format: int32 + type: 
integer type: object required: - spec diff --git a/config/install.yaml b/config/install.yaml index d2e76c26b3..849958226d 100644 --- a/config/install.yaml +++ b/config/install.yaml @@ -8193,6 +8193,11 @@ spec: - type type: object type: array + currentHash: + type: string + currentReplicas: + format: int32 + type: integer lastScaledAt: format: date-time type: string @@ -8223,6 +8228,11 @@ spec: type: integer selector: type: string + updateHash: + type: string + updatedReplicas: + format: int32 + type: integer type: object required: - spec @@ -23599,6 +23609,11 @@ spec: - type type: object type: array + currentHash: + type: string + currentReplicas: + format: int32 + type: integer lastScaledAt: format: date-time type: string @@ -23623,6 +23638,11 @@ spec: type: integer selector: type: string + updateHash: + type: string + updatedReplicas: + format: int32 + type: integer type: object required: - spec diff --git a/config/namespace-install.yaml b/config/namespace-install.yaml index 13afdb3228..3b65e083b6 100644 --- a/config/namespace-install.yaml +++ b/config/namespace-install.yaml @@ -8193,6 +8193,11 @@ spec: - type type: object type: array + currentHash: + type: string + currentReplicas: + format: int32 + type: integer lastScaledAt: format: date-time type: string @@ -8223,6 +8228,11 @@ spec: type: integer selector: type: string + updateHash: + type: string + updatedReplicas: + format: int32 + type: integer type: object required: - spec @@ -23599,6 +23609,11 @@ spec: - type type: object type: array + currentHash: + type: string + currentReplicas: + format: int32 + type: integer lastScaledAt: format: date-time type: string @@ -23623,6 +23638,11 @@ spec: type: integer selector: type: string + updateHash: + type: string + updatedReplicas: + format: int32 + type: integer type: object required: - spec diff --git a/docs/APIs.md b/docs/APIs.md index 3e631f3870..a311d7ee07 100644 --- a/docs/APIs.md +++ b/docs/APIs.md @@ -6372,6 +6372,82 @@ The number of pods targeted by this 
MonoVertex with a Ready Condition. + + + + +currentReplicas
uint32 + + + + +

+ +The number of Pods created by the controller from the MonoVertex version +indicated by currentHash. +

+ + + + + + + + + +updatedReplicas
uint32 + + + + +

+ +The number of Pods created by the controller from the MonoVertex version +indicated by updateHash. +

+ + + + + + + + + +currentHash
string + + + + +

+ +If not empty, indicates the version of the MonoVertex used to generate +Pods in the sequence \[0,currentReplicas). +

+ + + + + + + + + +updateHash
string + + + + +

+ +If not empty, indicates the version of the MonoVertx used to generate +Pods in the sequence \[replicas-updatedReplicas,replicas) +

+ + + + + @@ -11343,6 +11419,82 @@ The number of pods targeted by this Vertex with a Ready Condition. + + + + +currentReplicas
uint32 + + + + +

+ +The number of Pods created by the controller from the Vertex version +indicated by currentHash. +

+ + + + + + + + + +updatedReplicas
uint32 + + + + +

+ +The number of Pods created by the controller from the Vertex version +indicated by updateHash. +

+ + + + + + + + + +currentHash
string + + + + +

+ +If not empty, indicates the version of the Vertex used to generate Pods +in the sequence \[0,currentReplicas). +

+ + + + + + + + + +updateHash
string + + + + +

+ +If not empty, indicates the version of the Vertx used to generate Pods +in the sequence \[replicas-updatedReplicas,replicas) +

+ + + + + diff --git a/pkg/apis/numaflow/v1alpha1/generated.pb.go b/pkg/apis/numaflow/v1alpha1/generated.pb.go index 3514c89361..f9bbc352a1 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.pb.go +++ b/pkg/apis/numaflow/v1alpha1/generated.pb.go @@ -2762,483 +2762,488 @@ func init() { } var fileDescriptor_9d0d1b17d3865563 = []byte{ - // 7611 bytes of a gzipped FileDescriptorProto + // 7695 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x5d, 0x6c, 0x24, 0xd7, - 0x75, 0xa6, 0xfa, 0x8f, 0xec, 0x3e, 0x4d, 0x72, 0x38, 0x77, 0x46, 0x23, 0xce, 0x68, 0x34, 0x1c, - 0x97, 0x2c, 0x79, 0xbc, 0xb6, 0xc9, 0x15, 0x57, 0x7f, 0xfe, 0x95, 0xd8, 0xe4, 0x90, 0xc3, 0x19, - 0x72, 0x86, 0x3e, 0x4d, 0x8e, 0x64, 0x6b, 0x6d, 0x6d, 0xb1, 0xea, 0xb2, 0x59, 0x62, 0x75, 0x55, - 0xab, 0xaa, 0x9a, 0x33, 0x94, 0x77, 0xe1, 0x1f, 0x2d, 0x20, 0x2d, 0x16, 0x8b, 0x5d, 0xf8, 0xc9, - 0xc0, 0xc2, 0xbb, 0xd8, 0xc5, 0x02, 0x7e, 0x30, 0xbc, 0x0f, 0x8b, 0xd5, 0x3e, 0x2c, 0x90, 0x38, - 0x0e, 0x82, 0xc4, 0x0e, 0xf2, 0xe3, 0x87, 0x00, 0x51, 0x5e, 0x88, 0x98, 0x41, 0x1e, 0x12, 0x20, - 0x86, 0x11, 0x03, 0x89, 0x3d, 0x30, 0xe2, 0xe0, 0xfe, 0xd5, 0x5f, 0x57, 0xcf, 0x90, 0x5d, 0xe4, - 0x68, 0x94, 0xe8, 0xad, 0xea, 0xdc, 0x73, 0xbf, 0x73, 0xef, 0xa9, 0xfb, 0x73, 0xee, 0xb9, 0xe7, - 0xde, 0x82, 0xc5, 0x96, 0x15, 0x6c, 0x75, 0x37, 0xa6, 0x0c, 0xb7, 0x3d, 0xed, 0x74, 0xdb, 0x7a, - 0xc7, 0x73, 0x5f, 0xe3, 0x0f, 0x9b, 0xb6, 0x7b, 0x6b, 0xba, 0xb3, 0xdd, 0x9a, 0xd6, 0x3b, 0x96, - 0x1f, 0x51, 0x76, 0x9e, 0xd2, 0xed, 0xce, 0x96, 0xfe, 0xd4, 0x74, 0x8b, 0x3a, 0xd4, 0xd3, 0x03, - 0x6a, 0x4e, 0x75, 0x3c, 0x37, 0x70, 0xc9, 0x73, 0x11, 0xd0, 0x94, 0x02, 0x9a, 0x52, 0xd9, 0xa6, - 0x3a, 0xdb, 0xad, 0x29, 0x06, 0x14, 0x51, 0x14, 0xd0, 0xb9, 0x4f, 0xc4, 0x4a, 0xd0, 0x72, 0x5b, - 0xee, 0x34, 0xc7, 0xdb, 0xe8, 0x6e, 0xf2, 0x37, 0xfe, 0xc2, 0x9f, 0x84, 0x9c, 0x73, 0xda, 0xf6, - 0xf3, 0xfe, 0x94, 0xe5, 0xb2, 0x62, 0x4d, 0x1b, 0xae, 0x47, 0xa7, 0x77, 0x7a, 0xca, 0x72, 0xee, - 
0xe9, 0x88, 0xa7, 0xad, 0x1b, 0x5b, 0x96, 0x43, 0xbd, 0x5d, 0x55, 0x97, 0x69, 0x8f, 0xfa, 0x6e, - 0xd7, 0x33, 0xe8, 0xa1, 0x72, 0xf9, 0xd3, 0x6d, 0x1a, 0xe8, 0x59, 0xb2, 0xa6, 0xfb, 0xe5, 0xf2, - 0xba, 0x4e, 0x60, 0xb5, 0x7b, 0xc5, 0x3c, 0x7b, 0xaf, 0x0c, 0xbe, 0xb1, 0x45, 0xdb, 0x7a, 0x3a, - 0x9f, 0xf6, 0x03, 0x80, 0x53, 0xb3, 0x1b, 0x7e, 0xe0, 0xe9, 0x46, 0xb0, 0xea, 0x9a, 0x6b, 0xb4, - 0xdd, 0xb1, 0xf5, 0x80, 0x92, 0x6d, 0xa8, 0xb2, 0xb2, 0x99, 0x7a, 0xa0, 0x4f, 0x14, 0x2e, 0x16, - 0x2e, 0xd5, 0x67, 0x66, 0xa7, 0x06, 0xfc, 0x16, 0x53, 0x2b, 0x12, 0xa8, 0x31, 0xb2, 0xbf, 0x37, - 0x59, 0x55, 0x6f, 0x18, 0x0a, 0x20, 0xdf, 0x2a, 0xc0, 0x88, 0xe3, 0x9a, 0xb4, 0x49, 0x6d, 0x6a, - 0x04, 0xae, 0x37, 0x51, 0xbc, 0x58, 0xba, 0x54, 0x9f, 0xf9, 0xf2, 0xc0, 0x12, 0x33, 0x6a, 0x34, - 0x75, 0x3d, 0x26, 0xe0, 0xb2, 0x13, 0x78, 0xbb, 0x8d, 0xd3, 0x3f, 0xdc, 0x9b, 0x7c, 0x68, 0x7f, - 0x6f, 0x72, 0x24, 0x9e, 0x84, 0x89, 0x92, 0x90, 0x75, 0xa8, 0x07, 0xae, 0xcd, 0x54, 0x66, 0xb9, - 0x8e, 0x3f, 0x51, 0xe2, 0x05, 0xbb, 0x30, 0x25, 0xb4, 0xcd, 0xc4, 0x4f, 0xb1, 0xe6, 0x32, 0xb5, - 0xf3, 0xd4, 0xd4, 0x5a, 0xc8, 0xd6, 0x38, 0x25, 0x81, 0xeb, 0x11, 0xcd, 0xc7, 0x38, 0x0e, 0xa1, - 0x70, 0xc2, 0xa7, 0x46, 0xd7, 0xb3, 0x82, 0xdd, 0x39, 0xd7, 0x09, 0xe8, 0xed, 0x60, 0xa2, 0xcc, - 0xb5, 0xfc, 0x64, 0x16, 0xf4, 0xaa, 0x6b, 0x36, 0x93, 0xdc, 0x8d, 0x53, 0xfb, 0x7b, 0x93, 0x27, - 0x52, 0x44, 0x4c, 0x63, 0x12, 0x07, 0xc6, 0xad, 0xb6, 0xde, 0xa2, 0xab, 0x5d, 0xdb, 0x6e, 0x52, - 0xc3, 0xa3, 0x81, 0x3f, 0x51, 0xe1, 0x55, 0xb8, 0x94, 0x25, 0x67, 0xd9, 0x35, 0x74, 0xfb, 0xc6, - 0xc6, 0x6b, 0xd4, 0x08, 0x90, 0x6e, 0x52, 0x8f, 0x3a, 0x06, 0x6d, 0x4c, 0xc8, 0xca, 0x8c, 0x2f, - 0xa5, 0x90, 0xb0, 0x07, 0x9b, 0x2c, 0xc2, 0xc9, 0x8e, 0x67, 0xb9, 0xbc, 0x08, 0xb6, 0xee, 0xfb, - 0xd7, 0xf5, 0x36, 0x9d, 0x18, 0xba, 0x58, 0xb8, 0x54, 0x6b, 0x9c, 0x95, 0x30, 0x27, 0x57, 0xd3, - 0x0c, 0xd8, 0x9b, 0x87, 0x5c, 0x82, 0xaa, 0x22, 0x4e, 0x0c, 0x5f, 0x2c, 0x5c, 0xaa, 0x88, 0xb6, - 0xa3, 0xf2, 0x62, 0x98, 0x4a, 0x16, 
0xa0, 0xaa, 0x6f, 0x6e, 0x5a, 0x0e, 0xe3, 0xac, 0x72, 0x15, - 0x9e, 0xcf, 0xaa, 0xda, 0xac, 0xe4, 0x11, 0x38, 0xea, 0x0d, 0xc3, 0xbc, 0xe4, 0x2a, 0x10, 0x9f, - 0x7a, 0x3b, 0x96, 0x41, 0x67, 0x0d, 0xc3, 0xed, 0x3a, 0x01, 0x2f, 0x7b, 0x8d, 0x97, 0xfd, 0x9c, - 0x2c, 0x3b, 0x69, 0xf6, 0x70, 0x60, 0x46, 0x2e, 0xf2, 0x22, 0x8c, 0xcb, 0x6e, 0x17, 0x69, 0x01, - 0x38, 0xd2, 0x69, 0xa6, 0x48, 0x4c, 0xa5, 0x61, 0x0f, 0x37, 0x31, 0xe1, 0xbc, 0xde, 0x0d, 0xdc, - 0x36, 0x83, 0x4c, 0x0a, 0x5d, 0x73, 0xb7, 0xa9, 0x33, 0x51, 0xbf, 0x58, 0xb8, 0x54, 0x6d, 0x5c, - 0xdc, 0xdf, 0x9b, 0x3c, 0x3f, 0x7b, 0x17, 0x3e, 0xbc, 0x2b, 0x0a, 0xb9, 0x01, 0x35, 0xd3, 0xf1, - 0x57, 0x5d, 0xdb, 0x32, 0x76, 0x27, 0x46, 0x78, 0x01, 0x9f, 0x92, 0x55, 0xad, 0xcd, 0x5f, 0x6f, - 0x8a, 0x84, 0x3b, 0x7b, 0x93, 0xe7, 0x7b, 0x47, 0xc7, 0xa9, 0x30, 0x1d, 0x23, 0x0c, 0xb2, 0xc2, - 0x01, 0xe7, 0x5c, 0x67, 0xd3, 0x6a, 0x4d, 0x8c, 0xf2, 0xaf, 0x71, 0xb1, 0x4f, 0x83, 0x9e, 0xbf, - 0xde, 0x14, 0x7c, 0x8d, 0x51, 0x29, 0x4e, 0xbc, 0x62, 0x84, 0x40, 0x4c, 0x18, 0x53, 0xe3, 0xea, - 0x9c, 0xad, 0x5b, 0x6d, 0x7f, 0x62, 0x8c, 0x37, 0xde, 0x0f, 0xf7, 0xc1, 0xc4, 0x38, 0x73, 0xe3, - 0x8c, 0xac, 0xca, 0x58, 0x82, 0xec, 0x63, 0x0a, 0xf3, 0xdc, 0x0b, 0x70, 0xb2, 0x67, 0x6c, 0x20, - 0xe3, 0x50, 0xda, 0xa6, 0xbb, 0x7c, 0xe8, 0xab, 0x21, 0x7b, 0x24, 0xa7, 0xa1, 0xb2, 0xa3, 0xdb, - 0x5d, 0x3a, 0x51, 0xe4, 0x34, 0xf1, 0xf2, 0xa9, 0xe2, 0xf3, 0x05, 0xed, 0x7f, 0x96, 0x60, 0x44, - 0x8d, 0x38, 0x4d, 0xcb, 0xd9, 0x26, 0x2f, 0x41, 0xc9, 0x76, 0x5b, 0x72, 0xdc, 0xfc, 0xcc, 0xc0, - 0xa3, 0xd8, 0xb2, 0xdb, 0x6a, 0x0c, 0xef, 0xef, 0x4d, 0x96, 0x96, 0xdd, 0x16, 0x32, 0x44, 0x62, - 0x40, 0x65, 0x5b, 0xdf, 0xdc, 0xd6, 0x79, 0x19, 0xea, 0x33, 0x8d, 0x81, 0xa1, 0xaf, 0x31, 0x14, - 0x56, 0xd6, 0x46, 0x6d, 0x7f, 0x6f, 0xb2, 0xc2, 0x5f, 0x51, 0x60, 0x13, 0x17, 0x6a, 0x1b, 0xb6, - 0x6e, 0x6c, 0x6f, 0xb9, 0x36, 0x9d, 0x28, 0xe5, 0x14, 0xd4, 0x50, 0x48, 0xe2, 0x33, 0x87, 0xaf, - 0x18, 0xc9, 0x20, 0x06, 0x0c, 0x75, 0x4d, 0xdf, 0x72, 0xb6, 0xe5, 0x18, 
0xf8, 0xc2, 0xc0, 0xd2, - 0xd6, 0xe7, 0x79, 0x9d, 0x60, 0x7f, 0x6f, 0x72, 0x48, 0x3c, 0xa3, 0x84, 0xd6, 0x7e, 0x5a, 0x87, - 0x31, 0xf5, 0x91, 0x6e, 0x52, 0x2f, 0xa0, 0xb7, 0xc9, 0x45, 0x28, 0x3b, 0xac, 0x6b, 0xf2, 0x8f, - 0xdc, 0x18, 0x91, 0xcd, 0xa5, 0xcc, 0xbb, 0x24, 0x4f, 0x61, 0x25, 0x13, 0x4d, 0x45, 0x2a, 0x7c, - 0xf0, 0x92, 0x35, 0x39, 0x8c, 0x28, 0x99, 0x78, 0x46, 0x09, 0x4d, 0x5e, 0x81, 0x32, 0xaf, 0xbc, - 0x50, 0xf5, 0x67, 0x07, 0x17, 0xc1, 0xaa, 0x5e, 0x65, 0x35, 0xe0, 0x15, 0xe7, 0xa0, 0xac, 0x29, - 0x76, 0xcd, 0x4d, 0xa9, 0xd8, 0xcf, 0xe4, 0x50, 0xec, 0x82, 0x68, 0x8a, 0xeb, 0xf3, 0x0b, 0xc8, - 0x10, 0xc9, 0x7f, 0x2e, 0xc0, 0x49, 0xc3, 0x75, 0x02, 0x9d, 0x99, 0x1a, 0x6a, 0x92, 0x9d, 0xa8, - 0x70, 0x39, 0x57, 0x07, 0x96, 0x33, 0x97, 0x46, 0x6c, 0x3c, 0xcc, 0xe6, 0x8c, 0x1e, 0x32, 0xf6, - 0xca, 0x26, 0xff, 0xb5, 0x00, 0x0f, 0xb3, 0xb1, 0xbc, 0x87, 0x99, 0xcf, 0x40, 0x47, 0x5b, 0xaa, - 0xb3, 0xfb, 0x7b, 0x93, 0x0f, 0x2f, 0x65, 0x09, 0xc3, 0xec, 0x32, 0xb0, 0xd2, 0x9d, 0xd2, 0x7b, - 0xcd, 0x12, 0x3e, 0xbb, 0xd5, 0x67, 0x96, 0x8f, 0xd2, 0xd4, 0x69, 0x3c, 0x2a, 0x9b, 0x72, 0x96, - 0x65, 0x87, 0x59, 0xa5, 0x20, 0x97, 0x61, 0x78, 0xc7, 0xb5, 0xbb, 0x6d, 0xea, 0x4f, 0x54, 0xf9, - 0x10, 0x7b, 0x2e, 0x6b, 0x88, 0xbd, 0xc9, 0x59, 0x1a, 0x27, 0x24, 0xfc, 0xb0, 0x78, 0xf7, 0x51, - 0xe5, 0x25, 0x16, 0x0c, 0xd9, 0x56, 0xdb, 0x0a, 0x7c, 0x3e, 0x71, 0xd6, 0x67, 0x2e, 0x0f, 0x5c, - 0x2d, 0xd1, 0x45, 0x97, 0x39, 0x98, 0xe8, 0x35, 0xe2, 0x19, 0xa5, 0x00, 0x36, 0x14, 0xfa, 0x86, - 0x6e, 0x8b, 0x89, 0xb5, 0x3e, 0xf3, 0xb9, 0xc1, 0xbb, 0x0d, 0x43, 0x69, 0x8c, 0xca, 0x3a, 0x55, - 0xf8, 0x2b, 0x0a, 0x6c, 0xf2, 0x25, 0x18, 0x4b, 0x7c, 0x4d, 0x7f, 0xa2, 0xce, 0xb5, 0xf3, 0x58, - 0x96, 0x76, 0x42, 0xae, 0x68, 0xe6, 0x49, 0xb4, 0x10, 0x1f, 0x53, 0x60, 0xe4, 0x1a, 0x54, 0x7d, - 0xcb, 0xa4, 0x86, 0xee, 0xf9, 0x13, 0x23, 0x07, 0x01, 0x1e, 0x97, 0xc0, 0xd5, 0xa6, 0xcc, 0x86, - 0x21, 0x00, 0x99, 0x02, 0xe8, 0xe8, 0x5e, 0x60, 0x09, 0x43, 0x75, 0x94, 0x1b, 0x4d, 0x63, 0xfb, - 0x7b, 0x93, 
0xb0, 0x1a, 0x52, 0x31, 0xc6, 0xc1, 0xf8, 0x59, 0xde, 0x25, 0xa7, 0xd3, 0x0d, 0xc4, - 0xc4, 0x5a, 0x13, 0xfc, 0xcd, 0x90, 0x8a, 0x31, 0x0e, 0xf2, 0xbd, 0x02, 0x3c, 0x1a, 0xbd, 0xf6, - 0x76, 0xb2, 0x13, 0x47, 0xde, 0xc9, 0x26, 0xf7, 0xf7, 0x26, 0x1f, 0x6d, 0xf6, 0x17, 0x89, 0x77, - 0x2b, 0x8f, 0xf6, 0x12, 0x8c, 0xce, 0x76, 0x83, 0x2d, 0xd7, 0xb3, 0xde, 0xe0, 0x46, 0x37, 0x59, - 0x80, 0x4a, 0xc0, 0x8d, 0x27, 0x31, 0x2f, 0x3f, 0x91, 0xa5, 0x6a, 0x61, 0xc8, 0x5e, 0xa3, 0xbb, - 0xca, 0x1a, 0x10, 0xf3, 0xa3, 0x30, 0xa6, 0x44, 0x76, 0xed, 0xdf, 0x17, 0x60, 0xb8, 0xa1, 0x1b, - 0xdb, 0xee, 0xe6, 0x26, 0x79, 0x19, 0xaa, 0x96, 0x13, 0x50, 0x6f, 0x47, 0xb7, 0x25, 0xec, 0x54, - 0x0c, 0x36, 0x5c, 0x89, 0x45, 0xf5, 0x66, 0x6b, 0x1e, 0x26, 0x68, 0xbe, 0x2b, 0xd7, 0x0a, 0xdc, - 0x1e, 0x5d, 0x92, 0x18, 0x18, 0xa2, 0x91, 0x49, 0xa8, 0xf8, 0x01, 0xed, 0xf8, 0x7c, 0xe6, 0x19, - 0x15, 0xc5, 0x68, 0x32, 0x02, 0x0a, 0xba, 0xf6, 0x3f, 0x0a, 0x50, 0x6b, 0xe8, 0xbe, 0x65, 0xb0, - 0x5a, 0x92, 0x39, 0x28, 0x77, 0x7d, 0xea, 0x1d, 0xae, 0x6e, 0x7c, 0xb2, 0x58, 0xf7, 0xa9, 0x87, - 0x3c, 0x33, 0xb9, 0x01, 0xd5, 0x8e, 0xee, 0xfb, 0xb7, 0x5c, 0xcf, 0x94, 0x13, 0xde, 0x01, 0x81, - 0x84, 0x71, 0x2e, 0xb3, 0x62, 0x08, 0xa2, 0xd5, 0x21, 0x9a, 0xf1, 0xb5, 0x9f, 0x17, 0xe0, 0x54, - 0xa3, 0xbb, 0xb9, 0x49, 0x3d, 0x69, 0x8b, 0x4a, 0x2b, 0x8f, 0x42, 0xc5, 0xa3, 0xa6, 0xe5, 0xcb, - 0xb2, 0xcf, 0x0f, 0xdc, 0x82, 0x90, 0xa1, 0x48, 0xa3, 0x92, 0xeb, 0x8b, 0x13, 0x50, 0xa0, 0x93, - 0x2e, 0xd4, 0x5e, 0xa3, 0x81, 0x1f, 0x78, 0x54, 0x6f, 0xcb, 0xda, 0x5d, 0x19, 0x58, 0xd4, 0x55, - 0x1a, 0x34, 0x39, 0x52, 0xdc, 0x86, 0x0d, 0x89, 0x18, 0x49, 0xd2, 0x7e, 0x50, 0x81, 0x91, 0x39, - 0xb7, 0xbd, 0x61, 0x39, 0xd4, 0xbc, 0x6c, 0xb6, 0x28, 0x79, 0x15, 0xca, 0xd4, 0x6c, 0x51, 0x59, - 0xdb, 0xc1, 0xa7, 0x7b, 0x06, 0x16, 0x19, 0x2d, 0xec, 0x0d, 0x39, 0x30, 0x59, 0x86, 0xb1, 0x4d, - 0xcf, 0x6d, 0x8b, 0x11, 0x74, 0x6d, 0xb7, 0x23, 0x2d, 0xd6, 0xc6, 0x87, 0xd5, 0xa8, 0xb4, 0x90, - 0x48, 0xbd, 0xb3, 0x37, 0x09, 0xd1, 0x1b, 0xa6, 
0xf2, 0x92, 0x97, 0x61, 0x22, 0xa2, 0x84, 0x43, - 0xc9, 0x1c, 0x5b, 0x44, 0x70, 0x8b, 0xa5, 0xd2, 0x38, 0xbf, 0xbf, 0x37, 0x39, 0xb1, 0xd0, 0x87, - 0x07, 0xfb, 0xe6, 0x26, 0x6f, 0x15, 0x60, 0x3c, 0x4a, 0x14, 0xc3, 0xbb, 0x34, 0x54, 0x8e, 0x68, - 0xde, 0xe0, 0xab, 0xad, 0x85, 0x94, 0x08, 0xec, 0x11, 0x4a, 0x16, 0x60, 0x24, 0x70, 0x63, 0xfa, - 0xaa, 0x70, 0x7d, 0x69, 0xca, 0x3d, 0xb0, 0xe6, 0xf6, 0xd5, 0x56, 0x22, 0x1f, 0x41, 0x38, 0xa3, - 0xde, 0x53, 0x9a, 0x1a, 0xe2, 0x9a, 0x3a, 0xb7, 0xbf, 0x37, 0x79, 0x66, 0x2d, 0x93, 0x03, 0xfb, - 0xe4, 0x24, 0x5f, 0x2f, 0xc0, 0x98, 0x4a, 0x92, 0x3a, 0x1a, 0x3e, 0x4a, 0x1d, 0x11, 0xd6, 0x22, - 0xd6, 0x12, 0x02, 0x30, 0x25, 0x50, 0xfb, 0x45, 0x19, 0x6a, 0xe1, 0x00, 0x4b, 0x1e, 0x87, 0x0a, - 0x5f, 0xf8, 0x4b, 0xbb, 0x39, 0x9c, 0x39, 0xb9, 0x7f, 0x00, 0x45, 0x1a, 0x79, 0x02, 0x86, 0x0d, - 0xb7, 0xdd, 0xd6, 0x1d, 0x93, 0x3b, 0x73, 0x6a, 0x8d, 0x3a, 0x33, 0x18, 0xe6, 0x04, 0x09, 0x55, - 0x1a, 0x39, 0x0f, 0x65, 0xdd, 0x6b, 0x09, 0xbf, 0x4a, 0x4d, 0x8c, 0x47, 0xb3, 0x5e, 0xcb, 0x47, - 0x4e, 0x25, 0x9f, 0x84, 0x12, 0x75, 0x76, 0x26, 0xca, 0xfd, 0x2d, 0x92, 0xcb, 0xce, 0xce, 0x4d, - 0xdd, 0x6b, 0xd4, 0x65, 0x19, 0x4a, 0x97, 0x9d, 0x1d, 0x64, 0x79, 0xc8, 0x32, 0x0c, 0x53, 0x67, - 0x87, 0x7d, 0x7b, 0xe9, 0xf0, 0xf8, 0x50, 0x9f, 0xec, 0x8c, 0x45, 0x1a, 0xe7, 0xa1, 0x5d, 0x23, - 0xc9, 0xa8, 0x20, 0xc8, 0x17, 0x60, 0x44, 0x98, 0x38, 0x2b, 0xec, 0x9b, 0xf8, 0x13, 0x43, 0x1c, - 0x72, 0xb2, 0xbf, 0x8d, 0xc4, 0xf9, 0x22, 0x07, 0x53, 0x8c, 0xe8, 0x63, 0x02, 0x8a, 0x7c, 0x01, - 0x6a, 0x6a, 0x3d, 0xaa, 0xbe, 0x6c, 0xa6, 0x6f, 0x46, 0x2d, 0x62, 0x91, 0xbe, 0xde, 0xb5, 0x3c, - 0xda, 0xa6, 0x4e, 0xe0, 0x37, 0x4e, 0xaa, 0xd5, 0xba, 0x4a, 0xf5, 0x31, 0x42, 0x23, 0x1b, 0xbd, - 0x4e, 0x26, 0xe1, 0x21, 0x79, 0xbc, 0xcf, 0xa8, 0x3e, 0x80, 0x87, 0xe9, 0xcb, 0x70, 0x22, 0xf4, - 0x02, 0x49, 0x47, 0x82, 0xf0, 0x99, 0x3c, 0xcd, 0xb2, 0x2f, 0x25, 0x93, 0xee, 0xec, 0x4d, 0x3e, - 0x96, 0xe1, 0x4a, 0x88, 0x18, 0x30, 0x0d, 0xa6, 0x7d, 0xbf, 0x04, 0xbd, 0xd6, 0x7f, 
0x52, 0x69, - 0x85, 0xa3, 0x56, 0x5a, 0xba, 0x42, 0x62, 0xf8, 0x7c, 0x5e, 0x66, 0xcb, 0x5f, 0xa9, 0xac, 0x0f, - 0x53, 0x3a, 0xea, 0x0f, 0xf3, 0xa0, 0xf4, 0x1d, 0xed, 0xed, 0x32, 0x8c, 0xcd, 0xeb, 0xb4, 0xed, - 0x3a, 0xf7, 0x5c, 0x0b, 0x15, 0x1e, 0x88, 0xb5, 0xd0, 0x25, 0xa8, 0x7a, 0xb4, 0x63, 0x5b, 0x86, - 0x2e, 0x8c, 0x2f, 0xe9, 0x7b, 0x44, 0x49, 0xc3, 0x30, 0xb5, 0xcf, 0x1a, 0xb8, 0xf4, 0x40, 0xae, - 0x81, 0xcb, 0xef, 0xfd, 0x1a, 0x58, 0xfb, 0x7a, 0x11, 0xb8, 0xa1, 0x42, 0x2e, 0x42, 0x99, 0x4d, - 0xc2, 0x69, 0xcf, 0x0b, 0x6f, 0x38, 0x3c, 0x85, 0x9c, 0x83, 0x62, 0xe0, 0xca, 0x9e, 0x07, 0x32, - 0xbd, 0xb8, 0xe6, 0x62, 0x31, 0x70, 0xc9, 0x1b, 0x00, 0x86, 0xeb, 0x98, 0x96, 0x72, 0xc9, 0xe7, - 0xab, 0xd8, 0x82, 0xeb, 0xdd, 0xd2, 0x3d, 0x73, 0x2e, 0x44, 0x14, 0xab, 0xa0, 0xe8, 0x1d, 0x63, - 0xd2, 0xc8, 0x0b, 0x30, 0xe4, 0x3a, 0x0b, 0x5d, 0xdb, 0xe6, 0x0a, 0xad, 0x35, 0x3e, 0xc2, 0x96, - 0xa6, 0x37, 0x38, 0xe5, 0xce, 0xde, 0xe4, 0x59, 0x61, 0xdf, 0xb2, 0xb7, 0x97, 0x3c, 0x2b, 0xb0, - 0x9c, 0x56, 0x33, 0xf0, 0xf4, 0x80, 0xb6, 0x76, 0x51, 0x66, 0xd3, 0xbe, 0x59, 0x80, 0xfa, 0x82, - 0x75, 0x9b, 0x9a, 0x2f, 0x59, 0x8e, 0xe9, 0xde, 0x22, 0x08, 0x43, 0x36, 0x75, 0x5a, 0xc1, 0xd6, - 0x80, 0xeb, 0x07, 0xb1, 0x36, 0xe6, 0x08, 0x28, 0x91, 0xc8, 0x34, 0xd4, 0x84, 0xf5, 0x69, 0x39, - 0x2d, 0xae, 0xc3, 0x6a, 0x34, 0xe8, 0x35, 0x55, 0x02, 0x46, 0x3c, 0xda, 0x2e, 0x9c, 0xec, 0x51, - 0x03, 0x31, 0xa1, 0x1c, 0xe8, 0x2d, 0x35, 0xbe, 0x2e, 0x0c, 0xac, 0xe0, 0x35, 0xbd, 0x15, 0x53, - 0x2e, 0x9f, 0xe3, 0xd7, 0x74, 0x36, 0xc7, 0x33, 0x74, 0xed, 0x57, 0x05, 0xa8, 0x2e, 0x74, 0x1d, - 0x83, 0x2f, 0xd1, 0xee, 0xed, 0x91, 0x53, 0x06, 0x43, 0x31, 0xd3, 0x60, 0xe8, 0xc2, 0xd0, 0xf6, - 0xad, 0xd0, 0xa0, 0xa8, 0xcf, 0xac, 0x0c, 0xde, 0x2a, 0x64, 0x91, 0xa6, 0xae, 0x71, 0x3c, 0xb1, - 0x61, 0x34, 0x26, 0x0b, 0x34, 0x74, 0xed, 0x25, 0x2e, 0x54, 0x0a, 0x3b, 0xf7, 0x49, 0xa8, 0xc7, - 0xd8, 0x0e, 0xe5, 0x3b, 0xfe, 0x7f, 0x65, 0x18, 0x5a, 0x6c, 0x36, 0x67, 0x57, 0x97, 0xc8, 0x33, - 0x50, 0x97, 0x7b, 0x09, 
0xd7, 0x23, 0x1d, 0x84, 0x5b, 0x49, 0xcd, 0x28, 0x09, 0xe3, 0x7c, 0xcc, - 0x1c, 0xf3, 0xa8, 0x6e, 0xb7, 0x65, 0x67, 0x09, 0xcd, 0x31, 0x64, 0x44, 0x14, 0x69, 0x44, 0x87, - 0x31, 0xb6, 0xc2, 0x63, 0x2a, 0x14, 0xab, 0x37, 0xd9, 0x6d, 0x0e, 0xb8, 0xbe, 0xe3, 0x46, 0xe2, - 0x7a, 0x02, 0x00, 0x53, 0x80, 0xe4, 0x79, 0xa8, 0xea, 0xdd, 0x60, 0x8b, 0x1b, 0xd0, 0xa2, 0x6f, - 0x9c, 0xe7, 0x5b, 0x2d, 0x92, 0x76, 0x67, 0x6f, 0x72, 0xe4, 0x1a, 0x36, 0x9e, 0x51, 0xef, 0x18, - 0x72, 0xb3, 0xc2, 0xa9, 0x15, 0xa3, 0x2c, 0x5c, 0xe5, 0xd0, 0x85, 0x5b, 0x4d, 0x00, 0x60, 0x0a, - 0x90, 0xbc, 0x02, 0x23, 0xdb, 0x74, 0x37, 0xd0, 0x37, 0xa4, 0x80, 0xa1, 0xc3, 0x08, 0x18, 0x67, - 0x26, 0xdc, 0xb5, 0x58, 0x76, 0x4c, 0x80, 0x11, 0x1f, 0x4e, 0x6f, 0x53, 0x6f, 0x83, 0x7a, 0xae, - 0x5c, 0x7d, 0x4a, 0x21, 0xc3, 0x87, 0x11, 0x32, 0xb1, 0xbf, 0x37, 0x79, 0xfa, 0x5a, 0x06, 0x0c, - 0x66, 0x82, 0x6b, 0xbf, 0x2c, 0xc2, 0x89, 0x45, 0xb1, 0x99, 0xeb, 0x7a, 0x62, 0x12, 0x26, 0x67, - 0xa1, 0xe4, 0x75, 0xba, 0xbc, 0xe5, 0x94, 0x84, 0xbb, 0x16, 0x57, 0xd7, 0x91, 0xd1, 0xc8, 0xcb, - 0x50, 0x35, 0xe5, 0x90, 0x21, 0x17, 0xbf, 0x03, 0x39, 0x2a, 0xd4, 0x1b, 0x86, 0x68, 0xcc, 0xd2, - 0x6f, 0xfb, 0xad, 0xa6, 0xf5, 0x06, 0x95, 0xeb, 0x41, 0x6e, 0xe9, 0xaf, 0x08, 0x12, 0xaa, 0x34, - 0x36, 0xab, 0x6e, 0xd3, 0x5d, 0xb1, 0x1a, 0x2a, 0x47, 0xb3, 0xea, 0x35, 0x49, 0xc3, 0x30, 0x95, - 0x4c, 0xaa, 0xce, 0xc2, 0x5a, 0x41, 0x59, 0xac, 0xe4, 0x6f, 0x32, 0x82, 0xec, 0x37, 0x6c, 0xc8, - 0x7c, 0xcd, 0x0a, 0x02, 0xea, 0xc9, 0xcf, 0x38, 0xd0, 0x90, 0x79, 0x95, 0x23, 0xa0, 0x44, 0x22, - 0x1f, 0x83, 0x1a, 0x07, 0x6f, 0xd8, 0xee, 0x06, 0xff, 0x70, 0x35, 0xb1, 0xa6, 0xbf, 0xa9, 0x88, - 0x18, 0xa5, 0x6b, 0xbf, 0x2e, 0xc2, 0x99, 0x45, 0x1a, 0x08, 0xab, 0x66, 0x9e, 0x76, 0x6c, 0x77, - 0x97, 0x99, 0x96, 0x48, 0x5f, 0x27, 0x2f, 0x02, 0x58, 0xfe, 0x46, 0x73, 0xc7, 0xe0, 0xfd, 0x40, - 0xf4, 0xe1, 0x8b, 0xb2, 0x4b, 0xc2, 0x52, 0xb3, 0x21, 0x53, 0xee, 0x24, 0xde, 0x30, 0x96, 0x27, - 0x5a, 0x5e, 0x15, 0xef, 0xb2, 0xbc, 0x6a, 0x02, 0x74, 0x22, 
0x03, 0xb5, 0xc4, 0x39, 0xff, 0x95, - 0x12, 0x73, 0x18, 0xdb, 0x34, 0x06, 0x93, 0xc7, 0x64, 0x74, 0x60, 0xdc, 0xa4, 0x9b, 0x7a, 0xd7, - 0x0e, 0x42, 0xa3, 0x5a, 0x76, 0xe2, 0x83, 0xdb, 0xe5, 0xe1, 0x46, 0xf3, 0x7c, 0x0a, 0x09, 0x7b, - 0xb0, 0xb5, 0xff, 0x5f, 0x82, 0x73, 0x8b, 0x34, 0x08, 0x3d, 0x2e, 0x72, 0x74, 0x6c, 0x76, 0xa8, - 0xc1, 0xbe, 0xc2, 0x5b, 0x05, 0x18, 0xb2, 0xf5, 0x0d, 0x6a, 0xb3, 0xd9, 0x8b, 0xd5, 0xe6, 0xd5, - 0x81, 0x27, 0x82, 0xfe, 0x52, 0xa6, 0x96, 0xb9, 0x84, 0xd4, 0xd4, 0x20, 0x88, 0x28, 0xc5, 0xb3, - 0x41, 0xdd, 0xb0, 0xbb, 0x7e, 0x40, 0xbd, 0x55, 0xd7, 0x0b, 0xa4, 0x3d, 0x19, 0x0e, 0xea, 0x73, - 0x51, 0x12, 0xc6, 0xf9, 0xc8, 0x0c, 0x80, 0x61, 0x5b, 0xd4, 0x09, 0x78, 0x2e, 0xd1, 0xaf, 0x88, - 0xfa, 0xbe, 0x73, 0x61, 0x0a, 0xc6, 0xb8, 0x98, 0xa8, 0xb6, 0xeb, 0x58, 0x81, 0x2b, 0x44, 0x95, - 0x93, 0xa2, 0x56, 0xa2, 0x24, 0x8c, 0xf3, 0xf1, 0x6c, 0x34, 0xf0, 0x2c, 0xc3, 0xe7, 0xd9, 0x2a, - 0xa9, 0x6c, 0x51, 0x12, 0xc6, 0xf9, 0xd8, 0x9c, 0x17, 0xab, 0xff, 0xa1, 0xe6, 0xbc, 0xef, 0xd6, - 0xe0, 0x42, 0x42, 0xad, 0x81, 0x1e, 0xd0, 0xcd, 0xae, 0xdd, 0xa4, 0x81, 0xfa, 0x80, 0x03, 0xce, - 0x85, 0xff, 0x31, 0xfa, 0xee, 0x22, 0x84, 0xc4, 0x38, 0x9a, 0xef, 0xde, 0x53, 0xc0, 0x03, 0x7d, - 0xfb, 0x69, 0xa8, 0x39, 0x7a, 0xe0, 0xf3, 0x8e, 0x2b, 0xfb, 0x68, 0x68, 0x86, 0x5d, 0x57, 0x09, - 0x18, 0xf1, 0x90, 0x55, 0x38, 0x2d, 0x55, 0x7c, 0xf9, 0x76, 0xc7, 0xf5, 0x02, 0xea, 0x89, 0xbc, - 0x72, 0x3a, 0x95, 0x79, 0x4f, 0xaf, 0x64, 0xf0, 0x60, 0x66, 0x4e, 0xb2, 0x02, 0xa7, 0x0c, 0xb1, - 0xad, 0x4e, 0x6d, 0x57, 0x37, 0x15, 0xa0, 0x70, 0x70, 0x85, 0x4b, 0xa3, 0xb9, 0x5e, 0x16, 0xcc, - 0xca, 0x97, 0x6e, 0xcd, 0x43, 0x03, 0xb5, 0xe6, 0xe1, 0x41, 0x5a, 0x73, 0x75, 0xb0, 0xd6, 0x5c, - 0x3b, 0x58, 0x6b, 0x66, 0x9a, 0x67, 0xed, 0x88, 0x7a, 0xcc, 0x3c, 0x11, 0x33, 0x6c, 0x2c, 0x6a, - 0x23, 0xd4, 0x7c, 0x33, 0x83, 0x07, 0x33, 0x73, 0x92, 0x0d, 0x38, 0x27, 0xe8, 0x97, 0x1d, 0xc3, - 0xdb, 0xed, 0xb0, 0x89, 0x27, 0x86, 0x5b, 0x4f, 0x78, 0x18, 0xcf, 0x35, 0xfb, 0x72, 0xe2, 0x5d, - 
0x50, 0xc8, 0xa7, 0x61, 0x54, 0x7c, 0xa5, 0x15, 0xbd, 0xc3, 0x61, 0x45, 0x0c, 0xc7, 0xc3, 0x12, - 0x76, 0x74, 0x2e, 0x9e, 0x88, 0x49, 0x5e, 0x32, 0x0b, 0x27, 0x3a, 0x3b, 0x06, 0x7b, 0x5c, 0xda, - 0xbc, 0x4e, 0xa9, 0x49, 0x4d, 0xbe, 0x69, 0x54, 0x6b, 0x3c, 0xa2, 0x1c, 0x1d, 0xab, 0xc9, 0x64, - 0x4c, 0xf3, 0x93, 0xe7, 0x61, 0xc4, 0x0f, 0x74, 0x2f, 0x90, 0x6e, 0xbd, 0x89, 0x31, 0x11, 0xe3, - 0xa2, 0xbc, 0x5e, 0xcd, 0x58, 0x1a, 0x26, 0x38, 0x33, 0xe7, 0x8b, 0x13, 0xc7, 0x37, 0x5f, 0xe4, - 0x19, 0xad, 0x7e, 0xaf, 0x08, 0x17, 0x17, 0x69, 0xb0, 0xe2, 0x3a, 0xd2, 0x29, 0x9a, 0x35, 0xed, - 0x1f, 0xc8, 0x27, 0x9a, 0x9c, 0xb4, 0x8b, 0x47, 0x3a, 0x69, 0x97, 0x8e, 0x68, 0xd2, 0x2e, 0x1f, - 0xe3, 0xa4, 0xfd, 0x9b, 0x45, 0x78, 0x24, 0xa1, 0xc9, 0x55, 0xd7, 0x54, 0x03, 0xfe, 0x07, 0x0a, - 0x3c, 0x80, 0x02, 0xef, 0x08, 0xbb, 0x93, 0x6f, 0x6b, 0xa5, 0x2c, 0x9e, 0x37, 0xd3, 0x16, 0xcf, - 0x2b, 0x79, 0x66, 0xbe, 0x0c, 0x09, 0x07, 0x9a, 0xf1, 0xae, 0x02, 0xf1, 0xe4, 0x26, 0x9c, 0x70, - 0xfd, 0xc4, 0x8c, 0x9e, 0x30, 0x88, 0x0e, 0x7b, 0x38, 0x30, 0x23, 0x17, 0x69, 0xc2, 0xc3, 0x3e, - 0x75, 0x02, 0xcb, 0xa1, 0x76, 0x12, 0x4e, 0x58, 0x43, 0x8f, 0x49, 0xb8, 0x87, 0x9b, 0x59, 0x4c, - 0x98, 0x9d, 0x37, 0xcf, 0x38, 0xf0, 0x07, 0xc0, 0x4d, 0x4e, 0xa1, 0x9a, 0x23, 0xb3, 0x58, 0xde, - 0x4a, 0x5b, 0x2c, 0xaf, 0xe6, 0xff, 0x6e, 0x83, 0x59, 0x2b, 0x33, 0x00, 0xfc, 0x2b, 0xc4, 0xcd, - 0x95, 0x70, 0x92, 0xc6, 0x30, 0x05, 0x63, 0x5c, 0x6c, 0x02, 0x52, 0x7a, 0x8e, 0x5b, 0x2a, 0xe1, - 0x04, 0xd4, 0x8c, 0x27, 0x62, 0x92, 0xb7, 0xaf, 0xb5, 0x53, 0x19, 0xd8, 0xda, 0xb9, 0x0a, 0x24, - 0xe1, 0x78, 0x14, 0x78, 0x43, 0xc9, 0x18, 0xce, 0xa5, 0x1e, 0x0e, 0xcc, 0xc8, 0xd5, 0xa7, 0x29, - 0x0f, 0x1f, 0x6d, 0x53, 0xae, 0x0e, 0xde, 0x94, 0xc9, 0xab, 0x70, 0x96, 0x8b, 0x92, 0xfa, 0x49, - 0x02, 0x0b, 0xbb, 0xe7, 0x43, 0x12, 0xf8, 0x2c, 0xf6, 0x63, 0xc4, 0xfe, 0x18, 0xec, 0xfb, 0x18, - 0x1e, 0x35, 0x99, 0x70, 0xdd, 0xee, 0x6f, 0x13, 0xcd, 0x65, 0xf0, 0x60, 0x66, 0x4e, 0xd6, 0xc4, - 0x02, 0xd6, 0x0c, 0xf5, 0x0d, 0x9b, 
0x9a, 0x32, 0x86, 0x35, 0x6c, 0x62, 0x6b, 0xcb, 0x4d, 0x99, - 0x82, 0x31, 0xae, 0x2c, 0x33, 0x65, 0xe4, 0x90, 0x66, 0xca, 0x22, 0xf7, 0xd2, 0x6f, 0x26, 0xac, - 0x21, 0x69, 0xeb, 0x84, 0x51, 0xc9, 0x73, 0x69, 0x06, 0xec, 0xcd, 0xc3, 0xad, 0x44, 0xc3, 0xb3, - 0x3a, 0x81, 0x9f, 0xc4, 0x1a, 0x4b, 0x59, 0x89, 0x19, 0x3c, 0x98, 0x99, 0x93, 0xd9, 0xe7, 0x5b, - 0x54, 0xb7, 0x83, 0xad, 0x24, 0xe0, 0x89, 0xa4, 0x7d, 0x7e, 0xa5, 0x97, 0x05, 0xb3, 0xf2, 0x65, - 0x4e, 0x48, 0xe3, 0x0f, 0xa6, 0x59, 0xf5, 0x8d, 0x12, 0x9c, 0x5d, 0xa4, 0x41, 0x18, 0xde, 0xf3, - 0x81, 0x1b, 0xe5, 0x3d, 0x70, 0xa3, 0x7c, 0xa7, 0x02, 0xa7, 0x16, 0x69, 0xd0, 0x63, 0x8d, 0xfd, - 0x33, 0x55, 0xff, 0x0a, 0x9c, 0x8a, 0x22, 0xca, 0x9a, 0x81, 0xeb, 0x89, 0xb9, 0x3c, 0xb5, 0x5a, - 0x6e, 0xf6, 0xb2, 0x60, 0x56, 0x3e, 0xf2, 0x05, 0x78, 0x84, 0x4f, 0xf5, 0x4e, 0x4b, 0xf8, 0x67, - 0x85, 0x33, 0x21, 0x76, 0x26, 0x62, 0x52, 0x42, 0x3e, 0xd2, 0xcc, 0x66, 0xc3, 0x7e, 0xf9, 0xc9, - 0x57, 0x61, 0xa4, 0x63, 0x75, 0xa8, 0x6d, 0x39, 0xdc, 0x3e, 0xcb, 0x1d, 0x12, 0xb2, 0x1a, 0x03, - 0x8b, 0x16, 0x70, 0x71, 0x2a, 0x26, 0x04, 0x66, 0xb6, 0xd4, 0xea, 0x31, 0xb6, 0xd4, 0xbf, 0x2d, - 0xc2, 0xf0, 0xa2, 0xe7, 0x76, 0x3b, 0x8d, 0x5d, 0xd2, 0x82, 0xa1, 0x5b, 0x7c, 0xf3, 0x4c, 0x6e, - 0x4d, 0x0d, 0x1e, 0x95, 0x2d, 0xf6, 0xe0, 0x22, 0x93, 0x48, 0xbc, 0xa3, 0x84, 0x67, 0x8d, 0x78, - 0x9b, 0xee, 0x52, 0x53, 0xee, 0xa1, 0x85, 0x8d, 0xf8, 0x1a, 0x23, 0xa2, 0x48, 0x23, 0x6d, 0x38, - 0xa1, 0xdb, 0xb6, 0x7b, 0x8b, 0x9a, 0xcb, 0x7a, 0x40, 0x1d, 0xea, 0xab, 0x2d, 0xc9, 0xc3, 0xba, - 0xa5, 0xf9, 0xbe, 0xfe, 0x6c, 0x12, 0x0a, 0xd3, 0xd8, 0xe4, 0x35, 0x18, 0xf6, 0x03, 0xd7, 0x53, - 0xc6, 0x56, 0x7d, 0x66, 0x6e, 0xf0, 0x8f, 0xde, 0xf8, 0x7c, 0x53, 0x40, 0x09, 0x9f, 0xbd, 0x7c, - 0x41, 0x25, 0x40, 0xfb, 0x76, 0x01, 0xe0, 0xca, 0xda, 0xda, 0xaa, 0xdc, 0x5e, 0x30, 0xa1, 0xac, - 0x77, 0xc3, 0x8d, 0xca, 0xc1, 0x37, 0x04, 0x13, 0x61, 0x99, 0x72, 0x0f, 0xaf, 0x1b, 0x6c, 0x21, - 0x47, 0x27, 0x1f, 0x85, 0x61, 0x69, 0x20, 0x4b, 0xb5, 0x87, 0xa1, 0x05, 
0xd2, 0x88, 0x46, 0x95, - 0xae, 0xfd, 0x9f, 0x22, 0xc0, 0x92, 0x69, 0xd3, 0xa6, 0x0a, 0xa4, 0xaf, 0x05, 0x5b, 0x1e, 0xf5, - 0xb7, 0x5c, 0xdb, 0x1c, 0x70, 0x37, 0x95, 0xfb, 0xfc, 0xd7, 0x14, 0x08, 0x46, 0x78, 0xc4, 0x84, - 0x11, 0x3f, 0xa0, 0x1d, 0x15, 0xa9, 0x39, 0xe0, 0x26, 0xca, 0xb8, 0xf0, 0x8b, 0x44, 0x38, 0x98, - 0x40, 0x25, 0x3a, 0xd4, 0x2d, 0xc7, 0x10, 0x1d, 0xa4, 0xb1, 0x3b, 0x60, 0x43, 0x3a, 0xc1, 0x56, - 0x1c, 0x4b, 0x11, 0x0c, 0xc6, 0x31, 0xb5, 0x9f, 0x15, 0xe1, 0x0c, 0x97, 0xc7, 0x8a, 0x91, 0x88, - 0xc7, 0x24, 0xff, 0xa6, 0xe7, 0xd0, 0xdf, 0xbf, 0x3c, 0x98, 0x68, 0x71, 0x66, 0x6c, 0x85, 0x06, - 0x7a, 0x64, 0xcf, 0x45, 0xb4, 0xd8, 0x49, 0xbf, 0x2e, 0x94, 0x7d, 0x36, 0x5e, 0x09, 0xed, 0x35, - 0x07, 0x6e, 0x42, 0xd9, 0x15, 0xe0, 0xa3, 0x57, 0xb8, 0x6b, 0xcc, 0x47, 0x2d, 0x2e, 0x8e, 0xfc, - 0x3b, 0x18, 0xf2, 0x03, 0x3d, 0xe8, 0xaa, 0xae, 0xb9, 0x7e, 0xd4, 0x82, 0x39, 0x78, 0x34, 0x8e, - 0x88, 0x77, 0x94, 0x42, 0xb5, 0x9f, 0x15, 0xe0, 0x5c, 0x76, 0xc6, 0x65, 0xcb, 0x0f, 0xc8, 0xbf, - 0xee, 0x51, 0xfb, 0x01, 0xbf, 0x38, 0xcb, 0xcd, 0x95, 0x1e, 0xc6, 0x85, 0x2b, 0x4a, 0x4c, 0xe5, - 0x01, 0x54, 0xac, 0x80, 0xb6, 0xd5, 0xfa, 0xf2, 0xc6, 0x11, 0x57, 0x3d, 0x36, 0xb5, 0x33, 0x29, - 0x28, 0x84, 0x69, 0x6f, 0x17, 0xfb, 0x55, 0x99, 0x4f, 0x1f, 0x76, 0x32, 0xe6, 0xf7, 0x5a, 0xbe, - 0x98, 0xdf, 0x64, 0x81, 0x7a, 0x43, 0x7f, 0xff, 0x6d, 0x6f, 0xe8, 0xef, 0x8d, 0xfc, 0xa1, 0xbf, - 0x29, 0x35, 0xf4, 0x8d, 0x00, 0x7e, 0xb7, 0x04, 0xe7, 0xef, 0xd6, 0x6c, 0xd8, 0x7c, 0x26, 0x5b, - 0x67, 0xde, 0xf9, 0xec, 0xee, 0xed, 0x90, 0xcc, 0x40, 0xa5, 0xb3, 0xa5, 0xfb, 0xca, 0x28, 0x53, - 0x0b, 0x96, 0xca, 0x2a, 0x23, 0xde, 0x61, 0x83, 0x06, 0x37, 0xe6, 0xf8, 0x2b, 0x0a, 0x56, 0x36, - 0x1c, 0xb7, 0xa9, 0xef, 0x47, 0x3e, 0x81, 0x70, 0x38, 0x5e, 0x11, 0x64, 0x54, 0xe9, 0x24, 0x80, - 0x21, 0xe1, 0x62, 0x96, 0x33, 0xd3, 0xe0, 0x81, 0x5c, 0x19, 0x61, 0xe2, 0x51, 0xa5, 0xe4, 0x6e, - 0x85, 0x94, 0x45, 0xa6, 0xa0, 0x1c, 0x44, 0x41, 0xbb, 0x6a, 0x69, 0x5e, 0xce, 0xb0, 0x4f, 0x39, - 0x1f, 0x5b, 
0xd8, 0xbb, 0x1b, 0xdc, 0xa9, 0x6e, 0xca, 0xfd, 0x73, 0xcb, 0x75, 0xb8, 0x41, 0x56, - 0x8a, 0x16, 0xf6, 0x37, 0x7a, 0x38, 0x30, 0x23, 0x97, 0xf6, 0xc7, 0x55, 0x38, 0x93, 0xdd, 0x1e, - 0x98, 0xde, 0x76, 0xa8, 0xe7, 0x33, 0xec, 0x42, 0x52, 0x6f, 0x37, 0x05, 0x19, 0x55, 0xfa, 0xfb, - 0x3a, 0xe0, 0xec, 0x3b, 0x05, 0x38, 0xeb, 0xc9, 0x3d, 0xa2, 0xfb, 0x11, 0x74, 0xf6, 0x98, 0x70, - 0x67, 0xf4, 0x11, 0x88, 0xfd, 0xcb, 0x42, 0xfe, 0x57, 0x01, 0x26, 0xda, 0x29, 0x3f, 0xc7, 0x31, - 0x9e, 0x5b, 0xe3, 0x51, 0xf1, 0x2b, 0x7d, 0xe4, 0x61, 0xdf, 0x92, 0x90, 0xaf, 0x42, 0xbd, 0xc3, - 0xda, 0x85, 0x1f, 0x50, 0xc7, 0x50, 0x47, 0xd7, 0x06, 0xef, 0x49, 0xab, 0x11, 0x96, 0x0a, 0x45, - 0x13, 0xf6, 0x41, 0x2c, 0x01, 0xe3, 0x12, 0x1f, 0xf0, 0x83, 0x6a, 0x97, 0xa0, 0xea, 0xd3, 0x20, - 0xb0, 0x9c, 0x96, 0x58, 0x6f, 0xd4, 0x44, 0x5f, 0x69, 0x4a, 0x1a, 0x86, 0xa9, 0xe4, 0x63, 0x50, - 0xe3, 0x5b, 0x4e, 0xb3, 0x5e, 0xcb, 0x9f, 0xa8, 0xf1, 0x70, 0xb1, 0x51, 0x11, 0x00, 0x27, 0x89, - 0x18, 0xa5, 0x93, 0xa7, 0x61, 0x64, 0x83, 0x77, 0x5f, 0x79, 0x76, 0x59, 0xf8, 0xb8, 0xb8, 0xb5, - 0xd6, 0x88, 0xd1, 0x31, 0xc1, 0x45, 0x66, 0x00, 0x68, 0xb8, 0x2f, 0x97, 0xf6, 0x67, 0x45, 0x3b, - 0x76, 0x18, 0xe3, 0x22, 0x8f, 0x41, 0x29, 0xb0, 0x7d, 0xee, 0xc3, 0xaa, 0x46, 0x4b, 0xd0, 0xb5, - 0xe5, 0x26, 0x32, 0xba, 0xf6, 0xeb, 0x02, 0x9c, 0x48, 0x1d, 0x2e, 0x61, 0x59, 0xba, 0x9e, 0x2d, - 0x87, 0x91, 0x30, 0xcb, 0x3a, 0x2e, 0x23, 0xa3, 0x93, 0x57, 0xa5, 0x59, 0x5e, 0xcc, 0x79, 0x4d, - 0xc3, 0x75, 0x3d, 0xf0, 0x99, 0x1d, 0xde, 0x63, 0x91, 0xf3, 0x6d, 0xbe, 0xa8, 0x3c, 0x72, 0x1e, - 0x88, 0x6d, 0xf3, 0x45, 0x69, 0x98, 0xe0, 0x4c, 0x39, 0xfc, 0xca, 0x07, 0x71, 0xf8, 0x69, 0xdf, - 0x2c, 0xc6, 0x34, 0x20, 0x2d, 0xfb, 0x7b, 0x68, 0xe0, 0x49, 0x36, 0x81, 0x86, 0x93, 0x7b, 0x2d, - 0x3e, 0xff, 0xf1, 0xc9, 0x58, 0xa6, 0x92, 0x97, 0x84, 0xee, 0x4b, 0x39, 0x0f, 0xc3, 0xae, 0x2d, - 0x37, 0x45, 0x74, 0x95, 0xfa, 0x6a, 0xe1, 0x27, 0x28, 0x1f, 0xd3, 0x27, 0xd0, 0x7e, 0xbf, 0x04, - 0xf5, 0xab, 0xee, 0xc6, 0xfb, 0x24, 0x82, 0x3a, 
0x7b, 0x9a, 0x2a, 0xbe, 0x87, 0xd3, 0xd4, 0x3a, - 0x3c, 0x12, 0x04, 0x76, 0x93, 0x1a, 0xae, 0x63, 0xfa, 0xb3, 0x9b, 0x01, 0xf5, 0x16, 0x2c, 0xc7, - 0xf2, 0xb7, 0xa8, 0x29, 0xb7, 0x93, 0x1e, 0xdd, 0xdf, 0x9b, 0x7c, 0x64, 0x6d, 0x6d, 0x39, 0x8b, - 0x05, 0xfb, 0xe5, 0xe5, 0xc3, 0x86, 0x38, 0x09, 0xc8, 0x4f, 0xca, 0xc8, 0x98, 0x1b, 0x31, 0x6c, - 0xc4, 0xe8, 0x98, 0xe0, 0xd2, 0xde, 0x29, 0x42, 0x2d, 0x3c, 0x80, 0x4f, 0x9e, 0x80, 0xe1, 0x0d, - 0xcf, 0xdd, 0xa6, 0x9e, 0xd8, 0xb9, 0x93, 0x27, 0x65, 0x1a, 0x82, 0x84, 0x2a, 0x8d, 0x3c, 0x0e, - 0x95, 0xc0, 0xed, 0x58, 0x46, 0xda, 0xa1, 0xb6, 0xc6, 0x88, 0x28, 0xd2, 0x8e, 0xaf, 0x81, 0x3f, - 0x99, 0x30, 0xed, 0x6a, 0x7d, 0x8d, 0xb1, 0x57, 0xa0, 0xec, 0xeb, 0xbe, 0x2d, 0xe7, 0xd3, 0x1c, - 0x67, 0xd9, 0x67, 0x9b, 0xcb, 0xf2, 0x2c, 0xfb, 0x6c, 0x73, 0x19, 0x39, 0xa8, 0xf6, 0x8b, 0x22, - 0xd4, 0x85, 0xde, 0xc4, 0xa8, 0x70, 0x94, 0x9a, 0x7b, 0x81, 0x87, 0x52, 0xf8, 0xdd, 0x36, 0xf5, - 0xb8, 0x9b, 0x49, 0x0e, 0x72, 0xf1, 0xfd, 0x81, 0x28, 0x31, 0x0c, 0xa7, 0x88, 0x48, 0x4a, 0xf5, - 0xe5, 0x63, 0x54, 0x7d, 0xe5, 0x40, 0xaa, 0x1f, 0x3a, 0x0e, 0xd5, 0xbf, 0x55, 0x84, 0xda, 0xb2, - 0xb5, 0x49, 0x8d, 0x5d, 0xc3, 0xe6, 0x67, 0x02, 0x4d, 0x6a, 0xd3, 0x80, 0x2e, 0x7a, 0xba, 0x41, - 0x57, 0xa9, 0x67, 0xf1, 0x0b, 0x6a, 0x58, 0xff, 0xe0, 0x23, 0x90, 0x3c, 0x13, 0x38, 0xdf, 0x87, - 0x07, 0xfb, 0xe6, 0x26, 0x4b, 0x30, 0x62, 0x52, 0xdf, 0xf2, 0xa8, 0xb9, 0x1a, 0x5b, 0xa8, 0x3c, - 0xa1, 0xa6, 0x9a, 0xf9, 0x58, 0xda, 0x9d, 0xbd, 0xc9, 0x51, 0xe5, 0xa0, 0x14, 0x2b, 0x96, 0x44, - 0x56, 0xd6, 0xe5, 0x3b, 0x7a, 0xd7, 0xcf, 0x2a, 0x63, 0xac, 0xcb, 0xaf, 0x66, 0xb3, 0x60, 0xbf, - 0xbc, 0x5a, 0x05, 0x4a, 0xcb, 0x6e, 0x4b, 0x7b, 0xbb, 0x04, 0xe1, 0x4d, 0x46, 0xe4, 0x3f, 0x14, - 0xa0, 0xae, 0x3b, 0x8e, 0x1b, 0xc8, 0x5b, 0x82, 0xc4, 0x0e, 0x3c, 0xe6, 0xbe, 0x30, 0x69, 0x6a, - 0x36, 0x02, 0x15, 0x9b, 0xb7, 0xe1, 0x86, 0x72, 0x2c, 0x05, 0xe3, 0xb2, 0x49, 0x37, 0xb5, 0x9f, - 0xbc, 0x92, 0xbf, 0x14, 0x07, 0xd8, 0x3d, 0x3e, 0xf7, 0x39, 0x18, 0x4f, 0x17, 0xf6, 
0x30, 0xdb, - 0x41, 0xb9, 0x36, 0xe6, 0x8b, 0x00, 0x51, 0x4c, 0xc9, 0x7d, 0x70, 0x62, 0x59, 0x09, 0x27, 0xd6, - 0xe2, 0xe0, 0x0a, 0x0e, 0x0b, 0xdd, 0xd7, 0x71, 0xf5, 0x7a, 0xca, 0x71, 0xb5, 0x74, 0x14, 0xc2, - 0xee, 0xee, 0xac, 0xfa, 0xdf, 0x05, 0x18, 0x8f, 0x98, 0xe5, 0x09, 0xd9, 0xe7, 0x60, 0xd4, 0xa3, - 0xba, 0xd9, 0xd0, 0x03, 0x63, 0x8b, 0x87, 0x7a, 0x17, 0x78, 0x6c, 0xf6, 0xc9, 0xfd, 0xbd, 0xc9, - 0x51, 0x8c, 0x27, 0x60, 0x92, 0x8f, 0xe8, 0x50, 0x67, 0x84, 0x35, 0xab, 0x4d, 0xdd, 0x6e, 0x30, - 0xa0, 0xd7, 0x94, 0x2f, 0x58, 0x30, 0x82, 0xc1, 0x38, 0xa6, 0xf6, 0x6e, 0x01, 0xc6, 0xe2, 0x05, - 0x3e, 0x76, 0x8f, 0xda, 0x56, 0xd2, 0xa3, 0x36, 0x77, 0x04, 0xdf, 0xa4, 0x8f, 0x17, 0xed, 0x97, - 0xd5, 0x78, 0xd5, 0xb8, 0xe7, 0x2c, 0xee, 0x2c, 0x28, 0xdc, 0xd5, 0x59, 0xf0, 0xfe, 0xbf, 0xbc, - 0xa6, 0x9f, 0x95, 0x5b, 0x7e, 0x80, 0xad, 0xdc, 0xf7, 0xf2, 0x06, 0x9c, 0xd8, 0x2d, 0x2e, 0x43, - 0x39, 0x6e, 0x71, 0x69, 0x87, 0xb7, 0xb8, 0x0c, 0x1f, 0xd9, 0xa0, 0x73, 0x90, 0x9b, 0x5c, 0xaa, - 0xf7, 0xf5, 0x26, 0x97, 0xda, 0x71, 0xdd, 0xe4, 0x02, 0x79, 0x6f, 0x72, 0x79, 0xb3, 0x00, 0x63, - 0x66, 0xe2, 0xc4, 0x2c, 0xf7, 0x2d, 0xe4, 0x99, 0x6a, 0x92, 0x07, 0x70, 0xc5, 0x91, 0xa9, 0x24, - 0x0d, 0x53, 0x22, 0xb5, 0xff, 0x5b, 0x89, 0xcf, 0x03, 0xf7, 0xdb, 0x55, 0xfd, 0x6c, 0xd2, 0x55, - 0x7d, 0x31, 0xed, 0xaa, 0x3e, 0x11, 0x8b, 0x22, 0x8d, 0xbb, 0xab, 0x3f, 0x1e, 0x1b, 0x1e, 0x4b, - 0xfc, 0xe6, 0x94, 0x50, 0xd3, 0x19, 0x43, 0xe4, 0xc7, 0xa1, 0xea, 0xab, 0x3b, 0x27, 0xc5, 0xc2, - 0x26, 0xfa, 0x2e, 0xea, 0x3e, 0xc8, 0x90, 0x83, 0x59, 0xe2, 0x1e, 0xd5, 0x7d, 0xd7, 0x49, 0x5b, - 0xe2, 0xc8, 0xa9, 0x28, 0x53, 0xe3, 0x2e, 0xf3, 0xa1, 0x7b, 0xb8, 0xcc, 0x75, 0xa8, 0xdb, 0xba, - 0x1f, 0xac, 0x77, 0x4c, 0x3d, 0xa0, 0xa6, 0xec, 0x6f, 0xff, 0xe2, 0x60, 0x73, 0x15, 0x9b, 0xff, - 0x22, 0x83, 0x70, 0x39, 0x82, 0xc1, 0x38, 0x26, 0x31, 0x61, 0x84, 0xbd, 0xf2, 0xde, 0x60, 0xce, - 0xaa, 0x2b, 0x00, 0x0e, 0x23, 0x23, 0xf4, 0xf4, 0x2c, 0xc7, 0x70, 0x30, 0x81, 0xda, 0xc7, 0xab, - 0x5e, 0x1b, 0xc4, 0xab, 
0x4e, 0x3e, 0x2d, 0x8c, 0x8d, 0x5d, 0xf5, 0xc1, 0xb8, 0x37, 0x6e, 0x34, - 0x8a, 0x2a, 0xc4, 0x78, 0x22, 0x26, 0x79, 0xb5, 0x37, 0x6b, 0x50, 0xbf, 0xae, 0x07, 0xd6, 0x0e, - 0xe5, 0x5b, 0x40, 0xc7, 0xe3, 0x87, 0xff, 0x6f, 0x05, 0x38, 0x93, 0x8c, 0xf3, 0x3b, 0x46, 0x67, - 0x3c, 0xbf, 0x35, 0x04, 0x33, 0xa5, 0x61, 0x9f, 0x52, 0x70, 0xb7, 0x7c, 0x4f, 0xd8, 0xe0, 0x71, - 0xbb, 0xe5, 0x9b, 0xfd, 0x04, 0x62, 0xff, 0xb2, 0xbc, 0x5f, 0xdc, 0xf2, 0x0f, 0xf6, 0xe5, 0x72, - 0xa9, 0x4d, 0x83, 0xe1, 0x07, 0x66, 0xd3, 0xa0, 0xfa, 0x40, 0x58, 0x6a, 0x9d, 0xd8, 0xa6, 0x41, - 0x2d, 0x67, 0xf0, 0x8a, 0x0c, 0x8d, 0x17, 0x68, 0xfd, 0x36, 0x1f, 0xf8, 0xa9, 0x76, 0xe5, 0xcc, - 0x65, 0x06, 0xce, 0x86, 0xee, 0x5b, 0x86, 0x9c, 0x33, 0x73, 0x5c, 0xa6, 0xa9, 0xae, 0xfb, 0x12, - 0x7b, 0xdc, 0xfc, 0x15, 0x05, 0x76, 0x74, 0xbb, 0x59, 0x31, 0xd7, 0xed, 0x66, 0x64, 0x0e, 0xca, - 0x0e, 0x5b, 0x7a, 0x97, 0x0e, 0x7d, 0x91, 0xd8, 0xf5, 0x6b, 0x74, 0x17, 0x79, 0x66, 0xed, 0x9d, - 0x22, 0x00, 0xab, 0xfe, 0xc1, 0xdc, 0xf7, 0x1f, 0x85, 0x61, 0xbf, 0xcb, 0x17, 0xda, 0x72, 0xb6, - 0x8f, 0x22, 0x7e, 0x04, 0x19, 0x55, 0x3a, 0x79, 0x1c, 0x2a, 0xaf, 0x77, 0x69, 0x57, 0xed, 0x45, - 0x87, 0xb6, 0xde, 0xe7, 0x19, 0x11, 0x45, 0xda, 0xf1, 0xb9, 0xe2, 0x94, 0x9b, 0xbf, 0x72, 0x5c, - 0x6e, 0xfe, 0x1a, 0x0c, 0x5f, 0x77, 0x79, 0x00, 0xa1, 0xf6, 0xd7, 0x45, 0x80, 0x28, 0x40, 0x8b, - 0x7c, 0xbb, 0x00, 0x0f, 0x87, 0x1d, 0x2e, 0x10, 0x26, 0x3b, 0xbf, 0xbf, 0x36, 0xb7, 0xcb, 0x3f, - 0xab, 0xb3, 0xf3, 0x11, 0x68, 0x35, 0x4b, 0x1c, 0x66, 0x97, 0x82, 0x20, 0x54, 0x69, 0xbb, 0x13, - 0xec, 0xce, 0x5b, 0x9e, 0x6c, 0x81, 0x99, 0x71, 0x80, 0x97, 0x25, 0x8f, 0xc8, 0x2a, 0xd7, 0x95, - 0xbc, 0x13, 0xa9, 0x14, 0x0c, 0x71, 0xc8, 0x16, 0x54, 0x1d, 0xf7, 0x55, 0x9f, 0xa9, 0x43, 0x36, - 0xc7, 0x17, 0x07, 0x57, 0xb9, 0x50, 0xab, 0x70, 0x11, 0xcb, 0x17, 0x1c, 0x76, 0xa4, 0xb2, 0xbf, - 0x55, 0x84, 0x53, 0x19, 0x7a, 0x20, 0x2f, 0xc2, 0xb8, 0x8c, 0x85, 0x8b, 0x2e, 0x72, 0x2e, 0x44, - 0x17, 0x39, 0x37, 0x53, 0x69, 0xd8, 0xc3, 0x4d, 0x5e, 0x05, 
0xd0, 0x0d, 0x83, 0xfa, 0xfe, 0x8a, - 0x6b, 0x2a, 0x63, 0xf6, 0x85, 0xfd, 0xbd, 0x49, 0x98, 0x0d, 0xa9, 0x77, 0xf6, 0x26, 0x3f, 0x91, - 0x15, 0xde, 0x9a, 0xd2, 0x73, 0x94, 0x01, 0x63, 0x90, 0xe4, 0xcb, 0x00, 0x62, 0xdd, 0x16, 0x9e, - 0xc0, 0xbf, 0x87, 0xb3, 0x63, 0x4a, 0xdd, 0x75, 0x34, 0xf5, 0xf9, 0xae, 0xee, 0x04, 0x56, 0xb0, - 0x2b, 0x2e, 0x3c, 0xb9, 0x19, 0xa2, 0x60, 0x0c, 0x51, 0xfb, 0xdd, 0x22, 0x54, 0x95, 0x9b, 0xf5, - 0x3e, 0xf8, 0xd6, 0x5a, 0x09, 0xdf, 0xda, 0x11, 0x05, 0xb4, 0x66, 0x79, 0xd6, 0xdc, 0x94, 0x67, - 0x6d, 0x31, 0xbf, 0xa8, 0xbb, 0xfb, 0xd5, 0xbe, 0x57, 0x84, 0x31, 0xc5, 0x9a, 0xd7, 0xab, 0xf6, - 0x59, 0x38, 0x21, 0x36, 0xa2, 0x57, 0xf4, 0xdb, 0xe2, 0xee, 0x17, 0xae, 0xb0, 0xb2, 0x88, 0x21, - 0x6d, 0x24, 0x93, 0x30, 0xcd, 0xcb, 0x9a, 0xb5, 0x20, 0xad, 0xb3, 0x45, 0x88, 0xd8, 0xba, 0x12, - 0x8b, 0x25, 0xde, 0xac, 0x1b, 0xa9, 0x34, 0xec, 0xe1, 0x4e, 0xbb, 0xf5, 0xca, 0xc7, 0xe0, 0xd6, - 0xfb, 0x93, 0x02, 0x8c, 0x44, 0xfa, 0x3a, 0x76, 0xa7, 0xde, 0x66, 0xd2, 0xa9, 0x37, 0x9b, 0xbb, - 0x39, 0xf4, 0x71, 0xe9, 0xfd, 0xa7, 0x61, 0x48, 0xc4, 0x55, 0x93, 0x0d, 0x38, 0x67, 0x65, 0x46, - 0x87, 0xc5, 0x46, 0x9b, 0xf0, 0xa0, 0xf0, 0x52, 0x5f, 0x4e, 0xbc, 0x0b, 0x0a, 0xe9, 0x42, 0x75, - 0x87, 0x7a, 0x81, 0x65, 0x50, 0x55, 0xbf, 0xc5, 0xdc, 0x26, 0x99, 0x74, 0x5c, 0x86, 0x3a, 0xbd, - 0x29, 0x05, 0x60, 0x28, 0x8a, 0x6c, 0x40, 0x85, 0x9a, 0x2d, 0xaa, 0x6e, 0xe3, 0xc9, 0x79, 0xd7, - 0x65, 0xa8, 0x4f, 0xf6, 0xe6, 0xa3, 0x80, 0x26, 0x3e, 0xd4, 0x6c, 0xb5, 0x31, 0x25, 0xdb, 0xe1, - 0xe0, 0x06, 0x56, 0xb8, 0xc5, 0x15, 0x1d, 0xd4, 0x0f, 0x49, 0x18, 0xc9, 0x21, 0xdb, 0xa1, 0x87, - 0xac, 0x72, 0x44, 0x83, 0xc7, 0x5d, 0xfc, 0x63, 0x3e, 0xd4, 0x6e, 0xe9, 0x01, 0xf5, 0xda, 0xba, - 0xb7, 0x2d, 0x57, 0x1b, 0x83, 0xd7, 0xf0, 0x25, 0x85, 0x14, 0xd5, 0x30, 0x24, 0x61, 0x24, 0x87, - 0xb8, 0x50, 0x0b, 0xa4, 0xf9, 0xac, 0xdc, 0x80, 0x83, 0x0b, 0x55, 0x86, 0xb8, 0x2f, 0xe3, 0xab, - 0xd5, 0x2b, 0x46, 0x32, 0xc8, 0x4e, 0xe2, 0x3a, 0x62, 0x71, 0x09, 0x75, 0x23, 0x87, 0x3b, 0x59, - 
0x42, 0x45, 0xd3, 0x4d, 0xf6, 0xb5, 0xc6, 0xda, 0x3b, 0x95, 0x68, 0x58, 0xbe, 0xdf, 0x4e, 0xae, - 0xa7, 0x93, 0x4e, 0xae, 0x0b, 0x69, 0x27, 0x57, 0x6a, 0x7f, 0xf3, 0xf0, 0x11, 0x99, 0x29, 0xf7, - 0x52, 0xf9, 0x18, 0xdc, 0x4b, 0x4f, 0x41, 0x7d, 0x87, 0x8f, 0x04, 0xe2, 0x6a, 0x9f, 0x0a, 0x9f, - 0x46, 0xf8, 0xc8, 0x7e, 0x33, 0x22, 0x63, 0x9c, 0x87, 0x65, 0x91, 0x3f, 0x60, 0x08, 0xef, 0x46, - 0x95, 0x59, 0x9a, 0x11, 0x19, 0xe3, 0x3c, 0x3c, 0x98, 0xcb, 0x72, 0xb6, 0x45, 0x86, 0x61, 0x9e, - 0x41, 0x04, 0x73, 0x29, 0x22, 0x46, 0xe9, 0xe4, 0x12, 0x54, 0xbb, 0xe6, 0xa6, 0xe0, 0xad, 0x72, - 0x5e, 0x6e, 0x61, 0xae, 0xcf, 0x2f, 0xc8, 0xab, 0x86, 0x54, 0x2a, 0x2b, 0x49, 0x5b, 0xef, 0xa8, - 0x04, 0xbe, 0x36, 0x94, 0x25, 0x59, 0x89, 0xc8, 0x18, 0xe7, 0x21, 0x9f, 0x82, 0x31, 0x8f, 0x9a, - 0x5d, 0x83, 0x86, 0xb9, 0x84, 0x77, 0x8a, 0x88, 0x3f, 0x4d, 0xc4, 0x53, 0x30, 0xc5, 0xd9, 0xc7, - 0x49, 0x56, 0x1f, 0x28, 0xf4, 0xf4, 0xa7, 0x05, 0x20, 0xbd, 0xc1, 0xcf, 0x64, 0x0b, 0x86, 0x1c, - 0xee, 0xfd, 0xca, 0x7d, 0x9b, 0x72, 0xcc, 0x89, 0x26, 0x86, 0x25, 0x49, 0x90, 0xf8, 0xc4, 0x81, - 0x2a, 0xbd, 0x1d, 0x50, 0xcf, 0x09, 0x0f, 0x43, 0x1c, 0xcd, 0xcd, 0xcd, 0x62, 0x35, 0x20, 0x91, - 0x31, 0x94, 0xa1, 0xfd, 0xbc, 0x08, 0xf5, 0x18, 0xdf, 0xbd, 0x16, 0x95, 0xfc, 0x3c, 0xb6, 0x70, - 0x3a, 0xad, 0x7b, 0xb6, 0xec, 0x61, 0xb1, 0xf3, 0xd8, 0x32, 0x09, 0x97, 0x31, 0xce, 0x47, 0x66, - 0x00, 0xda, 0xba, 0x1f, 0x50, 0x8f, 0xcf, 0xbe, 0xa9, 0x53, 0xd0, 0x2b, 0x61, 0x0a, 0xc6, 0xb8, - 0xc8, 0x45, 0x79, 0xf7, 0x76, 0x39, 0x79, 0x6b, 0x5d, 0x9f, 0x8b, 0xb5, 0x2b, 0x47, 0x70, 0xb1, - 0x36, 0x69, 0xc1, 0xb8, 0x2a, 0xb5, 0x4a, 0x3d, 0xdc, 0x9d, 0x66, 0x62, 0xfd, 0x92, 0x82, 0xc0, - 0x1e, 0x50, 0xed, 0x9d, 0x02, 0x8c, 0x26, 0x5c, 0x1e, 0xe2, 0xbe, 0x39, 0x15, 0xba, 0x9f, 0xb8, - 0x6f, 0x2e, 0x16, 0x71, 0xff, 0x24, 0x0c, 0x09, 0x05, 0xa5, 0x23, 0xf2, 0x84, 0x0a, 0x51, 0xa6, - 0xb2, 0xb1, 0x4c, 0x3a, 0x55, 0xd3, 0x63, 0x99, 0xf4, 0xba, 0xa2, 0x4a, 0x17, 0xbe, 0x7a, 0x51, - 0xba, 0x5e, 0x5f, 0xbd, 0xa0, 0x63, 
0xc8, 0xa1, 0x7d, 0x9f, 0x97, 0x3b, 0xf0, 0x76, 0xc3, 0xb5, - 0x5c, 0x0b, 0x86, 0x65, 0x14, 0x96, 0xec, 0x1a, 0x2f, 0xe6, 0xf0, 0xc3, 0x70, 0x1c, 0x19, 0x6f, - 0xa4, 0x1b, 0xdb, 0x37, 0x36, 0x37, 0x51, 0xa1, 0x93, 0xcb, 0x50, 0x73, 0x9d, 0x05, 0xdd, 0xb2, - 0xbb, 0x9e, 0x1a, 0xd9, 0x3f, 0xc2, 0xc6, 0xaa, 0x1b, 0x8a, 0x78, 0x67, 0x6f, 0xf2, 0x4c, 0xf8, - 0x92, 0x28, 0x24, 0x46, 0x39, 0xb5, 0xbf, 0x2f, 0x01, 0x8f, 0xc0, 0x21, 0xcf, 0x41, 0xad, 0x4d, - 0x8d, 0x2d, 0xdd, 0xb1, 0x7c, 0x75, 0x63, 0x26, 0x5b, 0x9f, 0xd7, 0x56, 0x14, 0xf1, 0x0e, 0x53, - 0xc1, 0x6c, 0x73, 0x99, 0x07, 0xb9, 0x47, 0xbc, 0xc4, 0x80, 0xa1, 0x96, 0xef, 0xeb, 0x1d, 0x2b, - 0xf7, 0x06, 0xb0, 0xb8, 0xa1, 0x50, 0x0c, 0x03, 0xe2, 0x19, 0x25, 0x34, 0x31, 0xa0, 0xd2, 0xb1, - 0x75, 0xcb, 0xc9, 0xfd, 0xa7, 0x18, 0x56, 0x83, 0x55, 0x86, 0x24, 0x9c, 0x52, 0xfc, 0x11, 0x05, - 0x36, 0xe9, 0x42, 0xdd, 0x37, 0x3c, 0xbd, 0xed, 0x6f, 0xe9, 0x33, 0xcf, 0x3c, 0x9b, 0xdb, 0xcc, - 0x8b, 0x44, 0x89, 0x59, 0x67, 0x0e, 0x67, 0x57, 0x9a, 0x57, 0x66, 0x67, 0x9e, 0x79, 0x16, 0xe3, - 0x72, 0xe2, 0x62, 0x9f, 0x79, 0x6a, 0x46, 0xf6, 0xdc, 0x23, 0x17, 0xfb, 0xcc, 0x53, 0x33, 0x18, - 0x97, 0xa3, 0xfd, 0x5d, 0x01, 0x6a, 0x21, 0x2f, 0x59, 0x07, 0x60, 0x63, 0x88, 0xbc, 0x53, 0xf0, - 0x50, 0xf7, 0xfb, 0xf3, 0x75, 0xfd, 0x7a, 0x98, 0x19, 0x63, 0x40, 0x19, 0x97, 0x2e, 0x16, 0x8f, - 0xfa, 0xd2, 0xc5, 0x69, 0xa8, 0x6d, 0xe9, 0x8e, 0xe9, 0x6f, 0xe9, 0xdb, 0x62, 0x28, 0x8d, 0x5d, - 0x43, 0x7a, 0x45, 0x25, 0x60, 0xc4, 0xa3, 0xfd, 0xf6, 0x10, 0x88, 0x5d, 0x5b, 0xd6, 0xd9, 0x4d, - 0xcb, 0x17, 0x61, 0xc3, 0x05, 0x9e, 0x33, 0xec, 0xec, 0xf3, 0x92, 0x8e, 0x21, 0x07, 0x39, 0x0b, - 0xa5, 0xb6, 0xe5, 0xc8, 0x3d, 0x1b, 0xee, 0xb2, 0x5b, 0xb1, 0x1c, 0x64, 0x34, 0x9e, 0xa4, 0xdf, - 0x96, 0x11, 0x5f, 0x22, 0x49, 0xbf, 0x8d, 0x8c, 0xc6, 0x16, 0xd1, 0xb6, 0xeb, 0x6e, 0xb3, 0x6e, - 0xab, 0x02, 0xc3, 0xca, 0x7c, 0x2a, 0xe7, 0x8b, 0xe8, 0xe5, 0x64, 0x12, 0xa6, 0x79, 0xc9, 0x3a, - 0x3c, 0xf2, 0x06, 0xf5, 0x5c, 0x39, 0x4e, 0x35, 0x6d, 0x4a, 0x3b, 0x0a, 
0x46, 0x18, 0x41, 0x3c, - 0xbe, 0xec, 0x8b, 0xd9, 0x2c, 0xd8, 0x2f, 0x2f, 0x8f, 0x54, 0xd5, 0xbd, 0x16, 0x0d, 0x56, 0x3d, - 0xd7, 0xa0, 0xbe, 0x6f, 0x39, 0x2d, 0x05, 0x3b, 0x14, 0xc1, 0xae, 0x65, 0xb3, 0x60, 0xbf, 0xbc, - 0xe4, 0x65, 0x98, 0x10, 0x49, 0xc2, 0x5c, 0x98, 0xdd, 0xd1, 0x2d, 0x5b, 0xdf, 0xb0, 0x6c, 0xf5, - 0x83, 0xb5, 0x51, 0xb1, 0x33, 0xb2, 0xd6, 0x87, 0x07, 0xfb, 0xe6, 0x26, 0x57, 0x61, 0x5c, 0xed, - 0x8b, 0xad, 0x52, 0xaf, 0x19, 0xee, 0xe4, 0x8f, 0x36, 0x2e, 0xb0, 0x15, 0xeb, 0x3c, 0xed, 0x78, - 0xd4, 0xe0, 0x76, 0x63, 0x8a, 0x0b, 0x7b, 0xf2, 0x11, 0x84, 0x33, 0x7c, 0xbb, 0x7e, 0xbd, 0x33, - 0xe7, 0xba, 0xb6, 0xe9, 0xde, 0x72, 0x54, 0xdd, 0x85, 0x69, 0xc6, 0xb7, 0xc2, 0x9a, 0x99, 0x1c, - 0xd8, 0x27, 0x27, 0xab, 0x39, 0x4f, 0x99, 0x77, 0x6f, 0x39, 0x69, 0x54, 0x88, 0x6a, 0xde, 0xec, - 0xc3, 0x83, 0x7d, 0x73, 0x93, 0x05, 0x20, 0xe9, 0x1a, 0xac, 0x77, 0xb8, 0x39, 0x37, 0xda, 0x38, - 0x23, 0xae, 0x07, 0x49, 0xa7, 0x62, 0x46, 0x0e, 0xb2, 0x0c, 0xa7, 0xd3, 0x54, 0x26, 0x8e, 0x9f, - 0x11, 0x18, 0x15, 0x17, 0x83, 0x62, 0x46, 0x3a, 0x66, 0xe6, 0xd2, 0x7e, 0xa7, 0x08, 0xa3, 0x89, - 0xf3, 0xe4, 0x0f, 0xdc, 0xb9, 0x5d, 0x66, 0x43, 0xb7, 0xfd, 0xd6, 0xd2, 0xfc, 0x15, 0xaa, 0x9b, - 0xd4, 0xbb, 0x46, 0xd5, 0xd9, 0x7f, 0x3e, 0xa8, 0xac, 0x24, 0x52, 0x30, 0xc5, 0x49, 0x36, 0xa1, - 0x22, 0x3c, 0xc2, 0x79, 0xff, 0x14, 0xa1, 0x74, 0xc4, 0xdd, 0xc2, 0xf2, 0xf7, 0x2a, 0xae, 0x47, - 0x51, 0xc0, 0x6b, 0x01, 0x8c, 0xc4, 0x39, 0xd8, 0x40, 0x12, 0x99, 0x9b, 0xc3, 0x09, 0x53, 0x73, - 0x09, 0x4a, 0x41, 0x30, 0xe8, 0x89, 0x60, 0xb1, 0xc3, 0xb0, 0xb6, 0x8c, 0x0c, 0x43, 0xdb, 0x64, - 0xdf, 0xce, 0xf7, 0x2d, 0xd7, 0x91, 0xd7, 0x43, 0xaf, 0xc3, 0x70, 0x20, 0x9d, 0x6c, 0x83, 0x9d, - 0x68, 0xe6, 0x36, 0x8a, 0x72, 0xb0, 0x29, 0x2c, 0xed, 0x4f, 0x8b, 0x50, 0x0b, 0x17, 0xc4, 0x07, - 0xb8, 0x76, 0xd9, 0x85, 0x5a, 0x18, 0x6e, 0x94, 0xfb, 0xe7, 0x73, 0x51, 0x14, 0x0c, 0x5f, 0xc3, - 0x85, 0xaf, 0x18, 0xc9, 0x88, 0x87, 0x32, 0x95, 0x72, 0x84, 0x32, 0x75, 0x60, 0x38, 0xf0, 0xac, - 0x56, 0x4b, 
0x5a, 0xe7, 0x79, 0x62, 0x99, 0x42, 0x75, 0xad, 0x09, 0x40, 0xa9, 0x59, 0xf1, 0x82, - 0x4a, 0x8c, 0xf6, 0x1a, 0x8c, 0xa7, 0x39, 0xb9, 0xe9, 0x6a, 0x6c, 0x51, 0xb3, 0x6b, 0x2b, 0x1d, - 0x47, 0xa6, 0xab, 0xa4, 0x63, 0xc8, 0xc1, 0x96, 0xaf, 0xec, 0x33, 0xbd, 0xe1, 0x3a, 0xca, 0x7c, - 0xe4, 0xab, 0x80, 0x35, 0x49, 0xc3, 0x30, 0x55, 0xfb, 0xab, 0x12, 0x9c, 0x8d, 0xdc, 0x1a, 0x2b, - 0xba, 0xa3, 0xb7, 0x0e, 0xf0, 0xc7, 0xb1, 0x0f, 0xce, 0x88, 0x1c, 0xf6, 0xee, 0xfc, 0xd2, 0x03, - 0x70, 0x77, 0xfe, 0x3f, 0x14, 0x81, 0x87, 0x46, 0x92, 0xaf, 0xc2, 0x88, 0x1e, 0xfb, 0xd9, 0xa4, - 0xfc, 0x9c, 0x97, 0x73, 0x7f, 0x4e, 0x1e, 0x81, 0x19, 0x86, 0xfa, 0xc4, 0xa9, 0x98, 0x10, 0x48, - 0x5c, 0xa8, 0x6e, 0xea, 0xb6, 0xcd, 0x6c, 0xa1, 0xdc, 0xdb, 0x34, 0x09, 0xe1, 0xbc, 0x99, 0x2f, - 0x48, 0x68, 0x0c, 0x85, 0x90, 0x37, 0x0b, 0x30, 0xea, 0xc5, 0x97, 0x49, 0xf2, 0x83, 0xe4, 0xd9, - 0xc4, 0x8f, 0xa1, 0xc5, 0x03, 0x8b, 0xe2, 0x6b, 0xb1, 0xa4, 0x4c, 0xed, 0x2f, 0x0b, 0x30, 0xda, - 0xb4, 0x2d, 0xd3, 0x72, 0x5a, 0xc7, 0x78, 0x75, 0xff, 0x0d, 0xa8, 0xf8, 0xb6, 0x65, 0xd2, 0x01, - 0x67, 0x13, 0x31, 0x8f, 0x31, 0x00, 0x14, 0x38, 0xc9, 0x7f, 0x01, 0x94, 0x0e, 0xf0, 0x2f, 0x80, - 0x5f, 0x0d, 0x81, 0x0c, 0xf2, 0x25, 0x5d, 0xa8, 0xb5, 0xd4, 0x15, 0xe3, 0xb2, 0x8e, 0x57, 0x72, - 0x5c, 0x4f, 0x97, 0xb8, 0xac, 0x5c, 0x8c, 0xfd, 0x21, 0x11, 0x23, 0x49, 0x84, 0x26, 0xff, 0x72, - 0x3a, 0x9f, 0xf3, 0x2f, 0xa7, 0x42, 0x5c, 0xef, 0x7f, 0x4e, 0x75, 0x28, 0x6f, 0x05, 0x41, 0x47, - 0x36, 0xa6, 0xc1, 0xa3, 0xb8, 0xa3, 0x1b, 0x52, 0x84, 0x4d, 0xc4, 0xde, 0x91, 0x43, 0x33, 0x11, - 0x8e, 0x1e, 0xfe, 0xd5, 0x6a, 0x2e, 0x57, 0xc0, 0x40, 0x5c, 0x04, 0x7b, 0x47, 0x0e, 0x4d, 0xbe, - 0x02, 0xf5, 0xc0, 0xd3, 0x1d, 0x7f, 0xd3, 0xf5, 0xda, 0xd4, 0x93, 0x6b, 0xd4, 0x85, 0x1c, 0x3f, - 0xfa, 0x5c, 0x8b, 0xd0, 0xc4, 0x4e, 0x64, 0x82, 0x84, 0x71, 0x69, 0x64, 0x1b, 0xaa, 0x5d, 0x53, - 0x14, 0x4c, 0xba, 0x9f, 0x66, 0xf3, 0xfc, 0xbb, 0x35, 0x16, 0x0e, 0xa0, 0xde, 0x30, 0x14, 0x90, - 0xfc, 0x81, 0xdb, 0xf0, 0x51, 0xfd, 0xc0, 0x2d, 
0xde, 0x1a, 0xb3, 0xae, 0x6f, 0x20, 0x6d, 0x69, - 0xd7, 0x3a, 0x2d, 0x19, 0xcd, 0xb4, 0x90, 0xdb, 0xe4, 0x14, 0x22, 0xeb, 0xa1, 0x6d, 0xec, 0xb4, - 0x50, 0xc9, 0xd0, 0xda, 0x20, 0x77, 0x09, 0x88, 0x91, 0xf8, 0xcd, 0x89, 0x38, 0x53, 0x34, 0x7d, - 0xb0, 0xf1, 0x20, 0xfc, 0xdf, 0x46, 0xec, 0x9a, 0xe5, 0xcc, 0xff, 0x99, 0x68, 0x7f, 0x56, 0x84, - 0xd2, 0xda, 0x72, 0x53, 0x5c, 0x9d, 0xc8, 0xff, 0x21, 0x44, 0x9b, 0xdb, 0x56, 0xe7, 0x26, 0xf5, - 0xac, 0xcd, 0x5d, 0xb9, 0xf4, 0x8e, 0x5d, 0x9d, 0x98, 0xe6, 0xc0, 0x8c, 0x5c, 0xe4, 0x15, 0x18, - 0x31, 0xf4, 0x39, 0xea, 0x05, 0x83, 0x38, 0x16, 0xf8, 0xe1, 0xc9, 0xb9, 0xd9, 0x28, 0x3b, 0x26, - 0xc0, 0xc8, 0x3a, 0x80, 0x11, 0x41, 0x97, 0x0e, 0xed, 0x0e, 0x89, 0x01, 0xc7, 0x80, 0x08, 0x42, - 0x6d, 0x9b, 0xb1, 0x72, 0xd4, 0xf2, 0x61, 0x50, 0x79, 0xcb, 0xb9, 0xa6, 0xf2, 0x62, 0x04, 0xa3, - 0x39, 0x30, 0x9a, 0xf8, 0xf7, 0x09, 0xf9, 0x24, 0x54, 0xdd, 0x4e, 0x6c, 0x38, 0xad, 0xf1, 0xb8, - 0xc9, 0xea, 0x0d, 0x49, 0xbb, 0xb3, 0x37, 0x39, 0xba, 0xec, 0xb6, 0x2c, 0x43, 0x11, 0x30, 0x64, - 0x27, 0x1a, 0x0c, 0xf1, 0x13, 0x4f, 0xea, 0xcf, 0x27, 0x7c, 0xee, 0xe0, 0x3f, 0x27, 0xf0, 0x51, - 0xa6, 0x68, 0x5f, 0x2b, 0x43, 0xb4, 0xb7, 0x46, 0x7c, 0x18, 0x12, 0x11, 0xdd, 0x72, 0xe4, 0x3e, - 0xd6, 0xe0, 0x71, 0x29, 0x8a, 0xb4, 0xa0, 0xf4, 0x9a, 0xbb, 0x91, 0x7b, 0xe0, 0x8e, 0x1d, 0x75, - 0x16, 0xbe, 0xb2, 0x18, 0x01, 0x99, 0x04, 0xf2, 0xdf, 0x0b, 0x70, 0xd2, 0x4f, 0x9b, 0xbe, 0xb2, - 0x39, 0x60, 0x7e, 0x1b, 0x3f, 0x6d, 0x4c, 0xcb, 0x00, 0xd7, 0x7e, 0xc9, 0xd8, 0x5b, 0x16, 0xa6, - 0x7f, 0xb1, 0xe9, 0x25, 0x9b, 0xd3, 0x62, 0xce, 0xff, 0xf5, 0x25, 0xf5, 0x9f, 0xa4, 0xa1, 0x14, - 0xa5, 0x7d, 0xa3, 0x08, 0xf5, 0xd8, 0x68, 0x9d, 0xfb, 0x87, 0x3a, 0xb7, 0x53, 0x3f, 0xd4, 0x59, - 0x1d, 0x7c, 0x0f, 0x38, 0x2a, 0xd5, 0x71, 0xff, 0x53, 0xe7, 0x47, 0x45, 0x28, 0xad, 0xcf, 0x2f, - 0x24, 0x17, 0xad, 0x85, 0xfb, 0xb0, 0x68, 0xdd, 0x82, 0xe1, 0x8d, 0xae, 0x65, 0x07, 0x96, 0x93, - 0xfb, 0x32, 0x06, 0xf5, 0xff, 0x21, 0xb9, 0xc7, 0x20, 0x50, 0x51, 0xc1, 0x93, 0x16, 
0x0c, 0xb7, - 0xc4, 0x6d, 0x78, 0xb9, 0x23, 0xe3, 0xe4, 0xad, 0x7a, 0x42, 0x90, 0x7c, 0x41, 0x85, 0xae, 0xed, - 0x82, 0xfc, 0x91, 0xfa, 0x7d, 0xd7, 0xa6, 0xf6, 0x15, 0x08, 0xad, 0x80, 0xfb, 0x2f, 0xfc, 0x6f, - 0x0a, 0x90, 0x34, 0x7c, 0xee, 0x7f, 0x6b, 0xda, 0x4e, 0xb7, 0xa6, 0xf9, 0xa3, 0xe8, 0x7c, 0xd9, - 0x0d, 0x4a, 0xfb, 0xad, 0x22, 0x0c, 0xdd, 0xb7, 0x03, 0xb4, 0x34, 0x11, 0xe4, 0x37, 0x97, 0x73, - 0x60, 0xec, 0x1b, 0xe2, 0xd7, 0x4e, 0x85, 0xf8, 0xe5, 0xfd, 0x63, 0xea, 0x3d, 0x02, 0xfc, 0xfe, - 0xa8, 0x00, 0x72, 0x58, 0x5e, 0x72, 0xfc, 0x40, 0x77, 0x0c, 0x4a, 0x8c, 0x70, 0x0e, 0xc8, 0x1b, - 0x49, 0x22, 0xa3, 0xad, 0xc4, 0xb4, 0xcf, 0x9f, 0xd5, 0x98, 0x4f, 0x3e, 0x0e, 0xd5, 0x2d, 0xd7, - 0x0f, 0xf8, 0x38, 0x5f, 0x4c, 0x7a, 0x97, 0xae, 0x48, 0x3a, 0x86, 0x1c, 0xe9, 0x1d, 0xd7, 0x4a, - 0xff, 0x1d, 0x57, 0xed, 0xbb, 0x45, 0x18, 0x79, 0xbf, 0x9c, 0x02, 0xce, 0x0a, 0x89, 0x2c, 0xe5, - 0x0c, 0x89, 0x2c, 0x1f, 0x26, 0x24, 0x52, 0xfb, 0x71, 0x01, 0xe0, 0xbe, 0x1d, 0x41, 0x36, 0x93, - 0xd1, 0x8a, 0xb9, 0xdb, 0x55, 0x76, 0xac, 0xe2, 0x6f, 0x54, 0x54, 0x95, 0x78, 0xa4, 0xe2, 0x5b, - 0x05, 0x18, 0xd3, 0x13, 0xd1, 0x7f, 0xb9, 0x4d, 0xcb, 0x54, 0x30, 0x61, 0x78, 0xdc, 0x32, 0x49, - 0xc7, 0x94, 0x58, 0xf2, 0x7c, 0x74, 0xfd, 0xed, 0xf5, 0xa8, 0xd9, 0xf7, 0xdc, 0x5b, 0xcb, 0xcd, - 0x9c, 0x04, 0xe7, 0x3d, 0xa2, 0x2d, 0x4b, 0x47, 0x12, 0x6d, 0x19, 0x3f, 0x47, 0x56, 0xbe, 0xeb, - 0x39, 0xb2, 0x1d, 0xa8, 0x6d, 0x7a, 0x6e, 0x9b, 0x07, 0x34, 0xca, 0x7f, 0xad, 0x5e, 0xce, 0x31, - 0xa7, 0x44, 0x7f, 0x19, 0x8f, 0x7c, 0x3c, 0x0b, 0x0a, 0x1f, 0x23, 0x51, 0xdc, 0x2d, 0xee, 0x0a, - 0xa9, 0x43, 0x47, 0x29, 0x35, 0x1c, 0x4b, 0xd6, 0x04, 0x3a, 0x2a, 0x31, 0xc9, 0x20, 0xc6, 0xe1, - 0xfb, 0x13, 0xc4, 0xa8, 0xfd, 0xa8, 0xac, 0x06, 0xb0, 0x07, 0xee, 0xa6, 0xc5, 0xf7, 0xff, 0xd1, - 0xd5, 0xf4, 0xb9, 0xd2, 0xe1, 0xfb, 0x78, 0xae, 0xb4, 0x7a, 0x34, 0xe7, 0x4a, 0x6b, 0x87, 0x38, - 0x57, 0xba, 0x57, 0x82, 0xd4, 0xa2, 0xeb, 0x83, 0x0d, 0x96, 0x7f, 0x52, 0x1b, 0x2c, 0x6f, 0x17, - 0x21, 0x1a, 0x45, 0x0e, 
0x19, 0x80, 0xf2, 0x32, 0x54, 0xdb, 0xfa, 0xed, 0x79, 0x6a, 0xeb, 0xbb, - 0x79, 0xfe, 0xae, 0xb9, 0x22, 0x31, 0x30, 0x44, 0x23, 0x3e, 0x80, 0x15, 0xde, 0x70, 0x9d, 0xdb, - 0x55, 0x1d, 0x5d, 0x96, 0x2d, 0x9c, 0x61, 0xd1, 0x3b, 0xc6, 0xc4, 0x68, 0x7f, 0x58, 0x04, 0x79, - 0x15, 0x3a, 0xa1, 0x50, 0xd9, 0xb4, 0x6e, 0x53, 0x33, 0x77, 0x38, 0x69, 0xec, 0x9f, 0xc7, 0xc2, - 0x17, 0xcf, 0x09, 0x28, 0xd0, 0xb9, 0x93, 0x55, 0xec, 0xad, 0x48, 0xfd, 0xe5, 0x70, 0xb2, 0xc6, - 0xf7, 0x68, 0xa4, 0x93, 0x55, 0x90, 0x50, 0xc9, 0x10, 0x3e, 0x5d, 0xbe, 0xcd, 0x9e, 0x7b, 0x2b, - 0x29, 0xb1, 0x5d, 0xaf, 0x7c, 0xba, 0xbe, 0x38, 0x58, 0x2e, 0x65, 0x34, 0xbe, 0xf4, 0xc3, 0x9f, - 0x5c, 0x78, 0xe8, 0xc7, 0x3f, 0xb9, 0xf0, 0xd0, 0xbb, 0x3f, 0xb9, 0xf0, 0xd0, 0xd7, 0xf6, 0x2f, - 0x14, 0x7e, 0xb8, 0x7f, 0xa1, 0xf0, 0xe3, 0xfd, 0x0b, 0x85, 0x77, 0xf7, 0x2f, 0x14, 0xfe, 0x7c, - 0xff, 0x42, 0xe1, 0xbf, 0xfc, 0xc5, 0x85, 0x87, 0xbe, 0xf8, 0x5c, 0x54, 0x84, 0x69, 0x55, 0x84, - 0x69, 0x25, 0x70, 0xba, 0xb3, 0xdd, 0x9a, 0x66, 0x45, 0x88, 0x28, 0xaa, 0x08, 0xff, 0x18, 0x00, - 0x00, 0xff, 0xff, 0xa1, 0xe2, 0x38, 0xfd, 0x6e, 0x95, 0x00, 0x00, + 0x75, 0xa6, 0xfa, 0xbf, 0xfb, 0x34, 0xc9, 0xe1, 0xdc, 0x19, 0x8d, 0x38, 0xa3, 0xd1, 0x70, 0x5c, + 0xb2, 0xe4, 0xf1, 0xda, 0x26, 0x57, 0x5c, 0xfd, 0xf9, 0x57, 0x62, 0x93, 0x43, 0x0e, 0x67, 0xc8, + 0x19, 0xfa, 0x34, 0x39, 0x92, 0xad, 0xb5, 0xb5, 0xc5, 0xea, 0xcb, 0x66, 0x89, 0xd5, 0x55, 0xad, + 0xaa, 0x6a, 0xce, 0x50, 0xde, 0x85, 0x7f, 0xb4, 0x80, 0xb4, 0x58, 0x2c, 0x76, 0xe1, 0x27, 0x03, + 0x0b, 0xef, 0x62, 0x17, 0xbb, 0xf0, 0x83, 0xe1, 0x7d, 0x58, 0x40, 0xfb, 0x60, 0x60, 0xe3, 0x38, + 0x08, 0x12, 0x27, 0xc8, 0x8f, 0x1f, 0x02, 0x44, 0x79, 0x21, 0x62, 0x06, 0x79, 0x48, 0x80, 0x18, + 0x46, 0x0c, 0x24, 0xf6, 0xc0, 0x88, 0x83, 0xfb, 0x57, 0x7f, 0x5d, 0x3d, 0x43, 0x76, 0x91, 0xa3, + 0x51, 0xa2, 0xb7, 0xaa, 0x7b, 0xcf, 0xfd, 0xce, 0xad, 0x53, 0xf7, 0xe7, 0xdc, 0x73, 0xce, 0xbd, + 0x17, 0x16, 0xdb, 0xa6, 0xbf, 0xd5, 0xdb, 0x98, 0x32, 0x9c, 0xce, 0xb4, 0xdd, 0xeb, 0xe8, 
0x5d, + 0xd7, 0x79, 0x8d, 0x3f, 0x6c, 0x5a, 0xce, 0xad, 0xe9, 0xee, 0x76, 0x7b, 0x5a, 0xef, 0x9a, 0x5e, + 0x98, 0xb2, 0xf3, 0x94, 0x6e, 0x75, 0xb7, 0xf4, 0xa7, 0xa6, 0xdb, 0xd4, 0xa6, 0xae, 0xee, 0xd3, + 0xd6, 0x54, 0xd7, 0x75, 0x7c, 0x87, 0x3c, 0x17, 0x02, 0x4d, 0x29, 0xa0, 0x29, 0x55, 0x6c, 0xaa, + 0xbb, 0xdd, 0x9e, 0x62, 0x40, 0x61, 0x8a, 0x02, 0x3a, 0xf7, 0x89, 0x48, 0x0d, 0xda, 0x4e, 0xdb, + 0x99, 0xe6, 0x78, 0x1b, 0xbd, 0x4d, 0xfe, 0xc6, 0x5f, 0xf8, 0x93, 0xe0, 0x73, 0x4e, 0xdb, 0x7e, + 0xde, 0x9b, 0x32, 0x1d, 0x56, 0xad, 0x69, 0xc3, 0x71, 0xe9, 0xf4, 0x4e, 0x5f, 0x5d, 0xce, 0x3d, + 0x1d, 0xd2, 0x74, 0x74, 0x63, 0xcb, 0xb4, 0xa9, 0xbb, 0xab, 0xbe, 0x65, 0xda, 0xa5, 0x9e, 0xd3, + 0x73, 0x0d, 0x7a, 0xa8, 0x52, 0xde, 0x74, 0x87, 0xfa, 0x7a, 0x1a, 0xaf, 0xe9, 0x41, 0xa5, 0xdc, + 0x9e, 0xed, 0x9b, 0x9d, 0x7e, 0x36, 0xcf, 0xde, 0xab, 0x80, 0x67, 0x6c, 0xd1, 0x8e, 0x9e, 0x2c, + 0xa7, 0xfd, 0x10, 0xe0, 0xd4, 0xec, 0x86, 0xe7, 0xbb, 0xba, 0xe1, 0xaf, 0x3a, 0xad, 0x35, 0xda, + 0xe9, 0x5a, 0xba, 0x4f, 0xc9, 0x36, 0x54, 0x59, 0xdd, 0x5a, 0xba, 0xaf, 0x4f, 0xe4, 0x2e, 0xe6, + 0x2e, 0xd5, 0x67, 0x66, 0xa7, 0x86, 0xfc, 0x17, 0x53, 0x2b, 0x12, 0xa8, 0x31, 0xb2, 0xbf, 0x37, + 0x59, 0x55, 0x6f, 0x18, 0x30, 0x20, 0xdf, 0xca, 0xc1, 0x88, 0xed, 0xb4, 0x68, 0x93, 0x5a, 0xd4, + 0xf0, 0x1d, 0x77, 0x22, 0x7f, 0xb1, 0x70, 0xa9, 0x3e, 0xf3, 0xe5, 0xa1, 0x39, 0xa6, 0x7c, 0xd1, + 0xd4, 0xf5, 0x08, 0x83, 0xcb, 0xb6, 0xef, 0xee, 0x36, 0x4e, 0xff, 0x68, 0x6f, 0xf2, 0xa1, 0xfd, + 0xbd, 0xc9, 0x91, 0x68, 0x16, 0xc6, 0x6a, 0x42, 0xd6, 0xa1, 0xee, 0x3b, 0x16, 0x13, 0x99, 0xe9, + 0xd8, 0xde, 0x44, 0x81, 0x57, 0xec, 0xc2, 0x94, 0x90, 0x36, 0x63, 0x3f, 0xc5, 0x9a, 0xcb, 0xd4, + 0xce, 0x53, 0x53, 0x6b, 0x01, 0x59, 0xe3, 0x94, 0x04, 0xae, 0x87, 0x69, 0x1e, 0x46, 0x71, 0x08, + 0x85, 0x13, 0x1e, 0x35, 0x7a, 0xae, 0xe9, 0xef, 0xce, 0x39, 0xb6, 0x4f, 0x6f, 0xfb, 0x13, 0x45, + 0x2e, 0xe5, 0x27, 0xd3, 0xa0, 0x57, 0x9d, 0x56, 0x33, 0x4e, 0xdd, 0x38, 0xb5, 0xbf, 0x37, 0x79, + 0x22, 0x91, 0x88, 0x49, 0x4c, 
0x62, 0xc3, 0xb8, 0xd9, 0xd1, 0xdb, 0x74, 0xb5, 0x67, 0x59, 0x4d, + 0x6a, 0xb8, 0xd4, 0xf7, 0x26, 0x4a, 0xfc, 0x13, 0x2e, 0xa5, 0xf1, 0x59, 0x76, 0x0c, 0xdd, 0xba, + 0xb1, 0xf1, 0x1a, 0x35, 0x7c, 0xa4, 0x9b, 0xd4, 0xa5, 0xb6, 0x41, 0x1b, 0x13, 0xf2, 0x63, 0xc6, + 0x97, 0x12, 0x48, 0xd8, 0x87, 0x4d, 0x16, 0xe1, 0x64, 0xd7, 0x35, 0x1d, 0x5e, 0x05, 0x4b, 0xf7, + 0xbc, 0xeb, 0x7a, 0x87, 0x4e, 0x94, 0x2f, 0xe6, 0x2e, 0xd5, 0x1a, 0x67, 0x25, 0xcc, 0xc9, 0xd5, + 0x24, 0x01, 0xf6, 0x97, 0x21, 0x97, 0xa0, 0xaa, 0x12, 0x27, 0x2a, 0x17, 0x73, 0x97, 0x4a, 0xa2, + 0xed, 0xa8, 0xb2, 0x18, 0xe4, 0x92, 0x05, 0xa8, 0xea, 0x9b, 0x9b, 0xa6, 0xcd, 0x28, 0xab, 0x5c, + 0x84, 0xe7, 0xd3, 0x3e, 0x6d, 0x56, 0xd2, 0x08, 0x1c, 0xf5, 0x86, 0x41, 0x59, 0x72, 0x15, 0x88, + 0x47, 0xdd, 0x1d, 0xd3, 0xa0, 0xb3, 0x86, 0xe1, 0xf4, 0x6c, 0x9f, 0xd7, 0xbd, 0xc6, 0xeb, 0x7e, + 0x4e, 0xd6, 0x9d, 0x34, 0xfb, 0x28, 0x30, 0xa5, 0x14, 0x79, 0x11, 0xc6, 0x65, 0xb7, 0x0b, 0xa5, + 0x00, 0x1c, 0xe9, 0x34, 0x13, 0x24, 0x26, 0xf2, 0xb0, 0x8f, 0x9a, 0xb4, 0xe0, 0xbc, 0xde, 0xf3, + 0x9d, 0x0e, 0x83, 0x8c, 0x33, 0x5d, 0x73, 0xb6, 0xa9, 0x3d, 0x51, 0xbf, 0x98, 0xbb, 0x54, 0x6d, + 0x5c, 0xdc, 0xdf, 0x9b, 0x3c, 0x3f, 0x7b, 0x17, 0x3a, 0xbc, 0x2b, 0x0a, 0xb9, 0x01, 0xb5, 0x96, + 0xed, 0xad, 0x3a, 0x96, 0x69, 0xec, 0x4e, 0x8c, 0xf0, 0x0a, 0x3e, 0x25, 0x3f, 0xb5, 0x36, 0x7f, + 0xbd, 0x29, 0x32, 0xee, 0xec, 0x4d, 0x9e, 0xef, 0x1f, 0x1d, 0xa7, 0x82, 0x7c, 0x0c, 0x31, 0xc8, + 0x0a, 0x07, 0x9c, 0x73, 0xec, 0x4d, 0xb3, 0x3d, 0x31, 0xca, 0xff, 0xc6, 0xc5, 0x01, 0x0d, 0x7a, + 0xfe, 0x7a, 0x53, 0xd0, 0x35, 0x46, 0x25, 0x3b, 0xf1, 0x8a, 0x21, 0x02, 0x69, 0xc1, 0x98, 0x1a, + 0x57, 0xe7, 0x2c, 0xdd, 0xec, 0x78, 0x13, 0x63, 0xbc, 0xf1, 0x7e, 0x78, 0x00, 0x26, 0x46, 0x89, + 0x1b, 0x67, 0xe4, 0xa7, 0x8c, 0xc5, 0x92, 0x3d, 0x4c, 0x60, 0x9e, 0x7b, 0x01, 0x4e, 0xf6, 0x8d, + 0x0d, 0x64, 0x1c, 0x0a, 0xdb, 0x74, 0x97, 0x0f, 0x7d, 0x35, 0x64, 0x8f, 0xe4, 0x34, 0x94, 0x76, + 0x74, 0xab, 0x47, 0x27, 0xf2, 0x3c, 0x4d, 0xbc, 0x7c, 0x2a, 0xff, 
0x7c, 0x4e, 0xfb, 0x9f, 0x05, + 0x18, 0x51, 0x23, 0x4e, 0xd3, 0xb4, 0xb7, 0xc9, 0x4b, 0x50, 0xb0, 0x9c, 0xb6, 0x1c, 0x37, 0x3f, + 0x33, 0xf4, 0x28, 0xb6, 0xec, 0xb4, 0x1b, 0x95, 0xfd, 0xbd, 0xc9, 0xc2, 0xb2, 0xd3, 0x46, 0x86, + 0x48, 0x0c, 0x28, 0x6d, 0xeb, 0x9b, 0xdb, 0x3a, 0xaf, 0x43, 0x7d, 0xa6, 0x31, 0x34, 0xf4, 0x35, + 0x86, 0xc2, 0xea, 0xda, 0xa8, 0xed, 0xef, 0x4d, 0x96, 0xf8, 0x2b, 0x0a, 0x6c, 0xe2, 0x40, 0x6d, + 0xc3, 0xd2, 0x8d, 0xed, 0x2d, 0xc7, 0xa2, 0x13, 0x85, 0x8c, 0x8c, 0x1a, 0x0a, 0x49, 0xfc, 0xe6, + 0xe0, 0x15, 0x43, 0x1e, 0xc4, 0x80, 0x72, 0xaf, 0xe5, 0x99, 0xf6, 0xb6, 0x1c, 0x03, 0x5f, 0x18, + 0x9a, 0xdb, 0xfa, 0x3c, 0xff, 0x26, 0xd8, 0xdf, 0x9b, 0x2c, 0x8b, 0x67, 0x94, 0xd0, 0xda, 0x4f, + 0xeb, 0x30, 0xa6, 0x7e, 0xd2, 0x4d, 0xea, 0xfa, 0xf4, 0x36, 0xb9, 0x08, 0x45, 0x9b, 0x75, 0x4d, + 0xfe, 0x93, 0x1b, 0x23, 0xb2, 0xb9, 0x14, 0x79, 0x97, 0xe4, 0x39, 0xac, 0x66, 0xa2, 0xa9, 0x48, + 0x81, 0x0f, 0x5f, 0xb3, 0x26, 0x87, 0x11, 0x35, 0x13, 0xcf, 0x28, 0xa1, 0xc9, 0x2b, 0x50, 0xe4, + 0x1f, 0x2f, 0x44, 0xfd, 0xd9, 0xe1, 0x59, 0xb0, 0x4f, 0xaf, 0xb2, 0x2f, 0xe0, 0x1f, 0xce, 0x41, + 0x59, 0x53, 0xec, 0xb5, 0x36, 0xa5, 0x60, 0x3f, 0x93, 0x41, 0xb0, 0x0b, 0xa2, 0x29, 0xae, 0xcf, + 0x2f, 0x20, 0x43, 0x24, 0xff, 0x39, 0x07, 0x27, 0x0d, 0xc7, 0xf6, 0x75, 0xa6, 0x6a, 0xa8, 0x49, + 0x76, 0xa2, 0xc4, 0xf9, 0x5c, 0x1d, 0x9a, 0xcf, 0x5c, 0x12, 0xb1, 0xf1, 0x30, 0x9b, 0x33, 0xfa, + 0x92, 0xb1, 0x9f, 0x37, 0xf9, 0xaf, 0x39, 0x78, 0x98, 0x8d, 0xe5, 0x7d, 0xc4, 0x7c, 0x06, 0x3a, + 0xda, 0x5a, 0x9d, 0xdd, 0xdf, 0x9b, 0x7c, 0x78, 0x29, 0x8d, 0x19, 0xa6, 0xd7, 0x81, 0xd5, 0xee, + 0x94, 0xde, 0xaf, 0x96, 0xf0, 0xd9, 0xad, 0x3e, 0xb3, 0x7c, 0x94, 0xaa, 0x4e, 0xe3, 0x51, 0xd9, + 0x94, 0xd3, 0x34, 0x3b, 0x4c, 0xab, 0x05, 0xb9, 0x0c, 0x95, 0x1d, 0xc7, 0xea, 0x75, 0xa8, 0x37, + 0x51, 0xe5, 0x43, 0xec, 0xb9, 0xb4, 0x21, 0xf6, 0x26, 0x27, 0x69, 0x9c, 0x90, 0xf0, 0x15, 0xf1, + 0xee, 0xa1, 0x2a, 0x4b, 0x4c, 0x28, 0x5b, 0x66, 0xc7, 0xf4, 0x3d, 0x3e, 0x71, 0xd6, 0x67, 0x2e, + 0x0f, 
0xfd, 0x59, 0xa2, 0x8b, 0x2e, 0x73, 0x30, 0xd1, 0x6b, 0xc4, 0x33, 0x4a, 0x06, 0x6c, 0x28, + 0xf4, 0x0c, 0xdd, 0x12, 0x13, 0x6b, 0x7d, 0xe6, 0x73, 0xc3, 0x77, 0x1b, 0x86, 0xd2, 0x18, 0x95, + 0xdf, 0x54, 0xe2, 0xaf, 0x28, 0xb0, 0xc9, 0x97, 0x60, 0x2c, 0xf6, 0x37, 0xbd, 0x89, 0x3a, 0x97, + 0xce, 0x63, 0x69, 0xd2, 0x09, 0xa8, 0xc2, 0x99, 0x27, 0xd6, 0x42, 0x3c, 0x4c, 0x80, 0x91, 0x6b, + 0x50, 0xf5, 0xcc, 0x16, 0x35, 0x74, 0xd7, 0x9b, 0x18, 0x39, 0x08, 0xf0, 0xb8, 0x04, 0xae, 0x36, + 0x65, 0x31, 0x0c, 0x00, 0xc8, 0x14, 0x40, 0x57, 0x77, 0x7d, 0x53, 0x28, 0xaa, 0xa3, 0x5c, 0x69, + 0x1a, 0xdb, 0xdf, 0x9b, 0x84, 0xd5, 0x20, 0x15, 0x23, 0x14, 0x8c, 0x9e, 0x95, 0x5d, 0xb2, 0xbb, + 0x3d, 0x5f, 0x4c, 0xac, 0x35, 0x41, 0xdf, 0x0c, 0x52, 0x31, 0x42, 0x41, 0xbe, 0x97, 0x83, 0x47, + 0xc3, 0xd7, 0xfe, 0x4e, 0x76, 0xe2, 0xc8, 0x3b, 0xd9, 0xe4, 0xfe, 0xde, 0xe4, 0xa3, 0xcd, 0xc1, + 0x2c, 0xf1, 0x6e, 0xf5, 0xd1, 0x5e, 0x82, 0xd1, 0xd9, 0x9e, 0xbf, 0xe5, 0xb8, 0xe6, 0x1b, 0x5c, + 0xe9, 0x26, 0x0b, 0x50, 0xf2, 0xb9, 0xf2, 0x24, 0xe6, 0xe5, 0x27, 0xd2, 0x44, 0x2d, 0x14, 0xd9, + 0x6b, 0x74, 0x57, 0x69, 0x03, 0x62, 0x7e, 0x14, 0xca, 0x94, 0x28, 0xae, 0xfd, 0xfb, 0x1c, 0x54, + 0x1a, 0xba, 0xb1, 0xed, 0x6c, 0x6e, 0x92, 0x97, 0xa1, 0x6a, 0xda, 0x3e, 0x75, 0x77, 0x74, 0x4b, + 0xc2, 0x4e, 0x45, 0x60, 0x83, 0x95, 0x58, 0xf8, 0xdd, 0x6c, 0xcd, 0xc3, 0x18, 0xcd, 0xf7, 0xe4, + 0x5a, 0x81, 0xeb, 0xa3, 0x4b, 0x12, 0x03, 0x03, 0x34, 0x32, 0x09, 0x25, 0xcf, 0xa7, 0x5d, 0x8f, + 0xcf, 0x3c, 0xa3, 0xa2, 0x1a, 0x4d, 0x96, 0x80, 0x22, 0x5d, 0xfb, 0x1f, 0x39, 0xa8, 0x35, 0x74, + 0xcf, 0x34, 0xd8, 0x57, 0x92, 0x39, 0x28, 0xf6, 0x3c, 0xea, 0x1e, 0xee, 0xdb, 0xf8, 0x64, 0xb1, + 0xee, 0x51, 0x17, 0x79, 0x61, 0x72, 0x03, 0xaa, 0x5d, 0xdd, 0xf3, 0x6e, 0x39, 0x6e, 0x4b, 0x4e, + 0x78, 0x07, 0x04, 0x12, 0xca, 0xb9, 0x2c, 0x8a, 0x01, 0x88, 0x56, 0x87, 0x70, 0xc6, 0xd7, 0x7e, + 0x9e, 0x83, 0x53, 0x8d, 0xde, 0xe6, 0x26, 0x75, 0xa5, 0x2e, 0x2a, 0xb5, 0x3c, 0x0a, 0x25, 0x97, + 0xb6, 0x4c, 0x4f, 0xd6, 0x7d, 0x7e, 0xe8, 
0x16, 0x84, 0x0c, 0x45, 0x2a, 0x95, 0x5c, 0x5e, 0x3c, + 0x01, 0x05, 0x3a, 0xe9, 0x41, 0xed, 0x35, 0xea, 0x7b, 0xbe, 0x4b, 0xf5, 0x8e, 0xfc, 0xba, 0x2b, + 0x43, 0xb3, 0xba, 0x4a, 0xfd, 0x26, 0x47, 0x8a, 0xea, 0xb0, 0x41, 0x22, 0x86, 0x9c, 0xb4, 0x1f, + 0x96, 0x60, 0x64, 0xce, 0xe9, 0x6c, 0x98, 0x36, 0x6d, 0x5d, 0x6e, 0xb5, 0x29, 0x79, 0x15, 0x8a, + 0xb4, 0xd5, 0xa6, 0xf2, 0x6b, 0x87, 0x9f, 0xee, 0x19, 0x58, 0xa8, 0xb4, 0xb0, 0x37, 0xe4, 0xc0, + 0x64, 0x19, 0xc6, 0x36, 0x5d, 0xa7, 0x23, 0x46, 0xd0, 0xb5, 0xdd, 0xae, 0xd4, 0x58, 0x1b, 0x1f, + 0x56, 0xa3, 0xd2, 0x42, 0x2c, 0xf7, 0xce, 0xde, 0x24, 0x84, 0x6f, 0x98, 0x28, 0x4b, 0x5e, 0x86, + 0x89, 0x30, 0x25, 0x18, 0x4a, 0xe6, 0xd8, 0x22, 0x82, 0x6b, 0x2c, 0xa5, 0xc6, 0xf9, 0xfd, 0xbd, + 0xc9, 0x89, 0x85, 0x01, 0x34, 0x38, 0xb0, 0x34, 0x79, 0x2b, 0x07, 0xe3, 0x61, 0xa6, 0x18, 0xde, + 0xa5, 0xa2, 0x72, 0x44, 0xf3, 0x06, 0x5f, 0x6d, 0x2d, 0x24, 0x58, 0x60, 0x1f, 0x53, 0xb2, 0x00, + 0x23, 0xbe, 0x13, 0x91, 0x57, 0x89, 0xcb, 0x4b, 0x53, 0xe6, 0x81, 0x35, 0x67, 0xa0, 0xb4, 0x62, + 0xe5, 0x08, 0xc2, 0x19, 0xf5, 0x9e, 0x90, 0x54, 0x99, 0x4b, 0xea, 0xdc, 0xfe, 0xde, 0xe4, 0x99, + 0xb5, 0x54, 0x0a, 0x1c, 0x50, 0x92, 0x7c, 0x3d, 0x07, 0x63, 0x2a, 0x4b, 0xca, 0xa8, 0x72, 0x94, + 0x32, 0x22, 0xac, 0x45, 0xac, 0xc5, 0x18, 0x60, 0x82, 0xa1, 0xf6, 0x8b, 0x22, 0xd4, 0x82, 0x01, + 0x96, 0x3c, 0x0e, 0x25, 0xbe, 0xf0, 0x97, 0x7a, 0x73, 0x30, 0x73, 0x72, 0xfb, 0x00, 0x8a, 0x3c, + 0xf2, 0x04, 0x54, 0x0c, 0xa7, 0xd3, 0xd1, 0xed, 0x16, 0x37, 0xe6, 0xd4, 0x1a, 0x75, 0xa6, 0x30, + 0xcc, 0x89, 0x24, 0x54, 0x79, 0xe4, 0x3c, 0x14, 0x75, 0xb7, 0x2d, 0xec, 0x2a, 0x35, 0x31, 0x1e, + 0xcd, 0xba, 0x6d, 0x0f, 0x79, 0x2a, 0xf9, 0x24, 0x14, 0xa8, 0xbd, 0x33, 0x51, 0x1c, 0xac, 0x91, + 0x5c, 0xb6, 0x77, 0x6e, 0xea, 0x6e, 0xa3, 0x2e, 0xeb, 0x50, 0xb8, 0x6c, 0xef, 0x20, 0x2b, 0x43, + 0x96, 0xa1, 0x42, 0xed, 0x1d, 0xf6, 0xef, 0xa5, 0xc1, 0xe3, 0x43, 0x03, 0x8a, 0x33, 0x12, 0xa9, + 0x9c, 0x07, 0x7a, 0x8d, 0x4c, 0x46, 0x05, 0x41, 0xbe, 0x00, 0x23, 0x42, 0xc5, 
0x59, 0x61, 0xff, + 0xc4, 0x9b, 0x28, 0x73, 0xc8, 0xc9, 0xc1, 0x3a, 0x12, 0xa7, 0x0b, 0x0d, 0x4c, 0x91, 0x44, 0x0f, + 0x63, 0x50, 0xe4, 0x0b, 0x50, 0x53, 0xeb, 0x51, 0xf5, 0x67, 0x53, 0x6d, 0x33, 0x6a, 0x11, 0x8b, + 0xf4, 0xf5, 0x9e, 0xe9, 0xd2, 0x0e, 0xb5, 0x7d, 0xaf, 0x71, 0x52, 0xad, 0xd6, 0x55, 0xae, 0x87, + 0x21, 0x1a, 0xd9, 0xe8, 0x37, 0x32, 0x09, 0x0b, 0xc9, 0xe3, 0x03, 0x46, 0xf5, 0x21, 0x2c, 0x4c, + 0x5f, 0x86, 0x13, 0x81, 0x15, 0x48, 0x1a, 0x12, 0x84, 0xcd, 0xe4, 0x69, 0x56, 0x7c, 0x29, 0x9e, + 0x75, 0x67, 0x6f, 0xf2, 0xb1, 0x14, 0x53, 0x42, 0x48, 0x80, 0x49, 0x30, 0xed, 0x07, 0x05, 0xe8, + 0xd7, 0xfe, 0xe3, 0x42, 0xcb, 0x1d, 0xb5, 0xd0, 0x92, 0x1f, 0x24, 0x86, 0xcf, 0xe7, 0x65, 0xb1, + 0xec, 0x1f, 0x95, 0xf6, 0x63, 0x0a, 0x47, 0xfd, 0x63, 0x1e, 0x94, 0xbe, 0xa3, 0xbd, 0x5d, 0x84, + 0xb1, 0x79, 0x9d, 0x76, 0x1c, 0xfb, 0x9e, 0x6b, 0xa1, 0xdc, 0x03, 0xb1, 0x16, 0xba, 0x04, 0x55, + 0x97, 0x76, 0x2d, 0xd3, 0xd0, 0x85, 0xf2, 0x25, 0x6d, 0x8f, 0x28, 0xd3, 0x30, 0xc8, 0x1d, 0xb0, + 0x06, 0x2e, 0x3c, 0x90, 0x6b, 0xe0, 0xe2, 0x7b, 0xbf, 0x06, 0xd6, 0xbe, 0x9e, 0x07, 0xae, 0xa8, + 0x90, 0x8b, 0x50, 0x64, 0x93, 0x70, 0xd2, 0xf2, 0xc2, 0x1b, 0x0e, 0xcf, 0x21, 0xe7, 0x20, 0xef, + 0x3b, 0xb2, 0xe7, 0x81, 0xcc, 0xcf, 0xaf, 0x39, 0x98, 0xf7, 0x1d, 0xf2, 0x06, 0x80, 0xe1, 0xd8, + 0x2d, 0x53, 0x99, 0xe4, 0xb3, 0x7d, 0xd8, 0x82, 0xe3, 0xde, 0xd2, 0xdd, 0xd6, 0x5c, 0x80, 0x28, + 0x56, 0x41, 0xe1, 0x3b, 0x46, 0xb8, 0x91, 0x17, 0xa0, 0xec, 0xd8, 0x0b, 0x3d, 0xcb, 0xe2, 0x02, + 0xad, 0x35, 0x3e, 0xc2, 0x96, 0xa6, 0x37, 0x78, 0xca, 0x9d, 0xbd, 0xc9, 0xb3, 0x42, 0xbf, 0x65, + 0x6f, 0x2f, 0xb9, 0xa6, 0x6f, 0xda, 0xed, 0xa6, 0xef, 0xea, 0x3e, 0x6d, 0xef, 0xa2, 0x2c, 0xa6, + 0x7d, 0x33, 0x07, 0xf5, 0x05, 0xf3, 0x36, 0x6d, 0xbd, 0x64, 0xda, 0x2d, 0xe7, 0x16, 0x41, 0x28, + 0x5b, 0xd4, 0x6e, 0xfb, 0x5b, 0x43, 0xae, 0x1f, 0xc4, 0xda, 0x98, 0x23, 0xa0, 0x44, 0x22, 0xd3, + 0x50, 0x13, 0xda, 0xa7, 0x69, 0xb7, 0xb9, 0x0c, 0xab, 0xe1, 0xa0, 0xd7, 0x54, 0x19, 0x18, 0xd2, + 0x68, 0xbb, 0x70, 
0xb2, 0x4f, 0x0c, 0xa4, 0x05, 0x45, 0x5f, 0x6f, 0xab, 0xf1, 0x75, 0x61, 0x68, + 0x01, 0xaf, 0xe9, 0xed, 0x88, 0x70, 0xf9, 0x1c, 0xbf, 0xa6, 0xb3, 0x39, 0x9e, 0xa1, 0x6b, 0xbf, + 0xca, 0x41, 0x75, 0xa1, 0x67, 0x1b, 0x7c, 0x89, 0x76, 0x6f, 0x8b, 0x9c, 0x52, 0x18, 0xf2, 0xa9, + 0x0a, 0x43, 0x0f, 0xca, 0xdb, 0xb7, 0x02, 0x85, 0xa2, 0x3e, 0xb3, 0x32, 0x7c, 0xab, 0x90, 0x55, + 0x9a, 0xba, 0xc6, 0xf1, 0x84, 0xc3, 0x68, 0x4c, 0x56, 0xa8, 0x7c, 0xed, 0x25, 0xce, 0x54, 0x32, + 0x3b, 0xf7, 0x49, 0xa8, 0x47, 0xc8, 0x0e, 0x65, 0x3b, 0xfe, 0x7f, 0x45, 0x28, 0x2f, 0x36, 0x9b, + 0xb3, 0xab, 0x4b, 0xe4, 0x19, 0xa8, 0x4b, 0x5f, 0xc2, 0xf5, 0x50, 0x06, 0x81, 0x2b, 0xa9, 0x19, + 0x66, 0x61, 0x94, 0x8e, 0xa9, 0x63, 0x2e, 0xd5, 0xad, 0x8e, 0xec, 0x2c, 0x81, 0x3a, 0x86, 0x2c, + 0x11, 0x45, 0x1e, 0xd1, 0x61, 0x8c, 0xad, 0xf0, 0x98, 0x08, 0xc5, 0xea, 0x4d, 0x76, 0x9b, 0x03, + 0xae, 0xef, 0xb8, 0x92, 0xb8, 0x1e, 0x03, 0xc0, 0x04, 0x20, 0x79, 0x1e, 0xaa, 0x7a, 0xcf, 0xdf, + 0xe2, 0x0a, 0xb4, 0xe8, 0x1b, 0xe7, 0xb9, 0xab, 0x45, 0xa6, 0xdd, 0xd9, 0x9b, 0x1c, 0xb9, 0x86, + 0x8d, 0x67, 0xd4, 0x3b, 0x06, 0xd4, 0xac, 0x72, 0x6a, 0xc5, 0x28, 0x2b, 0x57, 0x3a, 0x74, 0xe5, + 0x56, 0x63, 0x00, 0x98, 0x00, 0x24, 0xaf, 0xc0, 0xc8, 0x36, 0xdd, 0xf5, 0xf5, 0x0d, 0xc9, 0xa0, + 0x7c, 0x18, 0x06, 0xe3, 0x4c, 0x85, 0xbb, 0x16, 0x29, 0x8e, 0x31, 0x30, 0xe2, 0xc1, 0xe9, 0x6d, + 0xea, 0x6e, 0x50, 0xd7, 0x91, 0xab, 0x4f, 0xc9, 0xa4, 0x72, 0x18, 0x26, 0x13, 0xfb, 0x7b, 0x93, + 0xa7, 0xaf, 0xa5, 0xc0, 0x60, 0x2a, 0xb8, 0xf6, 0xcb, 0x3c, 0x9c, 0x58, 0x14, 0xce, 0x5c, 0xc7, + 0x15, 0x93, 0x30, 0x39, 0x0b, 0x05, 0xb7, 0xdb, 0xe3, 0x2d, 0xa7, 0x20, 0xcc, 0xb5, 0xb8, 0xba, + 0x8e, 0x2c, 0x8d, 0xbc, 0x0c, 0xd5, 0x96, 0x1c, 0x32, 0xe4, 0xe2, 0x77, 0x28, 0x43, 0x85, 0x7a, + 0xc3, 0x00, 0x8d, 0x69, 0xfa, 0x1d, 0xaf, 0xdd, 0x34, 0xdf, 0xa0, 0x72, 0x3d, 0xc8, 0x35, 0xfd, + 0x15, 0x91, 0x84, 0x2a, 0x8f, 0xcd, 0xaa, 0xdb, 0x74, 0x57, 0xac, 0x86, 0x8a, 0xe1, 0xac, 0x7a, + 0x4d, 0xa6, 0x61, 0x90, 0x4b, 0x26, 0x55, 0x67, 0x61, 
0xad, 0xa0, 0x28, 0x56, 0xf2, 0x37, 0x59, + 0x82, 0xec, 0x37, 0x6c, 0xc8, 0x7c, 0xcd, 0xf4, 0x7d, 0xea, 0xca, 0xdf, 0x38, 0xd4, 0x90, 0x79, + 0x95, 0x23, 0xa0, 0x44, 0x22, 0x1f, 0x83, 0x1a, 0x07, 0x6f, 0x58, 0xce, 0x06, 0xff, 0x71, 0x35, + 0xb1, 0xa6, 0xbf, 0xa9, 0x12, 0x31, 0xcc, 0xd7, 0x7e, 0x9d, 0x87, 0x33, 0x8b, 0xd4, 0x17, 0x5a, + 0xcd, 0x3c, 0xed, 0x5a, 0xce, 0x2e, 0x53, 0x2d, 0x91, 0xbe, 0x4e, 0x5e, 0x04, 0x30, 0xbd, 0x8d, + 0xe6, 0x8e, 0xc1, 0xfb, 0x81, 0xe8, 0xc3, 0x17, 0x65, 0x97, 0x84, 0xa5, 0x66, 0x43, 0xe6, 0xdc, + 0x89, 0xbd, 0x61, 0xa4, 0x4c, 0xb8, 0xbc, 0xca, 0xdf, 0x65, 0x79, 0xd5, 0x04, 0xe8, 0x86, 0x0a, + 0x6a, 0x81, 0x53, 0xfe, 0x2b, 0xc5, 0xe6, 0x30, 0xba, 0x69, 0x04, 0x26, 0x8b, 0xca, 0x68, 0xc3, + 0x78, 0x8b, 0x6e, 0xea, 0x3d, 0xcb, 0x0f, 0x94, 0x6a, 0xd9, 0x89, 0x0f, 0xae, 0x97, 0x07, 0x8e, + 0xe6, 0xf9, 0x04, 0x12, 0xf6, 0x61, 0x6b, 0xdf, 0x2f, 0xc0, 0xb9, 0x45, 0xea, 0x07, 0x16, 0x17, + 0x39, 0x3a, 0x36, 0xbb, 0xd4, 0x60, 0x7f, 0xe1, 0xad, 0x1c, 0x94, 0x2d, 0x7d, 0x83, 0x5a, 0x6c, + 0xf6, 0x62, 0x5f, 0xf3, 0xea, 0xd0, 0x13, 0xc1, 0x60, 0x2e, 0x53, 0xcb, 0x9c, 0x43, 0x62, 0x6a, + 0x10, 0x89, 0x28, 0xd9, 0xb3, 0x41, 0xdd, 0xb0, 0x7a, 0x9e, 0x4f, 0xdd, 0x55, 0xc7, 0xf5, 0xa5, + 0x3e, 0x19, 0x0c, 0xea, 0x73, 0x61, 0x16, 0x46, 0xe9, 0xc8, 0x0c, 0x80, 0x61, 0x99, 0xd4, 0xf6, + 0x79, 0x29, 0xd1, 0xaf, 0x88, 0xfa, 0xbf, 0x73, 0x41, 0x0e, 0x46, 0xa8, 0x18, 0xab, 0x8e, 0x63, + 0x9b, 0xbe, 0x23, 0x58, 0x15, 0xe3, 0xac, 0x56, 0xc2, 0x2c, 0x8c, 0xd2, 0xf1, 0x62, 0xd4, 0x77, + 0x4d, 0xc3, 0xe3, 0xc5, 0x4a, 0x89, 0x62, 0x61, 0x16, 0x46, 0xe9, 0xd8, 0x9c, 0x17, 0xf9, 0xfe, + 0x43, 0xcd, 0x79, 0xdf, 0xad, 0xc1, 0x85, 0x98, 0x58, 0x7d, 0xdd, 0xa7, 0x9b, 0x3d, 0xab, 0x49, + 0x7d, 0xf5, 0x03, 0x87, 0x9c, 0x0b, 0xff, 0x63, 0xf8, 0xdf, 0x45, 0x08, 0x89, 0x71, 0x34, 0xff, + 0xbd, 0xaf, 0x82, 0x07, 0xfa, 0xf7, 0xd3, 0x50, 0xb3, 0x75, 0xdf, 0xe3, 0x1d, 0x57, 0xf6, 0xd1, + 0x40, 0x0d, 0xbb, 0xae, 0x32, 0x30, 0xa4, 0x21, 0xab, 0x70, 0x5a, 0x8a, 0xf8, 0xf2, 0xed, 
0xae, + 0xe3, 0xfa, 0xd4, 0x15, 0x65, 0xe5, 0x74, 0x2a, 0xcb, 0x9e, 0x5e, 0x49, 0xa1, 0xc1, 0xd4, 0x92, + 0x64, 0x05, 0x4e, 0x19, 0xc2, 0xad, 0x4e, 0x2d, 0x47, 0x6f, 0x29, 0x40, 0x61, 0xe0, 0x0a, 0x96, + 0x46, 0x73, 0xfd, 0x24, 0x98, 0x56, 0x2e, 0xd9, 0x9a, 0xcb, 0x43, 0xb5, 0xe6, 0xca, 0x30, 0xad, + 0xb9, 0x3a, 0x5c, 0x6b, 0xae, 0x1d, 0xac, 0x35, 0x33, 0xc9, 0xb3, 0x76, 0x44, 0x5d, 0xa6, 0x9e, + 0x88, 0x19, 0x36, 0x12, 0xb5, 0x11, 0x48, 0xbe, 0x99, 0x42, 0x83, 0xa9, 0x25, 0xc9, 0x06, 0x9c, + 0x13, 0xe9, 0x97, 0x6d, 0xc3, 0xdd, 0xed, 0xb2, 0x89, 0x27, 0x82, 0x5b, 0x8f, 0x59, 0x18, 0xcf, + 0x35, 0x07, 0x52, 0xe2, 0x5d, 0x50, 0xc8, 0xa7, 0x61, 0x54, 0xfc, 0xa5, 0x15, 0xbd, 0xcb, 0x61, + 0x45, 0x0c, 0xc7, 0xc3, 0x12, 0x76, 0x74, 0x2e, 0x9a, 0x89, 0x71, 0x5a, 0x32, 0x0b, 0x27, 0xba, + 0x3b, 0x06, 0x7b, 0x5c, 0xda, 0xbc, 0x4e, 0x69, 0x8b, 0xb6, 0xb8, 0xd3, 0xa8, 0xd6, 0x78, 0x44, + 0x19, 0x3a, 0x56, 0xe3, 0xd9, 0x98, 0xa4, 0x27, 0xcf, 0xc3, 0x88, 0xe7, 0xeb, 0xae, 0x2f, 0xcd, + 0x7a, 0x13, 0x63, 0x22, 0xc6, 0x45, 0x59, 0xbd, 0x9a, 0x91, 0x3c, 0x8c, 0x51, 0xa6, 0xce, 0x17, + 0x27, 0x8e, 0x6f, 0xbe, 0xc8, 0x32, 0x5a, 0xfd, 0x6e, 0x1e, 0x2e, 0x2e, 0x52, 0x7f, 0xc5, 0xb1, + 0xa5, 0x51, 0x34, 0x6d, 0xda, 0x3f, 0x90, 0x4d, 0x34, 0x3e, 0x69, 0xe7, 0x8f, 0x74, 0xd2, 0x2e, + 0x1c, 0xd1, 0xa4, 0x5d, 0x3c, 0xc6, 0x49, 0xfb, 0x37, 0xf2, 0xf0, 0x48, 0x4c, 0x92, 0xab, 0x4e, + 0x4b, 0x0d, 0xf8, 0x1f, 0x08, 0xf0, 0x00, 0x02, 0xbc, 0x23, 0xf4, 0x4e, 0xee, 0xd6, 0x4a, 0x68, + 0x3c, 0x6f, 0x26, 0x35, 0x9e, 0x57, 0xb2, 0xcc, 0x7c, 0x29, 0x1c, 0x0e, 0x34, 0xe3, 0x5d, 0x05, + 0xe2, 0x4a, 0x27, 0x9c, 0x30, 0xfd, 0x44, 0x94, 0x9e, 0x20, 0x88, 0x0e, 0xfb, 0x28, 0x30, 0xa5, + 0x14, 0x69, 0xc2, 0xc3, 0x1e, 0xb5, 0x7d, 0xd3, 0xa6, 0x56, 0x1c, 0x4e, 0x68, 0x43, 0x8f, 0x49, + 0xb8, 0x87, 0x9b, 0x69, 0x44, 0x98, 0x5e, 0x36, 0xcb, 0x38, 0xf0, 0x07, 0xc0, 0x55, 0x4e, 0x21, + 0x9a, 0x23, 0xd3, 0x58, 0xde, 0x4a, 0x6a, 0x2c, 0xaf, 0x66, 0xff, 0x6f, 0xc3, 0x69, 0x2b, 0x33, + 0x00, 0xfc, 0x2f, 0x44, 0xd5, 
0x95, 0x60, 0x92, 0xc6, 0x20, 0x07, 0x23, 0x54, 0x6c, 0x02, 0x52, + 0x72, 0x8e, 0x6a, 0x2a, 0xc1, 0x04, 0xd4, 0x8c, 0x66, 0x62, 0x9c, 0x76, 0xa0, 0xb6, 0x53, 0x1a, + 0x5a, 0xdb, 0xb9, 0x0a, 0x24, 0x66, 0x78, 0x14, 0x78, 0xe5, 0x78, 0x0c, 0xe7, 0x52, 0x1f, 0x05, + 0xa6, 0x94, 0x1a, 0xd0, 0x94, 0x2b, 0x47, 0xdb, 0x94, 0xab, 0xc3, 0x37, 0x65, 0xf2, 0x2a, 0x9c, + 0xe5, 0xac, 0xa4, 0x7c, 0xe2, 0xc0, 0x42, 0xef, 0xf9, 0x90, 0x04, 0x3e, 0x8b, 0x83, 0x08, 0x71, + 0x30, 0x06, 0xfb, 0x3f, 0x86, 0x4b, 0x5b, 0x8c, 0xb9, 0x6e, 0x0d, 0xd6, 0x89, 0xe6, 0x52, 0x68, + 0x30, 0xb5, 0x24, 0x6b, 0x62, 0x3e, 0x6b, 0x86, 0xfa, 0x86, 0x45, 0x5b, 0x32, 0x86, 0x35, 0x68, + 0x62, 0x6b, 0xcb, 0x4d, 0x99, 0x83, 0x11, 0xaa, 0x34, 0x35, 0x65, 0xe4, 0x90, 0x6a, 0xca, 0x22, + 0xb7, 0xd2, 0x6f, 0xc6, 0xb4, 0x21, 0xa9, 0xeb, 0x04, 0x51, 0xc9, 0x73, 0x49, 0x02, 0xec, 0x2f, + 0xc3, 0xb5, 0x44, 0xc3, 0x35, 0xbb, 0xbe, 0x17, 0xc7, 0x1a, 0x4b, 0x68, 0x89, 0x29, 0x34, 0x98, + 0x5a, 0x92, 0xe9, 0xe7, 0x5b, 0x54, 0xb7, 0xfc, 0xad, 0x38, 0xe0, 0x89, 0xb8, 0x7e, 0x7e, 0xa5, + 0x9f, 0x04, 0xd3, 0xca, 0xa5, 0x4e, 0x48, 0xe3, 0x0f, 0xa6, 0x5a, 0xf5, 0x8d, 0x02, 0x9c, 0x5d, + 0xa4, 0x7e, 0x10, 0xde, 0xf3, 0x81, 0x19, 0xe5, 0x3d, 0x30, 0xa3, 0x7c, 0xa7, 0x04, 0xa7, 0x16, + 0xa9, 0xdf, 0xa7, 0x8d, 0xfd, 0x33, 0x15, 0xff, 0x0a, 0x9c, 0x0a, 0x23, 0xca, 0x9a, 0xbe, 0xe3, + 0x8a, 0xb9, 0x3c, 0xb1, 0x5a, 0x6e, 0xf6, 0x93, 0x60, 0x5a, 0x39, 0xf2, 0x05, 0x78, 0x84, 0x4f, + 0xf5, 0x76, 0x5b, 0xd8, 0x67, 0x85, 0x31, 0x21, 0xb2, 0x27, 0x62, 0x52, 0x42, 0x3e, 0xd2, 0x4c, + 0x27, 0xc3, 0x41, 0xe5, 0xc9, 0x57, 0x61, 0xa4, 0x6b, 0x76, 0xa9, 0x65, 0xda, 0x5c, 0x3f, 0xcb, + 0x1c, 0x12, 0xb2, 0x1a, 0x01, 0x0b, 0x17, 0x70, 0xd1, 0x54, 0x8c, 0x31, 0x4c, 0x6d, 0xa9, 0xd5, + 0x63, 0x6c, 0xa9, 0x7f, 0x9b, 0x87, 0xca, 0xa2, 0xeb, 0xf4, 0xba, 0x8d, 0x5d, 0xd2, 0x86, 0xf2, + 0x2d, 0xee, 0x3c, 0x93, 0xae, 0xa9, 0xe1, 0xa3, 0xb2, 0x85, 0x0f, 0x2e, 0x54, 0x89, 0xc4, 0x3b, + 0x4a, 0x78, 0xd6, 0x88, 0xb7, 0xe9, 0x2e, 0x6d, 0x49, 0x1f, 0x5a, 
0xd0, 0x88, 0xaf, 0xb1, 0x44, + 0x14, 0x79, 0xa4, 0x03, 0x27, 0x74, 0xcb, 0x72, 0x6e, 0xd1, 0xd6, 0xb2, 0xee, 0x53, 0x9b, 0x7a, + 0xca, 0x25, 0x79, 0x58, 0xb3, 0x34, 0xf7, 0xeb, 0xcf, 0xc6, 0xa1, 0x30, 0x89, 0x4d, 0x5e, 0x83, + 0x8a, 0xe7, 0x3b, 0xae, 0x52, 0xb6, 0xea, 0x33, 0x73, 0xc3, 0xff, 0xf4, 0xc6, 0xe7, 0x9b, 0x02, + 0x4a, 0xd8, 0xec, 0xe5, 0x0b, 0x2a, 0x06, 0xda, 0xb7, 0x73, 0x00, 0x57, 0xd6, 0xd6, 0x56, 0xa5, + 0x7b, 0xa1, 0x05, 0x45, 0xbd, 0x17, 0x38, 0x2a, 0x87, 0x77, 0x08, 0xc6, 0xc2, 0x32, 0xa5, 0x0f, + 0xaf, 0xe7, 0x6f, 0x21, 0x47, 0x27, 0x1f, 0x85, 0x8a, 0x54, 0x90, 0xa5, 0xd8, 0x83, 0xd0, 0x02, + 0xa9, 0x44, 0xa3, 0xca, 0xd7, 0xfe, 0x6f, 0x1e, 0x60, 0xa9, 0x65, 0xd1, 0xa6, 0x0a, 0xa4, 0xaf, + 0xf9, 0x5b, 0x2e, 0xf5, 0xb6, 0x1c, 0xab, 0x35, 0xa4, 0x37, 0x95, 0xdb, 0xfc, 0xd7, 0x14, 0x08, + 0x86, 0x78, 0xa4, 0x05, 0x23, 0x9e, 0x4f, 0xbb, 0x2a, 0x52, 0x73, 0x48, 0x27, 0xca, 0xb8, 0xb0, + 0x8b, 0x84, 0x38, 0x18, 0x43, 0x25, 0x3a, 0xd4, 0x4d, 0xdb, 0x10, 0x1d, 0xa4, 0xb1, 0x3b, 0x64, + 0x43, 0x3a, 0xc1, 0x56, 0x1c, 0x4b, 0x21, 0x0c, 0x46, 0x31, 0xb5, 0x9f, 0xe5, 0xe1, 0x0c, 0xe7, + 0xc7, 0xaa, 0x11, 0x8b, 0xc7, 0x24, 0xff, 0xa6, 0x6f, 0xd3, 0xdf, 0xbf, 0x3c, 0x18, 0x6b, 0xb1, + 0x67, 0x6c, 0x85, 0xfa, 0x7a, 0xa8, 0xcf, 0x85, 0x69, 0x91, 0x9d, 0x7e, 0x3d, 0x28, 0x7a, 0x6c, + 0xbc, 0x12, 0xd2, 0x6b, 0x0e, 0xdd, 0x84, 0xd2, 0x3f, 0x80, 0x8f, 0x5e, 0x81, 0xd7, 0x98, 0x8f, + 0x5a, 0x9c, 0x1d, 0xf9, 0x77, 0x50, 0xf6, 0x7c, 0xdd, 0xef, 0xa9, 0xae, 0xb9, 0x7e, 0xd4, 0x8c, + 0x39, 0x78, 0x38, 0x8e, 0x88, 0x77, 0x94, 0x4c, 0xb5, 0x9f, 0xe5, 0xe0, 0x5c, 0x7a, 0xc1, 0x65, + 0xd3, 0xf3, 0xc9, 0xbf, 0xee, 0x13, 0xfb, 0x01, 0xff, 0x38, 0x2b, 0xcd, 0x85, 0x1e, 0xc4, 0x85, + 0xab, 0x94, 0x88, 0xc8, 0x7d, 0x28, 0x99, 0x3e, 0xed, 0xa8, 0xf5, 0xe5, 0x8d, 0x23, 0xfe, 0xf4, + 0xc8, 0xd4, 0xce, 0xb8, 0xa0, 0x60, 0xa6, 0xbd, 0x9d, 0x1f, 0xf4, 0xc9, 0x7c, 0xfa, 0xb0, 0xe2, + 0x31, 0xbf, 0xd7, 0xb2, 0xc5, 0xfc, 0xc6, 0x2b, 0xd4, 0x1f, 0xfa, 0xfb, 0x6f, 0xfb, 0x43, 0x7f, + 0x6f, 
0x64, 0x0f, 0xfd, 0x4d, 0x88, 0x61, 0x60, 0x04, 0xf0, 0xbb, 0x05, 0x38, 0x7f, 0xb7, 0x66, + 0xc3, 0xe6, 0x33, 0xd9, 0x3a, 0xb3, 0xce, 0x67, 0x77, 0x6f, 0x87, 0x64, 0x06, 0x4a, 0xdd, 0x2d, + 0xdd, 0x53, 0x4a, 0x99, 0x5a, 0xb0, 0x94, 0x56, 0x59, 0xe2, 0x1d, 0x36, 0x68, 0x70, 0x65, 0x8e, + 0xbf, 0xa2, 0x20, 0x65, 0xc3, 0x71, 0x87, 0x7a, 0x5e, 0x68, 0x13, 0x08, 0x86, 0xe3, 0x15, 0x91, + 0x8c, 0x2a, 0x9f, 0xf8, 0x50, 0x16, 0x26, 0x66, 0x39, 0x33, 0x0d, 0x1f, 0xc8, 0x95, 0x12, 0x26, + 0x1e, 0x7e, 0x94, 0xf4, 0x56, 0x48, 0x5e, 0x64, 0x0a, 0x8a, 0x7e, 0x18, 0xb4, 0xab, 0x96, 0xe6, + 0xc5, 0x14, 0xfd, 0x94, 0xd3, 0xb1, 0x85, 0xbd, 0xb3, 0xc1, 0x8d, 0xea, 0x2d, 0xe9, 0x3f, 0x37, + 0x1d, 0x9b, 0x2b, 0x64, 0x85, 0x70, 0x61, 0x7f, 0xa3, 0x8f, 0x02, 0x53, 0x4a, 0x69, 0x7f, 0x5c, + 0x85, 0x33, 0xe9, 0xed, 0x81, 0xc9, 0x6d, 0x87, 0xba, 0x1e, 0xc3, 0xce, 0xc5, 0xe5, 0x76, 0x53, + 0x24, 0xa3, 0xca, 0x7f, 0x5f, 0x07, 0x9c, 0x7d, 0x27, 0x07, 0x67, 0x5d, 0xe9, 0x23, 0xba, 0x1f, + 0x41, 0x67, 0x8f, 0x09, 0x73, 0xc6, 0x00, 0x86, 0x38, 0xb8, 0x2e, 0xe4, 0x7f, 0xe5, 0x60, 0xa2, + 0x93, 0xb0, 0x73, 0x1c, 0xe3, 0xbe, 0x35, 0x1e, 0x15, 0xbf, 0x32, 0x80, 0x1f, 0x0e, 0xac, 0x09, + 0xf9, 0x2a, 0xd4, 0xbb, 0xac, 0x5d, 0x78, 0x3e, 0xb5, 0x0d, 0xb5, 0x75, 0x6d, 0xf8, 0x9e, 0xb4, + 0x1a, 0x62, 0xa9, 0x50, 0x34, 0xa1, 0x1f, 0x44, 0x32, 0x30, 0xca, 0xf1, 0x01, 0xdf, 0xa8, 0x76, + 0x09, 0xaa, 0x1e, 0xf5, 0x7d, 0xd3, 0x6e, 0x8b, 0xf5, 0x46, 0x4d, 0xf4, 0x95, 0xa6, 0x4c, 0xc3, + 0x20, 0x97, 0x7c, 0x0c, 0x6a, 0xdc, 0xe5, 0x34, 0xeb, 0xb6, 0xbd, 0x89, 0x1a, 0x0f, 0x17, 0x1b, + 0x15, 0x01, 0x70, 0x32, 0x11, 0xc3, 0x7c, 0xf2, 0x34, 0x8c, 0x6c, 0xf0, 0xee, 0x2b, 0xf7, 0x2e, + 0x0b, 0x1b, 0x17, 0xd7, 0xd6, 0x1a, 0x91, 0x74, 0x8c, 0x51, 0x91, 0x19, 0x00, 0x1a, 0xf8, 0xe5, + 0x92, 0xf6, 0xac, 0xd0, 0x63, 0x87, 0x11, 0x2a, 0xf2, 0x18, 0x14, 0x7c, 0xcb, 0xe3, 0x36, 0xac, + 0x6a, 0xb8, 0x04, 0x5d, 0x5b, 0x6e, 0x22, 0x4b, 0xd7, 0x7e, 0x9d, 0x83, 0x13, 0x89, 0xcd, 0x25, + 0xac, 0x48, 0xcf, 0xb5, 0xe4, 0x30, 0x12, 
0x14, 0x59, 0xc7, 0x65, 0x64, 0xe9, 0xe4, 0x55, 0xa9, + 0x96, 0xe7, 0x33, 0x1e, 0xd3, 0x70, 0x5d, 0xf7, 0x3d, 0xa6, 0x87, 0xf7, 0x69, 0xe4, 0xdc, 0xcd, + 0x17, 0xd6, 0x47, 0xce, 0x03, 0x11, 0x37, 0x5f, 0x98, 0x87, 0x31, 0xca, 0x84, 0xc1, 0xaf, 0x78, + 0x10, 0x83, 0x9f, 0xf6, 0xcd, 0x7c, 0x44, 0x02, 0x52, 0xb3, 0xbf, 0x87, 0x04, 0x9e, 0x64, 0x13, + 0x68, 0x30, 0xb9, 0xd7, 0xa2, 0xf3, 0x1f, 0x9f, 0x8c, 0x65, 0x2e, 0x79, 0x49, 0xc8, 0xbe, 0x90, + 0x71, 0x33, 0xec, 0xda, 0x72, 0x53, 0x44, 0x57, 0xa9, 0xbf, 0x16, 0xfc, 0x82, 0xe2, 0x31, 0xfd, + 0x02, 0xed, 0xf7, 0x0b, 0x50, 0xbf, 0xea, 0x6c, 0xbc, 0x4f, 0x22, 0xa8, 0xd3, 0xa7, 0xa9, 0xfc, + 0x7b, 0x38, 0x4d, 0xad, 0xc3, 0x23, 0xbe, 0x6f, 0x35, 0xa9, 0xe1, 0xd8, 0x2d, 0x6f, 0x76, 0xd3, + 0xa7, 0xee, 0x82, 0x69, 0x9b, 0xde, 0x16, 0x6d, 0x49, 0x77, 0xd2, 0xa3, 0xfb, 0x7b, 0x93, 0x8f, + 0xac, 0xad, 0x2d, 0xa7, 0x91, 0xe0, 0xa0, 0xb2, 0x7c, 0xd8, 0x10, 0x3b, 0x01, 0xf9, 0x4e, 0x19, + 0x19, 0x73, 0x23, 0x86, 0x8d, 0x48, 0x3a, 0xc6, 0xa8, 0xb4, 0x77, 0xf2, 0x50, 0x0b, 0x36, 0xe0, + 0x93, 0x27, 0xa0, 0xb2, 0xe1, 0x3a, 0xdb, 0xd4, 0x15, 0x9e, 0x3b, 0xb9, 0x53, 0xa6, 0x21, 0x92, + 0x50, 0xe5, 0x91, 0xc7, 0xa1, 0xe4, 0x3b, 0x5d, 0xd3, 0x48, 0x1a, 0xd4, 0xd6, 0x58, 0x22, 0x8a, + 0xbc, 0xe3, 0x6b, 0xe0, 0x4f, 0xc6, 0x54, 0xbb, 0xda, 0x40, 0x65, 0xec, 0x15, 0x28, 0x7a, 0xba, + 0x67, 0xc9, 0xf9, 0x34, 0xc3, 0x5e, 0xf6, 0xd9, 0xe6, 0xb2, 0xdc, 0xcb, 0x3e, 0xdb, 0x5c, 0x46, + 0x0e, 0xaa, 0xfd, 0x22, 0x0f, 0x75, 0x21, 0x37, 0x31, 0x2a, 0x1c, 0xa5, 0xe4, 0x5e, 0xe0, 0xa1, + 0x14, 0x5e, 0xaf, 0x43, 0x5d, 0x6e, 0x66, 0x92, 0x83, 0x5c, 0xd4, 0x3f, 0x10, 0x66, 0x06, 0xe1, + 0x14, 0x61, 0x92, 0x12, 0x7d, 0xf1, 0x18, 0x45, 0x5f, 0x3a, 0x90, 0xe8, 0xcb, 0xc7, 0x21, 0xfa, + 0xb7, 0xf2, 0x50, 0x5b, 0x36, 0x37, 0xa9, 0xb1, 0x6b, 0x58, 0x7c, 0x4f, 0x60, 0x8b, 0x5a, 0xd4, + 0xa7, 0x8b, 0xae, 0x6e, 0xd0, 0x55, 0xea, 0x9a, 0xfc, 0x80, 0x1a, 0xd6, 0x3f, 0xf8, 0x08, 0x24, + 0xf7, 0x04, 0xce, 0x0f, 0xa0, 0xc1, 0x81, 0xa5, 0xc9, 0x12, 0x8c, 0xb4, 0xa8, 
0x67, 0xba, 0xb4, + 0xb5, 0x1a, 0x59, 0xa8, 0x3c, 0xa1, 0xa6, 0x9a, 0xf9, 0x48, 0xde, 0x9d, 0xbd, 0xc9, 0x51, 0x65, + 0xa0, 0x14, 0x2b, 0x96, 0x58, 0x51, 0xd6, 0xe5, 0xbb, 0x7a, 0xcf, 0x4b, 0xab, 0x63, 0xa4, 0xcb, + 0xaf, 0xa6, 0x93, 0xe0, 0xa0, 0xb2, 0x5a, 0x09, 0x0a, 0xcb, 0x4e, 0x5b, 0x7b, 0xbb, 0x00, 0xc1, + 0x49, 0x46, 0xe4, 0x3f, 0xe4, 0xa0, 0xae, 0xdb, 0xb6, 0xe3, 0xcb, 0x53, 0x82, 0x84, 0x07, 0x1e, + 0x33, 0x1f, 0x98, 0x34, 0x35, 0x1b, 0x82, 0x0a, 0xe7, 0x6d, 0xe0, 0x50, 0x8e, 0xe4, 0x60, 0x94, + 0x37, 0xe9, 0x25, 0xfc, 0xc9, 0x2b, 0xd9, 0x6b, 0x71, 0x00, 0xef, 0xf1, 0xb9, 0xcf, 0xc1, 0x78, + 0xb2, 0xb2, 0x87, 0x71, 0x07, 0x65, 0x72, 0xcc, 0xe7, 0x01, 0xc2, 0x98, 0x92, 0xfb, 0x60, 0xc4, + 0x32, 0x63, 0x46, 0xac, 0xc5, 0xe1, 0x05, 0x1c, 0x54, 0x7a, 0xa0, 0xe1, 0xea, 0xf5, 0x84, 0xe1, + 0x6a, 0xe9, 0x28, 0x98, 0xdd, 0xdd, 0x58, 0xf5, 0x7f, 0x72, 0x30, 0x1e, 0x12, 0xcb, 0x1d, 0xb2, + 0xcf, 0xc1, 0xa8, 0x4b, 0xf5, 0x56, 0x43, 0xf7, 0x8d, 0x2d, 0x1e, 0xea, 0x9d, 0xe3, 0xb1, 0xd9, + 0x27, 0xf7, 0xf7, 0x26, 0x47, 0x31, 0x9a, 0x81, 0x71, 0x3a, 0xa2, 0x43, 0x9d, 0x25, 0xac, 0x99, + 0x1d, 0xea, 0xf4, 0xfc, 0x21, 0xad, 0xa6, 0x7c, 0xc1, 0x82, 0x21, 0x0c, 0x46, 0x31, 0xb5, 0x77, + 0x73, 0x30, 0x16, 0xad, 0xf0, 0xb1, 0x5b, 0xd4, 0xb6, 0xe2, 0x16, 0xb5, 0xb9, 0x23, 0xf8, 0x27, + 0x03, 0xac, 0x68, 0xbf, 0xac, 0x46, 0x3f, 0x8d, 0x5b, 0xce, 0xa2, 0xc6, 0x82, 0xdc, 0x5d, 0x8d, + 0x05, 0xef, 0xff, 0xc3, 0x6b, 0x06, 0x69, 0xb9, 0xc5, 0x07, 0x58, 0xcb, 0x7d, 0x2f, 0x4f, 0xc0, + 0x89, 0x9c, 0xe2, 0x52, 0xce, 0x70, 0x8a, 0x4b, 0x27, 0x38, 0xc5, 0xa5, 0x72, 0x64, 0x83, 0xce, + 0x41, 0x4e, 0x72, 0xa9, 0xde, 0xd7, 0x93, 0x5c, 0x6a, 0xc7, 0x75, 0x92, 0x0b, 0x64, 0x3d, 0xc9, + 0xe5, 0xcd, 0x1c, 0x8c, 0xb5, 0x62, 0x3b, 0x66, 0xb9, 0x6d, 0x21, 0xcb, 0x54, 0x13, 0xdf, 0x80, + 0x2b, 0xb6, 0x4c, 0xc5, 0xd3, 0x30, 0xc1, 0x52, 0xfb, 0xdf, 0x95, 0xe8, 0x3c, 0x70, 0xbf, 0x4d, + 0xd5, 0xcf, 0xc6, 0x4d, 0xd5, 0x17, 0x93, 0xa6, 0xea, 0x13, 0x91, 0x28, 0xd2, 0xa8, 0xb9, 0xfa, + 0xe3, 0x91, 0xe1, 
0xb1, 0xc0, 0x4f, 0x4e, 0x09, 0x24, 0x9d, 0x32, 0x44, 0x7e, 0x1c, 0xaa, 0x9e, + 0x3a, 0x73, 0x52, 0x2c, 0x6c, 0xc2, 0xff, 0xa2, 0xce, 0x83, 0x0c, 0x28, 0x98, 0x26, 0xee, 0x52, + 0xdd, 0x73, 0xec, 0xa4, 0x26, 0x8e, 0x3c, 0x15, 0x65, 0x6e, 0xd4, 0x64, 0x5e, 0xbe, 0x87, 0xc9, + 0x5c, 0x87, 0xba, 0xa5, 0x7b, 0xfe, 0x7a, 0xb7, 0xa5, 0xfb, 0xb4, 0x25, 0xfb, 0xdb, 0xbf, 0x38, + 0xd8, 0x5c, 0xc5, 0xe6, 0xbf, 0x50, 0x21, 0x5c, 0x0e, 0x61, 0x30, 0x8a, 0x49, 0x5a, 0x30, 0xc2, + 0x5e, 0x79, 0x6f, 0x68, 0xcd, 0xaa, 0x23, 0x00, 0x0e, 0xc3, 0x23, 0xb0, 0xf4, 0x2c, 0x47, 0x70, + 0x30, 0x86, 0x3a, 0xc0, 0xaa, 0x5e, 0x1b, 0xc6, 0xaa, 0x4e, 0x3e, 0x2d, 0x94, 0x8d, 0x5d, 0xf5, + 0xc3, 0xb8, 0x35, 0x6e, 0x34, 0x8c, 0x2a, 0xc4, 0x68, 0x26, 0xc6, 0x69, 0xc9, 0x2c, 0x9c, 0x30, + 0x7a, 0xae, 0xcb, 0xe3, 0x88, 0x64, 0xf1, 0x3a, 0x2f, 0x1e, 0xc4, 0x8b, 0xcd, 0xc5, 0xb3, 0x31, + 0x49, 0xcf, 0x20, 0x7a, 0x52, 0x92, 0x0a, 0x62, 0x24, 0x0e, 0xb1, 0x1e, 0xcf, 0xc6, 0x24, 0x3d, + 0xdf, 0x28, 0x21, 0x50, 0xaf, 0xe8, 0xde, 0x96, 0x0c, 0x36, 0x0b, 0x37, 0x4a, 0x84, 0x59, 0x18, + 0xa5, 0x23, 0x33, 0x00, 0x02, 0x89, 0x97, 0x1a, 0x8b, 0xc7, 0x60, 0xae, 0x07, 0x39, 0x18, 0xa1, + 0xd2, 0xde, 0xac, 0x41, 0xfd, 0xba, 0xee, 0x9b, 0x3b, 0x94, 0xfb, 0xbc, 0x8e, 0xc7, 0xf1, 0xf0, + 0xdf, 0x72, 0x70, 0x26, 0x1e, 0xd8, 0x78, 0x8c, 0xde, 0x07, 0x7e, 0x4c, 0x0a, 0xa6, 0x72, 0xc3, + 0x01, 0xb5, 0xe0, 0x7e, 0x88, 0xbe, 0x38, 0xc9, 0xe3, 0xf6, 0x43, 0x34, 0x07, 0x31, 0xc4, 0xc1, + 0x75, 0x79, 0xbf, 0xf8, 0x21, 0x1e, 0xec, 0xd3, 0xf4, 0x12, 0x5e, 0x92, 0xca, 0x03, 0xe3, 0x25, + 0xa9, 0x3e, 0x10, 0xaa, 0x69, 0x37, 0xe2, 0x25, 0xa9, 0x65, 0x8c, 0xd6, 0x91, 0x7b, 0x01, 0x04, + 0xda, 0x20, 0x6f, 0x0b, 0xdf, 0xc6, 0xaf, 0xac, 0xd7, 0x4c, 0xa3, 0xdb, 0xd0, 0x3d, 0xd3, 0x90, + 0x4a, 0x42, 0x86, 0xd3, 0x43, 0xd5, 0xf9, 0x66, 0xc2, 0xa9, 0xcf, 0x5f, 0x51, 0x60, 0x87, 0xc7, + 0xb9, 0xe5, 0x33, 0x1d, 0xe7, 0x46, 0xe6, 0xa0, 0x68, 0x6f, 0xd3, 0xdd, 0xc3, 0x6d, 0x88, 0xe7, + 0x2b, 0x95, 0xeb, 0xd7, 0xe8, 0x2e, 0xf2, 0xc2, 0xda, 
0x3b, 0x79, 0x00, 0xf6, 0xf9, 0x07, 0xf3, + 0x57, 0x7c, 0x14, 0x2a, 0x5e, 0x8f, 0x5b, 0x16, 0xa4, 0x7a, 0x13, 0x86, 0x38, 0x89, 0x64, 0x54, + 0xf9, 0xe4, 0x71, 0x28, 0xbd, 0xde, 0xa3, 0x3d, 0xe5, 0x7c, 0x0f, 0x94, 0xdb, 0xcf, 0xb3, 0x44, + 0x14, 0x79, 0xc7, 0x67, 0x7b, 0x54, 0x7e, 0x8d, 0xd2, 0x71, 0xf9, 0x35, 0x6a, 0x50, 0xb9, 0xee, + 0xf0, 0x88, 0x49, 0xed, 0xaf, 0xf3, 0x00, 0x61, 0x44, 0x1a, 0xf9, 0x76, 0x0e, 0x1e, 0x0e, 0x3a, + 0x9c, 0x2f, 0xd6, 0x28, 0xfc, 0xc0, 0xde, 0xcc, 0x3e, 0x8e, 0xb4, 0xce, 0xce, 0x47, 0xa0, 0xd5, + 0x34, 0x76, 0x98, 0x5e, 0x0b, 0x82, 0x50, 0xa5, 0x9d, 0xae, 0xbf, 0x3b, 0x6f, 0xba, 0xb2, 0x05, + 0xa6, 0x06, 0x3e, 0x5e, 0x96, 0x34, 0xa2, 0xa8, 0x5c, 0x48, 0xf3, 0x4e, 0xa4, 0x72, 0x30, 0xc0, + 0x21, 0x5b, 0x50, 0xb5, 0x9d, 0x57, 0x3d, 0x26, 0x0e, 0xd9, 0x1c, 0x5f, 0x1c, 0x5e, 0xe4, 0x42, + 0xac, 0xc2, 0x26, 0x2e, 0x5f, 0xb0, 0x62, 0x4b, 0x61, 0x7f, 0x2b, 0x0f, 0xa7, 0x52, 0xe4, 0x40, + 0x5e, 0x84, 0x71, 0x19, 0xfc, 0x17, 0x9e, 0x5c, 0x9d, 0x0b, 0x4f, 0xae, 0x6e, 0x26, 0xf2, 0xb0, + 0x8f, 0x9a, 0xbc, 0x0a, 0xa0, 0x1b, 0x06, 0xf5, 0xbc, 0x15, 0xa7, 0xa5, 0xb4, 0xf7, 0x17, 0x98, + 0xfa, 0x32, 0x1b, 0xa4, 0xde, 0xd9, 0x9b, 0xfc, 0x44, 0x5a, 0x3c, 0x6f, 0x42, 0xce, 0x61, 0x01, + 0x8c, 0x40, 0x92, 0x2f, 0x03, 0x88, 0x85, 0x6a, 0x70, 0xe4, 0xc0, 0x3d, 0xac, 0x3b, 0x53, 0xea, + 0x70, 0xa7, 0xa9, 0xcf, 0xf7, 0x74, 0xdb, 0x37, 0xfd, 0x5d, 0x71, 0xc2, 0xcb, 0xcd, 0x00, 0x05, + 0x23, 0x88, 0xda, 0xef, 0xe4, 0xa1, 0xaa, 0xec, 0xca, 0xf7, 0xc1, 0x98, 0xd8, 0x8e, 0x19, 0x13, + 0x8f, 0x28, 0x82, 0x37, 0xcd, 0x94, 0xe8, 0x24, 0x4c, 0x89, 0x8b, 0xd9, 0x59, 0xdd, 0xdd, 0x90, + 0xf8, 0xbd, 0x3c, 0x8c, 0x29, 0xd2, 0xac, 0x66, 0xc4, 0xcf, 0xc2, 0x09, 0xe1, 0x79, 0x5f, 0xd1, + 0x6f, 0x8b, 0xc3, 0x6e, 0xb8, 0xc0, 0x8a, 0x22, 0x68, 0xb6, 0x11, 0xcf, 0xc2, 0x24, 0x2d, 0x6b, + 0xd6, 0x22, 0x69, 0x9d, 0xad, 0xba, 0x84, 0xaf, 0x4e, 0xac, 0x0e, 0x79, 0xb3, 0x6e, 0x24, 0xf2, + 0xb0, 0x8f, 0x3a, 0x69, 0xc7, 0x2c, 0x1e, 0x83, 0x1d, 0xf3, 0x4f, 0x72, 0x30, 0x12, 0xca, 
0xeb, + 0xd8, 0xad, 0x98, 0x9b, 0x71, 0x2b, 0xe6, 0x6c, 0xe6, 0xe6, 0x30, 0xc0, 0x86, 0xf9, 0x9f, 0x2a, + 0x10, 0x0b, 0x24, 0x27, 0x1b, 0x70, 0xce, 0x4c, 0x0d, 0x87, 0x8b, 0x8c, 0x36, 0xc1, 0xce, 0xe8, + 0xa5, 0x81, 0x94, 0x78, 0x17, 0x14, 0xd2, 0x83, 0xea, 0x0e, 0x75, 0x7d, 0xd3, 0xa0, 0xea, 0xfb, + 0x16, 0x33, 0xab, 0x64, 0xd2, 0x52, 0x1b, 0xc8, 0xf4, 0xa6, 0x64, 0x80, 0x01, 0x2b, 0xb2, 0x01, + 0x25, 0xda, 0x6a, 0x53, 0x75, 0xfc, 0x50, 0xc6, 0xc3, 0x3d, 0x03, 0x79, 0xb2, 0x37, 0x0f, 0x05, + 0x34, 0xf1, 0xa0, 0x66, 0x29, 0x4f, 0x9c, 0x6c, 0x87, 0xc3, 0x2b, 0x58, 0x81, 0x4f, 0x2f, 0x3c, + 0x99, 0x20, 0x48, 0xc2, 0x90, 0x0f, 0xd9, 0x0e, 0x4c, 0x82, 0xa5, 0x23, 0x1a, 0x3c, 0xee, 0x62, + 0x10, 0xf4, 0xa0, 0x76, 0x4b, 0xf7, 0xa9, 0xdb, 0xd1, 0xdd, 0x6d, 0xb9, 0xda, 0x18, 0xfe, 0x0b, + 0x5f, 0x52, 0x48, 0xe1, 0x17, 0x06, 0x49, 0x18, 0xf2, 0x21, 0x0e, 0xd4, 0x7c, 0xa9, 0x3e, 0x2b, + 0xbb, 0xe7, 0xf0, 0x4c, 0x95, 0x22, 0xee, 0xc9, 0x80, 0x72, 0xf5, 0x8a, 0x21, 0x0f, 0xb2, 0x13, + 0x3b, 0x7f, 0x59, 0x9c, 0xba, 0xdd, 0xc8, 0x60, 0x3f, 0x97, 0x50, 0xe1, 0x74, 0x93, 0x7e, 0x8e, + 0xb3, 0xf6, 0x4e, 0x29, 0x1c, 0x96, 0xef, 0xb7, 0x55, 0xef, 0xe9, 0xb8, 0x55, 0xef, 0x42, 0xd2, + 0xaa, 0x97, 0x70, 0xe8, 0x1e, 0x3e, 0x04, 0x35, 0x61, 0x4f, 0x2b, 0x1e, 0x83, 0x3d, 0xed, 0x29, + 0xa8, 0xef, 0xf0, 0x91, 0x40, 0x9c, 0x65, 0x54, 0xe2, 0xd3, 0x08, 0x1f, 0xd9, 0x6f, 0x86, 0xc9, + 0x18, 0xa5, 0x61, 0x45, 0xe4, 0x8d, 0x13, 0xc1, 0x61, 0xb0, 0xb2, 0x48, 0x33, 0x4c, 0xc6, 0x28, + 0x0d, 0x8f, 0x5e, 0x33, 0xed, 0x6d, 0x51, 0xa0, 0xc2, 0x0b, 0x88, 0xe8, 0x35, 0x95, 0x88, 0x61, + 0x3e, 0xb9, 0x04, 0xd5, 0x5e, 0x6b, 0x53, 0xd0, 0x56, 0x39, 0x2d, 0xd7, 0x30, 0xd7, 0xe7, 0x17, + 0xe4, 0xd9, 0x4a, 0x2a, 0x97, 0xd5, 0xa4, 0xa3, 0x77, 0x55, 0x06, 0x5f, 0x1b, 0xca, 0x9a, 0xac, + 0x84, 0xc9, 0x18, 0xa5, 0x21, 0x9f, 0x82, 0x31, 0x97, 0xb6, 0x7a, 0x06, 0x0d, 0x4a, 0x09, 0x73, + 0x1c, 0x11, 0x57, 0x6b, 0x44, 0x73, 0x30, 0x41, 0x39, 0xc0, 0x2a, 0x58, 0x1f, 0x2a, 0xd6, 0xf6, + 0xa7, 0x39, 0x20, 0xfd, 0xd1, 
0xde, 0x64, 0x0b, 0xca, 0x36, 0xb7, 0x7e, 0x65, 0x3e, 0x3e, 0x3a, + 0x62, 0x44, 0x13, 0xc3, 0x92, 0x4c, 0x90, 0xf8, 0xc4, 0x86, 0x2a, 0xbd, 0xed, 0x53, 0xd7, 0x0e, + 0x76, 0x7f, 0x1c, 0xcd, 0x51, 0xd5, 0x62, 0x35, 0x20, 0x91, 0x31, 0xe0, 0xa1, 0xfd, 0x3c, 0x0f, + 0xf5, 0x08, 0xdd, 0xbd, 0x16, 0x95, 0x7c, 0x03, 0xba, 0x30, 0x3a, 0xad, 0xbb, 0x96, 0xec, 0x61, + 0x91, 0x0d, 0xe8, 0x32, 0x0b, 0x97, 0x31, 0x4a, 0x47, 0x66, 0x00, 0x3a, 0xba, 0xe7, 0x53, 0x97, + 0xcf, 0xbe, 0x89, 0x6d, 0xdf, 0x2b, 0x41, 0x0e, 0x46, 0xa8, 0xc8, 0x45, 0x79, 0xd8, 0x78, 0x31, + 0x7e, 0x4c, 0xdf, 0x80, 0x93, 0xc4, 0x4b, 0x47, 0x70, 0x92, 0x38, 0x69, 0xc3, 0xb8, 0xaa, 0xb5, + 0xca, 0x3d, 0xdc, 0x21, 0x6e, 0x62, 0xfd, 0x92, 0x80, 0xc0, 0x3e, 0x50, 0xed, 0x9d, 0x1c, 0x8c, + 0xc6, 0x4c, 0x1e, 0xe2, 0x80, 0x3d, 0xb5, 0x57, 0x21, 0x76, 0xc0, 0x5e, 0x64, 0x8b, 0xc1, 0x93, + 0x50, 0x16, 0x02, 0x4a, 0x86, 0x20, 0x0a, 0x11, 0xa2, 0xcc, 0x65, 0x63, 0x99, 0x34, 0xaa, 0x26, + 0xc7, 0x32, 0x69, 0x75, 0x45, 0x95, 0x2f, 0x9c, 0x13, 0xa2, 0x76, 0xfd, 0xce, 0x09, 0x91, 0x8e, + 0x01, 0x85, 0xf6, 0x03, 0x5e, 0x6f, 0xdf, 0xdd, 0x0d, 0xd6, 0x72, 0x6d, 0xa8, 0xc8, 0xb0, 0x33, + 0xd9, 0x35, 0x5e, 0xcc, 0x60, 0x87, 0xe1, 0x38, 0x32, 0xc0, 0x4a, 0x37, 0xb6, 0x6f, 0x6c, 0x6e, + 0xa2, 0x42, 0x27, 0x97, 0xa1, 0xe6, 0xd8, 0x0b, 0xba, 0x69, 0xf5, 0x5c, 0x35, 0xb2, 0x7f, 0x84, + 0x8d, 0x55, 0x37, 0x54, 0xe2, 0x9d, 0xbd, 0xc9, 0x33, 0xc1, 0x4b, 0xac, 0x92, 0x18, 0x96, 0xd4, + 0xfe, 0xbe, 0x00, 0x3c, 0xe4, 0x88, 0x3c, 0x07, 0xb5, 0x0e, 0x35, 0xb6, 0x74, 0xdb, 0xf4, 0xd4, + 0x11, 0xa1, 0x6c, 0x7d, 0x5e, 0x5b, 0x51, 0x89, 0x77, 0x98, 0x08, 0x66, 0x9b, 0xcb, 0x3c, 0xaa, + 0x3f, 0xa4, 0x25, 0x06, 0x94, 0xdb, 0x9e, 0xa7, 0x77, 0xcd, 0xcc, 0x1e, 0x6f, 0x71, 0x24, 0xa3, + 0x18, 0x06, 0xc4, 0x33, 0x4a, 0x68, 0x62, 0x40, 0xa9, 0x6b, 0xe9, 0xa6, 0x9d, 0xf9, 0x6a, 0x1c, + 0xf6, 0x05, 0xab, 0x0c, 0x49, 0x18, 0xa5, 0xf8, 0x23, 0x0a, 0x6c, 0xd2, 0x83, 0xba, 0x67, 0xb8, + 0x7a, 0xc7, 0xdb, 0xd2, 0x67, 0x9e, 0x79, 0x36, 0xb3, 0x9a, 0x17, 
0xb2, 0x12, 0xb3, 0xce, 0x1c, + 0xce, 0xae, 0x34, 0xaf, 0xcc, 0xce, 0x3c, 0xf3, 0x2c, 0x46, 0xf9, 0x44, 0xd9, 0x3e, 0xf3, 0xd4, + 0x8c, 0xec, 0xb9, 0x47, 0xce, 0xf6, 0x99, 0xa7, 0x66, 0x30, 0xca, 0x47, 0xfb, 0xbb, 0x1c, 0xd4, + 0x02, 0x5a, 0xb2, 0x0e, 0xc0, 0xc6, 0x10, 0x79, 0x88, 0xe2, 0xa1, 0x2e, 0x34, 0xe0, 0xeb, 0xfa, + 0xf5, 0xa0, 0x30, 0x46, 0x80, 0x52, 0x4e, 0x99, 0xcc, 0x1f, 0xf5, 0x29, 0x93, 0xd3, 0x50, 0xdb, + 0xd2, 0xed, 0x96, 0xb7, 0xa5, 0x6f, 0x8b, 0xa1, 0x34, 0x72, 0xee, 0xea, 0x15, 0x95, 0x81, 0x21, + 0x8d, 0xf6, 0x5b, 0x65, 0x10, 0x6e, 0x6a, 0xd6, 0xd9, 0x5b, 0xa6, 0x27, 0xe2, 0xa4, 0x73, 0xbc, + 0x64, 0xd0, 0xd9, 0xe7, 0x65, 0x3a, 0x06, 0x14, 0xe4, 0x2c, 0x14, 0x3a, 0xa6, 0x2d, 0x7d, 0x36, + 0xdc, 0x64, 0xb7, 0x62, 0xda, 0xc8, 0xd2, 0x78, 0x96, 0x7e, 0x5b, 0x86, 0xb8, 0x89, 0x2c, 0xfd, + 0x36, 0xb2, 0x34, 0xb6, 0x88, 0xb6, 0x1c, 0x67, 0x9b, 0x75, 0x5b, 0x15, 0x09, 0x57, 0xe4, 0x53, + 0x39, 0x5f, 0x44, 0x2f, 0xc7, 0xb3, 0x30, 0x49, 0x4b, 0xd6, 0xe1, 0x91, 0x37, 0xa8, 0xeb, 0xc8, + 0x71, 0xaa, 0x69, 0x51, 0xda, 0x55, 0x30, 0x42, 0x09, 0xe2, 0x01, 0x75, 0x5f, 0x4c, 0x27, 0xc1, + 0x41, 0x65, 0x79, 0x68, 0xae, 0xee, 0xb6, 0xa9, 0xbf, 0xea, 0x3a, 0x06, 0xf5, 0x3c, 0xd3, 0x6e, + 0x2b, 0xd8, 0x72, 0x08, 0xbb, 0x96, 0x4e, 0x82, 0x83, 0xca, 0x92, 0x97, 0x61, 0x42, 0x64, 0x09, + 0x75, 0x61, 0x76, 0x47, 0x37, 0x2d, 0x7d, 0xc3, 0xb4, 0xd4, 0x8d, 0x72, 0xa3, 0xc2, 0x33, 0xb2, + 0x36, 0x80, 0x06, 0x07, 0x96, 0x26, 0x57, 0x61, 0x5c, 0xf9, 0xc5, 0x56, 0xa9, 0xdb, 0x0c, 0x42, + 0x17, 0x46, 0x1b, 0x17, 0xd8, 0x8a, 0x75, 0x9e, 0x76, 0x5d, 0x6a, 0x44, 0xbd, 0x81, 0x8a, 0x0a, + 0xfb, 0xca, 0x11, 0x84, 0x33, 0x3c, 0x3e, 0x61, 0xbd, 0x3b, 0xe7, 0x38, 0x56, 0xcb, 0xb9, 0x65, + 0xab, 0x6f, 0x17, 0xaa, 0x19, 0x77, 0x85, 0x35, 0x53, 0x29, 0x70, 0x40, 0x49, 0xf6, 0xe5, 0x3c, + 0x67, 0xde, 0xb9, 0x65, 0x27, 0x51, 0x21, 0xfc, 0xf2, 0xe6, 0x00, 0x1a, 0x1c, 0x58, 0x9a, 0x2c, + 0x00, 0x49, 0x7e, 0xc1, 0x7a, 0x57, 0xba, 0x57, 0xcf, 0x88, 0xf3, 0x50, 0x92, 0xb9, 0x98, 0x52, + 0x82, 
0x2c, 0xc3, 0xe9, 0x64, 0x2a, 0x63, 0x27, 0xbd, 0xac, 0xfc, 0x24, 0x54, 0x4c, 0xc9, 0xc7, + 0xd4, 0x52, 0xda, 0x6f, 0xe7, 0x61, 0x34, 0xb6, 0x81, 0xfe, 0x81, 0xdb, 0xa8, 0xcc, 0x74, 0xe8, + 0x8e, 0xd7, 0x5e, 0x9a, 0xbf, 0x42, 0xf5, 0x16, 0x75, 0xaf, 0x51, 0x75, 0xd8, 0x01, 0x1f, 0x54, + 0x56, 0x62, 0x39, 0x98, 0xa0, 0x24, 0x9b, 0x50, 0x12, 0x16, 0xe1, 0xac, 0x57, 0x63, 0x28, 0x19, + 0x71, 0xb3, 0xb0, 0xbc, 0x4f, 0xc6, 0x71, 0x29, 0x0a, 0x78, 0xcd, 0x87, 0x91, 0x28, 0x05, 0x1b, + 0x48, 0x42, 0x75, 0xb3, 0x12, 0x53, 0x35, 0x97, 0xa0, 0xe0, 0xfb, 0xc3, 0x6e, 0x81, 0x16, 0x1e, + 0x86, 0xb5, 0x65, 0x64, 0x18, 0xda, 0x26, 0xfb, 0x77, 0x9e, 0x67, 0x3a, 0xb6, 0x3c, 0x0f, 0x7b, + 0x1d, 0x2a, 0xbe, 0x34, 0xb2, 0x0d, 0xb7, 0x85, 0x9b, 0xeb, 0x28, 0xca, 0xc0, 0xa6, 0xb0, 0xb4, + 0x3f, 0xcd, 0x43, 0x2d, 0x58, 0x10, 0x1f, 0xe0, 0x9c, 0x69, 0x07, 0x6a, 0x41, 0x7c, 0x55, 0xe6, + 0xdb, 0xf6, 0xc2, 0xb0, 0x1f, 0xbe, 0x86, 0x0b, 0x5e, 0x31, 0xe4, 0x11, 0x8d, 0xdd, 0x2a, 0x64, + 0x88, 0xdd, 0xea, 0x42, 0xc5, 0x77, 0xcd, 0x76, 0x5b, 0x6a, 0xe7, 0x59, 0x82, 0xb7, 0x02, 0x71, + 0xad, 0x09, 0x40, 0x29, 0x59, 0xf1, 0x82, 0x8a, 0x8d, 0xf6, 0x1a, 0x8c, 0x27, 0x29, 0xb9, 0xea, + 0x6a, 0x6c, 0xd1, 0x56, 0xcf, 0x52, 0x32, 0x0e, 0x55, 0x57, 0x99, 0x8e, 0x01, 0x05, 0x5b, 0xbe, + 0xb2, 0xdf, 0xf4, 0x86, 0x63, 0x2b, 0xf5, 0x91, 0xaf, 0x02, 0xd6, 0x64, 0x1a, 0x06, 0xb9, 0xda, + 0x5f, 0x15, 0xe0, 0x6c, 0x68, 0xd6, 0x58, 0xd1, 0x6d, 0xbd, 0x7d, 0x80, 0x2b, 0xd6, 0x3e, 0xd8, + 0x14, 0x73, 0xd8, 0xcb, 0x02, 0x0a, 0x0f, 0xc0, 0x65, 0x01, 0xff, 0x90, 0x07, 0x1e, 0x0b, 0x4a, + 0xbe, 0x0a, 0x23, 0x7a, 0xe4, 0x76, 0x4d, 0xf9, 0x3b, 0x2f, 0x67, 0xfe, 0x9d, 0x3c, 0xe4, 0x34, + 0x88, 0x6d, 0x8a, 0xa6, 0x62, 0x8c, 0x21, 0x71, 0xa0, 0xba, 0xa9, 0x5b, 0x16, 0xd3, 0x85, 0x32, + 0xbb, 0x69, 0x62, 0xcc, 0x79, 0x33, 0x5f, 0x90, 0xd0, 0x18, 0x30, 0x21, 0x6f, 0xe6, 0x60, 0xd4, + 0x8d, 0x2e, 0x93, 0xe4, 0x0f, 0xc9, 0xe2, 0xc4, 0x8f, 0xa0, 0x45, 0x23, 0xa9, 0xa2, 0x6b, 0xb1, + 0x38, 0x4f, 0xed, 0x2f, 0x73, 0x30, 0xda, 
0xb4, 0xcc, 0x96, 0x69, 0xb7, 0x8f, 0xf1, 0xae, 0x82, + 0x1b, 0x50, 0xf2, 0x2c, 0xb3, 0x45, 0x87, 0x9c, 0x4d, 0xc4, 0x3c, 0xc6, 0x00, 0x50, 0xe0, 0xc4, + 0x2f, 0x3f, 0x28, 0x1c, 0xe0, 0xf2, 0x83, 0x5f, 0x95, 0x41, 0x46, 0x35, 0x93, 0x1e, 0xd4, 0xda, + 0xea, 0x4c, 0x75, 0xf9, 0x8d, 0x57, 0x32, 0x9c, 0xc7, 0x17, 0x3b, 0x9d, 0x5d, 0x8c, 0xfd, 0x41, + 0x22, 0x86, 0x9c, 0x08, 0x8d, 0x5f, 0xeb, 0x3a, 0x9f, 0xf1, 0x5a, 0x57, 0xc1, 0xae, 0xff, 0x62, + 0x57, 0x1d, 0x8a, 0x5b, 0xbe, 0xdf, 0x95, 0x8d, 0x69, 0xf8, 0xb0, 0xf5, 0xf0, 0x48, 0x18, 0xa1, + 0x13, 0xb1, 0x77, 0xe4, 0xd0, 0x8c, 0x85, 0xad, 0x07, 0xd7, 0x78, 0xcd, 0x65, 0x0a, 0x18, 0x88, + 0xb2, 0x60, 0xef, 0xc8, 0xa1, 0xc9, 0x57, 0xa0, 0xee, 0xbb, 0xba, 0xed, 0x6d, 0x3a, 0x6e, 0x87, + 0xba, 0x72, 0x8d, 0xba, 0x90, 0xe1, 0x66, 0xd3, 0xb5, 0x10, 0x4d, 0x78, 0x22, 0x63, 0x49, 0x18, + 0xe5, 0x46, 0xb6, 0xa1, 0xda, 0x6b, 0x89, 0x8a, 0x49, 0xf3, 0xd3, 0x6c, 0x96, 0xcb, 0x6a, 0x23, + 0xe1, 0x00, 0xea, 0x0d, 0x03, 0x06, 0xf1, 0x1b, 0xeb, 0x2a, 0x47, 0x75, 0x63, 0x5d, 0xb4, 0x35, + 0xa6, 0x9d, 0x57, 0x41, 0x3a, 0x52, 0xaf, 0xb5, 0xdb, 0x32, 0x9a, 0x69, 0x21, 0xb3, 0xca, 0x29, + 0x58, 0xd6, 0x03, 0xdd, 0xd8, 0x6e, 0xa3, 0xe2, 0xa1, 0x75, 0x40, 0x7a, 0x09, 0x88, 0x11, 0xbb, + 0xd7, 0x45, 0x6c, 0xa2, 0x9a, 0x3e, 0xd8, 0x78, 0x10, 0x5c, 0x30, 0x12, 0x39, 0x57, 0x3a, 0xf5, + 0x02, 0x17, 0xed, 0xcf, 0xf2, 0x50, 0x58, 0x5b, 0x6e, 0x8a, 0xb3, 0x22, 0xf9, 0xa5, 0x49, 0xb4, + 0xb9, 0x6d, 0x76, 0x6f, 0x52, 0xd7, 0xdc, 0xdc, 0x95, 0x4b, 0xef, 0xc8, 0x59, 0x91, 0x49, 0x0a, + 0x4c, 0x29, 0x45, 0x5e, 0x81, 0x11, 0x43, 0x9f, 0xa3, 0xae, 0x3f, 0x8c, 0x61, 0x81, 0xef, 0x16, + 0x9d, 0x9b, 0x0d, 0x8b, 0x63, 0x0c, 0x8c, 0xac, 0x03, 0x18, 0x21, 0x74, 0xe1, 0xd0, 0xe6, 0x90, + 0x08, 0x70, 0x04, 0x88, 0x20, 0xd4, 0xb6, 0x19, 0x29, 0x47, 0x2d, 0x1e, 0x06, 0x95, 0xb7, 0x9c, + 0x6b, 0xaa, 0x2c, 0x86, 0x30, 0x9a, 0x0d, 0xa3, 0xb1, 0xcb, 0x5e, 0xc8, 0x27, 0xa1, 0xea, 0x74, + 0x23, 0xc3, 0x69, 0x8d, 0xc7, 0x4d, 0x56, 0x6f, 0xc8, 0xb4, 0x3b, 0x7b, 0x93, 
0xa3, 0xcb, 0x4e, + 0xdb, 0x34, 0x54, 0x02, 0x06, 0xe4, 0x44, 0x83, 0x32, 0xdf, 0xe2, 0xa5, 0xae, 0x7a, 0xe1, 0x73, + 0x07, 0xbf, 0x8d, 0xc1, 0x43, 0x99, 0xa3, 0x7d, 0xad, 0x08, 0xa1, 0x6f, 0x8d, 0x78, 0x50, 0x16, + 0x21, 0xec, 0x72, 0xe4, 0x3e, 0xd6, 0x68, 0x79, 0xc9, 0x8a, 0xb4, 0xa1, 0xf0, 0x9a, 0xb3, 0x91, + 0x79, 0xe0, 0x8e, 0xec, 0xed, 0x16, 0xb6, 0xb2, 0x48, 0x02, 0x32, 0x0e, 0xe4, 0xbf, 0xe7, 0xe0, + 0xa4, 0x97, 0x54, 0x7d, 0x65, 0x73, 0xc0, 0xec, 0x3a, 0x7e, 0x52, 0x99, 0x96, 0x01, 0xae, 0x83, + 0xb2, 0xb1, 0xbf, 0x2e, 0x4c, 0xfe, 0xc2, 0xe9, 0x25, 0x9b, 0xd3, 0x62, 0xc6, 0x0b, 0x0a, 0xe3, + 0xf2, 0x8f, 0xa7, 0xa1, 0x64, 0xa5, 0x7d, 0x23, 0x0f, 0xf5, 0xc8, 0x68, 0x9d, 0xf9, 0x06, 0xa1, + 0xdb, 0x89, 0x1b, 0x84, 0x56, 0x87, 0xf7, 0x01, 0x87, 0xb5, 0x3a, 0xee, 0x4b, 0x84, 0x7e, 0x2f, + 0x0f, 0x85, 0xf5, 0xf9, 0x85, 0xf8, 0xa2, 0x35, 0x77, 0x1f, 0x16, 0xad, 0x5b, 0x50, 0xd9, 0xe8, + 0x99, 0x96, 0x6f, 0xda, 0x99, 0x4f, 0x9f, 0x50, 0x17, 0x2e, 0x49, 0x1f, 0x83, 0x40, 0x45, 0x05, + 0x4f, 0xda, 0x50, 0x69, 0x8b, 0xe3, 0xff, 0x32, 0x47, 0xc6, 0xc9, 0x63, 0x04, 0x05, 0x23, 0xf9, + 0x82, 0x0a, 0x5d, 0xdb, 0x05, 0x79, 0x73, 0xfc, 0x7d, 0x97, 0xa6, 0xf6, 0x15, 0x08, 0xb4, 0x80, + 0xfb, 0xcf, 0xfc, 0x6f, 0x72, 0x10, 0x57, 0x7c, 0xee, 0x7f, 0x6b, 0xda, 0x4e, 0xb6, 0xa6, 0xf9, + 0xa3, 0xe8, 0x7c, 0xe9, 0x0d, 0x4a, 0xfb, 0xcd, 0x3c, 0x94, 0xef, 0xdb, 0x8e, 0x61, 0x1a, 0x0b, + 0xf2, 0x9b, 0xcb, 0x38, 0x30, 0x0e, 0x0c, 0xf1, 0xeb, 0x24, 0x42, 0xfc, 0xb2, 0x5e, 0x11, 0x7b, + 0x8f, 0x00, 0xbf, 0x3f, 0xca, 0x81, 0x1c, 0x96, 0x97, 0x6c, 0xcf, 0xd7, 0x6d, 0x83, 0x12, 0x23, + 0x98, 0x03, 0xb2, 0x46, 0x92, 0xc8, 0x68, 0x2b, 0x31, 0xed, 0xf3, 0x67, 0x35, 0xe6, 0x93, 0x8f, + 0x43, 0x75, 0xcb, 0xf1, 0x7c, 0x3e, 0xce, 0xe7, 0xe3, 0xd6, 0xa5, 0x2b, 0x32, 0x1d, 0x03, 0x8a, + 0xa4, 0xc7, 0xb5, 0x34, 0xd8, 0xe3, 0xaa, 0x7d, 0x37, 0x0f, 0x23, 0xef, 0x97, 0x6d, 0xcf, 0x69, + 0x21, 0x91, 0x85, 0x8c, 0x21, 0x91, 0xc5, 0xc3, 0x84, 0x44, 0x6a, 0x3f, 0xce, 0x01, 0xdc, 0xb7, + 0x3d, 0xd7, 0xad, 
0x78, 0xb4, 0x62, 0xe6, 0x76, 0x95, 0x1e, 0xab, 0xf8, 0xff, 0x4b, 0xea, 0x93, + 0x78, 0xa4, 0xe2, 0x5b, 0x39, 0x18, 0xd3, 0x63, 0xd1, 0x7f, 0x99, 0x55, 0xcb, 0x44, 0x30, 0x61, + 0xb0, 0xbf, 0x34, 0x9e, 0x8e, 0x09, 0xb6, 0xe4, 0xf9, 0xf0, 0xbc, 0xdf, 0xeb, 0x61, 0xb3, 0xef, + 0x3b, 0xa8, 0x97, 0xab, 0x39, 0x31, 0xca, 0x7b, 0x44, 0x5b, 0x16, 0x8e, 0x24, 0xda, 0x32, 0xba, + 0x8f, 0xac, 0x78, 0xd7, 0x7d, 0x64, 0x3b, 0x50, 0xdb, 0x74, 0x9d, 0x0e, 0x0f, 0x68, 0x94, 0x97, + 0xcb, 0x5e, 0xce, 0x30, 0xa7, 0x84, 0xd7, 0xaa, 0x87, 0x36, 0x9e, 0x05, 0x85, 0x8f, 0x21, 0x2b, + 0x6e, 0x16, 0x77, 0x04, 0xd7, 0xf2, 0x51, 0x72, 0x0d, 0xc6, 0x92, 0x35, 0x81, 0x8e, 0x8a, 0x4d, + 0x3c, 0x88, 0xb1, 0x72, 0x7f, 0x82, 0x18, 0xb5, 0xef, 0x97, 0xd5, 0x00, 0xf6, 0xc0, 0x1d, 0x2d, + 0xf9, 0xfe, 0xdf, 0xab, 0x9b, 0xdc, 0x48, 0x5b, 0xb9, 0x8f, 0x1b, 0x69, 0xab, 0x47, 0xb3, 0x91, + 0xb6, 0x96, 0x6d, 0x23, 0x2d, 0x64, 0xdf, 0x48, 0x5b, 0xcf, 0xb6, 0x91, 0x76, 0x64, 0xa8, 0x8d, + 0xb4, 0xa3, 0x07, 0xda, 0x48, 0xbb, 0x57, 0x80, 0xc4, 0x2a, 0xf3, 0x03, 0x8f, 0xd2, 0x3f, 0x29, + 0x8f, 0xd2, 0xdb, 0x79, 0x08, 0x87, 0xcd, 0x43, 0x46, 0xdc, 0xbc, 0x0c, 0xd5, 0x8e, 0x7e, 0x7b, + 0x9e, 0x5a, 0xfa, 0x6e, 0x96, 0xfb, 0x53, 0x57, 0x24, 0x06, 0x06, 0x68, 0xc4, 0x03, 0x30, 0x83, + 0x33, 0xcc, 0x33, 0xdb, 0xe6, 0xc3, 0xe3, 0xd0, 0x85, 0xf5, 0x2f, 0x7c, 0xc7, 0x08, 0x1b, 0xed, + 0x0f, 0xf3, 0x20, 0x0f, 0xbb, 0x27, 0x14, 0x4a, 0x9b, 0xe6, 0x6d, 0xda, 0xca, 0x1c, 0x3f, 0x1b, + 0xb9, 0xd5, 0x5a, 0x38, 0x1f, 0x78, 0x02, 0x0a, 0x74, 0x6e, 0x55, 0x16, 0xce, 0x24, 0x29, 0xbf, + 0x0c, 0x56, 0xe5, 0xa8, 0x53, 0x4a, 0x5a, 0x95, 0x45, 0x12, 0x2a, 0x1e, 0xc2, 0x88, 0xcd, 0xe3, + 0x0a, 0x32, 0xfb, 0xce, 0x62, 0xf1, 0x09, 0xca, 0x88, 0xed, 0x89, 0x9d, 0xf4, 0x92, 0x47, 0xe3, + 0x4b, 0x3f, 0xfa, 0xc9, 0x85, 0x87, 0x7e, 0xfc, 0x93, 0x0b, 0x0f, 0xbd, 0xfb, 0x93, 0x0b, 0x0f, + 0x7d, 0x6d, 0xff, 0x42, 0xee, 0x47, 0xfb, 0x17, 0x72, 0x3f, 0xde, 0xbf, 0x90, 0x7b, 0x77, 0xff, + 0x42, 0xee, 0xcf, 0xf7, 0x2f, 0xe4, 0xfe, 0xcb, 0x5f, 
0x5c, 0x78, 0xe8, 0x8b, 0xcf, 0x85, 0x55, + 0x98, 0x56, 0x55, 0x98, 0x56, 0x0c, 0xa7, 0xbb, 0xdb, 0xed, 0x69, 0x56, 0x85, 0x30, 0x45, 0x55, + 0xe1, 0x1f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xb9, 0x32, 0x6d, 0x43, 0x50, 0x97, 0x00, 0x00, } func (m *AbstractPodTemplate) Marshal() (dAtA []byte, err error) { @@ -6474,6 +6479,22 @@ func (m *MonoVertexStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i -= len(m.UpdateHash) + copy(dAtA[i:], m.UpdateHash) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateHash))) + i-- + dAtA[i] = 0x72 + i -= len(m.CurrentHash) + copy(dAtA[i:], m.CurrentHash) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentHash))) + i-- + dAtA[i] = 0x6a + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x60 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + i-- + dAtA[i] = 0x58 i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) i-- dAtA[i] = 0x50 @@ -8947,6 +8968,22 @@ func (m *VertexStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i -= len(m.UpdateHash) + copy(dAtA[i:], m.UpdateHash) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateHash))) + i-- + dAtA[i] = 0x6a + i -= len(m.CurrentHash) + copy(dAtA[i:], m.CurrentHash) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentHash))) + i-- + dAtA[i] = 0x62 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x58 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + i-- + dAtA[i] = 0x50 i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) i-- dAtA[i] = 0x48 @@ -10390,6 +10427,12 @@ func (m *MonoVertexStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) n += 1 + sovGenerated(uint64(m.ObservedGeneration)) n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + n += 1 + sovGenerated(uint64(m.CurrentReplicas)) + n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + l = len(m.CurrentHash) + n += 1 + l + sovGenerated(uint64(l)) + l = 
len(m.UpdateHash) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -11305,6 +11348,12 @@ func (m *VertexStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) n += 1 + sovGenerated(uint64(m.ObservedGeneration)) n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + n += 1 + sovGenerated(uint64(m.CurrentReplicas)) + n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + l = len(m.CurrentHash) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UpdateHash) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -12229,6 +12278,10 @@ func (this *MonoVertexStatus) String() string { `LastScaledAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastScaledAt), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, + `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, + `CurrentHash:` + fmt.Sprintf("%v", this.CurrentHash) + `,`, + `UpdateHash:` + fmt.Sprintf("%v", this.UpdateHash) + `,`, `}`, }, "") return s @@ -12826,6 +12879,10 @@ func (this *VertexStatus) String() string { `LastScaledAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastScaledAt), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, + `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, + `CurrentHash:` + fmt.Sprintf("%v", this.CurrentHash) + `,`, + `UpdateHash:` + fmt.Sprintf("%v", this.UpdateHash) + `,`, `}`, }, "") return s @@ -23630,6 +23687,108 @@ func (m *MonoVertexStatus) Unmarshal(dAtA []byte) error { break } } + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) + } + m.CurrentReplicas = 0 + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentReplicas |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + } + m.UpdatedReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedReplicas |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentHash", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CurrentHash = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateHash", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UpdateHash 
= string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -30950,6 +31109,108 @@ func (m *VertexStatus) Unmarshal(dAtA []byte) error { break } } + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) + } + m.CurrentReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentReplicas |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + } + m.UpdatedReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedReplicas |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentHash", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CurrentHash = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateHash", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + 
b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UpdateHash = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/pkg/apis/numaflow/v1alpha1/generated.proto b/pkg/apis/numaflow/v1alpha1/generated.proto index 9c513d6fde..8fadbab191 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.proto +++ b/pkg/apis/numaflow/v1alpha1/generated.proto @@ -977,6 +977,18 @@ message MonoVertexStatus { // The number of pods targeted by this MonoVertex with a Ready Condition. // +optional optional uint32 readyReplicas = 10; + + // The number of Pods created by the controller from the MonoVertex version indicated by currentHash. + optional uint32 currentReplicas = 11; + + // The number of Pods created by the controller from the MonoVertex version indicated by updateHash. + optional uint32 updatedReplicas = 12; + + // If not empty, indicates the version of the MonoVertex used to generate Pods in the sequence [0,currentReplicas). + optional string currentHash = 13; + + // If not empty, indicates the version of the MonoVertx used to generate Pods in the sequence [replicas-updatedReplicas,replicas) + optional string updateHash = 14; } message NativeRedis { @@ -1697,6 +1709,18 @@ message VertexStatus { // The number of pods targeted by this Vertex with a Ready Condition. // +optional optional uint32 readyReplicas = 9; + + // The number of Pods created by the controller from the Vertex version indicated by currentHash. + optional uint32 currentReplicas = 10; + + // The number of Pods created by the controller from the Vertex version indicated by updateHash. 
+ optional uint32 updatedReplicas = 11; + + // If not empty, indicates the version of the Vertex used to generate Pods in the sequence [0,currentReplicas). + optional string currentHash = 12; + + // If not empty, indicates the version of the Vertx used to generate Pods in the sequence [replicas-updatedReplicas,replicas) + optional string updateHash = 13; } message VertexTemplate { diff --git a/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go index 15544b81b6..b05fd8c5f4 100644 --- a/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go +++ b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go @@ -487,6 +487,14 @@ type MonoVertexStatus struct { // The number of pods targeted by this MonoVertex with a Ready Condition. // +optional ReadyReplicas uint32 `json:"readyReplicas,omitempty" protobuf:"varint,10,opt,name=readyReplicas"` + // The number of Pods created by the controller from the MonoVertex version indicated by currentHash. + CurrentReplicas uint32 `json:"currentReplicas,omitempty" protobuf:"varint,11,opt,name=currentReplicas"` + // The number of Pods created by the controller from the MonoVertex version indicated by updateHash. + UpdatedReplicas uint32 `json:"updatedReplicas,omitempty" protobuf:"varint,12,opt,name=updatedReplicas"` + // If not empty, indicates the version of the MonoVertex used to generate Pods in the sequence [0,currentReplicas). 
+ CurrentHash string `json:"currentHash,omitempty" protobuf:"bytes,13,opt,name=currentHash"` + // If not empty, indicates the version of the MonoVertx used to generate Pods in the sequence [replicas-updatedReplicas,replicas) + UpdateHash string `json:"updateHash,omitempty" protobuf:"bytes,14,opt,name=updateHash"` } // SetObservedGeneration sets the Status ObservedGeneration diff --git a/pkg/apis/numaflow/v1alpha1/openapi_generated.go b/pkg/apis/numaflow/v1alpha1/openapi_generated.go index 79aab81c33..bb128213f0 100644 --- a/pkg/apis/numaflow/v1alpha1/openapi_generated.go +++ b/pkg/apis/numaflow/v1alpha1/openapi_generated.go @@ -3423,6 +3423,34 @@ func schema_pkg_apis_numaflow_v1alpha1_MonoVertexStatus(ref common.ReferenceCall Format: "int64", }, }, + "currentReplicas": { + SchemaProps: spec.SchemaProps{ + Description: "The number of Pods created by the controller from the MonoVertex version indicated by currentHash.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "updatedReplicas": { + SchemaProps: spec.SchemaProps{ + Description: "The number of Pods created by the controller from the MonoVertex version indicated by updateHash.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "currentHash": { + SchemaProps: spec.SchemaProps{ + Description: "If not empty, indicates the version of the MonoVertex used to generate Pods in the sequence [0,currentReplicas).", + Type: []string{"string"}, + Format: "", + }, + }, + "updateHash": { + SchemaProps: spec.SchemaProps{ + Description: "If not empty, indicates the version of the MonoVertx used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", + Type: []string{"string"}, + Format: "", + }, + }, }, }, }, @@ -5770,6 +5798,34 @@ func schema_pkg_apis_numaflow_v1alpha1_VertexStatus(ref common.ReferenceCallback Format: "int64", }, }, + "currentReplicas": { + SchemaProps: spec.SchemaProps{ + Description: "The number of Pods created by the controller from the Vertex version indicated by 
currentHash.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "updatedReplicas": { + SchemaProps: spec.SchemaProps{ + Description: "The number of Pods created by the controller from the Vertex version indicated by updateHash.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "currentHash": { + SchemaProps: spec.SchemaProps{ + Description: "If not empty, indicates the version of the Vertex used to generate Pods in the sequence [0,currentReplicas).", + Type: []string{"string"}, + Format: "", + }, + }, + "updateHash": { + SchemaProps: spec.SchemaProps{ + Description: "If not empty, indicates the version of the Vertx used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", + Type: []string{"string"}, + Format: "", + }, + }, }, }, }, diff --git a/pkg/apis/numaflow/v1alpha1/vertex_types.go b/pkg/apis/numaflow/v1alpha1/vertex_types.go index 2e78357bb9..0b5ec7efc9 100644 --- a/pkg/apis/numaflow/v1alpha1/vertex_types.go +++ b/pkg/apis/numaflow/v1alpha1/vertex_types.go @@ -730,6 +730,14 @@ type VertexStatus struct { // The number of pods targeted by this Vertex with a Ready Condition. // +optional ReadyReplicas uint32 `json:"readyReplicas,omitempty" protobuf:"varint,9,opt,name=readyReplicas"` + // The number of Pods created by the controller from the Vertex version indicated by currentHash. + CurrentReplicas uint32 `json:"currentReplicas,omitempty" protobuf:"varint,10,opt,name=currentReplicas"` + // The number of Pods created by the controller from the Vertex version indicated by updateHash. + UpdatedReplicas uint32 `json:"updatedReplicas,omitempty" protobuf:"varint,11,opt,name=updatedReplicas"` + // If not empty, indicates the version of the Vertex used to generate Pods in the sequence [0,currentReplicas). 
+ CurrentHash string `json:"currentHash,omitempty" protobuf:"bytes,12,opt,name=currentHash"` + // If not empty, indicates the version of the Vertx used to generate Pods in the sequence [replicas-updatedReplicas,replicas) + UpdateHash string `json:"updateHash,omitempty" protobuf:"bytes,13,opt,name=updateHash"` } func (vs *VertexStatus) MarkPhase(phase VertexPhase, reason, message string) { diff --git a/rust/numaflow-models/src/models/mono_vertex_status.rs b/rust/numaflow-models/src/models/mono_vertex_status.rs index b0773192e8..bd2fdee807 100644 --- a/rust/numaflow-models/src/models/mono_vertex_status.rs +++ b/rust/numaflow-models/src/models/mono_vertex_status.rs @@ -21,6 +21,12 @@ pub struct MonoVertexStatus { /// Conditions are the latest available observations of a resource's current state. #[serde(rename = "conditions", skip_serializing_if = "Option::is_none")] pub conditions: Option>, + /// If not empty, indicates the version of the MonoVertex used to generate Pods in the sequence [0,currentReplicas). + #[serde(rename = "currentHash", skip_serializing_if = "Option::is_none")] + pub current_hash: Option, + /// The number of Pods created by the controller from the MonoVertex version indicated by currentHash. 
+ #[serde(rename = "currentReplicas", skip_serializing_if = "Option::is_none")] + pub current_replicas: Option, #[serde(rename = "lastScaledAt", skip_serializing_if = "Option::is_none")] pub last_scaled_at: Option, #[serde(rename = "lastUpdated", skip_serializing_if = "Option::is_none")] @@ -42,12 +48,20 @@ pub struct MonoVertexStatus { pub replicas: Option, #[serde(rename = "selector", skip_serializing_if = "Option::is_none")] pub selector: Option, + /// If not empty, indicates the version of the MonoVertx used to generate Pods in the sequence [replicas-updatedReplicas,replicas) + #[serde(rename = "updateHash", skip_serializing_if = "Option::is_none")] + pub update_hash: Option, + /// The number of Pods created by the controller from the MonoVertex version indicated by updateHash. + #[serde(rename = "updatedReplicas", skip_serializing_if = "Option::is_none")] + pub updated_replicas: Option, } impl MonoVertexStatus { pub fn new() -> MonoVertexStatus { MonoVertexStatus { conditions: None, + current_hash: None, + current_replicas: None, last_scaled_at: None, last_updated: None, message: None, @@ -57,6 +71,8 @@ impl MonoVertexStatus { reason: None, replicas: None, selector: None, + update_hash: None, + updated_replicas: None, } } } diff --git a/rust/numaflow-models/src/models/vertex_status.rs b/rust/numaflow-models/src/models/vertex_status.rs index 326a83c9b7..950ffa9ba5 100644 --- a/rust/numaflow-models/src/models/vertex_status.rs +++ b/rust/numaflow-models/src/models/vertex_status.rs @@ -21,6 +21,12 @@ pub struct VertexStatus { /// Conditions are the latest available observations of a resource's current state. #[serde(rename = "conditions", skip_serializing_if = "Option::is_none")] pub conditions: Option>, + /// If not empty, indicates the version of the Vertex used to generate Pods in the sequence [0,currentReplicas). 
+ #[serde(rename = "currentHash", skip_serializing_if = "Option::is_none")] + pub current_hash: Option, + /// The number of Pods created by the controller from the Vertex version indicated by currentHash. + #[serde(rename = "currentReplicas", skip_serializing_if = "Option::is_none")] + pub current_replicas: Option, #[serde(rename = "lastScaledAt", skip_serializing_if = "Option::is_none")] pub last_scaled_at: Option, #[serde(rename = "message", skip_serializing_if = "Option::is_none")] @@ -40,12 +46,20 @@ pub struct VertexStatus { pub replicas: Option, #[serde(rename = "selector", skip_serializing_if = "Option::is_none")] pub selector: Option, + /// If not empty, indicates the version of the Vertx used to generate Pods in the sequence [replicas-updatedReplicas,replicas) + #[serde(rename = "updateHash", skip_serializing_if = "Option::is_none")] + pub update_hash: Option, + /// The number of Pods created by the controller from the Vertex version indicated by updateHash. + #[serde(rename = "updatedReplicas", skip_serializing_if = "Option::is_none")] + pub updated_replicas: Option, } impl VertexStatus { pub fn new() -> VertexStatus { VertexStatus { conditions: None, + current_hash: None, + current_replicas: None, last_scaled_at: None, message: None, observed_generation: None, @@ -54,6 +68,8 @@ impl VertexStatus { reason: None, replicas: None, selector: None, + update_hash: None, + updated_replicas: None, } } } From 0814d1f542d165918e36a1504091f995d35524a6 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 2 Sep 2024 21:38:34 -0700 Subject: [PATCH 042/188] docs: updated CHANGELOG.md (#2021) Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- CHANGELOG.md | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0b2f607fbc..bbc4a1a7b7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,35 @@ # 
Changelog +## v1.3.1 (2024-09-02) + + * [a42d0063](https://github.com/numaproj/numaflow/commit/a42d0063caf53d6f4c01c2fb2f6f6f6f74a8f987) Update manifests to v1.3.1 + * [6993e75f](https://github.com/numaproj/numaflow/commit/6993e75f546f2ffc6db1ecbb0fc579a5d6048754) feat: allow configurable retryStrategy (#2010) + * [6c973698](https://github.com/numaproj/numaflow/commit/6c973698762488915df719161ec4a70a130b4bea) chore(deps): bump webpack from 5.93.0 to 5.94.0 in /ui (#2018) + * [cd54e86f](https://github.com/numaproj/numaflow/commit/cd54e86f7d42641182531df3823baecece0ee57c) fix: add latency metrics for mvtx (#2013) + * [c6530d37](https://github.com/numaproj/numaflow/commit/c6530d37efce9a1a7ffd153cde104180b2c0b287) feat: introduce `readyReplicas` for Vertex and MonoVertex (#2014) + * [13c13e5f](https://github.com/numaproj/numaflow/commit/13c13e5f1a36957b11219cac49ad8e872bd290be) feat: enable resourceClaims for vertex and monovtx (#2009) + * [1040a022](https://github.com/numaproj/numaflow/commit/1040a0223ad54ce619e6b33eeb5b99bf341d807d) fix: log format with config load error (#2000) + * [8d2a4b21](https://github.com/numaproj/numaflow/commit/8d2a4b21fe18085ed12303a604019dc88fca4665) feat: more flexible scaling with `replicasPerScaleUp` and `replicasPerScaleDown` (#2003) + * [9e54b2cd](https://github.com/numaproj/numaflow/commit/9e54b2cdaa75f9679dac2f37a0a7df88a39b481f) chore(deps): bump micromatch from 4.0.7 to 4.0.8 in /ui (#2002) + * [d841421f](https://github.com/numaproj/numaflow/commit/d841421f7d09da448cae10a45fa91a3bf9013d5c) fix: e2e testing isbsvc deletion timeout issue (#1997) + * [991bfb70](https://github.com/numaproj/numaflow/commit/991bfb701195ed2c6bfbc01f2ce8af99bfc5d763) fix: test coverage generation for Rust code (#1993) + * [a39746c1](https://github.com/numaproj/numaflow/commit/a39746c118791a37725f41241da4b3a9a03fa5a5) fix: do not pass scale info to MonoVertex (#1990) + * 
[0dcd9284](https://github.com/numaproj/numaflow/commit/0dcd9284d6a46869d81281a7e267a59b51282148) fix: adding not available for negative processing rates (#1983) + * [c49fdb9a](https://github.com/numaproj/numaflow/commit/c49fdb9af350b37aed7ef9b5b3d491cd85fe14a0) fix: minor perf improvements of mvtx fallback sink (#1967) + * [24239fc1](https://github.com/numaproj/numaflow/commit/24239fc1cc5a834621904cc12186b9d4dd51f950) fix: remove coloring in logs (#1975) + * [26b0d1db](https://github.com/numaproj/numaflow/commit/26b0d1dbdba51da944604cbae11029727ee3b26e) doc: update roadmap (#1970) + +### Contributors + + * Derek Wang + * Keran Yang + * Sidhant Kohli + * Sreekanth + * Vedant Gupta + * Vigith Maurice + * dependabot[bot] + * xdevxy + ## v1.3.0 (2024-08-19) * [4de121c2](https://github.com/numaproj/numaflow/commit/4de121c2c3b436ac51fba97c8ce5153afc5364c9) Update manifests to v1.3.0 From 1588175fb98214dc1530c3721ed6deee08ac92b6 Mon Sep 17 00:00:00 2001 From: lanedd <43617392+lanedd@users.noreply.github.com> Date: Tue, 3 Sep 2024 11:28:43 -0700 Subject: [PATCH 043/188] chore: added Seekr to USERS.md (#2022) Signed-off-by: Lane Dalan --- USERS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/USERS.md b/USERS.md index ce6cdd45e5..002bcc4440 100644 --- a/USERS.md +++ b/USERS.md @@ -7,3 +7,4 @@ Please add your company name and initial use case (optional) below. 3. [Atlan](https://atlan.com/) - Numaflow powers real time notifications, stream processing ecosystem at Atlan. 4. [Valegachain Analytics](https://www.valegachain.com/) Numaflow is used to extract, transform, and load cryptocurrency blocks and mempool transactions in data lakes, as well as for activity alerts. 5. [Lockheed Martin](https://lockheedmartin.com/) Perform ELT processing on high and low volume data streams of sensor data as recieved from IOT type systems. +6. [Seekr](https://www.seekr.com/) Numaflow coordinates multiple ML pipelines to rate and extract information from the pipeline input. 
From 14cdff5a363c758c35f5451b380ed6269459384a Mon Sep 17 00:00:00 2001 From: Keran Yang Date: Tue, 3 Sep 2024 14:59:28 -0400 Subject: [PATCH 044/188] chore: reduce 5 second wait time every time redis sink check runs in e2e (#2023) Signed-off-by: Keran Yang --- test/fixtures/redis_check.go | 3 +++ test/reduce-two-e2e/reduce_two_test.go | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/test/fixtures/redis_check.go b/test/fixtures/redis_check.go index b784aa2d61..3b230a454a 100644 --- a/test/fixtures/redis_check.go +++ b/test/fixtures/redis_check.go @@ -100,6 +100,9 @@ func runChecks(ctx context.Context, performChecks CheckFunc) bool { ticker := time.NewTicker(retryInterval) defer ticker.Stop() + if performChecks() { + return true + } for { select { case <-ctx.Done(): diff --git a/test/reduce-two-e2e/reduce_two_test.go b/test/reduce-two-e2e/reduce_two_test.go index c4749569a0..115e54e92c 100644 --- a/test/reduce-two-e2e/reduce_two_test.go +++ b/test/reduce-two-e2e/reduce_two_test.go @@ -83,7 +83,7 @@ func (r *ReduceSuite) testReduceStream(lang string) { // There should be no other values. 
w.Expect().RedisSinkContains(pipelineName+"-sink", "102") w.Expect().RedisSinkNotContains(pipelineName+"-sink", "99") - w.Expect().RedisSinkNotContains(pipelineName+"sink", "105") + w.Expect().RedisSinkNotContains(pipelineName+"-sink", "105") done <- struct{}{} } From 8d8b9e20b37ee214b33a06fed9fc54bab3b9a94d Mon Sep 17 00:00:00 2001 From: Sidhant Kohli Date: Tue, 3 Sep 2024 12:07:40 -0700 Subject: [PATCH 045/188] chore: convert onFailureStrategy to enum in rust (#2020) Signed-off-by: Sidhant Kohli --- rust/monovertex/src/config.rs | 222 +++++++++++++++++++++++++++++-- rust/monovertex/src/forwarder.rs | 17 +-- 2 files changed, 218 insertions(+), 21 deletions(-) diff --git a/rust/monovertex/src/config.rs b/rust/monovertex/src/config.rs index d1450500be..81b115422f 100644 --- a/rust/monovertex/src/config.rs +++ b/rust/monovertex/src/config.rs @@ -19,7 +19,47 @@ const DEFAULT_BATCH_SIZE: u64 = 500; const DEFAULT_TIMEOUT_IN_MS: u32 = 1000; const DEFAULT_MAX_SINK_RETRY_ATTEMPTS: u16 = u16::MAX; const DEFAULT_SINK_RETRY_INTERVAL_IN_MS: u32 = 1; -const DEFAULT_SINK_RETRY_ON_FAIL_STRATEGY: &str = "retry"; +const DEFAULT_SINK_RETRY_ON_FAIL_STRATEGY: OnFailureStrategy = OnFailureStrategy::Retry; + +#[derive(Debug, PartialEq, Clone)] +pub enum OnFailureStrategy { + Retry, + Fallback, + Drop, +} + +impl OnFailureStrategy { + /// Converts a string slice to an `OnFailureStrategy` enum variant. + /// Case insensitivity is considered to enhance usability. + /// + /// # Arguments + /// * `s` - A string slice representing the retry strategy. + /// + /// # Returns + /// An option containing the corresponding enum variant if successful, + /// or DefaultStrategy if the input does not match known variants. 
+ fn from_str(s: &str) -> Option { + match s.to_lowercase().as_str() { + "retry" => Some(OnFailureStrategy::Retry), + "fallback" => Some(OnFailureStrategy::Fallback), + "drop" => Some(OnFailureStrategy::Drop), + _ => Some(DEFAULT_SINK_RETRY_ON_FAIL_STRATEGY), + } + } + + /// Converts the `OnFailureStrategy` enum variant to a String. + /// This facilitates situations where the enum needs to be displayed or logged as a string. + /// + /// # Returns + /// A string representing the `OnFailureStrategy` enum variant. + fn to_string(&self) -> String { + match *self { + OnFailureStrategy::Retry => "retry".to_string(), + OnFailureStrategy::Fallback => "fallback".to_string(), + OnFailureStrategy::Drop => "drop".to_string(), + } + } +} pub fn config() -> &'static Settings { static CONF: OnceLock = OnceLock::new(); @@ -44,7 +84,7 @@ pub struct Settings { pub lag_refresh_interval_in_secs: u16, pub sink_max_retry_attempts: u16, pub sink_retry_interval_in_ms: u32, - pub sink_retry_on_fail_strategy: String, + pub sink_retry_on_fail_strategy: OnFailureStrategy, pub sink_default_retry_strategy: RetryStrategy, } @@ -73,7 +113,7 @@ impl Default for Settings { lag_refresh_interval_in_secs: DEFAULT_LAG_REFRESH_INTERVAL_IN_SECS, sink_max_retry_attempts: DEFAULT_MAX_SINK_RETRY_ATTEMPTS, sink_retry_interval_in_ms: DEFAULT_SINK_RETRY_INTERVAL_IN_MS, - sink_retry_on_fail_strategy: DEFAULT_SINK_RETRY_ON_FAIL_STRATEGY.to_string(), + sink_retry_on_fail_strategy: DEFAULT_SINK_RETRY_ON_FAIL_STRATEGY, sink_default_retry_strategy: default_retry_strategy, } } @@ -161,15 +201,16 @@ impl Settings { } } - // Set the retry strategy using a direct reference whenever possible + // Set the retry strategy from the spec or use the default settings.sink_retry_on_fail_strategy = retry_strategy .on_failure .clone() - .unwrap_or_else(|| DEFAULT_SINK_RETRY_ON_FAIL_STRATEGY.to_string()); + .and_then(|s| OnFailureStrategy::from_str(&s)) + .unwrap_or(DEFAULT_SINK_RETRY_ON_FAIL_STRATEGY); // check if the sink retry 
strategy is set to fallback and there is no fallback sink configured // then we should return an error - if settings.sink_retry_on_fail_strategy == "fallback" + if settings.sink_retry_on_fail_strategy == OnFailureStrategy::Fallback && !settings.is_fallback_enabled { return Err(Error::ConfigError( @@ -198,10 +239,12 @@ impl Settings { #[cfg(test)] mod tests { - use super::*; - use serde_json::json; use std::env; + use serde_json::json; + + use super::*; + #[test] fn test_settings_load_combined() { // Define all JSON test configurations in separate scopes to use them distinctively @@ -303,7 +346,118 @@ mod tests { // Execute and verify let settings = Settings::load().unwrap(); - assert_eq!(settings.sink_retry_on_fail_strategy, "retry"); + assert_eq!( + settings.sink_retry_on_fail_strategy, + DEFAULT_SINK_RETRY_ON_FAIL_STRATEGY + ); + assert_eq!(settings.sink_max_retry_attempts, 5); + assert_eq!(settings.sink_retry_interval_in_ms, 1000); + env::remove_var(ENV_MONO_VERTEX_OBJ); + } + + { + // Test Non default Retry Strategy Load + let json_data = json!({ + "metadata": { + "name": "simple-mono-vertex", + "namespace": "default", + "creationTimestamp": null + }, + "spec": { + "replicas": 0, + "source": { + "udsource": { + "container": { + "image": "xxxxxxx", + "resources": {} + } + } + }, + "sink": { + "udsink": { + "container": { + "image": "xxxxxx", + "resources": {} + } + }, + "retryStrategy": { + "backoff": { + "interval": "1s", + "steps": 5 + }, + "onFailure": "drop" + }, + }, + "limits": { + "readBatchSize": 500, + "readTimeout": "1s" + }, + } + }); + let json_str = json_data.to_string(); + let encoded_json = BASE64_STANDARD.encode(json_str); + env::set_var(ENV_MONO_VERTEX_OBJ, encoded_json); + + // Execute and verify + let settings = Settings::load().unwrap(); + assert_eq!( + settings.sink_retry_on_fail_strategy, + OnFailureStrategy::Drop + ); + assert_eq!(settings.sink_max_retry_attempts, 5); + assert_eq!(settings.sink_retry_interval_in_ms, 1000); + 
env::remove_var(ENV_MONO_VERTEX_OBJ); + } + + { + // Test Invalid on failure strategy to use default + let json_data = json!({ + "metadata": { + "name": "simple-mono-vertex", + "namespace": "default", + "creationTimestamp": null + }, + "spec": { + "replicas": 0, + "source": { + "udsource": { + "container": { + "image": "xxxxxxx", + "resources": {} + } + } + }, + "sink": { + "udsink": { + "container": { + "image": "xxxxxx", + "resources": {} + } + }, + "retryStrategy": { + "backoff": { + "interval": "1s", + "steps": 5 + }, + "onFailure": "xxxxx" + }, + }, + "limits": { + "readBatchSize": 500, + "readTimeout": "1s" + }, + } + }); + let json_str = json_data.to_string(); + let encoded_json = BASE64_STANDARD.encode(json_str); + env::set_var(ENV_MONO_VERTEX_OBJ, encoded_json); + + // Execute and verify + let settings = Settings::load().unwrap(); + assert_eq!( + settings.sink_retry_on_fail_strategy, + DEFAULT_SINK_RETRY_ON_FAIL_STRATEGY + ); assert_eq!(settings.sink_max_retry_attempts, 5); assert_eq!(settings.sink_retry_interval_in_ms, 1000); env::remove_var(ENV_MONO_VERTEX_OBJ); @@ -407,4 +561,54 @@ mod tests { // General cleanup env::remove_var(ENV_GRPC_MAX_MESSAGE_SIZE); } + + #[test] + fn test_on_failure_enum_from_str_valid_inputs() { + assert_eq!( + OnFailureStrategy::from_str("retry"), + Some(OnFailureStrategy::Retry) + ); + assert_eq!( + OnFailureStrategy::from_str("fallback"), + Some(OnFailureStrategy::Fallback) + ); + assert_eq!( + OnFailureStrategy::from_str("drop"), + Some(OnFailureStrategy::Drop) + ); + + // Testing case insensitivity + assert_eq!( + OnFailureStrategy::from_str("ReTry"), + Some(OnFailureStrategy::Retry) + ); + assert_eq!( + OnFailureStrategy::from_str("FALLBACK"), + Some(OnFailureStrategy::Fallback) + ); + assert_eq!( + OnFailureStrategy::from_str("Drop"), + Some(OnFailureStrategy::Drop) + ); + } + + #[test] + fn test_on_failure_enum_from_str_invalid_input() { + assert_eq!( + OnFailureStrategy::from_str("unknown"), + 
Some(DEFAULT_SINK_RETRY_ON_FAIL_STRATEGY) + ); // should return None for undefined inputs + } + + #[test] + fn test_on_failure_enum_to_string() { + let retry = OnFailureStrategy::Retry; + assert_eq!(retry.to_string(), "retry"); + + let fallback = OnFailureStrategy::Fallback; + assert_eq!(fallback.to_string(), "fallback"); + + let drop = OnFailureStrategy::Drop; + assert_eq!(drop.to_string(), "drop"); + } } diff --git a/rust/monovertex/src/forwarder.rs b/rust/monovertex/src/forwarder.rs index 0f55268576..1ba928123c 100644 --- a/rust/monovertex/src/forwarder.rs +++ b/rust/monovertex/src/forwarder.rs @@ -1,6 +1,6 @@ use std::collections::HashMap; -use crate::config::config; +use crate::config::{config, OnFailureStrategy}; use crate::error::{Error, Result}; use crate::message::{Message, Offset}; use crate::metrics; @@ -296,9 +296,9 @@ impl Forwarder { } // check what is the failure strategy in the config let strategy = config().sink_retry_on_fail_strategy.clone(); - match strategy.as_str() { + match strategy { // if we need to retry, return true - "retry" => { + OnFailureStrategy::Retry => { warn!( "Using onFailure Retry, Retry attempts {} completed", attempts @@ -306,7 +306,7 @@ impl Forwarder { return Ok(true); } // if we need to drop the messages, log and return false - "drop" => { + OnFailureStrategy::Drop => { // log that we are dropping the messages as requested warn!( "Dropping messages after {} attempts. Errors: {:?}", @@ -319,7 +319,7 @@ impl Forwarder { .inc_by(messages_to_send.len() as u64); } // if we need to move the messages to the fallback, return false - "fallback" => { + OnFailureStrategy::Fallback => { // log that we are moving the messages to the fallback as requested warn!( "Moving messages to fallback after {} attempts. 
Errors: {:?}", @@ -328,13 +328,6 @@ impl Forwarder { // move the messages to the fallback messages fallback_msgs.append(messages_to_send); } - // if the strategy is invalid, return an error - _ => { - return Err(Error::SinkError(format!( - "Invalid sink retry on fail strategy: {}", - strategy - ))); - } } // if we are done with the messages, break the loop Ok(false) From 8fc99bbbd694067287e017b311b1f7cdefe9f580 Mon Sep 17 00:00:00 2001 From: Sidhant Kohli Date: Wed, 4 Sep 2024 13:38:02 -0700 Subject: [PATCH 046/188] chore: add docs for retry (#2024) Signed-off-by: Sidhant Kohli --- docs/user-guide/sinks/retry-strategy.md | 68 +++++++++++++++++++++++++ mkdocs.yml | 1 + 2 files changed, 69 insertions(+) create mode 100644 docs/user-guide/sinks/retry-strategy.md diff --git a/docs/user-guide/sinks/retry-strategy.md b/docs/user-guide/sinks/retry-strategy.md new file mode 100644 index 0000000000..a5b2a7264b --- /dev/null +++ b/docs/user-guide/sinks/retry-strategy.md @@ -0,0 +1,68 @@ +# Retry Strategy + +### Overview +The `RetryStrategy` is used to configure the behavior for a sink after encountering failures during a write operation. +This structure allows the user to specify how Numaflow should respond to different fail-over scenarios for Sinks, ensuring that the writing can be resilient and handle +unexpected issues efficiently. + + +### Struct Explanation + + +`retryStrategy` is optional, and can be added to the Sink spec configurations where retry logic is necessary. + + + +```yaml +sink: + retryStrategy: + # Optional + backoff: + duration: 1s # Optional + steps: 3 # Optional, number of retries (including the 1st try) + # Optional + onFailure: retry|fallback|drop +``` +Note: If no custom fields are defined for retryStrategy then the **default** values are used. + +- `BackOff` - Defines the timing for retries, including the interval and the maximum attempts. 
+ - `duration`: the time interval to wait before retry attempts + - Default: _1ms_ + - `steps`: the limit on the number of times to try the sink write operation including retries + - Default: _Infinite_ +- `OnFailure` - Specifies the action to be undertaken if number of retries are exhausted + - retry: continue with the retry logic again + - fallback: write the leftover messages to a [fallback](https://numaflow.numaproj.io/user-guide/sinks/fallback/) sink + - drop: any messages left to be processed are dropped + - Default: _retry_ + + +### Constraints + +1) If the `onFailure` is defined as fallback, then there should be a fallback sink specified in the spec. + +2) The steps defined should always be `> 0` + + +## Example + +```yaml + sink: + retryStrategy: + backoff: + interval: "500ms" + steps: 10 + onFailure: "fallback" + udsink: + container: + image: my-sink-image + fallback: + udsink: + container: + image: my-fallback-sink +``` +### Explanation + +- Normal Operation: Data is processed by the primary sink container specified by `UDSink`. +The system retries up to 10 times for a batch write operation to succeed with an interval of 500 milliseconds between each retry. +- After Maximum Retries: If all retries fail, data is then routed to a fallback sink instead. diff --git a/mkdocs.yml b/mkdocs.yml index 7c9161d480..7e57a6af21 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -66,6 +66,7 @@ nav: - user-guide/sinks/blackhole.md - User-defined Sinks: "user-guide/sinks/user-defined-sinks.md" - Fallback Sink: "user-guide/sinks/fallback.md" + - Retry Strategy: "user-guide/sinks/retry-strategy.md" - User-defined Functions: - Overview: "user-guide/user-defined-functions/user-defined-functions.md" - Map: From 9f13068a967db36faeb0a23e86bc7faa24c364a3 Mon Sep 17 00:00:00 2001 From: xdevxy <115589853+xdevxy@users.noreply.github.com> Date: Thu, 5 Sep 2024 15:35:51 -0700 Subject: [PATCH 047/188] chore: use `kube_codegen.sh` instead in codegen. 
(#2011) Signed-off-by: Hao Hao --- hack/update-codegen.sh | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh index 3ab861b73b..1137a938cc 100755 --- a/hack/update-codegen.sh +++ b/hack/update-codegen.sh @@ -17,15 +17,13 @@ cd "${FAKE_REPOPATH}" CODEGEN_PKG=${CODEGEN_PKG:-$(cd "${FAKE_REPOPATH}"; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)} -chmod +x ${CODEGEN_PKG}/*.sh - subheader "running codegen" -bash -x ${CODEGEN_PKG}/generate-groups.sh "deepcopy" \ +bash -x ${CODEGEN_PKG}/kube_codegen.sh "deepcopy" \ github.com/numaproj/numaflow/pkg/client github.com/numaproj/numaflow/pkg/apis \ "numaflow:v1alpha1" \ --go-header-file hack/boilerplate/boilerplate.go.txt -bash -x ${CODEGEN_PKG}/generate-groups.sh "client,informer,lister" \ +bash -x ${CODEGEN_PKG}/kube_codegen.sh "client,informer,lister" \ github.com/numaproj/numaflow/pkg/client github.com/numaproj/numaflow/pkg/apis \ "numaflow:v1alpha1" \ --plural-exceptions="Vertex:Vertices,MonoVertex:MonoVertices" \ From cf90e258261b50d95db2787cfe23e9008c2ab72a Mon Sep 17 00:00:00 2001 From: Sidhant Kohli Date: Thu, 5 Sep 2024 21:44:14 -0700 Subject: [PATCH 048/188] fix: pause lifecyle changes and add drained status (#2028) Signed-off-by: Sidhant Kohli --- api/json-schema/schema.json | 4 + api/openapi-spec/swagger.json | 4 + .../full/numaflow.numaproj.io_pipelines.yaml | 3 + config/install.yaml | 3 + config/namespace-install.yaml | 3 + docs/APIs.md | 20 + pkg/apis/numaflow/v1alpha1/generated.pb.go | 994 +++++++++--------- pkg/apis/numaflow/v1alpha1/generated.proto | 6 + .../numaflow/v1alpha1/openapi_generated.go | 7 + pkg/apis/numaflow/v1alpha1/pipeline_types.go | 15 + .../numaflow/v1alpha1/pipeline_types_test.go | 9 + pkg/reconciler/pipeline/controller.go | 56 +- pkg/reconciler/pipeline/controller_test.go | 70 +- .../src/models/pipeline_status.rs | 4 + 14 files changed, 701 insertions(+), 497 deletions(-) diff --git 
a/api/json-schema/schema.json b/api/json-schema/schema.json index e594c79167..d58a13beb6 100644 --- a/api/json-schema/schema.json +++ b/api/json-schema/schema.json @@ -19681,6 +19681,10 @@ "x-kubernetes-patch-merge-key": "type", "x-kubernetes-patch-strategy": "merge" }, + "drainedOnPause": { + "description": "Field to indicate if a pipeline drain successfully occurred, or it timed out. Set to true when the Pipeline is in Paused state, and after it has successfully been drained. defaults to false", + "type": "boolean" + }, "lastUpdated": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" }, diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 8f47047db6..b3cdb6b120 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -19668,6 +19668,10 @@ "x-kubernetes-patch-merge-key": "type", "x-kubernetes-patch-strategy": "merge" }, + "drainedOnPause": { + "description": "Field to indicate if a pipeline drain successfully occurred, or it timed out. Set to true when the Pipeline is in Paused state, and after it has successfully been drained. 
defaults to false", + "type": "boolean" + }, "lastUpdated": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" }, diff --git a/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml b/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml index 4ddc954f61..9670b018e0 100644 --- a/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml @@ -9821,6 +9821,9 @@ spec: - type type: object type: array + drainedOnPause: + default: false + type: boolean lastUpdated: format: date-time type: string diff --git a/config/install.yaml b/config/install.yaml index 849958226d..8a84ffac83 100644 --- a/config/install.yaml +++ b/config/install.yaml @@ -18074,6 +18074,9 @@ spec: - type type: object type: array + drainedOnPause: + default: false + type: boolean lastUpdated: format: date-time type: string diff --git a/config/namespace-install.yaml b/config/namespace-install.yaml index 3b65e083b6..c9301892de 100644 --- a/config/namespace-install.yaml +++ b/config/namespace-install.yaml @@ -18074,6 +18074,9 @@ spec: - type type: object type: array + drainedOnPause: + default: false + type: boolean lastUpdated: format: date-time type: string diff --git a/docs/APIs.md b/docs/APIs.md index a311d7ee07..97ddf96672 100644 --- a/docs/APIs.md +++ b/docs/APIs.md @@ -7941,6 +7941,26 @@ The generation observed by the Pipeline controller. + + + + +drainedOnPause
bool + + + + +

+ +Field to indicate if a pipeline drain successfully occurred, or it timed +out. Set to true when the Pipeline is in Paused state, and after it has +successfully been drained. defaults to false +

+ + + + + diff --git a/pkg/apis/numaflow/v1alpha1/generated.pb.go b/pkg/apis/numaflow/v1alpha1/generated.pb.go index f9bbc352a1..abf3a4e47a 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.pb.go +++ b/pkg/apis/numaflow/v1alpha1/generated.pb.go @@ -2762,488 +2762,490 @@ func init() { } var fileDescriptor_9d0d1b17d3865563 = []byte{ - // 7695 bytes of a gzipped FileDescriptorProto + // 7719 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x5d, 0x6c, 0x24, 0xd7, - 0x75, 0xa6, 0xfa, 0xbf, 0xfb, 0x34, 0xc9, 0xe1, 0xdc, 0x19, 0x8d, 0x38, 0xa3, 0xd1, 0x70, 0x5c, - 0xb2, 0xe4, 0xf1, 0xda, 0x26, 0x57, 0x5c, 0xfd, 0xf9, 0x57, 0x62, 0x93, 0x43, 0x0e, 0x67, 0xc8, - 0x19, 0xfa, 0x34, 0x39, 0x92, 0xad, 0xb5, 0xb5, 0xc5, 0xea, 0xcb, 0x66, 0x89, 0xd5, 0x55, 0xad, - 0xaa, 0x6a, 0xce, 0x50, 0xde, 0x85, 0x7f, 0xb4, 0x80, 0xb4, 0x58, 0x2c, 0x76, 0xe1, 0x27, 0x03, - 0x0b, 0xef, 0x62, 0x17, 0xbb, 0xf0, 0x83, 0xe1, 0x7d, 0x58, 0x40, 0xfb, 0x60, 0x60, 0xe3, 0x38, - 0x08, 0x12, 0x27, 0xc8, 0x8f, 0x1f, 0x02, 0x44, 0x79, 0x21, 0x62, 0x06, 0x79, 0x48, 0x80, 0x18, - 0x46, 0x0c, 0x24, 0xf6, 0xc0, 0x88, 0x83, 0xfb, 0x57, 0x7f, 0x5d, 0x3d, 0x43, 0x76, 0x91, 0xa3, - 0x51, 0xa2, 0xb7, 0xaa, 0x7b, 0xcf, 0xfd, 0xce, 0xad, 0x53, 0xf7, 0xe7, 0xdc, 0x73, 0xce, 0xbd, - 0x17, 0x16, 0xdb, 0xa6, 0xbf, 0xd5, 0xdb, 0x98, 0x32, 0x9c, 0xce, 0xb4, 0xdd, 0xeb, 0xe8, 0x5d, - 0xd7, 0x79, 0x8d, 0x3f, 0x6c, 0x5a, 0xce, 0xad, 0xe9, 0xee, 0x76, 0x7b, 0x5a, 0xef, 0x9a, 0x5e, - 0x98, 0xb2, 0xf3, 0x94, 0x6e, 0x75, 0xb7, 0xf4, 0xa7, 0xa6, 0xdb, 0xd4, 0xa6, 0xae, 0xee, 0xd3, - 0xd6, 0x54, 0xd7, 0x75, 0x7c, 0x87, 0x3c, 0x17, 0x02, 0x4d, 0x29, 0xa0, 0x29, 0x55, 0x6c, 0xaa, - 0xbb, 0xdd, 0x9e, 0x62, 0x40, 0x61, 0x8a, 0x02, 0x3a, 0xf7, 0x89, 0x48, 0x0d, 0xda, 0x4e, 0xdb, - 0x99, 0xe6, 0x78, 0x1b, 0xbd, 0x4d, 0xfe, 0xc6, 0x5f, 0xf8, 0x93, 0xe0, 0x73, 0x4e, 0xdb, 0x7e, - 0xde, 0x9b, 0x32, 0x1d, 0x56, 0xad, 0x69, 0xc3, 0x71, 0xe9, 0xf4, 0x4e, 0x5f, 0x5d, 0xce, 0x3d, - 
0x1d, 0xd2, 0x74, 0x74, 0x63, 0xcb, 0xb4, 0xa9, 0xbb, 0xab, 0xbe, 0x65, 0xda, 0xa5, 0x9e, 0xd3, - 0x73, 0x0d, 0x7a, 0xa8, 0x52, 0xde, 0x74, 0x87, 0xfa, 0x7a, 0x1a, 0xaf, 0xe9, 0x41, 0xa5, 0xdc, - 0x9e, 0xed, 0x9b, 0x9d, 0x7e, 0x36, 0xcf, 0xde, 0xab, 0x80, 0x67, 0x6c, 0xd1, 0x8e, 0x9e, 0x2c, - 0xa7, 0xfd, 0x10, 0xe0, 0xd4, 0xec, 0x86, 0xe7, 0xbb, 0xba, 0xe1, 0xaf, 0x3a, 0xad, 0x35, 0xda, - 0xe9, 0x5a, 0xba, 0x4f, 0xc9, 0x36, 0x54, 0x59, 0xdd, 0x5a, 0xba, 0xaf, 0x4f, 0xe4, 0x2e, 0xe6, - 0x2e, 0xd5, 0x67, 0x66, 0xa7, 0x86, 0xfc, 0x17, 0x53, 0x2b, 0x12, 0xa8, 0x31, 0xb2, 0xbf, 0x37, - 0x59, 0x55, 0x6f, 0x18, 0x30, 0x20, 0xdf, 0xca, 0xc1, 0x88, 0xed, 0xb4, 0x68, 0x93, 0x5a, 0xd4, - 0xf0, 0x1d, 0x77, 0x22, 0x7f, 0xb1, 0x70, 0xa9, 0x3e, 0xf3, 0xe5, 0xa1, 0x39, 0xa6, 0x7c, 0xd1, - 0xd4, 0xf5, 0x08, 0x83, 0xcb, 0xb6, 0xef, 0xee, 0x36, 0x4e, 0xff, 0x68, 0x6f, 0xf2, 0xa1, 0xfd, - 0xbd, 0xc9, 0x91, 0x68, 0x16, 0xc6, 0x6a, 0x42, 0xd6, 0xa1, 0xee, 0x3b, 0x16, 0x13, 0x99, 0xe9, - 0xd8, 0xde, 0x44, 0x81, 0x57, 0xec, 0xc2, 0x94, 0x90, 0x36, 0x63, 0x3f, 0xc5, 0x9a, 0xcb, 0xd4, - 0xce, 0x53, 0x53, 0x6b, 0x01, 0x59, 0xe3, 0x94, 0x04, 0xae, 0x87, 0x69, 0x1e, 0x46, 0x71, 0x08, - 0x85, 0x13, 0x1e, 0x35, 0x7a, 0xae, 0xe9, 0xef, 0xce, 0x39, 0xb6, 0x4f, 0x6f, 0xfb, 0x13, 0x45, - 0x2e, 0xe5, 0x27, 0xd3, 0xa0, 0x57, 0x9d, 0x56, 0x33, 0x4e, 0xdd, 0x38, 0xb5, 0xbf, 0x37, 0x79, - 0x22, 0x91, 0x88, 0x49, 0x4c, 0x62, 0xc3, 0xb8, 0xd9, 0xd1, 0xdb, 0x74, 0xb5, 0x67, 0x59, 0x4d, - 0x6a, 0xb8, 0xd4, 0xf7, 0x26, 0x4a, 0xfc, 0x13, 0x2e, 0xa5, 0xf1, 0x59, 0x76, 0x0c, 0xdd, 0xba, - 0xb1, 0xf1, 0x1a, 0x35, 0x7c, 0xa4, 0x9b, 0xd4, 0xa5, 0xb6, 0x41, 0x1b, 0x13, 0xf2, 0x63, 0xc6, - 0x97, 0x12, 0x48, 0xd8, 0x87, 0x4d, 0x16, 0xe1, 0x64, 0xd7, 0x35, 0x1d, 0x5e, 0x05, 0x4b, 0xf7, - 0xbc, 0xeb, 0x7a, 0x87, 0x4e, 0x94, 0x2f, 0xe6, 0x2e, 0xd5, 0x1a, 0x67, 0x25, 0xcc, 0xc9, 0xd5, - 0x24, 0x01, 0xf6, 0x97, 0x21, 0x97, 0xa0, 0xaa, 0x12, 0x27, 0x2a, 0x17, 0x73, 0x97, 0x4a, 0xa2, - 0xed, 0xa8, 0xb2, 0x18, 0xe4, 0x92, 
0x05, 0xa8, 0xea, 0x9b, 0x9b, 0xa6, 0xcd, 0x28, 0xab, 0x5c, - 0x84, 0xe7, 0xd3, 0x3e, 0x6d, 0x56, 0xd2, 0x08, 0x1c, 0xf5, 0x86, 0x41, 0x59, 0x72, 0x15, 0x88, - 0x47, 0xdd, 0x1d, 0xd3, 0xa0, 0xb3, 0x86, 0xe1, 0xf4, 0x6c, 0x9f, 0xd7, 0xbd, 0xc6, 0xeb, 0x7e, - 0x4e, 0xd6, 0x9d, 0x34, 0xfb, 0x28, 0x30, 0xa5, 0x14, 0x79, 0x11, 0xc6, 0x65, 0xb7, 0x0b, 0xa5, - 0x00, 0x1c, 0xe9, 0x34, 0x13, 0x24, 0x26, 0xf2, 0xb0, 0x8f, 0x9a, 0xb4, 0xe0, 0xbc, 0xde, 0xf3, - 0x9d, 0x0e, 0x83, 0x8c, 0x33, 0x5d, 0x73, 0xb6, 0xa9, 0x3d, 0x51, 0xbf, 0x98, 0xbb, 0x54, 0x6d, - 0x5c, 0xdc, 0xdf, 0x9b, 0x3c, 0x3f, 0x7b, 0x17, 0x3a, 0xbc, 0x2b, 0x0a, 0xb9, 0x01, 0xb5, 0x96, - 0xed, 0xad, 0x3a, 0x96, 0x69, 0xec, 0x4e, 0x8c, 0xf0, 0x0a, 0x3e, 0x25, 0x3f, 0xb5, 0x36, 0x7f, - 0xbd, 0x29, 0x32, 0xee, 0xec, 0x4d, 0x9e, 0xef, 0x1f, 0x1d, 0xa7, 0x82, 0x7c, 0x0c, 0x31, 0xc8, - 0x0a, 0x07, 0x9c, 0x73, 0xec, 0x4d, 0xb3, 0x3d, 0x31, 0xca, 0xff, 0xc6, 0xc5, 0x01, 0x0d, 0x7a, - 0xfe, 0x7a, 0x53, 0xd0, 0x35, 0x46, 0x25, 0x3b, 0xf1, 0x8a, 0x21, 0x02, 0x69, 0xc1, 0x98, 0x1a, - 0x57, 0xe7, 0x2c, 0xdd, 0xec, 0x78, 0x13, 0x63, 0xbc, 0xf1, 0x7e, 0x78, 0x00, 0x26, 0x46, 0x89, - 0x1b, 0x67, 0xe4, 0xa7, 0x8c, 0xc5, 0x92, 0x3d, 0x4c, 0x60, 0x9e, 0x7b, 0x01, 0x4e, 0xf6, 0x8d, - 0x0d, 0x64, 0x1c, 0x0a, 0xdb, 0x74, 0x97, 0x0f, 0x7d, 0x35, 0x64, 0x8f, 0xe4, 0x34, 0x94, 0x76, - 0x74, 0xab, 0x47, 0x27, 0xf2, 0x3c, 0x4d, 0xbc, 0x7c, 0x2a, 0xff, 0x7c, 0x4e, 0xfb, 0x9f, 0x05, - 0x18, 0x51, 0x23, 0x4e, 0xd3, 0xb4, 0xb7, 0xc9, 0x4b, 0x50, 0xb0, 0x9c, 0xb6, 0x1c, 0x37, 0x3f, - 0x33, 0xf4, 0x28, 0xb6, 0xec, 0xb4, 0x1b, 0x95, 0xfd, 0xbd, 0xc9, 0xc2, 0xb2, 0xd3, 0x46, 0x86, - 0x48, 0x0c, 0x28, 0x6d, 0xeb, 0x9b, 0xdb, 0x3a, 0xaf, 0x43, 0x7d, 0xa6, 0x31, 0x34, 0xf4, 0x35, - 0x86, 0xc2, 0xea, 0xda, 0xa8, 0xed, 0xef, 0x4d, 0x96, 0xf8, 0x2b, 0x0a, 0x6c, 0xe2, 0x40, 0x6d, - 0xc3, 0xd2, 0x8d, 0xed, 0x2d, 0xc7, 0xa2, 0x13, 0x85, 0x8c, 0x8c, 0x1a, 0x0a, 0x49, 0xfc, 0xe6, - 0xe0, 0x15, 0x43, 0x1e, 0xc4, 0x80, 0x72, 0xaf, 0xe5, 0x99, 0xf6, 0xb6, 
0x1c, 0x03, 0x5f, 0x18, - 0x9a, 0xdb, 0xfa, 0x3c, 0xff, 0x26, 0xd8, 0xdf, 0x9b, 0x2c, 0x8b, 0x67, 0x94, 0xd0, 0xda, 0x4f, - 0xeb, 0x30, 0xa6, 0x7e, 0xd2, 0x4d, 0xea, 0xfa, 0xf4, 0x36, 0xb9, 0x08, 0x45, 0x9b, 0x75, 0x4d, - 0xfe, 0x93, 0x1b, 0x23, 0xb2, 0xb9, 0x14, 0x79, 0x97, 0xe4, 0x39, 0xac, 0x66, 0xa2, 0xa9, 0x48, - 0x81, 0x0f, 0x5f, 0xb3, 0x26, 0x87, 0x11, 0x35, 0x13, 0xcf, 0x28, 0xa1, 0xc9, 0x2b, 0x50, 0xe4, - 0x1f, 0x2f, 0x44, 0xfd, 0xd9, 0xe1, 0x59, 0xb0, 0x4f, 0xaf, 0xb2, 0x2f, 0xe0, 0x1f, 0xce, 0x41, - 0x59, 0x53, 0xec, 0xb5, 0x36, 0xa5, 0x60, 0x3f, 0x93, 0x41, 0xb0, 0x0b, 0xa2, 0x29, 0xae, 0xcf, - 0x2f, 0x20, 0x43, 0x24, 0xff, 0x39, 0x07, 0x27, 0x0d, 0xc7, 0xf6, 0x75, 0xa6, 0x6a, 0xa8, 0x49, - 0x76, 0xa2, 0xc4, 0xf9, 0x5c, 0x1d, 0x9a, 0xcf, 0x5c, 0x12, 0xb1, 0xf1, 0x30, 0x9b, 0x33, 0xfa, - 0x92, 0xb1, 0x9f, 0x37, 0xf9, 0xaf, 0x39, 0x78, 0x98, 0x8d, 0xe5, 0x7d, 0xc4, 0x7c, 0x06, 0x3a, - 0xda, 0x5a, 0x9d, 0xdd, 0xdf, 0x9b, 0x7c, 0x78, 0x29, 0x8d, 0x19, 0xa6, 0xd7, 0x81, 0xd5, 0xee, - 0x94, 0xde, 0xaf, 0x96, 0xf0, 0xd9, 0xad, 0x3e, 0xb3, 0x7c, 0x94, 0xaa, 0x4e, 0xe3, 0x51, 0xd9, - 0x94, 0xd3, 0x34, 0x3b, 0x4c, 0xab, 0x05, 0xb9, 0x0c, 0x95, 0x1d, 0xc7, 0xea, 0x75, 0xa8, 0x37, - 0x51, 0xe5, 0x43, 0xec, 0xb9, 0xb4, 0x21, 0xf6, 0x26, 0x27, 0x69, 0x9c, 0x90, 0xf0, 0x15, 0xf1, - 0xee, 0xa1, 0x2a, 0x4b, 0x4c, 0x28, 0x5b, 0x66, 0xc7, 0xf4, 0x3d, 0x3e, 0x71, 0xd6, 0x67, 0x2e, - 0x0f, 0xfd, 0x59, 0xa2, 0x8b, 0x2e, 0x73, 0x30, 0xd1, 0x6b, 0xc4, 0x33, 0x4a, 0x06, 0x6c, 0x28, - 0xf4, 0x0c, 0xdd, 0x12, 0x13, 0x6b, 0x7d, 0xe6, 0x73, 0xc3, 0x77, 0x1b, 0x86, 0xd2, 0x18, 0x95, - 0xdf, 0x54, 0xe2, 0xaf, 0x28, 0xb0, 0xc9, 0x97, 0x60, 0x2c, 0xf6, 0x37, 0xbd, 0x89, 0x3a, 0x97, - 0xce, 0x63, 0x69, 0xd2, 0x09, 0xa8, 0xc2, 0x99, 0x27, 0xd6, 0x42, 0x3c, 0x4c, 0x80, 0x91, 0x6b, - 0x50, 0xf5, 0xcc, 0x16, 0x35, 0x74, 0xd7, 0x9b, 0x18, 0x39, 0x08, 0xf0, 0xb8, 0x04, 0xae, 0x36, - 0x65, 0x31, 0x0c, 0x00, 0xc8, 0x14, 0x40, 0x57, 0x77, 0x7d, 0x53, 0x28, 0xaa, 0xa3, 0x5c, 0x69, - 0x1a, 0xdb, 
0xdf, 0x9b, 0x84, 0xd5, 0x20, 0x15, 0x23, 0x14, 0x8c, 0x9e, 0x95, 0x5d, 0xb2, 0xbb, - 0x3d, 0x5f, 0x4c, 0xac, 0x35, 0x41, 0xdf, 0x0c, 0x52, 0x31, 0x42, 0x41, 0xbe, 0x97, 0x83, 0x47, - 0xc3, 0xd7, 0xfe, 0x4e, 0x76, 0xe2, 0xc8, 0x3b, 0xd9, 0xe4, 0xfe, 0xde, 0xe4, 0xa3, 0xcd, 0xc1, - 0x2c, 0xf1, 0x6e, 0xf5, 0xd1, 0x5e, 0x82, 0xd1, 0xd9, 0x9e, 0xbf, 0xe5, 0xb8, 0xe6, 0x1b, 0x5c, - 0xe9, 0x26, 0x0b, 0x50, 0xf2, 0xb9, 0xf2, 0x24, 0xe6, 0xe5, 0x27, 0xd2, 0x44, 0x2d, 0x14, 0xd9, - 0x6b, 0x74, 0x57, 0x69, 0x03, 0x62, 0x7e, 0x14, 0xca, 0x94, 0x28, 0xae, 0xfd, 0xfb, 0x1c, 0x54, - 0x1a, 0xba, 0xb1, 0xed, 0x6c, 0x6e, 0x92, 0x97, 0xa1, 0x6a, 0xda, 0x3e, 0x75, 0x77, 0x74, 0x4b, - 0xc2, 0x4e, 0x45, 0x60, 0x83, 0x95, 0x58, 0xf8, 0xdd, 0x6c, 0xcd, 0xc3, 0x18, 0xcd, 0xf7, 0xe4, - 0x5a, 0x81, 0xeb, 0xa3, 0x4b, 0x12, 0x03, 0x03, 0x34, 0x32, 0x09, 0x25, 0xcf, 0xa7, 0x5d, 0x8f, - 0xcf, 0x3c, 0xa3, 0xa2, 0x1a, 0x4d, 0x96, 0x80, 0x22, 0x5d, 0xfb, 0x1f, 0x39, 0xa8, 0x35, 0x74, - 0xcf, 0x34, 0xd8, 0x57, 0x92, 0x39, 0x28, 0xf6, 0x3c, 0xea, 0x1e, 0xee, 0xdb, 0xf8, 0x64, 0xb1, - 0xee, 0x51, 0x17, 0x79, 0x61, 0x72, 0x03, 0xaa, 0x5d, 0xdd, 0xf3, 0x6e, 0x39, 0x6e, 0x4b, 0x4e, - 0x78, 0x07, 0x04, 0x12, 0xca, 0xb9, 0x2c, 0x8a, 0x01, 0x88, 0x56, 0x87, 0x70, 0xc6, 0xd7, 0x7e, - 0x9e, 0x83, 0x53, 0x8d, 0xde, 0xe6, 0x26, 0x75, 0xa5, 0x2e, 0x2a, 0xb5, 0x3c, 0x0a, 0x25, 0x97, - 0xb6, 0x4c, 0x4f, 0xd6, 0x7d, 0x7e, 0xe8, 0x16, 0x84, 0x0c, 0x45, 0x2a, 0x95, 0x5c, 0x5e, 0x3c, - 0x01, 0x05, 0x3a, 0xe9, 0x41, 0xed, 0x35, 0xea, 0x7b, 0xbe, 0x4b, 0xf5, 0x8e, 0xfc, 0xba, 0x2b, - 0x43, 0xb3, 0xba, 0x4a, 0xfd, 0x26, 0x47, 0x8a, 0xea, 0xb0, 0x41, 0x22, 0x86, 0x9c, 0xb4, 0x1f, - 0x96, 0x60, 0x64, 0xce, 0xe9, 0x6c, 0x98, 0x36, 0x6d, 0x5d, 0x6e, 0xb5, 0x29, 0x79, 0x15, 0x8a, - 0xb4, 0xd5, 0xa6, 0xf2, 0x6b, 0x87, 0x9f, 0xee, 0x19, 0x58, 0xa8, 0xb4, 0xb0, 0x37, 0xe4, 0xc0, - 0x64, 0x19, 0xc6, 0x36, 0x5d, 0xa7, 0x23, 0x46, 0xd0, 0xb5, 0xdd, 0xae, 0xd4, 0x58, 0x1b, 0x1f, - 0x56, 0xa3, 0xd2, 0x42, 0x2c, 0xf7, 0xce, 0xde, 
0x24, 0x84, 0x6f, 0x98, 0x28, 0x4b, 0x5e, 0x86, - 0x89, 0x30, 0x25, 0x18, 0x4a, 0xe6, 0xd8, 0x22, 0x82, 0x6b, 0x2c, 0xa5, 0xc6, 0xf9, 0xfd, 0xbd, - 0xc9, 0x89, 0x85, 0x01, 0x34, 0x38, 0xb0, 0x34, 0x79, 0x2b, 0x07, 0xe3, 0x61, 0xa6, 0x18, 0xde, - 0xa5, 0xa2, 0x72, 0x44, 0xf3, 0x06, 0x5f, 0x6d, 0x2d, 0x24, 0x58, 0x60, 0x1f, 0x53, 0xb2, 0x00, - 0x23, 0xbe, 0x13, 0x91, 0x57, 0x89, 0xcb, 0x4b, 0x53, 0xe6, 0x81, 0x35, 0x67, 0xa0, 0xb4, 0x62, - 0xe5, 0x08, 0xc2, 0x19, 0xf5, 0x9e, 0x90, 0x54, 0x99, 0x4b, 0xea, 0xdc, 0xfe, 0xde, 0xe4, 0x99, - 0xb5, 0x54, 0x0a, 0x1c, 0x50, 0x92, 0x7c, 0x3d, 0x07, 0x63, 0x2a, 0x4b, 0xca, 0xa8, 0x72, 0x94, - 0x32, 0x22, 0xac, 0x45, 0xac, 0xc5, 0x18, 0x60, 0x82, 0xa1, 0xf6, 0x8b, 0x22, 0xd4, 0x82, 0x01, - 0x96, 0x3c, 0x0e, 0x25, 0xbe, 0xf0, 0x97, 0x7a, 0x73, 0x30, 0x73, 0x72, 0xfb, 0x00, 0x8a, 0x3c, - 0xf2, 0x04, 0x54, 0x0c, 0xa7, 0xd3, 0xd1, 0xed, 0x16, 0x37, 0xe6, 0xd4, 0x1a, 0x75, 0xa6, 0x30, - 0xcc, 0x89, 0x24, 0x54, 0x79, 0xe4, 0x3c, 0x14, 0x75, 0xb7, 0x2d, 0xec, 0x2a, 0x35, 0x31, 0x1e, - 0xcd, 0xba, 0x6d, 0x0f, 0x79, 0x2a, 0xf9, 0x24, 0x14, 0xa8, 0xbd, 0x33, 0x51, 0x1c, 0xac, 0x91, - 0x5c, 0xb6, 0x77, 0x6e, 0xea, 0x6e, 0xa3, 0x2e, 0xeb, 0x50, 0xb8, 0x6c, 0xef, 0x20, 0x2b, 0x43, - 0x96, 0xa1, 0x42, 0xed, 0x1d, 0xf6, 0xef, 0xa5, 0xc1, 0xe3, 0x43, 0x03, 0x8a, 0x33, 0x12, 0xa9, - 0x9c, 0x07, 0x7a, 0x8d, 0x4c, 0x46, 0x05, 0x41, 0xbe, 0x00, 0x23, 0x42, 0xc5, 0x59, 0x61, 0xff, - 0xc4, 0x9b, 0x28, 0x73, 0xc8, 0xc9, 0xc1, 0x3a, 0x12, 0xa7, 0x0b, 0x0d, 0x4c, 0x91, 0x44, 0x0f, - 0x63, 0x50, 0xe4, 0x0b, 0x50, 0x53, 0xeb, 0x51, 0xf5, 0x67, 0x53, 0x6d, 0x33, 0x6a, 0x11, 0x8b, - 0xf4, 0xf5, 0x9e, 0xe9, 0xd2, 0x0e, 0xb5, 0x7d, 0xaf, 0x71, 0x52, 0xad, 0xd6, 0x55, 0xae, 0x87, - 0x21, 0x1a, 0xd9, 0xe8, 0x37, 0x32, 0x09, 0x0b, 0xc9, 0xe3, 0x03, 0x46, 0xf5, 0x21, 0x2c, 0x4c, - 0x5f, 0x86, 0x13, 0x81, 0x15, 0x48, 0x1a, 0x12, 0x84, 0xcd, 0xe4, 0x69, 0x56, 0x7c, 0x29, 0x9e, - 0x75, 0x67, 0x6f, 0xf2, 0xb1, 0x14, 0x53, 0x42, 0x48, 0x80, 0x49, 0x30, 0xed, 0x07, 
0x05, 0xe8, - 0xd7, 0xfe, 0xe3, 0x42, 0xcb, 0x1d, 0xb5, 0xd0, 0x92, 0x1f, 0x24, 0x86, 0xcf, 0xe7, 0x65, 0xb1, - 0xec, 0x1f, 0x95, 0xf6, 0x63, 0x0a, 0x47, 0xfd, 0x63, 0x1e, 0x94, 0xbe, 0xa3, 0xbd, 0x5d, 0x84, - 0xb1, 0x79, 0x9d, 0x76, 0x1c, 0xfb, 0x9e, 0x6b, 0xa1, 0xdc, 0x03, 0xb1, 0x16, 0xba, 0x04, 0x55, - 0x97, 0x76, 0x2d, 0xd3, 0xd0, 0x85, 0xf2, 0x25, 0x6d, 0x8f, 0x28, 0xd3, 0x30, 0xc8, 0x1d, 0xb0, - 0x06, 0x2e, 0x3c, 0x90, 0x6b, 0xe0, 0xe2, 0x7b, 0xbf, 0x06, 0xd6, 0xbe, 0x9e, 0x07, 0xae, 0xa8, - 0x90, 0x8b, 0x50, 0x64, 0x93, 0x70, 0xd2, 0xf2, 0xc2, 0x1b, 0x0e, 0xcf, 0x21, 0xe7, 0x20, 0xef, - 0x3b, 0xb2, 0xe7, 0x81, 0xcc, 0xcf, 0xaf, 0x39, 0x98, 0xf7, 0x1d, 0xf2, 0x06, 0x80, 0xe1, 0xd8, - 0x2d, 0x53, 0x99, 0xe4, 0xb3, 0x7d, 0xd8, 0x82, 0xe3, 0xde, 0xd2, 0xdd, 0xd6, 0x5c, 0x80, 0x28, - 0x56, 0x41, 0xe1, 0x3b, 0x46, 0xb8, 0x91, 0x17, 0xa0, 0xec, 0xd8, 0x0b, 0x3d, 0xcb, 0xe2, 0x02, - 0xad, 0x35, 0x3e, 0xc2, 0x96, 0xa6, 0x37, 0x78, 0xca, 0x9d, 0xbd, 0xc9, 0xb3, 0x42, 0xbf, 0x65, - 0x6f, 0x2f, 0xb9, 0xa6, 0x6f, 0xda, 0xed, 0xa6, 0xef, 0xea, 0x3e, 0x6d, 0xef, 0xa2, 0x2c, 0xa6, - 0x7d, 0x33, 0x07, 0xf5, 0x05, 0xf3, 0x36, 0x6d, 0xbd, 0x64, 0xda, 0x2d, 0xe7, 0x16, 0x41, 0x28, - 0x5b, 0xd4, 0x6e, 0xfb, 0x5b, 0x43, 0xae, 0x1f, 0xc4, 0xda, 0x98, 0x23, 0xa0, 0x44, 0x22, 0xd3, - 0x50, 0x13, 0xda, 0xa7, 0x69, 0xb7, 0xb9, 0x0c, 0xab, 0xe1, 0xa0, 0xd7, 0x54, 0x19, 0x18, 0xd2, - 0x68, 0xbb, 0x70, 0xb2, 0x4f, 0x0c, 0xa4, 0x05, 0x45, 0x5f, 0x6f, 0xab, 0xf1, 0x75, 0x61, 0x68, - 0x01, 0xaf, 0xe9, 0xed, 0x88, 0x70, 0xf9, 0x1c, 0xbf, 0xa6, 0xb3, 0x39, 0x9e, 0xa1, 0x6b, 0xbf, - 0xca, 0x41, 0x75, 0xa1, 0x67, 0x1b, 0x7c, 0x89, 0x76, 0x6f, 0x8b, 0x9c, 0x52, 0x18, 0xf2, 0xa9, - 0x0a, 0x43, 0x0f, 0xca, 0xdb, 0xb7, 0x02, 0x85, 0xa2, 0x3e, 0xb3, 0x32, 0x7c, 0xab, 0x90, 0x55, - 0x9a, 0xba, 0xc6, 0xf1, 0x84, 0xc3, 0x68, 0x4c, 0x56, 0xa8, 0x7c, 0xed, 0x25, 0xce, 0x54, 0x32, - 0x3b, 0xf7, 0x49, 0xa8, 0x47, 0xc8, 0x0e, 0x65, 0x3b, 0xfe, 0x7f, 0x45, 0x28, 0x2f, 0x36, 0x9b, - 0xb3, 0xab, 0x4b, 0xe4, 
0x19, 0xa8, 0x4b, 0x5f, 0xc2, 0xf5, 0x50, 0x06, 0x81, 0x2b, 0xa9, 0x19, - 0x66, 0x61, 0x94, 0x8e, 0xa9, 0x63, 0x2e, 0xd5, 0xad, 0x8e, 0xec, 0x2c, 0x81, 0x3a, 0x86, 0x2c, - 0x11, 0x45, 0x1e, 0xd1, 0x61, 0x8c, 0xad, 0xf0, 0x98, 0x08, 0xc5, 0xea, 0x4d, 0x76, 0x9b, 0x03, - 0xae, 0xef, 0xb8, 0x92, 0xb8, 0x1e, 0x03, 0xc0, 0x04, 0x20, 0x79, 0x1e, 0xaa, 0x7a, 0xcf, 0xdf, - 0xe2, 0x0a, 0xb4, 0xe8, 0x1b, 0xe7, 0xb9, 0xab, 0x45, 0xa6, 0xdd, 0xd9, 0x9b, 0x1c, 0xb9, 0x86, - 0x8d, 0x67, 0xd4, 0x3b, 0x06, 0xd4, 0xac, 0x72, 0x6a, 0xc5, 0x28, 0x2b, 0x57, 0x3a, 0x74, 0xe5, - 0x56, 0x63, 0x00, 0x98, 0x00, 0x24, 0xaf, 0xc0, 0xc8, 0x36, 0xdd, 0xf5, 0xf5, 0x0d, 0xc9, 0xa0, - 0x7c, 0x18, 0x06, 0xe3, 0x4c, 0x85, 0xbb, 0x16, 0x29, 0x8e, 0x31, 0x30, 0xe2, 0xc1, 0xe9, 0x6d, - 0xea, 0x6e, 0x50, 0xd7, 0x91, 0xab, 0x4f, 0xc9, 0xa4, 0x72, 0x18, 0x26, 0x13, 0xfb, 0x7b, 0x93, - 0xa7, 0xaf, 0xa5, 0xc0, 0x60, 0x2a, 0xb8, 0xf6, 0xcb, 0x3c, 0x9c, 0x58, 0x14, 0xce, 0x5c, 0xc7, - 0x15, 0x93, 0x30, 0x39, 0x0b, 0x05, 0xb7, 0xdb, 0xe3, 0x2d, 0xa7, 0x20, 0xcc, 0xb5, 0xb8, 0xba, - 0x8e, 0x2c, 0x8d, 0xbc, 0x0c, 0xd5, 0x96, 0x1c, 0x32, 0xe4, 0xe2, 0x77, 0x28, 0x43, 0x85, 0x7a, - 0xc3, 0x00, 0x8d, 0x69, 0xfa, 0x1d, 0xaf, 0xdd, 0x34, 0xdf, 0xa0, 0x72, 0x3d, 0xc8, 0x35, 0xfd, - 0x15, 0x91, 0x84, 0x2a, 0x8f, 0xcd, 0xaa, 0xdb, 0x74, 0x57, 0xac, 0x86, 0x8a, 0xe1, 0xac, 0x7a, - 0x4d, 0xa6, 0x61, 0x90, 0x4b, 0x26, 0x55, 0x67, 0x61, 0xad, 0xa0, 0x28, 0x56, 0xf2, 0x37, 0x59, - 0x82, 0xec, 0x37, 0x6c, 0xc8, 0x7c, 0xcd, 0xf4, 0x7d, 0xea, 0xca, 0xdf, 0x38, 0xd4, 0x90, 0x79, - 0x95, 0x23, 0xa0, 0x44, 0x22, 0x1f, 0x83, 0x1a, 0x07, 0x6f, 0x58, 0xce, 0x06, 0xff, 0x71, 0x35, - 0xb1, 0xa6, 0xbf, 0xa9, 0x12, 0x31, 0xcc, 0xd7, 0x7e, 0x9d, 0x87, 0x33, 0x8b, 0xd4, 0x17, 0x5a, - 0xcd, 0x3c, 0xed, 0x5a, 0xce, 0x2e, 0x53, 0x2d, 0x91, 0xbe, 0x4e, 0x5e, 0x04, 0x30, 0xbd, 0x8d, - 0xe6, 0x8e, 0xc1, 0xfb, 0x81, 0xe8, 0xc3, 0x17, 0x65, 0x97, 0x84, 0xa5, 0x66, 0x43, 0xe6, 0xdc, - 0x89, 0xbd, 0x61, 0xa4, 0x4c, 0xb8, 0xbc, 0xca, 0xdf, 0x65, 
0x79, 0xd5, 0x04, 0xe8, 0x86, 0x0a, - 0x6a, 0x81, 0x53, 0xfe, 0x2b, 0xc5, 0xe6, 0x30, 0xba, 0x69, 0x04, 0x26, 0x8b, 0xca, 0x68, 0xc3, - 0x78, 0x8b, 0x6e, 0xea, 0x3d, 0xcb, 0x0f, 0x94, 0x6a, 0xd9, 0x89, 0x0f, 0xae, 0x97, 0x07, 0x8e, - 0xe6, 0xf9, 0x04, 0x12, 0xf6, 0x61, 0x6b, 0xdf, 0x2f, 0xc0, 0xb9, 0x45, 0xea, 0x07, 0x16, 0x17, - 0x39, 0x3a, 0x36, 0xbb, 0xd4, 0x60, 0x7f, 0xe1, 0xad, 0x1c, 0x94, 0x2d, 0x7d, 0x83, 0x5a, 0x6c, - 0xf6, 0x62, 0x5f, 0xf3, 0xea, 0xd0, 0x13, 0xc1, 0x60, 0x2e, 0x53, 0xcb, 0x9c, 0x43, 0x62, 0x6a, - 0x10, 0x89, 0x28, 0xd9, 0xb3, 0x41, 0xdd, 0xb0, 0x7a, 0x9e, 0x4f, 0xdd, 0x55, 0xc7, 0xf5, 0xa5, - 0x3e, 0x19, 0x0c, 0xea, 0x73, 0x61, 0x16, 0x46, 0xe9, 0xc8, 0x0c, 0x80, 0x61, 0x99, 0xd4, 0xf6, - 0x79, 0x29, 0xd1, 0xaf, 0x88, 0xfa, 0xbf, 0x73, 0x41, 0x0e, 0x46, 0xa8, 0x18, 0xab, 0x8e, 0x63, - 0x9b, 0xbe, 0x23, 0x58, 0x15, 0xe3, 0xac, 0x56, 0xc2, 0x2c, 0x8c, 0xd2, 0xf1, 0x62, 0xd4, 0x77, - 0x4d, 0xc3, 0xe3, 0xc5, 0x4a, 0x89, 0x62, 0x61, 0x16, 0x46, 0xe9, 0xd8, 0x9c, 0x17, 0xf9, 0xfe, - 0x43, 0xcd, 0x79, 0xdf, 0xad, 0xc1, 0x85, 0x98, 0x58, 0x7d, 0xdd, 0xa7, 0x9b, 0x3d, 0xab, 0x49, - 0x7d, 0xf5, 0x03, 0x87, 0x9c, 0x0b, 0xff, 0x63, 0xf8, 0xdf, 0x45, 0x08, 0x89, 0x71, 0x34, 0xff, - 0xbd, 0xaf, 0x82, 0x07, 0xfa, 0xf7, 0xd3, 0x50, 0xb3, 0x75, 0xdf, 0xe3, 0x1d, 0x57, 0xf6, 0xd1, - 0x40, 0x0d, 0xbb, 0xae, 0x32, 0x30, 0xa4, 0x21, 0xab, 0x70, 0x5a, 0x8a, 0xf8, 0xf2, 0xed, 0xae, - 0xe3, 0xfa, 0xd4, 0x15, 0x65, 0xe5, 0x74, 0x2a, 0xcb, 0x9e, 0x5e, 0x49, 0xa1, 0xc1, 0xd4, 0x92, - 0x64, 0x05, 0x4e, 0x19, 0xc2, 0xad, 0x4e, 0x2d, 0x47, 0x6f, 0x29, 0x40, 0x61, 0xe0, 0x0a, 0x96, - 0x46, 0x73, 0xfd, 0x24, 0x98, 0x56, 0x2e, 0xd9, 0x9a, 0xcb, 0x43, 0xb5, 0xe6, 0xca, 0x30, 0xad, - 0xb9, 0x3a, 0x5c, 0x6b, 0xae, 0x1d, 0xac, 0x35, 0x33, 0xc9, 0xb3, 0x76, 0x44, 0x5d, 0xa6, 0x9e, - 0x88, 0x19, 0x36, 0x12, 0xb5, 0x11, 0x48, 0xbe, 0x99, 0x42, 0x83, 0xa9, 0x25, 0xc9, 0x06, 0x9c, - 0x13, 0xe9, 0x97, 0x6d, 0xc3, 0xdd, 0xed, 0xb2, 0x89, 0x27, 0x82, 0x5b, 0x8f, 0x59, 0x18, 0xcf, - 
0x35, 0x07, 0x52, 0xe2, 0x5d, 0x50, 0xc8, 0xa7, 0x61, 0x54, 0xfc, 0xa5, 0x15, 0xbd, 0xcb, 0x61, - 0x45, 0x0c, 0xc7, 0xc3, 0x12, 0x76, 0x74, 0x2e, 0x9a, 0x89, 0x71, 0x5a, 0x32, 0x0b, 0x27, 0xba, - 0x3b, 0x06, 0x7b, 0x5c, 0xda, 0xbc, 0x4e, 0x69, 0x8b, 0xb6, 0xb8, 0xd3, 0xa8, 0xd6, 0x78, 0x44, - 0x19, 0x3a, 0x56, 0xe3, 0xd9, 0x98, 0xa4, 0x27, 0xcf, 0xc3, 0x88, 0xe7, 0xeb, 0xae, 0x2f, 0xcd, - 0x7a, 0x13, 0x63, 0x22, 0xc6, 0x45, 0x59, 0xbd, 0x9a, 0x91, 0x3c, 0x8c, 0x51, 0xa6, 0xce, 0x17, - 0x27, 0x8e, 0x6f, 0xbe, 0xc8, 0x32, 0x5a, 0xfd, 0x6e, 0x1e, 0x2e, 0x2e, 0x52, 0x7f, 0xc5, 0xb1, - 0xa5, 0x51, 0x34, 0x6d, 0xda, 0x3f, 0x90, 0x4d, 0x34, 0x3e, 0x69, 0xe7, 0x8f, 0x74, 0xd2, 0x2e, - 0x1c, 0xd1, 0xa4, 0x5d, 0x3c, 0xc6, 0x49, 0xfb, 0x37, 0xf2, 0xf0, 0x48, 0x4c, 0x92, 0xab, 0x4e, - 0x4b, 0x0d, 0xf8, 0x1f, 0x08, 0xf0, 0x00, 0x02, 0xbc, 0x23, 0xf4, 0x4e, 0xee, 0xd6, 0x4a, 0x68, - 0x3c, 0x6f, 0x26, 0x35, 0x9e, 0x57, 0xb2, 0xcc, 0x7c, 0x29, 0x1c, 0x0e, 0x34, 0xe3, 0x5d, 0x05, - 0xe2, 0x4a, 0x27, 0x9c, 0x30, 0xfd, 0x44, 0x94, 0x9e, 0x20, 0x88, 0x0e, 0xfb, 0x28, 0x30, 0xa5, - 0x14, 0x69, 0xc2, 0xc3, 0x1e, 0xb5, 0x7d, 0xd3, 0xa6, 0x56, 0x1c, 0x4e, 0x68, 0x43, 0x8f, 0x49, - 0xb8, 0x87, 0x9b, 0x69, 0x44, 0x98, 0x5e, 0x36, 0xcb, 0x38, 0xf0, 0x07, 0xc0, 0x55, 0x4e, 0x21, - 0x9a, 0x23, 0xd3, 0x58, 0xde, 0x4a, 0x6a, 0x2c, 0xaf, 0x66, 0xff, 0x6f, 0xc3, 0x69, 0x2b, 0x33, - 0x00, 0xfc, 0x2f, 0x44, 0xd5, 0x95, 0x60, 0x92, 0xc6, 0x20, 0x07, 0x23, 0x54, 0x6c, 0x02, 0x52, - 0x72, 0x8e, 0x6a, 0x2a, 0xc1, 0x04, 0xd4, 0x8c, 0x66, 0x62, 0x9c, 0x76, 0xa0, 0xb6, 0x53, 0x1a, - 0x5a, 0xdb, 0xb9, 0x0a, 0x24, 0x66, 0x78, 0x14, 0x78, 0xe5, 0x78, 0x0c, 0xe7, 0x52, 0x1f, 0x05, - 0xa6, 0x94, 0x1a, 0xd0, 0x94, 0x2b, 0x47, 0xdb, 0x94, 0xab, 0xc3, 0x37, 0x65, 0xf2, 0x2a, 0x9c, - 0xe5, 0xac, 0xa4, 0x7c, 0xe2, 0xc0, 0x42, 0xef, 0xf9, 0x90, 0x04, 0x3e, 0x8b, 0x83, 0x08, 0x71, - 0x30, 0x06, 0xfb, 0x3f, 0x86, 0x4b, 0x5b, 0x8c, 0xb9, 0x6e, 0x0d, 0xd6, 0x89, 0xe6, 0x52, 0x68, - 0x30, 0xb5, 0x24, 0x6b, 0x62, 0x3e, 
0x6b, 0x86, 0xfa, 0x86, 0x45, 0x5b, 0x32, 0x86, 0x35, 0x68, - 0x62, 0x6b, 0xcb, 0x4d, 0x99, 0x83, 0x11, 0xaa, 0x34, 0x35, 0x65, 0xe4, 0x90, 0x6a, 0xca, 0x22, - 0xb7, 0xd2, 0x6f, 0xc6, 0xb4, 0x21, 0xa9, 0xeb, 0x04, 0x51, 0xc9, 0x73, 0x49, 0x02, 0xec, 0x2f, - 0xc3, 0xb5, 0x44, 0xc3, 0x35, 0xbb, 0xbe, 0x17, 0xc7, 0x1a, 0x4b, 0x68, 0x89, 0x29, 0x34, 0x98, - 0x5a, 0x92, 0xe9, 0xe7, 0x5b, 0x54, 0xb7, 0xfc, 0xad, 0x38, 0xe0, 0x89, 0xb8, 0x7e, 0x7e, 0xa5, - 0x9f, 0x04, 0xd3, 0xca, 0xa5, 0x4e, 0x48, 0xe3, 0x0f, 0xa6, 0x5a, 0xf5, 0x8d, 0x02, 0x9c, 0x5d, - 0xa4, 0x7e, 0x10, 0xde, 0xf3, 0x81, 0x19, 0xe5, 0x3d, 0x30, 0xa3, 0x7c, 0xa7, 0x04, 0xa7, 0x16, - 0xa9, 0xdf, 0xa7, 0x8d, 0xfd, 0x33, 0x15, 0xff, 0x0a, 0x9c, 0x0a, 0x23, 0xca, 0x9a, 0xbe, 0xe3, - 0x8a, 0xb9, 0x3c, 0xb1, 0x5a, 0x6e, 0xf6, 0x93, 0x60, 0x5a, 0x39, 0xf2, 0x05, 0x78, 0x84, 0x4f, - 0xf5, 0x76, 0x5b, 0xd8, 0x67, 0x85, 0x31, 0x21, 0xb2, 0x27, 0x62, 0x52, 0x42, 0x3e, 0xd2, 0x4c, - 0x27, 0xc3, 0x41, 0xe5, 0xc9, 0x57, 0x61, 0xa4, 0x6b, 0x76, 0xa9, 0x65, 0xda, 0x5c, 0x3f, 0xcb, - 0x1c, 0x12, 0xb2, 0x1a, 0x01, 0x0b, 0x17, 0x70, 0xd1, 0x54, 0x8c, 0x31, 0x4c, 0x6d, 0xa9, 0xd5, - 0x63, 0x6c, 0xa9, 0x7f, 0x9b, 0x87, 0xca, 0xa2, 0xeb, 0xf4, 0xba, 0x8d, 0x5d, 0xd2, 0x86, 0xf2, - 0x2d, 0xee, 0x3c, 0x93, 0xae, 0xa9, 0xe1, 0xa3, 0xb2, 0x85, 0x0f, 0x2e, 0x54, 0x89, 0xc4, 0x3b, - 0x4a, 0x78, 0xd6, 0x88, 0xb7, 0xe9, 0x2e, 0x6d, 0x49, 0x1f, 0x5a, 0xd0, 0x88, 0xaf, 0xb1, 0x44, - 0x14, 0x79, 0xa4, 0x03, 0x27, 0x74, 0xcb, 0x72, 0x6e, 0xd1, 0xd6, 0xb2, 0xee, 0x53, 0x9b, 0x7a, - 0xca, 0x25, 0x79, 0x58, 0xb3, 0x34, 0xf7, 0xeb, 0xcf, 0xc6, 0xa1, 0x30, 0x89, 0x4d, 0x5e, 0x83, - 0x8a, 0xe7, 0x3b, 0xae, 0x52, 0xb6, 0xea, 0x33, 0x73, 0xc3, 0xff, 0xf4, 0xc6, 0xe7, 0x9b, 0x02, - 0x4a, 0xd8, 0xec, 0xe5, 0x0b, 0x2a, 0x06, 0xda, 0xb7, 0x73, 0x00, 0x57, 0xd6, 0xd6, 0x56, 0xa5, - 0x7b, 0xa1, 0x05, 0x45, 0xbd, 0x17, 0x38, 0x2a, 0x87, 0x77, 0x08, 0xc6, 0xc2, 0x32, 0xa5, 0x0f, - 0xaf, 0xe7, 0x6f, 0x21, 0x47, 0x27, 0x1f, 0x85, 0x8a, 0x54, 0x90, 0xa5, 
0xd8, 0x83, 0xd0, 0x02, - 0xa9, 0x44, 0xa3, 0xca, 0xd7, 0xfe, 0x6f, 0x1e, 0x60, 0xa9, 0x65, 0xd1, 0xa6, 0x0a, 0xa4, 0xaf, - 0xf9, 0x5b, 0x2e, 0xf5, 0xb6, 0x1c, 0xab, 0x35, 0xa4, 0x37, 0x95, 0xdb, 0xfc, 0xd7, 0x14, 0x08, - 0x86, 0x78, 0xa4, 0x05, 0x23, 0x9e, 0x4f, 0xbb, 0x2a, 0x52, 0x73, 0x48, 0x27, 0xca, 0xb8, 0xb0, - 0x8b, 0x84, 0x38, 0x18, 0x43, 0x25, 0x3a, 0xd4, 0x4d, 0xdb, 0x10, 0x1d, 0xa4, 0xb1, 0x3b, 0x64, - 0x43, 0x3a, 0xc1, 0x56, 0x1c, 0x4b, 0x21, 0x0c, 0x46, 0x31, 0xb5, 0x9f, 0xe5, 0xe1, 0x0c, 0xe7, - 0xc7, 0xaa, 0x11, 0x8b, 0xc7, 0x24, 0xff, 0xa6, 0x6f, 0xd3, 0xdf, 0xbf, 0x3c, 0x18, 0x6b, 0xb1, - 0x67, 0x6c, 0x85, 0xfa, 0x7a, 0xa8, 0xcf, 0x85, 0x69, 0x91, 0x9d, 0x7e, 0x3d, 0x28, 0x7a, 0x6c, - 0xbc, 0x12, 0xd2, 0x6b, 0x0e, 0xdd, 0x84, 0xd2, 0x3f, 0x80, 0x8f, 0x5e, 0x81, 0xd7, 0x98, 0x8f, - 0x5a, 0x9c, 0x1d, 0xf9, 0x77, 0x50, 0xf6, 0x7c, 0xdd, 0xef, 0xa9, 0xae, 0xb9, 0x7e, 0xd4, 0x8c, - 0x39, 0x78, 0x38, 0x8e, 0x88, 0x77, 0x94, 0x4c, 0xb5, 0x9f, 0xe5, 0xe0, 0x5c, 0x7a, 0xc1, 0x65, - 0xd3, 0xf3, 0xc9, 0xbf, 0xee, 0x13, 0xfb, 0x01, 0xff, 0x38, 0x2b, 0xcd, 0x85, 0x1e, 0xc4, 0x85, - 0xab, 0x94, 0x88, 0xc8, 0x7d, 0x28, 0x99, 0x3e, 0xed, 0xa8, 0xf5, 0xe5, 0x8d, 0x23, 0xfe, 0xf4, - 0xc8, 0xd4, 0xce, 0xb8, 0xa0, 0x60, 0xa6, 0xbd, 0x9d, 0x1f, 0xf4, 0xc9, 0x7c, 0xfa, 0xb0, 0xe2, - 0x31, 0xbf, 0xd7, 0xb2, 0xc5, 0xfc, 0xc6, 0x2b, 0xd4, 0x1f, 0xfa, 0xfb, 0x6f, 0xfb, 0x43, 0x7f, - 0x6f, 0x64, 0x0f, 0xfd, 0x4d, 0x88, 0x61, 0x60, 0x04, 0xf0, 0xbb, 0x05, 0x38, 0x7f, 0xb7, 0x66, - 0xc3, 0xe6, 0x33, 0xd9, 0x3a, 0xb3, 0xce, 0x67, 0x77, 0x6f, 0x87, 0x64, 0x06, 0x4a, 0xdd, 0x2d, - 0xdd, 0x53, 0x4a, 0x99, 0x5a, 0xb0, 0x94, 0x56, 0x59, 0xe2, 0x1d, 0x36, 0x68, 0x70, 0x65, 0x8e, - 0xbf, 0xa2, 0x20, 0x65, 0xc3, 0x71, 0x87, 0x7a, 0x5e, 0x68, 0x13, 0x08, 0x86, 0xe3, 0x15, 0x91, - 0x8c, 0x2a, 0x9f, 0xf8, 0x50, 0x16, 0x26, 0x66, 0x39, 0x33, 0x0d, 0x1f, 0xc8, 0x95, 0x12, 0x26, - 0x1e, 0x7e, 0x94, 0xf4, 0x56, 0x48, 0x5e, 0x64, 0x0a, 0x8a, 0x7e, 0x18, 0xb4, 0xab, 0x96, 0xe6, - 0xc5, 0x14, 
0xfd, 0x94, 0xd3, 0xb1, 0x85, 0xbd, 0xb3, 0xc1, 0x8d, 0xea, 0x2d, 0xe9, 0x3f, 0x37, - 0x1d, 0x9b, 0x2b, 0x64, 0x85, 0x70, 0x61, 0x7f, 0xa3, 0x8f, 0x02, 0x53, 0x4a, 0x69, 0x7f, 0x5c, - 0x85, 0x33, 0xe9, 0xed, 0x81, 0xc9, 0x6d, 0x87, 0xba, 0x1e, 0xc3, 0xce, 0xc5, 0xe5, 0x76, 0x53, - 0x24, 0xa3, 0xca, 0x7f, 0x5f, 0x07, 0x9c, 0x7d, 0x27, 0x07, 0x67, 0x5d, 0xe9, 0x23, 0xba, 0x1f, - 0x41, 0x67, 0x8f, 0x09, 0x73, 0xc6, 0x00, 0x86, 0x38, 0xb8, 0x2e, 0xe4, 0x7f, 0xe5, 0x60, 0xa2, - 0x93, 0xb0, 0x73, 0x1c, 0xe3, 0xbe, 0x35, 0x1e, 0x15, 0xbf, 0x32, 0x80, 0x1f, 0x0e, 0xac, 0x09, - 0xf9, 0x2a, 0xd4, 0xbb, 0xac, 0x5d, 0x78, 0x3e, 0xb5, 0x0d, 0xb5, 0x75, 0x6d, 0xf8, 0x9e, 0xb4, - 0x1a, 0x62, 0xa9, 0x50, 0x34, 0xa1, 0x1f, 0x44, 0x32, 0x30, 0xca, 0xf1, 0x01, 0xdf, 0xa8, 0x76, - 0x09, 0xaa, 0x1e, 0xf5, 0x7d, 0xd3, 0x6e, 0x8b, 0xf5, 0x46, 0x4d, 0xf4, 0x95, 0xa6, 0x4c, 0xc3, - 0x20, 0x97, 0x7c, 0x0c, 0x6a, 0xdc, 0xe5, 0x34, 0xeb, 0xb6, 0xbd, 0x89, 0x1a, 0x0f, 0x17, 0x1b, - 0x15, 0x01, 0x70, 0x32, 0x11, 0xc3, 0x7c, 0xf2, 0x34, 0x8c, 0x6c, 0xf0, 0xee, 0x2b, 0xf7, 0x2e, - 0x0b, 0x1b, 0x17, 0xd7, 0xd6, 0x1a, 0x91, 0x74, 0x8c, 0x51, 0x91, 0x19, 0x00, 0x1a, 0xf8, 0xe5, - 0x92, 0xf6, 0xac, 0xd0, 0x63, 0x87, 0x11, 0x2a, 0xf2, 0x18, 0x14, 0x7c, 0xcb, 0xe3, 0x36, 0xac, - 0x6a, 0xb8, 0x04, 0x5d, 0x5b, 0x6e, 0x22, 0x4b, 0xd7, 0x7e, 0x9d, 0x83, 0x13, 0x89, 0xcd, 0x25, - 0xac, 0x48, 0xcf, 0xb5, 0xe4, 0x30, 0x12, 0x14, 0x59, 0xc7, 0x65, 0x64, 0xe9, 0xe4, 0x55, 0xa9, - 0x96, 0xe7, 0x33, 0x1e, 0xd3, 0x70, 0x5d, 0xf7, 0x3d, 0xa6, 0x87, 0xf7, 0x69, 0xe4, 0xdc, 0xcd, - 0x17, 0xd6, 0x47, 0xce, 0x03, 0x11, 0x37, 0x5f, 0x98, 0x87, 0x31, 0xca, 0x84, 0xc1, 0xaf, 0x78, - 0x10, 0x83, 0x9f, 0xf6, 0xcd, 0x7c, 0x44, 0x02, 0x52, 0xb3, 0xbf, 0x87, 0x04, 0x9e, 0x64, 0x13, - 0x68, 0x30, 0xb9, 0xd7, 0xa2, 0xf3, 0x1f, 0x9f, 0x8c, 0x65, 0x2e, 0x79, 0x49, 0xc8, 0xbe, 0x90, - 0x71, 0x33, 0xec, 0xda, 0x72, 0x53, 0x44, 0x57, 0xa9, 0xbf, 0x16, 0xfc, 0x82, 0xe2, 0x31, 0xfd, - 0x02, 0xed, 0xf7, 0x0b, 0x50, 0xbf, 0xea, 0x6c, 
0xbc, 0x4f, 0x22, 0xa8, 0xd3, 0xa7, 0xa9, 0xfc, - 0x7b, 0x38, 0x4d, 0xad, 0xc3, 0x23, 0xbe, 0x6f, 0x35, 0xa9, 0xe1, 0xd8, 0x2d, 0x6f, 0x76, 0xd3, - 0xa7, 0xee, 0x82, 0x69, 0x9b, 0xde, 0x16, 0x6d, 0x49, 0x77, 0xd2, 0xa3, 0xfb, 0x7b, 0x93, 0x8f, - 0xac, 0xad, 0x2d, 0xa7, 0x91, 0xe0, 0xa0, 0xb2, 0x7c, 0xd8, 0x10, 0x3b, 0x01, 0xf9, 0x4e, 0x19, - 0x19, 0x73, 0x23, 0x86, 0x8d, 0x48, 0x3a, 0xc6, 0xa8, 0xb4, 0x77, 0xf2, 0x50, 0x0b, 0x36, 0xe0, - 0x93, 0x27, 0xa0, 0xb2, 0xe1, 0x3a, 0xdb, 0xd4, 0x15, 0x9e, 0x3b, 0xb9, 0x53, 0xa6, 0x21, 0x92, - 0x50, 0xe5, 0x91, 0xc7, 0xa1, 0xe4, 0x3b, 0x5d, 0xd3, 0x48, 0x1a, 0xd4, 0xd6, 0x58, 0x22, 0x8a, - 0xbc, 0xe3, 0x6b, 0xe0, 0x4f, 0xc6, 0x54, 0xbb, 0xda, 0x40, 0x65, 0xec, 0x15, 0x28, 0x7a, 0xba, - 0x67, 0xc9, 0xf9, 0x34, 0xc3, 0x5e, 0xf6, 0xd9, 0xe6, 0xb2, 0xdc, 0xcb, 0x3e, 0xdb, 0x5c, 0x46, - 0x0e, 0xaa, 0xfd, 0x22, 0x0f, 0x75, 0x21, 0x37, 0x31, 0x2a, 0x1c, 0xa5, 0xe4, 0x5e, 0xe0, 0xa1, - 0x14, 0x5e, 0xaf, 0x43, 0x5d, 0x6e, 0x66, 0x92, 0x83, 0x5c, 0xd4, 0x3f, 0x10, 0x66, 0x06, 0xe1, - 0x14, 0x61, 0x92, 0x12, 0x7d, 0xf1, 0x18, 0x45, 0x5f, 0x3a, 0x90, 0xe8, 0xcb, 0xc7, 0x21, 0xfa, - 0xb7, 0xf2, 0x50, 0x5b, 0x36, 0x37, 0xa9, 0xb1, 0x6b, 0x58, 0x7c, 0x4f, 0x60, 0x8b, 0x5a, 0xd4, - 0xa7, 0x8b, 0xae, 0x6e, 0xd0, 0x55, 0xea, 0x9a, 0xfc, 0x80, 0x1a, 0xd6, 0x3f, 0xf8, 0x08, 0x24, - 0xf7, 0x04, 0xce, 0x0f, 0xa0, 0xc1, 0x81, 0xa5, 0xc9, 0x12, 0x8c, 0xb4, 0xa8, 0x67, 0xba, 0xb4, - 0xb5, 0x1a, 0x59, 0xa8, 0x3c, 0xa1, 0xa6, 0x9a, 0xf9, 0x48, 0xde, 0x9d, 0xbd, 0xc9, 0x51, 0x65, - 0xa0, 0x14, 0x2b, 0x96, 0x58, 0x51, 0xd6, 0xe5, 0xbb, 0x7a, 0xcf, 0x4b, 0xab, 0x63, 0xa4, 0xcb, - 0xaf, 0xa6, 0x93, 0xe0, 0xa0, 0xb2, 0x5a, 0x09, 0x0a, 0xcb, 0x4e, 0x5b, 0x7b, 0xbb, 0x00, 0xc1, - 0x49, 0x46, 0xe4, 0x3f, 0xe4, 0xa0, 0xae, 0xdb, 0xb6, 0xe3, 0xcb, 0x53, 0x82, 0x84, 0x07, 0x1e, - 0x33, 0x1f, 0x98, 0x34, 0x35, 0x1b, 0x82, 0x0a, 0xe7, 0x6d, 0xe0, 0x50, 0x8e, 0xe4, 0x60, 0x94, - 0x37, 0xe9, 0x25, 0xfc, 0xc9, 0x2b, 0xd9, 0x6b, 0x71, 0x00, 0xef, 0xf1, 0xb9, 0xcf, 
0xc1, 0x78, - 0xb2, 0xb2, 0x87, 0x71, 0x07, 0x65, 0x72, 0xcc, 0xe7, 0x01, 0xc2, 0x98, 0x92, 0xfb, 0x60, 0xc4, - 0x32, 0x63, 0x46, 0xac, 0xc5, 0xe1, 0x05, 0x1c, 0x54, 0x7a, 0xa0, 0xe1, 0xea, 0xf5, 0x84, 0xe1, - 0x6a, 0xe9, 0x28, 0x98, 0xdd, 0xdd, 0x58, 0xf5, 0x7f, 0x72, 0x30, 0x1e, 0x12, 0xcb, 0x1d, 0xb2, - 0xcf, 0xc1, 0xa8, 0x4b, 0xf5, 0x56, 0x43, 0xf7, 0x8d, 0x2d, 0x1e, 0xea, 0x9d, 0xe3, 0xb1, 0xd9, - 0x27, 0xf7, 0xf7, 0x26, 0x47, 0x31, 0x9a, 0x81, 0x71, 0x3a, 0xa2, 0x43, 0x9d, 0x25, 0xac, 0x99, - 0x1d, 0xea, 0xf4, 0xfc, 0x21, 0xad, 0xa6, 0x7c, 0xc1, 0x82, 0x21, 0x0c, 0x46, 0x31, 0xb5, 0x77, - 0x73, 0x30, 0x16, 0xad, 0xf0, 0xb1, 0x5b, 0xd4, 0xb6, 0xe2, 0x16, 0xb5, 0xb9, 0x23, 0xf8, 0x27, - 0x03, 0xac, 0x68, 0xbf, 0xac, 0x46, 0x3f, 0x8d, 0x5b, 0xce, 0xa2, 0xc6, 0x82, 0xdc, 0x5d, 0x8d, - 0x05, 0xef, 0xff, 0xc3, 0x6b, 0x06, 0x69, 0xb9, 0xc5, 0x07, 0x58, 0xcb, 0x7d, 0x2f, 0x4f, 0xc0, - 0x89, 0x9c, 0xe2, 0x52, 0xce, 0x70, 0x8a, 0x4b, 0x27, 0x38, 0xc5, 0xa5, 0x72, 0x64, 0x83, 0xce, - 0x41, 0x4e, 0x72, 0xa9, 0xde, 0xd7, 0x93, 0x5c, 0x6a, 0xc7, 0x75, 0x92, 0x0b, 0x64, 0x3d, 0xc9, - 0xe5, 0xcd, 0x1c, 0x8c, 0xb5, 0x62, 0x3b, 0x66, 0xb9, 0x6d, 0x21, 0xcb, 0x54, 0x13, 0xdf, 0x80, - 0x2b, 0xb6, 0x4c, 0xc5, 0xd3, 0x30, 0xc1, 0x52, 0xfb, 0xdf, 0x95, 0xe8, 0x3c, 0x70, 0xbf, 0x4d, - 0xd5, 0xcf, 0xc6, 0x4d, 0xd5, 0x17, 0x93, 0xa6, 0xea, 0x13, 0x91, 0x28, 0xd2, 0xa8, 0xb9, 0xfa, - 0xe3, 0x91, 0xe1, 0xb1, 0xc0, 0x4f, 0x4e, 0x09, 0x24, 0x9d, 0x32, 0x44, 0x7e, 0x1c, 0xaa, 0x9e, - 0x3a, 0x73, 0x52, 0x2c, 0x6c, 0xc2, 0xff, 0xa2, 0xce, 0x83, 0x0c, 0x28, 0x98, 0x26, 0xee, 0x52, - 0xdd, 0x73, 0xec, 0xa4, 0x26, 0x8e, 0x3c, 0x15, 0x65, 0x6e, 0xd4, 0x64, 0x5e, 0xbe, 0x87, 0xc9, - 0x5c, 0x87, 0xba, 0xa5, 0x7b, 0xfe, 0x7a, 0xb7, 0xa5, 0xfb, 0xb4, 0x25, 0xfb, 0xdb, 0xbf, 0x38, - 0xd8, 0x5c, 0xc5, 0xe6, 0xbf, 0x50, 0x21, 0x5c, 0x0e, 0x61, 0x30, 0x8a, 0x49, 0x5a, 0x30, 0xc2, - 0x5e, 0x79, 0x6f, 0x68, 0xcd, 0xaa, 0x23, 0x00, 0x0e, 0xc3, 0x23, 0xb0, 0xf4, 0x2c, 0x47, 0x70, - 0x30, 0x86, 0x3a, 0xc0, 
0xaa, 0x5e, 0x1b, 0xc6, 0xaa, 0x4e, 0x3e, 0x2d, 0x94, 0x8d, 0x5d, 0xf5, - 0xc3, 0xb8, 0x35, 0x6e, 0x34, 0x8c, 0x2a, 0xc4, 0x68, 0x26, 0xc6, 0x69, 0xc9, 0x2c, 0x9c, 0x30, - 0x7a, 0xae, 0xcb, 0xe3, 0x88, 0x64, 0xf1, 0x3a, 0x2f, 0x1e, 0xc4, 0x8b, 0xcd, 0xc5, 0xb3, 0x31, - 0x49, 0xcf, 0x20, 0x7a, 0x52, 0x92, 0x0a, 0x62, 0x24, 0x0e, 0xb1, 0x1e, 0xcf, 0xc6, 0x24, 0x3d, - 0xdf, 0x28, 0x21, 0x50, 0xaf, 0xe8, 0xde, 0x96, 0x0c, 0x36, 0x0b, 0x37, 0x4a, 0x84, 0x59, 0x18, - 0xa5, 0x23, 0x33, 0x00, 0x02, 0x89, 0x97, 0x1a, 0x8b, 0xc7, 0x60, 0xae, 0x07, 0x39, 0x18, 0xa1, - 0xd2, 0xde, 0xac, 0x41, 0xfd, 0xba, 0xee, 0x9b, 0x3b, 0x94, 0xfb, 0xbc, 0x8e, 0xc7, 0xf1, 0xf0, - 0xdf, 0x72, 0x70, 0x26, 0x1e, 0xd8, 0x78, 0x8c, 0xde, 0x07, 0x7e, 0x4c, 0x0a, 0xa6, 0x72, 0xc3, - 0x01, 0xb5, 0xe0, 0x7e, 0x88, 0xbe, 0x38, 0xc9, 0xe3, 0xf6, 0x43, 0x34, 0x07, 0x31, 0xc4, 0xc1, - 0x75, 0x79, 0xbf, 0xf8, 0x21, 0x1e, 0xec, 0xd3, 0xf4, 0x12, 0x5e, 0x92, 0xca, 0x03, 0xe3, 0x25, - 0xa9, 0x3e, 0x10, 0xaa, 0x69, 0x37, 0xe2, 0x25, 0xa9, 0x65, 0x8c, 0xd6, 0x91, 0x7b, 0x01, 0x04, - 0xda, 0x20, 0x6f, 0x0b, 0xdf, 0xc6, 0xaf, 0xac, 0xd7, 0x4c, 0xa3, 0xdb, 0xd0, 0x3d, 0xd3, 0x90, - 0x4a, 0x42, 0x86, 0xd3, 0x43, 0xd5, 0xf9, 0x66, 0xc2, 0xa9, 0xcf, 0x5f, 0x51, 0x60, 0x87, 0xc7, - 0xb9, 0xe5, 0x33, 0x1d, 0xe7, 0x46, 0xe6, 0xa0, 0x68, 0x6f, 0xd3, 0xdd, 0xc3, 0x6d, 0x88, 0xe7, - 0x2b, 0x95, 0xeb, 0xd7, 0xe8, 0x2e, 0xf2, 0xc2, 0xda, 0x3b, 0x79, 0x00, 0xf6, 0xf9, 0x07, 0xf3, - 0x57, 0x7c, 0x14, 0x2a, 0x5e, 0x8f, 0x5b, 0x16, 0xa4, 0x7a, 0x13, 0x86, 0x38, 0x89, 0x64, 0x54, - 0xf9, 0xe4, 0x71, 0x28, 0xbd, 0xde, 0xa3, 0x3d, 0xe5, 0x7c, 0x0f, 0x94, 0xdb, 0xcf, 0xb3, 0x44, - 0x14, 0x79, 0xc7, 0x67, 0x7b, 0x54, 0x7e, 0x8d, 0xd2, 0x71, 0xf9, 0x35, 0x6a, 0x50, 0xb9, 0xee, - 0xf0, 0x88, 0x49, 0xed, 0xaf, 0xf3, 0x00, 0x61, 0x44, 0x1a, 0xf9, 0x76, 0x0e, 0x1e, 0x0e, 0x3a, - 0x9c, 0x2f, 0xd6, 0x28, 0xfc, 0xc0, 0xde, 0xcc, 0x3e, 0x8e, 0xb4, 0xce, 0xce, 0x47, 0xa0, 0xd5, - 0x34, 0x76, 0x98, 0x5e, 0x0b, 0x82, 0x50, 0xa5, 0x9d, 0xae, 
0xbf, 0x3b, 0x6f, 0xba, 0xb2, 0x05, - 0xa6, 0x06, 0x3e, 0x5e, 0x96, 0x34, 0xa2, 0xa8, 0x5c, 0x48, 0xf3, 0x4e, 0xa4, 0x72, 0x30, 0xc0, - 0x21, 0x5b, 0x50, 0xb5, 0x9d, 0x57, 0x3d, 0x26, 0x0e, 0xd9, 0x1c, 0x5f, 0x1c, 0x5e, 0xe4, 0x42, - 0xac, 0xc2, 0x26, 0x2e, 0x5f, 0xb0, 0x62, 0x4b, 0x61, 0x7f, 0x2b, 0x0f, 0xa7, 0x52, 0xe4, 0x40, - 0x5e, 0x84, 0x71, 0x19, 0xfc, 0x17, 0x9e, 0x5c, 0x9d, 0x0b, 0x4f, 0xae, 0x6e, 0x26, 0xf2, 0xb0, - 0x8f, 0x9a, 0xbc, 0x0a, 0xa0, 0x1b, 0x06, 0xf5, 0xbc, 0x15, 0xa7, 0xa5, 0xb4, 0xf7, 0x17, 0x98, - 0xfa, 0x32, 0x1b, 0xa4, 0xde, 0xd9, 0x9b, 0xfc, 0x44, 0x5a, 0x3c, 0x6f, 0x42, 0xce, 0x61, 0x01, - 0x8c, 0x40, 0x92, 0x2f, 0x03, 0x88, 0x85, 0x6a, 0x70, 0xe4, 0xc0, 0x3d, 0xac, 0x3b, 0x53, 0xea, - 0x70, 0xa7, 0xa9, 0xcf, 0xf7, 0x74, 0xdb, 0x37, 0xfd, 0x5d, 0x71, 0xc2, 0xcb, 0xcd, 0x00, 0x05, - 0x23, 0x88, 0xda, 0xef, 0xe4, 0xa1, 0xaa, 0xec, 0xca, 0xf7, 0xc1, 0x98, 0xd8, 0x8e, 0x19, 0x13, - 0x8f, 0x28, 0x82, 0x37, 0xcd, 0x94, 0xe8, 0x24, 0x4c, 0x89, 0x8b, 0xd9, 0x59, 0xdd, 0xdd, 0x90, - 0xf8, 0xbd, 0x3c, 0x8c, 0x29, 0xd2, 0xac, 0x66, 0xc4, 0xcf, 0xc2, 0x09, 0xe1, 0x79, 0x5f, 0xd1, - 0x6f, 0x8b, 0xc3, 0x6e, 0xb8, 0xc0, 0x8a, 0x22, 0x68, 0xb6, 0x11, 0xcf, 0xc2, 0x24, 0x2d, 0x6b, - 0xd6, 0x22, 0x69, 0x9d, 0xad, 0xba, 0x84, 0xaf, 0x4e, 0xac, 0x0e, 0x79, 0xb3, 0x6e, 0x24, 0xf2, - 0xb0, 0x8f, 0x3a, 0x69, 0xc7, 0x2c, 0x1e, 0x83, 0x1d, 0xf3, 0x4f, 0x72, 0x30, 0x12, 0xca, 0xeb, - 0xd8, 0xad, 0x98, 0x9b, 0x71, 0x2b, 0xe6, 0x6c, 0xe6, 0xe6, 0x30, 0xc0, 0x86, 0xf9, 0x9f, 0x2a, - 0x10, 0x0b, 0x24, 0x27, 0x1b, 0x70, 0xce, 0x4c, 0x0d, 0x87, 0x8b, 0x8c, 0x36, 0xc1, 0xce, 0xe8, - 0xa5, 0x81, 0x94, 0x78, 0x17, 0x14, 0xd2, 0x83, 0xea, 0x0e, 0x75, 0x7d, 0xd3, 0xa0, 0xea, 0xfb, - 0x16, 0x33, 0xab, 0x64, 0xd2, 0x52, 0x1b, 0xc8, 0xf4, 0xa6, 0x64, 0x80, 0x01, 0x2b, 0xb2, 0x01, - 0x25, 0xda, 0x6a, 0x53, 0x75, 0xfc, 0x50, 0xc6, 0xc3, 0x3d, 0x03, 0x79, 0xb2, 0x37, 0x0f, 0x05, - 0x34, 0xf1, 0xa0, 0x66, 0x29, 0x4f, 0x9c, 0x6c, 0x87, 0xc3, 0x2b, 0x58, 0x81, 0x4f, 0x2f, 0x3c, - 
0x99, 0x20, 0x48, 0xc2, 0x90, 0x0f, 0xd9, 0x0e, 0x4c, 0x82, 0xa5, 0x23, 0x1a, 0x3c, 0xee, 0x62, - 0x10, 0xf4, 0xa0, 0x76, 0x4b, 0xf7, 0xa9, 0xdb, 0xd1, 0xdd, 0x6d, 0xb9, 0xda, 0x18, 0xfe, 0x0b, - 0x5f, 0x52, 0x48, 0xe1, 0x17, 0x06, 0x49, 0x18, 0xf2, 0x21, 0x0e, 0xd4, 0x7c, 0xa9, 0x3e, 0x2b, - 0xbb, 0xe7, 0xf0, 0x4c, 0x95, 0x22, 0xee, 0xc9, 0x80, 0x72, 0xf5, 0x8a, 0x21, 0x0f, 0xb2, 0x13, - 0x3b, 0x7f, 0x59, 0x9c, 0xba, 0xdd, 0xc8, 0x60, 0x3f, 0x97, 0x50, 0xe1, 0x74, 0x93, 0x7e, 0x8e, - 0xb3, 0xf6, 0x4e, 0x29, 0x1c, 0x96, 0xef, 0xb7, 0x55, 0xef, 0xe9, 0xb8, 0x55, 0xef, 0x42, 0xd2, - 0xaa, 0x97, 0x70, 0xe8, 0x1e, 0x3e, 0x04, 0x35, 0x61, 0x4f, 0x2b, 0x1e, 0x83, 0x3d, 0xed, 0x29, - 0xa8, 0xef, 0xf0, 0x91, 0x40, 0x9c, 0x65, 0x54, 0xe2, 0xd3, 0x08, 0x1f, 0xd9, 0x6f, 0x86, 0xc9, - 0x18, 0xa5, 0x61, 0x45, 0xe4, 0x8d, 0x13, 0xc1, 0x61, 0xb0, 0xb2, 0x48, 0x33, 0x4c, 0xc6, 0x28, - 0x0d, 0x8f, 0x5e, 0x33, 0xed, 0x6d, 0x51, 0xa0, 0xc2, 0x0b, 0x88, 0xe8, 0x35, 0x95, 0x88, 0x61, - 0x3e, 0xb9, 0x04, 0xd5, 0x5e, 0x6b, 0x53, 0xd0, 0x56, 0x39, 0x2d, 0xd7, 0x30, 0xd7, 0xe7, 0x17, - 0xe4, 0xd9, 0x4a, 0x2a, 0x97, 0xd5, 0xa4, 0xa3, 0x77, 0x55, 0x06, 0x5f, 0x1b, 0xca, 0x9a, 0xac, - 0x84, 0xc9, 0x18, 0xa5, 0x21, 0x9f, 0x82, 0x31, 0x97, 0xb6, 0x7a, 0x06, 0x0d, 0x4a, 0x09, 0x73, - 0x1c, 0x11, 0x57, 0x6b, 0x44, 0x73, 0x30, 0x41, 0x39, 0xc0, 0x2a, 0x58, 0x1f, 0x2a, 0xd6, 0xf6, - 0xa7, 0x39, 0x20, 0xfd, 0xd1, 0xde, 0x64, 0x0b, 0xca, 0x36, 0xb7, 0x7e, 0x65, 0x3e, 0x3e, 0x3a, - 0x62, 0x44, 0x13, 0xc3, 0x92, 0x4c, 0x90, 0xf8, 0xc4, 0x86, 0x2a, 0xbd, 0xed, 0x53, 0xd7, 0x0e, - 0x76, 0x7f, 0x1c, 0xcd, 0x51, 0xd5, 0x62, 0x35, 0x20, 0x91, 0x31, 0xe0, 0xa1, 0xfd, 0x3c, 0x0f, - 0xf5, 0x08, 0xdd, 0xbd, 0x16, 0x95, 0x7c, 0x03, 0xba, 0x30, 0x3a, 0xad, 0xbb, 0x96, 0xec, 0x61, - 0x91, 0x0d, 0xe8, 0x32, 0x0b, 0x97, 0x31, 0x4a, 0x47, 0x66, 0x00, 0x3a, 0xba, 0xe7, 0x53, 0x97, - 0xcf, 0xbe, 0x89, 0x6d, 0xdf, 0x2b, 0x41, 0x0e, 0x46, 0xa8, 0xc8, 0x45, 0x79, 0xd8, 0x78, 0x31, - 0x7e, 0x4c, 0xdf, 0x80, 0x93, 0xc4, 
0x4b, 0x47, 0x70, 0x92, 0x38, 0x69, 0xc3, 0xb8, 0xaa, 0xb5, - 0xca, 0x3d, 0xdc, 0x21, 0x6e, 0x62, 0xfd, 0x92, 0x80, 0xc0, 0x3e, 0x50, 0xed, 0x9d, 0x1c, 0x8c, - 0xc6, 0x4c, 0x1e, 0xe2, 0x80, 0x3d, 0xb5, 0x57, 0x21, 0x76, 0xc0, 0x5e, 0x64, 0x8b, 0xc1, 0x93, - 0x50, 0x16, 0x02, 0x4a, 0x86, 0x20, 0x0a, 0x11, 0xa2, 0xcc, 0x65, 0x63, 0x99, 0x34, 0xaa, 0x26, - 0xc7, 0x32, 0x69, 0x75, 0x45, 0x95, 0x2f, 0x9c, 0x13, 0xa2, 0x76, 0xfd, 0xce, 0x09, 0x91, 0x8e, - 0x01, 0x85, 0xf6, 0x03, 0x5e, 0x6f, 0xdf, 0xdd, 0x0d, 0xd6, 0x72, 0x6d, 0xa8, 0xc8, 0xb0, 0x33, - 0xd9, 0x35, 0x5e, 0xcc, 0x60, 0x87, 0xe1, 0x38, 0x32, 0xc0, 0x4a, 0x37, 0xb6, 0x6f, 0x6c, 0x6e, - 0xa2, 0x42, 0x27, 0x97, 0xa1, 0xe6, 0xd8, 0x0b, 0xba, 0x69, 0xf5, 0x5c, 0x35, 0xb2, 0x7f, 0x84, - 0x8d, 0x55, 0x37, 0x54, 0xe2, 0x9d, 0xbd, 0xc9, 0x33, 0xc1, 0x4b, 0xac, 0x92, 0x18, 0x96, 0xd4, - 0xfe, 0xbe, 0x00, 0x3c, 0xe4, 0x88, 0x3c, 0x07, 0xb5, 0x0e, 0x35, 0xb6, 0x74, 0xdb, 0xf4, 0xd4, - 0x11, 0xa1, 0x6c, 0x7d, 0x5e, 0x5b, 0x51, 0x89, 0x77, 0x98, 0x08, 0x66, 0x9b, 0xcb, 0x3c, 0xaa, - 0x3f, 0xa4, 0x25, 0x06, 0x94, 0xdb, 0x9e, 0xa7, 0x77, 0xcd, 0xcc, 0x1e, 0x6f, 0x71, 0x24, 0xa3, - 0x18, 0x06, 0xc4, 0x33, 0x4a, 0x68, 0x62, 0x40, 0xa9, 0x6b, 0xe9, 0xa6, 0x9d, 0xf9, 0x6a, 0x1c, - 0xf6, 0x05, 0xab, 0x0c, 0x49, 0x18, 0xa5, 0xf8, 0x23, 0x0a, 0x6c, 0xd2, 0x83, 0xba, 0x67, 0xb8, - 0x7a, 0xc7, 0xdb, 0xd2, 0x67, 0x9e, 0x79, 0x36, 0xb3, 0x9a, 0x17, 0xb2, 0x12, 0xb3, 0xce, 0x1c, - 0xce, 0xae, 0x34, 0xaf, 0xcc, 0xce, 0x3c, 0xf3, 0x2c, 0x46, 0xf9, 0x44, 0xd9, 0x3e, 0xf3, 0xd4, - 0x8c, 0xec, 0xb9, 0x47, 0xce, 0xf6, 0x99, 0xa7, 0x66, 0x30, 0xca, 0x47, 0xfb, 0xbb, 0x1c, 0xd4, - 0x02, 0x5a, 0xb2, 0x0e, 0xc0, 0xc6, 0x10, 0x79, 0x88, 0xe2, 0xa1, 0x2e, 0x34, 0xe0, 0xeb, 0xfa, - 0xf5, 0xa0, 0x30, 0x46, 0x80, 0x52, 0x4e, 0x99, 0xcc, 0x1f, 0xf5, 0x29, 0x93, 0xd3, 0x50, 0xdb, - 0xd2, 0xed, 0x96, 0xb7, 0xa5, 0x6f, 0x8b, 0xa1, 0x34, 0x72, 0xee, 0xea, 0x15, 0x95, 0x81, 0x21, - 0x8d, 0xf6, 0x5b, 0x65, 0x10, 0x6e, 0x6a, 0xd6, 0xd9, 0x5b, 0xa6, 0x27, 
0xe2, 0xa4, 0x73, 0xbc, - 0x64, 0xd0, 0xd9, 0xe7, 0x65, 0x3a, 0x06, 0x14, 0xe4, 0x2c, 0x14, 0x3a, 0xa6, 0x2d, 0x7d, 0x36, - 0xdc, 0x64, 0xb7, 0x62, 0xda, 0xc8, 0xd2, 0x78, 0x96, 0x7e, 0x5b, 0x86, 0xb8, 0x89, 0x2c, 0xfd, - 0x36, 0xb2, 0x34, 0xb6, 0x88, 0xb6, 0x1c, 0x67, 0x9b, 0x75, 0x5b, 0x15, 0x09, 0x57, 0xe4, 0x53, - 0x39, 0x5f, 0x44, 0x2f, 0xc7, 0xb3, 0x30, 0x49, 0x4b, 0xd6, 0xe1, 0x91, 0x37, 0xa8, 0xeb, 0xc8, - 0x71, 0xaa, 0x69, 0x51, 0xda, 0x55, 0x30, 0x42, 0x09, 0xe2, 0x01, 0x75, 0x5f, 0x4c, 0x27, 0xc1, - 0x41, 0x65, 0x79, 0x68, 0xae, 0xee, 0xb6, 0xa9, 0xbf, 0xea, 0x3a, 0x06, 0xf5, 0x3c, 0xd3, 0x6e, - 0x2b, 0xd8, 0x72, 0x08, 0xbb, 0x96, 0x4e, 0x82, 0x83, 0xca, 0x92, 0x97, 0x61, 0x42, 0x64, 0x09, - 0x75, 0x61, 0x76, 0x47, 0x37, 0x2d, 0x7d, 0xc3, 0xb4, 0xd4, 0x8d, 0x72, 0xa3, 0xc2, 0x33, 0xb2, - 0x36, 0x80, 0x06, 0x07, 0x96, 0x26, 0x57, 0x61, 0x5c, 0xf9, 0xc5, 0x56, 0xa9, 0xdb, 0x0c, 0x42, - 0x17, 0x46, 0x1b, 0x17, 0xd8, 0x8a, 0x75, 0x9e, 0x76, 0x5d, 0x6a, 0x44, 0xbd, 0x81, 0x8a, 0x0a, - 0xfb, 0xca, 0x11, 0x84, 0x33, 0x3c, 0x3e, 0x61, 0xbd, 0x3b, 0xe7, 0x38, 0x56, 0xcb, 0xb9, 0x65, - 0xab, 0x6f, 0x17, 0xaa, 0x19, 0x77, 0x85, 0x35, 0x53, 0x29, 0x70, 0x40, 0x49, 0xf6, 0xe5, 0x3c, - 0x67, 0xde, 0xb9, 0x65, 0x27, 0x51, 0x21, 0xfc, 0xf2, 0xe6, 0x00, 0x1a, 0x1c, 0x58, 0x9a, 0x2c, - 0x00, 0x49, 0x7e, 0xc1, 0x7a, 0x57, 0xba, 0x57, 0xcf, 0x88, 0xf3, 0x50, 0x92, 0xb9, 0x98, 0x52, - 0x82, 0x2c, 0xc3, 0xe9, 0x64, 0x2a, 0x63, 0x27, 0xbd, 0xac, 0xfc, 0x24, 0x54, 0x4c, 0xc9, 0xc7, - 0xd4, 0x52, 0xda, 0x6f, 0xe7, 0x61, 0x34, 0xb6, 0x81, 0xfe, 0x81, 0xdb, 0xa8, 0xcc, 0x74, 0xe8, - 0x8e, 0xd7, 0x5e, 0x9a, 0xbf, 0x42, 0xf5, 0x16, 0x75, 0xaf, 0x51, 0x75, 0xd8, 0x01, 0x1f, 0x54, - 0x56, 0x62, 0x39, 0x98, 0xa0, 0x24, 0x9b, 0x50, 0x12, 0x16, 0xe1, 0xac, 0x57, 0x63, 0x28, 0x19, - 0x71, 0xb3, 0xb0, 0xbc, 0x4f, 0xc6, 0x71, 0x29, 0x0a, 0x78, 0xcd, 0x87, 0x91, 0x28, 0x05, 0x1b, - 0x48, 0x42, 0x75, 0xb3, 0x12, 0x53, 0x35, 0x97, 0xa0, 0xe0, 0xfb, 0xc3, 0x6e, 0x81, 0x16, 0x1e, - 0x86, 0xb5, 
0x65, 0x64, 0x18, 0xda, 0x26, 0xfb, 0x77, 0x9e, 0x67, 0x3a, 0xb6, 0x3c, 0x0f, 0x7b, - 0x1d, 0x2a, 0xbe, 0x34, 0xb2, 0x0d, 0xb7, 0x85, 0x9b, 0xeb, 0x28, 0xca, 0xc0, 0xa6, 0xb0, 0xb4, - 0x3f, 0xcd, 0x43, 0x2d, 0x58, 0x10, 0x1f, 0xe0, 0x9c, 0x69, 0x07, 0x6a, 0x41, 0x7c, 0x55, 0xe6, - 0xdb, 0xf6, 0xc2, 0xb0, 0x1f, 0xbe, 0x86, 0x0b, 0x5e, 0x31, 0xe4, 0x11, 0x8d, 0xdd, 0x2a, 0x64, - 0x88, 0xdd, 0xea, 0x42, 0xc5, 0x77, 0xcd, 0x76, 0x5b, 0x6a, 0xe7, 0x59, 0x82, 0xb7, 0x02, 0x71, - 0xad, 0x09, 0x40, 0x29, 0x59, 0xf1, 0x82, 0x8a, 0x8d, 0xf6, 0x1a, 0x8c, 0x27, 0x29, 0xb9, 0xea, - 0x6a, 0x6c, 0xd1, 0x56, 0xcf, 0x52, 0x32, 0x0e, 0x55, 0x57, 0x99, 0x8e, 0x01, 0x05, 0x5b, 0xbe, - 0xb2, 0xdf, 0xf4, 0x86, 0x63, 0x2b, 0xf5, 0x91, 0xaf, 0x02, 0xd6, 0x64, 0x1a, 0x06, 0xb9, 0xda, - 0x5f, 0x15, 0xe0, 0x6c, 0x68, 0xd6, 0x58, 0xd1, 0x6d, 0xbd, 0x7d, 0x80, 0x2b, 0xd6, 0x3e, 0xd8, - 0x14, 0x73, 0xd8, 0xcb, 0x02, 0x0a, 0x0f, 0xc0, 0x65, 0x01, 0xff, 0x90, 0x07, 0x1e, 0x0b, 0x4a, - 0xbe, 0x0a, 0x23, 0x7a, 0xe4, 0x76, 0x4d, 0xf9, 0x3b, 0x2f, 0x67, 0xfe, 0x9d, 0x3c, 0xe4, 0x34, - 0x88, 0x6d, 0x8a, 0xa6, 0x62, 0x8c, 0x21, 0x71, 0xa0, 0xba, 0xa9, 0x5b, 0x16, 0xd3, 0x85, 0x32, - 0xbb, 0x69, 0x62, 0xcc, 0x79, 0x33, 0x5f, 0x90, 0xd0, 0x18, 0x30, 0x21, 0x6f, 0xe6, 0x60, 0xd4, - 0x8d, 0x2e, 0x93, 0xe4, 0x0f, 0xc9, 0xe2, 0xc4, 0x8f, 0xa0, 0x45, 0x23, 0xa9, 0xa2, 0x6b, 0xb1, - 0x38, 0x4f, 0xed, 0x2f, 0x73, 0x30, 0xda, 0xb4, 0xcc, 0x96, 0x69, 0xb7, 0x8f, 0xf1, 0xae, 0x82, - 0x1b, 0x50, 0xf2, 0x2c, 0xb3, 0x45, 0x87, 0x9c, 0x4d, 0xc4, 0x3c, 0xc6, 0x00, 0x50, 0xe0, 0xc4, - 0x2f, 0x3f, 0x28, 0x1c, 0xe0, 0xf2, 0x83, 0x5f, 0x95, 0x41, 0x46, 0x35, 0x93, 0x1e, 0xd4, 0xda, - 0xea, 0x4c, 0x75, 0xf9, 0x8d, 0x57, 0x32, 0x9c, 0xc7, 0x17, 0x3b, 0x9d, 0x5d, 0x8c, 0xfd, 0x41, - 0x22, 0x86, 0x9c, 0x08, 0x8d, 0x5f, 0xeb, 0x3a, 0x9f, 0xf1, 0x5a, 0x57, 0xc1, 0xae, 0xff, 0x62, - 0x57, 0x1d, 0x8a, 0x5b, 0xbe, 0xdf, 0x95, 0x8d, 0x69, 0xf8, 0xb0, 0xf5, 0xf0, 0x48, 0x18, 0xa1, - 0x13, 0xb1, 0x77, 0xe4, 0xd0, 0x8c, 0x85, 0xad, 
0x07, 0xd7, 0x78, 0xcd, 0x65, 0x0a, 0x18, 0x88, - 0xb2, 0x60, 0xef, 0xc8, 0xa1, 0xc9, 0x57, 0xa0, 0xee, 0xbb, 0xba, 0xed, 0x6d, 0x3a, 0x6e, 0x87, - 0xba, 0x72, 0x8d, 0xba, 0x90, 0xe1, 0x66, 0xd3, 0xb5, 0x10, 0x4d, 0x78, 0x22, 0x63, 0x49, 0x18, - 0xe5, 0x46, 0xb6, 0xa1, 0xda, 0x6b, 0x89, 0x8a, 0x49, 0xf3, 0xd3, 0x6c, 0x96, 0xcb, 0x6a, 0x23, - 0xe1, 0x00, 0xea, 0x0d, 0x03, 0x06, 0xf1, 0x1b, 0xeb, 0x2a, 0x47, 0x75, 0x63, 0x5d, 0xb4, 0x35, - 0xa6, 0x9d, 0x57, 0x41, 0x3a, 0x52, 0xaf, 0xb5, 0xdb, 0x32, 0x9a, 0x69, 0x21, 0xb3, 0xca, 0x29, - 0x58, 0xd6, 0x03, 0xdd, 0xd8, 0x6e, 0xa3, 0xe2, 0xa1, 0x75, 0x40, 0x7a, 0x09, 0x88, 0x11, 0xbb, - 0xd7, 0x45, 0x6c, 0xa2, 0x9a, 0x3e, 0xd8, 0x78, 0x10, 0x5c, 0x30, 0x12, 0x39, 0x57, 0x3a, 0xf5, - 0x02, 0x17, 0xed, 0xcf, 0xf2, 0x50, 0x58, 0x5b, 0x6e, 0x8a, 0xb3, 0x22, 0xf9, 0xa5, 0x49, 0xb4, - 0xb9, 0x6d, 0x76, 0x6f, 0x52, 0xd7, 0xdc, 0xdc, 0x95, 0x4b, 0xef, 0xc8, 0x59, 0x91, 0x49, 0x0a, - 0x4c, 0x29, 0x45, 0x5e, 0x81, 0x11, 0x43, 0x9f, 0xa3, 0xae, 0x3f, 0x8c, 0x61, 0x81, 0xef, 0x16, - 0x9d, 0x9b, 0x0d, 0x8b, 0x63, 0x0c, 0x8c, 0xac, 0x03, 0x18, 0x21, 0x74, 0xe1, 0xd0, 0xe6, 0x90, - 0x08, 0x70, 0x04, 0x88, 0x20, 0xd4, 0xb6, 0x19, 0x29, 0x47, 0x2d, 0x1e, 0x06, 0x95, 0xb7, 0x9c, - 0x6b, 0xaa, 0x2c, 0x86, 0x30, 0x9a, 0x0d, 0xa3, 0xb1, 0xcb, 0x5e, 0xc8, 0x27, 0xa1, 0xea, 0x74, - 0x23, 0xc3, 0x69, 0x8d, 0xc7, 0x4d, 0x56, 0x6f, 0xc8, 0xb4, 0x3b, 0x7b, 0x93, 0xa3, 0xcb, 0x4e, - 0xdb, 0x34, 0x54, 0x02, 0x06, 0xe4, 0x44, 0x83, 0x32, 0xdf, 0xe2, 0xa5, 0xae, 0x7a, 0xe1, 0x73, - 0x07, 0xbf, 0x8d, 0xc1, 0x43, 0x99, 0xa3, 0x7d, 0xad, 0x08, 0xa1, 0x6f, 0x8d, 0x78, 0x50, 0x16, - 0x21, 0xec, 0x72, 0xe4, 0x3e, 0xd6, 0x68, 0x79, 0xc9, 0x8a, 0xb4, 0xa1, 0xf0, 0x9a, 0xb3, 0x91, - 0x79, 0xe0, 0x8e, 0xec, 0xed, 0x16, 0xb6, 0xb2, 0x48, 0x02, 0x32, 0x0e, 0xe4, 0xbf, 0xe7, 0xe0, - 0xa4, 0x97, 0x54, 0x7d, 0x65, 0x73, 0xc0, 0xec, 0x3a, 0x7e, 0x52, 0x99, 0x96, 0x01, 0xae, 0x83, - 0xb2, 0xb1, 0xbf, 0x2e, 0x4c, 0xfe, 0xc2, 0xe9, 0x25, 0x9b, 0xd3, 0x62, 0xc6, 0x0b, 
0x0a, 0xe3, - 0xf2, 0x8f, 0xa7, 0xa1, 0x64, 0xa5, 0x7d, 0x23, 0x0f, 0xf5, 0xc8, 0x68, 0x9d, 0xf9, 0x06, 0xa1, - 0xdb, 0x89, 0x1b, 0x84, 0x56, 0x87, 0xf7, 0x01, 0x87, 0xb5, 0x3a, 0xee, 0x4b, 0x84, 0x7e, 0x2f, - 0x0f, 0x85, 0xf5, 0xf9, 0x85, 0xf8, 0xa2, 0x35, 0x77, 0x1f, 0x16, 0xad, 0x5b, 0x50, 0xd9, 0xe8, - 0x99, 0x96, 0x6f, 0xda, 0x99, 0x4f, 0x9f, 0x50, 0x17, 0x2e, 0x49, 0x1f, 0x83, 0x40, 0x45, 0x05, - 0x4f, 0xda, 0x50, 0x69, 0x8b, 0xe3, 0xff, 0x32, 0x47, 0xc6, 0xc9, 0x63, 0x04, 0x05, 0x23, 0xf9, - 0x82, 0x0a, 0x5d, 0xdb, 0x05, 0x79, 0x73, 0xfc, 0x7d, 0x97, 0xa6, 0xf6, 0x15, 0x08, 0xb4, 0x80, - 0xfb, 0xcf, 0xfc, 0x6f, 0x72, 0x10, 0x57, 0x7c, 0xee, 0x7f, 0x6b, 0xda, 0x4e, 0xb6, 0xa6, 0xf9, - 0xa3, 0xe8, 0x7c, 0xe9, 0x0d, 0x4a, 0xfb, 0xcd, 0x3c, 0x94, 0xef, 0xdb, 0x8e, 0x61, 0x1a, 0x0b, - 0xf2, 0x9b, 0xcb, 0x38, 0x30, 0x0e, 0x0c, 0xf1, 0xeb, 0x24, 0x42, 0xfc, 0xb2, 0x5e, 0x11, 0x7b, - 0x8f, 0x00, 0xbf, 0x3f, 0xca, 0x81, 0x1c, 0x96, 0x97, 0x6c, 0xcf, 0xd7, 0x6d, 0x83, 0x12, 0x23, - 0x98, 0x03, 0xb2, 0x46, 0x92, 0xc8, 0x68, 0x2b, 0x31, 0xed, 0xf3, 0x67, 0x35, 0xe6, 0x93, 0x8f, - 0x43, 0x75, 0xcb, 0xf1, 0x7c, 0x3e, 0xce, 0xe7, 0xe3, 0xd6, 0xa5, 0x2b, 0x32, 0x1d, 0x03, 0x8a, - 0xa4, 0xc7, 0xb5, 0x34, 0xd8, 0xe3, 0xaa, 0x7d, 0x37, 0x0f, 0x23, 0xef, 0x97, 0x6d, 0xcf, 0x69, - 0x21, 0x91, 0x85, 0x8c, 0x21, 0x91, 0xc5, 0xc3, 0x84, 0x44, 0x6a, 0x3f, 0xce, 0x01, 0xdc, 0xb7, - 0x3d, 0xd7, 0xad, 0x78, 0xb4, 0x62, 0xe6, 0x76, 0x95, 0x1e, 0xab, 0xf8, 0xff, 0x4b, 0xea, 0x93, - 0x78, 0xa4, 0xe2, 0x5b, 0x39, 0x18, 0xd3, 0x63, 0xd1, 0x7f, 0x99, 0x55, 0xcb, 0x44, 0x30, 0x61, - 0xb0, 0xbf, 0x34, 0x9e, 0x8e, 0x09, 0xb6, 0xe4, 0xf9, 0xf0, 0xbc, 0xdf, 0xeb, 0x61, 0xb3, 0xef, - 0x3b, 0xa8, 0x97, 0xab, 0x39, 0x31, 0xca, 0x7b, 0x44, 0x5b, 0x16, 0x8e, 0x24, 0xda, 0x32, 0xba, - 0x8f, 0xac, 0x78, 0xd7, 0x7d, 0x64, 0x3b, 0x50, 0xdb, 0x74, 0x9d, 0x0e, 0x0f, 0x68, 0x94, 0x97, - 0xcb, 0x5e, 0xce, 0x30, 0xa7, 0x84, 0xd7, 0xaa, 0x87, 0x36, 0x9e, 0x05, 0x85, 0x8f, 0x21, 0x2b, - 0x6e, 0x16, 0x77, 0x04, 
0xd7, 0xf2, 0x51, 0x72, 0x0d, 0xc6, 0x92, 0x35, 0x81, 0x8e, 0x8a, 0x4d, - 0x3c, 0x88, 0xb1, 0x72, 0x7f, 0x82, 0x18, 0xb5, 0xef, 0x97, 0xd5, 0x00, 0xf6, 0xc0, 0x1d, 0x2d, - 0xf9, 0xfe, 0xdf, 0xab, 0x9b, 0xdc, 0x48, 0x5b, 0xb9, 0x8f, 0x1b, 0x69, 0xab, 0x47, 0xb3, 0x91, - 0xb6, 0x96, 0x6d, 0x23, 0x2d, 0x64, 0xdf, 0x48, 0x5b, 0xcf, 0xb6, 0x91, 0x76, 0x64, 0xa8, 0x8d, - 0xb4, 0xa3, 0x07, 0xda, 0x48, 0xbb, 0x57, 0x80, 0xc4, 0x2a, 0xf3, 0x03, 0x8f, 0xd2, 0x3f, 0x29, - 0x8f, 0xd2, 0xdb, 0x79, 0x08, 0x87, 0xcd, 0x43, 0x46, 0xdc, 0xbc, 0x0c, 0xd5, 0x8e, 0x7e, 0x7b, - 0x9e, 0x5a, 0xfa, 0x6e, 0x96, 0xfb, 0x53, 0x57, 0x24, 0x06, 0x06, 0x68, 0xc4, 0x03, 0x30, 0x83, - 0x33, 0xcc, 0x33, 0xdb, 0xe6, 0xc3, 0xe3, 0xd0, 0x85, 0xf5, 0x2f, 0x7c, 0xc7, 0x08, 0x1b, 0xed, - 0x0f, 0xf3, 0x20, 0x0f, 0xbb, 0x27, 0x14, 0x4a, 0x9b, 0xe6, 0x6d, 0xda, 0xca, 0x1c, 0x3f, 0x1b, - 0xb9, 0xd5, 0x5a, 0x38, 0x1f, 0x78, 0x02, 0x0a, 0x74, 0x6e, 0x55, 0x16, 0xce, 0x24, 0x29, 0xbf, - 0x0c, 0x56, 0xe5, 0xa8, 0x53, 0x4a, 0x5a, 0x95, 0x45, 0x12, 0x2a, 0x1e, 0xc2, 0x88, 0xcd, 0xe3, - 0x0a, 0x32, 0xfb, 0xce, 0x62, 0xf1, 0x09, 0xca, 0x88, 0xed, 0x89, 0x9d, 0xf4, 0x92, 0x47, 0xe3, - 0x4b, 0x3f, 0xfa, 0xc9, 0x85, 0x87, 0x7e, 0xfc, 0x93, 0x0b, 0x0f, 0xbd, 0xfb, 0x93, 0x0b, 0x0f, - 0x7d, 0x6d, 0xff, 0x42, 0xee, 0x47, 0xfb, 0x17, 0x72, 0x3f, 0xde, 0xbf, 0x90, 0x7b, 0x77, 0xff, - 0x42, 0xee, 0xcf, 0xf7, 0x2f, 0xe4, 0xfe, 0xcb, 0x5f, 0x5c, 0x78, 0xe8, 0x8b, 0xcf, 0x85, 0x55, - 0x98, 0x56, 0x55, 0x98, 0x56, 0x0c, 0xa7, 0xbb, 0xdb, 0xed, 0x69, 0x56, 0x85, 0x30, 0x45, 0x55, - 0xe1, 0x1f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xb9, 0x32, 0x6d, 0x43, 0x50, 0x97, 0x00, 0x00, + 0x95, 0x9e, 0xfa, 0xbf, 0xfb, 0x34, 0xc9, 0xe1, 0xdc, 0x19, 0x8d, 0x38, 0xa3, 0xd1, 0x70, 0xb6, + 0xb4, 0xd2, 0xce, 0x66, 0xbd, 0x64, 0xc4, 0xe8, 0xcf, 0xfb, 0x63, 0x89, 0x4d, 0x0e, 0x39, 0x9c, + 0x21, 0x67, 0xe8, 0xd3, 0xe4, 0x48, 0x5e, 0x65, 0xad, 0x14, 0xab, 0x2e, 0x9b, 0x25, 0x56, 0x57, + 0xb5, 0xaa, 0xaa, 0x39, 0x43, 0x6d, 0x02, 0xef, 0xae, 0x02, 0x48, 
0x41, 0x10, 0x24, 0xd8, 0x27, + 0x03, 0x81, 0x13, 0x24, 0x48, 0xe0, 0x07, 0xc3, 0x79, 0x08, 0xe0, 0x3c, 0x18, 0x48, 0x1c, 0x07, + 0x41, 0xe2, 0x04, 0xf9, 0xf1, 0x43, 0x80, 0x28, 0x2f, 0x44, 0xcc, 0x20, 0x0f, 0x09, 0x10, 0xc3, + 0x88, 0x91, 0xc4, 0x1e, 0x18, 0x71, 0x70, 0xff, 0xea, 0xaf, 0xab, 0x67, 0xc8, 0x2e, 0x72, 0x34, + 0xca, 0xea, 0xad, 0xea, 0x9e, 0x73, 0xbf, 0x73, 0xeb, 0xd6, 0xfd, 0x39, 0xf7, 0x9c, 0x73, 0xef, + 0x85, 0xe5, 0x8e, 0x15, 0xec, 0xf4, 0xb7, 0x66, 0x0c, 0xb7, 0x3b, 0xeb, 0xf4, 0xbb, 0x7a, 0xcf, + 0x73, 0xdf, 0xe3, 0x0f, 0xdb, 0xb6, 0x7b, 0x6f, 0xb6, 0xb7, 0xdb, 0x99, 0xd5, 0x7b, 0x96, 0x1f, + 0xa5, 0xec, 0xbd, 0xa4, 0xdb, 0xbd, 0x1d, 0xfd, 0xa5, 0xd9, 0x0e, 0x75, 0xa8, 0xa7, 0x07, 0xd4, + 0x9c, 0xe9, 0x79, 0x6e, 0xe0, 0x92, 0xd7, 0x22, 0xa0, 0x19, 0x05, 0x34, 0xa3, 0xb2, 0xcd, 0xf4, + 0x76, 0x3b, 0x33, 0x0c, 0x28, 0x4a, 0x51, 0x40, 0x97, 0x7e, 0x33, 0x56, 0x82, 0x8e, 0xdb, 0x71, + 0x67, 0x39, 0xde, 0x56, 0x7f, 0x9b, 0xbf, 0xf1, 0x17, 0xfe, 0x24, 0xe4, 0x5c, 0xd2, 0x76, 0x5f, + 0xf7, 0x67, 0x2c, 0x97, 0x15, 0x6b, 0xd6, 0x70, 0x3d, 0x3a, 0xbb, 0x37, 0x50, 0x96, 0x4b, 0x2f, + 0x47, 0x3c, 0x5d, 0xdd, 0xd8, 0xb1, 0x1c, 0xea, 0xed, 0xab, 0x6f, 0x99, 0xf5, 0xa8, 0xef, 0xf6, + 0x3d, 0x83, 0x1e, 0x2b, 0x97, 0x3f, 0xdb, 0xa5, 0x81, 0x9e, 0x25, 0x6b, 0x76, 0x58, 0x2e, 0xaf, + 0xef, 0x04, 0x56, 0x77, 0x50, 0xcc, 0xab, 0x8f, 0xca, 0xe0, 0x1b, 0x3b, 0xb4, 0xab, 0xa7, 0xf3, + 0x69, 0xdf, 0x07, 0x38, 0x37, 0xbf, 0xe5, 0x07, 0x9e, 0x6e, 0x04, 0xeb, 0xae, 0xb9, 0x41, 0xbb, + 0x3d, 0x5b, 0x0f, 0x28, 0xd9, 0x85, 0x3a, 0x2b, 0x9b, 0xa9, 0x07, 0xfa, 0x54, 0xe1, 0x6a, 0xe1, + 0x5a, 0x73, 0x6e, 0x7e, 0x66, 0xc4, 0x7f, 0x31, 0xb3, 0x26, 0x81, 0x5a, 0x63, 0x87, 0x07, 0xd3, + 0x75, 0xf5, 0x86, 0xa1, 0x00, 0xf2, 0xf5, 0x02, 0x8c, 0x39, 0xae, 0x49, 0xdb, 0xd4, 0xa6, 0x46, + 0xe0, 0x7a, 0x53, 0xc5, 0xab, 0xa5, 0x6b, 0xcd, 0xb9, 0xaf, 0x8e, 0x2c, 0x31, 0xe3, 0x8b, 0x66, + 0x6e, 0xc7, 0x04, 0x5c, 0x77, 0x02, 0x6f, 0xbf, 0x75, 0xfe, 0x07, 0x07, 0xd3, 0x4f, 0x1d, 0x1e, + 0x4c, 
0x8f, 0xc5, 0x49, 0x98, 0x28, 0x09, 0xd9, 0x84, 0x66, 0xe0, 0xda, 0xac, 0xca, 0x2c, 0xd7, + 0xf1, 0xa7, 0x4a, 0xbc, 0x60, 0x57, 0x66, 0x44, 0x6d, 0x33, 0xf1, 0x33, 0xac, 0xb9, 0xcc, 0xec, + 0xbd, 0x34, 0xb3, 0x11, 0xb2, 0xb5, 0xce, 0x49, 0xe0, 0x66, 0x94, 0xe6, 0x63, 0x1c, 0x87, 0x50, + 0x38, 0xe3, 0x53, 0xa3, 0xef, 0x59, 0xc1, 0xfe, 0x82, 0xeb, 0x04, 0xf4, 0x7e, 0x30, 0x55, 0xe6, + 0xb5, 0xfc, 0x62, 0x16, 0xf4, 0xba, 0x6b, 0xb6, 0x93, 0xdc, 0xad, 0x73, 0x87, 0x07, 0xd3, 0x67, + 0x52, 0x89, 0x98, 0xc6, 0x24, 0x0e, 0x4c, 0x5a, 0x5d, 0xbd, 0x43, 0xd7, 0xfb, 0xb6, 0xdd, 0xa6, + 0x86, 0x47, 0x03, 0x7f, 0xaa, 0xc2, 0x3f, 0xe1, 0x5a, 0x96, 0x9c, 0x55, 0xd7, 0xd0, 0xed, 0x3b, + 0x5b, 0xef, 0x51, 0x23, 0x40, 0xba, 0x4d, 0x3d, 0xea, 0x18, 0xb4, 0x35, 0x25, 0x3f, 0x66, 0x72, + 0x25, 0x85, 0x84, 0x03, 0xd8, 0x64, 0x19, 0xce, 0xf6, 0x3c, 0xcb, 0xe5, 0x45, 0xb0, 0x75, 0xdf, + 0xbf, 0xad, 0x77, 0xe9, 0x54, 0xf5, 0x6a, 0xe1, 0x5a, 0xa3, 0x75, 0x51, 0xc2, 0x9c, 0x5d, 0x4f, + 0x33, 0xe0, 0x60, 0x1e, 0x72, 0x0d, 0xea, 0x2a, 0x71, 0xaa, 0x76, 0xb5, 0x70, 0xad, 0x22, 0xda, + 0x8e, 0xca, 0x8b, 0x21, 0x95, 0x2c, 0x41, 0x5d, 0xdf, 0xde, 0xb6, 0x1c, 0xc6, 0x59, 0xe7, 0x55, + 0x78, 0x39, 0xeb, 0xd3, 0xe6, 0x25, 0x8f, 0xc0, 0x51, 0x6f, 0x18, 0xe6, 0x25, 0x37, 0x81, 0xf8, + 0xd4, 0xdb, 0xb3, 0x0c, 0x3a, 0x6f, 0x18, 0x6e, 0xdf, 0x09, 0x78, 0xd9, 0x1b, 0xbc, 0xec, 0x97, + 0x64, 0xd9, 0x49, 0x7b, 0x80, 0x03, 0x33, 0x72, 0x91, 0x37, 0x61, 0x52, 0x76, 0xbb, 0xa8, 0x16, + 0x80, 0x23, 0x9d, 0x67, 0x15, 0x89, 0x29, 0x1a, 0x0e, 0x70, 0x13, 0x13, 0x2e, 0xeb, 0xfd, 0xc0, + 0xed, 0x32, 0xc8, 0xa4, 0xd0, 0x0d, 0x77, 0x97, 0x3a, 0x53, 0xcd, 0xab, 0x85, 0x6b, 0xf5, 0xd6, + 0xd5, 0xc3, 0x83, 0xe9, 0xcb, 0xf3, 0x0f, 0xe1, 0xc3, 0x87, 0xa2, 0x90, 0x3b, 0xd0, 0x30, 0x1d, + 0x7f, 0xdd, 0xb5, 0x2d, 0x63, 0x7f, 0x6a, 0x8c, 0x17, 0xf0, 0x25, 0xf9, 0xa9, 0x8d, 0xc5, 0xdb, + 0x6d, 0x41, 0x78, 0x70, 0x30, 0x7d, 0x79, 0x70, 0x74, 0x9c, 0x09, 0xe9, 0x18, 0x61, 0x90, 0x35, + 0x0e, 0xb8, 0xe0, 0x3a, 0xdb, 0x56, 0x67, 
0x6a, 0x9c, 0xff, 0x8d, 0xab, 0x43, 0x1a, 0xf4, 0xe2, + 0xed, 0xb6, 0xe0, 0x6b, 0x8d, 0x4b, 0x71, 0xe2, 0x15, 0x23, 0x04, 0x62, 0xc2, 0x84, 0x1a, 0x57, + 0x17, 0x6c, 0xdd, 0xea, 0xfa, 0x53, 0x13, 0xbc, 0xf1, 0xfe, 0xea, 0x10, 0x4c, 0x8c, 0x33, 0xb7, + 0x2e, 0xc8, 0x4f, 0x99, 0x48, 0x24, 0xfb, 0x98, 0xc2, 0xbc, 0xf4, 0x06, 0x9c, 0x1d, 0x18, 0x1b, + 0xc8, 0x24, 0x94, 0x76, 0xe9, 0x3e, 0x1f, 0xfa, 0x1a, 0xc8, 0x1e, 0xc9, 0x79, 0xa8, 0xec, 0xe9, + 0x76, 0x9f, 0x4e, 0x15, 0x79, 0x9a, 0x78, 0xf9, 0xad, 0xe2, 0xeb, 0x05, 0xed, 0xef, 0x96, 0x60, + 0x4c, 0x8d, 0x38, 0x6d, 0xcb, 0xd9, 0x25, 0x6f, 0x41, 0xc9, 0x76, 0x3b, 0x72, 0xdc, 0xfc, 0x9d, + 0x91, 0x47, 0xb1, 0x55, 0xb7, 0xd3, 0xaa, 0x1d, 0x1e, 0x4c, 0x97, 0x56, 0xdd, 0x0e, 0x32, 0x44, + 0x62, 0x40, 0x65, 0x57, 0xdf, 0xde, 0xd5, 0x79, 0x19, 0x9a, 0x73, 0xad, 0x91, 0xa1, 0x6f, 0x31, + 0x14, 0x56, 0xd6, 0x56, 0xe3, 0xf0, 0x60, 0xba, 0xc2, 0x5f, 0x51, 0x60, 0x13, 0x17, 0x1a, 0x5b, + 0xb6, 0x6e, 0xec, 0xee, 0xb8, 0x36, 0x9d, 0x2a, 0xe5, 0x14, 0xd4, 0x52, 0x48, 0xe2, 0x37, 0x87, + 0xaf, 0x18, 0xc9, 0x20, 0x06, 0x54, 0xfb, 0xa6, 0x6f, 0x39, 0xbb, 0x72, 0x0c, 0x7c, 0x63, 0x64, + 0x69, 0x9b, 0x8b, 0xfc, 0x9b, 0xe0, 0xf0, 0x60, 0xba, 0x2a, 0x9e, 0x51, 0x42, 0x6b, 0x3f, 0x6e, + 0xc2, 0x84, 0xfa, 0x49, 0x77, 0xa9, 0x17, 0xd0, 0xfb, 0xe4, 0x2a, 0x94, 0x1d, 0xd6, 0x35, 0xf9, + 0x4f, 0x6e, 0x8d, 0xc9, 0xe6, 0x52, 0xe6, 0x5d, 0x92, 0x53, 0x58, 0xc9, 0x44, 0x53, 0x91, 0x15, + 0x3e, 0x7a, 0xc9, 0xda, 0x1c, 0x46, 0x94, 0x4c, 0x3c, 0xa3, 0x84, 0x26, 0xef, 0x40, 0x99, 0x7f, + 0xbc, 0xa8, 0xea, 0xdf, 0x1d, 0x5d, 0x04, 0xfb, 0xf4, 0x3a, 0xfb, 0x02, 0xfe, 0xe1, 0x1c, 0x94, + 0x35, 0xc5, 0xbe, 0xb9, 0x2d, 0x2b, 0xf6, 0x77, 0x72, 0x54, 0xec, 0x92, 0x68, 0x8a, 0x9b, 0x8b, + 0x4b, 0xc8, 0x10, 0xc9, 0x5f, 0x2f, 0xc0, 0x59, 0xc3, 0x75, 0x02, 0x9d, 0xa9, 0x1a, 0x6a, 0x92, + 0x9d, 0xaa, 0x70, 0x39, 0x37, 0x47, 0x96, 0xb3, 0x90, 0x46, 0x6c, 0x3d, 0xcd, 0xe6, 0x8c, 0x81, + 0x64, 0x1c, 0x94, 0x4d, 0xfe, 0x66, 0x01, 0x9e, 0x66, 0x63, 0xf9, 0x00, 0x33, 
0x9f, 0x81, 0x4e, + 0xb6, 0x54, 0x17, 0x0f, 0x0f, 0xa6, 0x9f, 0x5e, 0xc9, 0x12, 0x86, 0xd9, 0x65, 0x60, 0xa5, 0x3b, + 0xa7, 0x0f, 0xaa, 0x25, 0x7c, 0x76, 0x6b, 0xce, 0xad, 0x9e, 0xa4, 0xaa, 0xd3, 0x7a, 0x56, 0x36, + 0xe5, 0x2c, 0xcd, 0x0e, 0xb3, 0x4a, 0x41, 0xae, 0x43, 0x6d, 0xcf, 0xb5, 0xfb, 0x5d, 0xea, 0x4f, + 0xd5, 0xf9, 0x10, 0x7b, 0x29, 0x6b, 0x88, 0xbd, 0xcb, 0x59, 0x5a, 0x67, 0x24, 0x7c, 0x4d, 0xbc, + 0xfb, 0xa8, 0xf2, 0x12, 0x0b, 0xaa, 0xb6, 0xd5, 0xb5, 0x02, 0x9f, 0x4f, 0x9c, 0xcd, 0xb9, 0xeb, + 0x23, 0x7f, 0x96, 0xe8, 0xa2, 0xab, 0x1c, 0x4c, 0xf4, 0x1a, 0xf1, 0x8c, 0x52, 0x00, 0x1b, 0x0a, + 0x7d, 0x43, 0xb7, 0xc5, 0xc4, 0xda, 0x9c, 0xfb, 0xd2, 0xe8, 0xdd, 0x86, 0xa1, 0xb4, 0xc6, 0xe5, + 0x37, 0x55, 0xf8, 0x2b, 0x0a, 0x6c, 0xf2, 0xfb, 0x30, 0x91, 0xf8, 0x9b, 0xfe, 0x54, 0x93, 0xd7, + 0xce, 0x73, 0x59, 0xb5, 0x13, 0x72, 0x45, 0x33, 0x4f, 0xa2, 0x85, 0xf8, 0x98, 0x02, 0x23, 0xb7, + 0xa0, 0xee, 0x5b, 0x26, 0x35, 0x74, 0xcf, 0x9f, 0x1a, 0x3b, 0x0a, 0xf0, 0xa4, 0x04, 0xae, 0xb7, + 0x65, 0x36, 0x0c, 0x01, 0xc8, 0x0c, 0x40, 0x4f, 0xf7, 0x02, 0x4b, 0x28, 0xaa, 0xe3, 0x5c, 0x69, + 0x9a, 0x38, 0x3c, 0x98, 0x86, 0xf5, 0x30, 0x15, 0x63, 0x1c, 0x8c, 0x9f, 0xe5, 0x5d, 0x71, 0x7a, + 0xfd, 0x40, 0x4c, 0xac, 0x0d, 0xc1, 0xdf, 0x0e, 0x53, 0x31, 0xc6, 0x41, 0xbe, 0x5d, 0x80, 0x67, + 0xa3, 0xd7, 0xc1, 0x4e, 0x76, 0xe6, 0xc4, 0x3b, 0xd9, 0xf4, 0xe1, 0xc1, 0xf4, 0xb3, 0xed, 0xe1, + 0x22, 0xf1, 0x61, 0xe5, 0xd1, 0xde, 0x82, 0xf1, 0xf9, 0x7e, 0xb0, 0xe3, 0x7a, 0xd6, 0x07, 0x5c, + 0xe9, 0x26, 0x4b, 0x50, 0x09, 0xb8, 0xf2, 0x24, 0xe6, 0xe5, 0x17, 0xb2, 0xaa, 0x5a, 0x28, 0xb2, + 0xb7, 0xe8, 0xbe, 0xd2, 0x06, 0xc4, 0xfc, 0x28, 0x94, 0x29, 0x91, 0x5d, 0xfb, 0xcb, 0x05, 0xa8, + 0xb5, 0x74, 0x63, 0xd7, 0xdd, 0xde, 0x26, 0x6f, 0x43, 0xdd, 0x72, 0x02, 0xea, 0xed, 0xe9, 0xb6, + 0x84, 0x9d, 0x89, 0xc1, 0x86, 0x2b, 0xb1, 0xe8, 0xbb, 0xd9, 0x9a, 0x87, 0x09, 0x5a, 0xec, 0xcb, + 0xb5, 0x02, 0xd7, 0x47, 0x57, 0x24, 0x06, 0x86, 0x68, 0x64, 0x1a, 0x2a, 0x7e, 0x40, 0x7b, 0x3e, + 0x9f, 0x79, 0xc6, 
0x45, 0x31, 0xda, 0x2c, 0x01, 0x45, 0xba, 0xf6, 0x77, 0x0a, 0xd0, 0x68, 0xe9, + 0xbe, 0x65, 0xb0, 0xaf, 0x24, 0x0b, 0x50, 0xee, 0xfb, 0xd4, 0x3b, 0xde, 0xb7, 0xf1, 0xc9, 0x62, + 0xd3, 0xa7, 0x1e, 0xf2, 0xcc, 0xe4, 0x0e, 0xd4, 0x7b, 0xba, 0xef, 0xdf, 0x73, 0x3d, 0x53, 0x4e, + 0x78, 0x47, 0x04, 0x12, 0xca, 0xb9, 0xcc, 0x8a, 0x21, 0x88, 0xd6, 0x84, 0x68, 0xc6, 0xd7, 0x7e, + 0x5a, 0x80, 0x73, 0xad, 0xfe, 0xf6, 0x36, 0xf5, 0xa4, 0x2e, 0x2a, 0xb5, 0x3c, 0x0a, 0x15, 0x8f, + 0x9a, 0x96, 0x2f, 0xcb, 0xbe, 0x38, 0x72, 0x0b, 0x42, 0x86, 0x22, 0x95, 0x4a, 0x5e, 0x5f, 0x3c, + 0x01, 0x05, 0x3a, 0xe9, 0x43, 0xe3, 0x3d, 0x1a, 0xf8, 0x81, 0x47, 0xf5, 0xae, 0xfc, 0xba, 0x1b, + 0x23, 0x8b, 0xba, 0x49, 0x83, 0x36, 0x47, 0x8a, 0xeb, 0xb0, 0x61, 0x22, 0x46, 0x92, 0xb4, 0xef, + 0x57, 0x60, 0x6c, 0xc1, 0xed, 0x6e, 0x59, 0x0e, 0x35, 0xaf, 0x9b, 0x1d, 0x4a, 0xde, 0x85, 0x32, + 0x35, 0x3b, 0x54, 0x7e, 0xed, 0xe8, 0xd3, 0x3d, 0x03, 0x8b, 0x94, 0x16, 0xf6, 0x86, 0x1c, 0x98, + 0xac, 0xc2, 0xc4, 0xb6, 0xe7, 0x76, 0xc5, 0x08, 0xba, 0xb1, 0xdf, 0x93, 0x1a, 0x6b, 0xeb, 0x57, + 0xd5, 0xa8, 0xb4, 0x94, 0xa0, 0x3e, 0x38, 0x98, 0x86, 0xe8, 0x0d, 0x53, 0x79, 0xc9, 0xdb, 0x30, + 0x15, 0xa5, 0x84, 0x43, 0xc9, 0x02, 0x5b, 0x44, 0x70, 0x8d, 0xa5, 0xd2, 0xba, 0x7c, 0x78, 0x30, + 0x3d, 0xb5, 0x34, 0x84, 0x07, 0x87, 0xe6, 0x26, 0x1f, 0x15, 0x60, 0x32, 0x22, 0x8a, 0xe1, 0x5d, + 0x2a, 0x2a, 0x27, 0x34, 0x6f, 0xf0, 0xd5, 0xd6, 0x52, 0x4a, 0x04, 0x0e, 0x08, 0x25, 0x4b, 0x30, + 0x16, 0xb8, 0xb1, 0xfa, 0xaa, 0xf0, 0xfa, 0xd2, 0x94, 0x79, 0x60, 0xc3, 0x1d, 0x5a, 0x5b, 0x89, + 0x7c, 0x04, 0xe1, 0x82, 0x7a, 0x4f, 0xd5, 0x54, 0x95, 0xd7, 0xd4, 0xa5, 0xc3, 0x83, 0xe9, 0x0b, + 0x1b, 0x99, 0x1c, 0x38, 0x24, 0x27, 0xf9, 0xa3, 0x02, 0x4c, 0x28, 0x92, 0xac, 0xa3, 0xda, 0x49, + 0xd6, 0x11, 0x61, 0x2d, 0x62, 0x23, 0x21, 0x00, 0x53, 0x02, 0xb5, 0x9f, 0x95, 0xa1, 0x11, 0x0e, + 0xb0, 0xe4, 0x79, 0xa8, 0xf0, 0x85, 0xbf, 0xd4, 0x9b, 0xc3, 0x99, 0x93, 0xdb, 0x07, 0x50, 0xd0, + 0xc8, 0x0b, 0x50, 0x33, 0xdc, 0x6e, 0x57, 0x77, 0x4c, 
0x6e, 0xcc, 0x69, 0xb4, 0x9a, 0x4c, 0x61, + 0x58, 0x10, 0x49, 0xa8, 0x68, 0xe4, 0x32, 0x94, 0x75, 0xaf, 0x23, 0xec, 0x2a, 0x0d, 0x31, 0x1e, + 0xcd, 0x7b, 0x1d, 0x1f, 0x79, 0x2a, 0xf9, 0x22, 0x94, 0xa8, 0xb3, 0x37, 0x55, 0x1e, 0xae, 0x91, + 0x5c, 0x77, 0xf6, 0xee, 0xea, 0x5e, 0xab, 0x29, 0xcb, 0x50, 0xba, 0xee, 0xec, 0x21, 0xcb, 0x43, + 0x56, 0xa1, 0x46, 0x9d, 0x3d, 0xf6, 0xef, 0xa5, 0xc1, 0xe3, 0x57, 0x86, 0x64, 0x67, 0x2c, 0x52, + 0x39, 0x0f, 0xf5, 0x1a, 0x99, 0x8c, 0x0a, 0x82, 0x7c, 0x05, 0xc6, 0x84, 0x8a, 0xb3, 0xc6, 0xfe, + 0x89, 0x3f, 0x55, 0xe5, 0x90, 0xd3, 0xc3, 0x75, 0x24, 0xce, 0x17, 0x19, 0x98, 0x62, 0x89, 0x3e, + 0x26, 0xa0, 0xc8, 0x57, 0xa0, 0xa1, 0xd6, 0xa3, 0xea, 0xcf, 0x66, 0xda, 0x66, 0xd4, 0x22, 0x16, + 0xe9, 0xfb, 0x7d, 0xcb, 0xa3, 0x5d, 0xea, 0x04, 0x7e, 0xeb, 0xac, 0x5a, 0xad, 0x2b, 0xaa, 0x8f, + 0x11, 0x1a, 0xd9, 0x1a, 0x34, 0x32, 0x09, 0x0b, 0xc9, 0xf3, 0x43, 0x46, 0xf5, 0x11, 0x2c, 0x4c, + 0x5f, 0x85, 0x33, 0xa1, 0x15, 0x48, 0x1a, 0x12, 0x84, 0xcd, 0xe4, 0x65, 0x96, 0x7d, 0x25, 0x49, + 0x7a, 0x70, 0x30, 0xfd, 0x5c, 0x86, 0x29, 0x21, 0x62, 0xc0, 0x34, 0x98, 0xf6, 0xbd, 0x12, 0x0c, + 0x6a, 0xff, 0xc9, 0x4a, 0x2b, 0x9c, 0x74, 0xa5, 0xa5, 0x3f, 0x48, 0x0c, 0x9f, 0xaf, 0xcb, 0x6c, + 0xf9, 0x3f, 0x2a, 0xeb, 0xc7, 0x94, 0x4e, 0xfa, 0xc7, 0x3c, 0x29, 0x7d, 0x47, 0xfb, 0xb8, 0x0c, + 0x13, 0x8b, 0x3a, 0xed, 0xba, 0xce, 0x23, 0xd7, 0x42, 0x85, 0x27, 0x62, 0x2d, 0x74, 0x0d, 0xea, + 0x1e, 0xed, 0xd9, 0x96, 0xa1, 0x0b, 0xe5, 0x4b, 0xda, 0x1e, 0x51, 0xa6, 0x61, 0x48, 0x1d, 0xb2, + 0x06, 0x2e, 0x3d, 0x91, 0x6b, 0xe0, 0xf2, 0xa7, 0xbf, 0x06, 0xd6, 0xfe, 0xa8, 0x08, 0x5c, 0x51, + 0x21, 0x57, 0xa1, 0xcc, 0x26, 0xe1, 0xb4, 0xe5, 0x85, 0x37, 0x1c, 0x4e, 0x21, 0x97, 0xa0, 0x18, + 0xb8, 0xb2, 0xe7, 0x81, 0xa4, 0x17, 0x37, 0x5c, 0x2c, 0x06, 0x2e, 0xf9, 0x00, 0xc0, 0x70, 0x1d, + 0xd3, 0x52, 0x26, 0xf9, 0x7c, 0x1f, 0xb6, 0xe4, 0x7a, 0xf7, 0x74, 0xcf, 0x5c, 0x08, 0x11, 0xc5, + 0x2a, 0x28, 0x7a, 0xc7, 0x98, 0x34, 0xf2, 0x06, 0x54, 0x5d, 0x67, 0xa9, 0x6f, 0xdb, 0xbc, 
0x42, + 0x1b, 0xad, 0x5f, 0x63, 0x4b, 0xd3, 0x3b, 0x3c, 0xe5, 0xc1, 0xc1, 0xf4, 0x45, 0xa1, 0xdf, 0xb2, + 0xb7, 0xb7, 0x3c, 0x2b, 0xb0, 0x9c, 0x4e, 0x3b, 0xf0, 0xf4, 0x80, 0x76, 0xf6, 0x51, 0x66, 0xd3, + 0xfe, 0xa4, 0x00, 0xcd, 0x25, 0xeb, 0x3e, 0x35, 0xdf, 0xb2, 0x1c, 0xd3, 0xbd, 0x47, 0x10, 0xaa, + 0x36, 0x75, 0x3a, 0xc1, 0xce, 0x88, 0xeb, 0x07, 0xb1, 0x36, 0xe6, 0x08, 0x28, 0x91, 0xc8, 0x2c, + 0x34, 0x84, 0xf6, 0x69, 0x39, 0x1d, 0x5e, 0x87, 0xf5, 0x68, 0xd0, 0x6b, 0x2b, 0x02, 0x46, 0x3c, + 0xda, 0x3e, 0x9c, 0x1d, 0xa8, 0x06, 0x62, 0x42, 0x39, 0xd0, 0x3b, 0x6a, 0x7c, 0x5d, 0x1a, 0xb9, + 0x82, 0x37, 0xf4, 0x4e, 0xac, 0x72, 0xf9, 0x1c, 0xbf, 0xa1, 0xb3, 0x39, 0x9e, 0xa1, 0x6b, 0xbf, + 0x28, 0x40, 0x7d, 0xa9, 0xef, 0x18, 0x7c, 0x89, 0xf6, 0x68, 0x8b, 0x9c, 0x52, 0x18, 0x8a, 0x99, + 0x0a, 0x43, 0x1f, 0xaa, 0xbb, 0xf7, 0x42, 0x85, 0xa2, 0x39, 0xb7, 0x36, 0x7a, 0xab, 0x90, 0x45, + 0x9a, 0xb9, 0xc5, 0xf1, 0x84, 0xc3, 0x68, 0x42, 0x16, 0xa8, 0x7a, 0xeb, 0x2d, 0x2e, 0x54, 0x0a, + 0xbb, 0xf4, 0x45, 0x68, 0xc6, 0xd8, 0x8e, 0x65, 0x3b, 0xfe, 0x47, 0x65, 0xa8, 0x2e, 0xb7, 0xdb, + 0xf3, 0xeb, 0x2b, 0xe4, 0x15, 0x68, 0x4a, 0x5f, 0xc2, 0xed, 0xa8, 0x0e, 0x42, 0x57, 0x52, 0x3b, + 0x22, 0x61, 0x9c, 0x8f, 0xa9, 0x63, 0x1e, 0xd5, 0xed, 0xae, 0xec, 0x2c, 0xa1, 0x3a, 0x86, 0x2c, + 0x11, 0x05, 0x8d, 0xe8, 0x30, 0xc1, 0x56, 0x78, 0xac, 0x0a, 0xc5, 0xea, 0x4d, 0x76, 0x9b, 0x23, + 0xae, 0xef, 0xb8, 0x92, 0xb8, 0x99, 0x00, 0xc0, 0x14, 0x20, 0x79, 0x1d, 0xea, 0x7a, 0x3f, 0xd8, + 0xe1, 0x0a, 0xb4, 0xe8, 0x1b, 0x97, 0xb9, 0xab, 0x45, 0xa6, 0x3d, 0x38, 0x98, 0x1e, 0xbb, 0x85, + 0xad, 0x57, 0xd4, 0x3b, 0x86, 0xdc, 0xac, 0x70, 0x6a, 0xc5, 0x28, 0x0b, 0x57, 0x39, 0x76, 0xe1, + 0xd6, 0x13, 0x00, 0x98, 0x02, 0x24, 0xef, 0xc0, 0xd8, 0x2e, 0xdd, 0x0f, 0xf4, 0x2d, 0x29, 0xa0, + 0x7a, 0x1c, 0x01, 0x93, 0x4c, 0x85, 0xbb, 0x15, 0xcb, 0x8e, 0x09, 0x30, 0xe2, 0xc3, 0xf9, 0x5d, + 0xea, 0x6d, 0x51, 0xcf, 0x95, 0xab, 0x4f, 0x29, 0xa4, 0x76, 0x1c, 0x21, 0x53, 0x87, 0x07, 0xd3, + 0xe7, 0x6f, 0x65, 0xc0, 0x60, 
0x26, 0xb8, 0xf6, 0xf3, 0x22, 0x9c, 0x59, 0x16, 0xce, 0x5c, 0xd7, + 0x13, 0x93, 0x30, 0xb9, 0x08, 0x25, 0xaf, 0xd7, 0xe7, 0x2d, 0xa7, 0x24, 0xcc, 0xb5, 0xb8, 0xbe, + 0x89, 0x2c, 0x8d, 0xbc, 0x0d, 0x75, 0x53, 0x0e, 0x19, 0x72, 0xf1, 0x3b, 0x92, 0xa1, 0x42, 0xbd, + 0x61, 0x88, 0xc6, 0x34, 0xfd, 0xae, 0xdf, 0x69, 0x5b, 0x1f, 0x50, 0xb9, 0x1e, 0xe4, 0x9a, 0xfe, + 0x9a, 0x48, 0x42, 0x45, 0x63, 0xb3, 0xea, 0x2e, 0xdd, 0x17, 0xab, 0xa1, 0x72, 0x34, 0xab, 0xde, + 0x92, 0x69, 0x18, 0x52, 0xc9, 0xb4, 0xea, 0x2c, 0xac, 0x15, 0x94, 0xc5, 0x4a, 0xfe, 0x2e, 0x4b, + 0x90, 0xfd, 0x86, 0x0d, 0x99, 0xef, 0x59, 0x41, 0x40, 0x3d, 0xf9, 0x1b, 0x47, 0x1a, 0x32, 0x6f, + 0x72, 0x04, 0x94, 0x48, 0xe4, 0x37, 0xa0, 0xc1, 0xc1, 0x5b, 0xb6, 0xbb, 0xc5, 0x7f, 0x5c, 0x43, + 0xac, 0xe9, 0xef, 0xaa, 0x44, 0x8c, 0xe8, 0xda, 0x2f, 0x8b, 0x70, 0x61, 0x99, 0x06, 0x42, 0xab, + 0x59, 0xa4, 0x3d, 0xdb, 0xdd, 0x67, 0xaa, 0x25, 0xd2, 0xf7, 0xc9, 0x9b, 0x00, 0x96, 0xbf, 0xd5, + 0xde, 0x33, 0x78, 0x3f, 0x10, 0x7d, 0xf8, 0xaa, 0xec, 0x92, 0xb0, 0xd2, 0x6e, 0x49, 0xca, 0x83, + 0xc4, 0x1b, 0xc6, 0xf2, 0x44, 0xcb, 0xab, 0xe2, 0x43, 0x96, 0x57, 0x6d, 0x80, 0x5e, 0xa4, 0xa0, + 0x96, 0x38, 0xe7, 0x9f, 0x53, 0x62, 0x8e, 0xa3, 0x9b, 0xc6, 0x60, 0xf2, 0xa8, 0x8c, 0x0e, 0x4c, + 0x9a, 0x74, 0x5b, 0xef, 0xdb, 0x41, 0xa8, 0x54, 0xcb, 0x4e, 0x7c, 0x74, 0xbd, 0x3c, 0x74, 0x34, + 0x2f, 0xa6, 0x90, 0x70, 0x00, 0x5b, 0xfb, 0x6e, 0x09, 0x2e, 0x2d, 0xd3, 0x20, 0xb4, 0xb8, 0xc8, + 0xd1, 0xb1, 0xdd, 0xa3, 0x06, 0xfb, 0x0b, 0x1f, 0x15, 0xa0, 0x6a, 0xeb, 0x5b, 0xd4, 0x66, 0xb3, + 0x17, 0xfb, 0x9a, 0x77, 0x47, 0x9e, 0x08, 0x86, 0x4b, 0x99, 0x59, 0xe5, 0x12, 0x52, 0x53, 0x83, + 0x48, 0x44, 0x29, 0x9e, 0x0d, 0xea, 0x86, 0xdd, 0xf7, 0x03, 0xea, 0xad, 0xbb, 0x5e, 0x20, 0xf5, + 0xc9, 0x70, 0x50, 0x5f, 0x88, 0x48, 0x18, 0xe7, 0x23, 0x73, 0x00, 0x86, 0x6d, 0x51, 0x27, 0xe0, + 0xb9, 0x44, 0xbf, 0x22, 0xea, 0xff, 0x2e, 0x84, 0x14, 0x8c, 0x71, 0x31, 0x51, 0x5d, 0xd7, 0xb1, + 0x02, 0x57, 0x88, 0x2a, 0x27, 0x45, 0xad, 0x45, 0x24, 0x8c, 0xf3, 
0xf1, 0x6c, 0x34, 0xf0, 0x2c, + 0xc3, 0xe7, 0xd9, 0x2a, 0xa9, 0x6c, 0x11, 0x09, 0xe3, 0x7c, 0x6c, 0xce, 0x8b, 0x7d, 0xff, 0xb1, + 0xe6, 0xbc, 0x6f, 0x35, 0xe0, 0x4a, 0xa2, 0x5a, 0x03, 0x3d, 0xa0, 0xdb, 0x7d, 0xbb, 0x4d, 0x03, + 0xf5, 0x03, 0x47, 0x9c, 0x0b, 0xff, 0x6a, 0xf4, 0xdf, 0x45, 0x08, 0x89, 0x71, 0x32, 0xff, 0x7d, + 0xa0, 0x80, 0x47, 0xfa, 0xf7, 0xb3, 0xd0, 0x70, 0xf4, 0xc0, 0xe7, 0x1d, 0x57, 0xf6, 0xd1, 0x50, + 0x0d, 0xbb, 0xad, 0x08, 0x18, 0xf1, 0x90, 0x75, 0x38, 0x2f, 0xab, 0xf8, 0xfa, 0xfd, 0x9e, 0xeb, + 0x05, 0xd4, 0x13, 0x79, 0xe5, 0x74, 0x2a, 0xf3, 0x9e, 0x5f, 0xcb, 0xe0, 0xc1, 0xcc, 0x9c, 0x64, + 0x0d, 0xce, 0x19, 0xc2, 0xad, 0x4e, 0x6d, 0x57, 0x37, 0x15, 0xa0, 0x30, 0x70, 0x85, 0x4b, 0xa3, + 0x85, 0x41, 0x16, 0xcc, 0xca, 0x97, 0x6e, 0xcd, 0xd5, 0x91, 0x5a, 0x73, 0x6d, 0x94, 0xd6, 0x5c, + 0x1f, 0xad, 0x35, 0x37, 0x8e, 0xd6, 0x9a, 0x59, 0xcd, 0xb3, 0x76, 0x44, 0x3d, 0xa6, 0x9e, 0x88, + 0x19, 0x36, 0x16, 0xb5, 0x11, 0xd6, 0x7c, 0x3b, 0x83, 0x07, 0x33, 0x73, 0x92, 0x2d, 0xb8, 0x24, + 0xd2, 0xaf, 0x3b, 0x86, 0xb7, 0xdf, 0x63, 0x13, 0x4f, 0x0c, 0xb7, 0x99, 0xb0, 0x30, 0x5e, 0x6a, + 0x0f, 0xe5, 0xc4, 0x87, 0xa0, 0x90, 0xdf, 0x86, 0x71, 0xf1, 0x97, 0xd6, 0xf4, 0x1e, 0x87, 0x15, + 0x31, 0x1c, 0x4f, 0x4b, 0xd8, 0xf1, 0x85, 0x38, 0x11, 0x93, 0xbc, 0x64, 0x1e, 0xce, 0xf4, 0xf6, + 0x0c, 0xf6, 0xb8, 0xb2, 0x7d, 0x9b, 0x52, 0x93, 0x9a, 0xdc, 0x69, 0xd4, 0x68, 0x3d, 0xa3, 0x0c, + 0x1d, 0xeb, 0x49, 0x32, 0xa6, 0xf9, 0xc9, 0xeb, 0x30, 0xe6, 0x07, 0xba, 0x17, 0x48, 0xb3, 0xde, + 0xd4, 0x84, 0x88, 0x71, 0x51, 0x56, 0xaf, 0x76, 0x8c, 0x86, 0x09, 0xce, 0xcc, 0xf9, 0xe2, 0xcc, + 0xe9, 0xcd, 0x17, 0x79, 0x46, 0xab, 0x7f, 0x59, 0x84, 0xab, 0xcb, 0x34, 0x58, 0x73, 0x1d, 0x69, + 0x14, 0xcd, 0x9a, 0xf6, 0x8f, 0x64, 0x13, 0x4d, 0x4e, 0xda, 0xc5, 0x13, 0x9d, 0xb4, 0x4b, 0x27, + 0x34, 0x69, 0x97, 0x4f, 0x71, 0xd2, 0xfe, 0x27, 0x45, 0x78, 0x26, 0x51, 0x93, 0xeb, 0xae, 0xa9, + 0x06, 0xfc, 0xcf, 0x2b, 0xf0, 0x08, 0x15, 0xf8, 0x40, 0xe8, 0x9d, 0xdc, 0xad, 0x95, 0xd2, 0x78, + 0x3e, 
0x4c, 0x6b, 0x3c, 0xef, 0xe4, 0x99, 0xf9, 0x32, 0x24, 0x1c, 0x69, 0xc6, 0xbb, 0x09, 0xc4, + 0x93, 0x4e, 0x38, 0x61, 0xfa, 0x89, 0x29, 0x3d, 0x61, 0x10, 0x1d, 0x0e, 0x70, 0x60, 0x46, 0x2e, + 0xd2, 0x86, 0xa7, 0x7d, 0xea, 0x04, 0x96, 0x43, 0xed, 0x24, 0x9c, 0xd0, 0x86, 0x9e, 0x93, 0x70, + 0x4f, 0xb7, 0xb3, 0x98, 0x30, 0x3b, 0x6f, 0x9e, 0x71, 0xe0, 0xdf, 0x00, 0x57, 0x39, 0x45, 0xd5, + 0x9c, 0x98, 0xc6, 0xf2, 0x51, 0x5a, 0x63, 0x79, 0x37, 0xff, 0x7f, 0x1b, 0x4d, 0x5b, 0x99, 0x03, + 0xe0, 0x7f, 0x21, 0xae, 0xae, 0x84, 0x93, 0x34, 0x86, 0x14, 0x8c, 0x71, 0xb1, 0x09, 0x48, 0xd5, + 0x73, 0x5c, 0x53, 0x09, 0x27, 0xa0, 0x76, 0x9c, 0x88, 0x49, 0xde, 0xa1, 0xda, 0x4e, 0x65, 0x64, + 0x6d, 0xe7, 0x26, 0x90, 0x84, 0xe1, 0x51, 0xe0, 0x55, 0x93, 0x31, 0x9c, 0x2b, 0x03, 0x1c, 0x98, + 0x91, 0x6b, 0x48, 0x53, 0xae, 0x9d, 0x6c, 0x53, 0xae, 0x8f, 0xde, 0x94, 0xc9, 0xbb, 0x70, 0x91, + 0x8b, 0x92, 0xf5, 0x93, 0x04, 0x16, 0x7a, 0xcf, 0xaf, 0x48, 0xe0, 0x8b, 0x38, 0x8c, 0x11, 0x87, + 0x63, 0xb0, 0xff, 0x63, 0x78, 0xd4, 0x64, 0xc2, 0x75, 0x7b, 0xb8, 0x4e, 0xb4, 0x90, 0xc1, 0x83, + 0x99, 0x39, 0x59, 0x13, 0x0b, 0x58, 0x33, 0xd4, 0xb7, 0x6c, 0x6a, 0xca, 0x18, 0xd6, 0xb0, 0x89, + 0x6d, 0xac, 0xb6, 0x25, 0x05, 0x63, 0x5c, 0x59, 0x6a, 0xca, 0xd8, 0x31, 0xd5, 0x94, 0x65, 0x6e, + 0xa5, 0xdf, 0x4e, 0x68, 0x43, 0x52, 0xd7, 0x09, 0xa3, 0x92, 0x17, 0xd2, 0x0c, 0x38, 0x98, 0x87, + 0x6b, 0x89, 0x86, 0x67, 0xf5, 0x02, 0x3f, 0x89, 0x35, 0x91, 0xd2, 0x12, 0x33, 0x78, 0x30, 0x33, + 0x27, 0xd3, 0xcf, 0x77, 0xa8, 0x6e, 0x07, 0x3b, 0x49, 0xc0, 0x33, 0x49, 0xfd, 0xfc, 0xc6, 0x20, + 0x0b, 0x66, 0xe5, 0xcb, 0x9c, 0x90, 0x26, 0x9f, 0x4c, 0xb5, 0xea, 0x8f, 0x4b, 0x70, 0x71, 0x99, + 0x06, 0x61, 0x78, 0xcf, 0xe7, 0x66, 0x94, 0x4f, 0xc1, 0x8c, 0xf2, 0xcd, 0x0a, 0x9c, 0x5b, 0xa6, + 0xc1, 0x80, 0x36, 0xf6, 0xa7, 0xb4, 0xfa, 0xd7, 0xe0, 0x5c, 0x14, 0x51, 0xd6, 0x0e, 0x5c, 0x4f, + 0xcc, 0xe5, 0xa9, 0xd5, 0x72, 0x7b, 0x90, 0x05, 0xb3, 0xf2, 0x91, 0xaf, 0xc0, 0x33, 0x7c, 0xaa, + 0x77, 0x3a, 0xc2, 0x3e, 0x2b, 0x8c, 0x09, 
0xb1, 0x3d, 0x11, 0xd3, 0x12, 0xf2, 0x99, 0x76, 0x36, + 0x1b, 0x0e, 0xcb, 0x4f, 0xbe, 0x06, 0x63, 0x3d, 0xab, 0x47, 0x6d, 0xcb, 0xe1, 0xfa, 0x59, 0xee, + 0x90, 0x90, 0xf5, 0x18, 0x58, 0xb4, 0x80, 0x8b, 0xa7, 0x62, 0x42, 0x60, 0x66, 0x4b, 0xad, 0x9f, + 0x62, 0x4b, 0xfd, 0x9f, 0x45, 0xa8, 0x2d, 0x7b, 0x6e, 0xbf, 0xd7, 0xda, 0x27, 0x1d, 0xa8, 0xde, + 0xe3, 0xce, 0x33, 0xe9, 0x9a, 0x1a, 0x3d, 0x2a, 0x5b, 0xf8, 0xe0, 0x22, 0x95, 0x48, 0xbc, 0xa3, + 0x84, 0x67, 0x8d, 0x78, 0x97, 0xee, 0x53, 0x53, 0xfa, 0xd0, 0xc2, 0x46, 0x7c, 0x8b, 0x25, 0xa2, + 0xa0, 0x91, 0x2e, 0x9c, 0xd1, 0x6d, 0xdb, 0xbd, 0x47, 0xcd, 0x55, 0x3d, 0xa0, 0x0e, 0xf5, 0x95, + 0x4b, 0xf2, 0xb8, 0x66, 0x69, 0xee, 0xd7, 0x9f, 0x4f, 0x42, 0x61, 0x1a, 0x9b, 0xbc, 0x07, 0x35, + 0x3f, 0x70, 0x3d, 0xa5, 0x6c, 0x35, 0xe7, 0x16, 0x46, 0xff, 0xe9, 0xad, 0x2f, 0xb7, 0x05, 0x94, + 0xb0, 0xd9, 0xcb, 0x17, 0x54, 0x02, 0xb4, 0x6f, 0x14, 0x00, 0x6e, 0x6c, 0x6c, 0xac, 0x4b, 0xf7, + 0x82, 0x09, 0x65, 0xbd, 0x1f, 0x3a, 0x2a, 0x47, 0x77, 0x08, 0x26, 0xc2, 0x32, 0xa5, 0x0f, 0xaf, + 0x1f, 0xec, 0x20, 0x47, 0x27, 0xbf, 0x0e, 0x35, 0xa9, 0x20, 0xcb, 0x6a, 0x0f, 0x43, 0x0b, 0xa4, + 0x12, 0x8d, 0x8a, 0xae, 0xfd, 0xc3, 0x22, 0xc0, 0x8a, 0x69, 0xd3, 0xb6, 0x0a, 0xa4, 0x6f, 0x04, + 0x3b, 0x1e, 0xf5, 0x77, 0x5c, 0xdb, 0x1c, 0xd1, 0x9b, 0xca, 0x6d, 0xfe, 0x1b, 0x0a, 0x04, 0x23, + 0x3c, 0x62, 0xc2, 0x98, 0x1f, 0xd0, 0x9e, 0x8a, 0xd4, 0x1c, 0xd1, 0x89, 0x32, 0x29, 0xec, 0x22, + 0x11, 0x0e, 0x26, 0x50, 0x89, 0x0e, 0x4d, 0xcb, 0x31, 0x44, 0x07, 0x69, 0xed, 0x8f, 0xd8, 0x90, + 0xce, 0xb0, 0x15, 0xc7, 0x4a, 0x04, 0x83, 0x71, 0x4c, 0xed, 0x27, 0x45, 0xb8, 0xc0, 0xe5, 0xb1, + 0x62, 0x24, 0xe2, 0x31, 0xc9, 0x5f, 0x18, 0xd8, 0xf4, 0xf7, 0x67, 0x8f, 0x26, 0x5a, 0xec, 0x19, + 0x5b, 0xa3, 0x81, 0x1e, 0xe9, 0x73, 0x51, 0x5a, 0x6c, 0xa7, 0x5f, 0x1f, 0xca, 0x3e, 0x1b, 0xaf, + 0x44, 0xed, 0xb5, 0x47, 0x6e, 0x42, 0xd9, 0x1f, 0xc0, 0x47, 0xaf, 0xd0, 0x6b, 0xcc, 0x47, 0x2d, + 0x2e, 0x8e, 0xfc, 0x25, 0xa8, 0xfa, 0x81, 0x1e, 0xf4, 0x55, 0xd7, 0xdc, 0x3c, 
0x69, 0xc1, 0x1c, + 0x3c, 0x1a, 0x47, 0xc4, 0x3b, 0x4a, 0xa1, 0xda, 0x4f, 0x0a, 0x70, 0x29, 0x3b, 0xe3, 0xaa, 0xe5, + 0x07, 0xe4, 0xcf, 0x0f, 0x54, 0xfb, 0x11, 0xff, 0x38, 0xcb, 0xcd, 0x2b, 0x3d, 0x8c, 0x0b, 0x57, + 0x29, 0xb1, 0x2a, 0x0f, 0xa0, 0x62, 0x05, 0xb4, 0xab, 0xd6, 0x97, 0x77, 0x4e, 0xf8, 0xd3, 0x63, + 0x53, 0x3b, 0x93, 0x82, 0x42, 0x98, 0xf6, 0x71, 0x71, 0xd8, 0x27, 0xf3, 0xe9, 0xc3, 0x4e, 0xc6, + 0xfc, 0xde, 0xca, 0x17, 0xf3, 0x9b, 0x2c, 0xd0, 0x60, 0xe8, 0xef, 0x5f, 0x1c, 0x0c, 0xfd, 0xbd, + 0x93, 0x3f, 0xf4, 0x37, 0x55, 0x0d, 0x43, 0x23, 0x80, 0x3f, 0x29, 0xc1, 0xe5, 0x87, 0x35, 0x1b, + 0x36, 0x9f, 0xc9, 0xd6, 0x99, 0x77, 0x3e, 0x7b, 0x78, 0x3b, 0x24, 0x73, 0x50, 0xe9, 0xed, 0xe8, + 0xbe, 0x52, 0xca, 0xd4, 0x82, 0xa5, 0xb2, 0xce, 0x12, 0x1f, 0xb0, 0x41, 0x83, 0x2b, 0x73, 0xfc, + 0x15, 0x05, 0x2b, 0x1b, 0x8e, 0xbb, 0xd4, 0xf7, 0x23, 0x9b, 0x40, 0x38, 0x1c, 0xaf, 0x89, 0x64, + 0x54, 0x74, 0x12, 0x40, 0x55, 0x98, 0x98, 0xe5, 0xcc, 0x34, 0x7a, 0x20, 0x57, 0x46, 0x98, 0x78, + 0xf4, 0x51, 0xd2, 0x5b, 0x21, 0x65, 0x91, 0x19, 0x28, 0x07, 0x51, 0xd0, 0xae, 0x5a, 0x9a, 0x97, + 0x33, 0xf4, 0x53, 0xce, 0xc7, 0x16, 0xf6, 0xee, 0x16, 0x37, 0xaa, 0x9b, 0xd2, 0x7f, 0x6e, 0xb9, + 0x0e, 0x57, 0xc8, 0x4a, 0xd1, 0xc2, 0xfe, 0xce, 0x00, 0x07, 0x66, 0xe4, 0xd2, 0xfe, 0x7d, 0x1d, + 0x2e, 0x64, 0xb7, 0x07, 0x56, 0x6f, 0x7b, 0xd4, 0xf3, 0x19, 0x76, 0x21, 0x59, 0x6f, 0x77, 0x45, + 0x32, 0x2a, 0xfa, 0x67, 0x3a, 0xe0, 0xec, 0x9b, 0x05, 0xb8, 0xe8, 0x49, 0x1f, 0xd1, 0xe3, 0x08, + 0x3a, 0x7b, 0x4e, 0x98, 0x33, 0x86, 0x08, 0xc4, 0xe1, 0x65, 0x21, 0x7f, 0xaf, 0x00, 0x53, 0xdd, + 0x94, 0x9d, 0xe3, 0x14, 0xf7, 0xad, 0xf1, 0xa8, 0xf8, 0xb5, 0x21, 0xf2, 0x70, 0x68, 0x49, 0xc8, + 0xd7, 0xa0, 0xd9, 0x63, 0xed, 0xc2, 0x0f, 0xa8, 0x63, 0xa8, 0xad, 0x6b, 0xa3, 0xf7, 0xa4, 0xf5, + 0x08, 0x4b, 0x85, 0xa2, 0x09, 0xfd, 0x20, 0x46, 0xc0, 0xb8, 0xc4, 0x27, 0x7c, 0xa3, 0xda, 0x35, + 0xa8, 0xfb, 0x34, 0x08, 0x2c, 0xa7, 0x23, 0xd6, 0x1b, 0x0d, 0xd1, 0x57, 0xda, 0x32, 0x0d, 0x43, + 0x2a, 0xf9, 0x0d, 
0x68, 0x70, 0x97, 0xd3, 0xbc, 0xd7, 0xf1, 0xa7, 0x1a, 0x3c, 0x5c, 0x6c, 0x5c, + 0x04, 0xc0, 0xc9, 0x44, 0x8c, 0xe8, 0xe4, 0x65, 0x18, 0xdb, 0xe2, 0xdd, 0x57, 0xee, 0x5d, 0x16, + 0x36, 0x2e, 0xae, 0xad, 0xb5, 0x62, 0xe9, 0x98, 0xe0, 0x22, 0x73, 0x00, 0x34, 0xf4, 0xcb, 0xa5, + 0xed, 0x59, 0x91, 0xc7, 0x0e, 0x63, 0x5c, 0xe4, 0x39, 0x28, 0x05, 0xb6, 0xcf, 0x6d, 0x58, 0xf5, + 0x68, 0x09, 0xba, 0xb1, 0xda, 0x46, 0x96, 0xae, 0xfd, 0xb2, 0x00, 0x67, 0x52, 0x9b, 0x4b, 0x58, + 0x96, 0xbe, 0x67, 0xcb, 0x61, 0x24, 0xcc, 0xb2, 0x89, 0xab, 0xc8, 0xd2, 0xc9, 0xbb, 0x52, 0x2d, + 0x2f, 0xe6, 0x3c, 0xa6, 0xe1, 0xb6, 0x1e, 0xf8, 0x4c, 0x0f, 0x1f, 0xd0, 0xc8, 0xb9, 0x9b, 0x2f, + 0x2a, 0x8f, 0x9c, 0x07, 0x62, 0x6e, 0xbe, 0x88, 0x86, 0x09, 0xce, 0x94, 0xc1, 0xaf, 0x7c, 0x14, + 0x83, 0x9f, 0xf6, 0x27, 0xc5, 0x58, 0x0d, 0x48, 0xcd, 0xfe, 0x11, 0x35, 0xf0, 0x22, 0x9b, 0x40, + 0xc3, 0xc9, 0xbd, 0x11, 0x9f, 0xff, 0xf8, 0x64, 0x2c, 0xa9, 0xe4, 0x2d, 0x51, 0xf7, 0xa5, 0x9c, + 0x9b, 0x61, 0x37, 0x56, 0xdb, 0x22, 0xba, 0x4a, 0xfd, 0xb5, 0xf0, 0x17, 0x94, 0x4f, 0xe9, 0x17, + 0x68, 0xff, 0xba, 0x04, 0xcd, 0x9b, 0xee, 0xd6, 0x67, 0x24, 0x82, 0x3a, 0x7b, 0x9a, 0x2a, 0x7e, + 0x8a, 0xd3, 0xd4, 0x26, 0x3c, 0x13, 0x04, 0x76, 0x9b, 0x1a, 0xae, 0x63, 0xfa, 0xf3, 0xdb, 0x01, + 0xf5, 0x96, 0x2c, 0xc7, 0xf2, 0x77, 0xa8, 0x29, 0xdd, 0x49, 0xcf, 0x1e, 0x1e, 0x4c, 0x3f, 0xb3, + 0xb1, 0xb1, 0x9a, 0xc5, 0x82, 0xc3, 0xf2, 0xf2, 0x61, 0x43, 0xec, 0x04, 0xe4, 0x3b, 0x65, 0x64, + 0xcc, 0x8d, 0x18, 0x36, 0x62, 0xe9, 0x98, 0xe0, 0xd2, 0xbe, 0x53, 0x84, 0x46, 0xb8, 0x01, 0x9f, + 0xbc, 0x00, 0xb5, 0x2d, 0xcf, 0xdd, 0xa5, 0x9e, 0xf0, 0xdc, 0xc9, 0x9d, 0x32, 0x2d, 0x91, 0x84, + 0x8a, 0x46, 0x9e, 0x87, 0x4a, 0xe0, 0xf6, 0x2c, 0x23, 0x6d, 0x50, 0xdb, 0x60, 0x89, 0x28, 0x68, + 0xa7, 0xd7, 0xc0, 0x5f, 0x4c, 0xa8, 0x76, 0x8d, 0xa1, 0xca, 0xd8, 0x3b, 0x50, 0xf6, 0x75, 0xdf, + 0x96, 0xf3, 0x69, 0x8e, 0xbd, 0xec, 0xf3, 0xed, 0x55, 0xb9, 0x97, 0x7d, 0xbe, 0xbd, 0x8a, 0x1c, + 0x54, 0xfb, 0x59, 0x11, 0x9a, 0xa2, 0xde, 0xc4, 0xa8, 
0x70, 0x92, 0x35, 0xf7, 0x06, 0x0f, 0xa5, + 0xf0, 0xfb, 0x5d, 0xea, 0x71, 0x33, 0x93, 0x1c, 0xe4, 0xe2, 0xfe, 0x81, 0x88, 0x18, 0x86, 0x53, + 0x44, 0x49, 0xaa, 0xea, 0xcb, 0xa7, 0x58, 0xf5, 0x95, 0x23, 0x55, 0x7d, 0xf5, 0x34, 0xaa, 0xfe, + 0xa3, 0x22, 0x34, 0x56, 0xad, 0x6d, 0x6a, 0xec, 0x1b, 0x36, 0xdf, 0x13, 0x68, 0x52, 0x9b, 0x06, + 0x74, 0xd9, 0xd3, 0x0d, 0xba, 0x4e, 0x3d, 0x8b, 0x1f, 0x50, 0xc3, 0xfa, 0x07, 0x1f, 0x81, 0xe4, + 0x9e, 0xc0, 0xc5, 0x21, 0x3c, 0x38, 0x34, 0x37, 0x59, 0x81, 0x31, 0x93, 0xfa, 0x96, 0x47, 0xcd, + 0xf5, 0xd8, 0x42, 0xe5, 0x05, 0x35, 0xd5, 0x2c, 0xc6, 0x68, 0x0f, 0x0e, 0xa6, 0xc7, 0x95, 0x81, + 0x52, 0xac, 0x58, 0x12, 0x59, 0x59, 0x97, 0xef, 0xe9, 0x7d, 0x3f, 0xab, 0x8c, 0xb1, 0x2e, 0xbf, + 0x9e, 0xcd, 0x82, 0xc3, 0xf2, 0x6a, 0x15, 0x28, 0xad, 0xba, 0x1d, 0xed, 0xe3, 0x12, 0x84, 0x27, + 0x19, 0x91, 0xbf, 0x52, 0x80, 0xa6, 0xee, 0x38, 0x6e, 0x20, 0x4f, 0x09, 0x12, 0x1e, 0x78, 0xcc, + 0x7d, 0x60, 0xd2, 0xcc, 0x7c, 0x04, 0x2a, 0x9c, 0xb7, 0xa1, 0x43, 0x39, 0x46, 0xc1, 0xb8, 0x6c, + 0xd2, 0x4f, 0xf9, 0x93, 0xd7, 0xf2, 0x97, 0xe2, 0x08, 0xde, 0xe3, 0x4b, 0x5f, 0x82, 0xc9, 0x74, + 0x61, 0x8f, 0xe3, 0x0e, 0xca, 0xe5, 0x98, 0x2f, 0x02, 0x44, 0x31, 0x25, 0x8f, 0xc1, 0x88, 0x65, + 0x25, 0x8c, 0x58, 0xcb, 0xa3, 0x57, 0x70, 0x58, 0xe8, 0xa1, 0x86, 0xab, 0xf7, 0x53, 0x86, 0xab, + 0x95, 0x93, 0x10, 0xf6, 0x70, 0x63, 0xd5, 0x3f, 0x28, 0xc0, 0x64, 0xc4, 0x2c, 0x77, 0xc8, 0xbe, + 0x06, 0xe3, 0x1e, 0xd5, 0xcd, 0x96, 0x1e, 0x18, 0x3b, 0x3c, 0xd4, 0xbb, 0xc0, 0x63, 0xb3, 0xcf, + 0x1e, 0x1e, 0x4c, 0x8f, 0x63, 0x9c, 0x80, 0x49, 0x3e, 0xa2, 0x43, 0x93, 0x25, 0x6c, 0x58, 0x5d, + 0xea, 0xf6, 0x83, 0x11, 0xad, 0xa6, 0x7c, 0xc1, 0x82, 0x11, 0x0c, 0xc6, 0x31, 0xb5, 0x4f, 0x0a, + 0x30, 0x11, 0x2f, 0xf0, 0xa9, 0x5b, 0xd4, 0x76, 0x92, 0x16, 0xb5, 0x85, 0x13, 0xf8, 0x27, 0x43, + 0xac, 0x68, 0x3f, 0xaf, 0xc7, 0x3f, 0x8d, 0x5b, 0xce, 0xe2, 0xc6, 0x82, 0xc2, 0x43, 0x8d, 0x05, + 0x9f, 0xfd, 0xc3, 0x6b, 0x86, 0x69, 0xb9, 0xe5, 0x27, 0x58, 0xcb, 0xfd, 0x34, 0x4f, 0xc0, 
0x89, + 0x9d, 0xe2, 0x52, 0xcd, 0x71, 0x8a, 0x4b, 0x37, 0x3c, 0xc5, 0xa5, 0x76, 0x62, 0x83, 0xce, 0x51, + 0x4e, 0x72, 0xa9, 0x3f, 0xd6, 0x93, 0x5c, 0x1a, 0xa7, 0x75, 0x92, 0x0b, 0xe4, 0x3d, 0xc9, 0xe5, + 0xc3, 0x02, 0x4c, 0x98, 0x89, 0x1d, 0xb3, 0xdc, 0xb6, 0x90, 0x67, 0xaa, 0x49, 0x6e, 0xc0, 0x15, + 0x5b, 0xa6, 0x92, 0x69, 0x98, 0x12, 0xa9, 0xfd, 0xfd, 0x5a, 0x7c, 0x1e, 0x78, 0xdc, 0xa6, 0xea, + 0x57, 0x93, 0xa6, 0xea, 0xab, 0x69, 0x53, 0xf5, 0x99, 0x58, 0x14, 0x69, 0xdc, 0x5c, 0xfd, 0x85, + 0xd8, 0xf0, 0x58, 0xe2, 0x27, 0xa7, 0x84, 0x35, 0x9d, 0x31, 0x44, 0x7e, 0x01, 0xea, 0xbe, 0x3a, + 0x73, 0x52, 0x2c, 0x6c, 0xa2, 0xff, 0xa2, 0xce, 0x83, 0x0c, 0x39, 0x98, 0x26, 0xee, 0x51, 0xdd, + 0x77, 0x9d, 0xb4, 0x26, 0x8e, 0x3c, 0x15, 0x25, 0x35, 0x6e, 0x32, 0xaf, 0x3e, 0xc2, 0x64, 0xae, + 0x43, 0xd3, 0xd6, 0xfd, 0x60, 0xb3, 0x67, 0xea, 0x01, 0x35, 0x65, 0x7f, 0xfb, 0x33, 0x47, 0x9b, + 0xab, 0xd8, 0xfc, 0x17, 0x29, 0x84, 0xab, 0x11, 0x0c, 0xc6, 0x31, 0x89, 0x09, 0x63, 0xec, 0x95, + 0xf7, 0x06, 0x73, 0x5e, 0x1d, 0x01, 0x70, 0x1c, 0x19, 0xa1, 0xa5, 0x67, 0x35, 0x86, 0x83, 0x09, + 0xd4, 0x21, 0x56, 0xf5, 0xc6, 0x28, 0x56, 0x75, 0xf2, 0xdb, 0x42, 0xd9, 0xd8, 0x57, 0x3f, 0x8c, + 0x5b, 0xe3, 0xc6, 0xa3, 0xa8, 0x42, 0x8c, 0x13, 0x31, 0xc9, 0x4b, 0xe6, 0xe1, 0x8c, 0xd1, 0xf7, + 0x3c, 0x1e, 0x47, 0x24, 0xb3, 0x37, 0x79, 0xf6, 0x30, 0x5e, 0x6c, 0x21, 0x49, 0xc6, 0x34, 0x3f, + 0x83, 0xe8, 0xcb, 0x9a, 0x54, 0x10, 0x63, 0x49, 0x88, 0xcd, 0x24, 0x19, 0xd3, 0xfc, 0x7c, 0xa3, + 0x84, 0x40, 0xbd, 0xa1, 0xfb, 0x3b, 0x32, 0xd8, 0x2c, 0xda, 0x28, 0x11, 0x91, 0x30, 0xce, 0x47, + 0xe6, 0x00, 0x04, 0x12, 0xcf, 0x35, 0x91, 0x8c, 0xc1, 0xdc, 0x0c, 0x29, 0x18, 0xe3, 0xd2, 0x3e, + 0x6c, 0x40, 0xf3, 0xb6, 0x1e, 0x58, 0x7b, 0x94, 0xfb, 0xbc, 0x4e, 0xc7, 0xf1, 0xf0, 0xb7, 0x0a, + 0x70, 0x21, 0x19, 0xd8, 0x78, 0x8a, 0xde, 0x07, 0x7e, 0x4c, 0x0a, 0x66, 0x4a, 0xc3, 0x21, 0xa5, + 0xe0, 0x7e, 0x88, 0x81, 0x38, 0xc9, 0xd3, 0xf6, 0x43, 0xb4, 0x87, 0x09, 0xc4, 0xe1, 0x65, 0xf9, + 0xac, 0xf8, 0x21, 0x9e, 0xec, 
0xd3, 0xf4, 0x52, 0x5e, 0x92, 0xda, 0x13, 0xe3, 0x25, 0xa9, 0x3f, + 0x11, 0xaa, 0x69, 0x2f, 0xe6, 0x25, 0x69, 0xe4, 0x8c, 0xd6, 0x91, 0x7b, 0x01, 0x04, 0xda, 0x30, + 0x6f, 0x0b, 0xdf, 0xc6, 0xaf, 0xac, 0xd7, 0x4c, 0xa3, 0xdb, 0xd2, 0x7d, 0xcb, 0x90, 0x4a, 0x42, + 0x8e, 0xd3, 0x43, 0xd5, 0xf9, 0x66, 0xc2, 0xa9, 0xcf, 0x5f, 0x51, 0x60, 0x47, 0xc7, 0xb9, 0x15, + 0x73, 0x1d, 0xe7, 0x46, 0x16, 0xa0, 0xec, 0xec, 0xd2, 0xfd, 0xe3, 0x6d, 0x88, 0xe7, 0x2b, 0x95, + 0xdb, 0xb7, 0xe8, 0x3e, 0xf2, 0xcc, 0xda, 0x77, 0x8a, 0x00, 0xec, 0xf3, 0x8f, 0xe6, 0xaf, 0xf8, + 0x75, 0xa8, 0xf9, 0x7d, 0x6e, 0x59, 0x90, 0xea, 0x4d, 0x14, 0xe2, 0x24, 0x92, 0x51, 0xd1, 0xc9, + 0xf3, 0x50, 0x79, 0xbf, 0x4f, 0xfb, 0xca, 0xf9, 0x1e, 0x2a, 0xb7, 0x5f, 0x66, 0x89, 0x28, 0x68, + 0xa7, 0x67, 0x7b, 0x54, 0x7e, 0x8d, 0xca, 0x69, 0xf9, 0x35, 0x1a, 0x50, 0xbb, 0xed, 0xf2, 0x88, + 0x49, 0xed, 0xbf, 0x17, 0x01, 0xa2, 0x88, 0x34, 0xf2, 0x8d, 0x02, 0x3c, 0x1d, 0x76, 0xb8, 0x40, + 0xac, 0x51, 0xf8, 0x81, 0xbd, 0xb9, 0x7d, 0x1c, 0x59, 0x9d, 0x9d, 0x8f, 0x40, 0xeb, 0x59, 0xe2, + 0x30, 0xbb, 0x14, 0x04, 0xa1, 0x4e, 0xbb, 0xbd, 0x60, 0x7f, 0xd1, 0xf2, 0x64, 0x0b, 0xcc, 0x0c, + 0x7c, 0xbc, 0x2e, 0x79, 0x44, 0x56, 0xb9, 0x90, 0xe6, 0x9d, 0x48, 0x51, 0x30, 0xc4, 0x21, 0x3b, + 0x50, 0x77, 0xdc, 0x77, 0x7d, 0x56, 0x1d, 0xb2, 0x39, 0xbe, 0x39, 0x7a, 0x95, 0x8b, 0x6a, 0x15, + 0x36, 0x71, 0xf9, 0x82, 0x35, 0x47, 0x56, 0xf6, 0xd7, 0x8b, 0x70, 0x2e, 0xa3, 0x1e, 0xc8, 0x9b, + 0x30, 0x29, 0x83, 0xff, 0xa2, 0x93, 0xab, 0x0b, 0xd1, 0xc9, 0xd5, 0xed, 0x14, 0x0d, 0x07, 0xb8, + 0xc9, 0xbb, 0x00, 0xba, 0x61, 0x50, 0xdf, 0x5f, 0x73, 0x4d, 0xa5, 0xbd, 0xbf, 0xc1, 0xd4, 0x97, + 0xf9, 0x30, 0xf5, 0xc1, 0xc1, 0xf4, 0x6f, 0x66, 0xc5, 0xf3, 0xa6, 0xea, 0x39, 0xca, 0x80, 0x31, + 0x48, 0xf2, 0x55, 0x00, 0xb1, 0x50, 0x0d, 0x8f, 0x1c, 0x78, 0x84, 0x75, 0x67, 0x46, 0x1d, 0xee, + 0x34, 0xf3, 0xe5, 0xbe, 0xee, 0x04, 0x56, 0xb0, 0x2f, 0x4e, 0x78, 0xb9, 0x1b, 0xa2, 0x60, 0x0c, + 0x51, 0xfb, 0x17, 0x45, 0xa8, 0x2b, 0xbb, 0xf2, 0x63, 0x30, 0x26, 
0x76, 0x12, 0xc6, 0xc4, 0x13, + 0x8a, 0xe0, 0xcd, 0x32, 0x25, 0xba, 0x29, 0x53, 0xe2, 0x72, 0x7e, 0x51, 0x0f, 0x37, 0x24, 0x7e, + 0xbb, 0x08, 0x13, 0x8a, 0x35, 0xaf, 0x19, 0xf1, 0x77, 0xe1, 0x8c, 0xf0, 0xbc, 0xaf, 0xe9, 0xf7, + 0xc5, 0x61, 0x37, 0xbc, 0xc2, 0xca, 0x22, 0x68, 0xb6, 0x95, 0x24, 0x61, 0x9a, 0x97, 0x35, 0x6b, + 0x91, 0xb4, 0xc9, 0x56, 0x5d, 0xc2, 0x57, 0x27, 0x56, 0x87, 0xbc, 0x59, 0xb7, 0x52, 0x34, 0x1c, + 0xe0, 0x4e, 0xdb, 0x31, 0xcb, 0xa7, 0x60, 0xc7, 0xfc, 0x0f, 0x05, 0x18, 0x8b, 0xea, 0xeb, 0xd4, + 0xad, 0x98, 0xdb, 0x49, 0x2b, 0xe6, 0x7c, 0xee, 0xe6, 0x30, 0xc4, 0x86, 0xf9, 0xd7, 0x6a, 0x90, + 0x08, 0x24, 0x27, 0x5b, 0x70, 0xc9, 0xca, 0x0c, 0x87, 0x8b, 0x8d, 0x36, 0xe1, 0xce, 0xe8, 0x95, + 0xa1, 0x9c, 0xf8, 0x10, 0x14, 0xd2, 0x87, 0xfa, 0x1e, 0xf5, 0x02, 0xcb, 0xa0, 0xea, 0xfb, 0x96, + 0x73, 0xab, 0x64, 0xd2, 0x52, 0x1b, 0xd6, 0xe9, 0x5d, 0x29, 0x00, 0x43, 0x51, 0x64, 0x0b, 0x2a, + 0xd4, 0xec, 0x50, 0x75, 0xfc, 0x50, 0xce, 0xc3, 0x3d, 0xc3, 0xfa, 0x64, 0x6f, 0x3e, 0x0a, 0x68, + 0xe2, 0x43, 0xc3, 0x56, 0x9e, 0x38, 0xd9, 0x0e, 0x47, 0x57, 0xb0, 0x42, 0x9f, 0x5e, 0x74, 0x32, + 0x41, 0x98, 0x84, 0x91, 0x1c, 0xb2, 0x1b, 0x9a, 0x04, 0x2b, 0x27, 0x34, 0x78, 0x3c, 0xc4, 0x20, + 0xe8, 0x43, 0xe3, 0x9e, 0x1e, 0x50, 0xaf, 0xab, 0x7b, 0xbb, 0x72, 0xb5, 0x31, 0xfa, 0x17, 0xbe, + 0xa5, 0x90, 0xa2, 0x2f, 0x0c, 0x93, 0x30, 0x92, 0x43, 0x5c, 0x68, 0x04, 0x52, 0x7d, 0x56, 0x76, + 0xcf, 0xd1, 0x85, 0x2a, 0x45, 0xdc, 0x97, 0x01, 0xe5, 0xea, 0x15, 0x23, 0x19, 0x64, 0x2f, 0x71, + 0xfe, 0xb2, 0x38, 0x75, 0xbb, 0x95, 0xc3, 0x7e, 0x2e, 0xa1, 0xa2, 0xe9, 0x26, 0xfb, 0x1c, 0x67, + 0xed, 0x7f, 0x55, 0xa2, 0x61, 0xf9, 0x71, 0x5b, 0xf5, 0x5e, 0x4e, 0x5a, 0xf5, 0xae, 0xa4, 0xad, + 0x7a, 0x29, 0x87, 0xee, 0xf1, 0x43, 0x50, 0x53, 0xf6, 0xb4, 0xf2, 0x29, 0xd8, 0xd3, 0x5e, 0x82, + 0xe6, 0x1e, 0x1f, 0x09, 0xc4, 0x59, 0x46, 0x15, 0x3e, 0x8d, 0xf0, 0x91, 0xfd, 0x6e, 0x94, 0x8c, + 0x71, 0x1e, 0x96, 0x45, 0xde, 0x38, 0x11, 0x1e, 0x06, 0x2b, 0xb3, 0xb4, 0xa3, 0x64, 0x8c, 0xf3, + 0xf0, 
0xe8, 0x35, 0xcb, 0xd9, 0x15, 0x19, 0x6a, 0x3c, 0x83, 0x88, 0x5e, 0x53, 0x89, 0x18, 0xd1, + 0xc9, 0x35, 0xa8, 0xf7, 0xcd, 0x6d, 0xc1, 0x5b, 0xe7, 0xbc, 0x5c, 0xc3, 0xdc, 0x5c, 0x5c, 0x92, + 0x67, 0x2b, 0x29, 0x2a, 0x2b, 0x49, 0x57, 0xef, 0x29, 0x02, 0x5f, 0x1b, 0xca, 0x92, 0xac, 0x45, + 0xc9, 0x18, 0xe7, 0x21, 0xbf, 0x05, 0x13, 0x1e, 0x35, 0xfb, 0x06, 0x0d, 0x73, 0x09, 0x73, 0x1c, + 0x11, 0x57, 0x6b, 0xc4, 0x29, 0x98, 0xe2, 0x1c, 0x62, 0x15, 0x6c, 0x8e, 0x64, 0x15, 0xfc, 0x12, + 0x4c, 0x98, 0x9e, 0x6e, 0x39, 0xd4, 0xbc, 0xe3, 0x70, 0xaf, 0xbd, 0x8c, 0xa1, 0x0b, 0x4d, 0xf4, + 0x8b, 0x09, 0x2a, 0xa6, 0xb8, 0xb5, 0x1f, 0x17, 0x80, 0x0c, 0x46, 0x8b, 0x93, 0x1d, 0xa8, 0x3a, + 0xdc, 0x7a, 0x96, 0xfb, 0xf8, 0xe9, 0x98, 0x11, 0x4e, 0x0c, 0x6b, 0x32, 0x41, 0xe2, 0x13, 0x07, + 0xea, 0xf4, 0x7e, 0x40, 0x3d, 0x27, 0xdc, 0x3d, 0x72, 0x32, 0x47, 0x5d, 0x8b, 0xd5, 0x84, 0x44, + 0xc6, 0x50, 0x86, 0xf6, 0xd3, 0x22, 0x34, 0x63, 0x7c, 0x8f, 0x5a, 0x94, 0xf2, 0x0d, 0xec, 0xc2, + 0x68, 0xb5, 0xe9, 0xd9, 0xb2, 0x87, 0xc6, 0x36, 0xb0, 0x4b, 0x12, 0xae, 0x62, 0x9c, 0x8f, 0xcc, + 0x01, 0x74, 0x75, 0x3f, 0xa0, 0x1e, 0x9f, 0xbd, 0x53, 0xdb, 0xc6, 0xd7, 0x42, 0x0a, 0xc6, 0xb8, + 0xc8, 0x55, 0x79, 0x58, 0x79, 0x39, 0x79, 0xcc, 0xdf, 0x90, 0x93, 0xc8, 0x2b, 0x27, 0x70, 0x12, + 0x39, 0xe9, 0xc0, 0xa4, 0x2a, 0xb5, 0xa2, 0x1e, 0xef, 0x10, 0x38, 0xb1, 0xfe, 0x49, 0x41, 0xe0, + 0x00, 0xa8, 0xf6, 0x9d, 0x02, 0x8c, 0x27, 0x4c, 0x26, 0xe2, 0x80, 0x3e, 0xb5, 0xd7, 0x21, 0x71, + 0x40, 0x5f, 0x6c, 0x8b, 0xc2, 0x8b, 0x50, 0x15, 0x15, 0x94, 0x0e, 0x61, 0x14, 0x55, 0x88, 0x92, + 0xca, 0xc6, 0x42, 0x69, 0x94, 0x4d, 0x8f, 0x85, 0xd2, 0x6a, 0x8b, 0x8a, 0x2e, 0x9c, 0x1b, 0xa2, + 0x74, 0x83, 0xce, 0x0d, 0x91, 0x8e, 0x21, 0x87, 0xf6, 0x3d, 0x5e, 0xee, 0xc0, 0xdb, 0x0f, 0xd7, + 0x82, 0x1d, 0xa8, 0xc9, 0xb0, 0x35, 0xd9, 0x35, 0xde, 0xcc, 0x61, 0xc7, 0xe1, 0x38, 0x32, 0x40, + 0x4b, 0x37, 0x76, 0xef, 0x6c, 0x6f, 0xa3, 0x42, 0x27, 0xd7, 0xa1, 0xe1, 0x3a, 0x4b, 0xba, 0x65, + 0xf7, 0x3d, 0x35, 0x33, 0xfc, 0x1a, 0x1b, 
0xeb, 0xee, 0xa8, 0xc4, 0x07, 0x07, 0xd3, 0x17, 0xc2, + 0x97, 0x44, 0x21, 0x31, 0xca, 0xa9, 0xfd, 0x9f, 0x12, 0xf0, 0x90, 0x25, 0xf2, 0x1a, 0x34, 0xba, + 0xd4, 0xd8, 0xd1, 0x1d, 0xcb, 0x57, 0x47, 0x8c, 0xb2, 0xf5, 0x7d, 0x63, 0x4d, 0x25, 0x3e, 0x60, + 0x55, 0x30, 0xdf, 0x5e, 0xe5, 0xbb, 0x02, 0x22, 0x5e, 0x62, 0x40, 0xb5, 0xe3, 0xfb, 0x7a, 0xcf, + 0xca, 0xed, 0x31, 0x17, 0x47, 0x3a, 0x8a, 0x61, 0x40, 0x3c, 0xa3, 0x84, 0x26, 0x06, 0x54, 0x7a, + 0xb6, 0x6e, 0x39, 0xb9, 0xaf, 0xd6, 0x61, 0x5f, 0xb0, 0xce, 0x90, 0x84, 0x51, 0x8b, 0x3f, 0xa2, + 0xc0, 0x26, 0x7d, 0x68, 0xfa, 0x86, 0xa7, 0x77, 0xfd, 0x1d, 0x7d, 0xee, 0x95, 0x57, 0x73, 0xab, + 0x89, 0x91, 0x28, 0x31, 0x6b, 0x2d, 0xe0, 0xfc, 0x5a, 0xfb, 0xc6, 0xfc, 0xdc, 0x2b, 0xaf, 0x62, + 0x5c, 0x4e, 0x5c, 0xec, 0x2b, 0x2f, 0xcd, 0xc9, 0x9e, 0x7b, 0xe2, 0x62, 0x5f, 0x79, 0x69, 0x0e, + 0xe3, 0x72, 0xb4, 0xff, 0x5d, 0x80, 0x46, 0xc8, 0x4b, 0x36, 0x01, 0xd8, 0x18, 0x22, 0x0f, 0x61, + 0x3c, 0xd6, 0x85, 0x08, 0xdc, 0x2e, 0xb0, 0x19, 0x66, 0xc6, 0x18, 0x50, 0xc6, 0x29, 0x95, 0xc5, + 0x93, 0x3e, 0xa5, 0x72, 0x16, 0x1a, 0x3b, 0xba, 0x63, 0xfa, 0x3b, 0xfa, 0xae, 0x18, 0x4a, 0x63, + 0xe7, 0xb6, 0xde, 0x50, 0x04, 0x8c, 0x78, 0xb4, 0x7f, 0x56, 0x05, 0xe1, 0xe6, 0x66, 0x9d, 0xdd, + 0xb4, 0x7c, 0x11, 0x67, 0x5d, 0xe0, 0x39, 0xc3, 0xce, 0xbe, 0x28, 0xd3, 0x31, 0xe4, 0x20, 0x17, + 0xa1, 0xd4, 0xb5, 0x1c, 0xe9, 0xf3, 0xe1, 0x26, 0xbf, 0x35, 0xcb, 0x41, 0x96, 0xc6, 0x49, 0xfa, + 0x7d, 0x19, 0x22, 0x27, 0x48, 0xfa, 0x7d, 0x64, 0x69, 0x6c, 0x11, 0x6e, 0xbb, 0xee, 0x2e, 0xeb, + 0xb6, 0x2a, 0x92, 0xae, 0xcc, 0x55, 0x01, 0xbe, 0x08, 0x5f, 0x4d, 0x92, 0x30, 0xcd, 0x4b, 0x36, + 0xe1, 0x99, 0x0f, 0xa8, 0xe7, 0xca, 0x71, 0xaa, 0x6d, 0x53, 0xda, 0x53, 0x30, 0x42, 0x89, 0xe2, + 0x01, 0x79, 0xbf, 0x97, 0xcd, 0x82, 0xc3, 0xf2, 0xf2, 0xd0, 0x5e, 0xdd, 0xeb, 0xd0, 0x60, 0xdd, + 0x73, 0x0d, 0xea, 0xfb, 0x96, 0xd3, 0x51, 0xb0, 0xd5, 0x08, 0x76, 0x23, 0x9b, 0x05, 0x87, 0xe5, + 0x25, 0x6f, 0xc3, 0x94, 0x20, 0x09, 0x75, 0x61, 0x7e, 0x4f, 0xb7, 0x6c, 0x7d, 
0xcb, 0xb2, 0xd5, + 0x8d, 0x74, 0xe3, 0xc2, 0xb3, 0xb2, 0x31, 0x84, 0x07, 0x87, 0xe6, 0x26, 0x37, 0x61, 0x52, 0xf9, + 0xd5, 0xd6, 0xa9, 0xd7, 0x0e, 0x43, 0x1f, 0xc6, 0x5b, 0x57, 0xd8, 0x8a, 0x77, 0x91, 0xf6, 0x3c, + 0x6a, 0xc4, 0xbd, 0x89, 0x8a, 0x0b, 0x07, 0xf2, 0x11, 0x84, 0x0b, 0x3c, 0xbe, 0x61, 0xb3, 0xb7, + 0xe0, 0xba, 0xb6, 0xe9, 0xde, 0x73, 0xd4, 0xb7, 0x0b, 0xd5, 0x8e, 0xbb, 0xd2, 0xda, 0x99, 0x1c, + 0x38, 0x24, 0x27, 0xfb, 0x72, 0x4e, 0x59, 0x74, 0xef, 0x39, 0x69, 0x54, 0x88, 0xbe, 0xbc, 0x3d, + 0x84, 0x07, 0x87, 0xe6, 0x26, 0x4b, 0x40, 0xd2, 0x5f, 0xb0, 0xd9, 0x93, 0xee, 0xd9, 0x0b, 0xe2, + 0x3c, 0x95, 0x34, 0x15, 0x33, 0x72, 0x90, 0x55, 0x38, 0x9f, 0x4e, 0x65, 0xe2, 0xa4, 0x97, 0x96, + 0x9f, 0xa4, 0x8a, 0x19, 0x74, 0xcc, 0xcc, 0xa5, 0xfd, 0xf3, 0x22, 0x8c, 0x27, 0x36, 0xe0, 0x3f, + 0x71, 0x1b, 0x9d, 0x99, 0x0e, 0xde, 0xf5, 0x3b, 0x2b, 0x8b, 0x37, 0xa8, 0x6e, 0x52, 0xef, 0x16, + 0x55, 0x87, 0x25, 0xf0, 0x41, 0x65, 0x2d, 0x41, 0xc1, 0x14, 0x27, 0xd9, 0x86, 0x8a, 0xb0, 0x28, + 0xe7, 0xbd, 0x5a, 0x43, 0xd5, 0x11, 0x37, 0x2b, 0xcb, 0xfb, 0x68, 0x5c, 0x8f, 0xa2, 0x80, 0xd7, + 0x02, 0x18, 0x8b, 0x73, 0xb0, 0x81, 0x24, 0x52, 0x37, 0x6b, 0x09, 0x55, 0x73, 0x05, 0x4a, 0x41, + 0x30, 0xea, 0x16, 0x6a, 0xe1, 0xa1, 0xd8, 0x58, 0x45, 0x86, 0xa1, 0x6d, 0xb3, 0x7f, 0xe7, 0xfb, + 0x96, 0xeb, 0xc8, 0xf3, 0xb4, 0x37, 0xa1, 0x16, 0x48, 0x23, 0xdd, 0x68, 0x5b, 0xc0, 0xb9, 0x8e, + 0xa2, 0x0c, 0x74, 0x0a, 0x4b, 0xfb, 0x8f, 0x45, 0x68, 0x84, 0x0b, 0xea, 0x23, 0x9c, 0x53, 0xed, + 0x42, 0x23, 0x8c, 0xcf, 0xca, 0x7d, 0x5b, 0x5f, 0x14, 0x36, 0xc4, 0xd7, 0x80, 0xe1, 0x2b, 0x46, + 0x32, 0xe2, 0xb1, 0x5f, 0xa5, 0x1c, 0xb1, 0x5f, 0x3d, 0xa8, 0x05, 0x9e, 0xd5, 0xe9, 0x48, 0xed, + 0x3c, 0x4f, 0xf0, 0x57, 0x58, 0x5d, 0x1b, 0x02, 0x50, 0xd6, 0xac, 0x78, 0x41, 0x25, 0x46, 0x7b, + 0x0f, 0x26, 0xd3, 0x9c, 0x5c, 0x75, 0x35, 0x76, 0xa8, 0xd9, 0xb7, 0x55, 0x1d, 0x47, 0xaa, 0xab, + 0x4c, 0xc7, 0x90, 0x83, 0x2d, 0x7f, 0xd9, 0x6f, 0xfa, 0xc0, 0x75, 0x94, 0xfa, 0xc8, 0x57, 0x01, + 0x1b, 0x32, 0x0d, 
0x43, 0xaa, 0xf6, 0xdf, 0x4a, 0x70, 0x31, 0x32, 0x8b, 0xac, 0xe9, 0x8e, 0xde, + 0x39, 0xc2, 0x15, 0x6d, 0x9f, 0x6f, 0xaa, 0x39, 0xee, 0x65, 0x03, 0xa5, 0x27, 0xe0, 0xb2, 0x81, + 0xff, 0x5b, 0x04, 0x1e, 0x4b, 0x4a, 0xbe, 0x06, 0x63, 0x7a, 0xec, 0x76, 0x4e, 0xf9, 0x3b, 0xaf, + 0xe7, 0xfe, 0x9d, 0x3c, 0x64, 0x35, 0x8c, 0x8d, 0x8a, 0xa7, 0x62, 0x42, 0x20, 0x71, 0xa1, 0xbe, + 0xad, 0xdb, 0x36, 0xd3, 0x85, 0x72, 0xbb, 0x79, 0x12, 0xc2, 0x79, 0x33, 0x5f, 0x92, 0xd0, 0x18, + 0x0a, 0x21, 0x1f, 0x16, 0x60, 0xdc, 0x8b, 0x2f, 0x93, 0xe4, 0x0f, 0xc9, 0x13, 0x04, 0x10, 0x43, + 0x8b, 0x47, 0x62, 0xc5, 0xd7, 0x62, 0x49, 0x99, 0xda, 0x7f, 0x2d, 0xc0, 0x78, 0xdb, 0xb6, 0x4c, + 0xcb, 0xe9, 0x9c, 0xe2, 0x5d, 0x07, 0x77, 0xa0, 0xe2, 0xdb, 0x96, 0x49, 0x47, 0x9c, 0x4d, 0xc4, + 0x3c, 0xc6, 0x00, 0x50, 0xe0, 0x24, 0x2f, 0x4f, 0x28, 0x1d, 0xe1, 0xf2, 0x84, 0x5f, 0x54, 0x41, + 0x46, 0x45, 0x93, 0x3e, 0x34, 0x3a, 0xea, 0x4c, 0x76, 0xf9, 0x8d, 0x37, 0x72, 0x9c, 0xe7, 0x97, + 0x38, 0xdd, 0x5d, 0x8c, 0xfd, 0x61, 0x22, 0x46, 0x92, 0x08, 0x4d, 0x5e, 0x0b, 0xbb, 0x98, 0xf3, + 0x5a, 0x58, 0x21, 0x6e, 0xf0, 0x62, 0x58, 0x1d, 0xca, 0x3b, 0x41, 0xd0, 0x93, 0x8d, 0x69, 0xf4, + 0xb0, 0xf7, 0xe8, 0x48, 0x19, 0xa1, 0x13, 0xb1, 0x77, 0xe4, 0xd0, 0x4c, 0x84, 0xa3, 0x87, 0xd7, + 0x80, 0x2d, 0xe4, 0x0a, 0x38, 0x88, 0x8b, 0x60, 0xef, 0xc8, 0xa1, 0xc9, 0x1f, 0x40, 0x33, 0xf0, + 0x74, 0xc7, 0xdf, 0x76, 0xbd, 0x2e, 0xf5, 0xe4, 0x1a, 0x75, 0x29, 0xc7, 0xcd, 0xa8, 0x1b, 0x11, + 0x9a, 0xf0, 0x64, 0x26, 0x92, 0x30, 0x2e, 0x8d, 0xec, 0x42, 0xbd, 0x6f, 0x8a, 0x82, 0x49, 0xf3, + 0xd3, 0x7c, 0x9e, 0xcb, 0x6e, 0x63, 0xe1, 0x04, 0xea, 0x0d, 0x43, 0x01, 0xc9, 0x1b, 0xef, 0x6a, + 0x27, 0x75, 0xe3, 0x5d, 0xbc, 0x35, 0x66, 0x9d, 0x77, 0x41, 0xba, 0x52, 0xaf, 0x75, 0x3a, 0x32, + 0x1a, 0x6a, 0x29, 0xb7, 0xca, 0x29, 0x44, 0x36, 0x43, 0xdd, 0xd8, 0xe9, 0xa0, 0x92, 0xa1, 0x75, + 0x41, 0x7a, 0x19, 0x88, 0x91, 0xb8, 0x17, 0x46, 0x6c, 0xc2, 0x9a, 0x3d, 0xda, 0x78, 0x10, 0x5e, + 0x50, 0x12, 0x3b, 0x97, 0x3a, 0xf3, 0x02, 0x18, 0xed, 
0x3f, 0x15, 0xa1, 0xb4, 0xb1, 0xda, 0x16, + 0x67, 0x4d, 0xf2, 0x4b, 0x97, 0x68, 0x7b, 0xd7, 0xea, 0xdd, 0xa5, 0x9e, 0xb5, 0xbd, 0x2f, 0x97, + 0xde, 0xb1, 0xb3, 0x26, 0xd3, 0x1c, 0x98, 0x91, 0x8b, 0xbc, 0x03, 0x63, 0x86, 0xbe, 0x40, 0xbd, + 0x60, 0x14, 0xc3, 0x02, 0xdf, 0x6d, 0xba, 0x30, 0x1f, 0x65, 0xc7, 0x04, 0x18, 0xd9, 0x04, 0x30, + 0x22, 0xe8, 0xd2, 0xb1, 0xcd, 0x21, 0x31, 0xe0, 0x18, 0x10, 0x41, 0x68, 0xec, 0x32, 0x56, 0x8e, + 0x5a, 0x3e, 0x0e, 0x2a, 0x6f, 0x39, 0xb7, 0x54, 0x5e, 0x8c, 0x60, 0x34, 0x07, 0xc6, 0x13, 0x97, + 0xc5, 0x90, 0x2f, 0x42, 0xdd, 0xed, 0xc5, 0x86, 0xd3, 0x06, 0x8f, 0xbb, 0xac, 0xdf, 0x91, 0x69, + 0x0f, 0x0e, 0xa6, 0xc7, 0x57, 0xdd, 0x8e, 0x65, 0xa8, 0x04, 0x0c, 0xd9, 0x89, 0x06, 0x55, 0xbe, + 0x45, 0x4c, 0x5d, 0x15, 0xc3, 0xe7, 0x0e, 0x7e, 0x9b, 0x83, 0x8f, 0x92, 0xa2, 0xfd, 0x61, 0x19, + 0x22, 0xdf, 0x1c, 0xf1, 0xa1, 0x2a, 0x42, 0xe0, 0xe5, 0xc8, 0x7d, 0xaa, 0xd1, 0xf6, 0x52, 0x14, + 0xe9, 0x40, 0xe9, 0x3d, 0x77, 0x2b, 0xf7, 0xc0, 0x1d, 0xdb, 0x1b, 0x2e, 0x6c, 0x65, 0xb1, 0x04, + 0x64, 0x12, 0xc8, 0xdf, 0x2e, 0xc0, 0x59, 0x3f, 0xad, 0xfa, 0xca, 0xe6, 0x80, 0xf9, 0x75, 0xfc, + 0xb4, 0x32, 0x2d, 0x03, 0x64, 0x87, 0x91, 0x71, 0xb0, 0x2c, 0xac, 0xfe, 0x85, 0xd3, 0x4c, 0x36, + 0xa7, 0xe5, 0x9c, 0x17, 0x1c, 0x26, 0xeb, 0x3f, 0x99, 0x86, 0x52, 0x94, 0xf6, 0xc7, 0x45, 0x68, + 0xc6, 0x46, 0xeb, 0xdc, 0x37, 0x10, 0xdd, 0x4f, 0xdd, 0x40, 0xb4, 0x3e, 0xba, 0x0f, 0x39, 0x2a, + 0xd5, 0x69, 0x5f, 0x42, 0xf4, 0xaf, 0x8a, 0x50, 0xda, 0x5c, 0x5c, 0x4a, 0x2e, 0x5a, 0x0b, 0x8f, + 0x61, 0xd1, 0xba, 0x03, 0xb5, 0xad, 0xbe, 0x65, 0x07, 0x96, 0x93, 0xfb, 0xf4, 0x0a, 0x75, 0x61, + 0x93, 0xf4, 0x31, 0x08, 0x54, 0x54, 0xf0, 0xa4, 0x03, 0xb5, 0x8e, 0x38, 0x3e, 0x30, 0x77, 0x64, + 0x9d, 0x3c, 0x86, 0x50, 0x08, 0x92, 0x2f, 0xa8, 0xd0, 0xb5, 0x7d, 0x90, 0x37, 0xcf, 0x3f, 0xf6, + 0xda, 0xd4, 0xfe, 0x00, 0x42, 0x2d, 0xe0, 0xf1, 0x0b, 0xff, 0x1f, 0x05, 0x48, 0x2a, 0x3e, 0x8f, + 0xbf, 0x35, 0xed, 0xa6, 0x5b, 0xd3, 0xe2, 0x49, 0x74, 0xbe, 0xec, 0x06, 0xa5, 0xfd, 0xd3, 
0x22, + 0x54, 0x1f, 0xdb, 0x8e, 0x63, 0x9a, 0x08, 0x12, 0x5c, 0xc8, 0x39, 0x30, 0x0e, 0x0d, 0x11, 0xec, + 0xa6, 0x42, 0x04, 0xf3, 0x5e, 0x31, 0xfb, 0x88, 0x00, 0xc1, 0x7f, 0x57, 0x00, 0x39, 0x2c, 0xaf, + 0x38, 0x7e, 0xa0, 0x3b, 0x06, 0x25, 0x46, 0x38, 0x07, 0xe4, 0x8d, 0x44, 0x91, 0xd1, 0x5a, 0x62, + 0xda, 0xe7, 0xcf, 0x6a, 0xcc, 0x27, 0x5f, 0x80, 0xfa, 0x8e, 0xeb, 0x07, 0x7c, 0x9c, 0x2f, 0x26, + 0xad, 0x4b, 0x37, 0x64, 0x3a, 0x86, 0x1c, 0x69, 0x8f, 0x6b, 0x65, 0xb8, 0xc7, 0x55, 0xfb, 0x56, + 0x11, 0xc6, 0x3e, 0x2b, 0xdb, 0xa6, 0xb3, 0x42, 0x2a, 0x4b, 0x39, 0x43, 0x2a, 0xcb, 0xc7, 0x09, + 0xa9, 0xd4, 0x7e, 0x58, 0x00, 0x78, 0x6c, 0x7b, 0xb6, 0xcd, 0x64, 0xb4, 0x63, 0xee, 0x76, 0x95, + 0x1d, 0xeb, 0xf8, 0x8f, 0x2b, 0xea, 0x93, 0x78, 0xa4, 0xe3, 0x47, 0x05, 0x98, 0xd0, 0x13, 0xd1, + 0x83, 0xb9, 0x55, 0xcb, 0x54, 0x30, 0x62, 0x18, 0xfc, 0x92, 0x4c, 0xc7, 0x94, 0x58, 0xf2, 0x7a, + 0x74, 0x5e, 0xf0, 0xed, 0xa8, 0xd9, 0x0f, 0x1c, 0xf4, 0xcb, 0xd5, 0x9c, 0x04, 0xe7, 0x23, 0xa2, + 0x35, 0x4b, 0x27, 0x12, 0xad, 0x19, 0xdf, 0x87, 0x56, 0x7e, 0xe8, 0x3e, 0xb4, 0x3d, 0x68, 0x6c, + 0x7b, 0x6e, 0x97, 0x07, 0x44, 0xca, 0xcb, 0x69, 0xaf, 0xe7, 0x98, 0x53, 0xa2, 0x6b, 0xd9, 0x23, + 0x1b, 0xcf, 0x92, 0xc2, 0xc7, 0x48, 0x14, 0x37, 0x8b, 0xbb, 0x42, 0x6a, 0xf5, 0x24, 0xa5, 0x86, + 0x63, 0xc9, 0x86, 0x40, 0x47, 0x25, 0x26, 0x19, 0x04, 0x59, 0x7b, 0x3c, 0x41, 0x90, 0xda, 0x77, + 0xab, 0x6a, 0x00, 0x7b, 0xe2, 0x8e, 0xa6, 0xfc, 0xec, 0xef, 0xf5, 0x4d, 0x6f, 0xc4, 0xad, 0x3d, + 0xc6, 0x8d, 0xb8, 0xf5, 0x93, 0xd9, 0x88, 0xdb, 0xc8, 0xb7, 0x11, 0x17, 0xf2, 0x6f, 0xc4, 0x6d, + 0xe6, 0xdb, 0x88, 0x3b, 0x36, 0xd2, 0x46, 0xdc, 0xf1, 0x23, 0x6d, 0xc4, 0x3d, 0x28, 0x41, 0x6a, + 0x95, 0xf9, 0xb9, 0x47, 0xe9, 0xff, 0x2b, 0x8f, 0xd2, 0xc7, 0x45, 0x88, 0x86, 0xcd, 0x63, 0x46, + 0xdc, 0xbc, 0x0d, 0xf5, 0xae, 0x7e, 0x7f, 0x91, 0xda, 0xfa, 0x7e, 0x9e, 0xfb, 0x57, 0xd7, 0x24, + 0x06, 0x86, 0x68, 0xc4, 0x07, 0xb0, 0xc2, 0x33, 0xd0, 0x73, 0xdb, 0xe6, 0xa3, 0xe3, 0xd4, 0x85, + 0xf5, 0x2f, 0x7a, 0xc7, 0x98, 
0x18, 0xed, 0xdf, 0x16, 0x41, 0x1e, 0x96, 0x4f, 0x28, 0x54, 0xb6, + 0xad, 0xfb, 0xd4, 0xcc, 0x1d, 0x3f, 0x1b, 0xbb, 0x15, 0x5b, 0x38, 0x1f, 0x78, 0x02, 0x0a, 0x74, + 0x6e, 0x55, 0x16, 0xce, 0x24, 0x59, 0x7f, 0x39, 0xac, 0xca, 0x71, 0xa7, 0x94, 0xb4, 0x2a, 0x8b, + 0x24, 0x54, 0x32, 0x84, 0x11, 0x9b, 0xc7, 0x15, 0xe4, 0xf6, 0x9d, 0x25, 0xe2, 0x13, 0x94, 0x11, + 0xdb, 0x17, 0x3b, 0xf1, 0xa5, 0x8c, 0xd6, 0xef, 0xff, 0xe0, 0x47, 0x57, 0x9e, 0xfa, 0xe1, 0x8f, + 0xae, 0x3c, 0xf5, 0xc9, 0x8f, 0xae, 0x3c, 0xf5, 0x87, 0x87, 0x57, 0x0a, 0x3f, 0x38, 0xbc, 0x52, + 0xf8, 0xe1, 0xe1, 0x95, 0xc2, 0x27, 0x87, 0x57, 0x0a, 0xff, 0xf9, 0xf0, 0x4a, 0xe1, 0x6f, 0xfc, + 0x97, 0x2b, 0x4f, 0xfd, 0xde, 0x6b, 0x51, 0x11, 0x66, 0x55, 0x11, 0x66, 0x95, 0xc0, 0xd9, 0xde, + 0x6e, 0x67, 0x96, 0x15, 0x21, 0x4a, 0x51, 0x45, 0xf8, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x6c, + 0x98, 0x3b, 0x47, 0x90, 0x97, 0x00, 0x00, } func (m *AbstractPodTemplate) Marshal() (dAtA []byte, err error) { @@ -7208,6 +7210,14 @@ func (m *PipelineStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i-- + if m.DrainedOnPause { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x60 i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) i-- dAtA[i] = 0x58 @@ -10699,6 +10709,7 @@ func (m *PipelineStatus) Size() (n int) { n += 1 + sovGenerated(uint64(*m.ReduceUDFCount)) } n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 2 return n } @@ -12452,6 +12463,7 @@ func (this *PipelineStatus) String() string { `MapUDFCount:` + valueToStringGenerated(this.MapUDFCount) + `,`, `ReduceUDFCount:` + valueToStringGenerated(this.ReduceUDFCount) + `,`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `DrainedOnPause:` + fmt.Sprintf("%v", this.DrainedOnPause) + `,`, `}`, }, "") return s @@ -25929,6 +25941,26 @@ func (m *PipelineStatus) Unmarshal(dAtA []byte) error { break } } + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for 
field DrainedOnPause", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DrainedOnPause = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/pkg/apis/numaflow/v1alpha1/generated.proto b/pkg/apis/numaflow/v1alpha1/generated.proto index 8fadbab191..55940285e3 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.proto +++ b/pkg/apis/numaflow/v1alpha1/generated.proto @@ -1218,6 +1218,12 @@ message PipelineStatus { // The generation observed by the Pipeline controller. // +optional optional int64 observedGeneration = 11; + + // Field to indicate if a pipeline drain successfully occurred, or it timed out. + // Set to true when the Pipeline is in Paused state, and after it has successfully been drained. + // defaults to false + // +kubebuilder:default=false + optional bool drainedOnPause = 12; } message RedisBufferService { diff --git a/pkg/apis/numaflow/v1alpha1/openapi_generated.go b/pkg/apis/numaflow/v1alpha1/openapi_generated.go index bb128213f0..0ed5471bc8 100644 --- a/pkg/apis/numaflow/v1alpha1/openapi_generated.go +++ b/pkg/apis/numaflow/v1alpha1/openapi_generated.go @@ -4132,6 +4132,13 @@ func schema_pkg_apis_numaflow_v1alpha1_PipelineStatus(ref common.ReferenceCallba Format: "int64", }, }, + "drainedOnPause": { + SchemaProps: spec.SchemaProps{ + Description: "Field to indicate if a pipeline drain successfully occurred, or it timed out. Set to true when the Pipeline is in Paused state, and after it has successfully been drained. 
defaults to false", + Type: []string{"boolean"}, + Format: "", + }, + }, }, }, }, diff --git a/pkg/apis/numaflow/v1alpha1/pipeline_types.go b/pkg/apis/numaflow/v1alpha1/pipeline_types.go index 4604239674..c68a7d647c 100644 --- a/pkg/apis/numaflow/v1alpha1/pipeline_types.go +++ b/pkg/apis/numaflow/v1alpha1/pipeline_types.go @@ -633,6 +633,11 @@ type PipelineStatus struct { // The generation observed by the Pipeline controller. // +optional ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,11,opt,name=observedGeneration"` + // Field to indicate if a pipeline drain successfully occurred, or it timed out. + // Set to true when the Pipeline is in Paused state, and after it has successfully been drained. + // defaults to false + // +kubebuilder:default=false + DrainedOnPause bool `json:"drainedOnPause,omitempty" protobuf:"bytes,12,opt,name=drainedOnPause"` } // SetVertexCounts sets the counts of vertices. @@ -764,6 +769,16 @@ func (pls *PipelineStatus) SetObservedGeneration(value int64) { pls.ObservedGeneration = value } +// MarkDrainedOnPauseTrue sets the DrainedOnPause field to true +func (pls *PipelineStatus) MarkDrainedOnPauseTrue() { + pls.DrainedOnPause = true +} + +// MarkDrainedOnPauseFalse sets the DrainedOnPause field to false +func (pls *PipelineStatus) MarkDrainedOnPauseFalse() { + pls.DrainedOnPause = false +} + // IsHealthy indicates whether the pipeline is in healthy status func (pls *PipelineStatus) IsHealthy() bool { switch pls.Phase { diff --git a/pkg/apis/numaflow/v1alpha1/pipeline_types_test.go b/pkg/apis/numaflow/v1alpha1/pipeline_types_test.go index 0bd93e53fb..d7e5c334b9 100644 --- a/pkg/apis/numaflow/v1alpha1/pipeline_types_test.go +++ b/pkg/apis/numaflow/v1alpha1/pipeline_types_test.go @@ -339,6 +339,15 @@ func Test_PipelineMarkPhases(t *testing.T) { assert.Equal(t, PipelinePhaseRunning, s.Phase) } +func Test_PipelineMarkDrained(t *testing.T) { + s := PipelineStatus{} + assert.Equal(t, false, s.DrainedOnPause) + 
s.MarkDrainedOnPauseTrue() + assert.Equal(t, true, s.DrainedOnPause) + s.MarkDrainedOnPauseFalse() + assert.Equal(t, false, s.DrainedOnPause) +} + func Test_GetDownstreamEdges(t *testing.T) { pl := Pipeline{ ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/reconciler/pipeline/controller.go b/pkg/reconciler/pipeline/controller.go index 0be76a8d0b..0f5354b2c8 100644 --- a/pkg/reconciler/pipeline/controller.go +++ b/pkg/reconciler/pipeline/controller.go @@ -144,9 +144,17 @@ func (r *pipelineReconciler) reconcile(ctx context.Context, pl *dfv1.Pipeline) ( }() pl.Status.SetObservedGeneration(pl.Generation) - - if oldPhase := pl.Status.Phase; pl.Spec.Lifecycle.GetDesiredPhase() == dfv1.PipelinePhasePaused || - oldPhase == dfv1.PipelinePhasePaused || oldPhase == dfv1.PipelinePhasePausing { + // Regular pipeline change + // This should be happening in call cases to ensure a clean initialization regardless of the lifecycle phase + // Eg: even for a pipeline started with desiredPhase = Pause, we should still create the resources for the pipeline + result, err := r.reconcileNonLifecycleChanges(ctx, pl) + if err != nil { + r.recorder.Eventf(pl, corev1.EventTypeWarning, "ReconcilePipelineFailed", "Failed to reconcile pipeline: %v", err.Error()) + return result, err + } + // check if any changes related to pause/resume lifecycle for the pipeline + if isLifecycleChange(pl) { + oldPhase := pl.Status.Phase requeue, err := r.updateDesiredState(ctx, pl) if err != nil { logMsg := fmt.Sprintf("Updated desired pipeline phase failed: %v", zap.Error(err)) @@ -162,16 +170,24 @@ func (r *pipelineReconciler) reconcile(ctx context.Context, pl *dfv1.Pipeline) ( return ctrl.Result{RequeueAfter: dfv1.DefaultRequeueAfter}, nil } return ctrl.Result{}, nil - } + return result, nil +} - // Regular pipeline change - result, err := r.reconcileNonLifecycleChanges(ctx, pl) - if err != nil { - r.recorder.Eventf(pl, corev1.EventTypeWarning, "ReconcilePipelineFailed", "Failed to reconcile pipeline: %v", 
err.Error()) +// isLifecycleChange determines whether there has been a change requested in the lifecycle +// of a Pipeline object, specifically relating to the paused and pausing states. +func isLifecycleChange(pl *dfv1.Pipeline) bool { + // Extract the current phase from the status of the pipeline. + // Check if the desired phase of the pipeline is 'Paused', or if the current phase of the + // pipeline is either 'Paused' or 'Pausing'. This indicates a transition into or out of + // a paused state which is a lifecycle phase change + if oldPhase := pl.Status.Phase; pl.Spec.Lifecycle.GetDesiredPhase() == dfv1.PipelinePhasePaused || + oldPhase == dfv1.PipelinePhasePaused || oldPhase == dfv1.PipelinePhasePausing { + return true } - return result, err + // If none of the conditions are met, return false + return false } // reconcileNonLifecycleChanges do the jobs not related to pipeline lifecycle changes. @@ -345,7 +361,12 @@ func (r *pipelineReconciler) reconcileNonLifecycleChanges(ctx context.Context, p } pl.Status.MarkDeployed() - pl.Status.SetPhase(pl.Spec.Lifecycle.GetDesiredPhase(), "") + // If the pipeline has a lifecycle change, then do not update the phase as + // this should happen only after the required configs for the lifecycle changes + // have been applied. 
+ if !isLifecycleChange(pl) { + pl.Status.SetPhase(pl.Spec.Lifecycle.GetDesiredPhase(), "") + } if err := r.checkChildrenResourceStatus(ctx, pl); err != nil { return ctrl.Result{}, fmt.Errorf("failed to check pipeline children resource status, %w", err) } @@ -599,7 +620,8 @@ func buildVertices(pl *dfv1.Pipeline) map[string]dfv1.Vertex { copyVertexTemplate(pl, vCopy) copyVertexLimits(pl, vCopy) replicas := int32(1) - if pl.Status.Phase == dfv1.PipelinePhasePaused { + // If the desired phase is pause or we are in the middle of pausing we should not start any vertex replicas + if isLifecycleChange(pl) { replicas = int32(0) } else if v.IsReduceUDF() { partitions := pl.NumOfPartitions(v.Name) @@ -794,7 +816,6 @@ func (r *pipelineReconciler) updateDesiredState(ctx context.Context, pl *dfv1.Pi } func (r *pipelineReconciler) resumePipeline(ctx context.Context, pl *dfv1.Pipeline) (bool, error) { - // reset pause timestamp if pl.GetAnnotations()[dfv1.KeyPauseTimestamp] != "" { err := r.client.Patch(ctx, pl, client.RawPatch(types.JSONPatchType, []byte(dfv1.RemovePauseTimestampPatch))) @@ -806,17 +827,18 @@ func (r *pipelineReconciler) resumePipeline(ctx context.Context, pl *dfv1.Pipeli } } } - _, err := r.scaleUpAllVertices(ctx, pl) if err != nil { return false, err } + // mark the drained field as false to refresh the drained status as this will + // be a new lifecycle from running + pl.Status.MarkDrainedOnPauseFalse() pl.Status.MarkPhaseRunning() return false, nil } func (r *pipelineReconciler) pausePipeline(ctx context.Context, pl *dfv1.Pipeline) (bool, error) { - // check that annotations / pause timestamp annotation exist if pl.GetAnnotations() == nil || pl.GetAnnotations()[dfv1.KeyPauseTimestamp] == "" { pl.SetAnnotations(map[string]string{dfv1.KeyPauseTimestamp: time.Now().Format(time.RFC3339)}) @@ -855,12 +877,16 @@ func (r *pipelineReconciler) pausePipeline(ctx context.Context, pl *dfv1.Pipelin return false, err } - // if drain is completed or we have exceed pause 
deadline, mark pl as paused and scale down + // if drain is completed, or we have exceeded the pause deadline, mark pl as paused and scale down if time.Now().After(pauseTimestamp.Add(time.Duration(pl.Spec.Lifecycle.GetPauseGracePeriodSeconds())*time.Second)) || drainCompleted { _, err := r.scaleDownAllVertices(ctx, pl) if err != nil { return true, err } + // if the drain completed succesfully, then set the DrainedOnPause field to true + if drainCompleted { + pl.Status.MarkDrainedOnPauseTrue() + } pl.Status.MarkPhasePaused() return false, nil } diff --git a/pkg/reconciler/pipeline/controller_test.go b/pkg/reconciler/pipeline/controller_test.go index 3e65502147..0cf9205f0a 100644 --- a/pkg/reconciler/pipeline/controller_test.go +++ b/pkg/reconciler/pipeline/controller_test.go @@ -220,7 +220,6 @@ func Test_reconcileEvents(t *testing.T) { _, err = r.reconcile(ctx, testObj) assert.Error(t, err) events := getEvents(t, r) - assert.Contains(t, events, "Normal UpdatePipelinePhase Updated pipeline phase from Paused to Running") assert.Contains(t, events, "Warning ReconcilePipelineFailed Failed to reconcile pipeline: the length of the pipeline name plus the vertex name is over the max limit. 
(very-very-very-loooooooooooooooooooooooooooooooooooong-input), [must be no more than 63 characters]") }) @@ -945,3 +944,72 @@ func Test_checkChildrenResourceStatus(t *testing.T) { } }) } + +func TestIsLifecycleChange(t *testing.T) { + tests := []struct { + name string + currentPhase dfv1.PipelinePhase + desiredPhase dfv1.PipelinePhase + expectedResult bool + }{ + { + name: "Change to paused from another state", + currentPhase: dfv1.PipelinePhaseRunning, + desiredPhase: dfv1.PipelinePhasePaused, + expectedResult: true, + }, + { + name: "when already in paused", + currentPhase: dfv1.PipelinePhasePaused, + desiredPhase: dfv1.PipelinePhasePaused, + expectedResult: true, + }, + { + name: "Change out of paused", + currentPhase: dfv1.PipelinePhasePaused, + desiredPhase: dfv1.PipelinePhaseRunning, + expectedResult: true, + }, + { + name: "Change from another state to pausing", + currentPhase: dfv1.PipelinePhaseRunning, + desiredPhase: dfv1.PipelinePhasePausing, + expectedResult: false, + }, + { + name: "Change from pausing to running", + currentPhase: dfv1.PipelinePhasePausing, + desiredPhase: dfv1.PipelinePhaseRunning, + expectedResult: true, + }, + { + name: "No lifecycle change", + currentPhase: dfv1.PipelinePhaseRunning, + desiredPhase: dfv1.PipelinePhaseRunning, + expectedResult: false, + }, + { + name: "No lifecycle change - updated phase", + currentPhase: dfv1.PipelinePhaseRunning, + desiredPhase: dfv1.PipelinePhaseDeleting, + expectedResult: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + pl := &dfv1.Pipeline{ + Spec: dfv1.PipelineSpec{ + Lifecycle: dfv1.Lifecycle{ + DesiredPhase: test.desiredPhase, + }, + }, + Status: dfv1.PipelineStatus{ + Phase: test.currentPhase, + }, + } + result := isLifecycleChange(pl) + assert.Equal(t, test.expectedResult, result) + }) + } +} diff --git a/rust/numaflow-models/src/models/pipeline_status.rs b/rust/numaflow-models/src/models/pipeline_status.rs index 27d6049658..e67205b3cd 100644 --- 
a/rust/numaflow-models/src/models/pipeline_status.rs +++ b/rust/numaflow-models/src/models/pipeline_status.rs @@ -21,6 +21,9 @@ pub struct PipelineStatus { /// Conditions are the latest available observations of a resource's current state. #[serde(rename = "conditions", skip_serializing_if = "Option::is_none")] pub conditions: Option>, + /// Field to indicate if a pipeline drain successfully occurred, or it timed out. Set to true when the Pipeline is in Paused state, and after it has successfully been drained. defaults to false + #[serde(rename = "drainedOnPause", skip_serializing_if = "Option::is_none")] + pub drained_on_pause: Option, #[serde(rename = "lastUpdated", skip_serializing_if = "Option::is_none")] pub last_updated: Option, #[serde(rename = "mapUDFCount", skip_serializing_if = "Option::is_none")] @@ -48,6 +51,7 @@ impl PipelineStatus { pub fn new() -> PipelineStatus { PipelineStatus { conditions: None, + drained_on_pause: None, last_updated: None, map_udf_count: None, message: None, From 4f6b02e7f63a6f85373f93d4f1c493b25ea748e1 Mon Sep 17 00:00:00 2001 From: Vigith Maurice Date: Fri, 6 Sep 2024 14:28:55 -0700 Subject: [PATCH 049/188] chore: improve pending logs to be on single line (#2037) Signed-off-by: Vigith Maurice --- rust/monovertex/src/metrics.rs | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/rust/monovertex/src/metrics.rs b/rust/monovertex/src/metrics.rs index f6f5519765..375e7c071c 100644 --- a/rust/monovertex/src/metrics.rs +++ b/rust/monovertex/src/metrics.rs @@ -1,3 +1,4 @@ +use std::collections::BTreeMap; use std::net::SocketAddr; use std::sync::{Arc, OnceLock}; use std::time::Duration; @@ -8,6 +9,12 @@ use axum::http::{Response, StatusCode}; use axum::response::IntoResponse; use axum::{routing::get, Router}; use axum_server::tls_rustls::RustlsConfig; +use prometheus_client::encoding::text::encode; +use prometheus_client::metrics::counter::Counter; +use prometheus_client::metrics::family::Family; 
+use prometheus_client::metrics::gauge::Gauge; +use prometheus_client::metrics::histogram::{exponential_buckets, Histogram}; +use prometheus_client::registry::Registry; use rcgen::{generate_simple_self_signed, CertifiedKey}; use tokio::net::{TcpListener, ToSocketAddrs}; use tokio::sync::Mutex; @@ -20,12 +27,6 @@ use crate::error::Error; use crate::sink::SinkClient; use crate::source::SourceClient; use crate::transformer::TransformerClient; -use prometheus_client::encoding::text::encode; -use prometheus_client::metrics::counter::Counter; -use prometheus_client::metrics::family::Family; -use prometheus_client::metrics::gauge::Gauge; -use prometheus_client::metrics::histogram::{exponential_buckets, Histogram}; -use prometheus_client::registry::Registry; // Define the labels for the metrics // Note: Please keep consistent with the definitions in MonoVertex daemon @@ -483,6 +484,11 @@ async fn expose_pending_metrics( ) { let mut ticker = time::interval(refresh_interval); let lookback_seconds_map = vec![("1m", 60), ("default", 120), ("5m", 300), ("15m", 900)]; + + // store the pending info in a sorted way for deterministic display + // string concat is more efficient? 
+ let mut pending_info: BTreeMap<&str, i64> = BTreeMap::new(); + loop { ticker.tick().await; for (label, seconds) in &lookback_seconds_map { @@ -490,13 +496,18 @@ async fn expose_pending_metrics( if pending != -1 { let mut metric_labels = forward_metrics_labels().clone(); metric_labels.push((PENDING_PERIOD_LABEL.to_string(), label.to_string())); + pending_info.insert(label, pending); forward_metrics() .source_pending .get_or_create(&metric_labels) .set(pending); - info!("Pending messages ({}): {}", label, pending); } } + // skip for those the pending is not implemented + if !pending_info.is_empty() { + info!("Pending messages {:?}", pending_info); + pending_info.clear(); + } } } From 3287887761fa5a8da12ca70c5ce53947cbe896ec Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Fri, 6 Sep 2024 18:44:47 -0700 Subject: [PATCH 050/188] feat: rolling update for MonoVertex (#2029) --- api/json-schema/schema.json | 44 +- api/openapi-spec/swagger.json | 44 +- config/advanced-install/minimal-crds.yaml | 2 +- .../numaflow.numaproj.io_monovertices.yaml | 24 +- .../numaflow.numaproj.io_monovertices.yaml | 2 +- config/install.yaml | 24 +- config/namespace-install.yaml | 24 +- docs/APIs.md | 268 ++- .../numaflow-transformer-config.yaml | 1 + pkg/apis/numaflow/v1alpha1/generated.pb.go | 1558 +++++++++++------ pkg/apis/numaflow/v1alpha1/generated.proto | 68 +- .../numaflow/v1alpha1/mono_vertex_types.go | 53 +- .../numaflow/v1alpha1/openapi_generated.go | 81 +- pkg/apis/numaflow/v1alpha1/update_strategy.go | 80 + .../numaflow/v1alpha1/update_strategy_test.go | 128 ++ .../v1alpha1/zz_generated.deepcopy.go | 44 + .../server/service/health_status.go | 8 +- pkg/reconciler/monovertex/controller.go | 265 ++- pkg/reconciler/monovertex/controller_test.go | 299 ++++ pkg/reconciler/monovertex/scaling/scaling.go | 8 +- pkg/reconciler/pipeline/controller.go | 2 +- pkg/reconciler/util.go | 21 +- pkg/reconciler/util_test.go | 21 + rust/numaflow-models/Makefile | 2 + 
rust/numaflow-models/src/models/mod.rs | 4 + .../src/models/mono_vertex_spec.rs | 3 + .../src/models/mono_vertex_status.rs | 19 +- .../src/models/rolling_update_strategy.rs | 34 + .../src/models/update_strategy.rs | 51 + test/fixtures/util.go | 2 +- 30 files changed, 2477 insertions(+), 707 deletions(-) create mode 100644 pkg/apis/numaflow/v1alpha1/update_strategy.go create mode 100644 pkg/apis/numaflow/v1alpha1/update_strategy_test.go create mode 100644 rust/numaflow-models/src/models/rolling_update_strategy.rs create mode 100644 rust/numaflow-models/src/models/update_strategy.rs diff --git a/api/json-schema/schema.json b/api/json-schema/schema.json index d58a13beb6..27796003ba 100644 --- a/api/json-schema/schema.json +++ b/api/json-schema/schema.json @@ -19270,6 +19270,10 @@ }, "type": "array" }, + "updateStrategy": { + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.UpdateStrategy", + "description": "The strategy to use to replace existing pods with new ones." + }, "volumes": { "items": { "$ref": "#/definitions/io.k8s.api.core.v1.Volume" @@ -19293,11 +19297,11 @@ "x-kubernetes-patch-strategy": "merge" }, "currentHash": { - "description": "If not empty, indicates the version of the MonoVertex used to generate Pods in the sequence [0,currentReplicas).", + "description": "If not empty, indicates the current version of the MonoVertex used to generate Pods.", "type": "string" }, - "currentReplicas": { - "description": "The number of Pods created by the controller from the MonoVertex version indicated by currentHash.", + "desiredReplicas": { + "description": "The number of desired replicas.", "format": "int64", "type": "integer" }, @@ -19336,9 +19340,14 @@ "type": "string" }, "updateHash": { - "description": "If not empty, indicates the version of the MonoVertx used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", + "description": "If not empty, indicates the updated version of the MonoVertex used to generate Pods.", "type": "string" }, + 
"updatedReadyReplicas": { + "description": "The number of ready Pods created by the controller from the MonoVertex version indicated by updateHash.", + "format": "int64", + "type": "integer" + }, "updatedReplicas": { "description": "The number of Pods created by the controller from the MonoVertex version indicated by updateHash.", "format": "int64", @@ -19803,6 +19812,16 @@ }, "type": "object" }, + "io.numaproj.numaflow.v1alpha1.RollingUpdateStrategy": { + "description": "RollingUpdateStrategy is used to communicate parameter for RollingUpdateStrategyType.", + "properties": { + "maxUnavailable": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString", + "description": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. Defaults to 25%. Example: when this is set to 30%, the old pods can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old pods can be scaled down further, followed by scaling up the new pods, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods." + } + }, + "type": "object" + }, "io.numaproj.numaflow.v1alpha1.SASL": { "properties": { "gssapi": { @@ -20318,6 +20337,23 @@ }, "type": "object" }, + "io.numaproj.numaflow.v1alpha1.UpdateStrategy": { + "description": "UpdateStrategy indicates the strategy that the controller will use to perform updates for Vertex or MonoVertex.", + "properties": { + "rollingUpdate": { + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.RollingUpdateStrategy", + "description": "RollingUpdate is used to communicate parameters when Type is RollingUpdateStrategy." + }, + "type": { + "description": "Type indicates the type of the StatefulSetUpdateStrategy. 
Default is RollingUpdate.\n\nPossible enum values:\n - `\"RollingUpdate\"`", + "enum": [ + "RollingUpdate" + ], + "type": "string" + } + }, + "type": "object" + }, "io.numaproj.numaflow.v1alpha1.Vertex": { "properties": { "apiVersion": { diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index b3cdb6b120..bb918bac66 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -19266,6 +19266,10 @@ "$ref": "#/definitions/io.k8s.api.core.v1.Toleration" } }, + "updateStrategy": { + "description": "The strategy to use to replace existing pods with new ones.", + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.UpdateStrategy" + }, "volumes": { "type": "array", "items": { @@ -19289,11 +19293,11 @@ "x-kubernetes-patch-strategy": "merge" }, "currentHash": { - "description": "If not empty, indicates the version of the MonoVertex used to generate Pods in the sequence [0,currentReplicas).", + "description": "If not empty, indicates the current version of the MonoVertex used to generate Pods.", "type": "string" }, - "currentReplicas": { - "description": "The number of Pods created by the controller from the MonoVertex version indicated by currentHash.", + "desiredReplicas": { + "description": "The number of desired replicas.", "type": "integer", "format": "int64" }, @@ -19332,9 +19336,14 @@ "type": "string" }, "updateHash": { - "description": "If not empty, indicates the version of the MonoVertx used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", + "description": "If not empty, indicates the updated version of the MonoVertex used to generate Pods.", "type": "string" }, + "updatedReadyReplicas": { + "description": "The number of ready Pods created by the controller from the MonoVertex version indicated by updateHash.", + "type": "integer", + "format": "int64" + }, "updatedReplicas": { "description": "The number of Pods created by the controller from the MonoVertex version indicated by updateHash.", "type": 
"integer", @@ -19789,6 +19798,16 @@ } } }, + "io.numaproj.numaflow.v1alpha1.RollingUpdateStrategy": { + "description": "RollingUpdateStrategy is used to communicate parameter for RollingUpdateStrategyType.", + "type": "object", + "properties": { + "maxUnavailable": { + "description": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. Defaults to 25%. Example: when this is set to 30%, the old pods can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old pods can be scaled down further, followed by scaling up the new pods, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString" + } + } + }, "io.numaproj.numaflow.v1alpha1.SASL": { "type": "object", "required": [ @@ -20304,6 +20323,23 @@ } } }, + "io.numaproj.numaflow.v1alpha1.UpdateStrategy": { + "description": "UpdateStrategy indicates the strategy that the controller will use to perform updates for Vertex or MonoVertex.", + "type": "object", + "properties": { + "rollingUpdate": { + "description": "RollingUpdate is used to communicate parameters when Type is RollingUpdateStrategy.", + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.RollingUpdateStrategy" + }, + "type": { + "description": "Type indicates the type of the StatefulSetUpdateStrategy. 
Default is RollingUpdate.\n\nPossible enum values:\n - `\"RollingUpdate\"`", + "type": "string", + "enum": [ + "RollingUpdate" + ] + } + } + }, "io.numaproj.numaflow.v1alpha1.Vertex": { "type": "object", "required": [ diff --git a/config/advanced-install/minimal-crds.yaml b/config/advanced-install/minimal-crds.yaml index 3e647ee3d6..a8eac9fc22 100644 --- a/config/advanced-install/minimal-crds.yaml +++ b/config/advanced-install/minimal-crds.yaml @@ -69,7 +69,7 @@ spec: - jsonPath: .status.phase name: Phase type: string - - jsonPath: .spec.replicas + - jsonPath: .status.desiredReplicas name: Desired type: string - jsonPath: .status.replicas diff --git a/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml b/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml index 8e503f47d6..02ae281ebd 100644 --- a/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml @@ -21,7 +21,7 @@ spec: - jsonPath: .status.phase name: Phase type: string - - jsonPath: .spec.replicas + - jsonPath: .status.desiredReplicas name: Desired type: string - jsonPath: .status.replicas @@ -4790,6 +4790,23 @@ spec: type: string type: object type: array + updateStrategy: + default: + rollingUpdate: + maxUnavailable: 25% + type: RollingUpdate + properties: + rollingUpdate: + properties: + maxUnavailable: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + type: object + type: + type: string + type: object volumes: items: properties: @@ -5551,7 +5568,7 @@ spec: type: array currentHash: type: string - currentReplicas: + desiredReplicas: format: int32 type: integer lastScaledAt: @@ -5586,6 +5603,9 @@ spec: type: string updateHash: type: string + updatedReadyReplicas: + format: int32 + type: integer updatedReplicas: format: int32 type: integer diff --git a/config/base/crds/minimal/numaflow.numaproj.io_monovertices.yaml b/config/base/crds/minimal/numaflow.numaproj.io_monovertices.yaml index 
65cb6b2652..0a3af26fbb 100644 --- a/config/base/crds/minimal/numaflow.numaproj.io_monovertices.yaml +++ b/config/base/crds/minimal/numaflow.numaproj.io_monovertices.yaml @@ -17,7 +17,7 @@ spec: - jsonPath: .status.phase name: Phase type: string - - jsonPath: .spec.replicas + - jsonPath: .status.desiredReplicas name: Desired type: string - jsonPath: .status.replicas diff --git a/config/install.yaml b/config/install.yaml index 8a84ffac83..ac272ddf19 100644 --- a/config/install.yaml +++ b/config/install.yaml @@ -2665,7 +2665,7 @@ spec: - jsonPath: .status.phase name: Phase type: string - - jsonPath: .spec.replicas + - jsonPath: .status.desiredReplicas name: Desired type: string - jsonPath: .status.replicas @@ -7434,6 +7434,23 @@ spec: type: string type: object type: array + updateStrategy: + default: + rollingUpdate: + maxUnavailable: 25% + type: RollingUpdate + properties: + rollingUpdate: + properties: + maxUnavailable: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + type: object + type: + type: string + type: object volumes: items: properties: @@ -8195,7 +8212,7 @@ spec: type: array currentHash: type: string - currentReplicas: + desiredReplicas: format: int32 type: integer lastScaledAt: @@ -8230,6 +8247,9 @@ spec: type: string updateHash: type: string + updatedReadyReplicas: + format: int32 + type: integer updatedReplicas: format: int32 type: integer diff --git a/config/namespace-install.yaml b/config/namespace-install.yaml index c9301892de..12579dc36f 100644 --- a/config/namespace-install.yaml +++ b/config/namespace-install.yaml @@ -2665,7 +2665,7 @@ spec: - jsonPath: .status.phase name: Phase type: string - - jsonPath: .spec.replicas + - jsonPath: .status.desiredReplicas name: Desired type: string - jsonPath: .status.replicas @@ -7434,6 +7434,23 @@ spec: type: string type: object type: array + updateStrategy: + default: + rollingUpdate: + maxUnavailable: 25% + type: RollingUpdate + properties: + rollingUpdate: + properties: + 
maxUnavailable: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + type: object + type: + type: string + type: object volumes: items: properties: @@ -8195,7 +8212,7 @@ spec: type: array currentHash: type: string - currentReplicas: + desiredReplicas: format: int32 type: integer lastScaledAt: @@ -8230,6 +8247,9 @@ spec: type: string updateHash: type: string + updatedReadyReplicas: + format: int32 + type: integer updatedReplicas: format: int32 type: integer diff --git a/docs/APIs.md b/docs/APIs.md index 97ddf96672..5fd26ad505 100644 --- a/docs/APIs.md +++ b/docs/APIs.md @@ -5791,6 +5791,27 @@ Template for the daemon service deployment. + + + + +updateStrategy
+ UpdateStrategy + + + + + +(Optional) +

+ +The strategy to use to replace existing pods with new ones. +

+ + + + + @@ -6159,6 +6180,27 @@ Template for the daemon service deployment. + + + + +updateStrategy
+ UpdateStrategy + + + + + +(Optional) +

+ +The strategy to use to replace existing pods with new ones. +

+ + + + + @@ -6259,6 +6301,25 @@ labels match the selector). +desiredReplicas
uint32 + + + + +(Optional) +

+ +The number of desired replicas. +

+ + + + + + + + + selector
string @@ -6376,7 +6437,7 @@ The number of pods targeted by this MonoVertex with a Ready Condition. -currentReplicas
uint32 +updatedReplicas
uint32 @@ -6384,7 +6445,7 @@ The number of pods targeted by this MonoVertex with a Ready Condition.

The number of Pods created by the controller from the MonoVertex version -indicated by currentHash. +indicated by updateHash.

@@ -6395,15 +6456,15 @@ indicated by currentHash. -updatedReplicas
uint32 +updatedReadyReplicas
uint32

-The number of Pods created by the controller from the MonoVertex version -indicated by updateHash. +The number of ready Pods created by the controller from the MonoVertex +version indicated by updateHash.

@@ -6421,8 +6482,8 @@ indicated by updateHash.

-If not empty, indicates the version of the MonoVertex used to generate -Pods in the sequence \[0,currentReplicas). +If not empty, indicates the current version of the MonoVertex used to +generate Pods.

@@ -6440,8 +6501,8 @@ Pods in the sequence \[0,currentReplicas).

-If not empty, indicates the version of the MonoVertx used to generate -Pods in the sequence \[replicas-updatedReplicas,replicas) +If not empty, indicates the updated version of the MonoVertex used to +generate Pods.

@@ -8418,6 +8479,81 @@ action is to retry. +

+ +RollingUpdateStrategy +

+ +

+ +(Appears on: +UpdateStrategy) +

+ +

+ +

+ +RollingUpdateStrategy is used to communicate parameter for +RollingUpdateStrategyType. +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +maxUnavailable
+k8s.io/apimachinery/pkg/util/intstr.IntOrString +
+ +(Optional) +

+ +The maximum number of pods that can be unavailable during the update. +Value can be an absolute number (ex: 5) or a percentage of desired pods +(ex: 10%). Absolute number is calculated from percentage by rounding +down. Defaults to 25%. Example: when this is set to 30%, the old pods +can be scaled down to 70% of desired pods immediately when the rolling +update starts. Once new pods are ready, old pods can be scaled down +further, followed by scaling up the new pods, ensuring that the total +number of pods available at all times during the update is at least 70% +of desired pods. +

+ +
+

SASL @@ -10659,6 +10795,120 @@ Description +

+ +UpdateStrategy +

+ +

+ +(Appears on: +MonoVertexSpec) +

+ +

+ +

+ +UpdateStrategy indicates the strategy that the controller will use to +perform updates for Vertex or MonoVertex. +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +type
+ +UpdateStrategyType +
+ +(Optional) +

+ +Type indicates the type of the StatefulSetUpdateStrategy. Default is +RollingUpdate. +

+ +
+ +rollingUpdate
+ +RollingUpdateStrategy +
+ +(Optional) +

+ +RollingUpdate is used to communicate parameters when Type is +RollingUpdateStrategy. +

+ +
+ +

+ +UpdateStrategyType (string alias) +

+ +

+ +

+ +(Appears on: +UpdateStrategy) +

+ +

+ +

+ +UpdateStrategyType is a string enumeration type that enumerates all +possible update strategies. +

+ +

+

Vertex diff --git a/docs/user-guide/reference/kustomize/numaflow-transformer-config.yaml b/docs/user-guide/reference/kustomize/numaflow-transformer-config.yaml index 39439f10da..37b74aba6e 100644 --- a/docs/user-guide/reference/kustomize/numaflow-transformer-config.yaml +++ b/docs/user-guide/reference/kustomize/numaflow-transformer-config.yaml @@ -435,6 +435,7 @@ varReference: - path: spec/source/udsource/container/command kind: MonoVertex - path: spec/source/udsource/container/env/value + kind: MonoVertex - path: spec/sink/udsink/container/args kind: MonoVertex - path: spec/sink/udsink/container/command diff --git a/pkg/apis/numaflow/v1alpha1/generated.pb.go b/pkg/apis/numaflow/v1alpha1/generated.pb.go index abf3a4e47a..5297014695 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.pb.go +++ b/pkg/apis/numaflow/v1alpha1/generated.pb.go @@ -35,6 +35,8 @@ import ( math_bits "math/bits" reflect "reflect" strings "strings" + + intstr "k8s.io/apimachinery/pkg/util/intstr" ) // Reference imports to suppress errors if they are not otherwise used. 
@@ -1812,10 +1814,38 @@ func (m *RetryStrategy) XXX_DiscardUnknown() { var xxx_messageInfo_RetryStrategy proto.InternalMessageInfo +func (m *RollingUpdateStrategy) Reset() { *m = RollingUpdateStrategy{} } +func (*RollingUpdateStrategy) ProtoMessage() {} +func (*RollingUpdateStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_9d0d1b17d3865563, []int{63} +} +func (m *RollingUpdateStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateStrategy.Merge(m, src) +} +func (m *RollingUpdateStrategy) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_RollingUpdateStrategy proto.InternalMessageInfo + func (m *SASL) Reset() { *m = SASL{} } func (*SASL) ProtoMessage() {} func (*SASL) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{63} + return fileDescriptor_9d0d1b17d3865563, []int{64} } func (m *SASL) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1843,7 +1873,7 @@ var xxx_messageInfo_SASL proto.InternalMessageInfo func (m *SASLPlain) Reset() { *m = SASLPlain{} } func (*SASLPlain) ProtoMessage() {} func (*SASLPlain) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{64} + return fileDescriptor_9d0d1b17d3865563, []int{65} } func (m *SASLPlain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1871,7 +1901,7 @@ var xxx_messageInfo_SASLPlain proto.InternalMessageInfo func (m *Scale) Reset() { *m = Scale{} } func (*Scale) ProtoMessage() {} func (*Scale) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{65} + return 
fileDescriptor_9d0d1b17d3865563, []int{66} } func (m *Scale) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1899,7 +1929,7 @@ var xxx_messageInfo_Scale proto.InternalMessageInfo func (m *ServingSource) Reset() { *m = ServingSource{} } func (*ServingSource) ProtoMessage() {} func (*ServingSource) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{66} + return fileDescriptor_9d0d1b17d3865563, []int{67} } func (m *ServingSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1927,7 +1957,7 @@ var xxx_messageInfo_ServingSource proto.InternalMessageInfo func (m *ServingStore) Reset() { *m = ServingStore{} } func (*ServingStore) ProtoMessage() {} func (*ServingStore) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{67} + return fileDescriptor_9d0d1b17d3865563, []int{68} } func (m *ServingStore) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1955,7 +1985,7 @@ var xxx_messageInfo_ServingStore proto.InternalMessageInfo func (m *SessionWindow) Reset() { *m = SessionWindow{} } func (*SessionWindow) ProtoMessage() {} func (*SessionWindow) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{68} + return fileDescriptor_9d0d1b17d3865563, []int{69} } func (m *SessionWindow) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1983,7 +2013,7 @@ var xxx_messageInfo_SessionWindow proto.InternalMessageInfo func (m *SideInput) Reset() { *m = SideInput{} } func (*SideInput) ProtoMessage() {} func (*SideInput) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{69} + return fileDescriptor_9d0d1b17d3865563, []int{70} } func (m *SideInput) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2011,7 +2041,7 @@ var xxx_messageInfo_SideInput proto.InternalMessageInfo func (m *SideInputTrigger) Reset() { *m = SideInputTrigger{} } func (*SideInputTrigger) ProtoMessage() {} func (*SideInputTrigger) Descriptor() ([]byte, []int) { - return 
fileDescriptor_9d0d1b17d3865563, []int{70} + return fileDescriptor_9d0d1b17d3865563, []int{71} } func (m *SideInputTrigger) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2039,7 +2069,7 @@ var xxx_messageInfo_SideInputTrigger proto.InternalMessageInfo func (m *SideInputsManagerTemplate) Reset() { *m = SideInputsManagerTemplate{} } func (*SideInputsManagerTemplate) ProtoMessage() {} func (*SideInputsManagerTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{71} + return fileDescriptor_9d0d1b17d3865563, []int{72} } func (m *SideInputsManagerTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2067,7 +2097,7 @@ var xxx_messageInfo_SideInputsManagerTemplate proto.InternalMessageInfo func (m *Sink) Reset() { *m = Sink{} } func (*Sink) ProtoMessage() {} func (*Sink) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{72} + return fileDescriptor_9d0d1b17d3865563, []int{73} } func (m *Sink) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2095,7 +2125,7 @@ var xxx_messageInfo_Sink proto.InternalMessageInfo func (m *SlidingWindow) Reset() { *m = SlidingWindow{} } func (*SlidingWindow) ProtoMessage() {} func (*SlidingWindow) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{73} + return fileDescriptor_9d0d1b17d3865563, []int{74} } func (m *SlidingWindow) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2123,7 +2153,7 @@ var xxx_messageInfo_SlidingWindow proto.InternalMessageInfo func (m *Source) Reset() { *m = Source{} } func (*Source) ProtoMessage() {} func (*Source) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{74} + return fileDescriptor_9d0d1b17d3865563, []int{75} } func (m *Source) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2151,7 +2181,7 @@ var xxx_messageInfo_Source proto.InternalMessageInfo func (m *Status) Reset() { *m = Status{} } func (*Status) ProtoMessage() {} func (*Status) 
Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{75} + return fileDescriptor_9d0d1b17d3865563, []int{76} } func (m *Status) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2179,7 +2209,7 @@ var xxx_messageInfo_Status proto.InternalMessageInfo func (m *TLS) Reset() { *m = TLS{} } func (*TLS) ProtoMessage() {} func (*TLS) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{76} + return fileDescriptor_9d0d1b17d3865563, []int{77} } func (m *TLS) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2207,7 +2237,7 @@ var xxx_messageInfo_TLS proto.InternalMessageInfo func (m *TagConditions) Reset() { *m = TagConditions{} } func (*TagConditions) ProtoMessage() {} func (*TagConditions) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{77} + return fileDescriptor_9d0d1b17d3865563, []int{78} } func (m *TagConditions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2235,7 +2265,7 @@ var xxx_messageInfo_TagConditions proto.InternalMessageInfo func (m *Templates) Reset() { *m = Templates{} } func (*Templates) ProtoMessage() {} func (*Templates) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{78} + return fileDescriptor_9d0d1b17d3865563, []int{79} } func (m *Templates) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2263,7 +2293,7 @@ var xxx_messageInfo_Templates proto.InternalMessageInfo func (m *Transformer) Reset() { *m = Transformer{} } func (*Transformer) ProtoMessage() {} func (*Transformer) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{79} + return fileDescriptor_9d0d1b17d3865563, []int{80} } func (m *Transformer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2291,7 +2321,7 @@ var xxx_messageInfo_Transformer proto.InternalMessageInfo func (m *UDF) Reset() { *m = UDF{} } func (*UDF) ProtoMessage() {} func (*UDF) Descriptor() ([]byte, []int) { - return 
fileDescriptor_9d0d1b17d3865563, []int{80} + return fileDescriptor_9d0d1b17d3865563, []int{81} } func (m *UDF) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2319,7 +2349,7 @@ var xxx_messageInfo_UDF proto.InternalMessageInfo func (m *UDSink) Reset() { *m = UDSink{} } func (*UDSink) ProtoMessage() {} func (*UDSink) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{81} + return fileDescriptor_9d0d1b17d3865563, []int{82} } func (m *UDSink) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2347,7 +2377,7 @@ var xxx_messageInfo_UDSink proto.InternalMessageInfo func (m *UDSource) Reset() { *m = UDSource{} } func (*UDSource) ProtoMessage() {} func (*UDSource) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{82} + return fileDescriptor_9d0d1b17d3865563, []int{83} } func (m *UDSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2375,7 +2405,7 @@ var xxx_messageInfo_UDSource proto.InternalMessageInfo func (m *UDTransformer) Reset() { *m = UDTransformer{} } func (*UDTransformer) ProtoMessage() {} func (*UDTransformer) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{83} + return fileDescriptor_9d0d1b17d3865563, []int{84} } func (m *UDTransformer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2400,10 +2430,38 @@ func (m *UDTransformer) XXX_DiscardUnknown() { var xxx_messageInfo_UDTransformer proto.InternalMessageInfo +func (m *UpdateStrategy) Reset() { *m = UpdateStrategy{} } +func (*UpdateStrategy) ProtoMessage() {} +func (*UpdateStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_9d0d1b17d3865563, []int{85} +} +func (m *UpdateStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UpdateStrategy) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateStrategy.Merge(m, src) +} +func (m *UpdateStrategy) XXX_Size() int { + return m.Size() +} +func (m *UpdateStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateStrategy proto.InternalMessageInfo + func (m *Vertex) Reset() { *m = Vertex{} } func (*Vertex) ProtoMessage() {} func (*Vertex) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{84} + return fileDescriptor_9d0d1b17d3865563, []int{86} } func (m *Vertex) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2431,7 +2489,7 @@ var xxx_messageInfo_Vertex proto.InternalMessageInfo func (m *VertexInstance) Reset() { *m = VertexInstance{} } func (*VertexInstance) ProtoMessage() {} func (*VertexInstance) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{85} + return fileDescriptor_9d0d1b17d3865563, []int{87} } func (m *VertexInstance) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2459,7 +2517,7 @@ var xxx_messageInfo_VertexInstance proto.InternalMessageInfo func (m *VertexLimits) Reset() { *m = VertexLimits{} } func (*VertexLimits) ProtoMessage() {} func (*VertexLimits) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{86} + return fileDescriptor_9d0d1b17d3865563, []int{88} } func (m *VertexLimits) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2487,7 +2545,7 @@ var xxx_messageInfo_VertexLimits proto.InternalMessageInfo func (m *VertexList) Reset() { *m = VertexList{} } func (*VertexList) ProtoMessage() {} func (*VertexList) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{87} + return fileDescriptor_9d0d1b17d3865563, []int{89} } func (m *VertexList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2515,7 +2573,7 @@ var xxx_messageInfo_VertexList proto.InternalMessageInfo func (m *VertexSpec) Reset() { *m = VertexSpec{} } func (*VertexSpec) 
ProtoMessage() {} func (*VertexSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{88} + return fileDescriptor_9d0d1b17d3865563, []int{90} } func (m *VertexSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2543,7 +2601,7 @@ var xxx_messageInfo_VertexSpec proto.InternalMessageInfo func (m *VertexStatus) Reset() { *m = VertexStatus{} } func (*VertexStatus) ProtoMessage() {} func (*VertexStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{89} + return fileDescriptor_9d0d1b17d3865563, []int{91} } func (m *VertexStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2571,7 +2629,7 @@ var xxx_messageInfo_VertexStatus proto.InternalMessageInfo func (m *VertexTemplate) Reset() { *m = VertexTemplate{} } func (*VertexTemplate) ProtoMessage() {} func (*VertexTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{90} + return fileDescriptor_9d0d1b17d3865563, []int{92} } func (m *VertexTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2599,7 +2657,7 @@ var xxx_messageInfo_VertexTemplate proto.InternalMessageInfo func (m *Watermark) Reset() { *m = Watermark{} } func (*Watermark) ProtoMessage() {} func (*Watermark) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{91} + return fileDescriptor_9d0d1b17d3865563, []int{93} } func (m *Watermark) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2627,7 +2685,7 @@ var xxx_messageInfo_Watermark proto.InternalMessageInfo func (m *Window) Reset() { *m = Window{} } func (*Window) ProtoMessage() {} func (*Window) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{92} + return fileDescriptor_9d0d1b17d3865563, []int{94} } func (m *Window) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2724,6 +2782,7 @@ func init() { proto.RegisterType((*RedisConfig)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.RedisConfig") 
proto.RegisterType((*RedisSettings)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.RedisSettings") proto.RegisterType((*RetryStrategy)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.RetryStrategy") + proto.RegisterType((*RollingUpdateStrategy)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.RollingUpdateStrategy") proto.RegisterType((*SASL)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.SASL") proto.RegisterType((*SASLPlain)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.SASLPlain") proto.RegisterType((*Scale)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.Scale") @@ -2746,6 +2805,7 @@ func init() { proto.RegisterType((*UDSink)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.UDSink") proto.RegisterType((*UDSource)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.UDSource") proto.RegisterType((*UDTransformer)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.UDTransformer") + proto.RegisterType((*UpdateStrategy)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.UpdateStrategy") proto.RegisterType((*Vertex)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.Vertex") proto.RegisterType((*VertexInstance)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.VertexInstance") proto.RegisterType((*VertexLimits)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.VertexLimits") @@ -2762,490 +2822,502 @@ func init() { } var fileDescriptor_9d0d1b17d3865563 = []byte{ - // 7719 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x5d, 0x6c, 0x24, 0xd7, - 0x95, 0x9e, 0xfa, 0xbf, 0xfb, 0x34, 0xc9, 0xe1, 0xdc, 0x19, 0x8d, 0x38, 0xa3, 0xd1, 0x70, 0xb6, - 0xb4, 0xd2, 0xce, 0x66, 0xbd, 0x64, 0xc4, 0xe8, 0xcf, 0xfb, 0x63, 0x89, 0x4d, 0x0e, 0x39, 0x9c, - 0x21, 0x67, 0xe8, 0xd3, 0xe4, 0x48, 0x5e, 0x65, 0xad, 0x14, 0xab, 0x2e, 0x9b, 0x25, 0x56, 
0x57, - 0xb5, 0xaa, 0xaa, 0x39, 0x43, 0x6d, 0x02, 0xef, 0xae, 0x02, 0x48, 0x41, 0x10, 0x24, 0xd8, 0x27, - 0x03, 0x81, 0x13, 0x24, 0x48, 0xe0, 0x07, 0xc3, 0x79, 0x08, 0xe0, 0x3c, 0x18, 0x48, 0x1c, 0x07, - 0x41, 0xe2, 0x04, 0xf9, 0xf1, 0x43, 0x80, 0x28, 0x2f, 0x44, 0xcc, 0x20, 0x0f, 0x09, 0x10, 0xc3, - 0x88, 0x91, 0xc4, 0x1e, 0x18, 0x71, 0x70, 0xff, 0xea, 0xaf, 0xab, 0x67, 0xc8, 0x2e, 0x72, 0x34, - 0xca, 0xea, 0xad, 0xea, 0x9e, 0x73, 0xbf, 0x73, 0xeb, 0xd6, 0xfd, 0x39, 0xf7, 0x9c, 0x73, 0xef, - 0x85, 0xe5, 0x8e, 0x15, 0xec, 0xf4, 0xb7, 0x66, 0x0c, 0xb7, 0x3b, 0xeb, 0xf4, 0xbb, 0x7a, 0xcf, - 0x73, 0xdf, 0xe3, 0x0f, 0xdb, 0xb6, 0x7b, 0x6f, 0xb6, 0xb7, 0xdb, 0x99, 0xd5, 0x7b, 0x96, 0x1f, - 0xa5, 0xec, 0xbd, 0xa4, 0xdb, 0xbd, 0x1d, 0xfd, 0xa5, 0xd9, 0x0e, 0x75, 0xa8, 0xa7, 0x07, 0xd4, - 0x9c, 0xe9, 0x79, 0x6e, 0xe0, 0x92, 0xd7, 0x22, 0xa0, 0x19, 0x05, 0x34, 0xa3, 0xb2, 0xcd, 0xf4, - 0x76, 0x3b, 0x33, 0x0c, 0x28, 0x4a, 0x51, 0x40, 0x97, 0x7e, 0x33, 0x56, 0x82, 0x8e, 0xdb, 0x71, - 0x67, 0x39, 0xde, 0x56, 0x7f, 0x9b, 0xbf, 0xf1, 0x17, 0xfe, 0x24, 0xe4, 0x5c, 0xd2, 0x76, 0x5f, - 0xf7, 0x67, 0x2c, 0x97, 0x15, 0x6b, 0xd6, 0x70, 0x3d, 0x3a, 0xbb, 0x37, 0x50, 0x96, 0x4b, 0x2f, - 0x47, 0x3c, 0x5d, 0xdd, 0xd8, 0xb1, 0x1c, 0xea, 0xed, 0xab, 0x6f, 0x99, 0xf5, 0xa8, 0xef, 0xf6, - 0x3d, 0x83, 0x1e, 0x2b, 0x97, 0x3f, 0xdb, 0xa5, 0x81, 0x9e, 0x25, 0x6b, 0x76, 0x58, 0x2e, 0xaf, - 0xef, 0x04, 0x56, 0x77, 0x50, 0xcc, 0xab, 0x8f, 0xca, 0xe0, 0x1b, 0x3b, 0xb4, 0xab, 0xa7, 0xf3, - 0x69, 0xdf, 0x07, 0x38, 0x37, 0xbf, 0xe5, 0x07, 0x9e, 0x6e, 0x04, 0xeb, 0xae, 0xb9, 0x41, 0xbb, - 0x3d, 0x5b, 0x0f, 0x28, 0xd9, 0x85, 0x3a, 0x2b, 0x9b, 0xa9, 0x07, 0xfa, 0x54, 0xe1, 0x6a, 0xe1, - 0x5a, 0x73, 0x6e, 0x7e, 0x66, 0xc4, 0x7f, 0x31, 0xb3, 0x26, 0x81, 0x5a, 0x63, 0x87, 0x07, 0xd3, - 0x75, 0xf5, 0x86, 0xa1, 0x00, 0xf2, 0xf5, 0x02, 0x8c, 0x39, 0xae, 0x49, 0xdb, 0xd4, 0xa6, 0x46, - 0xe0, 0x7a, 0x53, 0xc5, 0xab, 0xa5, 0x6b, 0xcd, 0xb9, 0xaf, 0x8e, 0x2c, 0x31, 0xe3, 0x8b, 0x66, - 0x6e, 0xc7, 0x04, 0x5c, 0x77, 
0x02, 0x6f, 0xbf, 0x75, 0xfe, 0x07, 0x07, 0xd3, 0x4f, 0x1d, 0x1e, - 0x4c, 0x8f, 0xc5, 0x49, 0x98, 0x28, 0x09, 0xd9, 0x84, 0x66, 0xe0, 0xda, 0xac, 0xca, 0x2c, 0xd7, - 0xf1, 0xa7, 0x4a, 0xbc, 0x60, 0x57, 0x66, 0x44, 0x6d, 0x33, 0xf1, 0x33, 0xac, 0xb9, 0xcc, 0xec, - 0xbd, 0x34, 0xb3, 0x11, 0xb2, 0xb5, 0xce, 0x49, 0xe0, 0x66, 0x94, 0xe6, 0x63, 0x1c, 0x87, 0x50, - 0x38, 0xe3, 0x53, 0xa3, 0xef, 0x59, 0xc1, 0xfe, 0x82, 0xeb, 0x04, 0xf4, 0x7e, 0x30, 0x55, 0xe6, - 0xb5, 0xfc, 0x62, 0x16, 0xf4, 0xba, 0x6b, 0xb6, 0x93, 0xdc, 0xad, 0x73, 0x87, 0x07, 0xd3, 0x67, - 0x52, 0x89, 0x98, 0xc6, 0x24, 0x0e, 0x4c, 0x5a, 0x5d, 0xbd, 0x43, 0xd7, 0xfb, 0xb6, 0xdd, 0xa6, - 0x86, 0x47, 0x03, 0x7f, 0xaa, 0xc2, 0x3f, 0xe1, 0x5a, 0x96, 0x9c, 0x55, 0xd7, 0xd0, 0xed, 0x3b, - 0x5b, 0xef, 0x51, 0x23, 0x40, 0xba, 0x4d, 0x3d, 0xea, 0x18, 0xb4, 0x35, 0x25, 0x3f, 0x66, 0x72, - 0x25, 0x85, 0x84, 0x03, 0xd8, 0x64, 0x19, 0xce, 0xf6, 0x3c, 0xcb, 0xe5, 0x45, 0xb0, 0x75, 0xdf, - 0xbf, 0xad, 0x77, 0xe9, 0x54, 0xf5, 0x6a, 0xe1, 0x5a, 0xa3, 0x75, 0x51, 0xc2, 0x9c, 0x5d, 0x4f, - 0x33, 0xe0, 0x60, 0x1e, 0x72, 0x0d, 0xea, 0x2a, 0x71, 0xaa, 0x76, 0xb5, 0x70, 0xad, 0x22, 0xda, - 0x8e, 0xca, 0x8b, 0x21, 0x95, 0x2c, 0x41, 0x5d, 0xdf, 0xde, 0xb6, 0x1c, 0xc6, 0x59, 0xe7, 0x55, - 0x78, 0x39, 0xeb, 0xd3, 0xe6, 0x25, 0x8f, 0xc0, 0x51, 0x6f, 0x18, 0xe6, 0x25, 0x37, 0x81, 0xf8, - 0xd4, 0xdb, 0xb3, 0x0c, 0x3a, 0x6f, 0x18, 0x6e, 0xdf, 0x09, 0x78, 0xd9, 0x1b, 0xbc, 0xec, 0x97, - 0x64, 0xd9, 0x49, 0x7b, 0x80, 0x03, 0x33, 0x72, 0x91, 0x37, 0x61, 0x52, 0x76, 0xbb, 0xa8, 0x16, - 0x80, 0x23, 0x9d, 0x67, 0x15, 0x89, 0x29, 0x1a, 0x0e, 0x70, 0x13, 0x13, 0x2e, 0xeb, 0xfd, 0xc0, - 0xed, 0x32, 0xc8, 0xa4, 0xd0, 0x0d, 0x77, 0x97, 0x3a, 0x53, 0xcd, 0xab, 0x85, 0x6b, 0xf5, 0xd6, - 0xd5, 0xc3, 0x83, 0xe9, 0xcb, 0xf3, 0x0f, 0xe1, 0xc3, 0x87, 0xa2, 0x90, 0x3b, 0xd0, 0x30, 0x1d, - 0x7f, 0xdd, 0xb5, 0x2d, 0x63, 0x7f, 0x6a, 0x8c, 0x17, 0xf0, 0x25, 0xf9, 0xa9, 0x8d, 0xc5, 0xdb, - 0x6d, 0x41, 0x78, 0x70, 0x30, 0x7d, 0x79, 0x70, 0x74, 0x9c, 0x09, 
0xe9, 0x18, 0x61, 0x90, 0x35, - 0x0e, 0xb8, 0xe0, 0x3a, 0xdb, 0x56, 0x67, 0x6a, 0x9c, 0xff, 0x8d, 0xab, 0x43, 0x1a, 0xf4, 0xe2, - 0xed, 0xb6, 0xe0, 0x6b, 0x8d, 0x4b, 0x71, 0xe2, 0x15, 0x23, 0x04, 0x62, 0xc2, 0x84, 0x1a, 0x57, - 0x17, 0x6c, 0xdd, 0xea, 0xfa, 0x53, 0x13, 0xbc, 0xf1, 0xfe, 0xea, 0x10, 0x4c, 0x8c, 0x33, 0xb7, - 0x2e, 0xc8, 0x4f, 0x99, 0x48, 0x24, 0xfb, 0x98, 0xc2, 0xbc, 0xf4, 0x06, 0x9c, 0x1d, 0x18, 0x1b, - 0xc8, 0x24, 0x94, 0x76, 0xe9, 0x3e, 0x1f, 0xfa, 0x1a, 0xc8, 0x1e, 0xc9, 0x79, 0xa8, 0xec, 0xe9, - 0x76, 0x9f, 0x4e, 0x15, 0x79, 0x9a, 0x78, 0xf9, 0xad, 0xe2, 0xeb, 0x05, 0xed, 0xef, 0x96, 0x60, - 0x4c, 0x8d, 0x38, 0x6d, 0xcb, 0xd9, 0x25, 0x6f, 0x41, 0xc9, 0x76, 0x3b, 0x72, 0xdc, 0xfc, 0x9d, - 0x91, 0x47, 0xb1, 0x55, 0xb7, 0xd3, 0xaa, 0x1d, 0x1e, 0x4c, 0x97, 0x56, 0xdd, 0x0e, 0x32, 0x44, - 0x62, 0x40, 0x65, 0x57, 0xdf, 0xde, 0xd5, 0x79, 0x19, 0x9a, 0x73, 0xad, 0x91, 0xa1, 0x6f, 0x31, - 0x14, 0x56, 0xd6, 0x56, 0xe3, 0xf0, 0x60, 0xba, 0xc2, 0x5f, 0x51, 0x60, 0x13, 0x17, 0x1a, 0x5b, - 0xb6, 0x6e, 0xec, 0xee, 0xb8, 0x36, 0x9d, 0x2a, 0xe5, 0x14, 0xd4, 0x52, 0x48, 0xe2, 0x37, 0x87, - 0xaf, 0x18, 0xc9, 0x20, 0x06, 0x54, 0xfb, 0xa6, 0x6f, 0x39, 0xbb, 0x72, 0x0c, 0x7c, 0x63, 0x64, - 0x69, 0x9b, 0x8b, 0xfc, 0x9b, 0xe0, 0xf0, 0x60, 0xba, 0x2a, 0x9e, 0x51, 0x42, 0x6b, 0x3f, 0x6e, - 0xc2, 0x84, 0xfa, 0x49, 0x77, 0xa9, 0x17, 0xd0, 0xfb, 0xe4, 0x2a, 0x94, 0x1d, 0xd6, 0x35, 0xf9, - 0x4f, 0x6e, 0x8d, 0xc9, 0xe6, 0x52, 0xe6, 0x5d, 0x92, 0x53, 0x58, 0xc9, 0x44, 0x53, 0x91, 0x15, - 0x3e, 0x7a, 0xc9, 0xda, 0x1c, 0x46, 0x94, 0x4c, 0x3c, 0xa3, 0x84, 0x26, 0xef, 0x40, 0x99, 0x7f, - 0xbc, 0xa8, 0xea, 0xdf, 0x1d, 0x5d, 0x04, 0xfb, 0xf4, 0x3a, 0xfb, 0x02, 0xfe, 0xe1, 0x1c, 0x94, - 0x35, 0xc5, 0xbe, 0xb9, 0x2d, 0x2b, 0xf6, 0x77, 0x72, 0x54, 0xec, 0x92, 0x68, 0x8a, 0x9b, 0x8b, - 0x4b, 0xc8, 0x10, 0xc9, 0x5f, 0x2f, 0xc0, 0x59, 0xc3, 0x75, 0x02, 0x9d, 0xa9, 0x1a, 0x6a, 0x92, - 0x9d, 0xaa, 0x70, 0x39, 0x37, 0x47, 0x96, 0xb3, 0x90, 0x46, 0x6c, 0x3d, 0xcd, 0xe6, 0x8c, 0x81, - 0x64, 
0x1c, 0x94, 0x4d, 0xfe, 0x66, 0x01, 0x9e, 0x66, 0x63, 0xf9, 0x00, 0x33, 0x9f, 0x81, 0x4e, - 0xb6, 0x54, 0x17, 0x0f, 0x0f, 0xa6, 0x9f, 0x5e, 0xc9, 0x12, 0x86, 0xd9, 0x65, 0x60, 0xa5, 0x3b, - 0xa7, 0x0f, 0xaa, 0x25, 0x7c, 0x76, 0x6b, 0xce, 0xad, 0x9e, 0xa4, 0xaa, 0xd3, 0x7a, 0x56, 0x36, - 0xe5, 0x2c, 0xcd, 0x0e, 0xb3, 0x4a, 0x41, 0xae, 0x43, 0x6d, 0xcf, 0xb5, 0xfb, 0x5d, 0xea, 0x4f, - 0xd5, 0xf9, 0x10, 0x7b, 0x29, 0x6b, 0x88, 0xbd, 0xcb, 0x59, 0x5a, 0x67, 0x24, 0x7c, 0x4d, 0xbc, - 0xfb, 0xa8, 0xf2, 0x12, 0x0b, 0xaa, 0xb6, 0xd5, 0xb5, 0x02, 0x9f, 0x4f, 0x9c, 0xcd, 0xb9, 0xeb, - 0x23, 0x7f, 0x96, 0xe8, 0xa2, 0xab, 0x1c, 0x4c, 0xf4, 0x1a, 0xf1, 0x8c, 0x52, 0x00, 0x1b, 0x0a, - 0x7d, 0x43, 0xb7, 0xc5, 0xc4, 0xda, 0x9c, 0xfb, 0xd2, 0xe8, 0xdd, 0x86, 0xa1, 0xb4, 0xc6, 0xe5, - 0x37, 0x55, 0xf8, 0x2b, 0x0a, 0x6c, 0xf2, 0xfb, 0x30, 0x91, 0xf8, 0x9b, 0xfe, 0x54, 0x93, 0xd7, - 0xce, 0x73, 0x59, 0xb5, 0x13, 0x72, 0x45, 0x33, 0x4f, 0xa2, 0x85, 0xf8, 0x98, 0x02, 0x23, 0xb7, - 0xa0, 0xee, 0x5b, 0x26, 0x35, 0x74, 0xcf, 0x9f, 0x1a, 0x3b, 0x0a, 0xf0, 0xa4, 0x04, 0xae, 0xb7, - 0x65, 0x36, 0x0c, 0x01, 0xc8, 0x0c, 0x40, 0x4f, 0xf7, 0x02, 0x4b, 0x28, 0xaa, 0xe3, 0x5c, 0x69, - 0x9a, 0x38, 0x3c, 0x98, 0x86, 0xf5, 0x30, 0x15, 0x63, 0x1c, 0x8c, 0x9f, 0xe5, 0x5d, 0x71, 0x7a, - 0xfd, 0x40, 0x4c, 0xac, 0x0d, 0xc1, 0xdf, 0x0e, 0x53, 0x31, 0xc6, 0x41, 0xbe, 0x5d, 0x80, 0x67, - 0xa3, 0xd7, 0xc1, 0x4e, 0x76, 0xe6, 0xc4, 0x3b, 0xd9, 0xf4, 0xe1, 0xc1, 0xf4, 0xb3, 0xed, 0xe1, - 0x22, 0xf1, 0x61, 0xe5, 0xd1, 0xde, 0x82, 0xf1, 0xf9, 0x7e, 0xb0, 0xe3, 0x7a, 0xd6, 0x07, 0x5c, - 0xe9, 0x26, 0x4b, 0x50, 0x09, 0xb8, 0xf2, 0x24, 0xe6, 0xe5, 0x17, 0xb2, 0xaa, 0x5a, 0x28, 0xb2, - 0xb7, 0xe8, 0xbe, 0xd2, 0x06, 0xc4, 0xfc, 0x28, 0x94, 0x29, 0x91, 0x5d, 0xfb, 0xcb, 0x05, 0xa8, - 0xb5, 0x74, 0x63, 0xd7, 0xdd, 0xde, 0x26, 0x6f, 0x43, 0xdd, 0x72, 0x02, 0xea, 0xed, 0xe9, 0xb6, - 0x84, 0x9d, 0x89, 0xc1, 0x86, 0x2b, 0xb1, 0xe8, 0xbb, 0xd9, 0x9a, 0x87, 0x09, 0x5a, 0xec, 0xcb, - 0xb5, 0x02, 0xd7, 0x47, 0x57, 0x24, 0x06, 
0x86, 0x68, 0x64, 0x1a, 0x2a, 0x7e, 0x40, 0x7b, 0x3e, - 0x9f, 0x79, 0xc6, 0x45, 0x31, 0xda, 0x2c, 0x01, 0x45, 0xba, 0xf6, 0x77, 0x0a, 0xd0, 0x68, 0xe9, - 0xbe, 0x65, 0xb0, 0xaf, 0x24, 0x0b, 0x50, 0xee, 0xfb, 0xd4, 0x3b, 0xde, 0xb7, 0xf1, 0xc9, 0x62, - 0xd3, 0xa7, 0x1e, 0xf2, 0xcc, 0xe4, 0x0e, 0xd4, 0x7b, 0xba, 0xef, 0xdf, 0x73, 0x3d, 0x53, 0x4e, - 0x78, 0x47, 0x04, 0x12, 0xca, 0xb9, 0xcc, 0x8a, 0x21, 0x88, 0xd6, 0x84, 0x68, 0xc6, 0xd7, 0x7e, - 0x5a, 0x80, 0x73, 0xad, 0xfe, 0xf6, 0x36, 0xf5, 0xa4, 0x2e, 0x2a, 0xb5, 0x3c, 0x0a, 0x15, 0x8f, - 0x9a, 0x96, 0x2f, 0xcb, 0xbe, 0x38, 0x72, 0x0b, 0x42, 0x86, 0x22, 0x95, 0x4a, 0x5e, 0x5f, 0x3c, - 0x01, 0x05, 0x3a, 0xe9, 0x43, 0xe3, 0x3d, 0x1a, 0xf8, 0x81, 0x47, 0xf5, 0xae, 0xfc, 0xba, 0x1b, - 0x23, 0x8b, 0xba, 0x49, 0x83, 0x36, 0x47, 0x8a, 0xeb, 0xb0, 0x61, 0x22, 0x46, 0x92, 0xb4, 0xef, - 0x57, 0x60, 0x6c, 0xc1, 0xed, 0x6e, 0x59, 0x0e, 0x35, 0xaf, 0x9b, 0x1d, 0x4a, 0xde, 0x85, 0x32, - 0x35, 0x3b, 0x54, 0x7e, 0xed, 0xe8, 0xd3, 0x3d, 0x03, 0x8b, 0x94, 0x16, 0xf6, 0x86, 0x1c, 0x98, - 0xac, 0xc2, 0xc4, 0xb6, 0xe7, 0x76, 0xc5, 0x08, 0xba, 0xb1, 0xdf, 0x93, 0x1a, 0x6b, 0xeb, 0x57, - 0xd5, 0xa8, 0xb4, 0x94, 0xa0, 0x3e, 0x38, 0x98, 0x86, 0xe8, 0x0d, 0x53, 0x79, 0xc9, 0xdb, 0x30, - 0x15, 0xa5, 0x84, 0x43, 0xc9, 0x02, 0x5b, 0x44, 0x70, 0x8d, 0xa5, 0xd2, 0xba, 0x7c, 0x78, 0x30, - 0x3d, 0xb5, 0x34, 0x84, 0x07, 0x87, 0xe6, 0x26, 0x1f, 0x15, 0x60, 0x32, 0x22, 0x8a, 0xe1, 0x5d, - 0x2a, 0x2a, 0x27, 0x34, 0x6f, 0xf0, 0xd5, 0xd6, 0x52, 0x4a, 0x04, 0x0e, 0x08, 0x25, 0x4b, 0x30, - 0x16, 0xb8, 0xb1, 0xfa, 0xaa, 0xf0, 0xfa, 0xd2, 0x94, 0x79, 0x60, 0xc3, 0x1d, 0x5a, 0x5b, 0x89, - 0x7c, 0x04, 0xe1, 0x82, 0x7a, 0x4f, 0xd5, 0x54, 0x95, 0xd7, 0xd4, 0xa5, 0xc3, 0x83, 0xe9, 0x0b, - 0x1b, 0x99, 0x1c, 0x38, 0x24, 0x27, 0xf9, 0xa3, 0x02, 0x4c, 0x28, 0x92, 0xac, 0xa3, 0xda, 0x49, - 0xd6, 0x11, 0x61, 0x2d, 0x62, 0x23, 0x21, 0x00, 0x53, 0x02, 0xb5, 0x9f, 0x95, 0xa1, 0x11, 0x0e, - 0xb0, 0xe4, 0x79, 0xa8, 0xf0, 0x85, 0xbf, 0xd4, 0x9b, 0xc3, 0x99, 0x93, 0xdb, 
0x07, 0x50, 0xd0, - 0xc8, 0x0b, 0x50, 0x33, 0xdc, 0x6e, 0x57, 0x77, 0x4c, 0x6e, 0xcc, 0x69, 0xb4, 0x9a, 0x4c, 0x61, - 0x58, 0x10, 0x49, 0xa8, 0x68, 0xe4, 0x32, 0x94, 0x75, 0xaf, 0x23, 0xec, 0x2a, 0x0d, 0x31, 0x1e, - 0xcd, 0x7b, 0x1d, 0x1f, 0x79, 0x2a, 0xf9, 0x22, 0x94, 0xa8, 0xb3, 0x37, 0x55, 0x1e, 0xae, 0x91, - 0x5c, 0x77, 0xf6, 0xee, 0xea, 0x5e, 0xab, 0x29, 0xcb, 0x50, 0xba, 0xee, 0xec, 0x21, 0xcb, 0x43, - 0x56, 0xa1, 0x46, 0x9d, 0x3d, 0xf6, 0xef, 0xa5, 0xc1, 0xe3, 0x57, 0x86, 0x64, 0x67, 0x2c, 0x52, - 0x39, 0x0f, 0xf5, 0x1a, 0x99, 0x8c, 0x0a, 0x82, 0x7c, 0x05, 0xc6, 0x84, 0x8a, 0xb3, 0xc6, 0xfe, - 0x89, 0x3f, 0x55, 0xe5, 0x90, 0xd3, 0xc3, 0x75, 0x24, 0xce, 0x17, 0x19, 0x98, 0x62, 0x89, 0x3e, - 0x26, 0xa0, 0xc8, 0x57, 0xa0, 0xa1, 0xd6, 0xa3, 0xea, 0xcf, 0x66, 0xda, 0x66, 0xd4, 0x22, 0x16, - 0xe9, 0xfb, 0x7d, 0xcb, 0xa3, 0x5d, 0xea, 0x04, 0x7e, 0xeb, 0xac, 0x5a, 0xad, 0x2b, 0xaa, 0x8f, - 0x11, 0x1a, 0xd9, 0x1a, 0x34, 0x32, 0x09, 0x0b, 0xc9, 0xf3, 0x43, 0x46, 0xf5, 0x11, 0x2c, 0x4c, - 0x5f, 0x85, 0x33, 0xa1, 0x15, 0x48, 0x1a, 0x12, 0x84, 0xcd, 0xe4, 0x65, 0x96, 0x7d, 0x25, 0x49, - 0x7a, 0x70, 0x30, 0xfd, 0x5c, 0x86, 0x29, 0x21, 0x62, 0xc0, 0x34, 0x98, 0xf6, 0xbd, 0x12, 0x0c, - 0x6a, 0xff, 0xc9, 0x4a, 0x2b, 0x9c, 0x74, 0xa5, 0xa5, 0x3f, 0x48, 0x0c, 0x9f, 0xaf, 0xcb, 0x6c, - 0xf9, 0x3f, 0x2a, 0xeb, 0xc7, 0x94, 0x4e, 0xfa, 0xc7, 0x3c, 0x29, 0x7d, 0x47, 0xfb, 0xb8, 0x0c, - 0x13, 0x8b, 0x3a, 0xed, 0xba, 0xce, 0x23, 0xd7, 0x42, 0x85, 0x27, 0x62, 0x2d, 0x74, 0x0d, 0xea, - 0x1e, 0xed, 0xd9, 0x96, 0xa1, 0x0b, 0xe5, 0x4b, 0xda, 0x1e, 0x51, 0xa6, 0x61, 0x48, 0x1d, 0xb2, - 0x06, 0x2e, 0x3d, 0x91, 0x6b, 0xe0, 0xf2, 0xa7, 0xbf, 0x06, 0xd6, 0xfe, 0xa8, 0x08, 0x5c, 0x51, - 0x21, 0x57, 0xa1, 0xcc, 0x26, 0xe1, 0xb4, 0xe5, 0x85, 0x37, 0x1c, 0x4e, 0x21, 0x97, 0xa0, 0x18, - 0xb8, 0xb2, 0xe7, 0x81, 0xa4, 0x17, 0x37, 0x5c, 0x2c, 0x06, 0x2e, 0xf9, 0x00, 0xc0, 0x70, 0x1d, - 0xd3, 0x52, 0x26, 0xf9, 0x7c, 0x1f, 0xb6, 0xe4, 0x7a, 0xf7, 0x74, 0xcf, 0x5c, 0x08, 0x11, 0xc5, - 0x2a, 0x28, 0x7a, 
0xc7, 0x98, 0x34, 0xf2, 0x06, 0x54, 0x5d, 0x67, 0xa9, 0x6f, 0xdb, 0xbc, 0x42, - 0x1b, 0xad, 0x5f, 0x63, 0x4b, 0xd3, 0x3b, 0x3c, 0xe5, 0xc1, 0xc1, 0xf4, 0x45, 0xa1, 0xdf, 0xb2, - 0xb7, 0xb7, 0x3c, 0x2b, 0xb0, 0x9c, 0x4e, 0x3b, 0xf0, 0xf4, 0x80, 0x76, 0xf6, 0x51, 0x66, 0xd3, - 0xfe, 0xa4, 0x00, 0xcd, 0x25, 0xeb, 0x3e, 0x35, 0xdf, 0xb2, 0x1c, 0xd3, 0xbd, 0x47, 0x10, 0xaa, - 0x36, 0x75, 0x3a, 0xc1, 0xce, 0x88, 0xeb, 0x07, 0xb1, 0x36, 0xe6, 0x08, 0x28, 0x91, 0xc8, 0x2c, - 0x34, 0x84, 0xf6, 0x69, 0x39, 0x1d, 0x5e, 0x87, 0xf5, 0x68, 0xd0, 0x6b, 0x2b, 0x02, 0x46, 0x3c, - 0xda, 0x3e, 0x9c, 0x1d, 0xa8, 0x06, 0x62, 0x42, 0x39, 0xd0, 0x3b, 0x6a, 0x7c, 0x5d, 0x1a, 0xb9, - 0x82, 0x37, 0xf4, 0x4e, 0xac, 0x72, 0xf9, 0x1c, 0xbf, 0xa1, 0xb3, 0x39, 0x9e, 0xa1, 0x6b, 0xbf, - 0x28, 0x40, 0x7d, 0xa9, 0xef, 0x18, 0x7c, 0x89, 0xf6, 0x68, 0x8b, 0x9c, 0x52, 0x18, 0x8a, 0x99, - 0x0a, 0x43, 0x1f, 0xaa, 0xbb, 0xf7, 0x42, 0x85, 0xa2, 0x39, 0xb7, 0x36, 0x7a, 0xab, 0x90, 0x45, - 0x9a, 0xb9, 0xc5, 0xf1, 0x84, 0xc3, 0x68, 0x42, 0x16, 0xa8, 0x7a, 0xeb, 0x2d, 0x2e, 0x54, 0x0a, - 0xbb, 0xf4, 0x45, 0x68, 0xc6, 0xd8, 0x8e, 0x65, 0x3b, 0xfe, 0x47, 0x65, 0xa8, 0x2e, 0xb7, 0xdb, - 0xf3, 0xeb, 0x2b, 0xe4, 0x15, 0x68, 0x4a, 0x5f, 0xc2, 0xed, 0xa8, 0x0e, 0x42, 0x57, 0x52, 0x3b, - 0x22, 0x61, 0x9c, 0x8f, 0xa9, 0x63, 0x1e, 0xd5, 0xed, 0xae, 0xec, 0x2c, 0xa1, 0x3a, 0x86, 0x2c, - 0x11, 0x05, 0x8d, 0xe8, 0x30, 0xc1, 0x56, 0x78, 0xac, 0x0a, 0xc5, 0xea, 0x4d, 0x76, 0x9b, 0x23, - 0xae, 0xef, 0xb8, 0x92, 0xb8, 0x99, 0x00, 0xc0, 0x14, 0x20, 0x79, 0x1d, 0xea, 0x7a, 0x3f, 0xd8, - 0xe1, 0x0a, 0xb4, 0xe8, 0x1b, 0x97, 0xb9, 0xab, 0x45, 0xa6, 0x3d, 0x38, 0x98, 0x1e, 0xbb, 0x85, - 0xad, 0x57, 0xd4, 0x3b, 0x86, 0xdc, 0xac, 0x70, 0x6a, 0xc5, 0x28, 0x0b, 0x57, 0x39, 0x76, 0xe1, - 0xd6, 0x13, 0x00, 0x98, 0x02, 0x24, 0xef, 0xc0, 0xd8, 0x2e, 0xdd, 0x0f, 0xf4, 0x2d, 0x29, 0xa0, - 0x7a, 0x1c, 0x01, 0x93, 0x4c, 0x85, 0xbb, 0x15, 0xcb, 0x8e, 0x09, 0x30, 0xe2, 0xc3, 0xf9, 0x5d, - 0xea, 0x6d, 0x51, 0xcf, 0x95, 0xab, 0x4f, 0x29, 0xa4, 
0x76, 0x1c, 0x21, 0x53, 0x87, 0x07, 0xd3, - 0xe7, 0x6f, 0x65, 0xc0, 0x60, 0x26, 0xb8, 0xf6, 0xf3, 0x22, 0x9c, 0x59, 0x16, 0xce, 0x5c, 0xd7, - 0x13, 0x93, 0x30, 0xb9, 0x08, 0x25, 0xaf, 0xd7, 0xe7, 0x2d, 0xa7, 0x24, 0xcc, 0xb5, 0xb8, 0xbe, - 0x89, 0x2c, 0x8d, 0xbc, 0x0d, 0x75, 0x53, 0x0e, 0x19, 0x72, 0xf1, 0x3b, 0x92, 0xa1, 0x42, 0xbd, - 0x61, 0x88, 0xc6, 0x34, 0xfd, 0xae, 0xdf, 0x69, 0x5b, 0x1f, 0x50, 0xb9, 0x1e, 0xe4, 0x9a, 0xfe, - 0x9a, 0x48, 0x42, 0x45, 0x63, 0xb3, 0xea, 0x2e, 0xdd, 0x17, 0xab, 0xa1, 0x72, 0x34, 0xab, 0xde, - 0x92, 0x69, 0x18, 0x52, 0xc9, 0xb4, 0xea, 0x2c, 0xac, 0x15, 0x94, 0xc5, 0x4a, 0xfe, 0x2e, 0x4b, - 0x90, 0xfd, 0x86, 0x0d, 0x99, 0xef, 0x59, 0x41, 0x40, 0x3d, 0xf9, 0x1b, 0x47, 0x1a, 0x32, 0x6f, - 0x72, 0x04, 0x94, 0x48, 0xe4, 0x37, 0xa0, 0xc1, 0xc1, 0x5b, 0xb6, 0xbb, 0xc5, 0x7f, 0x5c, 0x43, - 0xac, 0xe9, 0xef, 0xaa, 0x44, 0x8c, 0xe8, 0xda, 0x2f, 0x8b, 0x70, 0x61, 0x99, 0x06, 0x42, 0xab, - 0x59, 0xa4, 0x3d, 0xdb, 0xdd, 0x67, 0xaa, 0x25, 0xd2, 0xf7, 0xc9, 0x9b, 0x00, 0x96, 0xbf, 0xd5, - 0xde, 0x33, 0x78, 0x3f, 0x10, 0x7d, 0xf8, 0xaa, 0xec, 0x92, 0xb0, 0xd2, 0x6e, 0x49, 0xca, 0x83, - 0xc4, 0x1b, 0xc6, 0xf2, 0x44, 0xcb, 0xab, 0xe2, 0x43, 0x96, 0x57, 0x6d, 0x80, 0x5e, 0xa4, 0xa0, - 0x96, 0x38, 0xe7, 0x9f, 0x53, 0x62, 0x8e, 0xa3, 0x9b, 0xc6, 0x60, 0xf2, 0xa8, 0x8c, 0x0e, 0x4c, - 0x9a, 0x74, 0x5b, 0xef, 0xdb, 0x41, 0xa8, 0x54, 0xcb, 0x4e, 0x7c, 0x74, 0xbd, 0x3c, 0x74, 0x34, - 0x2f, 0xa6, 0x90, 0x70, 0x00, 0x5b, 0xfb, 0x6e, 0x09, 0x2e, 0x2d, 0xd3, 0x20, 0xb4, 0xb8, 0xc8, - 0xd1, 0xb1, 0xdd, 0xa3, 0x06, 0xfb, 0x0b, 0x1f, 0x15, 0xa0, 0x6a, 0xeb, 0x5b, 0xd4, 0x66, 0xb3, - 0x17, 0xfb, 0x9a, 0x77, 0x47, 0x9e, 0x08, 0x86, 0x4b, 0x99, 0x59, 0xe5, 0x12, 0x52, 0x53, 0x83, - 0x48, 0x44, 0x29, 0x9e, 0x0d, 0xea, 0x86, 0xdd, 0xf7, 0x03, 0xea, 0xad, 0xbb, 0x5e, 0x20, 0xf5, - 0xc9, 0x70, 0x50, 0x5f, 0x88, 0x48, 0x18, 0xe7, 0x23, 0x73, 0x00, 0x86, 0x6d, 0x51, 0x27, 0xe0, - 0xb9, 0x44, 0xbf, 0x22, 0xea, 0xff, 0x2e, 0x84, 0x14, 0x8c, 0x71, 0x31, 0x51, 0x5d, 0xd7, 
0xb1, - 0x02, 0x57, 0x88, 0x2a, 0x27, 0x45, 0xad, 0x45, 0x24, 0x8c, 0xf3, 0xf1, 0x6c, 0x34, 0xf0, 0x2c, - 0xc3, 0xe7, 0xd9, 0x2a, 0xa9, 0x6c, 0x11, 0x09, 0xe3, 0x7c, 0x6c, 0xce, 0x8b, 0x7d, 0xff, 0xb1, - 0xe6, 0xbc, 0x6f, 0x35, 0xe0, 0x4a, 0xa2, 0x5a, 0x03, 0x3d, 0xa0, 0xdb, 0x7d, 0xbb, 0x4d, 0x03, - 0xf5, 0x03, 0x47, 0x9c, 0x0b, 0xff, 0x6a, 0xf4, 0xdf, 0x45, 0x08, 0x89, 0x71, 0x32, 0xff, 0x7d, - 0xa0, 0x80, 0x47, 0xfa, 0xf7, 0xb3, 0xd0, 0x70, 0xf4, 0xc0, 0xe7, 0x1d, 0x57, 0xf6, 0xd1, 0x50, - 0x0d, 0xbb, 0xad, 0x08, 0x18, 0xf1, 0x90, 0x75, 0x38, 0x2f, 0xab, 0xf8, 0xfa, 0xfd, 0x9e, 0xeb, - 0x05, 0xd4, 0x13, 0x79, 0xe5, 0x74, 0x2a, 0xf3, 0x9e, 0x5f, 0xcb, 0xe0, 0xc1, 0xcc, 0x9c, 0x64, - 0x0d, 0xce, 0x19, 0xc2, 0xad, 0x4e, 0x6d, 0x57, 0x37, 0x15, 0xa0, 0x30, 0x70, 0x85, 0x4b, 0xa3, - 0x85, 0x41, 0x16, 0xcc, 0xca, 0x97, 0x6e, 0xcd, 0xd5, 0x91, 0x5a, 0x73, 0x6d, 0x94, 0xd6, 0x5c, - 0x1f, 0xad, 0x35, 0x37, 0x8e, 0xd6, 0x9a, 0x59, 0xcd, 0xb3, 0x76, 0x44, 0x3d, 0xa6, 0x9e, 0x88, - 0x19, 0x36, 0x16, 0xb5, 0x11, 0xd6, 0x7c, 0x3b, 0x83, 0x07, 0x33, 0x73, 0x92, 0x2d, 0xb8, 0x24, - 0xd2, 0xaf, 0x3b, 0x86, 0xb7, 0xdf, 0x63, 0x13, 0x4f, 0x0c, 0xb7, 0x99, 0xb0, 0x30, 0x5e, 0x6a, - 0x0f, 0xe5, 0xc4, 0x87, 0xa0, 0x90, 0xdf, 0x86, 0x71, 0xf1, 0x97, 0xd6, 0xf4, 0x1e, 0x87, 0x15, - 0x31, 0x1c, 0x4f, 0x4b, 0xd8, 0xf1, 0x85, 0x38, 0x11, 0x93, 0xbc, 0x64, 0x1e, 0xce, 0xf4, 0xf6, - 0x0c, 0xf6, 0xb8, 0xb2, 0x7d, 0x9b, 0x52, 0x93, 0x9a, 0xdc, 0x69, 0xd4, 0x68, 0x3d, 0xa3, 0x0c, - 0x1d, 0xeb, 0x49, 0x32, 0xa6, 0xf9, 0xc9, 0xeb, 0x30, 0xe6, 0x07, 0xba, 0x17, 0x48, 0xb3, 0xde, - 0xd4, 0x84, 0x88, 0x71, 0x51, 0x56, 0xaf, 0x76, 0x8c, 0x86, 0x09, 0xce, 0xcc, 0xf9, 0xe2, 0xcc, - 0xe9, 0xcd, 0x17, 0x79, 0x46, 0xab, 0x7f, 0x59, 0x84, 0xab, 0xcb, 0x34, 0x58, 0x73, 0x1d, 0x69, - 0x14, 0xcd, 0x9a, 0xf6, 0x8f, 0x64, 0x13, 0x4d, 0x4e, 0xda, 0xc5, 0x13, 0x9d, 0xb4, 0x4b, 0x27, - 0x34, 0x69, 0x97, 0x4f, 0x71, 0xd2, 0xfe, 0x27, 0x45, 0x78, 0x26, 0x51, 0x93, 0xeb, 0xae, 0xa9, - 0x06, 0xfc, 0xcf, 0x2b, 0xf0, 
0x08, 0x15, 0xf8, 0x40, 0xe8, 0x9d, 0xdc, 0xad, 0x95, 0xd2, 0x78, - 0x3e, 0x4c, 0x6b, 0x3c, 0xef, 0xe4, 0x99, 0xf9, 0x32, 0x24, 0x1c, 0x69, 0xc6, 0xbb, 0x09, 0xc4, - 0x93, 0x4e, 0x38, 0x61, 0xfa, 0x89, 0x29, 0x3d, 0x61, 0x10, 0x1d, 0x0e, 0x70, 0x60, 0x46, 0x2e, - 0xd2, 0x86, 0xa7, 0x7d, 0xea, 0x04, 0x96, 0x43, 0xed, 0x24, 0x9c, 0xd0, 0x86, 0x9e, 0x93, 0x70, - 0x4f, 0xb7, 0xb3, 0x98, 0x30, 0x3b, 0x6f, 0x9e, 0x71, 0xe0, 0xdf, 0x00, 0x57, 0x39, 0x45, 0xd5, - 0x9c, 0x98, 0xc6, 0xf2, 0x51, 0x5a, 0x63, 0x79, 0x37, 0xff, 0x7f, 0x1b, 0x4d, 0x5b, 0x99, 0x03, - 0xe0, 0x7f, 0x21, 0xae, 0xae, 0x84, 0x93, 0x34, 0x86, 0x14, 0x8c, 0x71, 0xb1, 0x09, 0x48, 0xd5, - 0x73, 0x5c, 0x53, 0x09, 0x27, 0xa0, 0x76, 0x9c, 0x88, 0x49, 0xde, 0xa1, 0xda, 0x4e, 0x65, 0x64, - 0x6d, 0xe7, 0x26, 0x90, 0x84, 0xe1, 0x51, 0xe0, 0x55, 0x93, 0x31, 0x9c, 0x2b, 0x03, 0x1c, 0x98, - 0x91, 0x6b, 0x48, 0x53, 0xae, 0x9d, 0x6c, 0x53, 0xae, 0x8f, 0xde, 0x94, 0xc9, 0xbb, 0x70, 0x91, - 0x8b, 0x92, 0xf5, 0x93, 0x04, 0x16, 0x7a, 0xcf, 0xaf, 0x48, 0xe0, 0x8b, 0x38, 0x8c, 0x11, 0x87, - 0x63, 0xb0, 0xff, 0x63, 0x78, 0xd4, 0x64, 0xc2, 0x75, 0x7b, 0xb8, 0x4e, 0xb4, 0x90, 0xc1, 0x83, - 0x99, 0x39, 0x59, 0x13, 0x0b, 0x58, 0x33, 0xd4, 0xb7, 0x6c, 0x6a, 0xca, 0x18, 0xd6, 0xb0, 0x89, - 0x6d, 0xac, 0xb6, 0x25, 0x05, 0x63, 0x5c, 0x59, 0x6a, 0xca, 0xd8, 0x31, 0xd5, 0x94, 0x65, 0x6e, - 0xa5, 0xdf, 0x4e, 0x68, 0x43, 0x52, 0xd7, 0x09, 0xa3, 0x92, 0x17, 0xd2, 0x0c, 0x38, 0x98, 0x87, - 0x6b, 0x89, 0x86, 0x67, 0xf5, 0x02, 0x3f, 0x89, 0x35, 0x91, 0xd2, 0x12, 0x33, 0x78, 0x30, 0x33, - 0x27, 0xd3, 0xcf, 0x77, 0xa8, 0x6e, 0x07, 0x3b, 0x49, 0xc0, 0x33, 0x49, 0xfd, 0xfc, 0xc6, 0x20, - 0x0b, 0x66, 0xe5, 0xcb, 0x9c, 0x90, 0x26, 0x9f, 0x4c, 0xb5, 0xea, 0x8f, 0x4b, 0x70, 0x71, 0x99, - 0x06, 0x61, 0x78, 0xcf, 0xe7, 0x66, 0x94, 0x4f, 0xc1, 0x8c, 0xf2, 0xcd, 0x0a, 0x9c, 0x5b, 0xa6, - 0xc1, 0x80, 0x36, 0xf6, 0xa7, 0xb4, 0xfa, 0xd7, 0xe0, 0x5c, 0x14, 0x51, 0xd6, 0x0e, 0x5c, 0x4f, - 0xcc, 0xe5, 0xa9, 0xd5, 0x72, 0x7b, 0x90, 0x05, 0xb3, 0xf2, 0x91, 
0xaf, 0xc0, 0x33, 0x7c, 0xaa, - 0x77, 0x3a, 0xc2, 0x3e, 0x2b, 0x8c, 0x09, 0xb1, 0x3d, 0x11, 0xd3, 0x12, 0xf2, 0x99, 0x76, 0x36, - 0x1b, 0x0e, 0xcb, 0x4f, 0xbe, 0x06, 0x63, 0x3d, 0xab, 0x47, 0x6d, 0xcb, 0xe1, 0xfa, 0x59, 0xee, - 0x90, 0x90, 0xf5, 0x18, 0x58, 0xb4, 0x80, 0x8b, 0xa7, 0x62, 0x42, 0x60, 0x66, 0x4b, 0xad, 0x9f, - 0x62, 0x4b, 0xfd, 0x9f, 0x45, 0xa8, 0x2d, 0x7b, 0x6e, 0xbf, 0xd7, 0xda, 0x27, 0x1d, 0xa8, 0xde, - 0xe3, 0xce, 0x33, 0xe9, 0x9a, 0x1a, 0x3d, 0x2a, 0x5b, 0xf8, 0xe0, 0x22, 0x95, 0x48, 0xbc, 0xa3, - 0x84, 0x67, 0x8d, 0x78, 0x97, 0xee, 0x53, 0x53, 0xfa, 0xd0, 0xc2, 0x46, 0x7c, 0x8b, 0x25, 0xa2, - 0xa0, 0x91, 0x2e, 0x9c, 0xd1, 0x6d, 0xdb, 0xbd, 0x47, 0xcd, 0x55, 0x3d, 0xa0, 0x0e, 0xf5, 0x95, - 0x4b, 0xf2, 0xb8, 0x66, 0x69, 0xee, 0xd7, 0x9f, 0x4f, 0x42, 0x61, 0x1a, 0x9b, 0xbc, 0x07, 0x35, - 0x3f, 0x70, 0x3d, 0xa5, 0x6c, 0x35, 0xe7, 0x16, 0x46, 0xff, 0xe9, 0xad, 0x2f, 0xb7, 0x05, 0x94, - 0xb0, 0xd9, 0xcb, 0x17, 0x54, 0x02, 0xb4, 0x6f, 0x14, 0x00, 0x6e, 0x6c, 0x6c, 0xac, 0x4b, 0xf7, - 0x82, 0x09, 0x65, 0xbd, 0x1f, 0x3a, 0x2a, 0x47, 0x77, 0x08, 0x26, 0xc2, 0x32, 0xa5, 0x0f, 0xaf, - 0x1f, 0xec, 0x20, 0x47, 0x27, 0xbf, 0x0e, 0x35, 0xa9, 0x20, 0xcb, 0x6a, 0x0f, 0x43, 0x0b, 0xa4, - 0x12, 0x8d, 0x8a, 0xae, 0xfd, 0xc3, 0x22, 0xc0, 0x8a, 0x69, 0xd3, 0xb6, 0x0a, 0xa4, 0x6f, 0x04, - 0x3b, 0x1e, 0xf5, 0x77, 0x5c, 0xdb, 0x1c, 0xd1, 0x9b, 0xca, 0x6d, 0xfe, 0x1b, 0x0a, 0x04, 0x23, - 0x3c, 0x62, 0xc2, 0x98, 0x1f, 0xd0, 0x9e, 0x8a, 0xd4, 0x1c, 0xd1, 0x89, 0x32, 0x29, 0xec, 0x22, - 0x11, 0x0e, 0x26, 0x50, 0x89, 0x0e, 0x4d, 0xcb, 0x31, 0x44, 0x07, 0x69, 0xed, 0x8f, 0xd8, 0x90, - 0xce, 0xb0, 0x15, 0xc7, 0x4a, 0x04, 0x83, 0x71, 0x4c, 0xed, 0x27, 0x45, 0xb8, 0xc0, 0xe5, 0xb1, - 0x62, 0x24, 0xe2, 0x31, 0xc9, 0x5f, 0x18, 0xd8, 0xf4, 0xf7, 0x67, 0x8f, 0x26, 0x5a, 0xec, 0x19, - 0x5b, 0xa3, 0x81, 0x1e, 0xe9, 0x73, 0x51, 0x5a, 0x6c, 0xa7, 0x5f, 0x1f, 0xca, 0x3e, 0x1b, 0xaf, - 0x44, 0xed, 0xb5, 0x47, 0x6e, 0x42, 0xd9, 0x1f, 0xc0, 0x47, 0xaf, 0xd0, 0x6b, 0xcc, 0x47, 0x2d, - 0x2e, 
0x8e, 0xfc, 0x25, 0xa8, 0xfa, 0x81, 0x1e, 0xf4, 0x55, 0xd7, 0xdc, 0x3c, 0x69, 0xc1, 0x1c, - 0x3c, 0x1a, 0x47, 0xc4, 0x3b, 0x4a, 0xa1, 0xda, 0x4f, 0x0a, 0x70, 0x29, 0x3b, 0xe3, 0xaa, 0xe5, - 0x07, 0xe4, 0xcf, 0x0f, 0x54, 0xfb, 0x11, 0xff, 0x38, 0xcb, 0xcd, 0x2b, 0x3d, 0x8c, 0x0b, 0x57, - 0x29, 0xb1, 0x2a, 0x0f, 0xa0, 0x62, 0x05, 0xb4, 0xab, 0xd6, 0x97, 0x77, 0x4e, 0xf8, 0xd3, 0x63, - 0x53, 0x3b, 0x93, 0x82, 0x42, 0x98, 0xf6, 0x71, 0x71, 0xd8, 0x27, 0xf3, 0xe9, 0xc3, 0x4e, 0xc6, - 0xfc, 0xde, 0xca, 0x17, 0xf3, 0x9b, 0x2c, 0xd0, 0x60, 0xe8, 0xef, 0x5f, 0x1c, 0x0c, 0xfd, 0xbd, - 0x93, 0x3f, 0xf4, 0x37, 0x55, 0x0d, 0x43, 0x23, 0x80, 0x3f, 0x29, 0xc1, 0xe5, 0x87, 0x35, 0x1b, - 0x36, 0x9f, 0xc9, 0xd6, 0x99, 0x77, 0x3e, 0x7b, 0x78, 0x3b, 0x24, 0x73, 0x50, 0xe9, 0xed, 0xe8, - 0xbe, 0x52, 0xca, 0xd4, 0x82, 0xa5, 0xb2, 0xce, 0x12, 0x1f, 0xb0, 0x41, 0x83, 0x2b, 0x73, 0xfc, - 0x15, 0x05, 0x2b, 0x1b, 0x8e, 0xbb, 0xd4, 0xf7, 0x23, 0x9b, 0x40, 0x38, 0x1c, 0xaf, 0x89, 0x64, - 0x54, 0x74, 0x12, 0x40, 0x55, 0x98, 0x98, 0xe5, 0xcc, 0x34, 0x7a, 0x20, 0x57, 0x46, 0x98, 0x78, - 0xf4, 0x51, 0xd2, 0x5b, 0x21, 0x65, 0x91, 0x19, 0x28, 0x07, 0x51, 0xd0, 0xae, 0x5a, 0x9a, 0x97, - 0x33, 0xf4, 0x53, 0xce, 0xc7, 0x16, 0xf6, 0xee, 0x16, 0x37, 0xaa, 0x9b, 0xd2, 0x7f, 0x6e, 0xb9, - 0x0e, 0x57, 0xc8, 0x4a, 0xd1, 0xc2, 0xfe, 0xce, 0x00, 0x07, 0x66, 0xe4, 0xd2, 0xfe, 0x7d, 0x1d, - 0x2e, 0x64, 0xb7, 0x07, 0x56, 0x6f, 0x7b, 0xd4, 0xf3, 0x19, 0x76, 0x21, 0x59, 0x6f, 0x77, 0x45, - 0x32, 0x2a, 0xfa, 0x67, 0x3a, 0xe0, 0xec, 0x9b, 0x05, 0xb8, 0xe8, 0x49, 0x1f, 0xd1, 0xe3, 0x08, - 0x3a, 0x7b, 0x4e, 0x98, 0x33, 0x86, 0x08, 0xc4, 0xe1, 0x65, 0x21, 0x7f, 0xaf, 0x00, 0x53, 0xdd, - 0x94, 0x9d, 0xe3, 0x14, 0xf7, 0xad, 0xf1, 0xa8, 0xf8, 0xb5, 0x21, 0xf2, 0x70, 0x68, 0x49, 0xc8, - 0xd7, 0xa0, 0xd9, 0x63, 0xed, 0xc2, 0x0f, 0xa8, 0x63, 0xa8, 0xad, 0x6b, 0xa3, 0xf7, 0xa4, 0xf5, - 0x08, 0x4b, 0x85, 0xa2, 0x09, 0xfd, 0x20, 0x46, 0xc0, 0xb8, 0xc4, 0x27, 0x7c, 0xa3, 0xda, 0x35, - 0xa8, 0xfb, 0x34, 0x08, 0x2c, 0xa7, 0x23, 
0xd6, 0x1b, 0x0d, 0xd1, 0x57, 0xda, 0x32, 0x0d, 0x43, - 0x2a, 0xf9, 0x0d, 0x68, 0x70, 0x97, 0xd3, 0xbc, 0xd7, 0xf1, 0xa7, 0x1a, 0x3c, 0x5c, 0x6c, 0x5c, - 0x04, 0xc0, 0xc9, 0x44, 0x8c, 0xe8, 0xe4, 0x65, 0x18, 0xdb, 0xe2, 0xdd, 0x57, 0xee, 0x5d, 0x16, - 0x36, 0x2e, 0xae, 0xad, 0xb5, 0x62, 0xe9, 0x98, 0xe0, 0x22, 0x73, 0x00, 0x34, 0xf4, 0xcb, 0xa5, - 0xed, 0x59, 0x91, 0xc7, 0x0e, 0x63, 0x5c, 0xe4, 0x39, 0x28, 0x05, 0xb6, 0xcf, 0x6d, 0x58, 0xf5, - 0x68, 0x09, 0xba, 0xb1, 0xda, 0x46, 0x96, 0xae, 0xfd, 0xb2, 0x00, 0x67, 0x52, 0x9b, 0x4b, 0x58, - 0x96, 0xbe, 0x67, 0xcb, 0x61, 0x24, 0xcc, 0xb2, 0x89, 0xab, 0xc8, 0xd2, 0xc9, 0xbb, 0x52, 0x2d, - 0x2f, 0xe6, 0x3c, 0xa6, 0xe1, 0xb6, 0x1e, 0xf8, 0x4c, 0x0f, 0x1f, 0xd0, 0xc8, 0xb9, 0x9b, 0x2f, - 0x2a, 0x8f, 0x9c, 0x07, 0x62, 0x6e, 0xbe, 0x88, 0x86, 0x09, 0xce, 0x94, 0xc1, 0xaf, 0x7c, 0x14, - 0x83, 0x9f, 0xf6, 0x27, 0xc5, 0x58, 0x0d, 0x48, 0xcd, 0xfe, 0x11, 0x35, 0xf0, 0x22, 0x9b, 0x40, - 0xc3, 0xc9, 0xbd, 0x11, 0x9f, 0xff, 0xf8, 0x64, 0x2c, 0xa9, 0xe4, 0x2d, 0x51, 0xf7, 0xa5, 0x9c, - 0x9b, 0x61, 0x37, 0x56, 0xdb, 0x22, 0xba, 0x4a, 0xfd, 0xb5, 0xf0, 0x17, 0x94, 0x4f, 0xe9, 0x17, - 0x68, 0xff, 0xba, 0x04, 0xcd, 0x9b, 0xee, 0xd6, 0x67, 0x24, 0x82, 0x3a, 0x7b, 0x9a, 0x2a, 0x7e, - 0x8a, 0xd3, 0xd4, 0x26, 0x3c, 0x13, 0x04, 0x76, 0x9b, 0x1a, 0xae, 0x63, 0xfa, 0xf3, 0xdb, 0x01, - 0xf5, 0x96, 0x2c, 0xc7, 0xf2, 0x77, 0xa8, 0x29, 0xdd, 0x49, 0xcf, 0x1e, 0x1e, 0x4c, 0x3f, 0xb3, - 0xb1, 0xb1, 0x9a, 0xc5, 0x82, 0xc3, 0xf2, 0xf2, 0x61, 0x43, 0xec, 0x04, 0xe4, 0x3b, 0x65, 0x64, - 0xcc, 0x8d, 0x18, 0x36, 0x62, 0xe9, 0x98, 0xe0, 0xd2, 0xbe, 0x53, 0x84, 0x46, 0xb8, 0x01, 0x9f, - 0xbc, 0x00, 0xb5, 0x2d, 0xcf, 0xdd, 0xa5, 0x9e, 0xf0, 0xdc, 0xc9, 0x9d, 0x32, 0x2d, 0x91, 0x84, - 0x8a, 0x46, 0x9e, 0x87, 0x4a, 0xe0, 0xf6, 0x2c, 0x23, 0x6d, 0x50, 0xdb, 0x60, 0x89, 0x28, 0x68, - 0xa7, 0xd7, 0xc0, 0x5f, 0x4c, 0xa8, 0x76, 0x8d, 0xa1, 0xca, 0xd8, 0x3b, 0x50, 0xf6, 0x75, 0xdf, - 0x96, 0xf3, 0x69, 0x8e, 0xbd, 0xec, 0xf3, 0xed, 0x55, 0xb9, 0x97, 0x7d, 0xbe, 
0xbd, 0x8a, 0x1c, - 0x54, 0xfb, 0x59, 0x11, 0x9a, 0xa2, 0xde, 0xc4, 0xa8, 0x70, 0x92, 0x35, 0xf7, 0x06, 0x0f, 0xa5, - 0xf0, 0xfb, 0x5d, 0xea, 0x71, 0x33, 0x93, 0x1c, 0xe4, 0xe2, 0xfe, 0x81, 0x88, 0x18, 0x86, 0x53, - 0x44, 0x49, 0xaa, 0xea, 0xcb, 0xa7, 0x58, 0xf5, 0x95, 0x23, 0x55, 0x7d, 0xf5, 0x34, 0xaa, 0xfe, - 0xa3, 0x22, 0x34, 0x56, 0xad, 0x6d, 0x6a, 0xec, 0x1b, 0x36, 0xdf, 0x13, 0x68, 0x52, 0x9b, 0x06, - 0x74, 0xd9, 0xd3, 0x0d, 0xba, 0x4e, 0x3d, 0x8b, 0x1f, 0x50, 0xc3, 0xfa, 0x07, 0x1f, 0x81, 0xe4, - 0x9e, 0xc0, 0xc5, 0x21, 0x3c, 0x38, 0x34, 0x37, 0x59, 0x81, 0x31, 0x93, 0xfa, 0x96, 0x47, 0xcd, - 0xf5, 0xd8, 0x42, 0xe5, 0x05, 0x35, 0xd5, 0x2c, 0xc6, 0x68, 0x0f, 0x0e, 0xa6, 0xc7, 0x95, 0x81, - 0x52, 0xac, 0x58, 0x12, 0x59, 0x59, 0x97, 0xef, 0xe9, 0x7d, 0x3f, 0xab, 0x8c, 0xb1, 0x2e, 0xbf, - 0x9e, 0xcd, 0x82, 0xc3, 0xf2, 0x6a, 0x15, 0x28, 0xad, 0xba, 0x1d, 0xed, 0xe3, 0x12, 0x84, 0x27, - 0x19, 0x91, 0xbf, 0x52, 0x80, 0xa6, 0xee, 0x38, 0x6e, 0x20, 0x4f, 0x09, 0x12, 0x1e, 0x78, 0xcc, - 0x7d, 0x60, 0xd2, 0xcc, 0x7c, 0x04, 0x2a, 0x9c, 0xb7, 0xa1, 0x43, 0x39, 0x46, 0xc1, 0xb8, 0x6c, - 0xd2, 0x4f, 0xf9, 0x93, 0xd7, 0xf2, 0x97, 0xe2, 0x08, 0xde, 0xe3, 0x4b, 0x5f, 0x82, 0xc9, 0x74, - 0x61, 0x8f, 0xe3, 0x0e, 0xca, 0xe5, 0x98, 0x2f, 0x02, 0x44, 0x31, 0x25, 0x8f, 0xc1, 0x88, 0x65, - 0x25, 0x8c, 0x58, 0xcb, 0xa3, 0x57, 0x70, 0x58, 0xe8, 0xa1, 0x86, 0xab, 0xf7, 0x53, 0x86, 0xab, - 0x95, 0x93, 0x10, 0xf6, 0x70, 0x63, 0xd5, 0x3f, 0x28, 0xc0, 0x64, 0xc4, 0x2c, 0x77, 0xc8, 0xbe, - 0x06, 0xe3, 0x1e, 0xd5, 0xcd, 0x96, 0x1e, 0x18, 0x3b, 0x3c, 0xd4, 0xbb, 0xc0, 0x63, 0xb3, 0xcf, - 0x1e, 0x1e, 0x4c, 0x8f, 0x63, 0x9c, 0x80, 0x49, 0x3e, 0xa2, 0x43, 0x93, 0x25, 0x6c, 0x58, 0x5d, - 0xea, 0xf6, 0x83, 0x11, 0xad, 0xa6, 0x7c, 0xc1, 0x82, 0x11, 0x0c, 0xc6, 0x31, 0xb5, 0x4f, 0x0a, - 0x30, 0x11, 0x2f, 0xf0, 0xa9, 0x5b, 0xd4, 0x76, 0x92, 0x16, 0xb5, 0x85, 0x13, 0xf8, 0x27, 0x43, - 0xac, 0x68, 0x3f, 0xaf, 0xc7, 0x3f, 0x8d, 0x5b, 0xce, 0xe2, 0xc6, 0x82, 0xc2, 0x43, 0x8d, 0x05, - 0x9f, 0xfd, 0xc3, 
0x6b, 0x86, 0x69, 0xb9, 0xe5, 0x27, 0x58, 0xcb, 0xfd, 0x34, 0x4f, 0xc0, 0x89, - 0x9d, 0xe2, 0x52, 0xcd, 0x71, 0x8a, 0x4b, 0x37, 0x3c, 0xc5, 0xa5, 0x76, 0x62, 0x83, 0xce, 0x51, - 0x4e, 0x72, 0xa9, 0x3f, 0xd6, 0x93, 0x5c, 0x1a, 0xa7, 0x75, 0x92, 0x0b, 0xe4, 0x3d, 0xc9, 0xe5, - 0xc3, 0x02, 0x4c, 0x98, 0x89, 0x1d, 0xb3, 0xdc, 0xb6, 0x90, 0x67, 0xaa, 0x49, 0x6e, 0xc0, 0x15, - 0x5b, 0xa6, 0x92, 0x69, 0x98, 0x12, 0xa9, 0xfd, 0xfd, 0x5a, 0x7c, 0x1e, 0x78, 0xdc, 0xa6, 0xea, - 0x57, 0x93, 0xa6, 0xea, 0xab, 0x69, 0x53, 0xf5, 0x99, 0x58, 0x14, 0x69, 0xdc, 0x5c, 0xfd, 0x85, - 0xd8, 0xf0, 0x58, 0xe2, 0x27, 0xa7, 0x84, 0x35, 0x9d, 0x31, 0x44, 0x7e, 0x01, 0xea, 0xbe, 0x3a, - 0x73, 0x52, 0x2c, 0x6c, 0xa2, 0xff, 0xa2, 0xce, 0x83, 0x0c, 0x39, 0x98, 0x26, 0xee, 0x51, 0xdd, - 0x77, 0x9d, 0xb4, 0x26, 0x8e, 0x3c, 0x15, 0x25, 0x35, 0x6e, 0x32, 0xaf, 0x3e, 0xc2, 0x64, 0xae, - 0x43, 0xd3, 0xd6, 0xfd, 0x60, 0xb3, 0x67, 0xea, 0x01, 0x35, 0x65, 0x7f, 0xfb, 0x33, 0x47, 0x9b, - 0xab, 0xd8, 0xfc, 0x17, 0x29, 0x84, 0xab, 0x11, 0x0c, 0xc6, 0x31, 0x89, 0x09, 0x63, 0xec, 0x95, - 0xf7, 0x06, 0x73, 0x5e, 0x1d, 0x01, 0x70, 0x1c, 0x19, 0xa1, 0xa5, 0x67, 0x35, 0x86, 0x83, 0x09, - 0xd4, 0x21, 0x56, 0xf5, 0xc6, 0x28, 0x56, 0x75, 0xf2, 0xdb, 0x42, 0xd9, 0xd8, 0x57, 0x3f, 0x8c, - 0x5b, 0xe3, 0xc6, 0xa3, 0xa8, 0x42, 0x8c, 0x13, 0x31, 0xc9, 0x4b, 0xe6, 0xe1, 0x8c, 0xd1, 0xf7, - 0x3c, 0x1e, 0x47, 0x24, 0xb3, 0x37, 0x79, 0xf6, 0x30, 0x5e, 0x6c, 0x21, 0x49, 0xc6, 0x34, 0x3f, - 0x83, 0xe8, 0xcb, 0x9a, 0x54, 0x10, 0x63, 0x49, 0x88, 0xcd, 0x24, 0x19, 0xd3, 0xfc, 0x7c, 0xa3, - 0x84, 0x40, 0xbd, 0xa1, 0xfb, 0x3b, 0x32, 0xd8, 0x2c, 0xda, 0x28, 0x11, 0x91, 0x30, 0xce, 0x47, - 0xe6, 0x00, 0x04, 0x12, 0xcf, 0x35, 0x91, 0x8c, 0xc1, 0xdc, 0x0c, 0x29, 0x18, 0xe3, 0xd2, 0x3e, - 0x6c, 0x40, 0xf3, 0xb6, 0x1e, 0x58, 0x7b, 0x94, 0xfb, 0xbc, 0x4e, 0xc7, 0xf1, 0xf0, 0xb7, 0x0a, - 0x70, 0x21, 0x19, 0xd8, 0x78, 0x8a, 0xde, 0x07, 0x7e, 0x4c, 0x0a, 0x66, 0x4a, 0xc3, 0x21, 0xa5, - 0xe0, 0x7e, 0x88, 0x81, 0x38, 0xc9, 0xd3, 0xf6, 0x43, 
0xb4, 0x87, 0x09, 0xc4, 0xe1, 0x65, 0xf9, - 0xac, 0xf8, 0x21, 0x9e, 0xec, 0xd3, 0xf4, 0x52, 0x5e, 0x92, 0xda, 0x13, 0xe3, 0x25, 0xa9, 0x3f, - 0x11, 0xaa, 0x69, 0x2f, 0xe6, 0x25, 0x69, 0xe4, 0x8c, 0xd6, 0x91, 0x7b, 0x01, 0x04, 0xda, 0x30, - 0x6f, 0x0b, 0xdf, 0xc6, 0xaf, 0xac, 0xd7, 0x4c, 0xa3, 0xdb, 0xd2, 0x7d, 0xcb, 0x90, 0x4a, 0x42, - 0x8e, 0xd3, 0x43, 0xd5, 0xf9, 0x66, 0xc2, 0xa9, 0xcf, 0x5f, 0x51, 0x60, 0x47, 0xc7, 0xb9, 0x15, - 0x73, 0x1d, 0xe7, 0x46, 0x16, 0xa0, 0xec, 0xec, 0xd2, 0xfd, 0xe3, 0x6d, 0x88, 0xe7, 0x2b, 0x95, - 0xdb, 0xb7, 0xe8, 0x3e, 0xf2, 0xcc, 0xda, 0x77, 0x8a, 0x00, 0xec, 0xf3, 0x8f, 0xe6, 0xaf, 0xf8, - 0x75, 0xa8, 0xf9, 0x7d, 0x6e, 0x59, 0x90, 0xea, 0x4d, 0x14, 0xe2, 0x24, 0x92, 0x51, 0xd1, 0xc9, - 0xf3, 0x50, 0x79, 0xbf, 0x4f, 0xfb, 0xca, 0xf9, 0x1e, 0x2a, 0xb7, 0x5f, 0x66, 0x89, 0x28, 0x68, - 0xa7, 0x67, 0x7b, 0x54, 0x7e, 0x8d, 0xca, 0x69, 0xf9, 0x35, 0x1a, 0x50, 0xbb, 0xed, 0xf2, 0x88, - 0x49, 0xed, 0xbf, 0x17, 0x01, 0xa2, 0x88, 0x34, 0xf2, 0x8d, 0x02, 0x3c, 0x1d, 0x76, 0xb8, 0x40, - 0xac, 0x51, 0xf8, 0x81, 0xbd, 0xb9, 0x7d, 0x1c, 0x59, 0x9d, 0x9d, 0x8f, 0x40, 0xeb, 0x59, 0xe2, - 0x30, 0xbb, 0x14, 0x04, 0xa1, 0x4e, 0xbb, 0xbd, 0x60, 0x7f, 0xd1, 0xf2, 0x64, 0x0b, 0xcc, 0x0c, - 0x7c, 0xbc, 0x2e, 0x79, 0x44, 0x56, 0xb9, 0x90, 0xe6, 0x9d, 0x48, 0x51, 0x30, 0xc4, 0x21, 0x3b, - 0x50, 0x77, 0xdc, 0x77, 0x7d, 0x56, 0x1d, 0xb2, 0x39, 0xbe, 0x39, 0x7a, 0x95, 0x8b, 0x6a, 0x15, - 0x36, 0x71, 0xf9, 0x82, 0x35, 0x47, 0x56, 0xf6, 0xd7, 0x8b, 0x70, 0x2e, 0xa3, 0x1e, 0xc8, 0x9b, - 0x30, 0x29, 0x83, 0xff, 0xa2, 0x93, 0xab, 0x0b, 0xd1, 0xc9, 0xd5, 0xed, 0x14, 0x0d, 0x07, 0xb8, - 0xc9, 0xbb, 0x00, 0xba, 0x61, 0x50, 0xdf, 0x5f, 0x73, 0x4d, 0xa5, 0xbd, 0xbf, 0xc1, 0xd4, 0x97, - 0xf9, 0x30, 0xf5, 0xc1, 0xc1, 0xf4, 0x6f, 0x66, 0xc5, 0xf3, 0xa6, 0xea, 0x39, 0xca, 0x80, 0x31, - 0x48, 0xf2, 0x55, 0x00, 0xb1, 0x50, 0x0d, 0x8f, 0x1c, 0x78, 0x84, 0x75, 0x67, 0x46, 0x1d, 0xee, - 0x34, 0xf3, 0xe5, 0xbe, 0xee, 0x04, 0x56, 0xb0, 0x2f, 0x4e, 0x78, 0xb9, 0x1b, 0xa2, 0x60, 
0x0c, - 0x51, 0xfb, 0x17, 0x45, 0xa8, 0x2b, 0xbb, 0xf2, 0x63, 0x30, 0x26, 0x76, 0x12, 0xc6, 0xc4, 0x13, - 0x8a, 0xe0, 0xcd, 0x32, 0x25, 0xba, 0x29, 0x53, 0xe2, 0x72, 0x7e, 0x51, 0x0f, 0x37, 0x24, 0x7e, - 0xbb, 0x08, 0x13, 0x8a, 0x35, 0xaf, 0x19, 0xf1, 0x77, 0xe1, 0x8c, 0xf0, 0xbc, 0xaf, 0xe9, 0xf7, - 0xc5, 0x61, 0x37, 0xbc, 0xc2, 0xca, 0x22, 0x68, 0xb6, 0x95, 0x24, 0x61, 0x9a, 0x97, 0x35, 0x6b, - 0x91, 0xb4, 0xc9, 0x56, 0x5d, 0xc2, 0x57, 0x27, 0x56, 0x87, 0xbc, 0x59, 0xb7, 0x52, 0x34, 0x1c, - 0xe0, 0x4e, 0xdb, 0x31, 0xcb, 0xa7, 0x60, 0xc7, 0xfc, 0x0f, 0x05, 0x18, 0x8b, 0xea, 0xeb, 0xd4, - 0xad, 0x98, 0xdb, 0x49, 0x2b, 0xe6, 0x7c, 0xee, 0xe6, 0x30, 0xc4, 0x86, 0xf9, 0xd7, 0x6a, 0x90, - 0x08, 0x24, 0x27, 0x5b, 0x70, 0xc9, 0xca, 0x0c, 0x87, 0x8b, 0x8d, 0x36, 0xe1, 0xce, 0xe8, 0x95, - 0xa1, 0x9c, 0xf8, 0x10, 0x14, 0xd2, 0x87, 0xfa, 0x1e, 0xf5, 0x02, 0xcb, 0xa0, 0xea, 0xfb, 0x96, - 0x73, 0xab, 0x64, 0xd2, 0x52, 0x1b, 0xd6, 0xe9, 0x5d, 0x29, 0x00, 0x43, 0x51, 0x64, 0x0b, 0x2a, - 0xd4, 0xec, 0x50, 0x75, 0xfc, 0x50, 0xce, 0xc3, 0x3d, 0xc3, 0xfa, 0x64, 0x6f, 0x3e, 0x0a, 0x68, - 0xe2, 0x43, 0xc3, 0x56, 0x9e, 0x38, 0xd9, 0x0e, 0x47, 0x57, 0xb0, 0x42, 0x9f, 0x5e, 0x74, 0x32, - 0x41, 0x98, 0x84, 0x91, 0x1c, 0xb2, 0x1b, 0x9a, 0x04, 0x2b, 0x27, 0x34, 0x78, 0x3c, 0xc4, 0x20, - 0xe8, 0x43, 0xe3, 0x9e, 0x1e, 0x50, 0xaf, 0xab, 0x7b, 0xbb, 0x72, 0xb5, 0x31, 0xfa, 0x17, 0xbe, - 0xa5, 0x90, 0xa2, 0x2f, 0x0c, 0x93, 0x30, 0x92, 0x43, 0x5c, 0x68, 0x04, 0x52, 0x7d, 0x56, 0x76, - 0xcf, 0xd1, 0x85, 0x2a, 0x45, 0xdc, 0x97, 0x01, 0xe5, 0xea, 0x15, 0x23, 0x19, 0x64, 0x2f, 0x71, - 0xfe, 0xb2, 0x38, 0x75, 0xbb, 0x95, 0xc3, 0x7e, 0x2e, 0xa1, 0xa2, 0xe9, 0x26, 0xfb, 0x1c, 0x67, - 0xed, 0x7f, 0x55, 0xa2, 0x61, 0xf9, 0x71, 0x5b, 0xf5, 0x5e, 0x4e, 0x5a, 0xf5, 0xae, 0xa4, 0xad, - 0x7a, 0x29, 0x87, 0xee, 0xf1, 0x43, 0x50, 0x53, 0xf6, 0xb4, 0xf2, 0x29, 0xd8, 0xd3, 0x5e, 0x82, - 0xe6, 0x1e, 0x1f, 0x09, 0xc4, 0x59, 0x46, 0x15, 0x3e, 0x8d, 0xf0, 0x91, 0xfd, 0x6e, 0x94, 0x8c, - 0x71, 0x1e, 0x96, 0x45, 0xde, 
0x38, 0x11, 0x1e, 0x06, 0x2b, 0xb3, 0xb4, 0xa3, 0x64, 0x8c, 0xf3, - 0xf0, 0xe8, 0x35, 0xcb, 0xd9, 0x15, 0x19, 0x6a, 0x3c, 0x83, 0x88, 0x5e, 0x53, 0x89, 0x18, 0xd1, - 0xc9, 0x35, 0xa8, 0xf7, 0xcd, 0x6d, 0xc1, 0x5b, 0xe7, 0xbc, 0x5c, 0xc3, 0xdc, 0x5c, 0x5c, 0x92, - 0x67, 0x2b, 0x29, 0x2a, 0x2b, 0x49, 0x57, 0xef, 0x29, 0x02, 0x5f, 0x1b, 0xca, 0x92, 0xac, 0x45, - 0xc9, 0x18, 0xe7, 0x21, 0xbf, 0x05, 0x13, 0x1e, 0x35, 0xfb, 0x06, 0x0d, 0x73, 0x09, 0x73, 0x1c, - 0x11, 0x57, 0x6b, 0xc4, 0x29, 0x98, 0xe2, 0x1c, 0x62, 0x15, 0x6c, 0x8e, 0x64, 0x15, 0xfc, 0x12, - 0x4c, 0x98, 0x9e, 0x6e, 0x39, 0xd4, 0xbc, 0xe3, 0x70, 0xaf, 0xbd, 0x8c, 0xa1, 0x0b, 0x4d, 0xf4, - 0x8b, 0x09, 0x2a, 0xa6, 0xb8, 0xb5, 0x1f, 0x17, 0x80, 0x0c, 0x46, 0x8b, 0x93, 0x1d, 0xa8, 0x3a, - 0xdc, 0x7a, 0x96, 0xfb, 0xf8, 0xe9, 0x98, 0x11, 0x4e, 0x0c, 0x6b, 0x32, 0x41, 0xe2, 0x13, 0x07, - 0xea, 0xf4, 0x7e, 0x40, 0x3d, 0x27, 0xdc, 0x3d, 0x72, 0x32, 0x47, 0x5d, 0x8b, 0xd5, 0x84, 0x44, - 0xc6, 0x50, 0x86, 0xf6, 0xd3, 0x22, 0x34, 0x63, 0x7c, 0x8f, 0x5a, 0x94, 0xf2, 0x0d, 0xec, 0xc2, - 0x68, 0xb5, 0xe9, 0xd9, 0xb2, 0x87, 0xc6, 0x36, 0xb0, 0x4b, 0x12, 0xae, 0x62, 0x9c, 0x8f, 0xcc, - 0x01, 0x74, 0x75, 0x3f, 0xa0, 0x1e, 0x9f, 0xbd, 0x53, 0xdb, 0xc6, 0xd7, 0x42, 0x0a, 0xc6, 0xb8, - 0xc8, 0x55, 0x79, 0x58, 0x79, 0x39, 0x79, 0xcc, 0xdf, 0x90, 0x93, 0xc8, 0x2b, 0x27, 0x70, 0x12, - 0x39, 0xe9, 0xc0, 0xa4, 0x2a, 0xb5, 0xa2, 0x1e, 0xef, 0x10, 0x38, 0xb1, 0xfe, 0x49, 0x41, 0xe0, - 0x00, 0xa8, 0xf6, 0x9d, 0x02, 0x8c, 0x27, 0x4c, 0x26, 0xe2, 0x80, 0x3e, 0xb5, 0xd7, 0x21, 0x71, - 0x40, 0x5f, 0x6c, 0x8b, 0xc2, 0x8b, 0x50, 0x15, 0x15, 0x94, 0x0e, 0x61, 0x14, 0x55, 0x88, 0x92, - 0xca, 0xc6, 0x42, 0x69, 0x94, 0x4d, 0x8f, 0x85, 0xd2, 0x6a, 0x8b, 0x8a, 0x2e, 0x9c, 0x1b, 0xa2, - 0x74, 0x83, 0xce, 0x0d, 0x91, 0x8e, 0x21, 0x87, 0xf6, 0x3d, 0x5e, 0xee, 0xc0, 0xdb, 0x0f, 0xd7, - 0x82, 0x1d, 0xa8, 0xc9, 0xb0, 0x35, 0xd9, 0x35, 0xde, 0xcc, 0x61, 0xc7, 0xe1, 0x38, 0x32, 0x40, - 0x4b, 0x37, 0x76, 0xef, 0x6c, 0x6f, 0xa3, 0x42, 0x27, 0xd7, 0xa1, 
0xe1, 0x3a, 0x4b, 0xba, 0x65, - 0xf7, 0x3d, 0x35, 0x33, 0xfc, 0x1a, 0x1b, 0xeb, 0xee, 0xa8, 0xc4, 0x07, 0x07, 0xd3, 0x17, 0xc2, - 0x97, 0x44, 0x21, 0x31, 0xca, 0xa9, 0xfd, 0x9f, 0x12, 0xf0, 0x90, 0x25, 0xf2, 0x1a, 0x34, 0xba, - 0xd4, 0xd8, 0xd1, 0x1d, 0xcb, 0x57, 0x47, 0x8c, 0xb2, 0xf5, 0x7d, 0x63, 0x4d, 0x25, 0x3e, 0x60, - 0x55, 0x30, 0xdf, 0x5e, 0xe5, 0xbb, 0x02, 0x22, 0x5e, 0x62, 0x40, 0xb5, 0xe3, 0xfb, 0x7a, 0xcf, - 0xca, 0xed, 0x31, 0x17, 0x47, 0x3a, 0x8a, 0x61, 0x40, 0x3c, 0xa3, 0x84, 0x26, 0x06, 0x54, 0x7a, - 0xb6, 0x6e, 0x39, 0xb9, 0xaf, 0xd6, 0x61, 0x5f, 0xb0, 0xce, 0x90, 0x84, 0x51, 0x8b, 0x3f, 0xa2, - 0xc0, 0x26, 0x7d, 0x68, 0xfa, 0x86, 0xa7, 0x77, 0xfd, 0x1d, 0x7d, 0xee, 0x95, 0x57, 0x73, 0xab, - 0x89, 0x91, 0x28, 0x31, 0x6b, 0x2d, 0xe0, 0xfc, 0x5a, 0xfb, 0xc6, 0xfc, 0xdc, 0x2b, 0xaf, 0x62, - 0x5c, 0x4e, 0x5c, 0xec, 0x2b, 0x2f, 0xcd, 0xc9, 0x9e, 0x7b, 0xe2, 0x62, 0x5f, 0x79, 0x69, 0x0e, - 0xe3, 0x72, 0xb4, 0xff, 0x5d, 0x80, 0x46, 0xc8, 0x4b, 0x36, 0x01, 0xd8, 0x18, 0x22, 0x0f, 0x61, - 0x3c, 0xd6, 0x85, 0x08, 0xdc, 0x2e, 0xb0, 0x19, 0x66, 0xc6, 0x18, 0x50, 0xc6, 0x29, 0x95, 0xc5, - 0x93, 0x3e, 0xa5, 0x72, 0x16, 0x1a, 0x3b, 0xba, 0x63, 0xfa, 0x3b, 0xfa, 0xae, 0x18, 0x4a, 0x63, - 0xe7, 0xb6, 0xde, 0x50, 0x04, 0x8c, 0x78, 0xb4, 0x7f, 0x56, 0x05, 0xe1, 0xe6, 0x66, 0x9d, 0xdd, - 0xb4, 0x7c, 0x11, 0x67, 0x5d, 0xe0, 0x39, 0xc3, 0xce, 0xbe, 0x28, 0xd3, 0x31, 0xe4, 0x20, 0x17, - 0xa1, 0xd4, 0xb5, 0x1c, 0xe9, 0xf3, 0xe1, 0x26, 0xbf, 0x35, 0xcb, 0x41, 0x96, 0xc6, 0x49, 0xfa, - 0x7d, 0x19, 0x22, 0x27, 0x48, 0xfa, 0x7d, 0x64, 0x69, 0x6c, 0x11, 0x6e, 0xbb, 0xee, 0x2e, 0xeb, - 0xb6, 0x2a, 0x92, 0xae, 0xcc, 0x55, 0x01, 0xbe, 0x08, 0x5f, 0x4d, 0x92, 0x30, 0xcd, 0x4b, 0x36, - 0xe1, 0x99, 0x0f, 0xa8, 0xe7, 0xca, 0x71, 0xaa, 0x6d, 0x53, 0xda, 0x53, 0x30, 0x42, 0x89, 0xe2, - 0x01, 0x79, 0xbf, 0x97, 0xcd, 0x82, 0xc3, 0xf2, 0xf2, 0xd0, 0x5e, 0xdd, 0xeb, 0xd0, 0x60, 0xdd, - 0x73, 0x0d, 0xea, 0xfb, 0x96, 0xd3, 0x51, 0xb0, 0xd5, 0x08, 0x76, 0x23, 0x9b, 0x05, 0x87, 0xe5, - 0x25, 
0x6f, 0xc3, 0x94, 0x20, 0x09, 0x75, 0x61, 0x7e, 0x4f, 0xb7, 0x6c, 0x7d, 0xcb, 0xb2, 0xd5, - 0x8d, 0x74, 0xe3, 0xc2, 0xb3, 0xb2, 0x31, 0x84, 0x07, 0x87, 0xe6, 0x26, 0x37, 0x61, 0x52, 0xf9, - 0xd5, 0xd6, 0xa9, 0xd7, 0x0e, 0x43, 0x1f, 0xc6, 0x5b, 0x57, 0xd8, 0x8a, 0x77, 0x91, 0xf6, 0x3c, - 0x6a, 0xc4, 0xbd, 0x89, 0x8a, 0x0b, 0x07, 0xf2, 0x11, 0x84, 0x0b, 0x3c, 0xbe, 0x61, 0xb3, 0xb7, - 0xe0, 0xba, 0xb6, 0xe9, 0xde, 0x73, 0xd4, 0xb7, 0x0b, 0xd5, 0x8e, 0xbb, 0xd2, 0xda, 0x99, 0x1c, - 0x38, 0x24, 0x27, 0xfb, 0x72, 0x4e, 0x59, 0x74, 0xef, 0x39, 0x69, 0x54, 0x88, 0xbe, 0xbc, 0x3d, - 0x84, 0x07, 0x87, 0xe6, 0x26, 0x4b, 0x40, 0xd2, 0x5f, 0xb0, 0xd9, 0x93, 0xee, 0xd9, 0x0b, 0xe2, - 0x3c, 0x95, 0x34, 0x15, 0x33, 0x72, 0x90, 0x55, 0x38, 0x9f, 0x4e, 0x65, 0xe2, 0xa4, 0x97, 0x96, - 0x9f, 0xa4, 0x8a, 0x19, 0x74, 0xcc, 0xcc, 0xa5, 0xfd, 0xf3, 0x22, 0x8c, 0x27, 0x36, 0xe0, 0x3f, - 0x71, 0x1b, 0x9d, 0x99, 0x0e, 0xde, 0xf5, 0x3b, 0x2b, 0x8b, 0x37, 0xa8, 0x6e, 0x52, 0xef, 0x16, - 0x55, 0x87, 0x25, 0xf0, 0x41, 0x65, 0x2d, 0x41, 0xc1, 0x14, 0x27, 0xd9, 0x86, 0x8a, 0xb0, 0x28, - 0xe7, 0xbd, 0x5a, 0x43, 0xd5, 0x11, 0x37, 0x2b, 0xcb, 0xfb, 0x68, 0x5c, 0x8f, 0xa2, 0x80, 0xd7, - 0x02, 0x18, 0x8b, 0x73, 0xb0, 0x81, 0x24, 0x52, 0x37, 0x6b, 0x09, 0x55, 0x73, 0x05, 0x4a, 0x41, - 0x30, 0xea, 0x16, 0x6a, 0xe1, 0xa1, 0xd8, 0x58, 0x45, 0x86, 0xa1, 0x6d, 0xb3, 0x7f, 0xe7, 0xfb, - 0x96, 0xeb, 0xc8, 0xf3, 0xb4, 0x37, 0xa1, 0x16, 0x48, 0x23, 0xdd, 0x68, 0x5b, 0xc0, 0xb9, 0x8e, - 0xa2, 0x0c, 0x74, 0x0a, 0x4b, 0xfb, 0x8f, 0x45, 0x68, 0x84, 0x0b, 0xea, 0x23, 0x9c, 0x53, 0xed, - 0x42, 0x23, 0x8c, 0xcf, 0xca, 0x7d, 0x5b, 0x5f, 0x14, 0x36, 0xc4, 0xd7, 0x80, 0xe1, 0x2b, 0x46, - 0x32, 0xe2, 0xb1, 0x5f, 0xa5, 0x1c, 0xb1, 0x5f, 0x3d, 0xa8, 0x05, 0x9e, 0xd5, 0xe9, 0x48, 0xed, - 0x3c, 0x4f, 0xf0, 0x57, 0x58, 0x5d, 0x1b, 0x02, 0x50, 0xd6, 0xac, 0x78, 0x41, 0x25, 0x46, 0x7b, - 0x0f, 0x26, 0xd3, 0x9c, 0x5c, 0x75, 0x35, 0x76, 0xa8, 0xd9, 0xb7, 0x55, 0x1d, 0x47, 0xaa, 0xab, - 0x4c, 0xc7, 0x90, 0x83, 0x2d, 0x7f, 0xd9, 
0x6f, 0xfa, 0xc0, 0x75, 0x94, 0xfa, 0xc8, 0x57, 0x01, - 0x1b, 0x32, 0x0d, 0x43, 0xaa, 0xf6, 0xdf, 0x4a, 0x70, 0x31, 0x32, 0x8b, 0xac, 0xe9, 0x8e, 0xde, - 0x39, 0xc2, 0x15, 0x6d, 0x9f, 0x6f, 0xaa, 0x39, 0xee, 0x65, 0x03, 0xa5, 0x27, 0xe0, 0xb2, 0x81, - 0xff, 0x5b, 0x04, 0x1e, 0x4b, 0x4a, 0xbe, 0x06, 0x63, 0x7a, 0xec, 0x76, 0x4e, 0xf9, 0x3b, 0xaf, - 0xe7, 0xfe, 0x9d, 0x3c, 0x64, 0x35, 0x8c, 0x8d, 0x8a, 0xa7, 0x62, 0x42, 0x20, 0x71, 0xa1, 0xbe, - 0xad, 0xdb, 0x36, 0xd3, 0x85, 0x72, 0xbb, 0x79, 0x12, 0xc2, 0x79, 0x33, 0x5f, 0x92, 0xd0, 0x18, - 0x0a, 0x21, 0x1f, 0x16, 0x60, 0xdc, 0x8b, 0x2f, 0x93, 0xe4, 0x0f, 0xc9, 0x13, 0x04, 0x10, 0x43, - 0x8b, 0x47, 0x62, 0xc5, 0xd7, 0x62, 0x49, 0x99, 0xda, 0x7f, 0x2d, 0xc0, 0x78, 0xdb, 0xb6, 0x4c, - 0xcb, 0xe9, 0x9c, 0xe2, 0x5d, 0x07, 0x77, 0xa0, 0xe2, 0xdb, 0x96, 0x49, 0x47, 0x9c, 0x4d, 0xc4, - 0x3c, 0xc6, 0x00, 0x50, 0xe0, 0x24, 0x2f, 0x4f, 0x28, 0x1d, 0xe1, 0xf2, 0x84, 0x5f, 0x54, 0x41, - 0x46, 0x45, 0x93, 0x3e, 0x34, 0x3a, 0xea, 0x4c, 0x76, 0xf9, 0x8d, 0x37, 0x72, 0x9c, 0xe7, 0x97, - 0x38, 0xdd, 0x5d, 0x8c, 0xfd, 0x61, 0x22, 0x46, 0x92, 0x08, 0x4d, 0x5e, 0x0b, 0xbb, 0x98, 0xf3, - 0x5a, 0x58, 0x21, 0x6e, 0xf0, 0x62, 0x58, 0x1d, 0xca, 0x3b, 0x41, 0xd0, 0x93, 0x8d, 0x69, 0xf4, - 0xb0, 0xf7, 0xe8, 0x48, 0x19, 0xa1, 0x13, 0xb1, 0x77, 0xe4, 0xd0, 0x4c, 0x84, 0xa3, 0x87, 0xd7, - 0x80, 0x2d, 0xe4, 0x0a, 0x38, 0x88, 0x8b, 0x60, 0xef, 0xc8, 0xa1, 0xc9, 0x1f, 0x40, 0x33, 0xf0, - 0x74, 0xc7, 0xdf, 0x76, 0xbd, 0x2e, 0xf5, 0xe4, 0x1a, 0x75, 0x29, 0xc7, 0xcd, 0xa8, 0x1b, 0x11, - 0x9a, 0xf0, 0x64, 0x26, 0x92, 0x30, 0x2e, 0x8d, 0xec, 0x42, 0xbd, 0x6f, 0x8a, 0x82, 0x49, 0xf3, - 0xd3, 0x7c, 0x9e, 0xcb, 0x6e, 0x63, 0xe1, 0x04, 0xea, 0x0d, 0x43, 0x01, 0xc9, 0x1b, 0xef, 0x6a, - 0x27, 0x75, 0xe3, 0x5d, 0xbc, 0x35, 0x66, 0x9d, 0x77, 0x41, 0xba, 0x52, 0xaf, 0x75, 0x3a, 0x32, - 0x1a, 0x6a, 0x29, 0xb7, 0xca, 0x29, 0x44, 0x36, 0x43, 0xdd, 0xd8, 0xe9, 0xa0, 0x92, 0xa1, 0x75, - 0x41, 0x7a, 0x19, 0x88, 0x91, 0xb8, 0x17, 0x46, 0x6c, 0xc2, 0x9a, 0x3d, 0xda, 
0x78, 0x10, 0x5e, - 0x50, 0x12, 0x3b, 0x97, 0x3a, 0xf3, 0x02, 0x18, 0xed, 0x3f, 0x15, 0xa1, 0xb4, 0xb1, 0xda, 0x16, - 0x67, 0x4d, 0xf2, 0x4b, 0x97, 0x68, 0x7b, 0xd7, 0xea, 0xdd, 0xa5, 0x9e, 0xb5, 0xbd, 0x2f, 0x97, - 0xde, 0xb1, 0xb3, 0x26, 0xd3, 0x1c, 0x98, 0x91, 0x8b, 0xbc, 0x03, 0x63, 0x86, 0xbe, 0x40, 0xbd, - 0x60, 0x14, 0xc3, 0x02, 0xdf, 0x6d, 0xba, 0x30, 0x1f, 0x65, 0xc7, 0x04, 0x18, 0xd9, 0x04, 0x30, - 0x22, 0xe8, 0xd2, 0xb1, 0xcd, 0x21, 0x31, 0xe0, 0x18, 0x10, 0x41, 0x68, 0xec, 0x32, 0x56, 0x8e, - 0x5a, 0x3e, 0x0e, 0x2a, 0x6f, 0x39, 0xb7, 0x54, 0x5e, 0x8c, 0x60, 0x34, 0x07, 0xc6, 0x13, 0x97, - 0xc5, 0x90, 0x2f, 0x42, 0xdd, 0xed, 0xc5, 0x86, 0xd3, 0x06, 0x8f, 0xbb, 0xac, 0xdf, 0x91, 0x69, - 0x0f, 0x0e, 0xa6, 0xc7, 0x57, 0xdd, 0x8e, 0x65, 0xa8, 0x04, 0x0c, 0xd9, 0x89, 0x06, 0x55, 0xbe, - 0x45, 0x4c, 0x5d, 0x15, 0xc3, 0xe7, 0x0e, 0x7e, 0x9b, 0x83, 0x8f, 0x92, 0xa2, 0xfd, 0x61, 0x19, - 0x22, 0xdf, 0x1c, 0xf1, 0xa1, 0x2a, 0x42, 0xe0, 0xe5, 0xc8, 0x7d, 0xaa, 0xd1, 0xf6, 0x52, 0x14, - 0xe9, 0x40, 0xe9, 0x3d, 0x77, 0x2b, 0xf7, 0xc0, 0x1d, 0xdb, 0x1b, 0x2e, 0x6c, 0x65, 0xb1, 0x04, - 0x64, 0x12, 0xc8, 0xdf, 0x2e, 0xc0, 0x59, 0x3f, 0xad, 0xfa, 0xca, 0xe6, 0x80, 0xf9, 0x75, 0xfc, - 0xb4, 0x32, 0x2d, 0x03, 0x64, 0x87, 0x91, 0x71, 0xb0, 0x2c, 0xac, 0xfe, 0x85, 0xd3, 0x4c, 0x36, - 0xa7, 0xe5, 0x9c, 0x17, 0x1c, 0x26, 0xeb, 0x3f, 0x99, 0x86, 0x52, 0x94, 0xf6, 0xc7, 0x45, 0x68, - 0xc6, 0x46, 0xeb, 0xdc, 0x37, 0x10, 0xdd, 0x4f, 0xdd, 0x40, 0xb4, 0x3e, 0xba, 0x0f, 0x39, 0x2a, - 0xd5, 0x69, 0x5f, 0x42, 0xf4, 0xaf, 0x8a, 0x50, 0xda, 0x5c, 0x5c, 0x4a, 0x2e, 0x5a, 0x0b, 0x8f, - 0x61, 0xd1, 0xba, 0x03, 0xb5, 0xad, 0xbe, 0x65, 0x07, 0x96, 0x93, 0xfb, 0xf4, 0x0a, 0x75, 0x61, - 0x93, 0xf4, 0x31, 0x08, 0x54, 0x54, 0xf0, 0xa4, 0x03, 0xb5, 0x8e, 0x38, 0x3e, 0x30, 0x77, 0x64, - 0x9d, 0x3c, 0x86, 0x50, 0x08, 0x92, 0x2f, 0xa8, 0xd0, 0xb5, 0x7d, 0x90, 0x37, 0xcf, 0x3f, 0xf6, - 0xda, 0xd4, 0xfe, 0x00, 0x42, 0x2d, 0xe0, 0xf1, 0x0b, 0xff, 0x1f, 0x05, 0x48, 0x2a, 0x3e, 0x8f, - 0xbf, 0x35, 0xed, 
0xa6, 0x5b, 0xd3, 0xe2, 0x49, 0x74, 0xbe, 0xec, 0x06, 0xa5, 0xfd, 0xd3, 0x22, - 0x54, 0x1f, 0xdb, 0x8e, 0x63, 0x9a, 0x08, 0x12, 0x5c, 0xc8, 0x39, 0x30, 0x0e, 0x0d, 0x11, 0xec, - 0xa6, 0x42, 0x04, 0xf3, 0x5e, 0x31, 0xfb, 0x88, 0x00, 0xc1, 0x7f, 0x57, 0x00, 0x39, 0x2c, 0xaf, - 0x38, 0x7e, 0xa0, 0x3b, 0x06, 0x25, 0x46, 0x38, 0x07, 0xe4, 0x8d, 0x44, 0x91, 0xd1, 0x5a, 0x62, - 0xda, 0xe7, 0xcf, 0x6a, 0xcc, 0x27, 0x5f, 0x80, 0xfa, 0x8e, 0xeb, 0x07, 0x7c, 0x9c, 0x2f, 0x26, - 0xad, 0x4b, 0x37, 0x64, 0x3a, 0x86, 0x1c, 0x69, 0x8f, 0x6b, 0x65, 0xb8, 0xc7, 0x55, 0xfb, 0x56, - 0x11, 0xc6, 0x3e, 0x2b, 0xdb, 0xa6, 0xb3, 0x42, 0x2a, 0x4b, 0x39, 0x43, 0x2a, 0xcb, 0xc7, 0x09, - 0xa9, 0xd4, 0x7e, 0x58, 0x00, 0x78, 0x6c, 0x7b, 0xb6, 0xcd, 0x64, 0xb4, 0x63, 0xee, 0x76, 0x95, - 0x1d, 0xeb, 0xf8, 0x8f, 0x2b, 0xea, 0x93, 0x78, 0xa4, 0xe3, 0x47, 0x05, 0x98, 0xd0, 0x13, 0xd1, - 0x83, 0xb9, 0x55, 0xcb, 0x54, 0x30, 0x62, 0x18, 0xfc, 0x92, 0x4c, 0xc7, 0x94, 0x58, 0xf2, 0x7a, - 0x74, 0x5e, 0xf0, 0xed, 0xa8, 0xd9, 0x0f, 0x1c, 0xf4, 0xcb, 0xd5, 0x9c, 0x04, 0xe7, 0x23, 0xa2, - 0x35, 0x4b, 0x27, 0x12, 0xad, 0x19, 0xdf, 0x87, 0x56, 0x7e, 0xe8, 0x3e, 0xb4, 0x3d, 0x68, 0x6c, - 0x7b, 0x6e, 0x97, 0x07, 0x44, 0xca, 0xcb, 0x69, 0xaf, 0xe7, 0x98, 0x53, 0xa2, 0x6b, 0xd9, 0x23, - 0x1b, 0xcf, 0x92, 0xc2, 0xc7, 0x48, 0x14, 0x37, 0x8b, 0xbb, 0x42, 0x6a, 0xf5, 0x24, 0xa5, 0x86, - 0x63, 0xc9, 0x86, 0x40, 0x47, 0x25, 0x26, 0x19, 0x04, 0x59, 0x7b, 0x3c, 0x41, 0x90, 0xda, 0x77, - 0xab, 0x6a, 0x00, 0x7b, 0xe2, 0x8e, 0xa6, 0xfc, 0xec, 0xef, 0xf5, 0x4d, 0x6f, 0xc4, 0xad, 0x3d, - 0xc6, 0x8d, 0xb8, 0xf5, 0x93, 0xd9, 0x88, 0xdb, 0xc8, 0xb7, 0x11, 0x17, 0xf2, 0x6f, 0xc4, 0x6d, - 0xe6, 0xdb, 0x88, 0x3b, 0x36, 0xd2, 0x46, 0xdc, 0xf1, 0x23, 0x6d, 0xc4, 0x3d, 0x28, 0x41, 0x6a, - 0x95, 0xf9, 0xb9, 0x47, 0xe9, 0xff, 0x2b, 0x8f, 0xd2, 0xc7, 0x45, 0x88, 0x86, 0xcd, 0x63, 0x46, - 0xdc, 0xbc, 0x0d, 0xf5, 0xae, 0x7e, 0x7f, 0x91, 0xda, 0xfa, 0x7e, 0x9e, 0xfb, 0x57, 0xd7, 0x24, - 0x06, 0x86, 0x68, 0xc4, 0x07, 0xb0, 0xc2, 0x33, 0xd0, 
0x73, 0xdb, 0xe6, 0xa3, 0xe3, 0xd4, 0x85, - 0xf5, 0x2f, 0x7a, 0xc7, 0x98, 0x18, 0xed, 0xdf, 0x16, 0x41, 0x1e, 0x96, 0x4f, 0x28, 0x54, 0xb6, - 0xad, 0xfb, 0xd4, 0xcc, 0x1d, 0x3f, 0x1b, 0xbb, 0x15, 0x5b, 0x38, 0x1f, 0x78, 0x02, 0x0a, 0x74, - 0x6e, 0x55, 0x16, 0xce, 0x24, 0x59, 0x7f, 0x39, 0xac, 0xca, 0x71, 0xa7, 0x94, 0xb4, 0x2a, 0x8b, - 0x24, 0x54, 0x32, 0x84, 0x11, 0x9b, 0xc7, 0x15, 0xe4, 0xf6, 0x9d, 0x25, 0xe2, 0x13, 0x94, 0x11, - 0xdb, 0x17, 0x3b, 0xf1, 0xa5, 0x8c, 0xd6, 0xef, 0xff, 0xe0, 0x47, 0x57, 0x9e, 0xfa, 0xe1, 0x8f, - 0xae, 0x3c, 0xf5, 0xc9, 0x8f, 0xae, 0x3c, 0xf5, 0x87, 0x87, 0x57, 0x0a, 0x3f, 0x38, 0xbc, 0x52, - 0xf8, 0xe1, 0xe1, 0x95, 0xc2, 0x27, 0x87, 0x57, 0x0a, 0xff, 0xf9, 0xf0, 0x4a, 0xe1, 0x6f, 0xfc, - 0x97, 0x2b, 0x4f, 0xfd, 0xde, 0x6b, 0x51, 0x11, 0x66, 0x55, 0x11, 0x66, 0x95, 0xc0, 0xd9, 0xde, - 0x6e, 0x67, 0x96, 0x15, 0x21, 0x4a, 0x51, 0x45, 0xf8, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x6c, - 0x98, 0x3b, 0x47, 0x90, 0x97, 0x00, 0x00, + // 7907 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x5d, 0x6c, 0x25, 0x47, + 0x76, 0x9e, 0xee, 0xff, 0xbd, 0xe7, 0xf2, 0x6f, 0x6a, 0x7e, 0xc4, 0x19, 0x8d, 0x86, 0xe3, 0x96, + 0x25, 0x8f, 0x63, 0x9b, 0x8c, 0x68, 0xfd, 0xad, 0xed, 0x5d, 0x89, 0x97, 0x1c, 0x72, 0x38, 0x43, + 0xce, 0x70, 0xcf, 0x25, 0x47, 0x5a, 0x2b, 0x5e, 0xa5, 0xd9, 0x5d, 0xbc, 0x6c, 0xb1, 0x6f, 0xf7, + 0x55, 0x77, 0x5f, 0xce, 0x50, 0x4e, 0xb0, 0xb6, 0x95, 0x40, 0x0a, 0x82, 0x20, 0x81, 0x9f, 0x0c, + 0x04, 0x4e, 0x90, 0x20, 0x80, 0x1f, 0x0c, 0xe7, 0x21, 0xc0, 0xe6, 0xc1, 0x40, 0xe2, 0x38, 0x08, + 0x92, 0x4d, 0x90, 0x9f, 0x45, 0x10, 0x20, 0xca, 0x0b, 0x91, 0x65, 0x90, 0x87, 0x04, 0x88, 0x61, + 0xc4, 0x48, 0xec, 0x0c, 0x16, 0xd9, 0xa0, 0xfe, 0xfa, 0xef, 0xf6, 0x9d, 0x21, 0x6f, 0x93, 0xa3, + 0x51, 0xac, 0xb7, 0xee, 0x3a, 0xa7, 0xbe, 0x53, 0x55, 0x5d, 0x5d, 0x75, 0xea, 0x9c, 0x53, 0x55, + 0xb0, 0xd2, 0xb1, 0x82, 0xdd, 0xfe, 0xf6, 0xac, 0xe1, 0x76, 0xe7, 0x9c, 0x7e, 0x57, 0xef, 0x79, + 
0xee, 0x87, 0xfc, 0x61, 0xc7, 0x76, 0x1f, 0xcc, 0xf5, 0xf6, 0x3a, 0x73, 0x7a, 0xcf, 0xf2, 0xa3, + 0x94, 0xfd, 0x57, 0x75, 0xbb, 0xb7, 0xab, 0xbf, 0x3a, 0xd7, 0xa1, 0x0e, 0xf5, 0xf4, 0x80, 0x9a, + 0xb3, 0x3d, 0xcf, 0x0d, 0x5c, 0xf2, 0x66, 0x04, 0x34, 0xab, 0x80, 0x66, 0x55, 0xb6, 0xd9, 0xde, + 0x5e, 0x67, 0x96, 0x01, 0x45, 0x29, 0x0a, 0xe8, 0xca, 0xcf, 0xc4, 0x4a, 0xd0, 0x71, 0x3b, 0xee, + 0x1c, 0xc7, 0xdb, 0xee, 0xef, 0xf0, 0x37, 0xfe, 0xc2, 0x9f, 0x84, 0x9c, 0x2b, 0xda, 0xde, 0x5b, + 0xfe, 0xac, 0xe5, 0xb2, 0x62, 0xcd, 0x19, 0xae, 0x47, 0xe7, 0xf6, 0x07, 0xca, 0x72, 0xe5, 0xb5, + 0x88, 0xa7, 0xab, 0x1b, 0xbb, 0x96, 0x43, 0xbd, 0x03, 0x55, 0x97, 0x39, 0x8f, 0xfa, 0x6e, 0xdf, + 0x33, 0xe8, 0x89, 0x72, 0xf9, 0x73, 0x5d, 0x1a, 0xe8, 0x59, 0xb2, 0xe6, 0x86, 0xe5, 0xf2, 0xfa, + 0x4e, 0x60, 0x75, 0x07, 0xc5, 0xbc, 0xf1, 0xa4, 0x0c, 0xbe, 0xb1, 0x4b, 0xbb, 0xfa, 0x40, 0xbe, + 0x9f, 0x1d, 0x96, 0xaf, 0x1f, 0x58, 0xf6, 0x9c, 0xe5, 0x04, 0x7e, 0xe0, 0xa5, 0x33, 0x69, 0xbf, + 0x0f, 0x70, 0x7e, 0x61, 0xdb, 0x0f, 0x3c, 0xdd, 0x08, 0x36, 0x5c, 0x73, 0x93, 0x76, 0x7b, 0xb6, + 0x1e, 0x50, 0xb2, 0x07, 0x75, 0x56, 0x21, 0x53, 0x0f, 0xf4, 0xe9, 0xc2, 0xf5, 0xc2, 0x8d, 0xe6, + 0xfc, 0xc2, 0xec, 0x88, 0x1f, 0x70, 0x76, 0x5d, 0x02, 0xb5, 0xc6, 0x8e, 0x0e, 0x67, 0xea, 0xea, + 0x0d, 0x43, 0x01, 0xe4, 0x37, 0x0a, 0x30, 0xe6, 0xb8, 0x26, 0x6d, 0x53, 0x9b, 0x1a, 0x81, 0xeb, + 0x4d, 0x17, 0xaf, 0x97, 0x6e, 0x34, 0xe7, 0xbf, 0x3d, 0xb2, 0xc4, 0x8c, 0x1a, 0xcd, 0xde, 0x8d, + 0x09, 0xb8, 0xe9, 0x04, 0xde, 0x41, 0xeb, 0xc2, 0xf7, 0x0e, 0x67, 0x9e, 0x3b, 0x3a, 0x9c, 0x19, + 0x8b, 0x93, 0x30, 0x51, 0x12, 0xb2, 0x05, 0xcd, 0xc0, 0xb5, 0x59, 0x93, 0x59, 0xae, 0xe3, 0x4f, + 0x97, 0x78, 0xc1, 0xae, 0xcd, 0x8a, 0xa6, 0x66, 0xe2, 0x67, 0x59, 0x1f, 0x9b, 0xdd, 0x7f, 0x75, + 0x76, 0x33, 0x64, 0x6b, 0x9d, 0x97, 0xc0, 0xcd, 0x28, 0xcd, 0xc7, 0x38, 0x0e, 0xa1, 0x30, 0xe9, + 0x53, 0xa3, 0xef, 0x59, 0xc1, 0xc1, 0xa2, 0xeb, 0x04, 0xf4, 0x61, 0x30, 0x5d, 0xe6, 0xad, 0xfc, + 0x4a, 0x16, 0xf4, 0x86, 0x6b, 0xb6, 
0x93, 0xdc, 0xad, 0xf3, 0x47, 0x87, 0x33, 0x93, 0xa9, 0x44, + 0x4c, 0x63, 0x12, 0x07, 0xa6, 0xac, 0xae, 0xde, 0xa1, 0x1b, 0x7d, 0xdb, 0x6e, 0x53, 0xc3, 0xa3, + 0x81, 0x3f, 0x5d, 0xe1, 0x55, 0xb8, 0x91, 0x25, 0x67, 0xcd, 0x35, 0x74, 0xfb, 0xde, 0xf6, 0x87, + 0xd4, 0x08, 0x90, 0xee, 0x50, 0x8f, 0x3a, 0x06, 0x6d, 0x4d, 0xcb, 0xca, 0x4c, 0xad, 0xa6, 0x90, + 0x70, 0x00, 0x9b, 0xac, 0xc0, 0xb9, 0x9e, 0x67, 0xb9, 0xbc, 0x08, 0xb6, 0xee, 0xfb, 0x77, 0xf5, + 0x2e, 0x9d, 0xae, 0x5e, 0x2f, 0xdc, 0x68, 0xb4, 0x2e, 0x4b, 0x98, 0x73, 0x1b, 0x69, 0x06, 0x1c, + 0xcc, 0x43, 0x6e, 0x40, 0x5d, 0x25, 0x4e, 0xd7, 0xae, 0x17, 0x6e, 0x54, 0x44, 0xdf, 0x51, 0x79, + 0x31, 0xa4, 0x92, 0x65, 0xa8, 0xeb, 0x3b, 0x3b, 0x96, 0xc3, 0x38, 0xeb, 0xbc, 0x09, 0xaf, 0x66, + 0x55, 0x6d, 0x41, 0xf2, 0x08, 0x1c, 0xf5, 0x86, 0x61, 0x5e, 0x72, 0x1b, 0x88, 0x4f, 0xbd, 0x7d, + 0xcb, 0xa0, 0x0b, 0x86, 0xe1, 0xf6, 0x9d, 0x80, 0x97, 0xbd, 0xc1, 0xcb, 0x7e, 0x45, 0x96, 0x9d, + 0xb4, 0x07, 0x38, 0x30, 0x23, 0x17, 0x79, 0x07, 0xa6, 0xe4, 0xbf, 0x1a, 0xb5, 0x02, 0x70, 0xa4, + 0x0b, 0xac, 0x21, 0x31, 0x45, 0xc3, 0x01, 0x6e, 0x62, 0xc2, 0x55, 0xbd, 0x1f, 0xb8, 0x5d, 0x06, + 0x99, 0x14, 0xba, 0xe9, 0xee, 0x51, 0x67, 0xba, 0x79, 0xbd, 0x70, 0xa3, 0xde, 0xba, 0x7e, 0x74, + 0x38, 0x73, 0x75, 0xe1, 0x31, 0x7c, 0xf8, 0x58, 0x14, 0x72, 0x0f, 0x1a, 0xa6, 0xe3, 0x6f, 0xb8, + 0xb6, 0x65, 0x1c, 0x4c, 0x8f, 0xf1, 0x02, 0xbe, 0x2a, 0xab, 0xda, 0x58, 0xba, 0xdb, 0x16, 0x84, + 0x47, 0x87, 0x33, 0x57, 0x07, 0x87, 0xd4, 0xd9, 0x90, 0x8e, 0x11, 0x06, 0x59, 0xe7, 0x80, 0x8b, + 0xae, 0xb3, 0x63, 0x75, 0xa6, 0xc7, 0xf9, 0xd7, 0xb8, 0x3e, 0xa4, 0x43, 0x2f, 0xdd, 0x6d, 0x0b, + 0xbe, 0xd6, 0xb8, 0x14, 0x27, 0x5e, 0x31, 0x42, 0x20, 0x26, 0x4c, 0xa8, 0xc1, 0x78, 0xd1, 0xd6, + 0xad, 0xae, 0x3f, 0x3d, 0xc1, 0x3b, 0xef, 0x8f, 0x0f, 0xc1, 0xc4, 0x38, 0x73, 0xeb, 0x92, 0xac, + 0xca, 0x44, 0x22, 0xd9, 0xc7, 0x14, 0xe6, 0x95, 0xb7, 0xe1, 0xdc, 0xc0, 0xd8, 0x40, 0xa6, 0xa0, + 0xb4, 0x47, 0x0f, 0xf8, 0xd0, 0xd7, 0x40, 0xf6, 0x48, 0x2e, 0x40, 0x65, 
0x5f, 0xb7, 0xfb, 0x74, + 0xba, 0xc8, 0xd3, 0xc4, 0xcb, 0xcf, 0x15, 0xdf, 0x2a, 0x68, 0x7f, 0xb7, 0x04, 0x63, 0x6a, 0xc4, + 0x69, 0x5b, 0xce, 0x1e, 0x79, 0x17, 0x4a, 0xb6, 0xdb, 0x91, 0xe3, 0xe6, 0x2f, 0x8c, 0x3c, 0x8a, + 0xad, 0xb9, 0x9d, 0x56, 0xed, 0xe8, 0x70, 0xa6, 0xb4, 0xe6, 0x76, 0x90, 0x21, 0x12, 0x03, 0x2a, + 0x7b, 0xfa, 0xce, 0x9e, 0xce, 0xcb, 0xd0, 0x9c, 0x6f, 0x8d, 0x0c, 0x7d, 0x87, 0xa1, 0xb0, 0xb2, + 0xb6, 0x1a, 0x47, 0x87, 0x33, 0x15, 0xfe, 0x8a, 0x02, 0x9b, 0xb8, 0xd0, 0xd8, 0xb6, 0x75, 0x63, + 0x6f, 0xd7, 0xb5, 0xe9, 0x74, 0x29, 0xa7, 0xa0, 0x96, 0x42, 0x12, 0x9f, 0x39, 0x7c, 0xc5, 0x48, + 0x06, 0x31, 0xa0, 0xda, 0x37, 0x7d, 0xcb, 0xd9, 0x93, 0x63, 0xe0, 0xdb, 0x23, 0x4b, 0xdb, 0x5a, + 0xe2, 0x75, 0x82, 0xa3, 0xc3, 0x99, 0xaa, 0x78, 0x46, 0x09, 0xad, 0xfd, 0x41, 0x13, 0x26, 0xd4, + 0x47, 0xba, 0x4f, 0xbd, 0x80, 0x3e, 0x24, 0xd7, 0xa1, 0xec, 0xb0, 0x5f, 0x93, 0x7f, 0xe4, 0xd6, + 0x98, 0xec, 0x2e, 0x65, 0xfe, 0x4b, 0x72, 0x0a, 0x2b, 0x99, 0xe8, 0x2a, 0xb2, 0xc1, 0x47, 0x2f, + 0x59, 0x9b, 0xc3, 0x88, 0x92, 0x89, 0x67, 0x94, 0xd0, 0xe4, 0x7d, 0x28, 0xf3, 0xca, 0x8b, 0xa6, + 0xfe, 0xfa, 0xe8, 0x22, 0x58, 0xd5, 0xeb, 0xac, 0x06, 0xbc, 0xe2, 0x1c, 0x94, 0x75, 0xc5, 0xbe, + 0xb9, 0x23, 0x1b, 0xf6, 0x17, 0x72, 0x34, 0xec, 0xb2, 0xe8, 0x8a, 0x5b, 0x4b, 0xcb, 0xc8, 0x10, + 0xc9, 0x5f, 0x2f, 0xc0, 0x39, 0xc3, 0x75, 0x02, 0x9d, 0xe9, 0x19, 0x6a, 0x92, 0x9d, 0xae, 0x70, + 0x39, 0xb7, 0x47, 0x96, 0xb3, 0x98, 0x46, 0x6c, 0x5d, 0x64, 0x73, 0xc6, 0x40, 0x32, 0x0e, 0xca, + 0x26, 0x7f, 0xb3, 0x00, 0x17, 0xd9, 0x58, 0x3e, 0xc0, 0xcc, 0x67, 0xa0, 0xd3, 0x2d, 0xd5, 0xe5, + 0xa3, 0xc3, 0x99, 0x8b, 0xab, 0x59, 0xc2, 0x30, 0xbb, 0x0c, 0xac, 0x74, 0xe7, 0xf5, 0x41, 0xb5, + 0x84, 0xcf, 0x6e, 0xcd, 0xf9, 0xb5, 0xd3, 0x54, 0x75, 0x5a, 0x2f, 0xc8, 0xae, 0x9c, 0xa5, 0xd9, + 0x61, 0x56, 0x29, 0xc8, 0x4d, 0xa8, 0xed, 0xbb, 0x76, 0xbf, 0x4b, 0xfd, 0xe9, 0x3a, 0x1f, 0x62, + 0xaf, 0x64, 0x0d, 0xb1, 0xf7, 0x39, 0x4b, 0x6b, 0x52, 0xc2, 0xd7, 0xc4, 0xbb, 0x8f, 0x2a, 0x2f, + 0xb1, 0xa0, 
0x6a, 0x5b, 0x5d, 0x2b, 0xf0, 0xf9, 0xc4, 0xd9, 0x9c, 0xbf, 0x39, 0x72, 0xb5, 0xc4, + 0x2f, 0xba, 0xc6, 0xc1, 0xc4, 0x5f, 0x23, 0x9e, 0x51, 0x0a, 0x60, 0x43, 0xa1, 0x6f, 0xe8, 0xb6, + 0x98, 0x58, 0x9b, 0xf3, 0xdf, 0x18, 0xfd, 0xb7, 0x61, 0x28, 0xad, 0x71, 0x59, 0xa7, 0x0a, 0x7f, + 0x45, 0x81, 0x4d, 0x7e, 0x09, 0x26, 0x12, 0x5f, 0xd3, 0x9f, 0x6e, 0xf2, 0xd6, 0x79, 0x31, 0xab, + 0x75, 0x42, 0xae, 0x68, 0xe6, 0x49, 0xf4, 0x10, 0x1f, 0x53, 0x60, 0xe4, 0x0e, 0xd4, 0x7d, 0xcb, + 0xa4, 0x86, 0xee, 0xf9, 0xd3, 0x63, 0xc7, 0x01, 0x9e, 0x92, 0xc0, 0xf5, 0xb6, 0xcc, 0x86, 0x21, + 0x00, 0x99, 0x05, 0xe8, 0xe9, 0x5e, 0x60, 0x09, 0x45, 0x75, 0x9c, 0x2b, 0x4d, 0x13, 0x47, 0x87, + 0x33, 0xb0, 0x11, 0xa6, 0x62, 0x8c, 0x83, 0xf1, 0xb3, 0xbc, 0xab, 0x4e, 0xaf, 0x1f, 0x88, 0x89, + 0xb5, 0x21, 0xf8, 0xdb, 0x61, 0x2a, 0xc6, 0x38, 0xc8, 0xef, 0x14, 0xe0, 0x85, 0xe8, 0x75, 0xf0, + 0x27, 0x9b, 0x3c, 0xf5, 0x9f, 0x6c, 0xe6, 0xe8, 0x70, 0xe6, 0x85, 0xf6, 0x70, 0x91, 0xf8, 0xb8, + 0xf2, 0x68, 0xef, 0xc2, 0xf8, 0x42, 0x3f, 0xd8, 0x75, 0x3d, 0xeb, 0x63, 0xae, 0x74, 0x93, 0x65, + 0xa8, 0x04, 0x5c, 0x79, 0x12, 0xf3, 0xf2, 0xcb, 0x59, 0x4d, 0x2d, 0x14, 0xd9, 0x3b, 0xf4, 0x40, + 0x69, 0x03, 0x62, 0x7e, 0x14, 0xca, 0x94, 0xc8, 0xae, 0xfd, 0xa5, 0x02, 0xd4, 0x5a, 0xba, 0xb1, + 0xe7, 0xee, 0xec, 0x90, 0xf7, 0xa0, 0x6e, 0x39, 0x01, 0xf5, 0xf6, 0x75, 0x5b, 0xc2, 0xce, 0xc6, + 0x60, 0xc3, 0x65, 0x58, 0x54, 0x6f, 0xb6, 0xe6, 0x61, 0x82, 0x96, 0xfa, 0x72, 0xad, 0xc0, 0xf5, + 0xd1, 0x55, 0x89, 0x81, 0x21, 0x1a, 0x99, 0x81, 0x8a, 0x1f, 0xd0, 0x9e, 0xcf, 0x67, 0x9e, 0x71, + 0x51, 0x8c, 0x36, 0x4b, 0x40, 0x91, 0xae, 0xfd, 0x9d, 0x02, 0x34, 0x5a, 0xba, 0x6f, 0x19, 0xac, + 0x96, 0x64, 0x11, 0xca, 0x7d, 0x9f, 0x7a, 0x27, 0xab, 0x1b, 0x9f, 0x2c, 0xb6, 0x7c, 0xea, 0x21, + 0xcf, 0x4c, 0xee, 0x41, 0xbd, 0xa7, 0xfb, 0xfe, 0x03, 0xd7, 0x33, 0xe5, 0x84, 0x77, 0x4c, 0x20, + 0xa1, 0x9c, 0xcb, 0xac, 0x18, 0x82, 0x68, 0x4d, 0x88, 0x66, 0x7c, 0xed, 0x8f, 0x0a, 0x70, 0xbe, + 0xd5, 0xdf, 0xd9, 0xa1, 0x9e, 0xd4, 0x45, 0xa5, 
0x96, 0x47, 0xa1, 0xe2, 0x51, 0xd3, 0xf2, 0x65, + 0xd9, 0x97, 0x46, 0xee, 0x41, 0xc8, 0x50, 0xa4, 0x52, 0xc9, 0xdb, 0x8b, 0x27, 0xa0, 0x40, 0x27, + 0x7d, 0x68, 0x7c, 0x48, 0xd9, 0x1a, 0x98, 0xea, 0x5d, 0x59, 0xbb, 0x5b, 0x23, 0x8b, 0xba, 0x4d, + 0x83, 0x36, 0x47, 0x8a, 0xeb, 0xb0, 0x61, 0x22, 0x46, 0x92, 0xb4, 0xdf, 0xaf, 0xc0, 0xd8, 0xa2, + 0xdb, 0xdd, 0xb6, 0x1c, 0x6a, 0xde, 0x34, 0x3b, 0x94, 0x7c, 0x00, 0x65, 0x6a, 0x76, 0xa8, 0xac, + 0xed, 0xe8, 0xd3, 0x3d, 0x03, 0x8b, 0x94, 0x16, 0xf6, 0x86, 0x1c, 0x98, 0xac, 0xc1, 0xc4, 0x8e, + 0xe7, 0x76, 0xc5, 0x08, 0xba, 0x79, 0xd0, 0x93, 0x1a, 0x6b, 0xeb, 0xc7, 0xd5, 0xa8, 0xb4, 0x9c, + 0xa0, 0x3e, 0x3a, 0x9c, 0x81, 0xe8, 0x0d, 0x53, 0x79, 0xc9, 0x7b, 0x30, 0x1d, 0xa5, 0x84, 0x43, + 0xc9, 0x22, 0x5b, 0x44, 0x70, 0x8d, 0xa5, 0xd2, 0xba, 0x7a, 0x74, 0x38, 0x33, 0xbd, 0x3c, 0x84, + 0x07, 0x87, 0xe6, 0x26, 0x9f, 0x16, 0x60, 0x2a, 0x22, 0x8a, 0xe1, 0x5d, 0x2a, 0x2a, 0xa7, 0x34, + 0x6f, 0xf0, 0xd5, 0xd6, 0x72, 0x4a, 0x04, 0x0e, 0x08, 0x25, 0xcb, 0x30, 0x16, 0xb8, 0xb1, 0xf6, + 0xaa, 0xf0, 0xf6, 0xd2, 0x94, 0x79, 0x60, 0xd3, 0x1d, 0xda, 0x5a, 0x89, 0x7c, 0x04, 0xe1, 0x92, + 0x7a, 0x4f, 0xb5, 0x54, 0x95, 0xb7, 0xd4, 0x95, 0xa3, 0xc3, 0x99, 0x4b, 0x9b, 0x99, 0x1c, 0x38, + 0x24, 0x27, 0xf9, 0xd5, 0x02, 0x4c, 0x28, 0x92, 0x6c, 0xa3, 0xda, 0x69, 0xb6, 0x11, 0x61, 0x3d, + 0x62, 0x33, 0x21, 0x00, 0x53, 0x02, 0xb5, 0x3f, 0x29, 0x43, 0x23, 0x1c, 0x60, 0xc9, 0x4b, 0x50, + 0xe1, 0x0b, 0x7f, 0xa9, 0x37, 0x87, 0x33, 0x27, 0xb7, 0x0f, 0xa0, 0xa0, 0x91, 0x97, 0xa1, 0x66, + 0xb8, 0xdd, 0xae, 0xee, 0x98, 0xdc, 0x98, 0xd3, 0x68, 0x35, 0x99, 0xc2, 0xb0, 0x28, 0x92, 0x50, + 0xd1, 0xc8, 0x55, 0x28, 0xeb, 0x5e, 0x47, 0xd8, 0x55, 0x1a, 0x62, 0x3c, 0x5a, 0xf0, 0x3a, 0x3e, + 0xf2, 0x54, 0xf2, 0x35, 0x28, 0x51, 0x67, 0x7f, 0xba, 0x3c, 0x5c, 0x23, 0xb9, 0xe9, 0xec, 0xdf, + 0xd7, 0xbd, 0x56, 0x53, 0x96, 0xa1, 0x74, 0xd3, 0xd9, 0x47, 0x96, 0x87, 0xac, 0x41, 0x8d, 0x3a, + 0xfb, 0xec, 0xdb, 0x4b, 0x83, 0xc7, 0x8f, 0x0d, 0xc9, 0xce, 0x58, 0xa4, 0x72, 0x1e, 
0xea, 0x35, + 0x32, 0x19, 0x15, 0x04, 0xf9, 0x16, 0x8c, 0x09, 0x15, 0x67, 0x9d, 0x7d, 0x13, 0x7f, 0xba, 0xca, + 0x21, 0x67, 0x86, 0xeb, 0x48, 0x9c, 0x2f, 0x32, 0x30, 0xc5, 0x12, 0x7d, 0x4c, 0x40, 0x91, 0x6f, + 0x41, 0x43, 0xad, 0x47, 0xd5, 0x97, 0xcd, 0xb4, 0xcd, 0xa8, 0x45, 0x2c, 0xd2, 0x8f, 0xfa, 0x96, + 0x47, 0xbb, 0xd4, 0x09, 0xfc, 0xd6, 0x39, 0xb5, 0x5a, 0x57, 0x54, 0x1f, 0x23, 0x34, 0xb2, 0x3d, + 0x68, 0x64, 0x12, 0x16, 0x92, 0x97, 0x86, 0x8c, 0xea, 0x23, 0x58, 0x98, 0xbe, 0x0d, 0x93, 0xa1, + 0x15, 0x48, 0x1a, 0x12, 0x84, 0xcd, 0xe4, 0x35, 0x96, 0x7d, 0x35, 0x49, 0x7a, 0x74, 0x38, 0xf3, + 0x62, 0x86, 0x29, 0x21, 0x62, 0xc0, 0x34, 0x98, 0xf6, 0x7b, 0x25, 0x18, 0xd4, 0xfe, 0x93, 0x8d, + 0x56, 0x38, 0xed, 0x46, 0x4b, 0x57, 0x48, 0x0c, 0x9f, 0x6f, 0xc9, 0x6c, 0xf9, 0x2b, 0x95, 0xf5, + 0x61, 0x4a, 0xa7, 0xfd, 0x61, 0x9e, 0x95, 0x7f, 0x47, 0xfb, 0xac, 0x0c, 0x13, 0x4b, 0x3a, 0xed, + 0xba, 0xce, 0x13, 0xd7, 0x42, 0x85, 0x67, 0x62, 0x2d, 0x74, 0x03, 0xea, 0x1e, 0xed, 0xd9, 0x96, + 0xa1, 0x0b, 0xe5, 0x4b, 0xda, 0x1e, 0x51, 0xa6, 0x61, 0x48, 0x1d, 0xb2, 0x06, 0x2e, 0x3d, 0x93, + 0x6b, 0xe0, 0xf2, 0x17, 0xbf, 0x06, 0xd6, 0x7e, 0xb5, 0x08, 0x5c, 0x51, 0x21, 0xd7, 0xa1, 0xcc, + 0x26, 0xe1, 0xb4, 0xe5, 0x85, 0x77, 0x1c, 0x4e, 0x21, 0x57, 0xa0, 0x18, 0xb8, 0xf2, 0xcf, 0x03, + 0x49, 0x2f, 0x6e, 0xba, 0x58, 0x0c, 0x5c, 0xf2, 0x31, 0x80, 0xe1, 0x3a, 0xa6, 0xa5, 0x4c, 0xf2, + 0xf9, 0x2a, 0xb6, 0xec, 0x7a, 0x0f, 0x74, 0xcf, 0x5c, 0x0c, 0x11, 0xc5, 0x2a, 0x28, 0x7a, 0xc7, + 0x98, 0x34, 0xf2, 0x36, 0x54, 0x5d, 0x67, 0xb9, 0x6f, 0xdb, 0xbc, 0x41, 0x1b, 0xad, 0x9f, 0x60, + 0x4b, 0xd3, 0x7b, 0x3c, 0xe5, 0xd1, 0xe1, 0xcc, 0x65, 0xa1, 0xdf, 0xb2, 0xb7, 0x77, 0x3d, 0x2b, + 0xb0, 0x9c, 0x4e, 0x3b, 0xf0, 0xf4, 0x80, 0x76, 0x0e, 0x50, 0x66, 0xd3, 0x7e, 0xbd, 0x00, 0xcd, + 0x65, 0xeb, 0x21, 0x35, 0xdf, 0xb5, 0x1c, 0xd3, 0x7d, 0x40, 0x10, 0xaa, 0x36, 0x75, 0x3a, 0xc1, + 0xee, 0x88, 0xeb, 0x07, 0xb1, 0x36, 0xe6, 0x08, 0x28, 0x91, 0xc8, 0x1c, 0x34, 0x84, 0xf6, 0x69, + 0x39, 0x1d, 0xde, 0x86, 
0xf5, 0x68, 0xd0, 0x6b, 0x2b, 0x02, 0x46, 0x3c, 0xda, 0x01, 0x9c, 0x1b, + 0x68, 0x06, 0x62, 0x42, 0x39, 0xd0, 0x3b, 0x6a, 0x7c, 0x5d, 0x1e, 0xb9, 0x81, 0x37, 0xf5, 0x4e, + 0xac, 0x71, 0xf9, 0x1c, 0xbf, 0xa9, 0xb3, 0x39, 0x9e, 0xa1, 0x6b, 0x3f, 0x2c, 0x40, 0x7d, 0xb9, + 0xef, 0x18, 0x7c, 0x89, 0xf6, 0x64, 0x8b, 0x9c, 0x52, 0x18, 0x8a, 0x99, 0x0a, 0x43, 0x1f, 0xaa, + 0x7b, 0x0f, 0x42, 0x85, 0xa2, 0x39, 0xbf, 0x3e, 0x7a, 0xaf, 0x90, 0x45, 0x9a, 0xbd, 0xc3, 0xf1, + 0x84, 0xc3, 0x68, 0x42, 0x16, 0xa8, 0x7a, 0xe7, 0x5d, 0x2e, 0x54, 0x0a, 0xbb, 0xf2, 0x35, 0x68, + 0xc6, 0xd8, 0x4e, 0x64, 0x3b, 0xfe, 0x87, 0x65, 0xa8, 0xae, 0xb4, 0xdb, 0x0b, 0x1b, 0xab, 0xe4, + 0x75, 0x68, 0x4a, 0x5f, 0xc2, 0xdd, 0xa8, 0x0d, 0x42, 0x57, 0x52, 0x3b, 0x22, 0x61, 0x9c, 0x8f, + 0xa9, 0x63, 0x1e, 0xd5, 0xed, 0xae, 0xfc, 0x59, 0x42, 0x75, 0x0c, 0x59, 0x22, 0x0a, 0x1a, 0xd1, + 0x61, 0x82, 0xad, 0xf0, 0x58, 0x13, 0x8a, 0xd5, 0x9b, 0xfc, 0x6d, 0x8e, 0xb9, 0xbe, 0xe3, 0x4a, + 0xe2, 0x56, 0x02, 0x00, 0x53, 0x80, 0xe4, 0x2d, 0xa8, 0xeb, 0xfd, 0x60, 0x97, 0x2b, 0xd0, 0xe2, + 0xdf, 0xb8, 0xca, 0x5d, 0x2d, 0x32, 0xed, 0xd1, 0xe1, 0xcc, 0xd8, 0x1d, 0x6c, 0xbd, 0xae, 0xde, + 0x31, 0xe4, 0x66, 0x85, 0x53, 0x2b, 0x46, 0x59, 0xb8, 0xca, 0x89, 0x0b, 0xb7, 0x91, 0x00, 0xc0, + 0x14, 0x20, 0x79, 0x1f, 0xc6, 0xf6, 0xe8, 0x41, 0xa0, 0x6f, 0x4b, 0x01, 0xd5, 0x93, 0x08, 0x98, + 0x62, 0x2a, 0xdc, 0x9d, 0x58, 0x76, 0x4c, 0x80, 0x11, 0x1f, 0x2e, 0xec, 0x51, 0x6f, 0x9b, 0x7a, + 0xae, 0x5c, 0x7d, 0x4a, 0x21, 0xb5, 0x93, 0x08, 0x99, 0x3e, 0x3a, 0x9c, 0xb9, 0x70, 0x27, 0x03, + 0x06, 0x33, 0xc1, 0xb5, 0xff, 0x53, 0x84, 0xc9, 0x15, 0xe1, 0xcc, 0x75, 0x3d, 0x31, 0x09, 0x93, + 0xcb, 0x50, 0xf2, 0x7a, 0x7d, 0xde, 0x73, 0x4a, 0xc2, 0x5c, 0x8b, 0x1b, 0x5b, 0xc8, 0xd2, 0xc8, + 0x7b, 0x50, 0x37, 0xe5, 0x90, 0x21, 0x17, 0xbf, 0x23, 0x19, 0x2a, 0xd4, 0x1b, 0x86, 0x68, 0x4c, + 0xd3, 0xef, 0xfa, 0x9d, 0xb6, 0xf5, 0x31, 0x95, 0xeb, 0x41, 0xae, 0xe9, 0xaf, 0x8b, 0x24, 0x54, + 0x34, 0x36, 0xab, 0xee, 0xd1, 0x03, 0xb1, 0x1a, 0x2a, 0x47, 
0xb3, 0xea, 0x1d, 0x99, 0x86, 0x21, + 0x95, 0xcc, 0xa8, 0x9f, 0x85, 0xf5, 0x82, 0xb2, 0x58, 0xc9, 0xdf, 0x67, 0x09, 0xf2, 0xbf, 0x61, + 0x43, 0xe6, 0x87, 0x56, 0x10, 0x50, 0x4f, 0x7e, 0xc6, 0x91, 0x86, 0xcc, 0xdb, 0x1c, 0x01, 0x25, + 0x12, 0xf9, 0x29, 0x68, 0x70, 0xf0, 0x96, 0xed, 0x6e, 0xf3, 0x0f, 0xd7, 0x10, 0x6b, 0xfa, 0xfb, + 0x2a, 0x11, 0x23, 0xba, 0xf6, 0xa3, 0x22, 0x5c, 0x5a, 0xa1, 0x81, 0xd0, 0x6a, 0x96, 0x68, 0xcf, + 0x76, 0x0f, 0x98, 0x6a, 0x89, 0xf4, 0x23, 0xf2, 0x0e, 0x80, 0xe5, 0x6f, 0xb7, 0xf7, 0x0d, 0xfe, + 0x1f, 0x88, 0x7f, 0xf8, 0xba, 0xfc, 0x25, 0x61, 0xb5, 0xdd, 0x92, 0x94, 0x47, 0x89, 0x37, 0x8c, + 0xe5, 0x89, 0x96, 0x57, 0xc5, 0xc7, 0x2c, 0xaf, 0xda, 0x00, 0xbd, 0x48, 0x41, 0x2d, 0x71, 0xce, + 0x9f, 0x55, 0x62, 0x4e, 0xa2, 0x9b, 0xc6, 0x60, 0xf2, 0xa8, 0x8c, 0x0e, 0x4c, 0x99, 0x74, 0x47, + 0xef, 0xdb, 0x41, 0xa8, 0x54, 0xcb, 0x9f, 0xf8, 0xf8, 0x7a, 0x79, 0xe8, 0x68, 0x5e, 0x4a, 0x21, + 0xe1, 0x00, 0xb6, 0xf6, 0xbb, 0x25, 0xb8, 0xb2, 0x42, 0x83, 0xd0, 0xe2, 0x22, 0x47, 0xc7, 0x76, + 0x8f, 0x1a, 0xec, 0x2b, 0x7c, 0x5a, 0x80, 0xaa, 0xad, 0x6f, 0x53, 0x9b, 0xcd, 0x5e, 0xac, 0x36, + 0x1f, 0x8c, 0x3c, 0x11, 0x0c, 0x97, 0x32, 0xbb, 0xc6, 0x25, 0xa4, 0xa6, 0x06, 0x91, 0x88, 0x52, + 0x3c, 0x1b, 0xd4, 0x0d, 0xbb, 0xef, 0x07, 0xd4, 0xdb, 0x70, 0xbd, 0x40, 0xea, 0x93, 0xe1, 0xa0, + 0xbe, 0x18, 0x91, 0x30, 0xce, 0x47, 0xe6, 0x01, 0x0c, 0xdb, 0xa2, 0x4e, 0xc0, 0x73, 0x89, 0xff, + 0x8a, 0xa8, 0xef, 0xbb, 0x18, 0x52, 0x30, 0xc6, 0xc5, 0x44, 0x75, 0x5d, 0xc7, 0x0a, 0x5c, 0x21, + 0xaa, 0x9c, 0x14, 0xb5, 0x1e, 0x91, 0x30, 0xce, 0xc7, 0xb3, 0xd1, 0xc0, 0xb3, 0x0c, 0x9f, 0x67, + 0xab, 0xa4, 0xb2, 0x45, 0x24, 0x8c, 0xf3, 0xb1, 0x39, 0x2f, 0x56, 0xff, 0x13, 0xcd, 0x79, 0xbf, + 0xdd, 0x80, 0x6b, 0x89, 0x66, 0x0d, 0xf4, 0x80, 0xee, 0xf4, 0xed, 0x36, 0x0d, 0xd4, 0x07, 0x1c, + 0x71, 0x2e, 0xfc, 0xab, 0xd1, 0x77, 0x17, 0x21, 0x24, 0xc6, 0xe9, 0x7c, 0xf7, 0x81, 0x02, 0x1e, + 0xeb, 0xdb, 0xcf, 0x41, 0xc3, 0xd1, 0x03, 0x9f, 0xff, 0xb8, 0xf2, 0x1f, 0x0d, 0xd5, 0xb0, 0xbb, + 
0x8a, 0x80, 0x11, 0x0f, 0xd9, 0x80, 0x0b, 0xb2, 0x89, 0x6f, 0x3e, 0xec, 0xb9, 0x5e, 0x40, 0x3d, + 0x91, 0x57, 0x4e, 0xa7, 0x32, 0xef, 0x85, 0xf5, 0x0c, 0x1e, 0xcc, 0xcc, 0x49, 0xd6, 0xe1, 0xbc, + 0x21, 0xdc, 0xea, 0xd4, 0x76, 0x75, 0x53, 0x01, 0x0a, 0x03, 0x57, 0xb8, 0x34, 0x5a, 0x1c, 0x64, + 0xc1, 0xac, 0x7c, 0xe9, 0xde, 0x5c, 0x1d, 0xa9, 0x37, 0xd7, 0x46, 0xe9, 0xcd, 0xf5, 0xd1, 0x7a, + 0x73, 0xe3, 0x78, 0xbd, 0x99, 0xb5, 0x3c, 0xeb, 0x47, 0xd4, 0x63, 0xea, 0x89, 0x98, 0x61, 0x63, + 0x51, 0x1b, 0x61, 0xcb, 0xb7, 0x33, 0x78, 0x30, 0x33, 0x27, 0xd9, 0x86, 0x2b, 0x22, 0xfd, 0xa6, + 0x63, 0x78, 0x07, 0x3d, 0x36, 0xf1, 0xc4, 0x70, 0x9b, 0x09, 0x0b, 0xe3, 0x95, 0xf6, 0x50, 0x4e, + 0x7c, 0x0c, 0x0a, 0xf9, 0x79, 0x18, 0x17, 0x5f, 0x69, 0x5d, 0xef, 0x71, 0x58, 0x11, 0xc3, 0x71, + 0x51, 0xc2, 0x8e, 0x2f, 0xc6, 0x89, 0x98, 0xe4, 0x25, 0x0b, 0x30, 0xd9, 0xdb, 0x37, 0xd8, 0xe3, + 0xea, 0xce, 0x5d, 0x4a, 0x4d, 0x6a, 0x72, 0xa7, 0x51, 0xa3, 0xf5, 0xbc, 0x32, 0x74, 0x6c, 0x24, + 0xc9, 0x98, 0xe6, 0x27, 0x6f, 0xc1, 0x98, 0x1f, 0xe8, 0x5e, 0x20, 0xcd, 0x7a, 0xd3, 0x13, 0x22, + 0xc6, 0x45, 0x59, 0xbd, 0xda, 0x31, 0x1a, 0x26, 0x38, 0x33, 0xe7, 0x8b, 0xc9, 0xb3, 0x9b, 0x2f, + 0xf2, 0x8c, 0x56, 0xff, 0xa2, 0x08, 0xd7, 0x57, 0x68, 0xb0, 0xee, 0x3a, 0xd2, 0x28, 0x9a, 0x35, + 0xed, 0x1f, 0xcb, 0x26, 0x9a, 0x9c, 0xb4, 0x8b, 0xa7, 0x3a, 0x69, 0x97, 0x4e, 0x69, 0xd2, 0x2e, + 0x9f, 0xe1, 0xa4, 0xfd, 0x8f, 0x8b, 0xf0, 0x7c, 0xa2, 0x25, 0x37, 0x5c, 0x53, 0x0d, 0xf8, 0x5f, + 0x35, 0xe0, 0x31, 0x1a, 0xf0, 0x91, 0xd0, 0x3b, 0xb9, 0x5b, 0x2b, 0xa5, 0xf1, 0x7c, 0x92, 0xd6, + 0x78, 0xde, 0xcf, 0x33, 0xf3, 0x65, 0x48, 0x38, 0xd6, 0x8c, 0x77, 0x1b, 0x88, 0x27, 0x9d, 0x70, + 0xc2, 0xf4, 0x13, 0x53, 0x7a, 0xc2, 0x20, 0x3a, 0x1c, 0xe0, 0xc0, 0x8c, 0x5c, 0xa4, 0x0d, 0x17, + 0x7d, 0xea, 0x04, 0x96, 0x43, 0xed, 0x24, 0x9c, 0xd0, 0x86, 0x5e, 0x94, 0x70, 0x17, 0xdb, 0x59, + 0x4c, 0x98, 0x9d, 0x37, 0xcf, 0x38, 0xf0, 0xaf, 0x81, 0xab, 0x9c, 0xa2, 0x69, 0x4e, 0x4d, 0x63, + 0xf9, 0x34, 0xad, 0xb1, 0x7c, 0x90, 
0xff, 0xbb, 0x8d, 0xa6, 0xad, 0xcc, 0x03, 0xf0, 0xaf, 0x10, + 0x57, 0x57, 0xc2, 0x49, 0x1a, 0x43, 0x0a, 0xc6, 0xb8, 0xd8, 0x04, 0xa4, 0xda, 0x39, 0xae, 0xa9, + 0x84, 0x13, 0x50, 0x3b, 0x4e, 0xc4, 0x24, 0xef, 0x50, 0x6d, 0xa7, 0x32, 0xb2, 0xb6, 0x73, 0x1b, + 0x48, 0xc2, 0xf0, 0x28, 0xf0, 0xaa, 0xc9, 0x18, 0xce, 0xd5, 0x01, 0x0e, 0xcc, 0xc8, 0x35, 0xa4, + 0x2b, 0xd7, 0x4e, 0xb7, 0x2b, 0xd7, 0x47, 0xef, 0xca, 0xe4, 0x03, 0xb8, 0xcc, 0x45, 0xc9, 0xf6, + 0x49, 0x02, 0x0b, 0xbd, 0xe7, 0xc7, 0x24, 0xf0, 0x65, 0x1c, 0xc6, 0x88, 0xc3, 0x31, 0xd8, 0xf7, + 0x31, 0x3c, 0x6a, 0x32, 0xe1, 0xba, 0x3d, 0x5c, 0x27, 0x5a, 0xcc, 0xe0, 0xc1, 0xcc, 0x9c, 0xac, + 0x8b, 0x05, 0xac, 0x1b, 0xea, 0xdb, 0x36, 0x35, 0x65, 0x0c, 0x6b, 0xd8, 0xc5, 0x36, 0xd7, 0xda, + 0x92, 0x82, 0x31, 0xae, 0x2c, 0x35, 0x65, 0xec, 0x84, 0x6a, 0xca, 0x0a, 0xb7, 0xd2, 0xef, 0x24, + 0xb4, 0x21, 0xa9, 0xeb, 0x84, 0x51, 0xc9, 0x8b, 0x69, 0x06, 0x1c, 0xcc, 0xc3, 0xb5, 0x44, 0xc3, + 0xb3, 0x7a, 0x81, 0x9f, 0xc4, 0x9a, 0x48, 0x69, 0x89, 0x19, 0x3c, 0x98, 0x99, 0x93, 0xe9, 0xe7, + 0xbb, 0x54, 0xb7, 0x83, 0xdd, 0x24, 0xe0, 0x64, 0x52, 0x3f, 0xbf, 0x35, 0xc8, 0x82, 0x59, 0xf9, + 0x32, 0x27, 0xa4, 0xa9, 0x67, 0x53, 0xad, 0xfa, 0xb5, 0x12, 0x5c, 0x5e, 0xa1, 0x41, 0x18, 0xde, + 0xf3, 0x95, 0x19, 0xe5, 0x0b, 0x30, 0xa3, 0xfc, 0x56, 0x05, 0xce, 0xaf, 0xd0, 0x60, 0x40, 0x1b, + 0xfb, 0x53, 0xda, 0xfc, 0xeb, 0x70, 0x3e, 0x8a, 0x28, 0x6b, 0x07, 0xae, 0x27, 0xe6, 0xf2, 0xd4, + 0x6a, 0xb9, 0x3d, 0xc8, 0x82, 0x59, 0xf9, 0xc8, 0xb7, 0xe0, 0x79, 0x3e, 0xd5, 0x3b, 0x1d, 0x61, + 0x9f, 0x15, 0xc6, 0x84, 0xd8, 0x9e, 0x88, 0x19, 0x09, 0xf9, 0x7c, 0x3b, 0x9b, 0x0d, 0x87, 0xe5, + 0x27, 0xdf, 0x81, 0xb1, 0x9e, 0xd5, 0xa3, 0xb6, 0xe5, 0x70, 0xfd, 0x2c, 0x77, 0x48, 0xc8, 0x46, + 0x0c, 0x2c, 0x5a, 0xc0, 0xc5, 0x53, 0x31, 0x21, 0x30, 0xb3, 0xa7, 0xd6, 0xcf, 0xb0, 0xa7, 0xfe, + 0xcf, 0x22, 0xd4, 0x56, 0x3c, 0xb7, 0xdf, 0x6b, 0x1d, 0x90, 0x0e, 0x54, 0x1f, 0x70, 0xe7, 0x99, + 0x74, 0x4d, 0x8d, 0x1e, 0x95, 0x2d, 0x7c, 0x70, 0x91, 0x4a, 0x24, 0xde, 
0x51, 0xc2, 0xb3, 0x4e, + 0xbc, 0x47, 0x0f, 0xa8, 0x29, 0x7d, 0x68, 0x61, 0x27, 0xbe, 0xc3, 0x12, 0x51, 0xd0, 0x48, 0x17, + 0x26, 0x75, 0xdb, 0x76, 0x1f, 0x50, 0x73, 0x4d, 0x0f, 0xa8, 0x43, 0x7d, 0xe5, 0x92, 0x3c, 0xa9, + 0x59, 0x9a, 0xfb, 0xf5, 0x17, 0x92, 0x50, 0x98, 0xc6, 0x26, 0x1f, 0x42, 0xcd, 0x0f, 0x5c, 0x4f, + 0x29, 0x5b, 0xcd, 0xf9, 0xc5, 0xd1, 0x3f, 0x7a, 0xeb, 0x9b, 0x6d, 0x01, 0x25, 0x6c, 0xf6, 0xf2, + 0x05, 0x95, 0x00, 0xed, 0x37, 0x0b, 0x00, 0xb7, 0x36, 0x37, 0x37, 0xa4, 0x7b, 0xc1, 0x84, 0xb2, + 0xde, 0x0f, 0x1d, 0x95, 0xa3, 0x3b, 0x04, 0x13, 0x61, 0x99, 0xd2, 0x87, 0xd7, 0x0f, 0x76, 0x91, + 0xa3, 0x93, 0x9f, 0x84, 0x9a, 0x54, 0x90, 0x65, 0xb3, 0x87, 0xa1, 0x05, 0x52, 0x89, 0x46, 0x45, + 0xd7, 0xfe, 0x41, 0x11, 0x60, 0xd5, 0xb4, 0x69, 0x5b, 0x05, 0xd2, 0x37, 0x82, 0x5d, 0x8f, 0xfa, + 0xbb, 0xae, 0x6d, 0x8e, 0xe8, 0x4d, 0xe5, 0x36, 0xff, 0x4d, 0x05, 0x82, 0x11, 0x1e, 0x31, 0x61, + 0xcc, 0x0f, 0x68, 0x4f, 0x45, 0x6a, 0x8e, 0xe8, 0x44, 0x99, 0x12, 0x76, 0x91, 0x08, 0x07, 0x13, + 0xa8, 0x44, 0x87, 0xa6, 0xe5, 0x18, 0xe2, 0x07, 0x69, 0x1d, 0x8c, 0xd8, 0x91, 0x26, 0xd9, 0x8a, + 0x63, 0x35, 0x82, 0xc1, 0x38, 0xa6, 0xf6, 0x87, 0x45, 0xb8, 0xc4, 0xe5, 0xb1, 0x62, 0x24, 0xe2, + 0x31, 0xc9, 0x9f, 0x1f, 0xd8, 0xf4, 0xf7, 0x67, 0x8f, 0x27, 0x5a, 0xec, 0x19, 0x5b, 0xa7, 0x81, + 0x1e, 0xe9, 0x73, 0x51, 0x5a, 0x6c, 0xa7, 0x5f, 0x1f, 0xca, 0x3e, 0x1b, 0xaf, 0x44, 0xeb, 0xb5, + 0x47, 0xee, 0x42, 0xd9, 0x15, 0xe0, 0xa3, 0x57, 0xe8, 0x35, 0xe6, 0xa3, 0x16, 0x17, 0x47, 0xfe, + 0x22, 0x54, 0xfd, 0x40, 0x0f, 0xfa, 0xea, 0xd7, 0xdc, 0x3a, 0x6d, 0xc1, 0x1c, 0x3c, 0x1a, 0x47, + 0xc4, 0x3b, 0x4a, 0xa1, 0xda, 0x1f, 0x16, 0xe0, 0x4a, 0x76, 0xc6, 0x35, 0xcb, 0x0f, 0xc8, 0x9f, + 0x1b, 0x68, 0xf6, 0x63, 0x7e, 0x71, 0x96, 0x9b, 0x37, 0x7a, 0x18, 0x17, 0xae, 0x52, 0x62, 0x4d, + 0x1e, 0x40, 0xc5, 0x0a, 0x68, 0x57, 0xad, 0x2f, 0xef, 0x9d, 0x72, 0xd5, 0x63, 0x53, 0x3b, 0x93, + 0x82, 0x42, 0x98, 0xf6, 0x59, 0x71, 0x58, 0x95, 0xf9, 0xf4, 0x61, 0x27, 0x63, 0x7e, 0xef, 0xe4, + 0x8b, 0xf9, 
0x4d, 0x16, 0x68, 0x30, 0xf4, 0xf7, 0x2f, 0x0c, 0x86, 0xfe, 0xde, 0xcb, 0x1f, 0xfa, + 0x9b, 0x6a, 0x86, 0xa1, 0x11, 0xc0, 0x9f, 0x97, 0xe0, 0xea, 0xe3, 0xba, 0x0d, 0x9b, 0xcf, 0x64, + 0xef, 0xcc, 0x3b, 0x9f, 0x3d, 0xbe, 0x1f, 0x92, 0x79, 0xa8, 0xf4, 0x76, 0x75, 0x5f, 0x29, 0x65, + 0x6a, 0xc1, 0x52, 0xd9, 0x60, 0x89, 0x8f, 0xd8, 0xa0, 0xc1, 0x95, 0x39, 0xfe, 0x8a, 0x82, 0x95, + 0x0d, 0xc7, 0x5d, 0xea, 0xfb, 0x91, 0x4d, 0x20, 0x1c, 0x8e, 0xd7, 0x45, 0x32, 0x2a, 0x3a, 0x09, + 0xa0, 0x2a, 0x4c, 0xcc, 0x72, 0x66, 0x1a, 0x3d, 0x90, 0x2b, 0x23, 0x4c, 0x3c, 0xaa, 0x94, 0xf4, + 0x56, 0x48, 0x59, 0x64, 0x16, 0xca, 0x41, 0x14, 0xb4, 0xab, 0x96, 0xe6, 0xe5, 0x0c, 0xfd, 0x94, + 0xf3, 0xb1, 0x85, 0xbd, 0xbb, 0xcd, 0x8d, 0xea, 0xa6, 0xf4, 0x9f, 0x5b, 0xae, 0xc3, 0x15, 0xb2, + 0x52, 0xb4, 0xb0, 0xbf, 0x37, 0xc0, 0x81, 0x19, 0xb9, 0xb4, 0x7f, 0x57, 0x87, 0x4b, 0xd9, 0xfd, + 0x81, 0xb5, 0xdb, 0x3e, 0xf5, 0x7c, 0x86, 0x5d, 0x48, 0xb6, 0xdb, 0x7d, 0x91, 0x8c, 0x8a, 0xfe, + 0xa5, 0x0e, 0x38, 0xfb, 0xad, 0x02, 0x5c, 0xf6, 0xa4, 0x8f, 0xe8, 0x69, 0x04, 0x9d, 0xbd, 0x28, + 0xcc, 0x19, 0x43, 0x04, 0xe2, 0xf0, 0xb2, 0x90, 0xbf, 0x57, 0x80, 0xe9, 0x6e, 0xca, 0xce, 0x71, + 0x86, 0xfb, 0xd6, 0x78, 0x54, 0xfc, 0xfa, 0x10, 0x79, 0x38, 0xb4, 0x24, 0xe4, 0x3b, 0xd0, 0xec, + 0xb1, 0x7e, 0xe1, 0x07, 0xd4, 0x31, 0xd4, 0xd6, 0xb5, 0xd1, 0xff, 0xa4, 0x8d, 0x08, 0x4b, 0x85, + 0xa2, 0x09, 0xfd, 0x20, 0x46, 0xc0, 0xb8, 0xc4, 0x67, 0x7c, 0xa3, 0xda, 0x0d, 0xa8, 0xfb, 0x34, + 0x08, 0x2c, 0xa7, 0x23, 0xd6, 0x1b, 0x0d, 0xf1, 0xaf, 0xb4, 0x65, 0x1a, 0x86, 0x54, 0xf2, 0x53, + 0xd0, 0xe0, 0x2e, 0xa7, 0x05, 0xaf, 0xe3, 0x4f, 0x37, 0x78, 0xb8, 0xd8, 0xb8, 0x08, 0x80, 0x93, + 0x89, 0x18, 0xd1, 0xc9, 0x6b, 0x30, 0xb6, 0xcd, 0x7f, 0x5f, 0xb9, 0x77, 0x59, 0xd8, 0xb8, 0xb8, + 0xb6, 0xd6, 0x8a, 0xa5, 0x63, 0x82, 0x8b, 0xcc, 0x03, 0xd0, 0xd0, 0x2f, 0x97, 0xb6, 0x67, 0x45, + 0x1e, 0x3b, 0x8c, 0x71, 0x91, 0x17, 0xa1, 0x14, 0xd8, 0x3e, 0xb7, 0x61, 0xd5, 0xa3, 0x25, 0xe8, + 0xe6, 0x5a, 0x1b, 0x59, 0xba, 0xf6, 0xa3, 0x02, 
0x4c, 0xa6, 0x36, 0x97, 0xb0, 0x2c, 0x7d, 0xcf, + 0x96, 0xc3, 0x48, 0x98, 0x65, 0x0b, 0xd7, 0x90, 0xa5, 0x93, 0x0f, 0xa4, 0x5a, 0x5e, 0xcc, 0x79, + 0x4c, 0xc3, 0x5d, 0x3d, 0xf0, 0x99, 0x1e, 0x3e, 0xa0, 0x91, 0x73, 0x37, 0x5f, 0x54, 0x1e, 0x39, + 0x0f, 0xc4, 0xdc, 0x7c, 0x11, 0x0d, 0x13, 0x9c, 0x29, 0x83, 0x5f, 0xf9, 0x38, 0x06, 0x3f, 0xed, + 0xd7, 0x8b, 0xb1, 0x16, 0x90, 0x9a, 0xfd, 0x13, 0x5a, 0xe0, 0x15, 0x36, 0x81, 0x86, 0x93, 0x7b, + 0x23, 0x3e, 0xff, 0xf1, 0xc9, 0x58, 0x52, 0xc9, 0xbb, 0xa2, 0xed, 0x4b, 0x39, 0x37, 0xc3, 0x6e, + 0xae, 0xb5, 0x45, 0x74, 0x95, 0xfa, 0x6a, 0xe1, 0x27, 0x28, 0x9f, 0xd1, 0x27, 0xd0, 0xfe, 0x55, + 0x09, 0x9a, 0xb7, 0xdd, 0xed, 0x2f, 0x49, 0x04, 0x75, 0xf6, 0x34, 0x55, 0xfc, 0x02, 0xa7, 0xa9, + 0x2d, 0x78, 0x3e, 0x08, 0xec, 0x36, 0x35, 0x5c, 0xc7, 0xf4, 0x17, 0x76, 0x02, 0xea, 0x2d, 0x5b, + 0x8e, 0xe5, 0xef, 0x52, 0x53, 0xba, 0x93, 0x5e, 0x38, 0x3a, 0x9c, 0x79, 0x7e, 0x73, 0x73, 0x2d, + 0x8b, 0x05, 0x87, 0xe5, 0xe5, 0xc3, 0x86, 0xd8, 0x09, 0xc8, 0x77, 0xca, 0xc8, 0x98, 0x1b, 0x31, + 0x6c, 0xc4, 0xd2, 0x31, 0xc1, 0xa5, 0x7d, 0xb7, 0x08, 0x8d, 0x70, 0x03, 0x3e, 0x79, 0x19, 0x6a, + 0xdb, 0x9e, 0xbb, 0x47, 0x3d, 0xe1, 0xb9, 0x93, 0x3b, 0x65, 0x5a, 0x22, 0x09, 0x15, 0x8d, 0xbc, + 0x04, 0x95, 0xc0, 0xed, 0x59, 0x46, 0xda, 0xa0, 0xb6, 0xc9, 0x12, 0x51, 0xd0, 0xce, 0xae, 0x83, + 0xbf, 0x92, 0x50, 0xed, 0x1a, 0x43, 0x95, 0xb1, 0xf7, 0xa1, 0xec, 0xeb, 0xbe, 0x2d, 0xe7, 0xd3, + 0x1c, 0x7b, 0xd9, 0x17, 0xda, 0x6b, 0x72, 0x2f, 0xfb, 0x42, 0x7b, 0x0d, 0x39, 0xa8, 0xf6, 0x27, + 0x45, 0x68, 0x8a, 0x76, 0x13, 0xa3, 0xc2, 0x69, 0xb6, 0xdc, 0xdb, 0x3c, 0x94, 0xc2, 0xef, 0x77, + 0xa9, 0xc7, 0xcd, 0x4c, 0x72, 0x90, 0x8b, 0xfb, 0x07, 0x22, 0x62, 0x18, 0x4e, 0x11, 0x25, 0xa9, + 0xa6, 0x2f, 0x9f, 0x61, 0xd3, 0x57, 0x8e, 0xd5, 0xf4, 0xd5, 0xb3, 0x68, 0xfa, 0x4f, 0x8b, 0xd0, + 0x58, 0xb3, 0x76, 0xa8, 0x71, 0x60, 0xd8, 0x7c, 0x4f, 0xa0, 0x49, 0x6d, 0x1a, 0xd0, 0x15, 0x4f, + 0x37, 0xe8, 0x06, 0xf5, 0x2c, 0x7e, 0x40, 0x0d, 0xfb, 0x3f, 0xf8, 0x08, 0x24, 0xf7, 
0x04, 0x2e, + 0x0d, 0xe1, 0xc1, 0xa1, 0xb9, 0xc9, 0x2a, 0x8c, 0x99, 0xd4, 0xb7, 0x3c, 0x6a, 0x6e, 0xc4, 0x16, + 0x2a, 0x2f, 0xab, 0xa9, 0x66, 0x29, 0x46, 0x7b, 0x74, 0x38, 0x33, 0xae, 0x0c, 0x94, 0x62, 0xc5, + 0x92, 0xc8, 0xca, 0x7e, 0xf9, 0x9e, 0xde, 0xf7, 0xb3, 0xca, 0x18, 0xfb, 0xe5, 0x37, 0xb2, 0x59, + 0x70, 0x58, 0x5e, 0xad, 0x02, 0xa5, 0x35, 0xb7, 0xa3, 0x7d, 0x56, 0x82, 0xf0, 0x24, 0x23, 0xf2, + 0x57, 0x0a, 0xd0, 0xd4, 0x1d, 0xc7, 0x0d, 0xe4, 0x29, 0x41, 0xc2, 0x03, 0x8f, 0xb9, 0x0f, 0x4c, + 0x9a, 0x5d, 0x88, 0x40, 0x85, 0xf3, 0x36, 0x74, 0x28, 0xc7, 0x28, 0x18, 0x97, 0x4d, 0xfa, 0x29, + 0x7f, 0xf2, 0x7a, 0xfe, 0x52, 0x1c, 0xc3, 0x7b, 0x7c, 0xe5, 0x1b, 0x30, 0x95, 0x2e, 0xec, 0x49, + 0xdc, 0x41, 0xb9, 0x1c, 0xf3, 0x45, 0x80, 0x28, 0xa6, 0xe4, 0x29, 0x18, 0xb1, 0xac, 0x84, 0x11, + 0x6b, 0x65, 0xf4, 0x06, 0x0e, 0x0b, 0x3d, 0xd4, 0x70, 0xf5, 0x51, 0xca, 0x70, 0xb5, 0x7a, 0x1a, + 0xc2, 0x1e, 0x6f, 0xac, 0xfa, 0xfb, 0x05, 0x98, 0x8a, 0x98, 0xe5, 0x0e, 0xd9, 0x37, 0x61, 0xdc, + 0xa3, 0xba, 0xd9, 0xd2, 0x03, 0x63, 0x97, 0x87, 0x7a, 0x17, 0x78, 0x6c, 0xf6, 0xb9, 0xa3, 0xc3, + 0x99, 0x71, 0x8c, 0x13, 0x30, 0xc9, 0x47, 0x74, 0x68, 0xb2, 0x84, 0x4d, 0xab, 0x4b, 0xdd, 0x7e, + 0x30, 0xa2, 0xd5, 0x94, 0x2f, 0x58, 0x30, 0x82, 0xc1, 0x38, 0xa6, 0xf6, 0x79, 0x01, 0x26, 0xe2, + 0x05, 0x3e, 0x73, 0x8b, 0xda, 0x6e, 0xd2, 0xa2, 0xb6, 0x78, 0x0a, 0xdf, 0x64, 0x88, 0x15, 0xed, + 0x13, 0x88, 0x57, 0x8d, 0x5b, 0xce, 0xe2, 0xc6, 0x82, 0xc2, 0x63, 0x8d, 0x05, 0x5f, 0xfe, 0xc3, + 0x6b, 0x86, 0x69, 0xb9, 0xe5, 0x67, 0x58, 0xcb, 0xfd, 0x22, 0x4f, 0xc0, 0x89, 0x9d, 0xe2, 0x52, + 0xcd, 0x71, 0x8a, 0x4b, 0x37, 0x3c, 0xc5, 0xa5, 0x76, 0x6a, 0x83, 0xce, 0x71, 0x4e, 0x72, 0xa9, + 0x3f, 0xd5, 0x93, 0x5c, 0x1a, 0x67, 0x75, 0x92, 0x0b, 0xe4, 0x3d, 0xc9, 0xe5, 0x93, 0x02, 0x4c, + 0x98, 0x89, 0x1d, 0xb3, 0xdc, 0xb6, 0x90, 0x67, 0xaa, 0x49, 0x6e, 0xc0, 0x15, 0x5b, 0xa6, 0x92, + 0x69, 0x98, 0x12, 0x49, 0x3e, 0x2d, 0xc0, 0x44, 0xbf, 0x67, 0xea, 0x41, 0x68, 0x38, 0xe2, 0x46, + 0x8b, 0x3c, 0xa5, 0xd8, 
0x4a, 0xc0, 0x45, 0x8d, 0x9b, 0x4c, 0xc7, 0x94, 0x58, 0xed, 0x8f, 0x6b, + 0xf1, 0x19, 0xe9, 0x69, 0x1b, 0xcd, 0xdf, 0x48, 0x1a, 0xcd, 0xaf, 0xa7, 0x8d, 0xe6, 0x93, 0xb1, + 0x78, 0xd6, 0xb8, 0xe1, 0xfc, 0xa7, 0x63, 0x03, 0x75, 0x89, 0x9f, 0xe1, 0x12, 0x7e, 0xf3, 0x8c, + 0xc1, 0x7a, 0x01, 0x26, 0xa5, 0xf6, 0xaa, 0x88, 0x7c, 0x94, 0x1b, 0x8f, 0xc2, 0x9c, 0x96, 0x92, + 0x64, 0x4c, 0xf3, 0x33, 0x81, 0xbe, 0x3a, 0x40, 0x53, 0x2c, 0x15, 0xa2, 0x4e, 0xa6, 0x0e, 0xb7, + 0x0c, 0x39, 0xd8, 0xb2, 0xc2, 0xa3, 0xba, 0x2f, 0x4d, 0xdf, 0xb1, 0x65, 0x05, 0xf2, 0x54, 0x94, + 0xd4, 0xb8, 0xfd, 0xbf, 0xf6, 0x04, 0xfb, 0xbf, 0x0e, 0x4d, 0x5b, 0xf7, 0x03, 0xf1, 0x35, 0x4d, + 0xf9, 0x3b, 0xff, 0x99, 0xe3, 0x4d, 0xbc, 0x6c, 0x32, 0x8f, 0xb4, 0xdb, 0xb5, 0x08, 0x06, 0xe3, + 0x98, 0xc4, 0x84, 0x31, 0xf6, 0xca, 0x7f, 0x6d, 0x73, 0x21, 0x90, 0xc7, 0x4c, 0x9d, 0x44, 0x46, + 0x68, 0xb6, 0x5a, 0x8b, 0xe1, 0x60, 0x02, 0x75, 0x88, 0x8b, 0x00, 0x46, 0x71, 0x11, 0x90, 0x9f, + 0x17, 0x9a, 0xd3, 0x41, 0xf8, 0x59, 0x9b, 0xfc, 0xb3, 0x86, 0x21, 0x92, 0x18, 0x27, 0x62, 0x92, + 0x97, 0xf5, 0x8a, 0xbe, 0x6c, 0x06, 0x95, 0x7d, 0x2c, 0xd9, 0x2b, 0xb6, 0x92, 0x64, 0x4c, 0xf3, + 0x93, 0x0d, 0xb8, 0x10, 0x26, 0xc5, 0x8b, 0x31, 0xce, 0x71, 0xc2, 0x98, 0xb5, 0xad, 0x0c, 0x1e, + 0xcc, 0xcc, 0xc9, 0x37, 0x81, 0xf4, 0x3d, 0x8f, 0x3a, 0xc1, 0x2d, 0xdd, 0xdf, 0x95, 0xc1, 0x6f, + 0xd1, 0x26, 0x90, 0x88, 0x84, 0x71, 0x3e, 0x32, 0x0f, 0x20, 0xe0, 0x78, 0xae, 0xc9, 0x64, 0x7c, + 0xe9, 0x56, 0x48, 0xc1, 0x18, 0x97, 0xf6, 0x49, 0x03, 0x9a, 0x77, 0xf5, 0xc0, 0xda, 0xa7, 0xdc, + 0x9f, 0x77, 0x36, 0x4e, 0x95, 0xbf, 0x55, 0x80, 0x4b, 0xc9, 0xa0, 0xcd, 0x33, 0xf4, 0xac, 0xf0, + 0x23, 0x60, 0x30, 0x53, 0x1a, 0x0e, 0x29, 0x05, 0xf7, 0xb1, 0x0c, 0xc4, 0x80, 0x9e, 0xb5, 0x8f, + 0xa5, 0x3d, 0x4c, 0x20, 0x0e, 0x2f, 0xcb, 0x97, 0xc5, 0xc7, 0xf2, 0x6c, 0x9f, 0x14, 0x98, 0xf2, + 0x00, 0xd5, 0x9e, 0x19, 0x0f, 0x50, 0xfd, 0x99, 0x50, 0xbb, 0x7b, 0x31, 0x0f, 0x50, 0x23, 0x67, + 0x24, 0x92, 0xdc, 0xe7, 0x20, 0xd0, 0x86, 0x79, 0x92, 0xf8, 
0x11, 0x05, 0xca, 0x32, 0xcf, 0xb4, + 0xd5, 0x6d, 0xdd, 0xb7, 0x0c, 0xa9, 0x76, 0xe4, 0x38, 0x19, 0x55, 0x9d, 0xdd, 0x26, 0x02, 0x16, + 0xf8, 0x2b, 0x0a, 0xec, 0xe8, 0xa8, 0xba, 0x62, 0xae, 0xa3, 0xea, 0xc8, 0x22, 0x94, 0x9d, 0x3d, + 0x7a, 0x70, 0xb2, 0xcd, 0xfe, 0x7c, 0x15, 0x76, 0xf7, 0x0e, 0x3d, 0x40, 0x9e, 0x59, 0xfb, 0x6e, + 0x11, 0x80, 0x55, 0xff, 0x78, 0xbe, 0x98, 0x9f, 0x84, 0x9a, 0xdf, 0xe7, 0x56, 0x13, 0xa9, 0x30, + 0x45, 0xe1, 0x5b, 0x22, 0x19, 0x15, 0x9d, 0xbc, 0x04, 0x95, 0x8f, 0xfa, 0xb4, 0xaf, 0x02, 0x0b, + 0x42, 0xc5, 0xfd, 0x9b, 0x2c, 0x11, 0x05, 0xed, 0xec, 0xec, 0xaa, 0xca, 0x67, 0x53, 0x39, 0x2b, + 0x9f, 0x4d, 0x03, 0x6a, 0x77, 0x5d, 0x1e, 0x0d, 0xaa, 0xfd, 0xf7, 0x22, 0x40, 0x14, 0x6d, 0x47, + 0x7e, 0xb3, 0x00, 0x17, 0xc3, 0x1f, 0x2e, 0x10, 0xeb, 0x2f, 0x7e, 0x18, 0x71, 0x6e, 0xff, 0x4d, + 0xd6, 0xcf, 0xce, 0x47, 0xa0, 0x8d, 0x2c, 0x71, 0x98, 0x5d, 0x0a, 0x82, 0x50, 0xa7, 0xdd, 0x5e, + 0x70, 0xb0, 0x64, 0x79, 0xb2, 0x07, 0x66, 0x06, 0x75, 0xde, 0x94, 0x3c, 0x22, 0xab, 0x34, 0x12, + 0xf0, 0x9f, 0x48, 0x51, 0x30, 0xc4, 0x21, 0xbb, 0x50, 0x77, 0xdc, 0x0f, 0x7c, 0xd6, 0x1c, 0xb2, + 0x3b, 0xbe, 0x33, 0x7a, 0x93, 0x8b, 0x66, 0x15, 0xf6, 0x7e, 0xf9, 0x82, 0x35, 0x47, 0x36, 0xf6, + 0x6f, 0x14, 0xe1, 0x7c, 0x46, 0x3b, 0x90, 0x77, 0x60, 0x4a, 0x06, 0x36, 0x46, 0xa7, 0x72, 0x17, + 0xa2, 0x53, 0xb9, 0xdb, 0x29, 0x1a, 0x0e, 0x70, 0x93, 0x0f, 0x00, 0x74, 0xc3, 0xa0, 0xbe, 0xbf, + 0xee, 0x9a, 0x6a, 0x3d, 0xf0, 0x36, 0x53, 0x5f, 0x16, 0xc2, 0xd4, 0x47, 0x87, 0x33, 0x3f, 0x93, + 0x15, 0xab, 0x9c, 0x6a, 0xe7, 0x28, 0x03, 0xc6, 0x20, 0xc9, 0xb7, 0x01, 0xc4, 0x22, 0x3c, 0x3c, + 0x4e, 0xe1, 0x09, 0x96, 0xab, 0x59, 0x75, 0x70, 0xd5, 0xec, 0x37, 0xfb, 0xba, 0x13, 0x58, 0xc1, + 0x81, 0x38, 0xbd, 0xe6, 0x7e, 0x88, 0x82, 0x31, 0x44, 0xed, 0x9f, 0x17, 0xa1, 0xae, 0x6c, 0xe6, + 0x4f, 0xc1, 0x50, 0xda, 0x49, 0x18, 0x4a, 0x4f, 0x29, 0x3a, 0x39, 0xcb, 0x4c, 0xea, 0xa6, 0xcc, + 0xa4, 0x2b, 0xf9, 0x45, 0x3d, 0xde, 0x48, 0xfa, 0x3b, 0x45, 0x98, 0x50, 0xac, 0x79, 0x4d, 0xa4, + 
0x5f, 0x87, 0x49, 0x11, 0x55, 0xb0, 0xae, 0x3f, 0x14, 0x07, 0xf9, 0xf0, 0x06, 0x2b, 0x8b, 0x80, + 0xe0, 0x56, 0x92, 0x84, 0x69, 0x5e, 0xd6, 0xad, 0x45, 0xd2, 0x16, 0x5b, 0x84, 0x09, 0x3f, 0xa4, + 0x58, 0x6f, 0xf2, 0x6e, 0xdd, 0x4a, 0xd1, 0x70, 0x80, 0x3b, 0x6d, 0xa3, 0x2d, 0x9f, 0x81, 0x8d, + 0xf6, 0x3f, 0x14, 0x60, 0x2c, 0x6a, 0xaf, 0x33, 0xb7, 0xd0, 0xee, 0x24, 0x2d, 0xb4, 0x0b, 0xb9, + 0xbb, 0xc3, 0x10, 0xfb, 0xec, 0x5f, 0xab, 0x41, 0x22, 0x48, 0x9e, 0x6c, 0xc3, 0x15, 0x2b, 0x33, + 0xd4, 0x2f, 0x36, 0xda, 0x84, 0xbb, 0xbe, 0x57, 0x87, 0x72, 0xe2, 0x63, 0x50, 0x48, 0x1f, 0xea, + 0xfb, 0xd4, 0x0b, 0x2c, 0x83, 0xaa, 0xfa, 0xad, 0xe4, 0x56, 0xc9, 0xa4, 0x15, 0x3a, 0x6c, 0xd3, + 0xfb, 0x52, 0x00, 0x86, 0xa2, 0xc8, 0x36, 0x54, 0xa8, 0xd9, 0xa1, 0xea, 0x68, 0xa5, 0x9c, 0x07, + 0x97, 0x86, 0xed, 0xc9, 0xde, 0x7c, 0x14, 0xd0, 0xc4, 0x87, 0x86, 0xad, 0xbc, 0x8c, 0xb2, 0x1f, + 0x8e, 0xae, 0x60, 0x85, 0xfe, 0xca, 0xe8, 0xd4, 0x85, 0x30, 0x09, 0x23, 0x39, 0x64, 0x2f, 0x34, + 0x77, 0x56, 0x4e, 0x69, 0xf0, 0x78, 0x8c, 0xb1, 0xd3, 0x87, 0xc6, 0x03, 0x3d, 0xa0, 0x5e, 0x57, + 0xf7, 0xf6, 0xe4, 0x6a, 0x63, 0xf4, 0x1a, 0xbe, 0xab, 0x90, 0xa2, 0x1a, 0x86, 0x49, 0x18, 0xc9, + 0x21, 0x2e, 0x34, 0x02, 0xa9, 0x3e, 0x2b, 0x9b, 0xee, 0xe8, 0x42, 0x95, 0x22, 0xee, 0xcb, 0x60, + 0x79, 0xf5, 0x8a, 0x91, 0x0c, 0xb2, 0x9f, 0x38, 0x5b, 0x5a, 0x9c, 0x28, 0xde, 0xca, 0xe1, 0x1b, + 0x90, 0x50, 0xd1, 0x74, 0x93, 0x7d, 0x46, 0xb5, 0xf6, 0xbf, 0x2a, 0xd1, 0xb0, 0xfc, 0xb4, 0xed, + 0x84, 0xaf, 0x25, 0xed, 0x84, 0xd7, 0xd2, 0x76, 0xc2, 0x94, 0xb3, 0xfa, 0xe4, 0xe1, 0xb5, 0x29, + 0xf3, 0x5a, 0xf9, 0x0c, 0xcc, 0x6b, 0xaf, 0x42, 0x73, 0x9f, 0x8f, 0x04, 0xe2, 0x9c, 0xa6, 0x0a, + 0x9f, 0x46, 0xf8, 0xc8, 0x7e, 0x3f, 0x4a, 0xc6, 0x38, 0x0f, 0xcb, 0x22, 0x6f, 0xd3, 0x08, 0x0f, + 0xba, 0x95, 0x59, 0xda, 0x51, 0x32, 0xc6, 0x79, 0x78, 0x64, 0x9e, 0xe5, 0xec, 0x89, 0x0c, 0x35, + 0x9e, 0x41, 0x44, 0xe6, 0xa9, 0x44, 0x8c, 0xe8, 0xe4, 0x06, 0xd4, 0xfb, 0xe6, 0x8e, 0xe0, 0xad, + 0x73, 0x5e, 0xae, 0x61, 0x6e, 0x2d, 
0x2d, 0xcb, 0x73, 0xa3, 0x14, 0x95, 0x95, 0xa4, 0xab, 0xf7, + 0x14, 0x81, 0xaf, 0x0d, 0x65, 0x49, 0xd6, 0xa3, 0x64, 0x8c, 0xf3, 0x90, 0x9f, 0x83, 0x09, 0x8f, + 0x9a, 0x7d, 0x83, 0x86, 0xb9, 0x80, 0xe7, 0x22, 0xe2, 0xda, 0x90, 0x38, 0x05, 0x53, 0x9c, 0x43, + 0x8c, 0x84, 0xcd, 0x91, 0x8c, 0x84, 0xdf, 0x80, 0x09, 0xd3, 0xd3, 0x2d, 0x87, 0x9a, 0xf7, 0x1c, + 0x1e, 0x91, 0x20, 0xe3, 0x03, 0x43, 0x0b, 0xf9, 0x52, 0x82, 0x8a, 0x29, 0x6e, 0xed, 0x0f, 0x0a, + 0x40, 0x06, 0x23, 0xe1, 0xc9, 0x2e, 0x54, 0x1d, 0x6e, 0x3d, 0xcb, 0x7d, 0xb4, 0x76, 0xcc, 0x08, + 0x27, 0x86, 0x35, 0x99, 0x20, 0xf1, 0x89, 0x03, 0x75, 0xfa, 0x30, 0xa0, 0x9e, 0x13, 0xee, 0x8c, + 0x39, 0x9d, 0x63, 0xbc, 0xc5, 0x6a, 0x42, 0x22, 0x63, 0x28, 0x43, 0xfb, 0xa3, 0x22, 0x34, 0x63, + 0x7c, 0x4f, 0x5a, 0x94, 0xf2, 0xcd, 0xf9, 0xc2, 0x68, 0xb5, 0xe5, 0xd9, 0xf2, 0x0f, 0x8d, 0x6d, + 0xce, 0x97, 0x24, 0x5c, 0xc3, 0x38, 0x1f, 0x99, 0x07, 0xe8, 0xea, 0x7e, 0x40, 0x3d, 0x3e, 0x7b, + 0xa7, 0xb6, 0xc4, 0xaf, 0x87, 0x14, 0x8c, 0x71, 0x91, 0xeb, 0xf2, 0x20, 0xf6, 0x72, 0xf2, 0x08, + 0xc3, 0x21, 0xa7, 0xac, 0x57, 0x4e, 0xe1, 0x94, 0x75, 0xd2, 0x81, 0x29, 0x55, 0x6a, 0x45, 0x3d, + 0xd9, 0x01, 0x77, 0x62, 0xfd, 0x93, 0x82, 0xc0, 0x01, 0x50, 0xed, 0xbb, 0x05, 0x18, 0x4f, 0x98, + 0x4c, 0xc4, 0xe1, 0x83, 0x6a, 0x1f, 0x47, 0xe2, 0xf0, 0xc1, 0xd8, 0xf6, 0x8b, 0x57, 0xa0, 0x2a, + 0x1a, 0x28, 0x1d, 0x9e, 0x29, 0x9a, 0x10, 0x25, 0x95, 0x8d, 0x85, 0xd2, 0x28, 0x9b, 0x1e, 0x0b, + 0xa5, 0xd5, 0x16, 0x15, 0x5d, 0xf8, 0x3a, 0x44, 0xe9, 0x64, 0x4b, 0xc7, 0x7c, 0x1d, 0x22, 0x1d, + 0x43, 0x0e, 0xed, 0xf7, 0x78, 0xb9, 0x03, 0xef, 0x20, 0x5c, 0x0b, 0x76, 0xa0, 0x26, 0x43, 0xf2, + 0xe4, 0xaf, 0xf1, 0x4e, 0x0e, 0x3b, 0x0e, 0xc7, 0x91, 0xc1, 0x67, 0xba, 0xb1, 0x77, 0x6f, 0x67, + 0x07, 0x15, 0x3a, 0xb9, 0x09, 0x0d, 0xd7, 0x59, 0xd6, 0x2d, 0xbb, 0xef, 0xa9, 0x99, 0xe1, 0x27, + 0xd8, 0x58, 0x77, 0x4f, 0x25, 0x3e, 0x3a, 0x9c, 0xb9, 0x14, 0xbe, 0x24, 0x0a, 0x89, 0x51, 0x4e, + 0xed, 0x2f, 0x17, 0xe0, 0x22, 0xba, 0xb6, 0x6d, 0x39, 0x9d, 0xa4, 0xb3, 
0x8c, 0xd8, 0x30, 0xd1, + 0xd5, 0x1f, 0x6e, 0x39, 0xfa, 0xbe, 0x6e, 0xd9, 0xfa, 0xb6, 0x4d, 0x9f, 0xb8, 0x96, 0xeb, 0x07, + 0x96, 0x3d, 0x2b, 0xae, 0x83, 0x9b, 0x5d, 0x75, 0x82, 0x7b, 0x5e, 0x3b, 0xf0, 0x2c, 0xa7, 0x23, + 0x06, 0xbd, 0xf5, 0x04, 0x16, 0xa6, 0xb0, 0xb5, 0x3f, 0x2e, 0x01, 0x0f, 0x0b, 0x23, 0x6f, 0x42, + 0xa3, 0x4b, 0x8d, 0x5d, 0xdd, 0xb1, 0x7c, 0x75, 0x8c, 0xeb, 0x65, 0x56, 0xaf, 0x75, 0x95, 0xf8, + 0x88, 0x7d, 0x8a, 0x85, 0xf6, 0x1a, 0xdf, 0x79, 0x11, 0xf1, 0x12, 0x03, 0xaa, 0x1d, 0xdf, 0xd7, + 0x7b, 0x56, 0xee, 0xa8, 0x04, 0x71, 0x6c, 0xa6, 0x18, 0x8e, 0xc4, 0x33, 0x4a, 0x68, 0x62, 0x40, + 0xa5, 0x67, 0xeb, 0x96, 0x93, 0xfb, 0xfa, 0x22, 0x56, 0x83, 0x0d, 0x86, 0x24, 0x8c, 0x6b, 0xfc, + 0x11, 0x05, 0x36, 0xe9, 0x43, 0xd3, 0x37, 0x3c, 0xbd, 0xeb, 0xef, 0xea, 0xf3, 0xaf, 0xbf, 0x91, + 0x5b, 0x5d, 0x8d, 0x44, 0x89, 0xd9, 0x73, 0x11, 0x17, 0xd6, 0xdb, 0xb7, 0x16, 0xe6, 0x5f, 0x7f, + 0x03, 0xe3, 0x72, 0xe2, 0x62, 0x5f, 0x7f, 0x75, 0x5e, 0x8e, 0x20, 0xa7, 0x2e, 0xf6, 0xf5, 0x57, + 0xe7, 0x31, 0x2e, 0x47, 0xfb, 0xdf, 0x05, 0x68, 0x84, 0xbc, 0x64, 0x0b, 0x80, 0x8d, 0x65, 0xf2, + 0xa0, 0xcb, 0x13, 0x5d, 0x3a, 0xc1, 0xed, 0x13, 0x5b, 0x61, 0x66, 0x8c, 0x01, 0x65, 0x9c, 0x04, + 0x5a, 0x3c, 0xed, 0x93, 0x40, 0xe7, 0xa0, 0xb1, 0xab, 0x3b, 0xa6, 0xbf, 0xab, 0xef, 0x89, 0x21, + 0x3d, 0x76, 0x36, 0xee, 0x2d, 0x45, 0xc0, 0x88, 0x47, 0xfb, 0xa7, 0x55, 0x10, 0xa1, 0x04, 0x6c, + 0xd0, 0x31, 0x2d, 0x5f, 0xc4, 0xb2, 0x17, 0x78, 0xce, 0x70, 0xd0, 0x59, 0x92, 0xe9, 0x18, 0x72, + 0x90, 0xcb, 0x50, 0xea, 0x5a, 0x8e, 0xf4, 0x3d, 0x71, 0xd3, 0xe3, 0xba, 0xe5, 0x20, 0x4b, 0xe3, + 0x24, 0xfd, 0xa1, 0x0c, 0x43, 0x14, 0x24, 0xfd, 0x21, 0xb2, 0x34, 0xf2, 0x75, 0x98, 0xb4, 0x5d, + 0x77, 0x8f, 0x0d, 0x1f, 0x2a, 0x5a, 0x51, 0xf8, 0x81, 0xb9, 0x31, 0x60, 0x2d, 0x49, 0xc2, 0x34, + 0x2f, 0xd9, 0x82, 0xe7, 0x3f, 0xa6, 0x9e, 0x2b, 0xc7, 0xcb, 0xb6, 0x4d, 0x69, 0x4f, 0xc1, 0x08, + 0x65, 0x8e, 0x07, 0x3d, 0xfe, 0x62, 0x36, 0x0b, 0x0e, 0xcb, 0xcb, 0xc3, 0xa7, 0x75, 0xaf, 0x43, + 0x83, 0x0d, 
0xcf, 0x35, 0xa8, 0xef, 0x5b, 0x4e, 0x47, 0xc1, 0x56, 0x23, 0xd8, 0xcd, 0x6c, 0x16, + 0x1c, 0x96, 0x97, 0xbc, 0x07, 0xd3, 0x82, 0x24, 0xd4, 0x96, 0x05, 0x31, 0xcc, 0x58, 0xb6, 0xba, + 0xf5, 0x6f, 0x5c, 0x78, 0x78, 0x36, 0x87, 0xf0, 0xe0, 0xd0, 0xdc, 0xe4, 0x36, 0x4c, 0x29, 0xff, + 0xde, 0x06, 0xf5, 0xda, 0x61, 0x78, 0xc9, 0x78, 0xeb, 0x1a, 0x5b, 0x79, 0x2f, 0xd1, 0x9e, 0x47, + 0x8d, 0xb8, 0x9f, 0x54, 0x71, 0xe1, 0x40, 0x3e, 0x82, 0x70, 0x89, 0xc7, 0x90, 0x6c, 0xf5, 0x16, + 0x5d, 0xd7, 0x36, 0xdd, 0x07, 0x8e, 0xaa, 0xbb, 0x50, 0x31, 0xb9, 0x4b, 0xaf, 0x9d, 0xc9, 0x81, + 0x43, 0x72, 0xb2, 0x9a, 0x73, 0xca, 0x92, 0xfb, 0xc0, 0x49, 0xa3, 0x42, 0x54, 0xf3, 0xf6, 0x10, + 0x1e, 0x1c, 0x9a, 0x9b, 0x2c, 0x03, 0x49, 0xd7, 0x60, 0xab, 0x27, 0x9d, 0xce, 0x97, 0xc4, 0x99, + 0x35, 0x69, 0x2a, 0x66, 0xe4, 0x20, 0x6b, 0x70, 0x21, 0x9d, 0xca, 0xc4, 0x49, 0xff, 0x33, 0x3f, + 0xad, 0x16, 0x33, 0xe8, 0x98, 0x99, 0x4b, 0xfb, 0x67, 0x45, 0x18, 0x4f, 0x1c, 0x72, 0xf0, 0xcc, + 0x6d, 0x26, 0x67, 0x6b, 0x81, 0xae, 0xdf, 0x59, 0x5d, 0xba, 0x45, 0x75, 0x93, 0x7a, 0x77, 0xa8, + 0x3a, 0x90, 0x42, 0x4c, 0x8b, 0x09, 0x0a, 0xa6, 0x38, 0xc9, 0x0e, 0x54, 0x84, 0x65, 0x3b, 0xef, + 0xf5, 0x25, 0xaa, 0x8d, 0xb8, 0x79, 0x5b, 0xde, 0xf9, 0xe3, 0x7a, 0x14, 0x05, 0xbc, 0x16, 0xc0, + 0x58, 0x9c, 0x83, 0x0d, 0x24, 0x91, 0xda, 0x5b, 0x4b, 0xa8, 0xbc, 0xab, 0x50, 0x0a, 0x82, 0x51, + 0xb7, 0xa9, 0x0b, 0x4f, 0xc9, 0xe6, 0x1a, 0x32, 0x0c, 0x6d, 0x87, 0x7d, 0x3b, 0xdf, 0xb7, 0x5c, + 0x47, 0x9e, 0x59, 0xbe, 0x05, 0xb5, 0x40, 0x1a, 0x0b, 0x47, 0xdb, 0x66, 0xcf, 0x75, 0x25, 0x65, + 0x28, 0x54, 0x58, 0xda, 0x7f, 0x2c, 0x42, 0x23, 0x5c, 0xd8, 0x1f, 0xe3, 0x2c, 0x70, 0x17, 0x1a, + 0x61, 0x0c, 0x5c, 0xee, 0x1b, 0x11, 0xa3, 0xd0, 0x2c, 0xbe, 0x16, 0x0d, 0x5f, 0x31, 0x92, 0x11, + 0x8f, 0xaf, 0x2b, 0xe5, 0x88, 0xaf, 0xeb, 0x41, 0x2d, 0xf0, 0xac, 0x4e, 0x47, 0xae, 0x12, 0xf2, + 0x04, 0xd8, 0x85, 0xcd, 0xb5, 0x29, 0x00, 0x65, 0xcb, 0x8a, 0x17, 0x54, 0x62, 0xb4, 0x0f, 0x61, + 0x2a, 0xcd, 0xc9, 0x55, 0x68, 0x63, 0x97, 0x9a, 
0x7d, 0x5b, 0xb5, 0x71, 0xa4, 0x42, 0xcb, 0x74, + 0x0c, 0x39, 0xd8, 0x32, 0x9c, 0x7d, 0xa6, 0x8f, 0x5d, 0x47, 0xa9, 0xb1, 0x7c, 0x35, 0xb2, 0x29, + 0xd3, 0x30, 0xa4, 0x6a, 0xff, 0xad, 0x04, 0x97, 0x23, 0xf3, 0xcc, 0xba, 0xee, 0xe8, 0x9d, 0x63, + 0x5c, 0x83, 0xf7, 0xd5, 0xc6, 0xa5, 0x93, 0x5e, 0xe8, 0x50, 0x7a, 0x06, 0x2e, 0x74, 0xf8, 0xbf, + 0x45, 0xe0, 0xf1, 0xba, 0xe4, 0x3b, 0x30, 0xa6, 0xc7, 0x6e, 0x40, 0x95, 0x9f, 0xf3, 0x66, 0xee, + 0xcf, 0xc9, 0xc3, 0x82, 0xc3, 0x90, 0xad, 0x78, 0x2a, 0x26, 0x04, 0x12, 0x17, 0xea, 0x3b, 0xba, + 0x6d, 0x33, 0x5d, 0x28, 0xb7, 0xbb, 0x29, 0x21, 0x9c, 0x77, 0xf3, 0x65, 0x09, 0x8d, 0xa1, 0x10, + 0xf2, 0x49, 0x01, 0xc6, 0xbd, 0xf8, 0x72, 0x4d, 0x7e, 0x90, 0x3c, 0xc1, 0x08, 0x31, 0xb4, 0x78, + 0x80, 0x58, 0x7c, 0x4d, 0x98, 0x94, 0xa9, 0xfd, 0xd7, 0x02, 0x8c, 0xb7, 0x6d, 0xcb, 0xb4, 0x9c, + 0xce, 0x19, 0xde, 0x27, 0x71, 0x0f, 0x2a, 0xbe, 0x6d, 0x99, 0x74, 0xc4, 0xd9, 0x44, 0xcc, 0x63, + 0x0c, 0x00, 0x05, 0x4e, 0xf2, 0x82, 0x8a, 0xd2, 0x31, 0x2e, 0xa8, 0xf8, 0x61, 0x15, 0x64, 0xe4, + 0x39, 0xe9, 0x43, 0xa3, 0xa3, 0xce, 0xbd, 0x97, 0x75, 0xbc, 0x95, 0xe3, 0xcc, 0xc4, 0xc4, 0x09, + 0xfa, 0x62, 0xec, 0x0f, 0x13, 0x31, 0x92, 0x44, 0x68, 0xf2, 0xea, 0xdd, 0xa5, 0x9c, 0x57, 0xef, + 0x0a, 0x71, 0x83, 0x97, 0xef, 0xea, 0x50, 0xde, 0x0d, 0x82, 0x9e, 0xec, 0x4c, 0xa3, 0x6f, 0x2d, + 0x88, 0x8e, 0xed, 0x11, 0x3a, 0x11, 0x7b, 0x47, 0x0e, 0xcd, 0x44, 0x38, 0x7a, 0x78, 0xd5, 0xda, + 0x62, 0xae, 0xc0, 0x87, 0xb8, 0x08, 0xf6, 0x8e, 0x1c, 0x9a, 0xfc, 0x32, 0x34, 0x03, 0x4f, 0x77, + 0xfc, 0x1d, 0xd7, 0xeb, 0x52, 0x4f, 0xae, 0x51, 0x97, 0x73, 0xdc, 0x3e, 0xbb, 0x19, 0xa1, 0x09, + 0x8f, 0x6a, 0x22, 0x09, 0xe3, 0xd2, 0xc8, 0x1e, 0xd4, 0xfb, 0xa6, 0x28, 0x98, 0x34, 0x83, 0x2d, + 0xe4, 0xb9, 0x50, 0x38, 0x16, 0xd6, 0xa0, 0xde, 0x30, 0x14, 0x90, 0xbc, 0x55, 0xb0, 0x76, 0x5a, + 0xb7, 0x0a, 0xc6, 0x7b, 0x63, 0xd6, 0x99, 0x22, 0xa4, 0x2b, 0xf5, 0x5a, 0xa7, 0x23, 0xa3, 0xb2, + 0x96, 0x73, 0xab, 0x9c, 0x42, 0x64, 0x33, 0xd4, 0x8d, 0x9d, 0x0e, 0x2a, 0x19, 0x5a, 
0x17, 0xa4, + 0xb7, 0x83, 0x18, 0x89, 0xbb, 0x77, 0xc4, 0x46, 0xb7, 0xb9, 0xe3, 0x8d, 0x07, 0xe1, 0x25, 0x30, + 0xb1, 0xb3, 0xbf, 0x33, 0x2f, 0xd9, 0xd1, 0xfe, 0x53, 0x11, 0x4a, 0x9b, 0x6b, 0x6d, 0x71, 0x9e, + 0x27, 0xbf, 0xd8, 0x8a, 0xb6, 0xf7, 0xac, 0xde, 0x7d, 0xea, 0x59, 0x3b, 0x07, 0x72, 0xe9, 0x1d, + 0x3b, 0xcf, 0x33, 0xcd, 0x81, 0x19, 0xb9, 0xc8, 0xfb, 0x30, 0x66, 0xe8, 0x8b, 0xd4, 0x0b, 0x46, + 0x31, 0x2c, 0xf0, 0x1d, 0xbd, 0x8b, 0x0b, 0x51, 0x76, 0x4c, 0x80, 0x91, 0x2d, 0x00, 0x23, 0x82, + 0x2e, 0x9d, 0xd8, 0x1c, 0x12, 0x03, 0x8e, 0x01, 0x11, 0x84, 0xc6, 0x1e, 0x63, 0xe5, 0xa8, 0xe5, + 0x93, 0xa0, 0xf2, 0x9e, 0x73, 0x47, 0xe5, 0xc5, 0x08, 0x46, 0x73, 0x60, 0x3c, 0x71, 0x21, 0x0f, + 0xf9, 0x1a, 0xd4, 0xdd, 0x5e, 0x6c, 0x38, 0x6d, 0xf0, 0xf8, 0xcf, 0xfa, 0x3d, 0x99, 0xf6, 0xe8, + 0x70, 0x66, 0x7c, 0xcd, 0xed, 0x58, 0x86, 0x4a, 0xc0, 0x90, 0x9d, 0x68, 0x50, 0xe5, 0xdb, 0xf0, + 0xd4, 0x75, 0x3c, 0x7c, 0xee, 0xe0, 0x37, 0x66, 0xf8, 0x28, 0x29, 0xda, 0xaf, 0x94, 0x21, 0xf2, + 0x11, 0x12, 0x1f, 0xaa, 0x62, 0x9b, 0x81, 0x1c, 0xb9, 0xcf, 0x74, 0x47, 0x83, 0x14, 0x45, 0x3a, + 0x50, 0xfa, 0xd0, 0xdd, 0xce, 0x3d, 0x70, 0xc7, 0xf6, 0xdf, 0x0b, 0x5b, 0x59, 0x2c, 0x01, 0x99, + 0x04, 0xf2, 0xb7, 0x0b, 0x70, 0xce, 0x4f, 0xab, 0xbe, 0xb2, 0x3b, 0x60, 0x7e, 0x1d, 0x3f, 0xad, + 0x4c, 0xcb, 0x40, 0xdd, 0x61, 0x64, 0x1c, 0x2c, 0x0b, 0x6b, 0x7f, 0xe1, 0xbc, 0x93, 0xdd, 0x69, + 0x25, 0xe7, 0x25, 0x92, 0xc9, 0xf6, 0x4f, 0xa6, 0xa1, 0x14, 0xa5, 0xfd, 0x5a, 0x11, 0x9a, 0xb1, + 0xd1, 0x3a, 0xf7, 0x2d, 0x4f, 0x0f, 0x53, 0xb7, 0x3c, 0x6d, 0x8c, 0xee, 0xcb, 0x8e, 0x4a, 0x75, + 0xd6, 0x17, 0x3d, 0xfd, 0xcb, 0x22, 0x94, 0xb6, 0x96, 0x96, 0x93, 0x8b, 0xd6, 0xc2, 0x53, 0x58, + 0xb4, 0xee, 0x42, 0x6d, 0xbb, 0x6f, 0xd9, 0x81, 0xe5, 0xe4, 0x3e, 0x21, 0x44, 0x5d, 0x8a, 0x25, + 0x7d, 0x1d, 0x02, 0x15, 0x15, 0x3c, 0xe9, 0x40, 0xad, 0x23, 0x8e, 0x68, 0xcc, 0x1d, 0xe1, 0x27, + 0x8f, 0x7a, 0x14, 0x82, 0xe4, 0x0b, 0x2a, 0x74, 0xed, 0x00, 0xe4, 0xed, 0xfe, 0x4f, 0xbd, 0x35, + 0xb5, 0x5f, 0x86, 0x50, 
0x0b, 0x78, 0xfa, 0xc2, 0xff, 0x47, 0x01, 0x92, 0x8a, 0xcf, 0xd3, 0xef, + 0x4d, 0x7b, 0xe9, 0xde, 0xb4, 0x74, 0x1a, 0x3f, 0x5f, 0x76, 0x87, 0xd2, 0xfe, 0x7d, 0x01, 0x52, + 0x7b, 0xc3, 0xc8, 0x1b, 0xf2, 0xb4, 0xaf, 0x64, 0x28, 0x95, 0x3a, 0xed, 0x8b, 0x24, 0xb9, 0x63, + 0xa7, 0x7e, 0x7d, 0xca, 0x96, 0x6b, 0x71, 0x07, 0x9a, 0x2c, 0xfe, 0xdd, 0xd1, 0x97, 0x6b, 0x59, + 0xee, 0x38, 0x19, 0xee, 0x17, 0x27, 0x61, 0x52, 0xae, 0xf6, 0x4f, 0x8a, 0x50, 0x7d, 0x6a, 0x5b, + 0xd5, 0x69, 0x22, 0x02, 0x73, 0x31, 0xe7, 0x68, 0x3f, 0x34, 0xfe, 0xb2, 0x9b, 0x8a, 0xbf, 0xcc, + 0x7b, 0x37, 0xf1, 0x13, 0xa2, 0x2f, 0xff, 0x6d, 0x01, 0xe4, 0x5c, 0xb3, 0xea, 0xf8, 0x81, 0xee, + 0x18, 0x94, 0x18, 0xe1, 0xc4, 0x96, 0x37, 0xcc, 0x47, 0x86, 0xc2, 0x09, 0x5d, 0x86, 0x3f, 0xab, + 0x89, 0x8c, 0xfc, 0x34, 0xd4, 0x77, 0x5d, 0x3f, 0xe0, 0x93, 0x57, 0x31, 0x69, 0x32, 0xbb, 0x25, + 0xd3, 0x31, 0xe4, 0x48, 0xbb, 0xb3, 0x2b, 0xc3, 0xdd, 0xd9, 0xda, 0x6f, 0x17, 0x61, 0xec, 0xcb, + 0xb2, 0xdf, 0x3e, 0x2b, 0x5e, 0xb5, 0x94, 0x33, 0x5e, 0xb5, 0x7c, 0x92, 0x78, 0x55, 0xed, 0xfb, + 0x05, 0x80, 0xa7, 0xb6, 0xd9, 0xdf, 0x4c, 0x86, 0x92, 0xe6, 0xee, 0x57, 0xd9, 0x81, 0xa4, 0xff, + 0xa8, 0xa2, 0xaa, 0xc4, 0xc3, 0x48, 0x3f, 0x2d, 0xc0, 0x84, 0x9e, 0x08, 0xcd, 0xcc, 0xad, 0x2f, + 0xa7, 0x22, 0x3d, 0xc3, 0xc8, 0xa2, 0x64, 0x3a, 0xa6, 0xc4, 0x92, 0xb7, 0xa2, 0x83, 0xa6, 0xef, + 0x46, 0xdd, 0x7e, 0xe0, 0x84, 0x68, 0xae, 0xbb, 0x25, 0x38, 0x9f, 0x10, 0x0a, 0x5b, 0x3a, 0x95, + 0x50, 0xd8, 0xf8, 0x26, 0xbf, 0xf2, 0x63, 0x37, 0xf9, 0xed, 0x43, 0x63, 0xc7, 0x73, 0xbb, 0x3c, + 0xda, 0x54, 0xde, 0x6a, 0x7c, 0x33, 0xc7, 0x44, 0x19, 0xdd, 0xe7, 0x1f, 0x19, 0xae, 0x96, 0x15, + 0x3e, 0x46, 0xa2, 0xb8, 0xad, 0xdf, 0x15, 0x52, 0xab, 0xa7, 0x29, 0x35, 0x1c, 0x4b, 0x36, 0x05, + 0x3a, 0x2a, 0x31, 0xc9, 0x08, 0xd3, 0xda, 0xd3, 0x89, 0x30, 0xd5, 0x7e, 0xb7, 0xaa, 0x06, 0xb0, + 0x67, 0xee, 0x4c, 0xd3, 0xdc, 0x5b, 0xb3, 0xe3, 0xfb, 0xaa, 0xcb, 0x27, 0xd8, 0x57, 0x5d, 0x39, + 0xee, 0xbe, 0xea, 0xea, 0x13, 0x02, 0x3f, 0xd3, 0x9b, 0x9e, 
0x6b, 0x4f, 0x71, 0xd3, 0x73, 0xfd, + 0x74, 0x36, 0x3d, 0x37, 0x4e, 0xb6, 0xe9, 0x59, 0xee, 0x1b, 0x0e, 0xb3, 0x43, 0x72, 0xd3, 0xf3, + 0x62, 0x92, 0x8c, 0x69, 0xfe, 0xac, 0x7d, 0xd3, 0xcd, 0x13, 0xee, 0x9b, 0x4e, 0xed, 0x72, 0x1e, + 0x1b, 0x69, 0x97, 0xf3, 0xf8, 0xb1, 0x76, 0x39, 0x1f, 0x96, 0x20, 0xb5, 0x74, 0xfe, 0xca, 0x4d, + 0xf6, 0xff, 0x95, 0x9b, 0xec, 0xb3, 0x22, 0x44, 0xc3, 0xe6, 0x09, 0xc3, 0x88, 0xde, 0x83, 0x7a, + 0x57, 0x7f, 0xb8, 0x44, 0x6d, 0xfd, 0x20, 0xcf, 0xc5, 0xbd, 0xeb, 0x12, 0x03, 0x43, 0x34, 0xe2, + 0x03, 0x58, 0xe1, 0xe1, 0xf9, 0xb9, 0x1d, 0x0e, 0xd1, 0x39, 0xfc, 0xc2, 0xa4, 0x19, 0xbd, 0x63, + 0x4c, 0x8c, 0xf6, 0x6f, 0x8a, 0x20, 0x6f, 0x59, 0x20, 0x14, 0x2a, 0x3b, 0xd6, 0x43, 0x6a, 0xe6, + 0x0e, 0x4e, 0x8e, 0x5d, 0xa7, 0x2e, 0x3c, 0x2a, 0x3c, 0x01, 0x05, 0x3a, 0x37, 0x95, 0x0b, 0x0f, + 0x99, 0x6c, 0xbf, 0x1c, 0xa6, 0xf2, 0xb8, 0xa7, 0x4d, 0x9a, 0xca, 0x45, 0x12, 0x2a, 0x19, 0xc2, + 0x32, 0xcf, 0x83, 0x25, 0x72, 0x3b, 0x04, 0x13, 0x41, 0x17, 0xca, 0x32, 0xef, 0x8b, 0x63, 0x0e, + 0xa4, 0x8c, 0xd6, 0x2f, 0x7d, 0xef, 0x07, 0xd7, 0x9e, 0xfb, 0xfe, 0x0f, 0xae, 0x3d, 0xf7, 0xf9, + 0x0f, 0xae, 0x3d, 0xf7, 0x2b, 0x47, 0xd7, 0x0a, 0xdf, 0x3b, 0xba, 0x56, 0xf8, 0xfe, 0xd1, 0xb5, + 0xc2, 0xe7, 0x47, 0xd7, 0x0a, 0xff, 0xf9, 0xe8, 0x5a, 0xe1, 0x6f, 0xfc, 0x97, 0x6b, 0xcf, 0xfd, + 0xe2, 0x9b, 0x51, 0x11, 0xe6, 0x54, 0x11, 0xe6, 0x94, 0xc0, 0xb9, 0xde, 0x5e, 0x67, 0x8e, 0x15, + 0x21, 0x4a, 0x51, 0x45, 0xf8, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x90, 0x35, 0xbe, 0x7d, 0xfe, + 0x99, 0x00, 0x00, } func (m *AbstractPodTemplate) Marshal() (dAtA []byte, err error) { @@ -6331,6 +6403,16 @@ func (m *MonoVertexSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + { + size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 if m.DaemonTemplate != nil { { size, err := 
m.DaemonTemplate.MarshalToSizedBuffer(dAtA[:i]) @@ -6485,24 +6567,24 @@ func (m *MonoVertexStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { copy(dAtA[i:], m.UpdateHash) i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateHash))) i-- - dAtA[i] = 0x72 + dAtA[i] = 0x7a i -= len(m.CurrentHash) copy(dAtA[i:], m.CurrentHash) i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentHash))) i-- - dAtA[i] = 0x6a + dAtA[i] = 0x72 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReadyReplicas)) + i-- + dAtA[i] = 0x68 i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) i-- dAtA[i] = 0x60 - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - i-- - dAtA[i] = 0x58 i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) i-- - dAtA[i] = 0x50 + dAtA[i] = 0x58 i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) i-- - dAtA[i] = 0x48 + dAtA[i] = 0x50 { size, err := m.LastScaledAt.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -6512,7 +6594,7 @@ func (m *MonoVertexStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x42 + dAtA[i] = 0x4a { size, err := m.LastUpdated.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -6522,22 +6604,25 @@ func (m *MonoVertexStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x3a + dAtA[i] = 0x42 i -= len(m.Message) copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) i-- - dAtA[i] = 0x32 + dAtA[i] = 0x3a i -= len(m.Reason) copy(dAtA[i:], m.Reason) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) i-- - dAtA[i] = 0x2a + dAtA[i] = 0x32 i -= len(m.Selector) copy(dAtA[i:], m.Selector) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Selector))) i-- - dAtA[i] = 0x22 + dAtA[i] = 0x2a + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) + i-- + dAtA[i] = 0x20 i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) i-- 
dAtA[i] = 0x18 @@ -7483,6 +7568,41 @@ func (m *RetryStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *RollingUpdateStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RollingUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *SASL) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -8679,6 +8799,46 @@ func (m *UDTransformer) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *UpdateStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RollingUpdate != nil { + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m 
*Vertex) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -10411,6 +10571,8 @@ func (m *MonoVertexSpec) Size() (n int) { l = m.DaemonTemplate.Size() n += 1 + l + sovGenerated(uint64(l)) } + l = m.UpdateStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -10425,6 +10587,7 @@ func (m *MonoVertexStatus) Size() (n int) { l = len(m.Phase) n += 1 + l + sovGenerated(uint64(l)) n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.DesiredReplicas)) l = len(m.Selector) n += 1 + l + sovGenerated(uint64(l)) l = len(m.Reason) @@ -10437,8 +10600,8 @@ func (m *MonoVertexStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) n += 1 + sovGenerated(uint64(m.ObservedGeneration)) n += 1 + sovGenerated(uint64(m.ReadyReplicas)) - n += 1 + sovGenerated(uint64(m.CurrentReplicas)) n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + n += 1 + sovGenerated(uint64(m.UpdatedReadyReplicas)) l = len(m.CurrentHash) n += 1 + l + sovGenerated(uint64(l)) l = len(m.UpdateHash) @@ -10789,6 +10952,19 @@ func (m *RetryStrategy) Size() (n int) { return n } +func (m *RollingUpdateStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *SASL) Size() (n int) { if m == nil { return 0 @@ -11236,6 +11412,21 @@ func (m *UDTransformer) Size() (n int) { return n } +func (m *UpdateStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *Vertex) Size() (n int) { if m == nil { return 0 @@ -12270,6 +12461,7 @@ func (this *MonoVertexSpec) String() string { `InitContainers:` + repeatedStringForInitContainers + `,`, `Sidecars:` + repeatedStringForSidecars + `,`, `DaemonTemplate:` + 
strings.Replace(this.DaemonTemplate.String(), "DaemonTemplate", "DaemonTemplate", 1) + `,`, + `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "UpdateStrategy", "UpdateStrategy", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -12282,6 +12474,7 @@ func (this *MonoVertexStatus) String() string { `Status:` + strings.Replace(strings.Replace(this.Status.String(), "Status", "Status", 1), `&`, ``, 1) + `,`, `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `DesiredReplicas:` + fmt.Sprintf("%v", this.DesiredReplicas) + `,`, `Selector:` + fmt.Sprintf("%v", this.Selector) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, @@ -12289,8 +12482,8 @@ func (this *MonoVertexStatus) String() string { `LastScaledAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastScaledAt), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, - `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, + `UpdatedReadyReplicas:` + fmt.Sprintf("%v", this.UpdatedReadyReplicas) + `,`, `CurrentHash:` + fmt.Sprintf("%v", this.CurrentHash) + `,`, `UpdateHash:` + fmt.Sprintf("%v", this.UpdateHash) + `,`, `}`, @@ -12518,6 +12711,16 @@ func (this *RetryStrategy) String() string { }, "") return s } +func (this *RollingUpdateStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingUpdateStrategy{`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} func (this *SASL) String() string { if this == nil { return "nil" @@ -12798,6 +13001,17 @@ func (this *UDTransformer) String() string { }, "") return s } +func 
(this *UpdateStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateStrategy", "RollingUpdateStrategy", 1) + `,`, + `}`, + }, "") + return s +} func (this *Vertex) String() string { if this == nil { return "nil" @@ -23365,6 +23579,39 @@ func (m *MonoVertexSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -23500,6 +23747,25 @@ func (m *MonoVertexStatus) Unmarshal(dAtA []byte) error { } } case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredReplicas", wireType) + } + m.DesiredReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DesiredReplicas |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) } @@ -23531,7 +23797,7 @@ func (m *MonoVertexStatus) Unmarshal(dAtA []byte) error { } m.Selector = string(dAtA[iNdEx:postIndex]) iNdEx = 
postIndex - case 5: + case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } @@ -23563,7 +23829,7 @@ func (m *MonoVertexStatus) Unmarshal(dAtA []byte) error { } m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: + case 7: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } @@ -23595,7 +23861,7 @@ func (m *MonoVertexStatus) Unmarshal(dAtA []byte) error { } m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 7: + case 8: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field LastUpdated", wireType) } @@ -23628,7 +23894,7 @@ func (m *MonoVertexStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 8: + case 9: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field LastScaledAt", wireType) } @@ -23661,7 +23927,7 @@ func (m *MonoVertexStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 9: + case 10: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) } @@ -23680,7 +23946,7 @@ func (m *MonoVertexStatus) Unmarshal(dAtA []byte) error { break } } - case 10: + case 11: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) } @@ -23699,11 +23965,11 @@ func (m *MonoVertexStatus) Unmarshal(dAtA []byte) error { break } } - case 11: + case 12: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) } - m.CurrentReplicas = 0 + m.UpdatedReplicas = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23713,16 +23979,16 @@ func (m *MonoVertexStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentReplicas |= uint32(b&0x7F) << shift + m.UpdatedReplicas |= uint32(b&0x7F) << shift if b < 
0x80 { break } } - case 12: + case 13: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReadyReplicas", wireType) } - m.UpdatedReplicas = 0 + m.UpdatedReadyReplicas = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23732,12 +23998,12 @@ func (m *MonoVertexStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= uint32(b&0x7F) << shift + m.UpdatedReadyReplicas |= uint32(b&0x7F) << shift if b < 0x80 { break } } - case 13: + case 14: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field CurrentHash", wireType) } @@ -23769,7 +24035,7 @@ func (m *MonoVertexStatus) Unmarshal(dAtA []byte) error { } m.CurrentHash = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 14: + case 15: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field UpdateHash", wireType) } @@ -26651,6 +26917,92 @@ func (m *RetryStrategy) Unmarshal(dAtA []byte) error { } return nil } +func (m *RollingUpdateStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingUpdateStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { 
+ return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *SASL) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -30044,6 +30396,124 @@ func (m *UDTransformer) Unmarshal(dAtA []byte) error { } return nil } +func (m *UpdateStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = UpdateStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateStrategy{} + } + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *Vertex) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/pkg/apis/numaflow/v1alpha1/generated.proto b/pkg/apis/numaflow/v1alpha1/generated.proto index 55940285e3..1e6e6bd35a 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.proto +++ b/pkg/apis/numaflow/v1alpha1/generated.proto @@ -26,6 +26,7 @@ import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; import 
"k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1"; @@ -864,7 +865,7 @@ message Metadata { // +kubebuilder:subresource:status // +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector // +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` -// +kubebuilder:printcolumn:name="Desired",type=string,JSONPath=`.spec.replicas` +// +kubebuilder:printcolumn:name="Desired",type=string,JSONPath=`.status.desiredReplicas` // +kubebuilder:printcolumn:name="Current",type=string,JSONPath=`.status.replicas` // +kubebuilder:printcolumn:name="Ready",type=string,JSONPath=`.status.readyReplicas` // +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` @@ -942,6 +943,11 @@ message MonoVertexSpec { // Template for the daemon service deployment. // +optional optional DaemonTemplate daemonTemplate = 11; + + // The strategy to use to replace existing pods with new ones. + // +kubebuilder:default={"type": "RollingUpdate", "rollingUpdate": {"maxUnavailable": "25%"}} + // +optional + optional UpdateStrategy updateStrategy = 12; } message MonoVertexStatus { @@ -954,41 +960,45 @@ message MonoVertexStatus { // +optional optional uint32 replicas = 3; + // The number of desired replicas. 
// +optional - optional string selector = 4; + optional uint32 desiredReplicas = 4; // +optional - optional string reason = 5; + optional string selector = 5; // +optional - optional string message = 6; + optional string reason = 6; + + // +optional + optional string message = 7; // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdated = 7; + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdated = 8; // Time of last scaling operation. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaledAt = 8; + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaledAt = 9; // The generation observed by the MonoVertex controller. // +optional - optional int64 observedGeneration = 9; + optional int64 observedGeneration = 10; // The number of pods targeted by this MonoVertex with a Ready Condition. // +optional - optional uint32 readyReplicas = 10; - - // The number of Pods created by the controller from the MonoVertex version indicated by currentHash. - optional uint32 currentReplicas = 11; + optional uint32 readyReplicas = 11; // The number of Pods created by the controller from the MonoVertex version indicated by updateHash. optional uint32 updatedReplicas = 12; - // If not empty, indicates the version of the MonoVertex used to generate Pods in the sequence [0,currentReplicas). - optional string currentHash = 13; + // The number of ready Pods created by the controller from the MonoVertex version indicated by updateHash. + optional uint32 updatedReadyReplicas = 13; - // If not empty, indicates the version of the MonoVertx used to generate Pods in the sequence [replicas-updatedReplicas,replicas) - optional string updateHash = 14; + // If not empty, indicates the current version of the MonoVertex used to generate Pods. + optional string currentHash = 14; + + // If not empty, indicates the updated version of the MonoVertex used to generate Pods. 
+ optional string updateHash = 15; } message NativeRedis { @@ -1291,6 +1301,21 @@ message RetryStrategy { optional string onFailure = 2; } +// RollingUpdateStrategy is used to communicate parameter for RollingUpdateStrategyType. +message RollingUpdateStrategy { + // The maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // Absolute number is calculated from percentage by rounding down. + // Defaults to 25%. + // Example: when this is set to 30%, the old pods can be scaled down to 70% of desired pods + // immediately when the rolling update starts. Once new pods are ready, old pods + // can be scaled down further, followed by scaling up the new pods, ensuring + // that the total number of pods available at all times during the update is at + // least 70% of desired pods. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; +} + message SASL { // SASL mechanism to use optional string mechanism = 1; @@ -1599,6 +1624,19 @@ message UDTransformer { optional Transformer builtin = 2; } +// UpdateStrategy indicates the strategy that the +// controller will use to perform updates for Vertex or MonoVertex. +message UpdateStrategy { + // Type indicates the type of the StatefulSetUpdateStrategy. + // Default is RollingUpdate. + // +optional + optional string type = 1; + + // RollingUpdate is used to communicate parameters when Type is RollingUpdateStrategy. 
+ // +optional + optional RollingUpdateStrategy rollingUpdate = 2; +} + // +genclient // +kubebuilder:object:root=true // +kubebuilder:resource:shortName=vtx diff --git a/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go index b05fd8c5f4..677ec4fc5c 100644 --- a/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go +++ b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go @@ -54,7 +54,7 @@ const ( // +kubebuilder:subresource:status // +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector // +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` -// +kubebuilder:printcolumn:name="Desired",type=string,JSONPath=`.spec.replicas` +// +kubebuilder:printcolumn:name="Desired",type=string,JSONPath=`.status.desiredReplicas` // +kubebuilder:printcolumn:name="Current",type=string,JSONPath=`.status.replicas` // +kubebuilder:printcolumn:name="Ready",type=string,JSONPath=`.status.readyReplicas` // +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` @@ -71,13 +71,28 @@ type MonoVertex struct { Status MonoVertexStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } -func (mv MonoVertex) GetReplicas() int { +func (mv MonoVertex) getReplicas() int { if mv.Spec.Replicas == nil { return 1 } return int(*mv.Spec.Replicas) } +func (mv MonoVertex) CalculateReplicas() int { + desiredReplicas := mv.getReplicas() + // Don't allow replicas to be out of the range of min and max when auto scaling is enabled + if s := mv.Spec.Scale; !s.Disabled { + max := int(s.GetMaxReplicas()) + min := int(s.GetMinReplicas()) + if desiredReplicas < min { + desiredReplicas = min + } else if desiredReplicas > max { + desiredReplicas = max + } + } + return desiredReplicas +} + func (mv MonoVertex) GetHeadlessServiceName() string { return mv.Name + "-mv-headless" } @@ -291,6 +306,7 @@ func (mv MonoVertex) simpleCopy() MonoVertex { if 
m.Spec.Limits.ReadTimeout == nil { m.Spec.Limits.ReadTimeout = &metav1.Duration{Duration: DefaultReadTimeout} } + m.Spec.UpdateStrategy = UpdateStrategy{} // TODO: lifecycle // mvVtxCopy.Spec.Lifecycle = Lifecycle{} return m @@ -408,6 +424,10 @@ type MonoVertexSpec struct { // Template for the daemon service deployment. // +optional DaemonTemplate *DaemonTemplate `json:"daemonTemplate,omitempty" protobuf:"bytes,11,opt,name=daemonTemplate"` + // The strategy to use to replace existing pods with new ones. + // +kubebuilder:default={"type": "RollingUpdate", "rollingUpdate": {"maxUnavailable": "25%"}} + // +optional + UpdateStrategy UpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,12,opt,name=updateStrategy"` } func (mvspec MonoVertexSpec) DeepCopyWithoutReplicas() MonoVertexSpec { @@ -470,31 +490,34 @@ type MonoVertexStatus struct { // Total number of non-terminated pods targeted by this MonoVertex (their labels match the selector). // +optional Replicas uint32 `json:"replicas" protobuf:"varint,3,opt,name=replicas"` + // The number of desired replicas. + // +optional + DesiredReplicas uint32 `json:"desiredReplicas" protobuf:"varint,4,opt,name=desiredReplicas"` // +optional - Selector string `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` + Selector string `json:"selector,omitempty" protobuf:"bytes,5,opt,name=selector"` // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` + Reason string `json:"reason,omitempty" protobuf:"bytes,6,opt,name=reason"` // +optional - Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` + Message string `json:"message,omitempty" protobuf:"bytes,7,opt,name=message"` // +optional - LastUpdated metav1.Time `json:"lastUpdated,omitempty" protobuf:"bytes,7,opt,name=lastUpdated"` + LastUpdated metav1.Time `json:"lastUpdated,omitempty" protobuf:"bytes,8,opt,name=lastUpdated"` // Time of last scaling operation. 
// +optional - LastScaledAt metav1.Time `json:"lastScaledAt,omitempty" protobuf:"bytes,8,opt,name=lastScaledAt"` + LastScaledAt metav1.Time `json:"lastScaledAt,omitempty" protobuf:"bytes,9,opt,name=lastScaledAt"` // The generation observed by the MonoVertex controller. // +optional - ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,9,opt,name=observedGeneration"` + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,10,opt,name=observedGeneration"` // The number of pods targeted by this MonoVertex with a Ready Condition. // +optional - ReadyReplicas uint32 `json:"readyReplicas,omitempty" protobuf:"varint,10,opt,name=readyReplicas"` - // The number of Pods created by the controller from the MonoVertex version indicated by currentHash. - CurrentReplicas uint32 `json:"currentReplicas,omitempty" protobuf:"varint,11,opt,name=currentReplicas"` + ReadyReplicas uint32 `json:"readyReplicas,omitempty" protobuf:"varint,11,opt,name=readyReplicas"` // The number of Pods created by the controller from the MonoVertex version indicated by updateHash. UpdatedReplicas uint32 `json:"updatedReplicas,omitempty" protobuf:"varint,12,opt,name=updatedReplicas"` - // If not empty, indicates the version of the MonoVertex used to generate Pods in the sequence [0,currentReplicas). - CurrentHash string `json:"currentHash,omitempty" protobuf:"bytes,13,opt,name=currentHash"` - // If not empty, indicates the version of the MonoVertx used to generate Pods in the sequence [replicas-updatedReplicas,replicas) - UpdateHash string `json:"updateHash,omitempty" protobuf:"bytes,14,opt,name=updateHash"` + // The number of ready Pods created by the controller from the MonoVertex version indicated by updateHash. + UpdatedReadyReplicas uint32 `json:"updatedReadyReplicas,omitempty" protobuf:"varint,13,opt,name=updatedReadyReplicas"` + // If not empty, indicates the current version of the MonoVertex used to generate Pods. 
+ CurrentHash string `json:"currentHash,omitempty" protobuf:"bytes,14,opt,name=currentHash"` + // If not empty, indicates the updated version of the MonoVertex used to generate Pods. + UpdateHash string `json:"updateHash,omitempty" protobuf:"bytes,15,opt,name=updateHash"` } // SetObservedGeneration sets the Status ObservedGeneration diff --git a/pkg/apis/numaflow/v1alpha1/openapi_generated.go b/pkg/apis/numaflow/v1alpha1/openapi_generated.go index 0ed5471bc8..a339fdfb7a 100644 --- a/pkg/apis/numaflow/v1alpha1/openapi_generated.go +++ b/pkg/apis/numaflow/v1alpha1/openapi_generated.go @@ -93,6 +93,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.RedisConfig": schema_pkg_apis_numaflow_v1alpha1_RedisConfig(ref), "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.RedisSettings": schema_pkg_apis_numaflow_v1alpha1_RedisSettings(ref), "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.RetryStrategy": schema_pkg_apis_numaflow_v1alpha1_RetryStrategy(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.RollingUpdateStrategy": schema_pkg_apis_numaflow_v1alpha1_RollingUpdateStrategy(ref), "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.SASL": schema_pkg_apis_numaflow_v1alpha1_SASL(ref), "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.SASLPlain": schema_pkg_apis_numaflow_v1alpha1_SASLPlain(ref), "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Scale": schema_pkg_apis_numaflow_v1alpha1_Scale(ref), @@ -114,6 +115,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.UDSink": schema_pkg_apis_numaflow_v1alpha1_UDSink(ref), "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.UDSource": schema_pkg_apis_numaflow_v1alpha1_UDSource(ref), "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.UDTransformer": 
schema_pkg_apis_numaflow_v1alpha1_UDTransformer(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.UpdateStrategy": schema_pkg_apis_numaflow_v1alpha1_UpdateStrategy(ref), "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Vertex": schema_pkg_apis_numaflow_v1alpha1_Vertex(ref), "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.VertexInstance": schema_pkg_apis_numaflow_v1alpha1_VertexInstance(ref), "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.VertexLimits": schema_pkg_apis_numaflow_v1alpha1_VertexLimits(ref), @@ -3332,11 +3334,18 @@ func schema_pkg_apis_numaflow_v1alpha1_MonoVertexSpec(ref common.ReferenceCallba Ref: ref("github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.DaemonTemplate"), }, }, + "updateStrategy": { + SchemaProps: spec.SchemaProps{ + Description: "The strategy to use to replace existing pods with new ones.", + Default: map[string]interface{}{}, + Ref: ref("github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.UpdateStrategy"), + }, + }, }, }, }, Dependencies: []string{ - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ContainerTemplate", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.DaemonTemplate", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Metadata", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.MonoVertexLimits", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Scale", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Sink", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Source", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodResourceClaim", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.Volume"}, + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ContainerTemplate", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.DaemonTemplate", 
"github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Metadata", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.MonoVertexLimits", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Scale", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Sink", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Source", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.UpdateStrategy", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodResourceClaim", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.Volume"}, } } @@ -3380,6 +3389,14 @@ func schema_pkg_apis_numaflow_v1alpha1_MonoVertexStatus(ref common.ReferenceCall Format: "int64", }, }, + "desiredReplicas": { + SchemaProps: spec.SchemaProps{ + Description: "The number of desired replicas.", + Default: 0, + Type: []string{"integer"}, + Format: "int64", + }, + }, "selector": { SchemaProps: spec.SchemaProps{ Type: []string{"string"}, @@ -3423,30 +3440,30 @@ func schema_pkg_apis_numaflow_v1alpha1_MonoVertexStatus(ref common.ReferenceCall Format: "int64", }, }, - "currentReplicas": { + "updatedReplicas": { SchemaProps: spec.SchemaProps{ - Description: "The number of Pods created by the controller from the MonoVertex version indicated by currentHash.", + Description: "The number of Pods created by the controller from the MonoVertex version indicated by updateHash.", Type: []string{"integer"}, Format: "int64", }, }, - "updatedReplicas": { + "updatedReadyReplicas": { SchemaProps: spec.SchemaProps{ - Description: "The number of Pods created by the controller from the MonoVertex version indicated by updateHash.", + Description: "The number of ready Pods created by the controller from the MonoVertex version indicated by updateHash.", Type: []string{"integer"}, Format: "int64", }, }, "currentHash": { SchemaProps: spec.SchemaProps{ - Description: 
"If not empty, indicates the version of the MonoVertex used to generate Pods in the sequence [0,currentReplicas).", + Description: "If not empty, indicates the current version of the MonoVertex used to generate Pods.", Type: []string{"string"}, Format: "", }, }, "updateHash": { SchemaProps: spec.SchemaProps{ - Description: "If not empty, indicates the version of the MonoVertx used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", + Description: "If not empty, indicates the updated version of the MonoVertex used to generate Pods.", Type: []string{"string"}, Format: "", }, @@ -4295,6 +4312,27 @@ func schema_pkg_apis_numaflow_v1alpha1_RetryStrategy(ref common.ReferenceCallbac } } +func schema_pkg_apis_numaflow_v1alpha1_RollingUpdateStrategy(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "RollingUpdateStrategy is used to communicate parameter for RollingUpdateStrategyType.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "maxUnavailable": { + SchemaProps: spec.SchemaProps{ + Description: "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. Defaults to 25%. Example: when this is set to 30%, the old pods can be scaled down to 70% of desired pods immediately when the rolling update starts. 
Once new pods are ready, old pods can be scaled down further, followed by scaling up the new pods, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", + Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/util/intstr.IntOrString"}, + } +} + func schema_pkg_apis_numaflow_v1alpha1_SASL(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -5232,6 +5270,35 @@ func schema_pkg_apis_numaflow_v1alpha1_UDTransformer(ref common.ReferenceCallbac } } +func schema_pkg_apis_numaflow_v1alpha1_UpdateStrategy(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "UpdateStrategy indicates the strategy that the controller will use to perform updates for Vertex or MonoVertex.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type indicates the type of the StatefulSetUpdateStrategy. 
Default is RollingUpdate.\n\nPossible enum values:\n - `\"RollingUpdate\"`", + Type: []string{"string"}, + Format: "", + Enum: []interface{}{"RollingUpdate"}, + }, + }, + "rollingUpdate": { + SchemaProps: spec.SchemaProps{ + Description: "RollingUpdate is used to communicate parameters when Type is RollingUpdateStrategy.", + Ref: ref("github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.RollingUpdateStrategy"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.RollingUpdateStrategy"}, + } +} + func schema_pkg_apis_numaflow_v1alpha1_Vertex(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ diff --git a/pkg/apis/numaflow/v1alpha1/update_strategy.go b/pkg/apis/numaflow/v1alpha1/update_strategy.go new file mode 100644 index 0000000000..5313d06725 --- /dev/null +++ b/pkg/apis/numaflow/v1alpha1/update_strategy.go @@ -0,0 +1,80 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/util/intstr" +) + +// UpdateStrategy indicates the strategy that the +// controller will use to perform updates for Vertex or MonoVertex. +type UpdateStrategy struct { + // Type indicates the type of the StatefulSetUpdateStrategy. + // Default is RollingUpdate. 
+ // +optional + Type UpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=UpdateStrategyType"` + // RollingUpdate is used to communicate parameters when Type is RollingUpdateStrategy. + // +optional + RollingUpdate *RollingUpdateStrategy `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` +} + +func (us UpdateStrategy) GetUpdateStrategyType() UpdateStrategyType { + switch us.Type { + case RollingUpdateStrategyType: + return us.Type + default: + return RollingUpdateStrategyType // We only support RollingUpdateStrategyType for now. + } +} + +func (us UpdateStrategy) GetRollingUpdateStrategy() RollingUpdateStrategy { + if us.RollingUpdate == nil { + return RollingUpdateStrategy{} + } + return *us.RollingUpdate +} + +// UpdateStrategyType is a string enumeration type that enumerates +// all possible update strategies. +// +enum +type UpdateStrategyType string + +const ( + RollingUpdateStrategyType UpdateStrategyType = "RollingUpdate" +) + +// RollingUpdateStrategy is used to communicate parameter for RollingUpdateStrategyType. +type RollingUpdateStrategy struct { + // The maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // Absolute number is calculated from percentage by rounding down. + // Defaults to 25%. + // Example: when this is set to 30%, the old pods can be scaled down to 70% of desired pods + // immediately when the rolling update starts. Once new pods are ready, old pods + // can be scaled down further, followed by scaling up the new pods, ensuring + // that the total number of pods available at all times during the update is at + // least 70% of desired pods. 
+ // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"` +} + +func (rus RollingUpdateStrategy) GetMaxUnavailable() intstr.IntOrString { + if rus.MaxUnavailable == nil { + return intstr.FromString("25%") // Default value is 25%. + } + return *rus.MaxUnavailable +} diff --git a/pkg/apis/numaflow/v1alpha1/update_strategy_test.go b/pkg/apis/numaflow/v1alpha1/update_strategy_test.go new file mode 100644 index 0000000000..e78cd34d95 --- /dev/null +++ b/pkg/apis/numaflow/v1alpha1/update_strategy_test.go @@ -0,0 +1,128 @@ +package v1alpha1 + +import ( + "testing" + + "github.com/stretchr/testify/assert" + intstr "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/ptr" +) + +func TestUpdateStrategy_GetUpdateStrategyType(t *testing.T) { + tests := []struct { + name string + strategy UpdateStrategy + expected UpdateStrategyType + }{ + { + name: "RollingUpdateStrategyType", + strategy: UpdateStrategy{Type: RollingUpdateStrategyType}, + expected: RollingUpdateStrategyType, + }, + { + name: "EmptyType", + strategy: UpdateStrategy{}, + expected: RollingUpdateStrategyType, + }, + { + name: "UnsupportedType", + strategy: UpdateStrategy{Type: "UnsupportedType"}, + expected: RollingUpdateStrategyType, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := tt.strategy.GetUpdateStrategyType() + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestUpdateStrategy_GetRollingUpdateStrategy(t *testing.T) { + tests := []struct { + name string + strategy UpdateStrategy + expected RollingUpdateStrategy + }{ + { + name: "NilRollingUpdate", + strategy: UpdateStrategy{}, + expected: RollingUpdateStrategy{}, + }, + { + name: "NonNilRollingUpdate", + strategy: UpdateStrategy{ + RollingUpdate: &RollingUpdateStrategy{ + MaxUnavailable: ptr.To[intstr.IntOrString](intstr.FromString("2")), + }, + }, + expected: RollingUpdateStrategy{ + MaxUnavailable: 
ptr.To[intstr.IntOrString](intstr.FromString("2")), + }, + }, + { + name: "EmptyRollingUpdate", + strategy: UpdateStrategy{ + RollingUpdate: &RollingUpdateStrategy{}, + }, + expected: RollingUpdateStrategy{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := tt.strategy.GetRollingUpdateStrategy() + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestRollingUpdateStrategy_GetMaxUnavailable(t *testing.T) { + tests := []struct { + name string + strategy RollingUpdateStrategy + expected intstr.IntOrString + }{ + { + name: "NilMaxUnavailable", + strategy: RollingUpdateStrategy{}, + expected: intstr.FromString("25%"), + }, + { + name: "IntegerMaxUnavailable", + strategy: RollingUpdateStrategy{ + MaxUnavailable: ptr.To[intstr.IntOrString](intstr.FromInt(5)), + }, + expected: intstr.FromInt(5), + }, + { + name: "StringMaxUnavailable", + strategy: RollingUpdateStrategy{ + MaxUnavailable: ptr.To[intstr.IntOrString](intstr.FromString("50%")), + }, + expected: intstr.FromString("50%"), + }, + { + name: "ZeroIntegerMaxUnavailable", + strategy: RollingUpdateStrategy{ + MaxUnavailable: ptr.To[intstr.IntOrString](intstr.FromInt(0)), + }, + expected: intstr.FromInt(0), + }, + { + name: "ZeroPercentMaxUnavailable", + strategy: RollingUpdateStrategy{ + MaxUnavailable: ptr.To[intstr.IntOrString](intstr.FromString("0%")), + }, + expected: intstr.FromString("0%"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := tt.strategy.GetMaxUnavailable() + assert.Equal(t, tt.expected, result) + }) + } +} diff --git a/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go index 6033c0302f..b6325ce920 100644 --- a/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go @@ -25,6 +25,7 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" + 
intstr "k8s.io/apimachinery/pkg/util/intstr" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -1502,6 +1503,7 @@ func (in *MonoVertexSpec) DeepCopyInto(out *MonoVertexSpec) { *out = new(DaemonTemplate) (*in).DeepCopyInto(*out) } + in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy) return } @@ -2009,6 +2011,27 @@ func (in *RetryStrategy) DeepCopy() *RetryStrategy { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdateStrategy) DeepCopyInto(out *RollingUpdateStrategy) { + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateStrategy. +func (in *RollingUpdateStrategy) DeepCopy() *RollingUpdateStrategy { + if in == nil { + return nil + } + out := new(RollingUpdateStrategy) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SASL) DeepCopyInto(out *SASL) { *out = *in @@ -2654,6 +2677,27 @@ func (in *UDTransformer) DeepCopy() *UDTransformer { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpdateStrategy) DeepCopyInto(out *UpdateStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateStrategy) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateStrategy. 
+func (in *UpdateStrategy) DeepCopy() *UpdateStrategy { + if in == nil { + return nil + } + out := new(UpdateStrategy) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Vertex) DeepCopyInto(out *Vertex) { *out = *in diff --git a/pkg/mvtxdaemon/server/service/health_status.go b/pkg/mvtxdaemon/server/service/health_status.go index 9942f7885c..b075555aff 100644 --- a/pkg/mvtxdaemon/server/service/health_status.go +++ b/pkg/mvtxdaemon/server/service/health_status.go @@ -132,20 +132,18 @@ func (hc *HealthChecker) setCurrentHealth(status *dataHealthResponse) { // Else we consider the data criticality as healthy. // // TODO(MonoVertex): Add the logic to determine the warning state based on more conditions. -func (hc *HealthChecker) getMonoVertexDataCriticality(ctx context.Context, mvtxMetrics *mvtxdaemon.MonoVertexMetrics) (*monoVtxState, error) { +func (hc *HealthChecker) getMonoVertexDataCriticality(_ context.Context, mvtxMetrics *mvtxdaemon.MonoVertexMetrics) (*monoVtxState, error) { // Get the desired replicas for the MonoVertex based on the metrics desiredReplicas, err := hc.getDesiredReplica(mvtxMetrics) if err != nil { return nil, err } - // Get the current state of the MonoVertex replicas - currentReplicas := hc.monoVertex.GetReplicas() maxReplicas := int(hc.monoVertex.Spec.Scale.GetMaxReplicas()) // default status is healthy status := v1alpha1.MonoVertexStatusHealthy - // If the current replicas are equal to the max replicas, and the desired replicas are more than the max replicas, + // If the desired replicas are more than the max replicas, // the data criticality is Critical. 
- if currentReplicas == maxReplicas && desiredReplicas > maxReplicas { + if desiredReplicas > maxReplicas { status = v1alpha1.MonoVertexStatusCritical } return newMonoVtxState(mvtxMetrics.MonoVertex, status), nil diff --git a/pkg/reconciler/monovertex/controller.go b/pkg/reconciler/monovertex/controller.go index 104b7d9728..a0de7e6d6a 100644 --- a/pkg/reconciler/monovertex/controller.go +++ b/pkg/reconciler/monovertex/controller.go @@ -32,6 +32,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -113,36 +114,19 @@ func (mr *monoVertexReconciler) reconcile(ctx context.Context, monoVtx *dfv1.Mon if monoVtx.Scalable() { mr.scaler.StartWatching(mVtxKey) } - // TODO: handle lifecycle changes - - // Regular mono vertex change - result, err := mr.reconcileNonLifecycleChanges(ctx, monoVtx) - if err != nil { - mr.recorder.Eventf(monoVtx, corev1.EventTypeWarning, "ReconcileMonoVertexFailed", "Failed to reconcile a mono vertex: %v", err.Error()) - } - return result, err -} -func (mr *monoVertexReconciler) reconcileNonLifecycleChanges(ctx context.Context, monoVtx *dfv1.MonoVertex) (ctrl.Result, error) { - // Create or update mono vtx services - if err := mr.createOrUpdateMonoVtxServices(ctx, monoVtx); err != nil { + if err := mr.orchestrateFixedResources(ctx, monoVtx); err != nil { + monoVtx.Status.MarkDeployFailed("OrchestrateFixedResourcesFailed", err.Error()) return ctrl.Result{}, err } - // Mono vtx daemon service - if err := mr.createOrUpdateDaemonService(ctx, monoVtx); err != nil { - return ctrl.Result{}, err - } + // TODO: handle lifecycle changes - // Mono vtx daemon deployment - if err := mr.createOrUpdateDaemonDeployment(ctx, monoVtx); err != nil { + if err := mr.orchestratePods(ctx, monoVtx); err != nil { + 
monoVtx.Status.MarkDeployFailed("OrchestratePodsFailed", err.Error()) return ctrl.Result{}, err } - // Create pods - if err := mr.reconcilePods(ctx, monoVtx); err != nil { - return ctrl.Result{}, err - } monoVtx.Status.MarkDeployed() // Mark it running before checking the status of the pods @@ -155,42 +139,197 @@ func (mr *monoVertexReconciler) reconcileNonLifecycleChanges(ctx context.Context return ctrl.Result{}, nil } -func (mr *monoVertexReconciler) reconcilePods(ctx context.Context, monoVtx *dfv1.MonoVertex) error { - desiredReplicas := monoVtx.GetReplicas() - // Don't allow replicas to be out of the range of min and max when auto scaling is enabled - if s := monoVtx.Spec.Scale; !s.Disabled { - max := int(s.GetMaxReplicas()) - min := int(s.GetMinReplicas()) - if desiredReplicas < min { - desiredReplicas = min - } else if desiredReplicas > max { - desiredReplicas = max - } +// orchestrateFixedResources orchestrates fixed resources such as daemon service related objects for a mono vertex. 
+func (mr *monoVertexReconciler) orchestrateFixedResources(ctx context.Context, monoVtx *dfv1.MonoVertex) error { + // Create or update mono vtx services + if err := mr.createOrUpdateMonoVtxServices(ctx, monoVtx); err != nil { + return fmt.Errorf("failed to orchestrate mono vtx services: %w", err) + } + + // Mono vtx daemon service + if err := mr.createOrUpdateDaemonService(ctx, monoVtx); err != nil { + return fmt.Errorf("failed to orchestrate mono vtx daemon service: %w", err) } + + // Mono vtx daemon deployment + if err := mr.createOrUpdateDaemonDeployment(ctx, monoVtx); err != nil { + return fmt.Errorf("failed to orchestrate mono vtx daemon deployment: %w", err) + } + return nil +} + +func (mr *monoVertexReconciler) orchestratePods(ctx context.Context, monoVtx *dfv1.MonoVertex) error { + log := logging.FromContext(ctx) + desiredReplicas := monoVtx.CalculateReplicas() + monoVtx.Status.DesiredReplicas = uint32(desiredReplicas) + // Set metrics defer func() { reconciler.MonoVertexDesiredReplicas.WithLabelValues(monoVtx.Namespace, monoVtx.Name).Set(float64(desiredReplicas)) reconciler.MonoVertexCurrentReplicas.WithLabelValues(monoVtx.Namespace, monoVtx.Name).Set(float64(monoVtx.Status.Replicas)) }() + podSpec, err := mr.buildPodSpec(monoVtx) + if err != nil { + return fmt.Errorf("failed to generate mono vertex pod spec: %w", err) + } + + hash := sharedutil.MustHash(podSpec) + if monoVtx.Status.UpdateHash != hash { // New spec, or still processing last update, while new update is coming + monoVtx.Status.UpdateHash = hash + monoVtx.Status.UpdatedReplicas = 0 + monoVtx.Status.UpdatedReadyReplicas = 0 + } + + // Manually or automatically scaled down + if currentReplicas := int(monoVtx.Status.Replicas); currentReplicas > desiredReplicas { + if err := mr.cleanUpPodsFromTo(ctx, monoVtx, desiredReplicas, currentReplicas); err != nil { + return fmt.Errorf("failed to clean up mono vertex pods [%v, %v): %w", desiredReplicas, currentReplicas, err) + } + monoVtx.Status.Replicas 
= uint32(desiredReplicas) + } + updatedReplicas := int(monoVtx.Status.UpdatedReplicas) + if updatedReplicas > desiredReplicas { + updatedReplicas = desiredReplicas + monoVtx.Status.UpdatedReplicas = uint32(updatedReplicas) + } + + if updatedReplicas > 0 { + // Make sure [0 - updatedReplicas] with hash are in place + if err := mr.orchestratePodsFromTo(ctx, monoVtx, *podSpec, 0, updatedReplicas, hash); err != nil { + return fmt.Errorf("failed to orchestrate mono vertex pods [0, %v): %w", updatedReplicas, err) + } + // Wait for the updated pods to be ready before moving on + if monoVtx.Status.UpdatedReadyReplicas != monoVtx.Status.UpdatedReplicas { + updatedReadyReplicas := 0 + existingPods, err := mr.findExistingPods(ctx, monoVtx, 0, updatedReplicas) + if err != nil { + return fmt.Errorf("failed to get pods of a mono vertex: %w", err) + } + for _, pod := range existingPods { + if pod.GetAnnotations()[dfv1.KeyHash] == monoVtx.Status.UpdateHash { + if reconciler.IsPodReady(pod) { + updatedReadyReplicas++ + } + } + } + monoVtx.Status.UpdatedReadyReplicas = uint32(updatedReadyReplicas) + if updatedReadyReplicas < updatedReplicas { + return nil + } + } + } + + if monoVtx.Status.UpdateHash == monoVtx.Status.CurrentHash || + monoVtx.Status.CurrentHash == "" { + // 1. Regular scaling operation 2. 
First time + // create (desiredReplicas-updatedReplicas) pods directly + if desiredReplicas > updatedReplicas { + if err := mr.orchestratePodsFromTo(ctx, monoVtx, *podSpec, updatedReplicas, desiredReplicas, hash); err != nil { + return fmt.Errorf("failed to orchestrate mono vertex pods [%v, %v): %w", updatedReplicas, desiredReplicas, err) + } + } + monoVtx.Status.UpdatedReplicas = uint32(desiredReplicas) + monoVtx.Status.CurrentHash = monoVtx.Status.UpdateHash + } else { // Update scenario + if updatedReplicas >= desiredReplicas { + return nil + } + + // Create more pods + if monoVtx.Spec.UpdateStrategy.GetUpdateStrategyType() != dfv1.RollingUpdateStrategyType { + // Revisit later, we only support rolling update for now + return nil + } + + // Calculate the to be updated replicas based on the max unavailable configuration + maxUnavailConf := monoVtx.Spec.UpdateStrategy.GetRollingUpdateStrategy().GetMaxUnavailable() + toBeUpdated, err := intstr.GetScaledValueFromIntOrPercent(&maxUnavailConf, desiredReplicas, true) + if err != nil { // This should never happen since we have validated the configuration + return fmt.Errorf("invalid max unavailable configuration in rollingUpdate: %w", err) + } + if updatedReplicas+toBeUpdated > desiredReplicas { + toBeUpdated = desiredReplicas - updatedReplicas + } + log.Infof("Rolling update %d replicas, [%d, %d)\n", toBeUpdated, updatedReplicas, updatedReplicas+toBeUpdated) + + // Create pods [updatedReplicas, updatedReplicas+toBeUpdated), and clean up any pods in that range that has a different hash + if err := mr.orchestratePodsFromTo(ctx, monoVtx, *podSpec, updatedReplicas, updatedReplicas+toBeUpdated, monoVtx.Status.UpdateHash); err != nil { + return fmt.Errorf("failed to orchestrate pods [%v, %v)]: %w", updatedReplicas, updatedReplicas+toBeUpdated, err) + } + monoVtx.Status.UpdatedReplicas = uint32(updatedReplicas + toBeUpdated) + if monoVtx.Status.UpdatedReplicas == uint32(desiredReplicas) { + monoVtx.Status.CurrentHash = 
monoVtx.Status.UpdateHash + } + } + + currentReplicas := int(monoVtx.Status.Replicas) + if currentReplicas != desiredReplicas { + log.Infow("MonoVertex replicas changed", "currentReplicas", currentReplicas, "desiredReplicas", desiredReplicas) + mr.recorder.Eventf(monoVtx, corev1.EventTypeNormal, "ReplicasScaled", "Replicas changed from %d to %d", currentReplicas, desiredReplicas) + monoVtx.Status.Replicas = uint32(desiredReplicas) + monoVtx.Status.LastScaledAt = metav1.Time{Time: time.Now()} + } + if monoVtx.Status.Selector == "" { + selector, _ := labels.Parse(dfv1.KeyComponent + "=" + dfv1.ComponentMonoVertex + "," + dfv1.KeyMonoVertexName + "=" + monoVtx.Name) + monoVtx.Status.Selector = selector.String() + } + + return nil +} + +func (mr *monoVertexReconciler) findExistingPods(ctx context.Context, monoVtx *dfv1.MonoVertex, fromReplica, toReplica int) (map[string]corev1.Pod, error) { + pods := &corev1.PodList{} + selector, _ := labels.Parse(dfv1.KeyComponent + "=" + dfv1.ComponentMonoVertex + "," + dfv1.KeyMonoVertexName + "=" + monoVtx.Name) + if err := mr.client.List(ctx, pods, &client.ListOptions{Namespace: monoVtx.Namespace, LabelSelector: selector}); err != nil { + return nil, fmt.Errorf("failed to list mono vertex pods: %w", err) + } + result := make(map[string]corev1.Pod) + for _, pod := range pods.Items { + if !pod.DeletionTimestamp.IsZero() { + // Ignore pods being deleted + continue + } + replicaStr := pod.GetAnnotations()[dfv1.KeyReplica] + replica, _ := strconv.Atoi(replicaStr) + if replica >= fromReplica && replica < toReplica { + result[pod.Name] = pod + } + } + return result, nil +} + +func (mr *monoVertexReconciler) cleanUpPodsFromTo(ctx context.Context, monoVtx *dfv1.MonoVertex, fromReplica, toReplica int) error { log := logging.FromContext(ctx) - existingPods, err := mr.findExistingPods(ctx, monoVtx) + existingPods, err := mr.findExistingPods(ctx, monoVtx, fromReplica, toReplica) if err != nil { - mr.markDeploymentFailedAndLogEvent(monoVtx, 
false, log, "FindExistingPodFailed", err.Error(), "Failed to find existing mono vertex pods", zap.Error(err)) - return err + return fmt.Errorf("failed to find existing pods: %w", err) } - for replica := 0; replica < desiredReplicas; replica++ { - podSpec, err := mr.buildPodSpec(monoVtx) - if err != nil { - mr.markDeploymentFailedAndLogEvent(monoVtx, false, log, "PodSpecGenFailed", err.Error(), "Failed to generate mono vertex pod spec", zap.Error(err)) - return err + + for _, pod := range existingPods { + if err := mr.client.Delete(ctx, &pod); err != nil { + return fmt.Errorf("failed to delete pod %s: %w", pod.Name, err) } - hash := sharedutil.MustHash(podSpec) + log.Infof("Deleted MonoVertx pod %s\n", pod.Name) + mr.recorder.Eventf(monoVtx, corev1.EventTypeNormal, "DeletePodSuccess", "Succeeded to delete a mono vertex pod %s", pod.Name) + } + return nil +} + +// orchestratePodsFromTo orchestrates pods [fromReplica, toReplica], and clean up any pods in that range that has a different hash +func (mr *monoVertexReconciler) orchestratePodsFromTo(ctx context.Context, monoVtx *dfv1.MonoVertex, podSpec corev1.PodSpec, fromReplica, toReplica int, newHash string) error { + log := logging.FromContext(ctx) + existingPods, err := mr.findExistingPods(ctx, monoVtx, fromReplica, toReplica) + if err != nil { + return fmt.Errorf("failed to find existing pods: %w", err) + } + // Create pods [fromReplica, toReplica) + for replica := fromReplica; replica < toReplica; replica++ { podNamePrefix := fmt.Sprintf("%s-mv-%d-", monoVtx.Name, replica) needToCreate := true for existingPodName, existingPod := range existingPods { if strings.HasPrefix(existingPodName, podNamePrefix) { - if existingPod.GetAnnotations()[dfv1.KeyHash] == hash && existingPod.Status.Phase != corev1.PodFailed { + if existingPod.GetAnnotations()[dfv1.KeyHash] == newHash && existingPod.Status.Phase != corev1.PodFailed { needToCreate = false delete(existingPods, existingPodName) } @@ -213,7 +352,7 @@ func (mr 
*monoVertexReconciler) reconcilePods(ctx context.Context, monoVtx *dfv1 podLabels[dfv1.KeyComponent] = dfv1.ComponentMonoVertex podLabels[dfv1.KeyAppName] = monoVtx.Name podLabels[dfv1.KeyMonoVertexName] = monoVtx.Name - annotations[dfv1.KeyHash] = hash + annotations[dfv1.KeyHash] = newHash annotations[dfv1.KeyReplica] = strconv.Itoa(replica) // Defaults to udf annotations[dfv1.KeyDefaultContainer] = dfv1.CtrMain @@ -225,12 +364,11 @@ func (mr *monoVertexReconciler) reconcilePods(ctx context.Context, monoVtx *dfv1 Annotations: annotations, OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(monoVtx.GetObjectMeta(), dfv1.MonoVertexGroupVersionKind)}, }, - Spec: *podSpec, + Spec: podSpec, } pod.Spec.Hostname = fmt.Sprintf("%s-mv-%d", monoVtx.Name, replica) if err := mr.client.Create(ctx, pod); err != nil { - mr.markDeploymentFailedAndLogEvent(monoVtx, true, log, "CreatePodFailed", err.Error(), "Failed to created a mono vertex pod", zap.Error(err)) - return err + return fmt.Errorf("failed to create a mono vertex pod: %w", err) } log.Infow("Succeeded to create a mono vertex pod", zap.String("pod", pod.Name)) mr.recorder.Eventf(monoVtx, corev1.EventTypeNormal, "CreatePodSuccess", "Succeeded to create a mono vertex pod %s", pod.Name) @@ -238,21 +376,9 @@ func (mr *monoVertexReconciler) reconcilePods(ctx context.Context, monoVtx *dfv1 } for _, v := range existingPods { if err := mr.client.Delete(ctx, &v); err != nil && !apierrors.IsNotFound(err) { - mr.markDeploymentFailedAndLogEvent(monoVtx, true, log, "DelPodFailed", err.Error(), "Failed to delete a mono vertex pod", zap.Error(err)) - return err + return fmt.Errorf("failed to delete pod %s: %w", v.Name, err) } } - - currentReplicas := int(monoVtx.Status.Replicas) - if currentReplicas != desiredReplicas || monoVtx.Status.Selector == "" { - log.Infow("MonoVertex replicas changed", "currentReplicas", currentReplicas, "desiredReplicas", desiredReplicas) - mr.recorder.Eventf(monoVtx, corev1.EventTypeNormal, 
"ReplicasScaled", "Replicas changed from %d to %d", currentReplicas, desiredReplicas) - monoVtx.Status.Replicas = uint32(desiredReplicas) - monoVtx.Status.LastScaledAt = metav1.Time{Time: time.Now()} - } - selector, _ := labels.Parse(dfv1.KeyComponent + "=" + dfv1.ComponentMonoVertex + "," + dfv1.KeyMonoVertexName + "=" + monoVtx.Name) - monoVtx.Status.Selector = selector.String() - return nil } @@ -404,23 +530,6 @@ func (mr *monoVertexReconciler) createOrUpdateDaemonDeployment(ctx context.Conte return nil } -func (mr *monoVertexReconciler) findExistingPods(ctx context.Context, monoVtx *dfv1.MonoVertex) (map[string]corev1.Pod, error) { - pods := &corev1.PodList{} - selector, _ := labels.Parse(dfv1.KeyComponent + "=" + dfv1.ComponentMonoVertex + "," + dfv1.KeyMonoVertexName + "=" + monoVtx.Name) - if err := mr.client.List(ctx, pods, &client.ListOptions{Namespace: monoVtx.Namespace, LabelSelector: selector}); err != nil { - return nil, fmt.Errorf("failed to list mono vertex pods: %w", err) - } - result := make(map[string]corev1.Pod) - for _, v := range pods.Items { - if !v.DeletionTimestamp.IsZero() { - // Ignore pods being deleted - continue - } - result[v.Name] = v - } - return result, nil -} - func (mr *monoVertexReconciler) buildPodSpec(monoVtx *dfv1.MonoVertex) (*corev1.PodSpec, error) { podSpec, err := monoVtx.GetPodSpec(dfv1.GetMonoVertexPodSpecReq{ Image: mr.image, @@ -482,7 +591,7 @@ func (mr *monoVertexReconciler) checkChildrenResourceStatus(ctx context.Context, var podList corev1.PodList if err := mr.client.List(ctx, &podList, &client.ListOptions{Namespace: monoVtx.GetNamespace(), LabelSelector: selector}); err != nil { monoVtx.Status.MarkPodNotHealthy("ListMonoVerticesPodsFailed", err.Error()) - return fmt.Errorf("failed to get pods of a vertex: %w", err) + return fmt.Errorf("failed to get pods of a mono vertex: %w", err) } readyPods := reconciler.NumOfReadyPods(podList) if readyPods > int(monoVtx.Status.Replicas) { // It might happen in some corner 
cases, such as during rollout diff --git a/pkg/reconciler/monovertex/controller_test.go b/pkg/reconciler/monovertex/controller_test.go index d84ddef68a..e9c4f9fdea 100644 --- a/pkg/reconciler/monovertex/controller_test.go +++ b/pkg/reconciler/monovertex/controller_test.go @@ -15,3 +15,302 @@ limitations under the License. */ package monovertex + +import ( + "context" + "strings" + "testing" + + "go.uber.org/zap/zaptest" + appv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/tools/record" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" + "github.com/numaproj/numaflow/pkg/reconciler" + "github.com/numaproj/numaflow/pkg/reconciler/monovertex/scaling" + "github.com/stretchr/testify/assert" +) + +const ( + testNamespace = "test-ns" + testMonoVtxName = "tmvtx" + testFlowImage = "test-d-iamge" +) + +var ( + testMonoVtx = &dfv1.MonoVertex{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testNamespace, + Name: testMonoVtxName, + }, + Spec: dfv1.MonoVertexSpec{ + Scale: dfv1.Scale{ + Min: ptr.To[int32](2), + }, + Source: &dfv1.Source{ + UDSource: &dfv1.UDSource{ + Container: &dfv1.Container{ + Image: "test-source-image", + }, + }, + UDTransformer: &dfv1.UDTransformer{ + Container: &dfv1.Container{ + Image: "test-tf-image", + }, + }, + }, + Sink: &dfv1.Sink{ + AbstractSink: dfv1.AbstractSink{ + UDSink: &dfv1.UDSink{ + Container: &dfv1.Container{ + Image: "test-sink", + }, + }, + }, + Fallback: &dfv1.AbstractSink{ + UDSink: &dfv1.UDSink{ + Container: &dfv1.Container{ + Image: "test-fb-sink", + }, + }, + }, + }, + }, + } +) + +func init() { + _ = dfv1.AddToScheme(scheme.Scheme) + _ = appv1.AddToScheme(scheme.Scheme) + _ = corev1.AddToScheme(scheme.Scheme) +} + +func Test_NewReconciler(t *testing.T) { + cl := 
fake.NewClientBuilder().Build() + r := NewReconciler(cl, scheme.Scheme, reconciler.FakeGlobalConfig(t, nil), testFlowImage, scaling.NewScaler(cl), zaptest.NewLogger(t).Sugar(), record.NewFakeRecorder(64)) + _, ok := r.(*monoVertexReconciler) + assert.True(t, ok) +} + +func Test_BuildPodSpec(t *testing.T) { + fakeConfig := reconciler.FakeGlobalConfig(t, nil) + cl := fake.NewClientBuilder().Build() + r := &monoVertexReconciler{ + client: cl, + scheme: scheme.Scheme, + config: fakeConfig, + image: testFlowImage, + logger: zaptest.NewLogger(t).Sugar(), + recorder: record.NewFakeRecorder(64), + } + t.Run("test has everything", func(t *testing.T) { + testObj := testMonoVtx.DeepCopy() + spec, err := r.buildPodSpec(testObj) + assert.NoError(t, err) + assert.Equal(t, 5, len(spec.Containers)) + assert.Equal(t, dfv1.CtrMain, spec.Containers[0].Name) + assert.Equal(t, dfv1.CtrUdsource, spec.Containers[1].Name) + assert.Equal(t, dfv1.CtrUdtransformer, spec.Containers[2].Name) + assert.Equal(t, dfv1.CtrUdsink, spec.Containers[3].Name) + assert.Equal(t, dfv1.CtrFallbackUdsink, spec.Containers[4].Name) + assert.Equal(t, 0, len(spec.InitContainers)) + }) + + t.Run("test no transformer, no fallback sink", func(t *testing.T) { + testObj := testMonoVtx.DeepCopy() + testObj.Spec.Source.UDTransformer = nil + testObj.Spec.Sink.Fallback = nil + spec, err := r.buildPodSpec(testObj) + assert.NoError(t, err) + assert.Equal(t, 3, len(spec.Containers)) + assert.Equal(t, dfv1.CtrMain, spec.Containers[0].Name) + assert.Equal(t, dfv1.CtrUdsource, spec.Containers[1].Name) + assert.Equal(t, dfv1.CtrUdsink, spec.Containers[2].Name) + assert.Equal(t, 0, len(spec.InitContainers)) + }) +} + +func Test_createOrUpdateDaemonDeployment(t *testing.T) { + fakeConfig := reconciler.FakeGlobalConfig(t, nil) + cl := fake.NewClientBuilder().Build() + r := &monoVertexReconciler{ + client: cl, + scheme: scheme.Scheme, + config: fakeConfig, + image: testFlowImage, + logger: zaptest.NewLogger(t).Sugar(), + recorder: 
record.NewFakeRecorder(64), + } + + t.Run("test everything from scratch for daemon deployment", func(t *testing.T) { + testObj := testMonoVtx.DeepCopy() + err := r.createOrUpdateDaemonDeployment(context.TODO(), testObj) + assert.NoError(t, err) + var daemonDeployment appv1.Deployment + err = r.client.Get(context.TODO(), client.ObjectKey{Namespace: testObj.GetNamespace(), Name: testObj.GetDaemonDeploymentName()}, + &daemonDeployment) + assert.NoError(t, err) + assert.Equal(t, testObj.GetDaemonDeploymentName(), daemonDeployment.Name) + assert.Equal(t, 1, len(daemonDeployment.Spec.Template.Spec.Containers)) + assert.Equal(t, dfv1.CtrMain, daemonDeployment.Spec.Template.Spec.Containers[0].Name) + }) +} + +func Test_createOrUpdateDaemonService(t *testing.T) { + fakeConfig := reconciler.FakeGlobalConfig(t, nil) + cl := fake.NewClientBuilder().Build() + r := &monoVertexReconciler{ + client: cl, + scheme: scheme.Scheme, + config: fakeConfig, + image: testFlowImage, + logger: zaptest.NewLogger(t).Sugar(), + recorder: record.NewFakeRecorder(64), + } + + t.Run("test everything from scratch for daemon service", func(t *testing.T) { + testObj := testMonoVtx.DeepCopy() + err := r.createOrUpdateDaemonService(context.TODO(), testObj) + assert.NoError(t, err) + var daemonSvc corev1.Service + err = r.client.Get(context.TODO(), client.ObjectKey{Namespace: testObj.GetNamespace(), Name: testObj.GetDaemonServiceName()}, + &daemonSvc) + assert.NoError(t, err) + assert.Equal(t, testObj.GetDaemonServiceName(), daemonSvc.Name) + assert.Equal(t, 1, len(daemonSvc.Spec.Ports)) + assert.Equal(t, int32(dfv1.MonoVertexDaemonServicePort), daemonSvc.Spec.Ports[0].Port) + }) +} + +func Test_createOrUpdateMonoVtxServices(t *testing.T) { + fakeConfig := reconciler.FakeGlobalConfig(t, nil) + cl := fake.NewClientBuilder().Build() + r := &monoVertexReconciler{ + client: cl, + scheme: scheme.Scheme, + config: fakeConfig, + image: testFlowImage, + logger: zaptest.NewLogger(t).Sugar(), + recorder: 
record.NewFakeRecorder(64), + } + + t.Run("test everything from scratch for monovtx service", func(t *testing.T) { + testObj := testMonoVtx.DeepCopy() + err := r.createOrUpdateMonoVtxServices(context.TODO(), testObj) + assert.NoError(t, err) + var svc corev1.Service + err = r.client.Get(context.TODO(), client.ObjectKey{Namespace: testObj.GetNamespace(), Name: testObj.GetHeadlessServiceName()}, + &svc) + assert.NoError(t, err) + assert.Equal(t, testObj.GetHeadlessServiceName(), svc.Name) + assert.Equal(t, 1, len(svc.Spec.Ports)) + assert.Equal(t, int32(dfv1.MonoVertexMetricsPort), svc.Spec.Ports[0].Port) + m, err := r.findExistingMonoVtxServices(context.TODO(), testObj) + assert.NoError(t, err) + assert.Equal(t, 1, len(m)) + }) +} + +func Test_orchestratePods(t *testing.T) { + fakeConfig := reconciler.FakeGlobalConfig(t, nil) + cl := fake.NewClientBuilder().Build() + r := &monoVertexReconciler{ + client: cl, + scheme: scheme.Scheme, + config: fakeConfig, + image: testFlowImage, + logger: zaptest.NewLogger(t).Sugar(), + recorder: record.NewFakeRecorder(64), + } + t.Run("test orchestratePodsFromTo and cleanUpPodsFromTo", func(t *testing.T) { + testObj := testMonoVtx.DeepCopy() + hash := "test-hasssssh" + podSpec, err := r.buildPodSpec(testObj) + assert.NoError(t, err) + err = r.orchestratePodsFromTo(context.TODO(), testObj, *podSpec, 2, 4, hash) + assert.NoError(t, err) + foundPods, err := r.findExistingPods(context.TODO(), testObj, 2, 4) + assert.NoError(t, err) + assert.Equal(t, 2, len(foundPods)) + for n, pod := range foundPods { + assert.Equal(t, hash, pod.Annotations[dfv1.KeyHash]) + assert.True(t, strings.HasPrefix(n, testObj.Name+"-mv-2") || strings.HasPrefix(n, testObj.Name+"-mv-3")) + } + err = r.cleanUpPodsFromTo(context.TODO(), testObj, 2, 4) + assert.NoError(t, err) + foundPods, err = r.findExistingPods(context.TODO(), testObj, 2, 4) + assert.NoError(t, err) + assert.Equal(t, 0, len(foundPods)) + }) + + t.Run("test orchestratePods", func(t *testing.T) { + 
testObj := testMonoVtx.DeepCopy() + err := r.orchestratePods(context.TODO(), testObj) + assert.NoError(t, err) + foundPods, err := r.findExistingPods(context.TODO(), testObj, 0, 4) + assert.NoError(t, err) + assert.Equal(t, 2, len(foundPods)) + for n := range foundPods { + assert.True(t, strings.HasPrefix(n, testObj.Name+"-mv-0") || strings.HasPrefix(n, testObj.Name+"-mv-1")) + } + }) +} + +func Test_orchestrateFixedResources(t *testing.T) { + fakeConfig := reconciler.FakeGlobalConfig(t, nil) + cl := fake.NewClientBuilder().Build() + r := &monoVertexReconciler{ + client: cl, + scheme: scheme.Scheme, + config: fakeConfig, + image: testFlowImage, + logger: zaptest.NewLogger(t).Sugar(), + recorder: record.NewFakeRecorder(64), + } + testObj := testMonoVtx.DeepCopy() + err := r.orchestrateFixedResources(context.TODO(), testObj) + assert.NoError(t, err) + var svc corev1.Service + err = r.client.Get(context.TODO(), client.ObjectKey{Namespace: testObj.GetNamespace(), Name: testObj.GetHeadlessServiceName()}, + &svc) + assert.NoError(t, err) + assert.Equal(t, testObj.GetHeadlessServiceName(), svc.Name) + var daemonSvc corev1.Service + err = r.client.Get(context.TODO(), client.ObjectKey{Namespace: testObj.GetNamespace(), Name: testObj.GetDaemonServiceName()}, + &daemonSvc) + assert.NoError(t, err) + assert.Equal(t, testObj.GetDaemonServiceName(), daemonSvc.Name) + var daemonDeployment appv1.Deployment + err = r.client.Get(context.TODO(), client.ObjectKey{Namespace: testObj.GetNamespace(), Name: testObj.GetDaemonDeploymentName()}, + &daemonDeployment) + assert.NoError(t, err) + assert.Equal(t, testObj.GetDaemonDeploymentName(), daemonDeployment.Name) +} + +func Test_reconcile(t *testing.T) { + fakeConfig := reconciler.FakeGlobalConfig(t, nil) + cl := fake.NewClientBuilder().Build() + r := &monoVertexReconciler{ + client: cl, + scheme: scheme.Scheme, + config: fakeConfig, + image: testFlowImage, + logger: zaptest.NewLogger(t).Sugar(), + recorder: record.NewFakeRecorder(64), + 
scaler: scaling.NewScaler(cl), + } + testObj := testMonoVtx.DeepCopy() + _, err := r.reconcile(context.TODO(), testObj) + assert.NoError(t, err) + var daemonDeployment appv1.Deployment + err = r.client.Get(context.TODO(), client.ObjectKey{Namespace: testObj.GetNamespace(), Name: testObj.GetDaemonDeploymentName()}, + &daemonDeployment) + assert.NoError(t, err) + assert.Equal(t, testObj.GetDaemonDeploymentName(), daemonDeployment.Name) +} diff --git a/pkg/reconciler/monovertex/scaling/scaling.go b/pkg/reconciler/monovertex/scaling/scaling.go index fa085fc2b4..2ae99fcf1b 100644 --- a/pkg/reconciler/monovertex/scaling/scaling.go +++ b/pkg/reconciler/monovertex/scaling/scaling.go @@ -157,6 +157,10 @@ func (s *Scaler) scaleOneMonoVertex(ctx context.Context, key string, worker int) s.StopWatching(key) // Remove it in case it's watched. return nil } + if monoVtx.Status.UpdateHash != monoVtx.Status.CurrentHash && monoVtx.Status.UpdateHash != "" { + log.Info("MonoVertex is updating, skip scaling.") + return nil + } secondsSinceLastScale := time.Since(monoVtx.Status.LastScaledAt.Time).Seconds() scaleDownCooldown := float64(monoVtx.Spec.Scale.GetScaleDownCooldownSeconds()) scaleUpCooldown := float64(monoVtx.Spec.Scale.GetScaleUpCooldownSeconds()) @@ -174,7 +178,7 @@ func (s *Scaler) scaleOneMonoVertex(ctx context.Context, key string, worker int) // log.Info("MonoVertex is pausing, skip scaling.") // return nil // } - if int(monoVtx.Status.Replicas) != monoVtx.GetReplicas() { + if int(monoVtx.Status.Replicas) != monoVtx.CalculateReplicas() { log.Infof("MonoVertex %s might be under processing, replicas mismatch, skip scaling.", monoVtx.Name) return nil } @@ -235,7 +239,7 @@ func (s *Scaler) scaleOneMonoVertex(ctx context.Context, key string, worker int) desired = min log.Infof("Calculated desired replica number %d of MonoVertex %q is smaller than min, using min %d.", monoVtxName, desired, min) } - current := int32(monoVtx.GetReplicas()) + current := 
int32(monoVtx.Status.Replicas) if current > max || current < min { // Someone might have manually scaled up/down the MonoVertex return s.patchMonoVertexReplicas(ctx, monoVtx, desired) } diff --git a/pkg/reconciler/pipeline/controller.go b/pkg/reconciler/pipeline/controller.go index 0f5354b2c8..96d9fa2266 100644 --- a/pkg/reconciler/pipeline/controller.go +++ b/pkg/reconciler/pipeline/controller.go @@ -145,7 +145,7 @@ func (r *pipelineReconciler) reconcile(ctx context.Context, pl *dfv1.Pipeline) ( pl.Status.SetObservedGeneration(pl.Generation) // Regular pipeline change - // This should be happening in call cases to ensure a clean initialization regardless of the lifecycle phase + // This should be happening in all cases to ensure a clean initialization regardless of the lifecycle phase // Eg: even for a pipeline started with desiredPhase = Pause, we should still create the resources for the pipeline result, err := r.reconcileNonLifecycleChanges(ctx, pl) if err != nil { diff --git a/pkg/reconciler/util.go b/pkg/reconciler/util.go index 6d27a749bb..8edad6b1a8 100644 --- a/pkg/reconciler/util.go +++ b/pkg/reconciler/util.go @@ -63,20 +63,25 @@ func isPodHealthy(pod *corev1.Pod) (healthy bool, reason string) { func NumOfReadyPods(pods corev1.PodList) int { result := 0 for _, pod := range pods.Items { - ready := true - for _, s := range pod.Status.ContainerStatuses { - if !s.Ready { - ready = false - break - } - } - if ready { + if IsPodReady(pod) { result++ } } return result } +func IsPodReady(pod corev1.Pod) bool { + if pod.Status.Phase != corev1.PodRunning { + return false + } + for _, c := range pod.Status.ContainerStatuses { + if !c.Ready { + return false + } + } + return true +} + // CheckVertexStatus will calculate the status of the vertices and return the status and reason func CheckVertexStatus(vertices *dfv1.VertexList) (healthy bool, reason string, message string) { for _, vertex := range vertices.Items { diff --git a/pkg/reconciler/util_test.go 
b/pkg/reconciler/util_test.go index 51348f5a32..72a09da1bd 100644 --- a/pkg/reconciler/util_test.go +++ b/pkg/reconciler/util_test.go @@ -260,6 +260,7 @@ func TestNumOfReadyPods(t *testing.T) { Items: []corev1.Pod{ { Status: corev1.PodStatus{ + Phase: corev1.PodRunning, ContainerStatuses: []corev1.ContainerStatus{ { Ready: true, @@ -272,6 +273,7 @@ func TestNumOfReadyPods(t *testing.T) { }, { Status: corev1.PodStatus{ + Phase: corev1.PodRunning, ContainerStatuses: []corev1.ContainerStatus{ { Ready: false, @@ -284,6 +286,7 @@ func TestNumOfReadyPods(t *testing.T) { }, { Status: corev1.PodStatus{ + Phase: corev1.PodRunning, ContainerStatuses: []corev1.ContainerStatus{ { Ready: true, @@ -296,6 +299,7 @@ func TestNumOfReadyPods(t *testing.T) { }, { Status: corev1.PodStatus{ + Phase: corev1.PodRunning, ContainerStatuses: []corev1.ContainerStatus{ { Ready: true, @@ -311,6 +315,7 @@ func TestNumOfReadyPods(t *testing.T) { }, { Status: corev1.PodStatus{ + Phase: corev1.PodRunning, ContainerStatuses: []corev1.ContainerStatus{ { Ready: false, @@ -324,6 +329,22 @@ func TestNumOfReadyPods(t *testing.T) { }, }, }, + { + Status: corev1.PodStatus{ + Phase: corev1.PodFailed, + ContainerStatuses: []corev1.ContainerStatus{ + { + Ready: true, + }, + { + Ready: true, + }, + { + Ready: true, + }, + }, + }, + }, }, } assert.Equal(t, 2, NumOfReadyPods(pods)) diff --git a/rust/numaflow-models/Makefile b/rust/numaflow-models/Makefile index 3e38249cae..069179f746 100644 --- a/rust/numaflow-models/Makefile +++ b/rust/numaflow-models/Makefile @@ -19,6 +19,7 @@ generate: sed 's/io.k8s.api.core.v1./CoreV1/' | \ sed 's/io.k8s.apimachinery.pkg.apis.meta.v1./MetaV1/' | \ sed 's/io.k8s.apimachinery.pkg.api.resource.Quantity/ResourceQuantity/' | \ + sed 's/io.k8s.apimachinery.pkg.util.intstr.IntOrString/IntOrString/' | \ sed 's/io.numaproj.numaflow.v1alpha1.//' \ > ./dist/swagger.json $(DOCKER) openapitools/openapi-generator-cli:$(GENERATOR_VERSION) \ @@ -58,6 +59,7 @@ generate: --type-mappings 
MetaV1Time="k8s_openapi::apimachinery::pkg::apis::meta::v1::Time" \ --type-mappings MetaV1ObjectMeta="k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta" \ --type-mappings ResourceQuantity="k8s_openapi::apimachinery::pkg::api::resource::Quantity" \ + --type-mappings IntOrString="k8s_openapi::apimachinery::pkg::util::intstr::IntOrString" \ --generate-alias-as-model cargo add kube diff --git a/rust/numaflow-models/src/models/mod.rs b/rust/numaflow-models/src/models/mod.rs index bbb11ca261..29846575d2 100644 --- a/rust/numaflow-models/src/models/mod.rs +++ b/rust/numaflow-models/src/models/mod.rs @@ -128,6 +128,8 @@ pub mod redis_settings; pub use self::redis_settings::RedisSettings; pub mod retry_strategy; pub use self::retry_strategy::RetryStrategy; +pub mod rolling_update_strategy; +pub use self::rolling_update_strategy::RollingUpdateStrategy; pub mod sasl; pub use self::sasl::Sasl; pub mod sasl_plain; @@ -170,6 +172,8 @@ pub mod ud_transformer; pub use self::ud_transformer::UdTransformer; pub mod udf; pub use self::udf::Udf; +pub mod update_strategy; +pub use self::update_strategy::UpdateStrategy; pub mod vertex; pub use self::vertex::Vertex; pub mod vertex_instance; diff --git a/rust/numaflow-models/src/models/mono_vertex_spec.rs b/rust/numaflow-models/src/models/mono_vertex_spec.rs index 8eadbbdaab..6d4068bee7 100644 --- a/rust/numaflow-models/src/models/mono_vertex_spec.rs +++ b/rust/numaflow-models/src/models/mono_vertex_spec.rs @@ -79,6 +79,8 @@ pub struct MonoVertexSpec { /// If specified, the pod's tolerations. 
#[serde(rename = "tolerations", skip_serializing_if = "Option::is_none")] pub tolerations: Option>, + #[serde(rename = "updateStrategy", skip_serializing_if = "Option::is_none")] + pub update_strategy: Option>, #[serde(rename = "volumes", skip_serializing_if = "Option::is_none")] pub volumes: Option>, } @@ -109,6 +111,7 @@ impl MonoVertexSpec { sink: None, source: None, tolerations: None, + update_strategy: None, volumes: None, } } diff --git a/rust/numaflow-models/src/models/mono_vertex_status.rs b/rust/numaflow-models/src/models/mono_vertex_status.rs index bd2fdee807..16e2de5479 100644 --- a/rust/numaflow-models/src/models/mono_vertex_status.rs +++ b/rust/numaflow-models/src/models/mono_vertex_status.rs @@ -21,12 +21,12 @@ pub struct MonoVertexStatus { /// Conditions are the latest available observations of a resource's current state. #[serde(rename = "conditions", skip_serializing_if = "Option::is_none")] pub conditions: Option>, - /// If not empty, indicates the version of the MonoVertex used to generate Pods in the sequence [0,currentReplicas). + /// If not empty, indicates the current version of the MonoVertex used to generate Pods. #[serde(rename = "currentHash", skip_serializing_if = "Option::is_none")] pub current_hash: Option, - /// The number of Pods created by the controller from the MonoVertex version indicated by currentHash. - #[serde(rename = "currentReplicas", skip_serializing_if = "Option::is_none")] - pub current_replicas: Option, + /// The number of desired replicas. 
+ #[serde(rename = "desiredReplicas", skip_serializing_if = "Option::is_none")] + pub desired_replicas: Option, #[serde(rename = "lastScaledAt", skip_serializing_if = "Option::is_none")] pub last_scaled_at: Option, #[serde(rename = "lastUpdated", skip_serializing_if = "Option::is_none")] @@ -48,9 +48,15 @@ pub struct MonoVertexStatus { pub replicas: Option, #[serde(rename = "selector", skip_serializing_if = "Option::is_none")] pub selector: Option, - /// If not empty, indicates the version of the MonoVertx used to generate Pods in the sequence [replicas-updatedReplicas,replicas) + /// If not empty, indicates the updated version of the MonoVertex used to generate Pods. #[serde(rename = "updateHash", skip_serializing_if = "Option::is_none")] pub update_hash: Option, + /// The number of ready Pods created by the controller from the MonoVertex version indicated by updateHash. + #[serde( + rename = "updatedReadyReplicas", + skip_serializing_if = "Option::is_none" + )] + pub updated_ready_replicas: Option, /// The number of Pods created by the controller from the MonoVertex version indicated by updateHash. #[serde(rename = "updatedReplicas", skip_serializing_if = "Option::is_none")] pub updated_replicas: Option, @@ -61,7 +67,7 @@ impl MonoVertexStatus { MonoVertexStatus { conditions: None, current_hash: None, - current_replicas: None, + desired_replicas: None, last_scaled_at: None, last_updated: None, message: None, @@ -72,6 +78,7 @@ impl MonoVertexStatus { replicas: None, selector: None, update_hash: None, + updated_ready_replicas: None, updated_replicas: None, } } diff --git a/rust/numaflow-models/src/models/rolling_update_strategy.rs b/rust/numaflow-models/src/models/rolling_update_strategy.rs new file mode 100644 index 0000000000..2712fea2b4 --- /dev/null +++ b/rust/numaflow-models/src/models/rolling_update_strategy.rs @@ -0,0 +1,34 @@ +/* +Copyright 2022 The Numaproj Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. + +/// RollingUpdateStrategy : RollingUpdateStrategy is used to communicate parameter for RollingUpdateStrategyType. + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct RollingUpdateStrategy { + #[serde(rename = "maxUnavailable", skip_serializing_if = "Option::is_none")] + pub max_unavailable: Option, +} + +impl RollingUpdateStrategy { + /// RollingUpdateStrategy is used to communicate parameter for RollingUpdateStrategyType. + pub fn new() -> RollingUpdateStrategy { + RollingUpdateStrategy { + max_unavailable: None, + } + } +} diff --git a/rust/numaflow-models/src/models/update_strategy.rs b/rust/numaflow-models/src/models/update_strategy.rs new file mode 100644 index 0000000000..96f1256351 --- /dev/null +++ b/rust/numaflow-models/src/models/update_strategy.rs @@ -0,0 +1,51 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by Openapi Generator. DO NOT EDIT. + +/// UpdateStrategy : UpdateStrategy indicates the strategy that the controller will use to perform updates for Vertex or MonoVertex. + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct UpdateStrategy { + #[serde(rename = "rollingUpdate", skip_serializing_if = "Option::is_none")] + pub rolling_update: Option>, + /// Type indicates the type of the StatefulSetUpdateStrategy. Default is RollingUpdate. Possible enum values: - `\"RollingUpdate\"` + #[serde(rename = "type", skip_serializing_if = "Option::is_none")] + pub r#type: Option, +} + +impl UpdateStrategy { + /// UpdateStrategy indicates the strategy that the controller will use to perform updates for Vertex or MonoVertex. + pub fn new() -> UpdateStrategy { + UpdateStrategy { + rolling_update: None, + r#type: None, + } + } +} + +/// Type indicates the type of the StatefulSetUpdateStrategy. Default is RollingUpdate. Possible enum values: - `\"RollingUpdate\"` +#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] +pub enum Type { + #[serde(rename = "RollingUpdate")] + RollingUpdate, +} + +impl Default for Type { + fn default() -> Type { + Self::RollingUpdate + } +} diff --git a/test/fixtures/util.go b/test/fixtures/util.go index 2c8c8a0ae1..c88d28d1af 100644 --- a/test/fixtures/util.go +++ b/test/fixtures/util.go @@ -278,7 +278,7 @@ func WaitForMonoVertexPodRunning(kubeClient kubernetes.Interface, monoVertexClie if err != nil { return fmt.Errorf("error getting monovertex pod list: %w", err) } - ok := len(podList.Items) > 0 && len(podList.Items) == monoVertex.GetReplicas() // pod number should equal to desired replicas + ok := len(podList.Items) > 0 && len(podList.Items) == monoVertex.CalculateReplicas() // pod number should equal to desired replicas for _, p := range podList.Items { ok = ok && p.Status.Phase == corev1.PodRunning } From 2189ea9ec23368a9fdebaa72667b85c0fe9c4fc2 Mon Sep 17 00:00:00 
2001 From: Derek Wang Date: Sat, 7 Sep 2024 10:56:28 -0700 Subject: [PATCH 051/188] chore: minor pipeline controller refactor (#2039) --- api/json-schema/schema.json | 2 +- api/openapi-spec/swagger.json | 2 +- .../full/numaflow.numaproj.io_pipelines.yaml | 1 - config/install.yaml | 1 - config/namespace-install.yaml | 1 - docs/APIs.md | 7 +- examples/1-simple-pipeline.yaml | 2 +- pkg/apis/numaflow/v1alpha1/generated.proto | 7 +- .../numaflow/v1alpha1/openapi_generated.go | 2 +- pkg/apis/numaflow/v1alpha1/pipeline_types.go | 7 +- pkg/reconciler/monovertex/controller.go | 5 +- pkg/reconciler/monovertex/scaling/scaling.go | 4 +- pkg/reconciler/pipeline/controller.go | 66 ++++----- pkg/reconciler/pipeline/controller_test.go | 132 ++++++++---------- pkg/reconciler/vertex/scaling/scaling.go | 10 +- .../src/models/pipeline_status.rs | 2 +- 16 files changed, 117 insertions(+), 134 deletions(-) diff --git a/api/json-schema/schema.json b/api/json-schema/schema.json index 27796003ba..44e18ef85a 100644 --- a/api/json-schema/schema.json +++ b/api/json-schema/schema.json @@ -19691,7 +19691,7 @@ "x-kubernetes-patch-strategy": "merge" }, "drainedOnPause": { - "description": "Field to indicate if a pipeline drain successfully occurred, or it timed out. Set to true when the Pipeline is in Paused state, and after it has successfully been drained. defaults to false", + "description": "Field to indicate if a pipeline drain successfully occurred, only meaningful when the pipeline is paused. True means it has been successfully drained.", "type": "boolean" }, "lastUpdated": { diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index bb918bac66..7067416291 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -19678,7 +19678,7 @@ "x-kubernetes-patch-strategy": "merge" }, "drainedOnPause": { - "description": "Field to indicate if a pipeline drain successfully occurred, or it timed out. 
Set to true when the Pipeline is in Paused state, and after it has successfully been drained. defaults to false", + "description": "Field to indicate if a pipeline drain successfully occurred, only meaningful when the pipeline is paused. True means it has been successfully drained.", "type": "boolean" }, "lastUpdated": { diff --git a/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml b/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml index 9670b018e0..6fc509dc96 100644 --- a/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml @@ -9822,7 +9822,6 @@ spec: type: object type: array drainedOnPause: - default: false type: boolean lastUpdated: format: date-time diff --git a/config/install.yaml b/config/install.yaml index ac272ddf19..d19f2fa2f2 100644 --- a/config/install.yaml +++ b/config/install.yaml @@ -18095,7 +18095,6 @@ spec: type: object type: array drainedOnPause: - default: false type: boolean lastUpdated: format: date-time diff --git a/config/namespace-install.yaml b/config/namespace-install.yaml index 12579dc36f..5919b0fcf8 100644 --- a/config/namespace-install.yaml +++ b/config/namespace-install.yaml @@ -18095,7 +18095,6 @@ spec: type: object type: array drainedOnPause: - default: false type: boolean lastUpdated: format: date-time diff --git a/docs/APIs.md b/docs/APIs.md index 5fd26ad505..bd71cf0592 100644 --- a/docs/APIs.md +++ b/docs/APIs.md @@ -8011,11 +8011,12 @@ The generation observed by the Pipeline controller. +(Optional)

-Field to indicate if a pipeline drain successfully occurred, or it timed -out. Set to true when the Pipeline is in Paused state, and after it has -successfully been drained. defaults to false +Field to indicate if a pipeline drain successfully occurred, only +meaningful when the pipeline is paused. True means it has been +successfully drained.

diff --git a/examples/1-simple-pipeline.yaml b/examples/1-simple-pipeline.yaml index 42e9d9e095..e790fa3150 100644 --- a/examples/1-simple-pipeline.yaml +++ b/examples/1-simple-pipeline.yaml @@ -27,4 +27,4 @@ spec: - from: in to: cat - from: cat - to: out \ No newline at end of file + to: out diff --git a/pkg/apis/numaflow/v1alpha1/generated.proto b/pkg/apis/numaflow/v1alpha1/generated.proto index 1e6e6bd35a..78e10457a0 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.proto +++ b/pkg/apis/numaflow/v1alpha1/generated.proto @@ -1229,10 +1229,9 @@ message PipelineStatus { // +optional optional int64 observedGeneration = 11; - // Field to indicate if a pipeline drain successfully occurred, or it timed out. - // Set to true when the Pipeline is in Paused state, and after it has successfully been drained. - // defaults to false - // +kubebuilder:default=false + // Field to indicate if a pipeline drain successfully occurred, only meaningful when the pipeline is paused. + // True means it has been successfully drained. + // +optional optional bool drainedOnPause = 12; } diff --git a/pkg/apis/numaflow/v1alpha1/openapi_generated.go b/pkg/apis/numaflow/v1alpha1/openapi_generated.go index a339fdfb7a..765dd96681 100644 --- a/pkg/apis/numaflow/v1alpha1/openapi_generated.go +++ b/pkg/apis/numaflow/v1alpha1/openapi_generated.go @@ -4151,7 +4151,7 @@ func schema_pkg_apis_numaflow_v1alpha1_PipelineStatus(ref common.ReferenceCallba }, "drainedOnPause": { SchemaProps: spec.SchemaProps{ - Description: "Field to indicate if a pipeline drain successfully occurred, or it timed out. Set to true when the Pipeline is in Paused state, and after it has successfully been drained. defaults to false", + Description: "Field to indicate if a pipeline drain successfully occurred, only meaningful when the pipeline is paused. 
True means it has been successfully drained.", Type: []string{"boolean"}, Format: "", }, diff --git a/pkg/apis/numaflow/v1alpha1/pipeline_types.go b/pkg/apis/numaflow/v1alpha1/pipeline_types.go index c68a7d647c..ff8dfaf5e5 100644 --- a/pkg/apis/numaflow/v1alpha1/pipeline_types.go +++ b/pkg/apis/numaflow/v1alpha1/pipeline_types.go @@ -633,10 +633,9 @@ type PipelineStatus struct { // The generation observed by the Pipeline controller. // +optional ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,11,opt,name=observedGeneration"` - // Field to indicate if a pipeline drain successfully occurred, or it timed out. - // Set to true when the Pipeline is in Paused state, and after it has successfully been drained. - // defaults to false - // +kubebuilder:default=false + // Field to indicate if a pipeline drain successfully occurred, only meaningful when the pipeline is paused. + // True means it has been successfully drained. + // +optional DrainedOnPause bool `json:"drainedOnPause,omitempty" protobuf:"bytes,12,opt,name=drainedOnPause"` } diff --git a/pkg/reconciler/monovertex/controller.go b/pkg/reconciler/monovertex/controller.go index a0de7e6d6a..a7b7a90c40 100644 --- a/pkg/reconciler/monovertex/controller.go +++ b/pkg/reconciler/monovertex/controller.go @@ -110,6 +110,7 @@ func (mr *monoVertexReconciler) reconcile(ctx context.Context, monoVtx *dfv1.Mon } }() + monoVtx.Status.InitializeConditions() monoVtx.Status.SetObservedGeneration(monoVtx.Generation) if monoVtx.Scalable() { mr.scaler.StartWatching(mVtxKey) @@ -251,7 +252,7 @@ func (mr *monoVertexReconciler) orchestratePods(ctx context.Context, monoVtx *df if updatedReplicas+toBeUpdated > desiredReplicas { toBeUpdated = desiredReplicas - updatedReplicas } - log.Infof("Rolling update %d replicas, [%d, %d)\n", toBeUpdated, updatedReplicas, updatedReplicas+toBeUpdated) + log.Infof("Rolling update %d replicas, [%d, %d)", toBeUpdated, updatedReplicas, updatedReplicas+toBeUpdated) // Create pods 
[updatedReplicas, updatedReplicas+toBeUpdated), and clean up any pods in that range that has a different hash if err := mr.orchestratePodsFromTo(ctx, monoVtx, *podSpec, updatedReplicas, updatedReplicas+toBeUpdated, monoVtx.Status.UpdateHash); err != nil { @@ -310,7 +311,7 @@ func (mr *monoVertexReconciler) cleanUpPodsFromTo(ctx context.Context, monoVtx * if err := mr.client.Delete(ctx, &pod); err != nil { return fmt.Errorf("failed to delete pod %s: %w", pod.Name, err) } - log.Infof("Deleted MonoVertx pod %s\n", pod.Name) + log.Infof("Deleted MonoVertx pod %q", pod.Name) mr.recorder.Eventf(monoVtx, corev1.EventTypeNormal, "DeletePodSuccess", "Succeeded to delete a mono vertex pod %s", pod.Name) } return nil diff --git a/pkg/reconciler/monovertex/scaling/scaling.go b/pkg/reconciler/monovertex/scaling/scaling.go index 2ae99fcf1b..d523800cdd 100644 --- a/pkg/reconciler/monovertex/scaling/scaling.go +++ b/pkg/reconciler/monovertex/scaling/scaling.go @@ -233,11 +233,11 @@ func (s *Scaler) scaleOneMonoVertex(ctx context.Context, key string, worker int) min := monoVtx.Spec.Scale.GetMinReplicas() if desired > max { desired = max - log.Infof("Calculated desired replica number %d of MonoVertex %q is greater than max, using max %d.", monoVtxName, desired, max) + log.Infof("Calculated desired replica number %d of MonoVertex %q is greater than max, using max %d.", desired, monoVtxName, max) } if desired < min { desired = min - log.Infof("Calculated desired replica number %d of MonoVertex %q is smaller than min, using min %d.", monoVtxName, desired, min) + log.Infof("Calculated desired replica number %d of MonoVertex %q is smaller than min, using min %d.", desired, monoVtxName, min) } current := int32(monoVtx.Status.Replicas) if current > max || current < min { // Someone might have manually scaled up/down the MonoVertex diff --git a/pkg/reconciler/pipeline/controller.go b/pkg/reconciler/pipeline/controller.go index 96d9fa2266..955344a8c1 100644 --- 
a/pkg/reconciler/pipeline/controller.go +++ b/pkg/reconciler/pipeline/controller.go @@ -143,15 +143,25 @@ func (r *pipelineReconciler) reconcile(ctx context.Context, pl *dfv1.Pipeline) ( } }() + pl.Status.InitConditions() pl.Status.SetObservedGeneration(pl.Generation) - // Regular pipeline change - // This should be happening in all cases to ensure a clean initialization regardless of the lifecycle phase - // Eg: even for a pipeline started with desiredPhase = Pause, we should still create the resources for the pipeline - result, err := r.reconcileNonLifecycleChanges(ctx, pl) - if err != nil { + // Orchestrate pipeline sub resources. + // This should be happening in all cases to ensure a clean initialization regardless of the lifecycle phase. + // Eg: even for a pipeline started with desiredPhase = Pause, we should still create the resources for the pipeline. + if err := r.reconcileFixedResources(ctx, pl); err != nil { r.recorder.Eventf(pl, corev1.EventTypeWarning, "ReconcilePipelineFailed", "Failed to reconcile pipeline: %v", err.Error()) - return result, err + return ctrl.Result{}, err + } + // If the pipeline has a lifecycle change, then do not update the phase as + // this should happen only after the required configs for the lifecycle changes + // have been applied. 
+ if !isLifecycleChange(pl) { + pl.Status.SetPhase(pl.Spec.Lifecycle.GetDesiredPhase(), "") } + if err := r.checkChildrenResourceStatus(ctx, pl); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to check pipeline children resource status, %w", err) + } + // check if any changes related to pause/resume lifecycle for the pipeline if isLifecycleChange(pl) { oldPhase := pl.Status.Phase @@ -171,7 +181,7 @@ func (r *pipelineReconciler) reconcile(ctx context.Context, pl *dfv1.Pipeline) ( } return ctrl.Result{}, nil } - return result, nil + return ctrl.Result{}, nil } // isLifecycleChange determines whether there has been a change requested in the lifecycle @@ -190,17 +200,16 @@ func isLifecycleChange(pl *dfv1.Pipeline) bool { return false } -// reconcileNonLifecycleChanges do the jobs not related to pipeline lifecycle changes. -func (r *pipelineReconciler) reconcileNonLifecycleChanges(ctx context.Context, pl *dfv1.Pipeline) (ctrl.Result, error) { +// reconcileFixedResources do the jobs of creating fixed resources such as daemon service, vertex objects, and ISB management jobs, etc +func (r *pipelineReconciler) reconcileFixedResources(ctx context.Context, pl *dfv1.Pipeline) error { log := logging.FromContext(ctx) if !controllerutil.ContainsFinalizer(pl, finalizerName) { controllerutil.AddFinalizer(pl, finalizerName) } - pl.Status.InitConditions() if err := ValidatePipeline(pl); err != nil { log.Errorw("Validation failed", zap.Error(err)) pl.Status.MarkNotConfigured("InvalidSpec", err.Error()) - return ctrl.Result{}, err + return err } pl.Status.SetVertexCounts(pl.Spec.Vertices) pl.Status.MarkConfigured() @@ -215,16 +224,16 @@ func (r *pipelineReconciler) reconcileNonLifecycleChanges(ctx context.Context, p if apierrors.IsNotFound(err) { pl.Status.MarkDeployFailed("ISBSvcNotFound", "ISB Service not found.") log.Errorw("ISB Service not found", zap.String("isbsvc", isbSvcName), zap.Error(err)) - return ctrl.Result{}, fmt.Errorf("isbsvc %s not found", isbSvcName) + 
return fmt.Errorf("isbsvc %s not found", isbSvcName) } pl.Status.MarkDeployFailed("GetISBSvcFailed", err.Error()) log.Errorw("Failed to get ISB Service", zap.String("isbsvc", isbSvcName), zap.Error(err)) - return ctrl.Result{}, err + return err } if !isbSvc.Status.IsHealthy() { pl.Status.MarkDeployFailed("ISBSvcNotHealthy", "ISB Service not healthy.") log.Errorw("ISB Service is not in healthy status", zap.String("isbsvc", isbSvcName), zap.Error(err)) - return ctrl.Result{}, fmt.Errorf("isbsvc not healthy") + return fmt.Errorf("isbsvc not healthy") } // Create or update the Side Inputs Manager deployments @@ -232,14 +241,14 @@ func (r *pipelineReconciler) reconcileNonLifecycleChanges(ctx context.Context, p log.Errorw("Failed to create or update Side Inputs Manager deployments", zap.Error(err)) pl.Status.MarkDeployFailed("CreateOrUpdateSIMDeploymentsFailed", err.Error()) r.recorder.Eventf(pl, corev1.EventTypeWarning, "CreateOrUpdateSIMDeploymentsFailed", "Failed to create or update Side Inputs Manager deployments: %w", err.Error()) - return ctrl.Result{}, err + return err } existingObjs, err := r.findExistingVertices(ctx, pl) if err != nil { log.Errorw("Failed to find existing vertices", zap.Error(err)) pl.Status.MarkDeployFailed("ListVerticesFailed", err.Error()) - return ctrl.Result{}, err + return err } oldBuffers := make(map[string]string) newBuffers := make(map[string]string) @@ -279,7 +288,7 @@ func (r *pipelineReconciler) reconcileNonLifecycleChanges(ctx context.Context, p } else { pl.Status.MarkDeployFailed("CreateVertexFailed", err.Error()) r.recorder.Eventf(pl, corev1.EventTypeWarning, "CreateVertexFailed", "Failed to create vertex: %w", err.Error()) - return ctrl.Result{}, fmt.Errorf("failed to create vertex, err: %w", err) + return fmt.Errorf("failed to create vertex, err: %w", err) } } log.Infow("Created vertex successfully", zap.String("vertex", vertexName)) @@ -291,7 +300,7 @@ func (r *pipelineReconciler) reconcileNonLifecycleChanges(ctx 
context.Context, p if err := r.client.Update(ctx, &oldObj); err != nil { pl.Status.MarkDeployFailed("UpdateVertexFailed", err.Error()) r.recorder.Eventf(pl, corev1.EventTypeWarning, "UpdateVertexFailed", "Failed to update vertex: %w", err.Error()) - return ctrl.Result{}, fmt.Errorf("failed to update vertex, err: %w", err) + return fmt.Errorf("failed to update vertex, err: %w", err) } log.Infow("Updated vertex successfully", zap.String("vertex", vertexName)) r.recorder.Eventf(pl, corev1.EventTypeNormal, "UpdateVertexSuccess", "Updated vertex %s successfully", vertexName) @@ -303,7 +312,7 @@ func (r *pipelineReconciler) reconcileNonLifecycleChanges(ctx context.Context, p if err := r.client.Delete(ctx, &v); err != nil { pl.Status.MarkDeployFailed("DeleteStaleVertexFailed", err.Error()) r.recorder.Eventf(pl, corev1.EventTypeWarning, "DeleteStaleVertexFailed", "Failed to delete vertex: %w", err.Error()) - return ctrl.Result{}, fmt.Errorf("failed to delete vertex, err: %w", err) + return fmt.Errorf("failed to delete vertex, err: %w", err) } log.Infow("Deleted stale vertex successfully", zap.String("vertex", v.Name)) r.recorder.Eventf(pl, corev1.EventTypeNormal, "DeleteStaleVertexSuccess", "Deleted stale vertex %s successfully", v.Name) @@ -328,7 +337,7 @@ func (r *pipelineReconciler) reconcileNonLifecycleChanges(ctx context.Context, p batchJob := buildISBBatchJob(pl, r.image, isbSvc.Status.Config, "isbsvc-create", args, "cre") if err := r.client.Create(ctx, batchJob); err != nil && !apierrors.IsAlreadyExists(err) { pl.Status.MarkDeployFailed("CreateISBSvcCreatingJobFailed", err.Error()) - return ctrl.Result{}, fmt.Errorf("failed to create ISB Svc creating job, err: %w", err) + return fmt.Errorf("failed to create ISB Svc creating job, err: %w", err) } log.Infow("Created a job successfully for ISB Svc creating", zap.Any("buffers", bfs), zap.Any("buckets", bks), zap.Any("servingStreams", pl.GetServingSourceStreamNames())) } @@ -346,31 +355,22 @@ func (r *pipelineReconciler) 
reconcileNonLifecycleChanges(ctx context.Context, p batchJob := buildISBBatchJob(pl, r.image, isbSvc.Status.Config, "isbsvc-delete", args, "del") if err := r.client.Create(ctx, batchJob); err != nil && !apierrors.IsAlreadyExists(err) { pl.Status.MarkDeployFailed("CreateISBSvcDeletingJobFailed", err.Error()) - return ctrl.Result{}, fmt.Errorf("failed to create ISB Svc deleting job, err: %w", err) + return fmt.Errorf("failed to create ISB Svc deleting job, err: %w", err) } log.Infow("Created ISB Svc deleting job successfully", zap.Any("buffers", bfs), zap.Any("buckets", bks)) } // Daemon service if err := r.createOrUpdateDaemonService(ctx, pl); err != nil { - return ctrl.Result{}, err + return err } // Daemon deployment if err := r.createOrUpdateDaemonDeployment(ctx, pl, isbSvc.Status.Config); err != nil { - return ctrl.Result{}, err + return err } pl.Status.MarkDeployed() - // If the pipeline has a lifecycle change, then do not update the phase as - // this should happen only after the required configs for the lifecycle changes - // have been applied. 
- if !isLifecycleChange(pl) { - pl.Status.SetPhase(pl.Spec.Lifecycle.GetDesiredPhase(), "") - } - if err := r.checkChildrenResourceStatus(ctx, pl); err != nil { - return ctrl.Result{}, fmt.Errorf("failed to check pipeline children resource status, %w", err) - } - return ctrl.Result{}, nil + return nil } func (r *pipelineReconciler) createOrUpdateDaemonService(ctx context.Context, pl *dfv1.Pipeline) error { diff --git a/pkg/reconciler/pipeline/controller_test.go b/pkg/reconciler/pipeline/controller_test.go index 0cf9205f0a..2a1762aa4b 100644 --- a/pkg/reconciler/pipeline/controller_test.go +++ b/pkg/reconciler/pipeline/controller_test.go @@ -154,6 +154,18 @@ func init() { _ = batchv1.AddToScheme(scheme.Scheme) } +func fakeReconciler(t *testing.T, cl client.WithWatch) *pipelineReconciler { + t.Helper() + return &pipelineReconciler{ + client: cl, + scheme: scheme.Scheme, + config: reconciler.FakeGlobalConfig(t, fakeGlobalISBSvcConfig), + image: testFlowImage, + logger: zaptest.NewLogger(t).Sugar(), + recorder: record.NewFakeRecorder(64), + } +} + func Test_NewReconciler(t *testing.T) { cl := fake.NewClientBuilder().Build() r := NewReconciler(cl, scheme.Scheme, reconciler.FakeGlobalConfig(t, fakeGlobalISBSvcConfig), testFlowImage, zaptest.NewLogger(t).Sugar(), record.NewFakeRecorder(64)) @@ -162,23 +174,17 @@ func Test_NewReconciler(t *testing.T) { } func Test_reconcile(t *testing.T) { + ctx := context.TODO() + t.Run("test reconcile", func(t *testing.T) { - cl := fake.NewClientBuilder().Build() - ctx := context.TODO() testIsbSvc := testNativeRedisIsbSvc.DeepCopy() testIsbSvc.Status.MarkConfigured() testIsbSvc.Status.MarkDeployed() + cl := fake.NewClientBuilder().Build() err := cl.Create(ctx, testIsbSvc) assert.Nil(t, err) - r := &pipelineReconciler{ - client: cl, - scheme: scheme.Scheme, - config: reconciler.FakeGlobalConfig(t, fakeGlobalISBSvcConfig), - image: testFlowImage, - logger: zaptest.NewLogger(t).Sugar(), - recorder: record.NewFakeRecorder(64), - } testObj := 
testPipeline.DeepCopy() + r := fakeReconciler(t, cl) _, err = r.reconcile(ctx, testObj) assert.NoError(t, err) vertices := &dfv1.VertexList{} @@ -191,27 +197,50 @@ func Test_reconcile(t *testing.T) { assert.NoError(t, err) assert.Equal(t, 1, len(jobs.Items)) }) -} -func Test_reconcileEvents(t *testing.T) { + t.Run("test reconcile deleting", func(t *testing.T) { + testIsbSvc := testNativeRedisIsbSvc.DeepCopy() + testIsbSvc.Status.MarkConfigured() + testIsbSvc.Status.MarkDeployed() + cl := fake.NewClientBuilder().Build() + err := cl.Create(ctx, testIsbSvc) + assert.Nil(t, err) + testObj := testPipeline.DeepCopy() + testObj.DeletionTimestamp = &metav1.Time{Time: time.Now()} + r := fakeReconciler(t, cl) + _, err = r.reconcile(ctx, testObj) + assert.NoError(t, err) + }) - fakeConfig := reconciler.FakeGlobalConfig(t, fakeGlobalISBSvcConfig) - t.Run("test reconcile - invalid name", func(t *testing.T) { + t.Run("test reconcile - no isbsvc", func(t *testing.T) { + testObj := testPipeline.DeepCopy() cl := fake.NewClientBuilder().Build() - ctx := context.TODO() + r := fakeReconciler(t, cl) + _, err := r.reconcile(ctx, testObj) + assert.Error(t, err) + assert.Contains(t, err.Error(), "not found") + }) + + t.Run("test reconcile - isbsvc unhealthy", func(t *testing.T) { + testIsbSvc := testNativeRedisIsbSvc.DeepCopy() + testIsbSvc.Status.MarkConfigured() + cl := fake.NewClientBuilder().Build() + _ = cl.Create(ctx, testIsbSvc) + testObj := testPipeline.DeepCopy() + r := fakeReconciler(t, cl) + _, err := r.reconcile(ctx, testObj) + assert.Error(t, err) + assert.Contains(t, err.Error(), "not healthy") + }) + + t.Run("test reconcile - invalid name", func(t *testing.T) { testIsbSvc := testNativeRedisIsbSvc.DeepCopy() testIsbSvc.Status.MarkConfigured() testIsbSvc.Status.MarkDeployed() + cl := fake.NewClientBuilder().Build() + r := fakeReconciler(t, cl) err := cl.Create(ctx, testIsbSvc) assert.Nil(t, err) - r := &pipelineReconciler{ - client: cl, - scheme: scheme.Scheme, - config: 
fakeConfig, - image: testFlowImage, - logger: zaptest.NewLogger(t).Sugar(), - recorder: record.NewFakeRecorder(64), - } testObj := testPipeline.DeepCopy() testObj.Status.Phase = "Paused" _, err = r.reconcile(ctx, testObj) @@ -224,22 +253,14 @@ func Test_reconcileEvents(t *testing.T) { }) t.Run("test reconcile - duplicate vertex", func(t *testing.T) { - cl := fake.NewClientBuilder().Build() - ctx := context.TODO() testIsbSvc := testNativeRedisIsbSvc.DeepCopy() testIsbSvc.Status.MarkConfigured() testIsbSvc.Status.MarkDeployed() + cl := fake.NewClientBuilder().Build() err := cl.Create(ctx, testIsbSvc) assert.Nil(t, err) - r := &pipelineReconciler{ - client: cl, - scheme: scheme.Scheme, - config: fakeConfig, - image: testFlowImage, - logger: zaptest.NewLogger(t).Sugar(), - recorder: record.NewFakeRecorder(64), - } testObj := testPipeline.DeepCopy() + r := fakeReconciler(t, cl) _, err = r.reconcile(ctx, testObj) assert.NoError(t, err) testObj.Spec.Vertices = append(testObj.Spec.Vertices, dfv1.AbstractVertex{Name: "input", Source: &dfv1.Source{}}) @@ -279,14 +300,7 @@ func Test_pauseAndResumePipeline(t *testing.T) { testIsbSvc.Status.MarkDeployed() err := cl.Create(ctx, testIsbSvc) assert.Nil(t, err) - r := &pipelineReconciler{ - client: cl, - scheme: scheme.Scheme, - config: reconciler.FakeGlobalConfig(t, fakeGlobalISBSvcConfig), - image: testFlowImage, - logger: zaptest.NewLogger(t).Sugar(), - recorder: record.NewFakeRecorder(64), - } + r := fakeReconciler(t, cl) testObj := testPipeline.DeepCopy() testObj.Spec.Vertices[0].Scale.Min = ptr.To[int32](3) _, err = r.reconcile(ctx, testObj) @@ -316,14 +330,7 @@ func Test_pauseAndResumePipeline(t *testing.T) { testIsbSvc.Status.MarkDeployed() err := cl.Create(ctx, testIsbSvc) assert.Nil(t, err) - r := &pipelineReconciler{ - client: cl, - scheme: scheme.Scheme, - config: reconciler.FakeGlobalConfig(t, fakeGlobalISBSvcConfig), - image: testFlowImage, - logger: zaptest.NewLogger(t).Sugar(), - recorder: 
record.NewFakeRecorder(64), - } + r := fakeReconciler(t, cl) testObj := testReducePipeline.DeepCopy() _, err = r.reconcile(ctx, testObj) assert.NoError(t, err) @@ -566,14 +573,7 @@ func Test_cleanupBuffers(t *testing.T) { func TestCreateOrUpdateDaemon(t *testing.T) { cl := fake.NewClientBuilder().Build() ctx := context.TODO() - r := &pipelineReconciler{ - client: cl, - scheme: scheme.Scheme, - config: reconciler.FakeGlobalConfig(t, fakeGlobalISBSvcConfig), - image: testFlowImage, - logger: zaptest.NewLogger(t).Sugar(), - recorder: record.NewFakeRecorder(64), - } + r := fakeReconciler(t, cl) t.Run("test create or update service", func(t *testing.T) { testObj := testPipeline.DeepCopy() @@ -601,14 +601,7 @@ func TestCreateOrUpdateDaemon(t *testing.T) { func Test_createOrUpdateSIMDeployments(t *testing.T) { cl := fake.NewClientBuilder().Build() ctx := context.TODO() - r := &pipelineReconciler{ - client: cl, - scheme: scheme.Scheme, - config: reconciler.FakeGlobalConfig(t, fakeGlobalISBSvcConfig), - image: testFlowImage, - logger: zaptest.NewLogger(t).Sugar(), - recorder: record.NewFakeRecorder(64), - } + r := fakeReconciler(t, cl) t.Run("no side inputs", func(t *testing.T) { err := r.createOrUpdateSIMDeployments(ctx, testPipeline, fakeIsbSvcConfig) @@ -920,14 +913,7 @@ func Test_checkChildrenResourceStatus(t *testing.T) { testIsbSvc.Status.MarkDeployed() err := cl.Create(ctx, testIsbSvc) assert.Nil(t, err) - r := &pipelineReconciler{ - client: cl, - scheme: scheme.Scheme, - config: reconciler.FakeGlobalConfig(t, fakeGlobalISBSvcConfig), - image: testFlowImage, - logger: zaptest.NewLogger(t).Sugar(), - recorder: record.NewFakeRecorder(64), - } + r := fakeReconciler(t, cl) testObj := testPipelineWithSideinput.DeepCopy() _, err = r.reconcile(ctx, testObj) assert.NoError(t, err) diff --git a/pkg/reconciler/vertex/scaling/scaling.go b/pkg/reconciler/vertex/scaling/scaling.go index 03f581e6d7..5ea6b7e6d5 100644 --- a/pkg/reconciler/vertex/scaling/scaling.go +++ 
b/pkg/reconciler/vertex/scaling/scaling.go @@ -302,11 +302,11 @@ func (s *Scaler) scaleOneVertex(ctx context.Context, key string, worker int) err min := vertex.Spec.Scale.GetMinReplicas() if desired > max { desired = max - log.Infof("Calculated desired replica number %d of vertex %q is greater than max, using max %d.", vertex.Name, desired, max) + log.Infof("Calculated desired replica number %d of vertex %q is greater than max, using max %d.", desired, vertex.Name, max) } if desired < min { desired = min - log.Infof("Calculated desired replica number %d of vertex %q is smaller than min, using min %d.", vertex.Name, desired, min) + log.Infof("Calculated desired replica number %d of vertex %q is smaller than min, using min %d.", desired, vertex.Name, min) } if current > max || current < min { // Someone might have manually scaled up/down the vertex return s.patchVertexReplicas(ctx, vertex, desired) @@ -328,14 +328,14 @@ func (s *Scaler) scaleOneVertex(ctx context.Context, key string, worker int) err directPressure, downstreamPressure := s.hasBackPressure(*pl, *vertex) if directPressure { if current > min && current > 1 { // Scale down but not to 0 - log.Infof("Vertex %s has direct back pressure from connected vertices, decreasing one replica.", key) + log.Infof("Vertex %q has direct back pressure from connected vertices, decreasing one replica.", key) return s.patchVertexReplicas(ctx, vertex, current-1) } else { - log.Infof("Vertex %s has direct back pressure from connected vertices, skip scaling.", key) + log.Infof("Vertex %q has direct back pressure from connected vertices, skip scaling.", key) return nil } } else if downstreamPressure { - log.Infof("Vertex %s has back pressure in downstream vertices, skip scaling.", key) + log.Infof("Vertex %q has back pressure in downstream vertices, skip scaling.", key) return nil } maxAllowedUp := int32(vertex.Spec.Scale.GetReplicasPerScaleUp()) diff --git a/rust/numaflow-models/src/models/pipeline_status.rs 
b/rust/numaflow-models/src/models/pipeline_status.rs index e67205b3cd..2fa64471fd 100644 --- a/rust/numaflow-models/src/models/pipeline_status.rs +++ b/rust/numaflow-models/src/models/pipeline_status.rs @@ -21,7 +21,7 @@ pub struct PipelineStatus { /// Conditions are the latest available observations of a resource's current state. #[serde(rename = "conditions", skip_serializing_if = "Option::is_none")] pub conditions: Option>, - /// Field to indicate if a pipeline drain successfully occurred, or it timed out. Set to true when the Pipeline is in Paused state, and after it has successfully been drained. defaults to false + /// Field to indicate if a pipeline drain successfully occurred, only meaningful when the pipeline is paused. True means it has been successfully drained. #[serde(rename = "drainedOnPause", skip_serializing_if = "Option::is_none")] pub drained_on_pause: Option, #[serde(rename = "lastUpdated", skip_serializing_if = "Option::is_none")] From c4b4d0068012f06980595437b3bc39c73cace8ef Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Mon, 9 Sep 2024 10:09:07 -0700 Subject: [PATCH 052/188] feat: rolling update for Pipeline Vertex (#2040) Signed-off-by: Derek Wang --- api/json-schema/schema.json | 21 +- api/openapi-spec/swagger.json | 21 +- config/advanced-install/minimal-crds.yaml | 2 +- .../full/numaflow.numaproj.io_pipelines.yaml | 17 + .../full/numaflow.numaproj.io_vertices.yaml | 24 +- .../numaflow.numaproj.io_vertices.yaml | 2 +- config/install.yaml | 41 +- config/namespace-install.yaml | 41 +- docs/APIs.md | 59 +- .../configuration/sidecar-containers.md | 23 +- .../configuration/update-strategy.md | 60 + mkdocs.yml | 37 +- pkg/apis/numaflow/v1alpha1/generated.pb.go | 1119 +++++++++-------- pkg/apis/numaflow/v1alpha1/generated.proto | 37 +- .../v1alpha1/mono_vertex_types_test.go | 369 ++++++ .../numaflow/v1alpha1/openapi_generated.go | 38 +- pkg/apis/numaflow/v1alpha1/vertex_types.go | 48 +- .../numaflow/v1alpha1/vertex_types_test.go | 29 +- 
pkg/reconciler/monovertex/controller.go | 48 +- pkg/reconciler/monovertex/controller_test.go | 217 ++-- pkg/reconciler/pipeline/controller.go | 54 +- pkg/reconciler/pipeline/controller_test.go | 46 +- pkg/reconciler/pipeline/validate.go | 8 + pkg/reconciler/pipeline/validate_test.go | 41 + pkg/reconciler/vertex/controller.go | 419 ++++-- pkg/reconciler/vertex/controller_test.go | 354 ++++-- .../src/models/abstract_vertex.rs | 3 + .../numaflow-models/src/models/vertex_spec.rs | 3 + .../src/models/vertex_status.rs | 19 +- 29 files changed, 2205 insertions(+), 995 deletions(-) create mode 100644 docs/user-guide/reference/configuration/update-strategy.md diff --git a/api/json-schema/schema.json b/api/json-schema/schema.json index 44e18ef85a..bf1f28f594 100644 --- a/api/json-schema/schema.json +++ b/api/json-schema/schema.json @@ -17785,6 +17785,10 @@ "udf": { "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.UDF" }, + "updateStrategy": { + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.UpdateStrategy", + "description": "The strategy to use to replace existing pods with new ones." + }, "volumes": { "items": { "$ref": "#/definitions/io.k8s.api.core.v1.Volume" @@ -20608,6 +20612,10 @@ "udf": { "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.UDF" }, + "updateStrategy": { + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.UpdateStrategy", + "description": "The strategy to use to replace existing pods with new ones." 
+ }, "volumes": { "items": { "$ref": "#/definitions/io.k8s.api.core.v1.Volume" @@ -20639,11 +20647,11 @@ "x-kubernetes-patch-strategy": "merge" }, "currentHash": { - "description": "If not empty, indicates the version of the Vertex used to generate Pods in the sequence [0,currentReplicas).", + "description": "If not empty, indicates the current version of the Vertex used to generate Pods.", "type": "string" }, - "currentReplicas": { - "description": "The number of Pods created by the controller from the Vertex version indicated by currentHash.", + "desiredReplicas": { + "description": "The number of desired replicas.", "format": "int64", "type": "integer" }, @@ -20679,9 +20687,14 @@ "type": "string" }, "updateHash": { - "description": "If not empty, indicates the version of the Vertx used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", + "description": "If not empty, indicates the updated version of the Vertex used to generate Pods.", "type": "string" }, + "updatedReadyReplicas": { + "description": "The number of ready Pods created by the controller from the Vertex version indicated by updateHash.", + "format": "int64", + "type": "integer" + }, "updatedReplicas": { "description": "The number of Pods created by the controller from the Vertex version indicated by updateHash.", "format": "int64", diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 7067416291..91e6e43fb6 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -17793,6 +17793,10 @@ "udf": { "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.UDF" }, + "updateStrategy": { + "description": "The strategy to use to replace existing pods with new ones.", + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.UpdateStrategy" + }, "volumes": { "type": "array", "items": { @@ -20590,6 +20594,10 @@ "udf": { "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.UDF" }, + "updateStrategy": { + "description": "The strategy to use to 
replace existing pods with new ones.", + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.UpdateStrategy" + }, "volumes": { "type": "array", "items": { @@ -20617,11 +20625,11 @@ "x-kubernetes-patch-strategy": "merge" }, "currentHash": { - "description": "If not empty, indicates the version of the Vertex used to generate Pods in the sequence [0,currentReplicas).", + "description": "If not empty, indicates the current version of the Vertex used to generate Pods.", "type": "string" }, - "currentReplicas": { - "description": "The number of Pods created by the controller from the Vertex version indicated by currentHash.", + "desiredReplicas": { + "description": "The number of desired replicas.", "type": "integer", "format": "int64" }, @@ -20657,9 +20665,14 @@ "type": "string" }, "updateHash": { - "description": "If not empty, indicates the version of the Vertx used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", + "description": "If not empty, indicates the updated version of the Vertex used to generate Pods.", "type": "string" }, + "updatedReadyReplicas": { + "description": "The number of ready Pods created by the controller from the Vertex version indicated by updateHash.", + "type": "integer", + "format": "int64" + }, "updatedReplicas": { "description": "The number of Pods created by the controller from the Vertex version indicated by updateHash.", "type": "integer", diff --git a/config/advanced-install/minimal-crds.yaml b/config/advanced-install/minimal-crds.yaml index a8eac9fc22..edf1df3528 100644 --- a/config/advanced-install/minimal-crds.yaml +++ b/config/advanced-install/minimal-crds.yaml @@ -206,7 +206,7 @@ spec: - jsonPath: .status.phase name: Phase type: string - - jsonPath: .spec.replicas + - jsonPath: .status.desiredReplicas name: Desired type: string - jsonPath: .status.replicas diff --git a/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml b/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml index 
6fc509dc96..40db1f403e 100644 --- a/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml @@ -9037,6 +9037,23 @@ spec: - window type: object type: object + updateStrategy: + default: + rollingUpdate: + maxUnavailable: 25% + type: RollingUpdate + properties: + rollingUpdate: + properties: + maxUnavailable: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + type: object + type: + type: string + type: object volumes: items: properties: diff --git a/config/base/crds/full/numaflow.numaproj.io_vertices.yaml b/config/base/crds/full/numaflow.numaproj.io_vertices.yaml index e7bb52a8d8..1f02fd2b35 100644 --- a/config/base/crds/full/numaflow.numaproj.io_vertices.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_vertices.yaml @@ -21,7 +21,7 @@ spec: - jsonPath: .status.phase name: Phase type: string - - jsonPath: .spec.replicas + - jsonPath: .status.desiredReplicas name: Desired type: string - jsonPath: .status.replicas @@ -4701,6 +4701,23 @@ spec: - window type: object type: object + updateStrategy: + default: + rollingUpdate: + maxUnavailable: 25% + type: RollingUpdate + properties: + rollingUpdate: + properties: + maxUnavailable: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + type: object + type: + type: string + type: object volumes: items: properties: @@ -5486,7 +5503,7 @@ spec: type: array currentHash: type: string - currentReplicas: + desiredReplicas: format: int32 type: integer lastScaledAt: @@ -5515,6 +5532,9 @@ spec: type: string updateHash: type: string + updatedReadyReplicas: + format: int32 + type: integer updatedReplicas: format: int32 type: integer diff --git a/config/base/crds/minimal/numaflow.numaproj.io_vertices.yaml b/config/base/crds/minimal/numaflow.numaproj.io_vertices.yaml index 68a95ee056..56799a7791 100644 --- a/config/base/crds/minimal/numaflow.numaproj.io_vertices.yaml +++ 
b/config/base/crds/minimal/numaflow.numaproj.io_vertices.yaml @@ -17,7 +17,7 @@ spec: - jsonPath: .status.phase name: Phase type: string - - jsonPath: .spec.replicas + - jsonPath: .status.desiredReplicas name: Desired type: string - jsonPath: .status.replicas diff --git a/config/install.yaml b/config/install.yaml index d19f2fa2f2..c3db75767a 100644 --- a/config/install.yaml +++ b/config/install.yaml @@ -17310,6 +17310,23 @@ spec: - window type: object type: object + updateStrategy: + default: + rollingUpdate: + maxUnavailable: 25% + type: RollingUpdate + properties: + rollingUpdate: + properties: + maxUnavailable: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + type: object + type: + type: string + type: object volumes: items: properties: @@ -18168,7 +18185,7 @@ spec: - jsonPath: .status.phase name: Phase type: string - - jsonPath: .spec.replicas + - jsonPath: .status.desiredReplicas name: Desired type: string - jsonPath: .status.replicas @@ -22848,6 +22865,23 @@ spec: - window type: object type: object + updateStrategy: + default: + rollingUpdate: + maxUnavailable: 25% + type: RollingUpdate + properties: + rollingUpdate: + properties: + maxUnavailable: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + type: object + type: + type: string + type: object volumes: items: properties: @@ -23633,7 +23667,7 @@ spec: type: array currentHash: type: string - currentReplicas: + desiredReplicas: format: int32 type: integer lastScaledAt: @@ -23662,6 +23696,9 @@ spec: type: string updateHash: type: string + updatedReadyReplicas: + format: int32 + type: integer updatedReplicas: format: int32 type: integer diff --git a/config/namespace-install.yaml b/config/namespace-install.yaml index 5919b0fcf8..5162dc6ad7 100644 --- a/config/namespace-install.yaml +++ b/config/namespace-install.yaml @@ -17310,6 +17310,23 @@ spec: - window type: object type: object + updateStrategy: + default: + rollingUpdate: + maxUnavailable: 25% 
+ type: RollingUpdate + properties: + rollingUpdate: + properties: + maxUnavailable: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + type: object + type: + type: string + type: object volumes: items: properties: @@ -18168,7 +18185,7 @@ spec: - jsonPath: .status.phase name: Phase type: string - - jsonPath: .spec.replicas + - jsonPath: .status.desiredReplicas name: Desired type: string - jsonPath: .status.replicas @@ -22848,6 +22865,23 @@ spec: - window type: object type: object + updateStrategy: + default: + rollingUpdate: + maxUnavailable: 25% + type: RollingUpdate + properties: + rollingUpdate: + properties: + maxUnavailable: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + type: object + type: + type: string + type: object volumes: items: properties: @@ -23633,7 +23667,7 @@ spec: type: array currentHash: type: string - currentReplicas: + desiredReplicas: format: int32 type: integer lastScaledAt: @@ -23662,6 +23696,9 @@ spec: type: string updateHash: type: string + updatedReadyReplicas: + format: int32 + type: integer updatedReplicas: format: int32 type: integer diff --git a/docs/APIs.md b/docs/APIs.md index bd71cf0592..da6ab6eeb1 100644 --- a/docs/APIs.md +++ b/docs/APIs.md @@ -840,6 +840,27 @@ Container template for the side inputs watcher container. + + + + +updateStrategy
+ UpdateStrategy + + + + + +(Optional) +

+ +The strategy to use to replace existing pods with new ones. +

+ + + + + @@ -10804,6 +10825,7 @@ UpdateStrategy

(Appears on: +AbstractVertex, MonoVertexSpec)

@@ -11593,6 +11615,25 @@ labels match the selector). +desiredReplicas
uint32 + + + + +(Optional) +

+ +The number of desired replicas. +

+ + + + + + + + + selector
string @@ -11694,7 +11735,7 @@ The number of pods targeted by this Vertex with a Ready Condition. -currentReplicas
uint32 +updatedReplicas
uint32 @@ -11702,7 +11743,7 @@ The number of pods targeted by this Vertex with a Ready Condition.

The number of Pods created by the controller from the Vertex version -indicated by currentHash. +indicated by updateHash.

@@ -11713,15 +11754,15 @@ indicated by currentHash. -updatedReplicas
uint32 +updatedReadyReplicas
uint32

-The number of Pods created by the controller from the Vertex version -indicated by updateHash. +The number of ready Pods created by the controller from the Vertex +version indicated by updateHash.

@@ -11739,8 +11780,8 @@ indicated by updateHash.

-If not empty, indicates the version of the Vertex used to generate Pods -in the sequence \[0,currentReplicas). +If not empty, indicates the current version of the Vertex used to +generate Pods.

@@ -11758,8 +11799,8 @@ in the sequence \[0,currentReplicas).

-If not empty, indicates the version of the Vertx used to generate Pods -in the sequence \[replicas-updatedReplicas,replicas) +If not empty, indicates the updated version of the Vertex used to +generate Pods.

diff --git a/docs/user-guide/reference/configuration/sidecar-containers.md b/docs/user-guide/reference/configuration/sidecar-containers.md index 24aaadbcfe..72d07634c6 100644 --- a/docs/user-guide/reference/configuration/sidecar-containers.md +++ b/docs/user-guide/reference/configuration/sidecar-containers.md @@ -1,6 +1,6 @@ # Sidecar Containers -Additional "[sidecar](https://kubernetes.io/docs/concepts/workloads/pods/#how-pods-manage-multiple-containers)" containers can be provided for `udf` and `sink` vertices. `source` vertices do not currently support sidecars. +Additional "[sidecar](https://kubernetes.io/docs/concepts/workloads/pods/#how-pods-manage-multiple-containers)" containers can be provided for `source`, `udf` and `sink` vertices. The following example shows how to add a sidecar container to a `udf` vertex. @@ -15,7 +15,12 @@ spec: sidecars: - name: my-sidecar image: busybox:latest - command: ["/bin/sh", "-c", "echo \"my-sidecar is running!\" && tail -f /dev/null"] + command: + [ + "/bin/sh", + "-c", + 'echo "my-sidecar is running!" 
&& tail -f /dev/null', + ] udf: container: image: my-function:latest @@ -42,14 +47,24 @@ spec: sidecars: - name: my-sidecar image: alpine:latest - command: ["/bin/sh", "-c", "apk add socat && socat UNIX-LISTEN:/path/to/my-sidecar-mount-path/my.sock - && tail -f /dev/null"] + command: + [ + "/bin/sh", + "-c", + "apk add socat && socat UNIX-LISTEN:/path/to/my-sidecar-mount-path/my.sock - && tail -f /dev/null", + ] volumeMounts: - mountPath: /path/to/my-sidecar-mount-path name: my-udf-volume udf: container: image: alpine:latest - command: ["/bin/sh", "-c", "apk add socat && echo \"hello\" | socat UNIX-CONNECT:/path/to/my-udf-mount-path/my.sock,forever - && tail -f /dev/null"] + command: + [ + "/bin/sh", + "-c", + 'apk add socat && echo "hello" | socat UNIX-CONNECT:/path/to/my-udf-mount-path/my.sock,forever - && tail -f /dev/null', + ] volumeMounts: - mountPath: /path/to/my-udf-mount-path name: my-udf-volume diff --git a/docs/user-guide/reference/configuration/update-strategy.md b/docs/user-guide/reference/configuration/update-strategy.md new file mode 100644 index 0000000000..e105be5c89 --- /dev/null +++ b/docs/user-guide/reference/configuration/update-strategy.md @@ -0,0 +1,60 @@ +# Update Strategy + +When spec changes, the `RollingUpdate` update strategy is used to update pods in a `Pipeline` or `MonoVertex` by default, which means that the update is done in a rolling fashion. The default configuration is as below. + +```yaml +updateStrategy: + rollingUpdate: + maxUnavailable: 25% + type: RollingUpdate +``` + +- `maxUnavailable`: The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: `5`) or a percentage of total pods at the start of update (ex: `10%`). Absolute number is calculated from percentage by rounding up. Defaults to `25%`. + +## How It Works + +The `RollingUpdate` strategy in Numaflow works more like the `RollingUpdate` strategy in `StatefulSet` rather than `Deployment`. 
It does not create `maxUnavailable` new pods and wait for them to be ready before terminating the old pods. Instead it replaces `maxUnavailable` number of pods with the new spec, then waits for them to be ready before updating the next batch. + +For example, if there are 20 pods running, and `maxUnavailable` is set to the default `25%`, during the update, 5 pods will be unavailable at the same time. The update will be done in 4 batches. If your application has a long startup time, and you are sensitive to the unavailability caused tail latency, you should set `maxUnavailable` to a smaller value, and adjust the `scale.min` if it's needed. + +During rolling update, [autoscaling](../autoscaling.md) will not be triggered for that particular Vertex or MonoVertex. + +## Examples + +A `Pipeline` example. + +```yaml +apiVersion: numaflow.numaproj.io/v1alpha1 +kind: Pipeline +metadata: + name: simple-pipeline +spec: + vertices: + - name: my-vertex + updateStrategy: + rollingUpdate: + maxUnavailable: 25% + type: RollingUpdate +``` + +A `MonoVertex` example. 
+ +```yaml +apiVersion: numaflow.numaproj.io/v1alpha1 +kind: MonoVertex +metadata: + name: my-mvtx +spec: + source: + udsource: + container: + image: my-image1 + sink: + udsink: + container: + image: my-image2 + updateStrategy: + rollingUpdate: + maxUnavailable: 2 + type: RollingUpdate +``` diff --git a/mkdocs.yml b/mkdocs.yml index 7e57a6af21..55dac8b1c1 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -93,17 +93,18 @@ nav: - user-guide/reference/join-vertex.md - user-guide/reference/multi-partition.md - user-guide/reference/side-inputs.md - - Configuration: - - user-guide/reference/configuration/pod-specifications.md - - user-guide/reference/configuration/container-resources.md - - user-guide/reference/configuration/volumes.md - - user-guide/reference/configuration/environment-variables.md - - user-guide/reference/configuration/labels-and-annotations.md - - user-guide/reference/configuration/init-containers.md - - user-guide/reference/configuration/sidecar-containers.md - - user-guide/reference/configuration/pipeline-customization.md - - user-guide/reference/configuration/istio.md - - user-guide/reference/configuration/max-message-size.md + - Configuration: + - user-guide/reference/configuration/pod-specifications.md + - user-guide/reference/configuration/container-resources.md + - user-guide/reference/configuration/volumes.md + - user-guide/reference/configuration/environment-variables.md + - user-guide/reference/configuration/labels-and-annotations.md + - user-guide/reference/configuration/init-containers.md + - user-guide/reference/configuration/sidecar-containers.md + - user-guide/reference/configuration/pipeline-customization.md + - user-guide/reference/configuration/istio.md + - user-guide/reference/configuration/max-message-size.md + - user-guide/reference/configuration/update-strategy.md - user-guide/reference/kustomize/kustomize.md - APIs.md - Use Cases: @@ -116,15 +117,15 @@ nav: - Configuration: - Controller Configuration: 
"operations/controller-configmap.md" - UI Server: - - Access Path: "operations/ui/ui-access-path.md" - - Authentication: - - Overview: "operations/ui/authn/authentication.md" - - SSO with Dex: "operations/ui/authn/dex.md" - - Local Users: "operations/ui/authn/local-users.md" - - Authorization: "operations/ui/authz/rbac.md" + - Access Path: "operations/ui/ui-access-path.md" + - Authentication: + - Overview: "operations/ui/authn/authentication.md" + - SSO with Dex: "operations/ui/authn/dex.md" + - Local Users: "operations/ui/authn/local-users.md" + - Authorization: "operations/ui/authz/rbac.md" - operations/metrics/metrics.md - operations/grafana.md - - Security: operations/security.md + - Security: operations/security.md - Contributor Guide: - development/development.md - Specifications: diff --git a/pkg/apis/numaflow/v1alpha1/generated.pb.go b/pkg/apis/numaflow/v1alpha1/generated.pb.go index 5297014695..65ac7601e5 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.pb.go +++ b/pkg/apis/numaflow/v1alpha1/generated.pb.go @@ -2822,502 +2822,501 @@ func init() { } var fileDescriptor_9d0d1b17d3865563 = []byte{ - // 7907 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x5d, 0x6c, 0x25, 0x47, - 0x76, 0x9e, 0xee, 0xff, 0xbd, 0xe7, 0xf2, 0x6f, 0x6a, 0x7e, 0xc4, 0x19, 0x8d, 0x86, 0xe3, 0x96, - 0x25, 0x8f, 0x63, 0x9b, 0x8c, 0x68, 0xfd, 0xad, 0xed, 0x5d, 0x89, 0x97, 0x1c, 0x72, 0x38, 0x43, - 0xce, 0x70, 0xcf, 0x25, 0x47, 0x5a, 0x2b, 0x5e, 0xa5, 0xd9, 0x5d, 0xbc, 0x6c, 0xb1, 0x6f, 0xf7, - 0x55, 0x77, 0x5f, 0xce, 0x50, 0x4e, 0xb0, 0xb6, 0x95, 0x40, 0x0a, 0x82, 0x20, 0x81, 0x9f, 0x0c, - 0x04, 0x4e, 0x90, 0x20, 0x80, 0x1f, 0x0c, 0xe7, 0x21, 0xc0, 0xe6, 0xc1, 0x40, 0xe2, 0x38, 0x08, - 0x92, 0x4d, 0x90, 0x9f, 0x45, 0x10, 0x20, 0xca, 0x0b, 0x91, 0x65, 0x90, 0x87, 0x04, 0x88, 0x61, - 0xc4, 0x48, 0xec, 0x0c, 0x16, 0xd9, 0xa0, 0xfe, 0xfa, 0xef, 0xf6, 0x9d, 0x21, 0x6f, 0x93, 0xa3, - 0x51, 0xac, 0xb7, 0xee, 0x3a, 0xa7, 0xbe, 
0x53, 0x55, 0x5d, 0x5d, 0x75, 0xea, 0x9c, 0x53, 0x55, - 0xb0, 0xd2, 0xb1, 0x82, 0xdd, 0xfe, 0xf6, 0xac, 0xe1, 0x76, 0xe7, 0x9c, 0x7e, 0x57, 0xef, 0x79, - 0xee, 0x87, 0xfc, 0x61, 0xc7, 0x76, 0x1f, 0xcc, 0xf5, 0xf6, 0x3a, 0x73, 0x7a, 0xcf, 0xf2, 0xa3, - 0x94, 0xfd, 0x57, 0x75, 0xbb, 0xb7, 0xab, 0xbf, 0x3a, 0xd7, 0xa1, 0x0e, 0xf5, 0xf4, 0x80, 0x9a, - 0xb3, 0x3d, 0xcf, 0x0d, 0x5c, 0xf2, 0x66, 0x04, 0x34, 0xab, 0x80, 0x66, 0x55, 0xb6, 0xd9, 0xde, - 0x5e, 0x67, 0x96, 0x01, 0x45, 0x29, 0x0a, 0xe8, 0xca, 0xcf, 0xc4, 0x4a, 0xd0, 0x71, 0x3b, 0xee, - 0x1c, 0xc7, 0xdb, 0xee, 0xef, 0xf0, 0x37, 0xfe, 0xc2, 0x9f, 0x84, 0x9c, 0x2b, 0xda, 0xde, 0x5b, - 0xfe, 0xac, 0xe5, 0xb2, 0x62, 0xcd, 0x19, 0xae, 0x47, 0xe7, 0xf6, 0x07, 0xca, 0x72, 0xe5, 0xb5, - 0x88, 0xa7, 0xab, 0x1b, 0xbb, 0x96, 0x43, 0xbd, 0x03, 0x55, 0x97, 0x39, 0x8f, 0xfa, 0x6e, 0xdf, - 0x33, 0xe8, 0x89, 0x72, 0xf9, 0x73, 0x5d, 0x1a, 0xe8, 0x59, 0xb2, 0xe6, 0x86, 0xe5, 0xf2, 0xfa, - 0x4e, 0x60, 0x75, 0x07, 0xc5, 0xbc, 0xf1, 0xa4, 0x0c, 0xbe, 0xb1, 0x4b, 0xbb, 0xfa, 0x40, 0xbe, - 0x9f, 0x1d, 0x96, 0xaf, 0x1f, 0x58, 0xf6, 0x9c, 0xe5, 0x04, 0x7e, 0xe0, 0xa5, 0x33, 0x69, 0xbf, - 0x0f, 0x70, 0x7e, 0x61, 0xdb, 0x0f, 0x3c, 0xdd, 0x08, 0x36, 0x5c, 0x73, 0x93, 0x76, 0x7b, 0xb6, - 0x1e, 0x50, 0xb2, 0x07, 0x75, 0x56, 0x21, 0x53, 0x0f, 0xf4, 0xe9, 0xc2, 0xf5, 0xc2, 0x8d, 0xe6, - 0xfc, 0xc2, 0xec, 0x88, 0x1f, 0x70, 0x76, 0x5d, 0x02, 0xb5, 0xc6, 0x8e, 0x0e, 0x67, 0xea, 0xea, - 0x0d, 0x43, 0x01, 0xe4, 0x37, 0x0a, 0x30, 0xe6, 0xb8, 0x26, 0x6d, 0x53, 0x9b, 0x1a, 0x81, 0xeb, - 0x4d, 0x17, 0xaf, 0x97, 0x6e, 0x34, 0xe7, 0xbf, 0x3d, 0xb2, 0xc4, 0x8c, 0x1a, 0xcd, 0xde, 0x8d, - 0x09, 0xb8, 0xe9, 0x04, 0xde, 0x41, 0xeb, 0xc2, 0xf7, 0x0e, 0x67, 0x9e, 0x3b, 0x3a, 0x9c, 0x19, - 0x8b, 0x93, 0x30, 0x51, 0x12, 0xb2, 0x05, 0xcd, 0xc0, 0xb5, 0x59, 0x93, 0x59, 0xae, 0xe3, 0x4f, - 0x97, 0x78, 0xc1, 0xae, 0xcd, 0x8a, 0xa6, 0x66, 0xe2, 0x67, 0x59, 0x1f, 0x9b, 0xdd, 0x7f, 0x75, - 0x76, 0x33, 0x64, 0x6b, 0x9d, 0x97, 0xc0, 0xcd, 0x28, 0xcd, 0xc7, 0x38, 0x0e, 
0xa1, 0x30, 0xe9, - 0x53, 0xa3, 0xef, 0x59, 0xc1, 0xc1, 0xa2, 0xeb, 0x04, 0xf4, 0x61, 0x30, 0x5d, 0xe6, 0xad, 0xfc, - 0x4a, 0x16, 0xf4, 0x86, 0x6b, 0xb6, 0x93, 0xdc, 0xad, 0xf3, 0x47, 0x87, 0x33, 0x93, 0xa9, 0x44, - 0x4c, 0x63, 0x12, 0x07, 0xa6, 0xac, 0xae, 0xde, 0xa1, 0x1b, 0x7d, 0xdb, 0x6e, 0x53, 0xc3, 0xa3, - 0x81, 0x3f, 0x5d, 0xe1, 0x55, 0xb8, 0x91, 0x25, 0x67, 0xcd, 0x35, 0x74, 0xfb, 0xde, 0xf6, 0x87, - 0xd4, 0x08, 0x90, 0xee, 0x50, 0x8f, 0x3a, 0x06, 0x6d, 0x4d, 0xcb, 0xca, 0x4c, 0xad, 0xa6, 0x90, - 0x70, 0x00, 0x9b, 0xac, 0xc0, 0xb9, 0x9e, 0x67, 0xb9, 0xbc, 0x08, 0xb6, 0xee, 0xfb, 0x77, 0xf5, - 0x2e, 0x9d, 0xae, 0x5e, 0x2f, 0xdc, 0x68, 0xb4, 0x2e, 0x4b, 0x98, 0x73, 0x1b, 0x69, 0x06, 0x1c, - 0xcc, 0x43, 0x6e, 0x40, 0x5d, 0x25, 0x4e, 0xd7, 0xae, 0x17, 0x6e, 0x54, 0x44, 0xdf, 0x51, 0x79, - 0x31, 0xa4, 0x92, 0x65, 0xa8, 0xeb, 0x3b, 0x3b, 0x96, 0xc3, 0x38, 0xeb, 0xbc, 0x09, 0xaf, 0x66, - 0x55, 0x6d, 0x41, 0xf2, 0x08, 0x1c, 0xf5, 0x86, 0x61, 0x5e, 0x72, 0x1b, 0x88, 0x4f, 0xbd, 0x7d, - 0xcb, 0xa0, 0x0b, 0x86, 0xe1, 0xf6, 0x9d, 0x80, 0x97, 0xbd, 0xc1, 0xcb, 0x7e, 0x45, 0x96, 0x9d, - 0xb4, 0x07, 0x38, 0x30, 0x23, 0x17, 0x79, 0x07, 0xa6, 0xe4, 0xbf, 0x1a, 0xb5, 0x02, 0x70, 0xa4, - 0x0b, 0xac, 0x21, 0x31, 0x45, 0xc3, 0x01, 0x6e, 0x62, 0xc2, 0x55, 0xbd, 0x1f, 0xb8, 0x5d, 0x06, - 0x99, 0x14, 0xba, 0xe9, 0xee, 0x51, 0x67, 0xba, 0x79, 0xbd, 0x70, 0xa3, 0xde, 0xba, 0x7e, 0x74, - 0x38, 0x73, 0x75, 0xe1, 0x31, 0x7c, 0xf8, 0x58, 0x14, 0x72, 0x0f, 0x1a, 0xa6, 0xe3, 0x6f, 0xb8, - 0xb6, 0x65, 0x1c, 0x4c, 0x8f, 0xf1, 0x02, 0xbe, 0x2a, 0xab, 0xda, 0x58, 0xba, 0xdb, 0x16, 0x84, - 0x47, 0x87, 0x33, 0x57, 0x07, 0x87, 0xd4, 0xd9, 0x90, 0x8e, 0x11, 0x06, 0x59, 0xe7, 0x80, 0x8b, - 0xae, 0xb3, 0x63, 0x75, 0xa6, 0xc7, 0xf9, 0xd7, 0xb8, 0x3e, 0xa4, 0x43, 0x2f, 0xdd, 0x6d, 0x0b, - 0xbe, 0xd6, 0xb8, 0x14, 0x27, 0x5e, 0x31, 0x42, 0x20, 0x26, 0x4c, 0xa8, 0xc1, 0x78, 0xd1, 0xd6, - 0xad, 0xae, 0x3f, 0x3d, 0xc1, 0x3b, 0xef, 0x8f, 0x0f, 0xc1, 0xc4, 0x38, 0x73, 0xeb, 0x92, 0xac, - 0xca, 0x44, 0x22, 
0xd9, 0xc7, 0x14, 0xe6, 0x95, 0xb7, 0xe1, 0xdc, 0xc0, 0xd8, 0x40, 0xa6, 0xa0, - 0xb4, 0x47, 0x0f, 0xf8, 0xd0, 0xd7, 0x40, 0xf6, 0x48, 0x2e, 0x40, 0x65, 0x5f, 0xb7, 0xfb, 0x74, - 0xba, 0xc8, 0xd3, 0xc4, 0xcb, 0xcf, 0x15, 0xdf, 0x2a, 0x68, 0x7f, 0xb7, 0x04, 0x63, 0x6a, 0xc4, - 0x69, 0x5b, 0xce, 0x1e, 0x79, 0x17, 0x4a, 0xb6, 0xdb, 0x91, 0xe3, 0xe6, 0x2f, 0x8c, 0x3c, 0x8a, - 0xad, 0xb9, 0x9d, 0x56, 0xed, 0xe8, 0x70, 0xa6, 0xb4, 0xe6, 0x76, 0x90, 0x21, 0x12, 0x03, 0x2a, - 0x7b, 0xfa, 0xce, 0x9e, 0xce, 0xcb, 0xd0, 0x9c, 0x6f, 0x8d, 0x0c, 0x7d, 0x87, 0xa1, 0xb0, 0xb2, - 0xb6, 0x1a, 0x47, 0x87, 0x33, 0x15, 0xfe, 0x8a, 0x02, 0x9b, 0xb8, 0xd0, 0xd8, 0xb6, 0x75, 0x63, - 0x6f, 0xd7, 0xb5, 0xe9, 0x74, 0x29, 0xa7, 0xa0, 0x96, 0x42, 0x12, 0x9f, 0x39, 0x7c, 0xc5, 0x48, - 0x06, 0x31, 0xa0, 0xda, 0x37, 0x7d, 0xcb, 0xd9, 0x93, 0x63, 0xe0, 0xdb, 0x23, 0x4b, 0xdb, 0x5a, - 0xe2, 0x75, 0x82, 0xa3, 0xc3, 0x99, 0xaa, 0x78, 0x46, 0x09, 0xad, 0xfd, 0x41, 0x13, 0x26, 0xd4, - 0x47, 0xba, 0x4f, 0xbd, 0x80, 0x3e, 0x24, 0xd7, 0xa1, 0xec, 0xb0, 0x5f, 0x93, 0x7f, 0xe4, 0xd6, - 0x98, 0xec, 0x2e, 0x65, 0xfe, 0x4b, 0x72, 0x0a, 0x2b, 0x99, 0xe8, 0x2a, 0xb2, 0xc1, 0x47, 0x2f, - 0x59, 0x9b, 0xc3, 0x88, 0x92, 0x89, 0x67, 0x94, 0xd0, 0xe4, 0x7d, 0x28, 0xf3, 0xca, 0x8b, 0xa6, - 0xfe, 0xfa, 0xe8, 0x22, 0x58, 0xd5, 0xeb, 0xac, 0x06, 0xbc, 0xe2, 0x1c, 0x94, 0x75, 0xc5, 0xbe, - 0xb9, 0x23, 0x1b, 0xf6, 0x17, 0x72, 0x34, 0xec, 0xb2, 0xe8, 0x8a, 0x5b, 0x4b, 0xcb, 0xc8, 0x10, - 0xc9, 0x5f, 0x2f, 0xc0, 0x39, 0xc3, 0x75, 0x02, 0x9d, 0xe9, 0x19, 0x6a, 0x92, 0x9d, 0xae, 0x70, - 0x39, 0xb7, 0x47, 0x96, 0xb3, 0x98, 0x46, 0x6c, 0x5d, 0x64, 0x73, 0xc6, 0x40, 0x32, 0x0e, 0xca, - 0x26, 0x7f, 0xb3, 0x00, 0x17, 0xd9, 0x58, 0x3e, 0xc0, 0xcc, 0x67, 0xa0, 0xd3, 0x2d, 0xd5, 0xe5, - 0xa3, 0xc3, 0x99, 0x8b, 0xab, 0x59, 0xc2, 0x30, 0xbb, 0x0c, 0xac, 0x74, 0xe7, 0xf5, 0x41, 0xb5, - 0x84, 0xcf, 0x6e, 0xcd, 0xf9, 0xb5, 0xd3, 0x54, 0x75, 0x5a, 0x2f, 0xc8, 0xae, 0x9c, 0xa5, 0xd9, - 0x61, 0x56, 0x29, 0xc8, 0x4d, 0xa8, 0xed, 0xbb, 0x76, 
0xbf, 0x4b, 0xfd, 0xe9, 0x3a, 0x1f, 0x62, - 0xaf, 0x64, 0x0d, 0xb1, 0xf7, 0x39, 0x4b, 0x6b, 0x52, 0xc2, 0xd7, 0xc4, 0xbb, 0x8f, 0x2a, 0x2f, - 0xb1, 0xa0, 0x6a, 0x5b, 0x5d, 0x2b, 0xf0, 0xf9, 0xc4, 0xd9, 0x9c, 0xbf, 0x39, 0x72, 0xb5, 0xc4, - 0x2f, 0xba, 0xc6, 0xc1, 0xc4, 0x5f, 0x23, 0x9e, 0x51, 0x0a, 0x60, 0x43, 0xa1, 0x6f, 0xe8, 0xb6, - 0x98, 0x58, 0x9b, 0xf3, 0xdf, 0x18, 0xfd, 0xb7, 0x61, 0x28, 0xad, 0x71, 0x59, 0xa7, 0x0a, 0x7f, - 0x45, 0x81, 0x4d, 0x7e, 0x09, 0x26, 0x12, 0x5f, 0xd3, 0x9f, 0x6e, 0xf2, 0xd6, 0x79, 0x31, 0xab, - 0x75, 0x42, 0xae, 0x68, 0xe6, 0x49, 0xf4, 0x10, 0x1f, 0x53, 0x60, 0xe4, 0x0e, 0xd4, 0x7d, 0xcb, - 0xa4, 0x86, 0xee, 0xf9, 0xd3, 0x63, 0xc7, 0x01, 0x9e, 0x92, 0xc0, 0xf5, 0xb6, 0xcc, 0x86, 0x21, - 0x00, 0x99, 0x05, 0xe8, 0xe9, 0x5e, 0x60, 0x09, 0x45, 0x75, 0x9c, 0x2b, 0x4d, 0x13, 0x47, 0x87, - 0x33, 0xb0, 0x11, 0xa6, 0x62, 0x8c, 0x83, 0xf1, 0xb3, 0xbc, 0xab, 0x4e, 0xaf, 0x1f, 0x88, 0x89, - 0xb5, 0x21, 0xf8, 0xdb, 0x61, 0x2a, 0xc6, 0x38, 0xc8, 0xef, 0x14, 0xe0, 0x85, 0xe8, 0x75, 0xf0, - 0x27, 0x9b, 0x3c, 0xf5, 0x9f, 0x6c, 0xe6, 0xe8, 0x70, 0xe6, 0x85, 0xf6, 0x70, 0x91, 0xf8, 0xb8, - 0xf2, 0x68, 0xef, 0xc2, 0xf8, 0x42, 0x3f, 0xd8, 0x75, 0x3d, 0xeb, 0x63, 0xae, 0x74, 0x93, 0x65, - 0xa8, 0x04, 0x5c, 0x79, 0x12, 0xf3, 0xf2, 0xcb, 0x59, 0x4d, 0x2d, 0x14, 0xd9, 0x3b, 0xf4, 0x40, - 0x69, 0x03, 0x62, 0x7e, 0x14, 0xca, 0x94, 0xc8, 0xae, 0xfd, 0xa5, 0x02, 0xd4, 0x5a, 0xba, 0xb1, - 0xe7, 0xee, 0xec, 0x90, 0xf7, 0xa0, 0x6e, 0x39, 0x01, 0xf5, 0xf6, 0x75, 0x5b, 0xc2, 0xce, 0xc6, - 0x60, 0xc3, 0x65, 0x58, 0x54, 0x6f, 0xb6, 0xe6, 0x61, 0x82, 0x96, 0xfa, 0x72, 0xad, 0xc0, 0xf5, - 0xd1, 0x55, 0x89, 0x81, 0x21, 0x1a, 0x99, 0x81, 0x8a, 0x1f, 0xd0, 0x9e, 0xcf, 0x67, 0x9e, 0x71, - 0x51, 0x8c, 0x36, 0x4b, 0x40, 0x91, 0xae, 0xfd, 0x9d, 0x02, 0x34, 0x5a, 0xba, 0x6f, 0x19, 0xac, - 0x96, 0x64, 0x11, 0xca, 0x7d, 0x9f, 0x7a, 0x27, 0xab, 0x1b, 0x9f, 0x2c, 0xb6, 0x7c, 0xea, 0x21, - 0xcf, 0x4c, 0xee, 0x41, 0xbd, 0xa7, 0xfb, 0xfe, 0x03, 0xd7, 0x33, 0xe5, 0x84, 0x77, 0x4c, 
0x20, - 0xa1, 0x9c, 0xcb, 0xac, 0x18, 0x82, 0x68, 0x4d, 0x88, 0x66, 0x7c, 0xed, 0x8f, 0x0a, 0x70, 0xbe, - 0xd5, 0xdf, 0xd9, 0xa1, 0x9e, 0xd4, 0x45, 0xa5, 0x96, 0x47, 0xa1, 0xe2, 0x51, 0xd3, 0xf2, 0x65, - 0xd9, 0x97, 0x46, 0xee, 0x41, 0xc8, 0x50, 0xa4, 0x52, 0xc9, 0xdb, 0x8b, 0x27, 0xa0, 0x40, 0x27, - 0x7d, 0x68, 0x7c, 0x48, 0xd9, 0x1a, 0x98, 0xea, 0x5d, 0x59, 0xbb, 0x5b, 0x23, 0x8b, 0xba, 0x4d, - 0x83, 0x36, 0x47, 0x8a, 0xeb, 0xb0, 0x61, 0x22, 0x46, 0x92, 0xb4, 0xdf, 0xaf, 0xc0, 0xd8, 0xa2, - 0xdb, 0xdd, 0xb6, 0x1c, 0x6a, 0xde, 0x34, 0x3b, 0x94, 0x7c, 0x00, 0x65, 0x6a, 0x76, 0xa8, 0xac, - 0xed, 0xe8, 0xd3, 0x3d, 0x03, 0x8b, 0x94, 0x16, 0xf6, 0x86, 0x1c, 0x98, 0xac, 0xc1, 0xc4, 0x8e, - 0xe7, 0x76, 0xc5, 0x08, 0xba, 0x79, 0xd0, 0x93, 0x1a, 0x6b, 0xeb, 0xc7, 0xd5, 0xa8, 0xb4, 0x9c, - 0xa0, 0x3e, 0x3a, 0x9c, 0x81, 0xe8, 0x0d, 0x53, 0x79, 0xc9, 0x7b, 0x30, 0x1d, 0xa5, 0x84, 0x43, - 0xc9, 0x22, 0x5b, 0x44, 0x70, 0x8d, 0xa5, 0xd2, 0xba, 0x7a, 0x74, 0x38, 0x33, 0xbd, 0x3c, 0x84, - 0x07, 0x87, 0xe6, 0x26, 0x9f, 0x16, 0x60, 0x2a, 0x22, 0x8a, 0xe1, 0x5d, 0x2a, 0x2a, 0xa7, 0x34, - 0x6f, 0xf0, 0xd5, 0xd6, 0x72, 0x4a, 0x04, 0x0e, 0x08, 0x25, 0xcb, 0x30, 0x16, 0xb8, 0xb1, 0xf6, - 0xaa, 0xf0, 0xf6, 0xd2, 0x94, 0x79, 0x60, 0xd3, 0x1d, 0xda, 0x5a, 0x89, 0x7c, 0x04, 0xe1, 0x92, - 0x7a, 0x4f, 0xb5, 0x54, 0x95, 0xb7, 0xd4, 0x95, 0xa3, 0xc3, 0x99, 0x4b, 0x9b, 0x99, 0x1c, 0x38, - 0x24, 0x27, 0xf9, 0xd5, 0x02, 0x4c, 0x28, 0x92, 0x6c, 0xa3, 0xda, 0x69, 0xb6, 0x11, 0x61, 0x3d, - 0x62, 0x33, 0x21, 0x00, 0x53, 0x02, 0xb5, 0x3f, 0x29, 0x43, 0x23, 0x1c, 0x60, 0xc9, 0x4b, 0x50, - 0xe1, 0x0b, 0x7f, 0xa9, 0x37, 0x87, 0x33, 0x27, 0xb7, 0x0f, 0xa0, 0xa0, 0x91, 0x97, 0xa1, 0x66, - 0xb8, 0xdd, 0xae, 0xee, 0x98, 0xdc, 0x98, 0xd3, 0x68, 0x35, 0x99, 0xc2, 0xb0, 0x28, 0x92, 0x50, - 0xd1, 0xc8, 0x55, 0x28, 0xeb, 0x5e, 0x47, 0xd8, 0x55, 0x1a, 0x62, 0x3c, 0x5a, 0xf0, 0x3a, 0x3e, - 0xf2, 0x54, 0xf2, 0x35, 0x28, 0x51, 0x67, 0x7f, 0xba, 0x3c, 0x5c, 0x23, 0xb9, 0xe9, 0xec, 0xdf, - 0xd7, 0xbd, 0x56, 0x53, 0x96, 
0xa1, 0x74, 0xd3, 0xd9, 0x47, 0x96, 0x87, 0xac, 0x41, 0x8d, 0x3a, - 0xfb, 0xec, 0xdb, 0x4b, 0x83, 0xc7, 0x8f, 0x0d, 0xc9, 0xce, 0x58, 0xa4, 0x72, 0x1e, 0xea, 0x35, - 0x32, 0x19, 0x15, 0x04, 0xf9, 0x16, 0x8c, 0x09, 0x15, 0x67, 0x9d, 0x7d, 0x13, 0x7f, 0xba, 0xca, - 0x21, 0x67, 0x86, 0xeb, 0x48, 0x9c, 0x2f, 0x32, 0x30, 0xc5, 0x12, 0x7d, 0x4c, 0x40, 0x91, 0x6f, - 0x41, 0x43, 0xad, 0x47, 0xd5, 0x97, 0xcd, 0xb4, 0xcd, 0xa8, 0x45, 0x2c, 0xd2, 0x8f, 0xfa, 0x96, - 0x47, 0xbb, 0xd4, 0x09, 0xfc, 0xd6, 0x39, 0xb5, 0x5a, 0x57, 0x54, 0x1f, 0x23, 0x34, 0xb2, 0x3d, - 0x68, 0x64, 0x12, 0x16, 0x92, 0x97, 0x86, 0x8c, 0xea, 0x23, 0x58, 0x98, 0xbe, 0x0d, 0x93, 0xa1, - 0x15, 0x48, 0x1a, 0x12, 0x84, 0xcd, 0xe4, 0x35, 0x96, 0x7d, 0x35, 0x49, 0x7a, 0x74, 0x38, 0xf3, - 0x62, 0x86, 0x29, 0x21, 0x62, 0xc0, 0x34, 0x98, 0xf6, 0x7b, 0x25, 0x18, 0xd4, 0xfe, 0x93, 0x8d, - 0x56, 0x38, 0xed, 0x46, 0x4b, 0x57, 0x48, 0x0c, 0x9f, 0x6f, 0xc9, 0x6c, 0xf9, 0x2b, 0x95, 0xf5, - 0x61, 0x4a, 0xa7, 0xfd, 0x61, 0x9e, 0x95, 0x7f, 0x47, 0xfb, 0xac, 0x0c, 0x13, 0x4b, 0x3a, 0xed, - 0xba, 0xce, 0x13, 0xd7, 0x42, 0x85, 0x67, 0x62, 0x2d, 0x74, 0x03, 0xea, 0x1e, 0xed, 0xd9, 0x96, - 0xa1, 0x0b, 0xe5, 0x4b, 0xda, 0x1e, 0x51, 0xa6, 0x61, 0x48, 0x1d, 0xb2, 0x06, 0x2e, 0x3d, 0x93, - 0x6b, 0xe0, 0xf2, 0x17, 0xbf, 0x06, 0xd6, 0x7e, 0xb5, 0x08, 0x5c, 0x51, 0x21, 0xd7, 0xa1, 0xcc, - 0x26, 0xe1, 0xb4, 0xe5, 0x85, 0x77, 0x1c, 0x4e, 0x21, 0x57, 0xa0, 0x18, 0xb8, 0xf2, 0xcf, 0x03, - 0x49, 0x2f, 0x6e, 0xba, 0x58, 0x0c, 0x5c, 0xf2, 0x31, 0x80, 0xe1, 0x3a, 0xa6, 0xa5, 0x4c, 0xf2, - 0xf9, 0x2a, 0xb6, 0xec, 0x7a, 0x0f, 0x74, 0xcf, 0x5c, 0x0c, 0x11, 0xc5, 0x2a, 0x28, 0x7a, 0xc7, - 0x98, 0x34, 0xf2, 0x36, 0x54, 0x5d, 0x67, 0xb9, 0x6f, 0xdb, 0xbc, 0x41, 0x1b, 0xad, 0x9f, 0x60, - 0x4b, 0xd3, 0x7b, 0x3c, 0xe5, 0xd1, 0xe1, 0xcc, 0x65, 0xa1, 0xdf, 0xb2, 0xb7, 0x77, 0x3d, 0x2b, - 0xb0, 0x9c, 0x4e, 0x3b, 0xf0, 0xf4, 0x80, 0x76, 0x0e, 0x50, 0x66, 0xd3, 0x7e, 0xbd, 0x00, 0xcd, - 0x65, 0xeb, 0x21, 0x35, 0xdf, 0xb5, 0x1c, 0xd3, 0x7d, 0x40, 0x10, 
0xaa, 0x36, 0x75, 0x3a, 0xc1, - 0xee, 0x88, 0xeb, 0x07, 0xb1, 0x36, 0xe6, 0x08, 0x28, 0x91, 0xc8, 0x1c, 0x34, 0x84, 0xf6, 0x69, - 0x39, 0x1d, 0xde, 0x86, 0xf5, 0x68, 0xd0, 0x6b, 0x2b, 0x02, 0x46, 0x3c, 0xda, 0x01, 0x9c, 0x1b, - 0x68, 0x06, 0x62, 0x42, 0x39, 0xd0, 0x3b, 0x6a, 0x7c, 0x5d, 0x1e, 0xb9, 0x81, 0x37, 0xf5, 0x4e, - 0xac, 0x71, 0xf9, 0x1c, 0xbf, 0xa9, 0xb3, 0x39, 0x9e, 0xa1, 0x6b, 0x3f, 0x2c, 0x40, 0x7d, 0xb9, - 0xef, 0x18, 0x7c, 0x89, 0xf6, 0x64, 0x8b, 0x9c, 0x52, 0x18, 0x8a, 0x99, 0x0a, 0x43, 0x1f, 0xaa, - 0x7b, 0x0f, 0x42, 0x85, 0xa2, 0x39, 0xbf, 0x3e, 0x7a, 0xaf, 0x90, 0x45, 0x9a, 0xbd, 0xc3, 0xf1, - 0x84, 0xc3, 0x68, 0x42, 0x16, 0xa8, 0x7a, 0xe7, 0x5d, 0x2e, 0x54, 0x0a, 0xbb, 0xf2, 0x35, 0x68, - 0xc6, 0xd8, 0x4e, 0x64, 0x3b, 0xfe, 0x87, 0x65, 0xa8, 0xae, 0xb4, 0xdb, 0x0b, 0x1b, 0xab, 0xe4, - 0x75, 0x68, 0x4a, 0x5f, 0xc2, 0xdd, 0xa8, 0x0d, 0x42, 0x57, 0x52, 0x3b, 0x22, 0x61, 0x9c, 0x8f, - 0xa9, 0x63, 0x1e, 0xd5, 0xed, 0xae, 0xfc, 0x59, 0x42, 0x75, 0x0c, 0x59, 0x22, 0x0a, 0x1a, 0xd1, - 0x61, 0x82, 0xad, 0xf0, 0x58, 0x13, 0x8a, 0xd5, 0x9b, 0xfc, 0x6d, 0x8e, 0xb9, 0xbe, 0xe3, 0x4a, - 0xe2, 0x56, 0x02, 0x00, 0x53, 0x80, 0xe4, 0x2d, 0xa8, 0xeb, 0xfd, 0x60, 0x97, 0x2b, 0xd0, 0xe2, - 0xdf, 0xb8, 0xca, 0x5d, 0x2d, 0x32, 0xed, 0xd1, 0xe1, 0xcc, 0xd8, 0x1d, 0x6c, 0xbd, 0xae, 0xde, - 0x31, 0xe4, 0x66, 0x85, 0x53, 0x2b, 0x46, 0x59, 0xb8, 0xca, 0x89, 0x0b, 0xb7, 0x91, 0x00, 0xc0, - 0x14, 0x20, 0x79, 0x1f, 0xc6, 0xf6, 0xe8, 0x41, 0xa0, 0x6f, 0x4b, 0x01, 0xd5, 0x93, 0x08, 0x98, - 0x62, 0x2a, 0xdc, 0x9d, 0x58, 0x76, 0x4c, 0x80, 0x11, 0x1f, 0x2e, 0xec, 0x51, 0x6f, 0x9b, 0x7a, - 0xae, 0x5c, 0x7d, 0x4a, 0x21, 0xb5, 0x93, 0x08, 0x99, 0x3e, 0x3a, 0x9c, 0xb9, 0x70, 0x27, 0x03, - 0x06, 0x33, 0xc1, 0xb5, 0xff, 0x53, 0x84, 0xc9, 0x15, 0xe1, 0xcc, 0x75, 0x3d, 0x31, 0x09, 0x93, - 0xcb, 0x50, 0xf2, 0x7a, 0x7d, 0xde, 0x73, 0x4a, 0xc2, 0x5c, 0x8b, 0x1b, 0x5b, 0xc8, 0xd2, 0xc8, - 0x7b, 0x50, 0x37, 0xe5, 0x90, 0x21, 0x17, 0xbf, 0x23, 0x19, 0x2a, 0xd4, 0x1b, 0x86, 0x68, 0x4c, - 0xd3, 
0xef, 0xfa, 0x9d, 0xb6, 0xf5, 0x31, 0x95, 0xeb, 0x41, 0xae, 0xe9, 0xaf, 0x8b, 0x24, 0x54, - 0x34, 0x36, 0xab, 0xee, 0xd1, 0x03, 0xb1, 0x1a, 0x2a, 0x47, 0xb3, 0xea, 0x1d, 0x99, 0x86, 0x21, - 0x95, 0xcc, 0xa8, 0x9f, 0x85, 0xf5, 0x82, 0xb2, 0x58, 0xc9, 0xdf, 0x67, 0x09, 0xf2, 0xbf, 0x61, - 0x43, 0xe6, 0x87, 0x56, 0x10, 0x50, 0x4f, 0x7e, 0xc6, 0x91, 0x86, 0xcc, 0xdb, 0x1c, 0x01, 0x25, - 0x12, 0xf9, 0x29, 0x68, 0x70, 0xf0, 0x96, 0xed, 0x6e, 0xf3, 0x0f, 0xd7, 0x10, 0x6b, 0xfa, 0xfb, - 0x2a, 0x11, 0x23, 0xba, 0xf6, 0xa3, 0x22, 0x5c, 0x5a, 0xa1, 0x81, 0xd0, 0x6a, 0x96, 0x68, 0xcf, - 0x76, 0x0f, 0x98, 0x6a, 0x89, 0xf4, 0x23, 0xf2, 0x0e, 0x80, 0xe5, 0x6f, 0xb7, 0xf7, 0x0d, 0xfe, - 0x1f, 0x88, 0x7f, 0xf8, 0xba, 0xfc, 0x25, 0x61, 0xb5, 0xdd, 0x92, 0x94, 0x47, 0x89, 0x37, 0x8c, - 0xe5, 0x89, 0x96, 0x57, 0xc5, 0xc7, 0x2c, 0xaf, 0xda, 0x00, 0xbd, 0x48, 0x41, 0x2d, 0x71, 0xce, - 0x9f, 0x55, 0x62, 0x4e, 0xa2, 0x9b, 0xc6, 0x60, 0xf2, 0xa8, 0x8c, 0x0e, 0x4c, 0x99, 0x74, 0x47, - 0xef, 0xdb, 0x41, 0xa8, 0x54, 0xcb, 0x9f, 0xf8, 0xf8, 0x7a, 0x79, 0xe8, 0x68, 0x5e, 0x4a, 0x21, - 0xe1, 0x00, 0xb6, 0xf6, 0xbb, 0x25, 0xb8, 0xb2, 0x42, 0x83, 0xd0, 0xe2, 0x22, 0x47, 0xc7, 0x76, - 0x8f, 0x1a, 0xec, 0x2b, 0x7c, 0x5a, 0x80, 0xaa, 0xad, 0x6f, 0x53, 0x9b, 0xcd, 0x5e, 0xac, 0x36, - 0x1f, 0x8c, 0x3c, 0x11, 0x0c, 0x97, 0x32, 0xbb, 0xc6, 0x25, 0xa4, 0xa6, 0x06, 0x91, 0x88, 0x52, - 0x3c, 0x1b, 0xd4, 0x0d, 0xbb, 0xef, 0x07, 0xd4, 0xdb, 0x70, 0xbd, 0x40, 0xea, 0x93, 0xe1, 0xa0, - 0xbe, 0x18, 0x91, 0x30, 0xce, 0x47, 0xe6, 0x01, 0x0c, 0xdb, 0xa2, 0x4e, 0xc0, 0x73, 0x89, 0xff, - 0x8a, 0xa8, 0xef, 0xbb, 0x18, 0x52, 0x30, 0xc6, 0xc5, 0x44, 0x75, 0x5d, 0xc7, 0x0a, 0x5c, 0x21, - 0xaa, 0x9c, 0x14, 0xb5, 0x1e, 0x91, 0x30, 0xce, 0xc7, 0xb3, 0xd1, 0xc0, 0xb3, 0x0c, 0x9f, 0x67, - 0xab, 0xa4, 0xb2, 0x45, 0x24, 0x8c, 0xf3, 0xb1, 0x39, 0x2f, 0x56, 0xff, 0x13, 0xcd, 0x79, 0xbf, - 0xdd, 0x80, 0x6b, 0x89, 0x66, 0x0d, 0xf4, 0x80, 0xee, 0xf4, 0xed, 0x36, 0x0d, 0xd4, 0x07, 0x1c, - 0x71, 0x2e, 0xfc, 0xab, 0xd1, 0x77, 0x17, 
0x21, 0x24, 0xc6, 0xe9, 0x7c, 0xf7, 0x81, 0x02, 0x1e, - 0xeb, 0xdb, 0xcf, 0x41, 0xc3, 0xd1, 0x03, 0x9f, 0xff, 0xb8, 0xf2, 0x1f, 0x0d, 0xd5, 0xb0, 0xbb, - 0x8a, 0x80, 0x11, 0x0f, 0xd9, 0x80, 0x0b, 0xb2, 0x89, 0x6f, 0x3e, 0xec, 0xb9, 0x5e, 0x40, 0x3d, - 0x91, 0x57, 0x4e, 0xa7, 0x32, 0xef, 0x85, 0xf5, 0x0c, 0x1e, 0xcc, 0xcc, 0x49, 0xd6, 0xe1, 0xbc, - 0x21, 0xdc, 0xea, 0xd4, 0x76, 0x75, 0x53, 0x01, 0x0a, 0x03, 0x57, 0xb8, 0x34, 0x5a, 0x1c, 0x64, - 0xc1, 0xac, 0x7c, 0xe9, 0xde, 0x5c, 0x1d, 0xa9, 0x37, 0xd7, 0x46, 0xe9, 0xcd, 0xf5, 0xd1, 0x7a, - 0x73, 0xe3, 0x78, 0xbd, 0x99, 0xb5, 0x3c, 0xeb, 0x47, 0xd4, 0x63, 0xea, 0x89, 0x98, 0x61, 0x63, - 0x51, 0x1b, 0x61, 0xcb, 0xb7, 0x33, 0x78, 0x30, 0x33, 0x27, 0xd9, 0x86, 0x2b, 0x22, 0xfd, 0xa6, - 0x63, 0x78, 0x07, 0x3d, 0x36, 0xf1, 0xc4, 0x70, 0x9b, 0x09, 0x0b, 0xe3, 0x95, 0xf6, 0x50, 0x4e, - 0x7c, 0x0c, 0x0a, 0xf9, 0x79, 0x18, 0x17, 0x5f, 0x69, 0x5d, 0xef, 0x71, 0x58, 0x11, 0xc3, 0x71, - 0x51, 0xc2, 0x8e, 0x2f, 0xc6, 0x89, 0x98, 0xe4, 0x25, 0x0b, 0x30, 0xd9, 0xdb, 0x37, 0xd8, 0xe3, - 0xea, 0xce, 0x5d, 0x4a, 0x4d, 0x6a, 0x72, 0xa7, 0x51, 0xa3, 0xf5, 0xbc, 0x32, 0x74, 0x6c, 0x24, - 0xc9, 0x98, 0xe6, 0x27, 0x6f, 0xc1, 0x98, 0x1f, 0xe8, 0x5e, 0x20, 0xcd, 0x7a, 0xd3, 0x13, 0x22, - 0xc6, 0x45, 0x59, 0xbd, 0xda, 0x31, 0x1a, 0x26, 0x38, 0x33, 0xe7, 0x8b, 0xc9, 0xb3, 0x9b, 0x2f, - 0xf2, 0x8c, 0x56, 0xff, 0xa2, 0x08, 0xd7, 0x57, 0x68, 0xb0, 0xee, 0x3a, 0xd2, 0x28, 0x9a, 0x35, - 0xed, 0x1f, 0xcb, 0x26, 0x9a, 0x9c, 0xb4, 0x8b, 0xa7, 0x3a, 0x69, 0x97, 0x4e, 0x69, 0xd2, 0x2e, - 0x9f, 0xe1, 0xa4, 0xfd, 0x8f, 0x8b, 0xf0, 0x7c, 0xa2, 0x25, 0x37, 0x5c, 0x53, 0x0d, 0xf8, 0x5f, - 0x35, 0xe0, 0x31, 0x1a, 0xf0, 0x91, 0xd0, 0x3b, 0xb9, 0x5b, 0x2b, 0xa5, 0xf1, 0x7c, 0x92, 0xd6, - 0x78, 0xde, 0xcf, 0x33, 0xf3, 0x65, 0x48, 0x38, 0xd6, 0x8c, 0x77, 0x1b, 0x88, 0x27, 0x9d, 0x70, - 0xc2, 0xf4, 0x13, 0x53, 0x7a, 0xc2, 0x20, 0x3a, 0x1c, 0xe0, 0xc0, 0x8c, 0x5c, 0xa4, 0x0d, 0x17, - 0x7d, 0xea, 0x04, 0x96, 0x43, 0xed, 0x24, 0x9c, 0xd0, 0x86, 0x5e, 0x94, 0x70, 
0x17, 0xdb, 0x59, - 0x4c, 0x98, 0x9d, 0x37, 0xcf, 0x38, 0xf0, 0xaf, 0x81, 0xab, 0x9c, 0xa2, 0x69, 0x4e, 0x4d, 0x63, - 0xf9, 0x34, 0xad, 0xb1, 0x7c, 0x90, 0xff, 0xbb, 0x8d, 0xa6, 0xad, 0xcc, 0x03, 0xf0, 0xaf, 0x10, - 0x57, 0x57, 0xc2, 0x49, 0x1a, 0x43, 0x0a, 0xc6, 0xb8, 0xd8, 0x04, 0xa4, 0xda, 0x39, 0xae, 0xa9, - 0x84, 0x13, 0x50, 0x3b, 0x4e, 0xc4, 0x24, 0xef, 0x50, 0x6d, 0xa7, 0x32, 0xb2, 0xb6, 0x73, 0x1b, - 0x48, 0xc2, 0xf0, 0x28, 0xf0, 0xaa, 0xc9, 0x18, 0xce, 0xd5, 0x01, 0x0e, 0xcc, 0xc8, 0x35, 0xa4, - 0x2b, 0xd7, 0x4e, 0xb7, 0x2b, 0xd7, 0x47, 0xef, 0xca, 0xe4, 0x03, 0xb8, 0xcc, 0x45, 0xc9, 0xf6, - 0x49, 0x02, 0x0b, 0xbd, 0xe7, 0xc7, 0x24, 0xf0, 0x65, 0x1c, 0xc6, 0x88, 0xc3, 0x31, 0xd8, 0xf7, - 0x31, 0x3c, 0x6a, 0x32, 0xe1, 0xba, 0x3d, 0x5c, 0x27, 0x5a, 0xcc, 0xe0, 0xc1, 0xcc, 0x9c, 0xac, - 0x8b, 0x05, 0xac, 0x1b, 0xea, 0xdb, 0x36, 0x35, 0x65, 0x0c, 0x6b, 0xd8, 0xc5, 0x36, 0xd7, 0xda, - 0x92, 0x82, 0x31, 0xae, 0x2c, 0x35, 0x65, 0xec, 0x84, 0x6a, 0xca, 0x0a, 0xb7, 0xd2, 0xef, 0x24, - 0xb4, 0x21, 0xa9, 0xeb, 0x84, 0x51, 0xc9, 0x8b, 0x69, 0x06, 0x1c, 0xcc, 0xc3, 0xb5, 0x44, 0xc3, - 0xb3, 0x7a, 0x81, 0x9f, 0xc4, 0x9a, 0x48, 0x69, 0x89, 0x19, 0x3c, 0x98, 0x99, 0x93, 0xe9, 0xe7, - 0xbb, 0x54, 0xb7, 0x83, 0xdd, 0x24, 0xe0, 0x64, 0x52, 0x3f, 0xbf, 0x35, 0xc8, 0x82, 0x59, 0xf9, - 0x32, 0x27, 0xa4, 0xa9, 0x67, 0x53, 0xad, 0xfa, 0xb5, 0x12, 0x5c, 0x5e, 0xa1, 0x41, 0x18, 0xde, - 0xf3, 0x95, 0x19, 0xe5, 0x0b, 0x30, 0xa3, 0xfc, 0x56, 0x05, 0xce, 0xaf, 0xd0, 0x60, 0x40, 0x1b, - 0xfb, 0x53, 0xda, 0xfc, 0xeb, 0x70, 0x3e, 0x8a, 0x28, 0x6b, 0x07, 0xae, 0x27, 0xe6, 0xf2, 0xd4, - 0x6a, 0xb9, 0x3d, 0xc8, 0x82, 0x59, 0xf9, 0xc8, 0xb7, 0xe0, 0x79, 0x3e, 0xd5, 0x3b, 0x1d, 0x61, - 0x9f, 0x15, 0xc6, 0x84, 0xd8, 0x9e, 0x88, 0x19, 0x09, 0xf9, 0x7c, 0x3b, 0x9b, 0x0d, 0x87, 0xe5, - 0x27, 0xdf, 0x81, 0xb1, 0x9e, 0xd5, 0xa3, 0xb6, 0xe5, 0x70, 0xfd, 0x2c, 0x77, 0x48, 0xc8, 0x46, - 0x0c, 0x2c, 0x5a, 0xc0, 0xc5, 0x53, 0x31, 0x21, 0x30, 0xb3, 0xa7, 0xd6, 0xcf, 0xb0, 0xa7, 0xfe, - 0xcf, 0x22, 0xd4, 
0x56, 0x3c, 0xb7, 0xdf, 0x6b, 0x1d, 0x90, 0x0e, 0x54, 0x1f, 0x70, 0xe7, 0x99, - 0x74, 0x4d, 0x8d, 0x1e, 0x95, 0x2d, 0x7c, 0x70, 0x91, 0x4a, 0x24, 0xde, 0x51, 0xc2, 0xb3, 0x4e, - 0xbc, 0x47, 0x0f, 0xa8, 0x29, 0x7d, 0x68, 0x61, 0x27, 0xbe, 0xc3, 0x12, 0x51, 0xd0, 0x48, 0x17, - 0x26, 0x75, 0xdb, 0x76, 0x1f, 0x50, 0x73, 0x4d, 0x0f, 0xa8, 0x43, 0x7d, 0xe5, 0x92, 0x3c, 0xa9, - 0x59, 0x9a, 0xfb, 0xf5, 0x17, 0x92, 0x50, 0x98, 0xc6, 0x26, 0x1f, 0x42, 0xcd, 0x0f, 0x5c, 0x4f, - 0x29, 0x5b, 0xcd, 0xf9, 0xc5, 0xd1, 0x3f, 0x7a, 0xeb, 0x9b, 0x6d, 0x01, 0x25, 0x6c, 0xf6, 0xf2, - 0x05, 0x95, 0x00, 0xed, 0x37, 0x0b, 0x00, 0xb7, 0x36, 0x37, 0x37, 0xa4, 0x7b, 0xc1, 0x84, 0xb2, - 0xde, 0x0f, 0x1d, 0x95, 0xa3, 0x3b, 0x04, 0x13, 0x61, 0x99, 0xd2, 0x87, 0xd7, 0x0f, 0x76, 0x91, - 0xa3, 0x93, 0x9f, 0x84, 0x9a, 0x54, 0x90, 0x65, 0xb3, 0x87, 0xa1, 0x05, 0x52, 0x89, 0x46, 0x45, - 0xd7, 0xfe, 0x41, 0x11, 0x60, 0xd5, 0xb4, 0x69, 0x5b, 0x05, 0xd2, 0x37, 0x82, 0x5d, 0x8f, 0xfa, - 0xbb, 0xae, 0x6d, 0x8e, 0xe8, 0x4d, 0xe5, 0x36, 0xff, 0x4d, 0x05, 0x82, 0x11, 0x1e, 0x31, 0x61, - 0xcc, 0x0f, 0x68, 0x4f, 0x45, 0x6a, 0x8e, 0xe8, 0x44, 0x99, 0x12, 0x76, 0x91, 0x08, 0x07, 0x13, - 0xa8, 0x44, 0x87, 0xa6, 0xe5, 0x18, 0xe2, 0x07, 0x69, 0x1d, 0x8c, 0xd8, 0x91, 0x26, 0xd9, 0x8a, - 0x63, 0x35, 0x82, 0xc1, 0x38, 0xa6, 0xf6, 0x87, 0x45, 0xb8, 0xc4, 0xe5, 0xb1, 0x62, 0x24, 0xe2, - 0x31, 0xc9, 0x9f, 0x1f, 0xd8, 0xf4, 0xf7, 0x67, 0x8f, 0x27, 0x5a, 0xec, 0x19, 0x5b, 0xa7, 0x81, - 0x1e, 0xe9, 0x73, 0x51, 0x5a, 0x6c, 0xa7, 0x5f, 0x1f, 0xca, 0x3e, 0x1b, 0xaf, 0x44, 0xeb, 0xb5, - 0x47, 0xee, 0x42, 0xd9, 0x15, 0xe0, 0xa3, 0x57, 0xe8, 0x35, 0xe6, 0xa3, 0x16, 0x17, 0x47, 0xfe, - 0x22, 0x54, 0xfd, 0x40, 0x0f, 0xfa, 0xea, 0xd7, 0xdc, 0x3a, 0x6d, 0xc1, 0x1c, 0x3c, 0x1a, 0x47, - 0xc4, 0x3b, 0x4a, 0xa1, 0xda, 0x1f, 0x16, 0xe0, 0x4a, 0x76, 0xc6, 0x35, 0xcb, 0x0f, 0xc8, 0x9f, - 0x1b, 0x68, 0xf6, 0x63, 0x7e, 0x71, 0x96, 0x9b, 0x37, 0x7a, 0x18, 0x17, 0xae, 0x52, 0x62, 0x4d, - 0x1e, 0x40, 0xc5, 0x0a, 0x68, 0x57, 0xad, 0x2f, 0xef, 
0x9d, 0x72, 0xd5, 0x63, 0x53, 0x3b, 0x93, - 0x82, 0x42, 0x98, 0xf6, 0x59, 0x71, 0x58, 0x95, 0xf9, 0xf4, 0x61, 0x27, 0x63, 0x7e, 0xef, 0xe4, - 0x8b, 0xf9, 0x4d, 0x16, 0x68, 0x30, 0xf4, 0xf7, 0x2f, 0x0c, 0x86, 0xfe, 0xde, 0xcb, 0x1f, 0xfa, - 0x9b, 0x6a, 0x86, 0xa1, 0x11, 0xc0, 0x9f, 0x97, 0xe0, 0xea, 0xe3, 0xba, 0x0d, 0x9b, 0xcf, 0x64, - 0xef, 0xcc, 0x3b, 0x9f, 0x3d, 0xbe, 0x1f, 0x92, 0x79, 0xa8, 0xf4, 0x76, 0x75, 0x5f, 0x29, 0x65, - 0x6a, 0xc1, 0x52, 0xd9, 0x60, 0x89, 0x8f, 0xd8, 0xa0, 0xc1, 0x95, 0x39, 0xfe, 0x8a, 0x82, 0x95, - 0x0d, 0xc7, 0x5d, 0xea, 0xfb, 0x91, 0x4d, 0x20, 0x1c, 0x8e, 0xd7, 0x45, 0x32, 0x2a, 0x3a, 0x09, - 0xa0, 0x2a, 0x4c, 0xcc, 0x72, 0x66, 0x1a, 0x3d, 0x90, 0x2b, 0x23, 0x4c, 0x3c, 0xaa, 0x94, 0xf4, - 0x56, 0x48, 0x59, 0x64, 0x16, 0xca, 0x41, 0x14, 0xb4, 0xab, 0x96, 0xe6, 0xe5, 0x0c, 0xfd, 0x94, - 0xf3, 0xb1, 0x85, 0xbd, 0xbb, 0xcd, 0x8d, 0xea, 0xa6, 0xf4, 0x9f, 0x5b, 0xae, 0xc3, 0x15, 0xb2, - 0x52, 0xb4, 0xb0, 0xbf, 0x37, 0xc0, 0x81, 0x19, 0xb9, 0xb4, 0x7f, 0x57, 0x87, 0x4b, 0xd9, 0xfd, - 0x81, 0xb5, 0xdb, 0x3e, 0xf5, 0x7c, 0x86, 0x5d, 0x48, 0xb6, 0xdb, 0x7d, 0x91, 0x8c, 0x8a, 0xfe, - 0xa5, 0x0e, 0x38, 0xfb, 0xad, 0x02, 0x5c, 0xf6, 0xa4, 0x8f, 0xe8, 0x69, 0x04, 0x9d, 0xbd, 0x28, - 0xcc, 0x19, 0x43, 0x04, 0xe2, 0xf0, 0xb2, 0x90, 0xbf, 0x57, 0x80, 0xe9, 0x6e, 0xca, 0xce, 0x71, - 0x86, 0xfb, 0xd6, 0x78, 0x54, 0xfc, 0xfa, 0x10, 0x79, 0x38, 0xb4, 0x24, 0xe4, 0x3b, 0xd0, 0xec, - 0xb1, 0x7e, 0xe1, 0x07, 0xd4, 0x31, 0xd4, 0xd6, 0xb5, 0xd1, 0xff, 0xa4, 0x8d, 0x08, 0x4b, 0x85, - 0xa2, 0x09, 0xfd, 0x20, 0x46, 0xc0, 0xb8, 0xc4, 0x67, 0x7c, 0xa3, 0xda, 0x0d, 0xa8, 0xfb, 0x34, - 0x08, 0x2c, 0xa7, 0x23, 0xd6, 0x1b, 0x0d, 0xf1, 0xaf, 0xb4, 0x65, 0x1a, 0x86, 0x54, 0xf2, 0x53, - 0xd0, 0xe0, 0x2e, 0xa7, 0x05, 0xaf, 0xe3, 0x4f, 0x37, 0x78, 0xb8, 0xd8, 0xb8, 0x08, 0x80, 0x93, - 0x89, 0x18, 0xd1, 0xc9, 0x6b, 0x30, 0xb6, 0xcd, 0x7f, 0x5f, 0xb9, 0x77, 0x59, 0xd8, 0xb8, 0xb8, - 0xb6, 0xd6, 0x8a, 0xa5, 0x63, 0x82, 0x8b, 0xcc, 0x03, 0xd0, 0xd0, 0x2f, 0x97, 0xb6, 0x67, 
0x45, - 0x1e, 0x3b, 0x8c, 0x71, 0x91, 0x17, 0xa1, 0x14, 0xd8, 0x3e, 0xb7, 0x61, 0xd5, 0xa3, 0x25, 0xe8, - 0xe6, 0x5a, 0x1b, 0x59, 0xba, 0xf6, 0xa3, 0x02, 0x4c, 0xa6, 0x36, 0x97, 0xb0, 0x2c, 0x7d, 0xcf, - 0x96, 0xc3, 0x48, 0x98, 0x65, 0x0b, 0xd7, 0x90, 0xa5, 0x93, 0x0f, 0xa4, 0x5a, 0x5e, 0xcc, 0x79, - 0x4c, 0xc3, 0x5d, 0x3d, 0xf0, 0x99, 0x1e, 0x3e, 0xa0, 0x91, 0x73, 0x37, 0x5f, 0x54, 0x1e, 0x39, - 0x0f, 0xc4, 0xdc, 0x7c, 0x11, 0x0d, 0x13, 0x9c, 0x29, 0x83, 0x5f, 0xf9, 0x38, 0x06, 0x3f, 0xed, - 0xd7, 0x8b, 0xb1, 0x16, 0x90, 0x9a, 0xfd, 0x13, 0x5a, 0xe0, 0x15, 0x36, 0x81, 0x86, 0x93, 0x7b, - 0x23, 0x3e, 0xff, 0xf1, 0xc9, 0x58, 0x52, 0xc9, 0xbb, 0xa2, 0xed, 0x4b, 0x39, 0x37, 0xc3, 0x6e, - 0xae, 0xb5, 0x45, 0x74, 0x95, 0xfa, 0x6a, 0xe1, 0x27, 0x28, 0x9f, 0xd1, 0x27, 0xd0, 0xfe, 0x55, - 0x09, 0x9a, 0xb7, 0xdd, 0xed, 0x2f, 0x49, 0x04, 0x75, 0xf6, 0x34, 0x55, 0xfc, 0x02, 0xa7, 0xa9, - 0x2d, 0x78, 0x3e, 0x08, 0xec, 0x36, 0x35, 0x5c, 0xc7, 0xf4, 0x17, 0x76, 0x02, 0xea, 0x2d, 0x5b, - 0x8e, 0xe5, 0xef, 0x52, 0x53, 0xba, 0x93, 0x5e, 0x38, 0x3a, 0x9c, 0x79, 0x7e, 0x73, 0x73, 0x2d, - 0x8b, 0x05, 0x87, 0xe5, 0xe5, 0xc3, 0x86, 0xd8, 0x09, 0xc8, 0x77, 0xca, 0xc8, 0x98, 0x1b, 0x31, - 0x6c, 0xc4, 0xd2, 0x31, 0xc1, 0xa5, 0x7d, 0xb7, 0x08, 0x8d, 0x70, 0x03, 0x3e, 0x79, 0x19, 0x6a, - 0xdb, 0x9e, 0xbb, 0x47, 0x3d, 0xe1, 0xb9, 0x93, 0x3b, 0x65, 0x5a, 0x22, 0x09, 0x15, 0x8d, 0xbc, - 0x04, 0x95, 0xc0, 0xed, 0x59, 0x46, 0xda, 0xa0, 0xb6, 0xc9, 0x12, 0x51, 0xd0, 0xce, 0xae, 0x83, - 0xbf, 0x92, 0x50, 0xed, 0x1a, 0x43, 0x95, 0xb1, 0xf7, 0xa1, 0xec, 0xeb, 0xbe, 0x2d, 0xe7, 0xd3, - 0x1c, 0x7b, 0xd9, 0x17, 0xda, 0x6b, 0x72, 0x2f, 0xfb, 0x42, 0x7b, 0x0d, 0x39, 0xa8, 0xf6, 0x27, - 0x45, 0x68, 0x8a, 0x76, 0x13, 0xa3, 0xc2, 0x69, 0xb6, 0xdc, 0xdb, 0x3c, 0x94, 0xc2, 0xef, 0x77, - 0xa9, 0xc7, 0xcd, 0x4c, 0x72, 0x90, 0x8b, 0xfb, 0x07, 0x22, 0x62, 0x18, 0x4e, 0x11, 0x25, 0xa9, - 0xa6, 0x2f, 0x9f, 0x61, 0xd3, 0x57, 0x8e, 0xd5, 0xf4, 0xd5, 0xb3, 0x68, 0xfa, 0x4f, 0x8b, 0xd0, - 0x58, 0xb3, 0x76, 0xa8, 0x71, 
0x60, 0xd8, 0x7c, 0x4f, 0xa0, 0x49, 0x6d, 0x1a, 0xd0, 0x15, 0x4f, - 0x37, 0xe8, 0x06, 0xf5, 0x2c, 0x7e, 0x40, 0x0d, 0xfb, 0x3f, 0xf8, 0x08, 0x24, 0xf7, 0x04, 0x2e, - 0x0d, 0xe1, 0xc1, 0xa1, 0xb9, 0xc9, 0x2a, 0x8c, 0x99, 0xd4, 0xb7, 0x3c, 0x6a, 0x6e, 0xc4, 0x16, - 0x2a, 0x2f, 0xab, 0xa9, 0x66, 0x29, 0x46, 0x7b, 0x74, 0x38, 0x33, 0xae, 0x0c, 0x94, 0x62, 0xc5, - 0x92, 0xc8, 0xca, 0x7e, 0xf9, 0x9e, 0xde, 0xf7, 0xb3, 0xca, 0x18, 0xfb, 0xe5, 0x37, 0xb2, 0x59, - 0x70, 0x58, 0x5e, 0xad, 0x02, 0xa5, 0x35, 0xb7, 0xa3, 0x7d, 0x56, 0x82, 0xf0, 0x24, 0x23, 0xf2, - 0x57, 0x0a, 0xd0, 0xd4, 0x1d, 0xc7, 0x0d, 0xe4, 0x29, 0x41, 0xc2, 0x03, 0x8f, 0xb9, 0x0f, 0x4c, - 0x9a, 0x5d, 0x88, 0x40, 0x85, 0xf3, 0x36, 0x74, 0x28, 0xc7, 0x28, 0x18, 0x97, 0x4d, 0xfa, 0x29, - 0x7f, 0xf2, 0x7a, 0xfe, 0x52, 0x1c, 0xc3, 0x7b, 0x7c, 0xe5, 0x1b, 0x30, 0x95, 0x2e, 0xec, 0x49, - 0xdc, 0x41, 0xb9, 0x1c, 0xf3, 0x45, 0x80, 0x28, 0xa6, 0xe4, 0x29, 0x18, 0xb1, 0xac, 0x84, 0x11, - 0x6b, 0x65, 0xf4, 0x06, 0x0e, 0x0b, 0x3d, 0xd4, 0x70, 0xf5, 0x51, 0xca, 0x70, 0xb5, 0x7a, 0x1a, - 0xc2, 0x1e, 0x6f, 0xac, 0xfa, 0xfb, 0x05, 0x98, 0x8a, 0x98, 0xe5, 0x0e, 0xd9, 0x37, 0x61, 0xdc, - 0xa3, 0xba, 0xd9, 0xd2, 0x03, 0x63, 0x97, 0x87, 0x7a, 0x17, 0x78, 0x6c, 0xf6, 0xb9, 0xa3, 0xc3, - 0x99, 0x71, 0x8c, 0x13, 0x30, 0xc9, 0x47, 0x74, 0x68, 0xb2, 0x84, 0x4d, 0xab, 0x4b, 0xdd, 0x7e, - 0x30, 0xa2, 0xd5, 0x94, 0x2f, 0x58, 0x30, 0x82, 0xc1, 0x38, 0xa6, 0xf6, 0x79, 0x01, 0x26, 0xe2, - 0x05, 0x3e, 0x73, 0x8b, 0xda, 0x6e, 0xd2, 0xa2, 0xb6, 0x78, 0x0a, 0xdf, 0x64, 0x88, 0x15, 0xed, - 0x13, 0x88, 0x57, 0x8d, 0x5b, 0xce, 0xe2, 0xc6, 0x82, 0xc2, 0x63, 0x8d, 0x05, 0x5f, 0xfe, 0xc3, - 0x6b, 0x86, 0x69, 0xb9, 0xe5, 0x67, 0x58, 0xcb, 0xfd, 0x22, 0x4f, 0xc0, 0x89, 0x9d, 0xe2, 0x52, - 0xcd, 0x71, 0x8a, 0x4b, 0x37, 0x3c, 0xc5, 0xa5, 0x76, 0x6a, 0x83, 0xce, 0x71, 0x4e, 0x72, 0xa9, - 0x3f, 0xd5, 0x93, 0x5c, 0x1a, 0x67, 0x75, 0x92, 0x0b, 0xe4, 0x3d, 0xc9, 0xe5, 0x93, 0x02, 0x4c, - 0x98, 0x89, 0x1d, 0xb3, 0xdc, 0xb6, 0x90, 0x67, 0xaa, 0x49, 0x6e, 
0xc0, 0x15, 0x5b, 0xa6, 0x92, - 0x69, 0x98, 0x12, 0x49, 0x3e, 0x2d, 0xc0, 0x44, 0xbf, 0x67, 0xea, 0x41, 0x68, 0x38, 0xe2, 0x46, - 0x8b, 0x3c, 0xa5, 0xd8, 0x4a, 0xc0, 0x45, 0x8d, 0x9b, 0x4c, 0xc7, 0x94, 0x58, 0xed, 0x8f, 0x6b, - 0xf1, 0x19, 0xe9, 0x69, 0x1b, 0xcd, 0xdf, 0x48, 0x1a, 0xcd, 0xaf, 0xa7, 0x8d, 0xe6, 0x93, 0xb1, - 0x78, 0xd6, 0xb8, 0xe1, 0xfc, 0xa7, 0x63, 0x03, 0x75, 0x89, 0x9f, 0xe1, 0x12, 0x7e, 0xf3, 0x8c, - 0xc1, 0x7a, 0x01, 0x26, 0xa5, 0xf6, 0xaa, 0x88, 0x7c, 0x94, 0x1b, 0x8f, 0xc2, 0x9c, 0x96, 0x92, - 0x64, 0x4c, 0xf3, 0x33, 0x81, 0xbe, 0x3a, 0x40, 0x53, 0x2c, 0x15, 0xa2, 0x4e, 0xa6, 0x0e, 0xb7, - 0x0c, 0x39, 0xd8, 0xb2, 0xc2, 0xa3, 0xba, 0x2f, 0x4d, 0xdf, 0xb1, 0x65, 0x05, 0xf2, 0x54, 0x94, - 0xd4, 0xb8, 0xfd, 0xbf, 0xf6, 0x04, 0xfb, 0xbf, 0x0e, 0x4d, 0x5b, 0xf7, 0x03, 0xf1, 0x35, 0x4d, - 0xf9, 0x3b, 0xff, 0x99, 0xe3, 0x4d, 0xbc, 0x6c, 0x32, 0x8f, 0xb4, 0xdb, 0xb5, 0x08, 0x06, 0xe3, - 0x98, 0xc4, 0x84, 0x31, 0xf6, 0xca, 0x7f, 0x6d, 0x73, 0x21, 0x90, 0xc7, 0x4c, 0x9d, 0x44, 0x46, - 0x68, 0xb6, 0x5a, 0x8b, 0xe1, 0x60, 0x02, 0x75, 0x88, 0x8b, 0x00, 0x46, 0x71, 0x11, 0x90, 0x9f, - 0x17, 0x9a, 0xd3, 0x41, 0xf8, 0x59, 0x9b, 0xfc, 0xb3, 0x86, 0x21, 0x92, 0x18, 0x27, 0x62, 0x92, - 0x97, 0xf5, 0x8a, 0xbe, 0x6c, 0x06, 0x95, 0x7d, 0x2c, 0xd9, 0x2b, 0xb6, 0x92, 0x64, 0x4c, 0xf3, - 0x93, 0x0d, 0xb8, 0x10, 0x26, 0xc5, 0x8b, 0x31, 0xce, 0x71, 0xc2, 0x98, 0xb5, 0xad, 0x0c, 0x1e, - 0xcc, 0xcc, 0xc9, 0x37, 0x81, 0xf4, 0x3d, 0x8f, 0x3a, 0xc1, 0x2d, 0xdd, 0xdf, 0x95, 0xc1, 0x6f, - 0xd1, 0x26, 0x90, 0x88, 0x84, 0x71, 0x3e, 0x32, 0x0f, 0x20, 0xe0, 0x78, 0xae, 0xc9, 0x64, 0x7c, - 0xe9, 0x56, 0x48, 0xc1, 0x18, 0x97, 0xf6, 0x49, 0x03, 0x9a, 0x77, 0xf5, 0xc0, 0xda, 0xa7, 0xdc, - 0x9f, 0x77, 0x36, 0x4e, 0x95, 0xbf, 0x55, 0x80, 0x4b, 0xc9, 0xa0, 0xcd, 0x33, 0xf4, 0xac, 0xf0, - 0x23, 0x60, 0x30, 0x53, 0x1a, 0x0e, 0x29, 0x05, 0xf7, 0xb1, 0x0c, 0xc4, 0x80, 0x9e, 0xb5, 0x8f, - 0xa5, 0x3d, 0x4c, 0x20, 0x0e, 0x2f, 0xcb, 0x97, 0xc5, 0xc7, 0xf2, 0x6c, 0x9f, 0x14, 0x98, 0xf2, - 0x00, 
0xd5, 0x9e, 0x19, 0x0f, 0x50, 0xfd, 0x99, 0x50, 0xbb, 0x7b, 0x31, 0x0f, 0x50, 0x23, 0x67, - 0x24, 0x92, 0xdc, 0xe7, 0x20, 0xd0, 0x86, 0x79, 0x92, 0xf8, 0x11, 0x05, 0xca, 0x32, 0xcf, 0xb4, - 0xd5, 0x6d, 0xdd, 0xb7, 0x0c, 0xa9, 0x76, 0xe4, 0x38, 0x19, 0x55, 0x9d, 0xdd, 0x26, 0x02, 0x16, - 0xf8, 0x2b, 0x0a, 0xec, 0xe8, 0xa8, 0xba, 0x62, 0xae, 0xa3, 0xea, 0xc8, 0x22, 0x94, 0x9d, 0x3d, - 0x7a, 0x70, 0xb2, 0xcd, 0xfe, 0x7c, 0x15, 0x76, 0xf7, 0x0e, 0x3d, 0x40, 0x9e, 0x59, 0xfb, 0x6e, - 0x11, 0x80, 0x55, 0xff, 0x78, 0xbe, 0x98, 0x9f, 0x84, 0x9a, 0xdf, 0xe7, 0x56, 0x13, 0xa9, 0x30, - 0x45, 0xe1, 0x5b, 0x22, 0x19, 0x15, 0x9d, 0xbc, 0x04, 0x95, 0x8f, 0xfa, 0xb4, 0xaf, 0x02, 0x0b, - 0x42, 0xc5, 0xfd, 0x9b, 0x2c, 0x11, 0x05, 0xed, 0xec, 0xec, 0xaa, 0xca, 0x67, 0x53, 0x39, 0x2b, - 0x9f, 0x4d, 0x03, 0x6a, 0x77, 0x5d, 0x1e, 0x0d, 0xaa, 0xfd, 0xf7, 0x22, 0x40, 0x14, 0x6d, 0x47, - 0x7e, 0xb3, 0x00, 0x17, 0xc3, 0x1f, 0x2e, 0x10, 0xeb, 0x2f, 0x7e, 0x18, 0x71, 0x6e, 0xff, 0x4d, - 0xd6, 0xcf, 0xce, 0x47, 0xa0, 0x8d, 0x2c, 0x71, 0x98, 0x5d, 0x0a, 0x82, 0x50, 0xa7, 0xdd, 0x5e, - 0x70, 0xb0, 0x64, 0x79, 0xb2, 0x07, 0x66, 0x06, 0x75, 0xde, 0x94, 0x3c, 0x22, 0xab, 0x34, 0x12, - 0xf0, 0x9f, 0x48, 0x51, 0x30, 0xc4, 0x21, 0xbb, 0x50, 0x77, 0xdc, 0x0f, 0x7c, 0xd6, 0x1c, 0xb2, - 0x3b, 0xbe, 0x33, 0x7a, 0x93, 0x8b, 0x66, 0x15, 0xf6, 0x7e, 0xf9, 0x82, 0x35, 0x47, 0x36, 0xf6, - 0x6f, 0x14, 0xe1, 0x7c, 0x46, 0x3b, 0x90, 0x77, 0x60, 0x4a, 0x06, 0x36, 0x46, 0xa7, 0x72, 0x17, - 0xa2, 0x53, 0xb9, 0xdb, 0x29, 0x1a, 0x0e, 0x70, 0x93, 0x0f, 0x00, 0x74, 0xc3, 0xa0, 0xbe, 0xbf, - 0xee, 0x9a, 0x6a, 0x3d, 0xf0, 0x36, 0x53, 0x5f, 0x16, 0xc2, 0xd4, 0x47, 0x87, 0x33, 0x3f, 0x93, - 0x15, 0xab, 0x9c, 0x6a, 0xe7, 0x28, 0x03, 0xc6, 0x20, 0xc9, 0xb7, 0x01, 0xc4, 0x22, 0x3c, 0x3c, - 0x4e, 0xe1, 0x09, 0x96, 0xab, 0x59, 0x75, 0x70, 0xd5, 0xec, 0x37, 0xfb, 0xba, 0x13, 0x58, 0xc1, - 0x81, 0x38, 0xbd, 0xe6, 0x7e, 0x88, 0x82, 0x31, 0x44, 0xed, 0x9f, 0x17, 0xa1, 0xae, 0x6c, 0xe6, - 0x4f, 0xc1, 0x50, 0xda, 0x49, 0x18, 0x4a, 
0x4f, 0x29, 0x3a, 0x39, 0xcb, 0x4c, 0xea, 0xa6, 0xcc, - 0xa4, 0x2b, 0xf9, 0x45, 0x3d, 0xde, 0x48, 0xfa, 0x3b, 0x45, 0x98, 0x50, 0xac, 0x79, 0x4d, 0xa4, - 0x5f, 0x87, 0x49, 0x11, 0x55, 0xb0, 0xae, 0x3f, 0x14, 0x07, 0xf9, 0xf0, 0x06, 0x2b, 0x8b, 0x80, - 0xe0, 0x56, 0x92, 0x84, 0x69, 0x5e, 0xd6, 0xad, 0x45, 0xd2, 0x16, 0x5b, 0x84, 0x09, 0x3f, 0xa4, - 0x58, 0x6f, 0xf2, 0x6e, 0xdd, 0x4a, 0xd1, 0x70, 0x80, 0x3b, 0x6d, 0xa3, 0x2d, 0x9f, 0x81, 0x8d, - 0xf6, 0x3f, 0x14, 0x60, 0x2c, 0x6a, 0xaf, 0x33, 0xb7, 0xd0, 0xee, 0x24, 0x2d, 0xb4, 0x0b, 0xb9, - 0xbb, 0xc3, 0x10, 0xfb, 0xec, 0x5f, 0xab, 0x41, 0x22, 0x48, 0x9e, 0x6c, 0xc3, 0x15, 0x2b, 0x33, - 0xd4, 0x2f, 0x36, 0xda, 0x84, 0xbb, 0xbe, 0x57, 0x87, 0x72, 0xe2, 0x63, 0x50, 0x48, 0x1f, 0xea, - 0xfb, 0xd4, 0x0b, 0x2c, 0x83, 0xaa, 0xfa, 0xad, 0xe4, 0x56, 0xc9, 0xa4, 0x15, 0x3a, 0x6c, 0xd3, - 0xfb, 0x52, 0x00, 0x86, 0xa2, 0xc8, 0x36, 0x54, 0xa8, 0xd9, 0xa1, 0xea, 0x68, 0xa5, 0x9c, 0x07, - 0x97, 0x86, 0xed, 0xc9, 0xde, 0x7c, 0x14, 0xd0, 0xc4, 0x87, 0x86, 0xad, 0xbc, 0x8c, 0xb2, 0x1f, - 0x8e, 0xae, 0x60, 0x85, 0xfe, 0xca, 0xe8, 0xd4, 0x85, 0x30, 0x09, 0x23, 0x39, 0x64, 0x2f, 0x34, - 0x77, 0x56, 0x4e, 0x69, 0xf0, 0x78, 0x8c, 0xb1, 0xd3, 0x87, 0xc6, 0x03, 0x3d, 0xa0, 0x5e, 0x57, - 0xf7, 0xf6, 0xe4, 0x6a, 0x63, 0xf4, 0x1a, 0xbe, 0xab, 0x90, 0xa2, 0x1a, 0x86, 0x49, 0x18, 0xc9, - 0x21, 0x2e, 0x34, 0x02, 0xa9, 0x3e, 0x2b, 0x9b, 0xee, 0xe8, 0x42, 0x95, 0x22, 0xee, 0xcb, 0x60, - 0x79, 0xf5, 0x8a, 0x91, 0x0c, 0xb2, 0x9f, 0x38, 0x5b, 0x5a, 0x9c, 0x28, 0xde, 0xca, 0xe1, 0x1b, - 0x90, 0x50, 0xd1, 0x74, 0x93, 0x7d, 0x46, 0xb5, 0xf6, 0xbf, 0x2a, 0xd1, 0xb0, 0xfc, 0xb4, 0xed, - 0x84, 0xaf, 0x25, 0xed, 0x84, 0xd7, 0xd2, 0x76, 0xc2, 0x94, 0xb3, 0xfa, 0xe4, 0xe1, 0xb5, 0x29, - 0xf3, 0x5a, 0xf9, 0x0c, 0xcc, 0x6b, 0xaf, 0x42, 0x73, 0x9f, 0x8f, 0x04, 0xe2, 0x9c, 0xa6, 0x0a, - 0x9f, 0x46, 0xf8, 0xc8, 0x7e, 0x3f, 0x4a, 0xc6, 0x38, 0x0f, 0xcb, 0x22, 0x6f, 0xd3, 0x08, 0x0f, - 0xba, 0x95, 0x59, 0xda, 0x51, 0x32, 0xc6, 0x79, 0x78, 0x64, 0x9e, 0xe5, 0xec, 
0x89, 0x0c, 0x35, - 0x9e, 0x41, 0x44, 0xe6, 0xa9, 0x44, 0x8c, 0xe8, 0xe4, 0x06, 0xd4, 0xfb, 0xe6, 0x8e, 0xe0, 0xad, - 0x73, 0x5e, 0xae, 0x61, 0x6e, 0x2d, 0x2d, 0xcb, 0x73, 0xa3, 0x14, 0x95, 0x95, 0xa4, 0xab, 0xf7, - 0x14, 0x81, 0xaf, 0x0d, 0x65, 0x49, 0xd6, 0xa3, 0x64, 0x8c, 0xf3, 0x90, 0x9f, 0x83, 0x09, 0x8f, - 0x9a, 0x7d, 0x83, 0x86, 0xb9, 0x80, 0xe7, 0x22, 0xe2, 0xda, 0x90, 0x38, 0x05, 0x53, 0x9c, 0x43, - 0x8c, 0x84, 0xcd, 0x91, 0x8c, 0x84, 0xdf, 0x80, 0x09, 0xd3, 0xd3, 0x2d, 0x87, 0x9a, 0xf7, 0x1c, - 0x1e, 0x91, 0x20, 0xe3, 0x03, 0x43, 0x0b, 0xf9, 0x52, 0x82, 0x8a, 0x29, 0x6e, 0xed, 0x0f, 0x0a, - 0x40, 0x06, 0x23, 0xe1, 0xc9, 0x2e, 0x54, 0x1d, 0x6e, 0x3d, 0xcb, 0x7d, 0xb4, 0x76, 0xcc, 0x08, - 0x27, 0x86, 0x35, 0x99, 0x20, 0xf1, 0x89, 0x03, 0x75, 0xfa, 0x30, 0xa0, 0x9e, 0x13, 0xee, 0x8c, - 0x39, 0x9d, 0x63, 0xbc, 0xc5, 0x6a, 0x42, 0x22, 0x63, 0x28, 0x43, 0xfb, 0xa3, 0x22, 0x34, 0x63, - 0x7c, 0x4f, 0x5a, 0x94, 0xf2, 0xcd, 0xf9, 0xc2, 0x68, 0xb5, 0xe5, 0xd9, 0xf2, 0x0f, 0x8d, 0x6d, - 0xce, 0x97, 0x24, 0x5c, 0xc3, 0x38, 0x1f, 0x99, 0x07, 0xe8, 0xea, 0x7e, 0x40, 0x3d, 0x3e, 0x7b, - 0xa7, 0xb6, 0xc4, 0xaf, 0x87, 0x14, 0x8c, 0x71, 0x91, 0xeb, 0xf2, 0x20, 0xf6, 0x72, 0xf2, 0x08, - 0xc3, 0x21, 0xa7, 0xac, 0x57, 0x4e, 0xe1, 0x94, 0x75, 0xd2, 0x81, 0x29, 0x55, 0x6a, 0x45, 0x3d, - 0xd9, 0x01, 0x77, 0x62, 0xfd, 0x93, 0x82, 0xc0, 0x01, 0x50, 0xed, 0xbb, 0x05, 0x18, 0x4f, 0x98, - 0x4c, 0xc4, 0xe1, 0x83, 0x6a, 0x1f, 0x47, 0xe2, 0xf0, 0xc1, 0xd8, 0xf6, 0x8b, 0x57, 0xa0, 0x2a, - 0x1a, 0x28, 0x1d, 0x9e, 0x29, 0x9a, 0x10, 0x25, 0x95, 0x8d, 0x85, 0xd2, 0x28, 0x9b, 0x1e, 0x0b, - 0xa5, 0xd5, 0x16, 0x15, 0x5d, 0xf8, 0x3a, 0x44, 0xe9, 0x64, 0x4b, 0xc7, 0x7c, 0x1d, 0x22, 0x1d, - 0x43, 0x0e, 0xed, 0xf7, 0x78, 0xb9, 0x03, 0xef, 0x20, 0x5c, 0x0b, 0x76, 0xa0, 0x26, 0x43, 0xf2, - 0xe4, 0xaf, 0xf1, 0x4e, 0x0e, 0x3b, 0x0e, 0xc7, 0x91, 0xc1, 0x67, 0xba, 0xb1, 0x77, 0x6f, 0x67, - 0x07, 0x15, 0x3a, 0xb9, 0x09, 0x0d, 0xd7, 0x59, 0xd6, 0x2d, 0xbb, 0xef, 0xa9, 0x99, 0xe1, 0x27, - 0xd8, 0x58, 0x77, 
0x4f, 0x25, 0x3e, 0x3a, 0x9c, 0xb9, 0x14, 0xbe, 0x24, 0x0a, 0x89, 0x51, 0x4e, - 0xed, 0x2f, 0x17, 0xe0, 0x22, 0xba, 0xb6, 0x6d, 0x39, 0x9d, 0xa4, 0xb3, 0x8c, 0xd8, 0x30, 0xd1, - 0xd5, 0x1f, 0x6e, 0x39, 0xfa, 0xbe, 0x6e, 0xd9, 0xfa, 0xb6, 0x4d, 0x9f, 0xb8, 0x96, 0xeb, 0x07, - 0x96, 0x3d, 0x2b, 0xae, 0x83, 0x9b, 0x5d, 0x75, 0x82, 0x7b, 0x5e, 0x3b, 0xf0, 0x2c, 0xa7, 0x23, - 0x06, 0xbd, 0xf5, 0x04, 0x16, 0xa6, 0xb0, 0xb5, 0x3f, 0x2e, 0x01, 0x0f, 0x0b, 0x23, 0x6f, 0x42, - 0xa3, 0x4b, 0x8d, 0x5d, 0xdd, 0xb1, 0x7c, 0x75, 0x8c, 0xeb, 0x65, 0x56, 0xaf, 0x75, 0x95, 0xf8, - 0x88, 0x7d, 0x8a, 0x85, 0xf6, 0x1a, 0xdf, 0x79, 0x11, 0xf1, 0x12, 0x03, 0xaa, 0x1d, 0xdf, 0xd7, - 0x7b, 0x56, 0xee, 0xa8, 0x04, 0x71, 0x6c, 0xa6, 0x18, 0x8e, 0xc4, 0x33, 0x4a, 0x68, 0x62, 0x40, - 0xa5, 0x67, 0xeb, 0x96, 0x93, 0xfb, 0xfa, 0x22, 0x56, 0x83, 0x0d, 0x86, 0x24, 0x8c, 0x6b, 0xfc, - 0x11, 0x05, 0x36, 0xe9, 0x43, 0xd3, 0x37, 0x3c, 0xbd, 0xeb, 0xef, 0xea, 0xf3, 0xaf, 0xbf, 0x91, - 0x5b, 0x5d, 0x8d, 0x44, 0x89, 0xd9, 0x73, 0x11, 0x17, 0xd6, 0xdb, 0xb7, 0x16, 0xe6, 0x5f, 0x7f, - 0x03, 0xe3, 0x72, 0xe2, 0x62, 0x5f, 0x7f, 0x75, 0x5e, 0x8e, 0x20, 0xa7, 0x2e, 0xf6, 0xf5, 0x57, - 0xe7, 0x31, 0x2e, 0x47, 0xfb, 0xdf, 0x05, 0x68, 0x84, 0xbc, 0x64, 0x0b, 0x80, 0x8d, 0x65, 0xf2, - 0xa0, 0xcb, 0x13, 0x5d, 0x3a, 0xc1, 0xed, 0x13, 0x5b, 0x61, 0x66, 0x8c, 0x01, 0x65, 0x9c, 0x04, - 0x5a, 0x3c, 0xed, 0x93, 0x40, 0xe7, 0xa0, 0xb1, 0xab, 0x3b, 0xa6, 0xbf, 0xab, 0xef, 0x89, 0x21, - 0x3d, 0x76, 0x36, 0xee, 0x2d, 0x45, 0xc0, 0x88, 0x47, 0xfb, 0xa7, 0x55, 0x10, 0xa1, 0x04, 0x6c, - 0xd0, 0x31, 0x2d, 0x5f, 0xc4, 0xb2, 0x17, 0x78, 0xce, 0x70, 0xd0, 0x59, 0x92, 0xe9, 0x18, 0x72, - 0x90, 0xcb, 0x50, 0xea, 0x5a, 0x8e, 0xf4, 0x3d, 0x71, 0xd3, 0xe3, 0xba, 0xe5, 0x20, 0x4b, 0xe3, - 0x24, 0xfd, 0xa1, 0x0c, 0x43, 0x14, 0x24, 0xfd, 0x21, 0xb2, 0x34, 0xf2, 0x75, 0x98, 0xb4, 0x5d, - 0x77, 0x8f, 0x0d, 0x1f, 0x2a, 0x5a, 0x51, 0xf8, 0x81, 0xb9, 0x31, 0x60, 0x2d, 0x49, 0xc2, 0x34, - 0x2f, 0xd9, 0x82, 0xe7, 0x3f, 0xa6, 0x9e, 0x2b, 0xc7, 
0xcb, 0xb6, 0x4d, 0x69, 0x4f, 0xc1, 0x08, - 0x65, 0x8e, 0x07, 0x3d, 0xfe, 0x62, 0x36, 0x0b, 0x0e, 0xcb, 0xcb, 0xc3, 0xa7, 0x75, 0xaf, 0x43, - 0x83, 0x0d, 0xcf, 0x35, 0xa8, 0xef, 0x5b, 0x4e, 0x47, 0xc1, 0x56, 0x23, 0xd8, 0xcd, 0x6c, 0x16, - 0x1c, 0x96, 0x97, 0xbc, 0x07, 0xd3, 0x82, 0x24, 0xd4, 0x96, 0x05, 0x31, 0xcc, 0x58, 0xb6, 0xba, - 0xf5, 0x6f, 0x5c, 0x78, 0x78, 0x36, 0x87, 0xf0, 0xe0, 0xd0, 0xdc, 0xe4, 0x36, 0x4c, 0x29, 0xff, - 0xde, 0x06, 0xf5, 0xda, 0x61, 0x78, 0xc9, 0x78, 0xeb, 0x1a, 0x5b, 0x79, 0x2f, 0xd1, 0x9e, 0x47, - 0x8d, 0xb8, 0x9f, 0x54, 0x71, 0xe1, 0x40, 0x3e, 0x82, 0x70, 0x89, 0xc7, 0x90, 0x6c, 0xf5, 0x16, - 0x5d, 0xd7, 0x36, 0xdd, 0x07, 0x8e, 0xaa, 0xbb, 0x50, 0x31, 0xb9, 0x4b, 0xaf, 0x9d, 0xc9, 0x81, - 0x43, 0x72, 0xb2, 0x9a, 0x73, 0xca, 0x92, 0xfb, 0xc0, 0x49, 0xa3, 0x42, 0x54, 0xf3, 0xf6, 0x10, - 0x1e, 0x1c, 0x9a, 0x9b, 0x2c, 0x03, 0x49, 0xd7, 0x60, 0xab, 0x27, 0x9d, 0xce, 0x97, 0xc4, 0x99, - 0x35, 0x69, 0x2a, 0x66, 0xe4, 0x20, 0x6b, 0x70, 0x21, 0x9d, 0xca, 0xc4, 0x49, 0xff, 0x33, 0x3f, - 0xad, 0x16, 0x33, 0xe8, 0x98, 0x99, 0x4b, 0xfb, 0x67, 0x45, 0x18, 0x4f, 0x1c, 0x72, 0xf0, 0xcc, - 0x6d, 0x26, 0x67, 0x6b, 0x81, 0xae, 0xdf, 0x59, 0x5d, 0xba, 0x45, 0x75, 0x93, 0x7a, 0x77, 0xa8, - 0x3a, 0x90, 0x42, 0x4c, 0x8b, 0x09, 0x0a, 0xa6, 0x38, 0xc9, 0x0e, 0x54, 0x84, 0x65, 0x3b, 0xef, - 0xf5, 0x25, 0xaa, 0x8d, 0xb8, 0x79, 0x5b, 0xde, 0xf9, 0xe3, 0x7a, 0x14, 0x05, 0xbc, 0x16, 0xc0, - 0x58, 0x9c, 0x83, 0x0d, 0x24, 0x91, 0xda, 0x5b, 0x4b, 0xa8, 0xbc, 0xab, 0x50, 0x0a, 0x82, 0x51, - 0xb7, 0xa9, 0x0b, 0x4f, 0xc9, 0xe6, 0x1a, 0x32, 0x0c, 0x6d, 0x87, 0x7d, 0x3b, 0xdf, 0xb7, 0x5c, - 0x47, 0x9e, 0x59, 0xbe, 0x05, 0xb5, 0x40, 0x1a, 0x0b, 0x47, 0xdb, 0x66, 0xcf, 0x75, 0x25, 0x65, - 0x28, 0x54, 0x58, 0xda, 0x7f, 0x2c, 0x42, 0x23, 0x5c, 0xd8, 0x1f, 0xe3, 0x2c, 0x70, 0x17, 0x1a, - 0x61, 0x0c, 0x5c, 0xee, 0x1b, 0x11, 0xa3, 0xd0, 0x2c, 0xbe, 0x16, 0x0d, 0x5f, 0x31, 0x92, 0x11, - 0x8f, 0xaf, 0x2b, 0xe5, 0x88, 0xaf, 0xeb, 0x41, 0x2d, 0xf0, 0xac, 0x4e, 0x47, 0xae, 0x12, 
0xf2, - 0x04, 0xd8, 0x85, 0xcd, 0xb5, 0x29, 0x00, 0x65, 0xcb, 0x8a, 0x17, 0x54, 0x62, 0xb4, 0x0f, 0x61, - 0x2a, 0xcd, 0xc9, 0x55, 0x68, 0x63, 0x97, 0x9a, 0x7d, 0x5b, 0xb5, 0x71, 0xa4, 0x42, 0xcb, 0x74, - 0x0c, 0x39, 0xd8, 0x32, 0x9c, 0x7d, 0xa6, 0x8f, 0x5d, 0x47, 0xa9, 0xb1, 0x7c, 0x35, 0xb2, 0x29, - 0xd3, 0x30, 0xa4, 0x6a, 0xff, 0xad, 0x04, 0x97, 0x23, 0xf3, 0xcc, 0xba, 0xee, 0xe8, 0x9d, 0x63, - 0x5c, 0x83, 0xf7, 0xd5, 0xc6, 0xa5, 0x93, 0x5e, 0xe8, 0x50, 0x7a, 0x06, 0x2e, 0x74, 0xf8, 0xbf, - 0x45, 0xe0, 0xf1, 0xba, 0xe4, 0x3b, 0x30, 0xa6, 0xc7, 0x6e, 0x40, 0x95, 0x9f, 0xf3, 0x66, 0xee, - 0xcf, 0xc9, 0xc3, 0x82, 0xc3, 0x90, 0xad, 0x78, 0x2a, 0x26, 0x04, 0x12, 0x17, 0xea, 0x3b, 0xba, - 0x6d, 0x33, 0x5d, 0x28, 0xb7, 0xbb, 0x29, 0x21, 0x9c, 0x77, 0xf3, 0x65, 0x09, 0x8d, 0xa1, 0x10, - 0xf2, 0x49, 0x01, 0xc6, 0xbd, 0xf8, 0x72, 0x4d, 0x7e, 0x90, 0x3c, 0xc1, 0x08, 0x31, 0xb4, 0x78, - 0x80, 0x58, 0x7c, 0x4d, 0x98, 0x94, 0xa9, 0xfd, 0xd7, 0x02, 0x8c, 0xb7, 0x6d, 0xcb, 0xb4, 0x9c, - 0xce, 0x19, 0xde, 0x27, 0x71, 0x0f, 0x2a, 0xbe, 0x6d, 0x99, 0x74, 0xc4, 0xd9, 0x44, 0xcc, 0x63, - 0x0c, 0x00, 0x05, 0x4e, 0xf2, 0x82, 0x8a, 0xd2, 0x31, 0x2e, 0xa8, 0xf8, 0x61, 0x15, 0x64, 0xe4, - 0x39, 0xe9, 0x43, 0xa3, 0xa3, 0xce, 0xbd, 0x97, 0x75, 0xbc, 0x95, 0xe3, 0xcc, 0xc4, 0xc4, 0x09, - 0xfa, 0x62, 0xec, 0x0f, 0x13, 0x31, 0x92, 0x44, 0x68, 0xf2, 0xea, 0xdd, 0xa5, 0x9c, 0x57, 0xef, - 0x0a, 0x71, 0x83, 0x97, 0xef, 0xea, 0x50, 0xde, 0x0d, 0x82, 0x9e, 0xec, 0x4c, 0xa3, 0x6f, 0x2d, - 0x88, 0x8e, 0xed, 0x11, 0x3a, 0x11, 0x7b, 0x47, 0x0e, 0xcd, 0x44, 0x38, 0x7a, 0x78, 0xd5, 0xda, - 0x62, 0xae, 0xc0, 0x87, 0xb8, 0x08, 0xf6, 0x8e, 0x1c, 0x9a, 0xfc, 0x32, 0x34, 0x03, 0x4f, 0x77, - 0xfc, 0x1d, 0xd7, 0xeb, 0x52, 0x4f, 0xae, 0x51, 0x97, 0x73, 0xdc, 0x3e, 0xbb, 0x19, 0xa1, 0x09, - 0x8f, 0x6a, 0x22, 0x09, 0xe3, 0xd2, 0xc8, 0x1e, 0xd4, 0xfb, 0xa6, 0x28, 0x98, 0x34, 0x83, 0x2d, - 0xe4, 0xb9, 0x50, 0x38, 0x16, 0xd6, 0xa0, 0xde, 0x30, 0x14, 0x90, 0xbc, 0x55, 0xb0, 0x76, 0x5a, - 0xb7, 0x0a, 0xc6, 0x7b, 0x63, 
0xd6, 0x99, 0x22, 0xa4, 0x2b, 0xf5, 0x5a, 0xa7, 0x23, 0xa3, 0xb2, - 0x96, 0x73, 0xab, 0x9c, 0x42, 0x64, 0x33, 0xd4, 0x8d, 0x9d, 0x0e, 0x2a, 0x19, 0x5a, 0x17, 0xa4, - 0xb7, 0x83, 0x18, 0x89, 0xbb, 0x77, 0xc4, 0x46, 0xb7, 0xb9, 0xe3, 0x8d, 0x07, 0xe1, 0x25, 0x30, - 0xb1, 0xb3, 0xbf, 0x33, 0x2f, 0xd9, 0xd1, 0xfe, 0x53, 0x11, 0x4a, 0x9b, 0x6b, 0x6d, 0x71, 0x9e, - 0x27, 0xbf, 0xd8, 0x8a, 0xb6, 0xf7, 0xac, 0xde, 0x7d, 0xea, 0x59, 0x3b, 0x07, 0x72, 0xe9, 0x1d, - 0x3b, 0xcf, 0x33, 0xcd, 0x81, 0x19, 0xb9, 0xc8, 0xfb, 0x30, 0x66, 0xe8, 0x8b, 0xd4, 0x0b, 0x46, - 0x31, 0x2c, 0xf0, 0x1d, 0xbd, 0x8b, 0x0b, 0x51, 0x76, 0x4c, 0x80, 0x91, 0x2d, 0x00, 0x23, 0x82, - 0x2e, 0x9d, 0xd8, 0x1c, 0x12, 0x03, 0x8e, 0x01, 0x11, 0x84, 0xc6, 0x1e, 0x63, 0xe5, 0xa8, 0xe5, - 0x93, 0xa0, 0xf2, 0x9e, 0x73, 0x47, 0xe5, 0xc5, 0x08, 0x46, 0x73, 0x60, 0x3c, 0x71, 0x21, 0x0f, - 0xf9, 0x1a, 0xd4, 0xdd, 0x5e, 0x6c, 0x38, 0x6d, 0xf0, 0xf8, 0xcf, 0xfa, 0x3d, 0x99, 0xf6, 0xe8, - 0x70, 0x66, 0x7c, 0xcd, 0xed, 0x58, 0x86, 0x4a, 0xc0, 0x90, 0x9d, 0x68, 0x50, 0xe5, 0xdb, 0xf0, - 0xd4, 0x75, 0x3c, 0x7c, 0xee, 0xe0, 0x37, 0x66, 0xf8, 0x28, 0x29, 0xda, 0xaf, 0x94, 0x21, 0xf2, - 0x11, 0x12, 0x1f, 0xaa, 0x62, 0x9b, 0x81, 0x1c, 0xb9, 0xcf, 0x74, 0x47, 0x83, 0x14, 0x45, 0x3a, - 0x50, 0xfa, 0xd0, 0xdd, 0xce, 0x3d, 0x70, 0xc7, 0xf6, 0xdf, 0x0b, 0x5b, 0x59, 0x2c, 0x01, 0x99, - 0x04, 0xf2, 0xb7, 0x0b, 0x70, 0xce, 0x4f, 0xab, 0xbe, 0xb2, 0x3b, 0x60, 0x7e, 0x1d, 0x3f, 0xad, - 0x4c, 0xcb, 0x40, 0xdd, 0x61, 0x64, 0x1c, 0x2c, 0x0b, 0x6b, 0x7f, 0xe1, 0xbc, 0x93, 0xdd, 0x69, - 0x25, 0xe7, 0x25, 0x92, 0xc9, 0xf6, 0x4f, 0xa6, 0xa1, 0x14, 0xa5, 0xfd, 0x5a, 0x11, 0x9a, 0xb1, - 0xd1, 0x3a, 0xf7, 0x2d, 0x4f, 0x0f, 0x53, 0xb7, 0x3c, 0x6d, 0x8c, 0xee, 0xcb, 0x8e, 0x4a, 0x75, - 0xd6, 0x17, 0x3d, 0xfd, 0xcb, 0x22, 0x94, 0xb6, 0x96, 0x96, 0x93, 0x8b, 0xd6, 0xc2, 0x53, 0x58, - 0xb4, 0xee, 0x42, 0x6d, 0xbb, 0x6f, 0xd9, 0x81, 0xe5, 0xe4, 0x3e, 0x21, 0x44, 0x5d, 0x8a, 0x25, - 0x7d, 0x1d, 0x02, 0x15, 0x15, 0x3c, 0xe9, 0x40, 0xad, 0x23, 0x8e, 
0x68, 0xcc, 0x1d, 0xe1, 0x27, - 0x8f, 0x7a, 0x14, 0x82, 0xe4, 0x0b, 0x2a, 0x74, 0xed, 0x00, 0xe4, 0xed, 0xfe, 0x4f, 0xbd, 0x35, - 0xb5, 0x5f, 0x86, 0x50, 0x0b, 0x78, 0xfa, 0xc2, 0xff, 0x47, 0x01, 0x92, 0x8a, 0xcf, 0xd3, 0xef, - 0x4d, 0x7b, 0xe9, 0xde, 0xb4, 0x74, 0x1a, 0x3f, 0x5f, 0x76, 0x87, 0xd2, 0xfe, 0x7d, 0x01, 0x52, - 0x7b, 0xc3, 0xc8, 0x1b, 0xf2, 0xb4, 0xaf, 0x64, 0x28, 0x95, 0x3a, 0xed, 0x8b, 0x24, 0xb9, 0x63, - 0xa7, 0x7e, 0x7d, 0xca, 0x96, 0x6b, 0x71, 0x07, 0x9a, 0x2c, 0xfe, 0xdd, 0xd1, 0x97, 0x6b, 0x59, - 0xee, 0x38, 0x19, 0xee, 0x17, 0x27, 0x61, 0x52, 0xae, 0xf6, 0x4f, 0x8a, 0x50, 0x7d, 0x6a, 0x5b, - 0xd5, 0x69, 0x22, 0x02, 0x73, 0x31, 0xe7, 0x68, 0x3f, 0x34, 0xfe, 0xb2, 0x9b, 0x8a, 0xbf, 0xcc, - 0x7b, 0x37, 0xf1, 0x13, 0xa2, 0x2f, 0xff, 0x6d, 0x01, 0xe4, 0x5c, 0xb3, 0xea, 0xf8, 0x81, 0xee, - 0x18, 0x94, 0x18, 0xe1, 0xc4, 0x96, 0x37, 0xcc, 0x47, 0x86, 0xc2, 0x09, 0x5d, 0x86, 0x3f, 0xab, - 0x89, 0x8c, 0xfc, 0x34, 0xd4, 0x77, 0x5d, 0x3f, 0xe0, 0x93, 0x57, 0x31, 0x69, 0x32, 0xbb, 0x25, - 0xd3, 0x31, 0xe4, 0x48, 0xbb, 0xb3, 0x2b, 0xc3, 0xdd, 0xd9, 0xda, 0x6f, 0x17, 0x61, 0xec, 0xcb, - 0xb2, 0xdf, 0x3e, 0x2b, 0x5e, 0xb5, 0x94, 0x33, 0x5e, 0xb5, 0x7c, 0x92, 0x78, 0x55, 0xed, 0xfb, - 0x05, 0x80, 0xa7, 0xb6, 0xd9, 0xdf, 0x4c, 0x86, 0x92, 0xe6, 0xee, 0x57, 0xd9, 0x81, 0xa4, 0xff, - 0xa8, 0xa2, 0xaa, 0xc4, 0xc3, 0x48, 0x3f, 0x2d, 0xc0, 0x84, 0x9e, 0x08, 0xcd, 0xcc, 0xad, 0x2f, - 0xa7, 0x22, 0x3d, 0xc3, 0xc8, 0xa2, 0x64, 0x3a, 0xa6, 0xc4, 0x92, 0xb7, 0xa2, 0x83, 0xa6, 0xef, - 0x46, 0xdd, 0x7e, 0xe0, 0x84, 0x68, 0xae, 0xbb, 0x25, 0x38, 0x9f, 0x10, 0x0a, 0x5b, 0x3a, 0x95, - 0x50, 0xd8, 0xf8, 0x26, 0xbf, 0xf2, 0x63, 0x37, 0xf9, 0xed, 0x43, 0x63, 0xc7, 0x73, 0xbb, 0x3c, - 0xda, 0x54, 0xde, 0x6a, 0x7c, 0x33, 0xc7, 0x44, 0x19, 0xdd, 0xe7, 0x1f, 0x19, 0xae, 0x96, 0x15, - 0x3e, 0x46, 0xa2, 0xb8, 0xad, 0xdf, 0x15, 0x52, 0xab, 0xa7, 0x29, 0x35, 0x1c, 0x4b, 0x36, 0x05, - 0x3a, 0x2a, 0x31, 0xc9, 0x08, 0xd3, 0xda, 0xd3, 0x89, 0x30, 0xd5, 0x7e, 0xb7, 0xaa, 0x06, 0xb0, - 0x67, 
0xee, 0x4c, 0xd3, 0xdc, 0x5b, 0xb3, 0xe3, 0xfb, 0xaa, 0xcb, 0x27, 0xd8, 0x57, 0x5d, 0x39, - 0xee, 0xbe, 0xea, 0xea, 0x13, 0x02, 0x3f, 0xd3, 0x9b, 0x9e, 0x6b, 0x4f, 0x71, 0xd3, 0x73, 0xfd, - 0x74, 0x36, 0x3d, 0x37, 0x4e, 0xb6, 0xe9, 0x59, 0xee, 0x1b, 0x0e, 0xb3, 0x43, 0x72, 0xd3, 0xf3, - 0x62, 0x92, 0x8c, 0x69, 0xfe, 0xac, 0x7d, 0xd3, 0xcd, 0x13, 0xee, 0x9b, 0x4e, 0xed, 0x72, 0x1e, - 0x1b, 0x69, 0x97, 0xf3, 0xf8, 0xb1, 0x76, 0x39, 0x1f, 0x96, 0x20, 0xb5, 0x74, 0xfe, 0xca, 0x4d, - 0xf6, 0xff, 0x95, 0x9b, 0xec, 0xb3, 0x22, 0x44, 0xc3, 0xe6, 0x09, 0xc3, 0x88, 0xde, 0x83, 0x7a, - 0x57, 0x7f, 0xb8, 0x44, 0x6d, 0xfd, 0x20, 0xcf, 0xc5, 0xbd, 0xeb, 0x12, 0x03, 0x43, 0x34, 0xe2, - 0x03, 0x58, 0xe1, 0xe1, 0xf9, 0xb9, 0x1d, 0x0e, 0xd1, 0x39, 0xfc, 0xc2, 0xa4, 0x19, 0xbd, 0x63, - 0x4c, 0x8c, 0xf6, 0x6f, 0x8a, 0x20, 0x6f, 0x59, 0x20, 0x14, 0x2a, 0x3b, 0xd6, 0x43, 0x6a, 0xe6, - 0x0e, 0x4e, 0x8e, 0x5d, 0xa7, 0x2e, 0x3c, 0x2a, 0x3c, 0x01, 0x05, 0x3a, 0x37, 0x95, 0x0b, 0x0f, - 0x99, 0x6c, 0xbf, 0x1c, 0xa6, 0xf2, 0xb8, 0xa7, 0x4d, 0x9a, 0xca, 0x45, 0x12, 0x2a, 0x19, 0xc2, - 0x32, 0xcf, 0x83, 0x25, 0x72, 0x3b, 0x04, 0x13, 0x41, 0x17, 0xca, 0x32, 0xef, 0x8b, 0x63, 0x0e, - 0xa4, 0x8c, 0xd6, 0x2f, 0x7d, 0xef, 0x07, 0xd7, 0x9e, 0xfb, 0xfe, 0x0f, 0xae, 0x3d, 0xf7, 0xf9, - 0x0f, 0xae, 0x3d, 0xf7, 0x2b, 0x47, 0xd7, 0x0a, 0xdf, 0x3b, 0xba, 0x56, 0xf8, 0xfe, 0xd1, 0xb5, - 0xc2, 0xe7, 0x47, 0xd7, 0x0a, 0xff, 0xf9, 0xe8, 0x5a, 0xe1, 0x6f, 0xfc, 0x97, 0x6b, 0xcf, 0xfd, - 0xe2, 0x9b, 0x51, 0x11, 0xe6, 0x54, 0x11, 0xe6, 0x94, 0xc0, 0xb9, 0xde, 0x5e, 0x67, 0x8e, 0x15, - 0x21, 0x4a, 0x51, 0x45, 0xf8, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x90, 0x35, 0xbe, 0x7d, 0xfe, - 0x99, 0x00, 0x00, + // 7889 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x6b, 0x6c, 0x25, 0xd9, + 0xb5, 0xd6, 0x9c, 0x97, 0xcf, 0x39, 0xeb, 0xf8, 0xd5, 0xbb, 0x1f, 0xe3, 0xee, 0xe9, 0x69, 0xf7, + 0xad, 0xb9, 0x33, 0xb7, 0x2f, 0xf7, 0x5e, 0x9b, 0xf1, 0x9d, 0x57, 0xee, 
0xbd, 0xc9, 0x8c, 0x8f, + 0xdd, 0x76, 0xbb, 0xdb, 0xee, 0x76, 0xd6, 0xb1, 0x7b, 0x26, 0x77, 0x48, 0x86, 0x72, 0xd5, 0xf6, + 0x71, 0x8d, 0xeb, 0x54, 0x9d, 0xa9, 0xaa, 0xe3, 0x6e, 0x4f, 0x40, 0x79, 0x0c, 0x68, 0x06, 0x01, + 0x02, 0xe5, 0x57, 0x24, 0x14, 0x10, 0x08, 0x29, 0x3f, 0xa2, 0xf0, 0x03, 0x29, 0xfc, 0x40, 0x82, + 0x10, 0x84, 0x20, 0x20, 0x1e, 0x11, 0x42, 0x62, 0xf8, 0x63, 0x11, 0x23, 0x7e, 0x80, 0x04, 0x8a, + 0x88, 0x20, 0xa1, 0x15, 0x11, 0xb4, 0x5f, 0xf5, 0x3a, 0x75, 0xba, 0xed, 0x53, 0x76, 0x4f, 0x0f, + 0xcc, 0xbf, 0xaa, 0xbd, 0xd6, 0xfe, 0xd6, 0xae, 0x5d, 0xbb, 0xf6, 0x5e, 0x7b, 0xad, 0xb5, 0x57, + 0xc1, 0x72, 0xdb, 0x0a, 0x76, 0x7a, 0x5b, 0x33, 0x86, 0xdb, 0x99, 0x75, 0x7a, 0x1d, 0xbd, 0xeb, + 0xb9, 0xef, 0xf2, 0x8b, 0x6d, 0xdb, 0xbd, 0x37, 0xdb, 0xdd, 0x6d, 0xcf, 0xea, 0x5d, 0xcb, 0x8f, + 0x4a, 0xf6, 0x5e, 0xd4, 0xed, 0xee, 0x8e, 0xfe, 0xe2, 0x6c, 0x9b, 0x3a, 0xd4, 0xd3, 0x03, 0x6a, + 0xce, 0x74, 0x3d, 0x37, 0x70, 0xc9, 0xab, 0x11, 0xd0, 0x8c, 0x02, 0x9a, 0x51, 0xd5, 0x66, 0xba, + 0xbb, 0xed, 0x19, 0x06, 0x14, 0x95, 0x28, 0xa0, 0x4b, 0xbf, 0x17, 0x6b, 0x41, 0xdb, 0x6d, 0xbb, + 0xb3, 0x1c, 0x6f, 0xab, 0xb7, 0xcd, 0xef, 0xf8, 0x0d, 0xbf, 0x12, 0x72, 0x2e, 0x69, 0xbb, 0xaf, + 0xf9, 0x33, 0x96, 0xcb, 0x9a, 0x35, 0x6b, 0xb8, 0x1e, 0x9d, 0xdd, 0xeb, 0x6b, 0xcb, 0xa5, 0x97, + 0x22, 0x9e, 0x8e, 0x6e, 0xec, 0x58, 0x0e, 0xf5, 0xf6, 0xd5, 0xb3, 0xcc, 0x7a, 0xd4, 0x77, 0x7b, + 0x9e, 0x41, 0x8f, 0x55, 0xcb, 0x9f, 0xed, 0xd0, 0x40, 0xcf, 0x92, 0x35, 0x3b, 0xa8, 0x96, 0xd7, + 0x73, 0x02, 0xab, 0xd3, 0x2f, 0xe6, 0x95, 0x47, 0x55, 0xf0, 0x8d, 0x1d, 0xda, 0xd1, 0xfb, 0xea, + 0xfd, 0xfe, 0xa0, 0x7a, 0xbd, 0xc0, 0xb2, 0x67, 0x2d, 0x27, 0xf0, 0x03, 0x2f, 0x5d, 0x49, 0xfb, + 0x11, 0xc0, 0xd9, 0xf9, 0x2d, 0x3f, 0xf0, 0x74, 0x23, 0x58, 0x77, 0xcd, 0x0d, 0xda, 0xe9, 0xda, + 0x7a, 0x40, 0xc9, 0x2e, 0xd4, 0xd8, 0x03, 0x99, 0x7a, 0xa0, 0x4f, 0x15, 0xae, 0x16, 0xae, 0x35, + 0xe6, 0xe6, 0x67, 0x86, 0x7c, 0x81, 0x33, 0x6b, 0x12, 0xa8, 0x39, 0x7a, 0x78, 0x30, 0x5d, 0x53, + 0x77, 0x18, 
0x0a, 0x20, 0xdf, 0x2e, 0xc0, 0xa8, 0xe3, 0x9a, 0xb4, 0x45, 0x6d, 0x6a, 0x04, 0xae, + 0x37, 0x55, 0xbc, 0x5a, 0xba, 0xd6, 0x98, 0xfb, 0xca, 0xd0, 0x12, 0x33, 0x9e, 0x68, 0xe6, 0x76, + 0x4c, 0xc0, 0x75, 0x27, 0xf0, 0xf6, 0x9b, 0xe7, 0x7e, 0x7c, 0x30, 0xfd, 0xd4, 0xe1, 0xc1, 0xf4, + 0x68, 0x9c, 0x84, 0x89, 0x96, 0x90, 0x4d, 0x68, 0x04, 0xae, 0xcd, 0xba, 0xcc, 0x72, 0x1d, 0x7f, + 0xaa, 0xc4, 0x1b, 0x76, 0x65, 0x46, 0x74, 0x35, 0x13, 0x3f, 0xc3, 0xc6, 0xd8, 0xcc, 0xde, 0x8b, + 0x33, 0x1b, 0x21, 0x5b, 0xf3, 0xac, 0x04, 0x6e, 0x44, 0x65, 0x3e, 0xc6, 0x71, 0x08, 0x85, 0x09, + 0x9f, 0x1a, 0x3d, 0xcf, 0x0a, 0xf6, 0x17, 0x5c, 0x27, 0xa0, 0xf7, 0x83, 0xa9, 0x32, 0xef, 0xe5, + 0x17, 0xb2, 0xa0, 0xd7, 0x5d, 0xb3, 0x95, 0xe4, 0x6e, 0x9e, 0x3d, 0x3c, 0x98, 0x9e, 0x48, 0x15, + 0x62, 0x1a, 0x93, 0x38, 0x30, 0x69, 0x75, 0xf4, 0x36, 0x5d, 0xef, 0xd9, 0x76, 0x8b, 0x1a, 0x1e, + 0x0d, 0xfc, 0xa9, 0x0a, 0x7f, 0x84, 0x6b, 0x59, 0x72, 0x56, 0x5d, 0x43, 0xb7, 0xef, 0x6c, 0xbd, + 0x4b, 0x8d, 0x00, 0xe9, 0x36, 0xf5, 0xa8, 0x63, 0xd0, 0xe6, 0x94, 0x7c, 0x98, 0xc9, 0x95, 0x14, + 0x12, 0xf6, 0x61, 0x93, 0x65, 0x38, 0xd3, 0xf5, 0x2c, 0x97, 0x37, 0xc1, 0xd6, 0x7d, 0xff, 0xb6, + 0xde, 0xa1, 0x53, 0x23, 0x57, 0x0b, 0xd7, 0xea, 0xcd, 0x8b, 0x12, 0xe6, 0xcc, 0x7a, 0x9a, 0x01, + 0xfb, 0xeb, 0x90, 0x6b, 0x50, 0x53, 0x85, 0x53, 0xd5, 0xab, 0x85, 0x6b, 0x15, 0x31, 0x76, 0x54, + 0x5d, 0x0c, 0xa9, 0x64, 0x09, 0x6a, 0xfa, 0xf6, 0xb6, 0xe5, 0x30, 0xce, 0x1a, 0xef, 0xc2, 0xcb, + 0x59, 0x8f, 0x36, 0x2f, 0x79, 0x04, 0x8e, 0xba, 0xc3, 0xb0, 0x2e, 0xb9, 0x09, 0xc4, 0xa7, 0xde, + 0x9e, 0x65, 0xd0, 0x79, 0xc3, 0x70, 0x7b, 0x4e, 0xc0, 0xdb, 0x5e, 0xe7, 0x6d, 0xbf, 0x24, 0xdb, + 0x4e, 0x5a, 0x7d, 0x1c, 0x98, 0x51, 0x8b, 0xbc, 0x01, 0x93, 0xf2, 0x5b, 0x8d, 0x7a, 0x01, 0x38, + 0xd2, 0x39, 0xd6, 0x91, 0x98, 0xa2, 0x61, 0x1f, 0x37, 0x31, 0xe1, 0xb2, 0xde, 0x0b, 0xdc, 0x0e, + 0x83, 0x4c, 0x0a, 0xdd, 0x70, 0x77, 0xa9, 0x33, 0xd5, 0xb8, 0x5a, 0xb8, 0x56, 0x6b, 0x5e, 0x3d, + 0x3c, 0x98, 0xbe, 0x3c, 0xff, 0x10, 0x3e, 0x7c, 
0x28, 0x0a, 0xb9, 0x03, 0x75, 0xd3, 0xf1, 0xd7, + 0x5d, 0xdb, 0x32, 0xf6, 0xa7, 0x46, 0x79, 0x03, 0x5f, 0x94, 0x8f, 0x5a, 0x5f, 0xbc, 0xdd, 0x12, + 0x84, 0x07, 0x07, 0xd3, 0x97, 0xfb, 0xa7, 0xd4, 0x99, 0x90, 0x8e, 0x11, 0x06, 0x59, 0xe3, 0x80, + 0x0b, 0xae, 0xb3, 0x6d, 0xb5, 0xa7, 0xc6, 0xf8, 0xdb, 0xb8, 0x3a, 0x60, 0x40, 0x2f, 0xde, 0x6e, + 0x09, 0xbe, 0xe6, 0x98, 0x14, 0x27, 0x6e, 0x31, 0x42, 0x20, 0x26, 0x8c, 0xab, 0xc9, 0x78, 0xc1, + 0xd6, 0xad, 0x8e, 0x3f, 0x35, 0xce, 0x07, 0xef, 0x6f, 0x0e, 0xc0, 0xc4, 0x38, 0x73, 0xf3, 0x82, + 0x7c, 0x94, 0xf1, 0x44, 0xb1, 0x8f, 0x29, 0xcc, 0x4b, 0xaf, 0xc3, 0x99, 0xbe, 0xb9, 0x81, 0x4c, + 0x42, 0x69, 0x97, 0xee, 0xf3, 0xa9, 0xaf, 0x8e, 0xec, 0x92, 0x9c, 0x83, 0xca, 0x9e, 0x6e, 0xf7, + 0xe8, 0x54, 0x91, 0x97, 0x89, 0x9b, 0x3f, 0x28, 0xbe, 0x56, 0xd0, 0xfe, 0x56, 0x09, 0x46, 0xd5, + 0x8c, 0xd3, 0xb2, 0x9c, 0x5d, 0xf2, 0x26, 0x94, 0x6c, 0xb7, 0x2d, 0xe7, 0xcd, 0x3f, 0x1a, 0x7a, + 0x16, 0x5b, 0x75, 0xdb, 0xcd, 0xea, 0xe1, 0xc1, 0x74, 0x69, 0xd5, 0x6d, 0x23, 0x43, 0x24, 0x06, + 0x54, 0x76, 0xf5, 0xed, 0x5d, 0x9d, 0xb7, 0xa1, 0x31, 0xd7, 0x1c, 0x1a, 0xfa, 0x16, 0x43, 0x61, + 0x6d, 0x6d, 0xd6, 0x0f, 0x0f, 0xa6, 0x2b, 0xfc, 0x16, 0x05, 0x36, 0x71, 0xa1, 0xbe, 0x65, 0xeb, + 0xc6, 0xee, 0x8e, 0x6b, 0xd3, 0xa9, 0x52, 0x4e, 0x41, 0x4d, 0x85, 0x24, 0x5e, 0x73, 0x78, 0x8b, + 0x91, 0x0c, 0x62, 0xc0, 0x48, 0xcf, 0xf4, 0x2d, 0x67, 0x57, 0xce, 0x81, 0xaf, 0x0f, 0x2d, 0x6d, + 0x73, 0x91, 0x3f, 0x13, 0x1c, 0x1e, 0x4c, 0x8f, 0x88, 0x6b, 0x94, 0xd0, 0xda, 0x2f, 0x47, 0x61, + 0x5c, 0xbd, 0xa4, 0xbb, 0xd4, 0x0b, 0xe8, 0x7d, 0x72, 0x15, 0xca, 0x0e, 0xfb, 0x34, 0xf9, 0x4b, + 0x6e, 0x8e, 0xca, 0xe1, 0x52, 0xe6, 0x9f, 0x24, 0xa7, 0xb0, 0x96, 0x89, 0xa1, 0x22, 0x3b, 0x7c, + 0xf8, 0x96, 0xb5, 0x38, 0x8c, 0x68, 0x99, 0xb8, 0x46, 0x09, 0x4d, 0xde, 0x86, 0x32, 0x7f, 0x78, + 0xd1, 0xd5, 0x9f, 0x1f, 0x5e, 0x04, 0x7b, 0xf4, 0x1a, 0x7b, 0x02, 0xfe, 0xe0, 0x1c, 0x94, 0x0d, + 0xc5, 0x9e, 0xb9, 0x2d, 0x3b, 0xf6, 0x8f, 0x72, 0x74, 0xec, 0x92, 0x18, 0x8a, 0x9b, 
0x8b, 0x4b, + 0xc8, 0x10, 0xc9, 0x5f, 0x29, 0xc0, 0x19, 0xc3, 0x75, 0x02, 0x9d, 0xe9, 0x19, 0x6a, 0x91, 0x9d, + 0xaa, 0x70, 0x39, 0x37, 0x87, 0x96, 0xb3, 0x90, 0x46, 0x6c, 0x9e, 0x67, 0x6b, 0x46, 0x5f, 0x31, + 0xf6, 0xcb, 0x26, 0x7f, 0xad, 0x00, 0xe7, 0xd9, 0x5c, 0xde, 0xc7, 0xcc, 0x57, 0xa0, 0x93, 0x6d, + 0xd5, 0xc5, 0xc3, 0x83, 0xe9, 0xf3, 0x2b, 0x59, 0xc2, 0x30, 0xbb, 0x0d, 0xac, 0x75, 0x67, 0xf5, + 0x7e, 0xb5, 0x84, 0xaf, 0x6e, 0x8d, 0xb9, 0xd5, 0x93, 0x54, 0x75, 0x9a, 0xcf, 0xc8, 0xa1, 0x9c, + 0xa5, 0xd9, 0x61, 0x56, 0x2b, 0xc8, 0x75, 0xa8, 0xee, 0xb9, 0x76, 0xaf, 0x43, 0xfd, 0xa9, 0x1a, + 0x9f, 0x62, 0x2f, 0x65, 0x4d, 0xb1, 0x77, 0x39, 0x4b, 0x73, 0x42, 0xc2, 0x57, 0xc5, 0xbd, 0x8f, + 0xaa, 0x2e, 0xb1, 0x60, 0xc4, 0xb6, 0x3a, 0x56, 0xe0, 0xf3, 0x85, 0xb3, 0x31, 0x77, 0x7d, 0xe8, + 0xc7, 0x12, 0x9f, 0xe8, 0x2a, 0x07, 0x13, 0x5f, 0x8d, 0xb8, 0x46, 0x29, 0x80, 0x4d, 0x85, 0xbe, + 0xa1, 0xdb, 0x62, 0x61, 0x6d, 0xcc, 0x7d, 0x61, 0xf8, 0xcf, 0x86, 0xa1, 0x34, 0xc7, 0xe4, 0x33, + 0x55, 0xf8, 0x2d, 0x0a, 0x6c, 0xf2, 0x65, 0x18, 0x4f, 0xbc, 0x4d, 0x7f, 0xaa, 0xc1, 0x7b, 0xe7, + 0xd9, 0xac, 0xde, 0x09, 0xb9, 0xa2, 0x95, 0x27, 0x31, 0x42, 0x7c, 0x4c, 0x81, 0x91, 0x5b, 0x50, + 0xf3, 0x2d, 0x93, 0x1a, 0xba, 0xe7, 0x4f, 0x8d, 0x1e, 0x05, 0x78, 0x52, 0x02, 0xd7, 0x5a, 0xb2, + 0x1a, 0x86, 0x00, 0x64, 0x06, 0xa0, 0xab, 0x7b, 0x81, 0x25, 0x14, 0xd5, 0x31, 0xae, 0x34, 0x8d, + 0x1f, 0x1e, 0x4c, 0xc3, 0x7a, 0x58, 0x8a, 0x31, 0x0e, 0xc6, 0xcf, 0xea, 0xae, 0x38, 0xdd, 0x5e, + 0x20, 0x16, 0xd6, 0xba, 0xe0, 0x6f, 0x85, 0xa5, 0x18, 0xe3, 0x20, 0xdf, 0x2f, 0xc0, 0x33, 0xd1, + 0x6d, 0xff, 0x47, 0x36, 0x71, 0xe2, 0x1f, 0xd9, 0xf4, 0xe1, 0xc1, 0xf4, 0x33, 0xad, 0xc1, 0x22, + 0xf1, 0x61, 0xed, 0x21, 0x1f, 0x16, 0x60, 0xbc, 0xd7, 0x35, 0xf5, 0x80, 0xb6, 0x02, 0xb6, 0xe3, + 0x69, 0xef, 0x4f, 0x4d, 0xf2, 0x26, 0x2e, 0x0f, 0x3f, 0x0b, 0x26, 0xe0, 0xa2, 0xd7, 0x9c, 0x2c, + 0xc7, 0x94, 0x58, 0xed, 0x4d, 0x18, 0x9b, 0xef, 0x05, 0x3b, 0xae, 0x67, 0xbd, 0xcf, 0xd5, 0x7f, + 0xb2, 0x04, 0x95, 0x80, 
0xab, 0x71, 0x42, 0x43, 0x78, 0x3e, 0xeb, 0xa5, 0x0b, 0x95, 0xfa, 0x16, + 0xdd, 0x57, 0x7a, 0x89, 0x58, 0xa9, 0x85, 0x5a, 0x27, 0xaa, 0x6b, 0x7f, 0xae, 0x00, 0xd5, 0xa6, + 0x6e, 0xec, 0xba, 0xdb, 0xdb, 0xe4, 0x2d, 0xa8, 0x59, 0x4e, 0x40, 0xbd, 0x3d, 0xdd, 0x96, 0xb0, + 0x33, 0x31, 0xd8, 0x70, 0x43, 0x18, 0x3d, 0x1e, 0xdb, 0x7d, 0x31, 0x41, 0x8b, 0x3d, 0xb9, 0x6b, + 0xe1, 0x9a, 0xf1, 0x8a, 0xc4, 0xc0, 0x10, 0x8d, 0x4c, 0x43, 0xc5, 0x0f, 0x68, 0xd7, 0xe7, 0x6b, + 0xe0, 0x98, 0x68, 0x46, 0x8b, 0x15, 0xa0, 0x28, 0xd7, 0xfe, 0x66, 0x01, 0xea, 0x4d, 0xdd, 0xb7, + 0x0c, 0xf6, 0x94, 0x64, 0x01, 0xca, 0x3d, 0x9f, 0x7a, 0xc7, 0x7b, 0x36, 0xbe, 0x6c, 0x6d, 0xfa, + 0xd4, 0x43, 0x5e, 0x99, 0xdc, 0x81, 0x5a, 0x57, 0xf7, 0xfd, 0x7b, 0xae, 0x67, 0xca, 0xa5, 0xf7, + 0x88, 0x40, 0x62, 0x9b, 0x20, 0xab, 0x62, 0x08, 0xa2, 0x35, 0x20, 0xd2, 0x3d, 0xb4, 0x9f, 0x17, + 0xe0, 0x6c, 0xb3, 0xb7, 0xbd, 0x4d, 0x3d, 0xa9, 0x15, 0x4b, 0x7d, 0x93, 0x42, 0xc5, 0xa3, 0xa6, + 0xe5, 0xcb, 0xb6, 0x2f, 0x0e, 0x3d, 0x50, 0x90, 0xa1, 0x48, 0xf5, 0x96, 0xf7, 0x17, 0x2f, 0x40, + 0x81, 0x4e, 0x7a, 0x50, 0x7f, 0x97, 0xb2, 0xdd, 0x38, 0xd5, 0x3b, 0xf2, 0xe9, 0x6e, 0x0c, 0x2d, + 0xea, 0x26, 0x0d, 0x5a, 0x1c, 0x29, 0xae, 0x4d, 0x87, 0x85, 0x18, 0x49, 0xd2, 0x7e, 0x54, 0x81, + 0xd1, 0x05, 0xb7, 0xb3, 0x65, 0x39, 0xd4, 0xbc, 0x6e, 0xb6, 0x29, 0x79, 0x07, 0xca, 0xd4, 0x6c, + 0x53, 0xf9, 0xb4, 0xc3, 0x2b, 0x1e, 0x0c, 0x2c, 0x52, 0x9f, 0xd8, 0x1d, 0x72, 0x60, 0xb2, 0x0a, + 0xe3, 0xdb, 0x9e, 0xdb, 0x11, 0x73, 0xf9, 0xc6, 0x7e, 0x57, 0xea, 0xce, 0xcd, 0xdf, 0x54, 0x1f, + 0xce, 0x52, 0x82, 0xfa, 0xe0, 0x60, 0x1a, 0xa2, 0x3b, 0x4c, 0xd5, 0x25, 0x6f, 0xc1, 0x54, 0x54, + 0x12, 0x4e, 0x6a, 0x0b, 0x6c, 0x3b, 0xc3, 0x75, 0xa7, 0x4a, 0xf3, 0xf2, 0xe1, 0xc1, 0xf4, 0xd4, + 0xd2, 0x00, 0x1e, 0x1c, 0x58, 0x9b, 0x4d, 0x15, 0x93, 0x11, 0x51, 0x2c, 0x34, 0x52, 0x65, 0x3a, + 0xa1, 0x15, 0x8c, 0xef, 0xfb, 0x96, 0x52, 0x22, 0xb0, 0x4f, 0x28, 0x59, 0x82, 0xd1, 0xc0, 0x8d, + 0xf5, 0x57, 0x85, 0xf7, 0x97, 0xa6, 0x0c, 0x15, 0x1b, 0xee, 
0xc0, 0xde, 0x4a, 0xd4, 0x23, 0x08, + 0x17, 0xd4, 0x7d, 0xaa, 0xa7, 0x46, 0x78, 0x4f, 0x5d, 0x3a, 0x3c, 0x98, 0xbe, 0xb0, 0x91, 0xc9, + 0x81, 0x03, 0x6a, 0x92, 0x6f, 0x14, 0x60, 0x5c, 0x91, 0x64, 0x1f, 0x55, 0x4f, 0xb2, 0x8f, 0x08, + 0x1b, 0x11, 0x1b, 0x09, 0x01, 0x98, 0x12, 0xa8, 0xfd, 0xb2, 0x0c, 0xf5, 0x70, 0xaa, 0x27, 0xcf, + 0x41, 0x85, 0x9b, 0x20, 0xa4, 0x06, 0x1f, 0xae, 0xe1, 0xdc, 0x52, 0x81, 0x82, 0x46, 0x9e, 0x87, + 0xaa, 0xe1, 0x76, 0x3a, 0xba, 0x63, 0x72, 0xb3, 0x52, 0xbd, 0xd9, 0x60, 0xaa, 0xcb, 0x82, 0x28, + 0x42, 0x45, 0x23, 0x97, 0xa1, 0xac, 0x7b, 0x6d, 0x61, 0xe1, 0xa9, 0x8b, 0xf9, 0x68, 0xde, 0x6b, + 0xfb, 0xc8, 0x4b, 0xc9, 0xe7, 0xa0, 0x44, 0x9d, 0xbd, 0xa9, 0xf2, 0x60, 0xdd, 0xe8, 0xba, 0xb3, + 0x77, 0x57, 0xf7, 0x9a, 0x0d, 0xd9, 0x86, 0xd2, 0x75, 0x67, 0x0f, 0x59, 0x1d, 0xb2, 0x0a, 0x55, + 0xea, 0xec, 0xb1, 0x77, 0x2f, 0x4d, 0x2f, 0xbf, 0x31, 0xa0, 0x3a, 0x63, 0x91, 0xdb, 0x84, 0x50, + 0xc3, 0x92, 0xc5, 0xa8, 0x20, 0xc8, 0x97, 0x60, 0x54, 0x28, 0x5b, 0x6b, 0xec, 0x9d, 0xf8, 0x53, + 0x23, 0x1c, 0x72, 0x7a, 0xb0, 0xb6, 0xc6, 0xf9, 0x22, 0x53, 0x57, 0xac, 0xd0, 0xc7, 0x04, 0x14, + 0xf9, 0x12, 0xd4, 0xd5, 0xce, 0x58, 0xbd, 0xd9, 0x4c, 0x2b, 0x91, 0xda, 0x4e, 0x23, 0x7d, 0xaf, + 0x67, 0x79, 0xb4, 0x43, 0x9d, 0xc0, 0x6f, 0x9e, 0x51, 0x76, 0x03, 0x45, 0xf5, 0x31, 0x42, 0x23, + 0x5b, 0xfd, 0xe6, 0x2e, 0x61, 0xab, 0x79, 0x6e, 0xc0, 0xac, 0x3e, 0x84, 0xad, 0xeb, 0x2b, 0x30, + 0x11, 0xda, 0xa3, 0xa4, 0x49, 0x43, 0x58, 0x6f, 0x5e, 0x62, 0xd5, 0x57, 0x92, 0xa4, 0x07, 0x07, + 0xd3, 0xcf, 0x66, 0x18, 0x35, 0x22, 0x06, 0x4c, 0x83, 0x69, 0x3f, 0x2c, 0x41, 0xff, 0x3e, 0x24, + 0xd9, 0x69, 0x85, 0x93, 0xee, 0xb4, 0xf4, 0x03, 0x89, 0xe9, 0xf3, 0x35, 0x59, 0x2d, 0xff, 0x43, + 0x65, 0xbd, 0x98, 0xd2, 0x49, 0xbf, 0x98, 0x27, 0xe5, 0xdb, 0xd1, 0x3e, 0x2a, 0xc3, 0xf8, 0xa2, + 0x4e, 0x3b, 0xae, 0xf3, 0xc8, 0x5d, 0x59, 0xe1, 0x89, 0xd8, 0x95, 0x5d, 0x83, 0x9a, 0x47, 0xbb, + 0xb6, 0x65, 0xe8, 0x42, 0xf9, 0x92, 0x56, 0x50, 0x94, 0x65, 0x18, 0x52, 0x07, 0xec, 0xc6, 0x4b, + 
0x4f, 0xe4, 0x6e, 0xbc, 0xfc, 0xc9, 0xef, 0xc6, 0xb5, 0x6f, 0x14, 0x81, 0x2b, 0x2a, 0xe4, 0x2a, + 0x94, 0xd9, 0x22, 0x9c, 0xb6, 0x01, 0xf1, 0x81, 0xc3, 0x29, 0xe4, 0x12, 0x14, 0x03, 0x57, 0x7e, + 0x79, 0x20, 0xe9, 0xc5, 0x0d, 0x17, 0x8b, 0x81, 0x4b, 0xde, 0x07, 0x30, 0x5c, 0xc7, 0xb4, 0x94, + 0x73, 0x20, 0xdf, 0x83, 0x2d, 0xb9, 0xde, 0x3d, 0xdd, 0x33, 0x17, 0x42, 0x44, 0xb1, 0x1f, 0x8b, + 0xee, 0x31, 0x26, 0x8d, 0xbc, 0x0e, 0x23, 0xae, 0xb3, 0xd4, 0xb3, 0x6d, 0xde, 0xa1, 0xf5, 0xe6, + 0x6f, 0xb1, 0x4d, 0xf2, 0x1d, 0x5e, 0xf2, 0xe0, 0x60, 0xfa, 0xa2, 0xd0, 0x6f, 0xd9, 0xdd, 0x9b, + 0x9e, 0x15, 0x58, 0x4e, 0x3b, 0xdc, 0x9e, 0xc8, 0x6a, 0xda, 0xb7, 0x0a, 0xd0, 0x58, 0xb2, 0xee, + 0x53, 0xf3, 0x4d, 0xcb, 0x31, 0xdd, 0x7b, 0x04, 0x61, 0xc4, 0xa6, 0x4e, 0x3b, 0xd8, 0x19, 0x72, + 0xff, 0x20, 0x76, 0xe9, 0x1c, 0x01, 0x25, 0x12, 0x99, 0x85, 0xba, 0xd0, 0x3e, 0x2d, 0xa7, 0xcd, + 0xfb, 0xb0, 0x16, 0x4d, 0x7a, 0x2d, 0x45, 0xc0, 0x88, 0x47, 0xdb, 0x87, 0x33, 0x7d, 0xdd, 0x40, + 0x4c, 0x28, 0x07, 0x7a, 0x5b, 0xcd, 0xaf, 0x4b, 0x43, 0x77, 0xf0, 0x86, 0xde, 0x8e, 0x75, 0x2e, + 0x5f, 0xe3, 0x37, 0x74, 0xb6, 0xc6, 0x33, 0x74, 0xed, 0x57, 0x05, 0xa8, 0x2d, 0xf5, 0x1c, 0x83, + 0x6f, 0xd1, 0x1e, 0x6d, 0x1b, 0x54, 0x0a, 0x43, 0x31, 0x53, 0x61, 0xe8, 0xc1, 0xc8, 0xee, 0xbd, + 0x50, 0xa1, 0x68, 0xcc, 0xad, 0x0d, 0x3f, 0x2a, 0x64, 0x93, 0x66, 0x6e, 0x71, 0x3c, 0xe1, 0xba, + 0x1a, 0x97, 0x0d, 0x1a, 0xb9, 0xf5, 0x26, 0x17, 0x2a, 0x85, 0x5d, 0xfa, 0x1c, 0x34, 0x62, 0x6c, + 0xc7, 0xb2, 0x62, 0xff, 0xbd, 0x32, 0x8c, 0x2c, 0xb7, 0x5a, 0xf3, 0xeb, 0x2b, 0xe4, 0x65, 0x68, + 0x48, 0xaf, 0xc6, 0xed, 0xa8, 0x0f, 0x42, 0xa7, 0x56, 0x2b, 0x22, 0x61, 0x9c, 0x8f, 0xa9, 0x63, + 0x1e, 0xd5, 0xed, 0x8e, 0xfc, 0x58, 0x42, 0x75, 0x0c, 0x59, 0x21, 0x0a, 0x1a, 0xd1, 0x61, 0x9c, + 0xed, 0xf0, 0x58, 0x17, 0x8a, 0xdd, 0x9b, 0xfc, 0x6c, 0x8e, 0xb8, 0xbf, 0xe3, 0x4a, 0xe2, 0x66, + 0x02, 0x00, 0x53, 0x80, 0xe4, 0x35, 0xa8, 0xe9, 0xbd, 0x60, 0x87, 0x2b, 0xd0, 0xe2, 0xdb, 0xb8, + 0xcc, 0x9d, 0x3e, 0xb2, 0xec, 0xc1, 
0xc1, 0xf4, 0xe8, 0x2d, 0x6c, 0xbe, 0xac, 0xee, 0x31, 0xe4, + 0x66, 0x8d, 0x53, 0x3b, 0x46, 0xd9, 0xb8, 0xca, 0xb1, 0x1b, 0xb7, 0x9e, 0x00, 0xc0, 0x14, 0x20, + 0x79, 0x1b, 0x46, 0x77, 0xe9, 0x7e, 0xa0, 0x6f, 0x49, 0x01, 0x23, 0xc7, 0x11, 0x30, 0xc9, 0x54, + 0xb8, 0x5b, 0xb1, 0xea, 0x98, 0x00, 0x23, 0x3e, 0x9c, 0xdb, 0xa5, 0xde, 0x16, 0xf5, 0x5c, 0xb9, + 0xfb, 0x94, 0x42, 0xaa, 0xc7, 0x11, 0x32, 0x75, 0x78, 0x30, 0x7d, 0xee, 0x56, 0x06, 0x0c, 0x66, + 0x82, 0x6b, 0xff, 0xbb, 0x08, 0x13, 0xcb, 0xc2, 0xad, 0xec, 0x7a, 0x62, 0x11, 0x26, 0x17, 0xa1, + 0xe4, 0x75, 0x7b, 0x7c, 0xe4, 0x94, 0x84, 0xe1, 0x18, 0xd7, 0x37, 0x91, 0x95, 0x91, 0xb7, 0xa0, + 0x66, 0xca, 0x29, 0x43, 0x6e, 0x7e, 0x87, 0x32, 0x54, 0xa8, 0x3b, 0x0c, 0xd1, 0x98, 0xa6, 0xdf, + 0xf1, 0xdb, 0x2d, 0xeb, 0x7d, 0x2a, 0xf7, 0x83, 0x5c, 0xd3, 0x5f, 0x13, 0x45, 0xa8, 0x68, 0x6c, + 0x55, 0xdd, 0xa5, 0xfb, 0x62, 0x37, 0x54, 0x8e, 0x56, 0xd5, 0x5b, 0xb2, 0x0c, 0x43, 0x2a, 0x99, + 0x56, 0x1f, 0x0b, 0x1b, 0x05, 0x65, 0xb1, 0x93, 0xbf, 0xcb, 0x0a, 0xe4, 0x77, 0xc3, 0xa6, 0xcc, + 0x77, 0xad, 0x20, 0xa0, 0x9e, 0x7c, 0x8d, 0x43, 0x4d, 0x99, 0x37, 0x39, 0x02, 0x4a, 0x24, 0xf2, + 0x3b, 0x50, 0xe7, 0xe0, 0x4d, 0xdb, 0xdd, 0xe2, 0x2f, 0xae, 0x2e, 0xf6, 0xf4, 0x77, 0x55, 0x21, + 0x46, 0x74, 0xed, 0xd7, 0x45, 0xb8, 0xb0, 0x4c, 0x03, 0xa1, 0xd5, 0x2c, 0xd2, 0xae, 0xed, 0xee, + 0x33, 0xd5, 0x12, 0xe9, 0x7b, 0xe4, 0x0d, 0x00, 0xcb, 0xdf, 0x6a, 0xed, 0x19, 0xfc, 0x3b, 0x10, + 0xdf, 0xf0, 0x55, 0xf9, 0x49, 0xc2, 0x4a, 0xab, 0x29, 0x29, 0x0f, 0x12, 0x77, 0x18, 0xab, 0x13, + 0x6d, 0xaf, 0x8a, 0x0f, 0xd9, 0x5e, 0xb5, 0x00, 0xba, 0x91, 0x82, 0x5a, 0xe2, 0x9c, 0xbf, 0xaf, + 0xc4, 0x1c, 0x47, 0x37, 0x8d, 0xc1, 0xe4, 0x51, 0x19, 0x1d, 0x98, 0x34, 0xe9, 0xb6, 0xde, 0xb3, + 0x83, 0x50, 0xa9, 0x96, 0x1f, 0xf1, 0xd1, 0xf5, 0xf2, 0xd0, 0xe5, 0xbd, 0x98, 0x42, 0xc2, 0x3e, + 0x6c, 0xed, 0xef, 0x97, 0xe0, 0xd2, 0x32, 0x0d, 0x42, 0x8b, 0x8b, 0x9c, 0x1d, 0x5b, 0x5d, 0x6a, + 0xb0, 0xb7, 0xf0, 0x61, 0x01, 0x46, 0x6c, 0x7d, 0x8b, 0xda, 0x6c, 0xf5, 
0x62, 0x4f, 0xf3, 0xce, + 0xd0, 0x0b, 0xc1, 0x60, 0x29, 0x33, 0xab, 0x5c, 0x42, 0x6a, 0x69, 0x10, 0x85, 0x28, 0xc5, 0xb3, + 0x49, 0xdd, 0xb0, 0x7b, 0x7e, 0x40, 0xbd, 0x75, 0xd7, 0x0b, 0xa4, 0x3e, 0x19, 0x4e, 0xea, 0x0b, + 0x11, 0x09, 0xe3, 0x7c, 0x64, 0x0e, 0xc0, 0xb0, 0x2d, 0xea, 0x04, 0xbc, 0x96, 0xf8, 0xae, 0x88, + 0x7a, 0xbf, 0x0b, 0x21, 0x05, 0x63, 0x5c, 0x4c, 0x54, 0xc7, 0x75, 0xac, 0xc0, 0x15, 0xa2, 0xca, + 0x49, 0x51, 0x6b, 0x11, 0x09, 0xe3, 0x7c, 0xbc, 0x1a, 0x0d, 0x3c, 0xcb, 0xf0, 0x79, 0xb5, 0x4a, + 0xaa, 0x5a, 0x44, 0xc2, 0x38, 0x1f, 0x5b, 0xf3, 0x62, 0xcf, 0x7f, 0xac, 0x35, 0xef, 0x7b, 0x75, + 0xb8, 0x92, 0xe8, 0xd6, 0x40, 0x0f, 0xe8, 0x76, 0xcf, 0x6e, 0xd1, 0x40, 0xbd, 0xc0, 0x21, 0xd7, + 0xc2, 0xbf, 0x18, 0xbd, 0x77, 0x11, 0xcc, 0x62, 0x9c, 0xcc, 0x7b, 0xef, 0x6b, 0xe0, 0x91, 0xde, + 0xfd, 0x2c, 0xd4, 0x1d, 0x3d, 0xf0, 0xf9, 0x87, 0x2b, 0xbf, 0xd1, 0x50, 0x0d, 0xbb, 0xad, 0x08, + 0x18, 0xf1, 0x90, 0x75, 0x38, 0x27, 0xbb, 0xf8, 0xfa, 0xfd, 0xae, 0xeb, 0x05, 0xd4, 0x13, 0x75, + 0xe5, 0x72, 0x2a, 0xeb, 0x9e, 0x5b, 0xcb, 0xe0, 0xc1, 0xcc, 0x9a, 0x64, 0x0d, 0xce, 0x1a, 0xc2, + 0xc1, 0x4f, 0x6d, 0x57, 0x37, 0x15, 0xa0, 0x30, 0x70, 0x85, 0x5b, 0xa3, 0x85, 0x7e, 0x16, 0xcc, + 0xaa, 0x97, 0x1e, 0xcd, 0x23, 0x43, 0x8d, 0xe6, 0xea, 0x30, 0xa3, 0xb9, 0x36, 0xdc, 0x68, 0xae, + 0x1f, 0x6d, 0x34, 0xb3, 0x9e, 0x67, 0xe3, 0x88, 0x7a, 0x4c, 0x3d, 0x11, 0x2b, 0x6c, 0x2c, 0x7e, + 0x24, 0xec, 0xf9, 0x56, 0x06, 0x0f, 0x66, 0xd6, 0x24, 0x5b, 0x70, 0x49, 0x94, 0x5f, 0x77, 0x0c, + 0x6f, 0xbf, 0xcb, 0x16, 0x9e, 0x18, 0x6e, 0x23, 0x61, 0x61, 0xbc, 0xd4, 0x1a, 0xc8, 0x89, 0x0f, + 0x41, 0x21, 0x7f, 0x08, 0x63, 0xe2, 0x2d, 0xad, 0xe9, 0x5d, 0x0e, 0x2b, 0xa2, 0x49, 0xce, 0x4b, + 0xd8, 0xb1, 0x85, 0x38, 0x11, 0x93, 0xbc, 0x64, 0x1e, 0x26, 0xba, 0x7b, 0x06, 0xbb, 0x5c, 0xd9, + 0xbe, 0x4d, 0xa9, 0x49, 0x4d, 0xee, 0xbe, 0xaa, 0x37, 0x9f, 0x56, 0x86, 0x8e, 0xf5, 0x24, 0x19, + 0xd3, 0xfc, 0xe4, 0x35, 0x18, 0xf5, 0x03, 0xdd, 0x0b, 0xa4, 0x59, 0x6f, 0x6a, 0x5c, 0x44, 0xdb, + 0x28, 0xab, 
0x57, 0x2b, 0x46, 0xc3, 0x04, 0x67, 0xe6, 0x7a, 0x31, 0x71, 0x7a, 0xeb, 0x45, 0x9e, + 0xd9, 0xea, 0x9f, 0x15, 0xe1, 0xea, 0x32, 0x0d, 0xd6, 0x5c, 0x47, 0x1a, 0x45, 0xb3, 0x96, 0xfd, + 0x23, 0xd9, 0x44, 0x93, 0x8b, 0x76, 0xf1, 0x44, 0x17, 0xed, 0xd2, 0x09, 0x2d, 0xda, 0xe5, 0x53, + 0x5c, 0xb4, 0xff, 0x61, 0x11, 0x9e, 0x4e, 0xf4, 0xe4, 0xba, 0x6b, 0xaa, 0x09, 0xff, 0xb3, 0x0e, + 0x3c, 0x42, 0x07, 0x3e, 0x10, 0x7a, 0x27, 0x77, 0x6b, 0xa5, 0x34, 0x9e, 0x0f, 0xd2, 0x1a, 0xcf, + 0xdb, 0x79, 0x56, 0xbe, 0x0c, 0x09, 0x47, 0x5a, 0xf1, 0x6e, 0x02, 0xf1, 0xa4, 0x13, 0x4e, 0x98, + 0x7e, 0x62, 0x4a, 0x4f, 0x18, 0xce, 0x87, 0x7d, 0x1c, 0x98, 0x51, 0x8b, 0xb4, 0xe0, 0xbc, 0x4f, + 0x9d, 0xc0, 0x72, 0xa8, 0x9d, 0x84, 0x13, 0xda, 0xd0, 0xb3, 0x12, 0xee, 0x7c, 0x2b, 0x8b, 0x09, + 0xb3, 0xeb, 0xe6, 0x99, 0x07, 0xfe, 0x25, 0x70, 0x95, 0x53, 0x74, 0xcd, 0x89, 0x69, 0x2c, 0x1f, + 0xa6, 0x35, 0x96, 0x77, 0xf2, 0xbf, 0xb7, 0xe1, 0xb4, 0x95, 0x39, 0x00, 0xfe, 0x16, 0xe2, 0xea, + 0x4a, 0xb8, 0x48, 0x63, 0x48, 0xc1, 0x18, 0x17, 0x5b, 0x80, 0x54, 0x3f, 0xc7, 0x35, 0x95, 0x70, + 0x01, 0x6a, 0xc5, 0x89, 0x98, 0xe4, 0x1d, 0xa8, 0xed, 0x54, 0x86, 0xd6, 0x76, 0x6e, 0x02, 0x49, + 0x18, 0x1e, 0x05, 0xde, 0x48, 0x32, 0x9a, 0x74, 0xa5, 0x8f, 0x03, 0x33, 0x6a, 0x0d, 0x18, 0xca, + 0xd5, 0x93, 0x1d, 0xca, 0xb5, 0xe1, 0x87, 0x32, 0x79, 0x07, 0x2e, 0x72, 0x51, 0xb2, 0x7f, 0x92, + 0xc0, 0x42, 0xef, 0xf9, 0x0d, 0x09, 0x7c, 0x11, 0x07, 0x31, 0xe2, 0x60, 0x0c, 0xf6, 0x7e, 0x0c, + 0x8f, 0x9a, 0x4c, 0xb8, 0x6e, 0x0f, 0xd6, 0x89, 0x16, 0x32, 0x78, 0x30, 0xb3, 0x26, 0x1b, 0x62, + 0x01, 0x1b, 0x86, 0xfa, 0x96, 0x4d, 0x4d, 0x19, 0x4d, 0x1b, 0x0e, 0xb1, 0x8d, 0xd5, 0x96, 0xa4, + 0x60, 0x8c, 0x2b, 0x4b, 0x4d, 0x19, 0x3d, 0xa6, 0x9a, 0xb2, 0xcc, 0xad, 0xf4, 0xdb, 0x09, 0x6d, + 0x48, 0xea, 0x3a, 0x61, 0x7c, 0xf4, 0x42, 0x9a, 0x01, 0xfb, 0xeb, 0x70, 0x2d, 0xd1, 0xf0, 0xac, + 0x6e, 0xe0, 0x27, 0xb1, 0xc6, 0x53, 0x5a, 0x62, 0x06, 0x0f, 0x66, 0xd6, 0x64, 0xfa, 0xf9, 0x0e, + 0xd5, 0xed, 0x60, 0x27, 0x09, 0x38, 0x91, 0xd4, 
0xcf, 0x6f, 0xf4, 0xb3, 0x60, 0x56, 0xbd, 0xcc, + 0x05, 0x69, 0xf2, 0xc9, 0x54, 0xab, 0xbe, 0x59, 0x82, 0x8b, 0xcb, 0x34, 0x08, 0x03, 0x8d, 0x3e, + 0x33, 0xa3, 0x7c, 0x02, 0x66, 0x94, 0xef, 0x56, 0xe0, 0xec, 0x32, 0x0d, 0xfa, 0xb4, 0xb1, 0xff, + 0x4f, 0xbb, 0x7f, 0x0d, 0xce, 0x46, 0xb1, 0x6d, 0xad, 0xc0, 0xf5, 0xc4, 0x5a, 0x9e, 0xda, 0x2d, + 0xb7, 0xfa, 0x59, 0x30, 0xab, 0x1e, 0xf9, 0x12, 0x3c, 0xcd, 0x97, 0x7a, 0xa7, 0x2d, 0xec, 0xb3, + 0xc2, 0x98, 0x10, 0x3b, 0x9d, 0x31, 0x2d, 0x21, 0x9f, 0x6e, 0x65, 0xb3, 0xe1, 0xa0, 0xfa, 0xe4, + 0x6b, 0x30, 0xda, 0xb5, 0xba, 0xd4, 0xb6, 0x1c, 0xae, 0x9f, 0xe5, 0x0e, 0x09, 0x59, 0x8f, 0x81, + 0x45, 0x1b, 0xb8, 0x78, 0x29, 0x26, 0x04, 0x66, 0x8e, 0xd4, 0xda, 0x29, 0x8e, 0xd4, 0xff, 0x51, + 0x84, 0xea, 0xb2, 0xe7, 0xf6, 0xba, 0xcd, 0x7d, 0xd2, 0x86, 0x91, 0x7b, 0xdc, 0x79, 0x26, 0x5d, + 0x53, 0xc3, 0xc7, 0x87, 0x0b, 0x1f, 0x5c, 0xa4, 0x12, 0x89, 0x7b, 0x94, 0xf0, 0x6c, 0x10, 0xef, + 0xd2, 0x7d, 0x6a, 0x4a, 0x1f, 0x5a, 0x38, 0x88, 0x6f, 0xb1, 0x42, 0x14, 0x34, 0xd2, 0x81, 0x09, + 0xdd, 0xb6, 0xdd, 0x7b, 0xd4, 0x5c, 0xd5, 0x03, 0xea, 0x50, 0x5f, 0xb9, 0x24, 0x8f, 0x6b, 0x96, + 0xe6, 0x7e, 0xfd, 0xf9, 0x24, 0x14, 0xa6, 0xb1, 0xc9, 0xbb, 0x50, 0xf5, 0x03, 0xd7, 0x53, 0xca, + 0x56, 0x63, 0x6e, 0x61, 0xf8, 0x97, 0xde, 0xfc, 0x62, 0x4b, 0x40, 0x09, 0x9b, 0xbd, 0xbc, 0x41, + 0x25, 0x40, 0xfb, 0x4e, 0x01, 0xe0, 0xc6, 0xc6, 0xc6, 0xba, 0x74, 0x2f, 0x98, 0x50, 0xd6, 0x7b, + 0xa1, 0xa3, 0x72, 0x78, 0x87, 0x60, 0x22, 0x2c, 0x53, 0xfa, 0xf0, 0x7a, 0xc1, 0x0e, 0x72, 0x74, + 0xf2, 0xdb, 0x50, 0x95, 0x0a, 0xb2, 0xec, 0xf6, 0x30, 0xb4, 0x40, 0x2a, 0xd1, 0xa8, 0xe8, 0xda, + 0xdf, 0x2d, 0x02, 0xac, 0x98, 0x36, 0x6d, 0xa9, 0x90, 0xfe, 0x7a, 0xb0, 0xe3, 0x51, 0x7f, 0xc7, + 0xb5, 0xcd, 0x21, 0xbd, 0xa9, 0xdc, 0xe6, 0xbf, 0xa1, 0x40, 0x30, 0xc2, 0x23, 0x26, 0x8c, 0xfa, + 0x01, 0xed, 0xaa, 0x48, 0xcd, 0x21, 0x9d, 0x28, 0x93, 0xc2, 0x2e, 0x12, 0xe1, 0x60, 0x02, 0x95, + 0xe8, 0xd0, 0xb0, 0x1c, 0x43, 0x7c, 0x20, 0xcd, 0xfd, 0x21, 0x07, 0xd2, 0x04, 0xdb, 
0x71, 0xac, + 0x44, 0x30, 0x18, 0xc7, 0xd4, 0x7e, 0x56, 0x84, 0x0b, 0x5c, 0x1e, 0x6b, 0x46, 0x22, 0x1e, 0x93, + 0xfc, 0xe9, 0xbe, 0xe3, 0x87, 0x7f, 0xf2, 0x68, 0xa2, 0xc5, 0xe9, 0xb5, 0x35, 0x1a, 0xe8, 0x91, + 0x3e, 0x17, 0x95, 0xc5, 0xce, 0x1c, 0xf6, 0xa0, 0xec, 0xb3, 0xf9, 0x4a, 0xf4, 0x5e, 0x6b, 0xe8, + 0x21, 0x94, 0xfd, 0x00, 0x7c, 0xf6, 0x0a, 0xbd, 0xc6, 0x7c, 0xd6, 0xe2, 0xe2, 0xc8, 0x9f, 0x85, + 0x11, 0x3f, 0xd0, 0x83, 0x9e, 0xfa, 0x34, 0x37, 0x4f, 0x5a, 0x30, 0x07, 0x8f, 0xe6, 0x11, 0x71, + 0x8f, 0x52, 0xa8, 0xf6, 0xb3, 0x02, 0x5c, 0xca, 0xae, 0xb8, 0x6a, 0xf9, 0x01, 0xf9, 0x53, 0x7d, + 0xdd, 0x7e, 0xc4, 0x37, 0xce, 0x6a, 0xf3, 0x4e, 0x0f, 0x23, 0xd4, 0x55, 0x49, 0xac, 0xcb, 0x03, + 0xa8, 0x58, 0x01, 0xed, 0xa8, 0xfd, 0xe5, 0x9d, 0x13, 0x7e, 0xf4, 0xd8, 0xd2, 0xce, 0xa4, 0xa0, + 0x10, 0xa6, 0x7d, 0x54, 0x1c, 0xf4, 0xc8, 0x7c, 0xf9, 0xb0, 0x93, 0x31, 0xbf, 0xb7, 0xf2, 0xc5, + 0xfc, 0x26, 0x1b, 0xd4, 0x1f, 0xfa, 0xfb, 0x67, 0xfa, 0x43, 0x7f, 0xef, 0xe4, 0x0f, 0xfd, 0x4d, + 0x75, 0xc3, 0xc0, 0x08, 0xe0, 0x8f, 0x4b, 0x70, 0xf9, 0x61, 0xc3, 0x86, 0xad, 0x67, 0x72, 0x74, + 0xe6, 0x5d, 0xcf, 0x1e, 0x3e, 0x0e, 0xc9, 0x1c, 0x54, 0xba, 0x3b, 0xba, 0xaf, 0x94, 0x32, 0xb5, + 0x61, 0xa9, 0xac, 0xb3, 0xc2, 0x07, 0x6c, 0xd2, 0xe0, 0xca, 0x1c, 0xbf, 0x45, 0xc1, 0xca, 0xa6, + 0xe3, 0x0e, 0xf5, 0xfd, 0xc8, 0x26, 0x10, 0x4e, 0xc7, 0x6b, 0xa2, 0x18, 0x15, 0x9d, 0x04, 0x30, + 0x22, 0x4c, 0xcc, 0x72, 0x65, 0x1a, 0x3e, 0x90, 0x2b, 0x23, 0x4c, 0x3c, 0x7a, 0x28, 0xe9, 0xad, + 0x90, 0xb2, 0xc8, 0x0c, 0x94, 0x83, 0x28, 0x68, 0x57, 0x6d, 0xcd, 0xcb, 0x19, 0xfa, 0x29, 0xe7, + 0x63, 0x1b, 0x7b, 0x77, 0x8b, 0x1b, 0xd5, 0x4d, 0xe9, 0x3f, 0xb7, 0x5c, 0x87, 0x2b, 0x64, 0xa5, + 0x68, 0x63, 0x7f, 0xa7, 0x8f, 0x03, 0x33, 0x6a, 0x69, 0xff, 0xa6, 0x06, 0x17, 0xb2, 0xc7, 0x03, + 0xeb, 0xb7, 0x3d, 0xea, 0xf9, 0x0c, 0xbb, 0x90, 0xec, 0xb7, 0xbb, 0xa2, 0x18, 0x15, 0xfd, 0x53, + 0x1d, 0x70, 0xf6, 0xdd, 0x02, 0x5c, 0xf4, 0xa4, 0x8f, 0xe8, 0x71, 0x04, 0x9d, 0x3d, 0x2b, 0xcc, + 0x19, 0x03, 0x04, 0xe2, 
0xe0, 0xb6, 0x90, 0xbf, 0x5d, 0x80, 0xa9, 0x4e, 0xca, 0xce, 0x71, 0x8a, + 0x27, 0xe8, 0x78, 0x54, 0xfc, 0xda, 0x00, 0x79, 0x38, 0xb0, 0x25, 0xe4, 0x6b, 0xd0, 0xe8, 0xb2, + 0x71, 0xe1, 0x07, 0xd4, 0x31, 0xd4, 0x21, 0xba, 0xe1, 0xbf, 0xa4, 0xf5, 0x08, 0x2b, 0x3c, 0x41, + 0xc3, 0xf5, 0x83, 0x18, 0x01, 0xe3, 0x12, 0x9f, 0xf0, 0x23, 0x73, 0xd7, 0xa0, 0xe6, 0xd3, 0x20, + 0xb0, 0x9c, 0xb6, 0xd8, 0x6f, 0xd4, 0xc5, 0xb7, 0xd2, 0x92, 0x65, 0x18, 0x52, 0xc9, 0xef, 0x40, + 0x9d, 0xbb, 0x9c, 0xe6, 0xbd, 0xb6, 0x3f, 0x55, 0xe7, 0xe1, 0x62, 0x63, 0x22, 0x00, 0x4e, 0x16, + 0x62, 0x44, 0x27, 0x2f, 0xc1, 0xe8, 0x16, 0xff, 0x7c, 0xe5, 0x29, 0x6a, 0x61, 0xe3, 0xe2, 0xda, + 0x5a, 0x33, 0x56, 0x8e, 0x09, 0x2e, 0x32, 0x07, 0x40, 0x43, 0xbf, 0x5c, 0xda, 0x9e, 0x15, 0x79, + 0xec, 0x30, 0xc6, 0x45, 0x9e, 0x85, 0x52, 0x60, 0xfb, 0xdc, 0x86, 0x55, 0x8b, 0xb6, 0xa0, 0x1b, + 0xab, 0x2d, 0x64, 0xe5, 0xda, 0xaf, 0x0b, 0x30, 0x91, 0x3a, 0x5c, 0xc2, 0xaa, 0xf4, 0x3c, 0x5b, + 0x4e, 0x23, 0x61, 0x95, 0x4d, 0x5c, 0x45, 0x56, 0x4e, 0xde, 0x91, 0x6a, 0x79, 0x31, 0x67, 0xc2, + 0x88, 0xdb, 0x7a, 0xe0, 0x33, 0x3d, 0xbc, 0x4f, 0x23, 0xe7, 0x6e, 0xbe, 0xa8, 0x3d, 0x72, 0x1d, + 0x88, 0xb9, 0xf9, 0x22, 0x1a, 0x26, 0x38, 0x53, 0x06, 0xbf, 0xf2, 0x51, 0x0c, 0x7e, 0xda, 0xb7, + 0x8a, 0xb1, 0x1e, 0x90, 0x9a, 0xfd, 0x23, 0x7a, 0xe0, 0x05, 0xb6, 0x80, 0x86, 0x8b, 0x7b, 0x3d, + 0xbe, 0xfe, 0xf1, 0xc5, 0x58, 0x52, 0xc9, 0x9b, 0xa2, 0xef, 0x4b, 0x39, 0x8f, 0xe5, 0x6e, 0xac, + 0xb6, 0x44, 0x74, 0x95, 0x7a, 0x6b, 0xe1, 0x2b, 0x28, 0x9f, 0xd2, 0x2b, 0xd0, 0xfe, 0x45, 0x09, + 0x1a, 0x37, 0xdd, 0xad, 0x4f, 0x49, 0x04, 0x75, 0xf6, 0x32, 0x55, 0xfc, 0x04, 0x97, 0xa9, 0x4d, + 0x78, 0x3a, 0x08, 0xec, 0x16, 0x35, 0x5c, 0xc7, 0xf4, 0xe7, 0xb7, 0x03, 0xea, 0x2d, 0x59, 0x8e, + 0xe5, 0xef, 0x50, 0x53, 0xba, 0x93, 0x9e, 0x39, 0x3c, 0x98, 0x7e, 0x7a, 0x63, 0x63, 0x35, 0x8b, + 0x05, 0x07, 0xd5, 0xe5, 0xd3, 0x86, 0x38, 0x09, 0xc8, 0x4f, 0xca, 0xc8, 0x98, 0x1b, 0x31, 0x6d, + 0xc4, 0xca, 0x31, 0xc1, 0xa5, 0xfd, 0xa0, 0x08, 0xf5, 0x30, 
0x15, 0x00, 0x79, 0x1e, 0xaa, 0x5b, + 0x9e, 0xbb, 0x4b, 0x3d, 0xe1, 0xb9, 0x93, 0x27, 0x65, 0x9a, 0xa2, 0x08, 0x15, 0x8d, 0x3c, 0x07, + 0x95, 0xc0, 0xed, 0x5a, 0x46, 0xda, 0xa0, 0xb6, 0xc1, 0x0a, 0x51, 0xd0, 0x4e, 0x6f, 0x80, 0xbf, + 0x90, 0x50, 0xed, 0xea, 0x03, 0x95, 0xb1, 0xb7, 0xa1, 0xec, 0xeb, 0xbe, 0x2d, 0xd7, 0xd3, 0x1c, + 0xa7, 0xea, 0xe7, 0x5b, 0xab, 0xf2, 0x54, 0xfd, 0x7c, 0x6b, 0x15, 0x39, 0xa8, 0xf6, 0xcb, 0x22, + 0x34, 0x44, 0xbf, 0x89, 0x59, 0xe1, 0x24, 0x7b, 0xee, 0x75, 0x1e, 0x4a, 0xe1, 0xf7, 0x3a, 0xd4, + 0xe3, 0x66, 0x26, 0x39, 0xc9, 0xc5, 0xfd, 0x03, 0x11, 0x31, 0x0c, 0xa7, 0x88, 0x8a, 0x54, 0xd7, + 0x97, 0x4f, 0xb1, 0xeb, 0x2b, 0x47, 0xea, 0xfa, 0x91, 0xd3, 0xe8, 0xfa, 0x0f, 0x8b, 0x50, 0x5f, + 0xb5, 0xb6, 0xa9, 0xb1, 0x6f, 0xd8, 0xfc, 0x4c, 0xa0, 0x49, 0x6d, 0x1a, 0xd0, 0x65, 0x4f, 0x37, + 0xe8, 0x3a, 0xf5, 0x2c, 0x9e, 0x2a, 0x87, 0x7d, 0x1f, 0x7c, 0x06, 0x92, 0x67, 0x02, 0x17, 0x07, + 0xf0, 0xe0, 0xc0, 0xda, 0x64, 0x05, 0x46, 0x4d, 0xea, 0x5b, 0x1e, 0x35, 0xd7, 0x63, 0x1b, 0x95, + 0xe7, 0xd5, 0x52, 0xb3, 0x18, 0xa3, 0x3d, 0x38, 0x98, 0x1e, 0x53, 0x06, 0x4a, 0xb1, 0x63, 0x49, + 0x54, 0x65, 0x9f, 0x7c, 0x57, 0xef, 0xf9, 0x59, 0x6d, 0x8c, 0x7d, 0xf2, 0xeb, 0xd9, 0x2c, 0x38, + 0xa8, 0xae, 0x56, 0x81, 0xd2, 0xaa, 0xdb, 0xd6, 0x3e, 0x2a, 0x41, 0x98, 0x53, 0x89, 0xfc, 0x85, + 0x02, 0x34, 0x74, 0xc7, 0x71, 0x03, 0x99, 0xaf, 0x48, 0x78, 0xe0, 0x31, 0x77, 0xea, 0xa6, 0x99, + 0xf9, 0x08, 0x54, 0x38, 0x6f, 0x43, 0x87, 0x72, 0x8c, 0x82, 0x71, 0xd9, 0xa4, 0x97, 0xf2, 0x27, + 0xaf, 0xe5, 0x6f, 0xc5, 0x11, 0xbc, 0xc7, 0x97, 0xbe, 0x00, 0x93, 0xe9, 0xc6, 0x1e, 0xc7, 0x1d, + 0x94, 0xcb, 0x31, 0x5f, 0x04, 0x88, 0x62, 0x4a, 0x1e, 0x83, 0x11, 0xcb, 0x4a, 0x18, 0xb1, 0x86, + 0x3f, 0xd8, 0x1e, 0x35, 0x7a, 0xa0, 0xe1, 0xea, 0xbd, 0x94, 0xe1, 0x6a, 0xe5, 0x24, 0x84, 0x3d, + 0xdc, 0x58, 0xf5, 0x77, 0x0a, 0x30, 0x19, 0x31, 0xcb, 0x13, 0xb2, 0xaf, 0xc2, 0x98, 0x47, 0x75, + 0xb3, 0xa9, 0x07, 0xc6, 0x0e, 0x0f, 0xf5, 0x2e, 0xf0, 0xd8, 0xec, 0x33, 0x87, 0x07, 0xd3, 0x63, + 
0x18, 0x27, 0x60, 0x92, 0x8f, 0xe8, 0xd0, 0x60, 0x05, 0x1b, 0x56, 0x87, 0xba, 0xbd, 0x60, 0x48, + 0xab, 0x29, 0xdf, 0xb0, 0x60, 0x04, 0x83, 0x71, 0x4c, 0xed, 0xe3, 0x02, 0x8c, 0xc7, 0x1b, 0x7c, + 0xea, 0x16, 0xb5, 0x9d, 0xa4, 0x45, 0x6d, 0xe1, 0x04, 0xde, 0xc9, 0x00, 0x2b, 0xda, 0x07, 0x10, + 0x7f, 0x34, 0x6e, 0x39, 0x8b, 0x1b, 0x0b, 0x0a, 0x0f, 0x35, 0x16, 0x7c, 0xfa, 0xd3, 0xe8, 0x0c, + 0xd2, 0x72, 0xcb, 0x4f, 0xb0, 0x96, 0xfb, 0x49, 0xe6, 0xe2, 0x89, 0xe5, 0x93, 0x19, 0xc9, 0x91, + 0x4f, 0xa6, 0x13, 0xe6, 0x93, 0xa9, 0x9e, 0xd8, 0xa4, 0x73, 0x94, 0x9c, 0x32, 0xb5, 0xc7, 0x9a, + 0x53, 0xa6, 0x7e, 0x5a, 0x39, 0x65, 0x20, 0x6f, 0x4e, 0x99, 0x0f, 0x0a, 0x30, 0x6e, 0x26, 0x4e, + 0xcc, 0x72, 0xdb, 0x42, 0x9e, 0xa5, 0x26, 0x79, 0x00, 0x57, 0x1c, 0x99, 0x4a, 0x96, 0x61, 0x4a, + 0x64, 0x56, 0x26, 0x97, 0xd1, 0x4f, 0x26, 0x93, 0xcb, 0x2f, 0xaa, 0xf1, 0x15, 0xe9, 0x71, 0x1b, + 0xcd, 0x5f, 0x49, 0x1a, 0xcd, 0xaf, 0xa6, 0x8d, 0xe6, 0x13, 0xb1, 0x78, 0xd6, 0xb8, 0xe1, 0xfc, + 0x77, 0x63, 0x13, 0x75, 0x89, 0xe7, 0x70, 0x09, 0xdf, 0x79, 0xc6, 0x64, 0x3d, 0x0f, 0x13, 0x52, + 0x7b, 0x55, 0x44, 0x3e, 0xcb, 0x8d, 0x45, 0x61, 0x4e, 0x8b, 0x49, 0x32, 0xa6, 0xf9, 0x99, 0x40, + 0x5f, 0xa5, 0xf2, 0x14, 0x5b, 0x85, 0x68, 0x90, 0xa9, 0x34, 0x9b, 0x21, 0x07, 0xdb, 0x56, 0x78, + 0x54, 0xf7, 0xa5, 0xe9, 0x3b, 0xb6, 0xad, 0x40, 0x5e, 0x8a, 0x92, 0x1a, 0xb7, 0xff, 0x57, 0x1f, + 0x61, 0xff, 0xd7, 0xa1, 0x61, 0xeb, 0x7e, 0x20, 0xde, 0xa6, 0x29, 0x3f, 0xe7, 0x3f, 0x71, 0xb4, + 0x85, 0x97, 0x2d, 0xe6, 0x91, 0x76, 0xbb, 0x1a, 0xc1, 0x60, 0x1c, 0x93, 0x98, 0x30, 0xca, 0x6e, + 0xf9, 0xa7, 0x6d, 0xce, 0x07, 0x32, 0xe1, 0xd5, 0x71, 0x64, 0x84, 0x66, 0xab, 0xd5, 0x18, 0x0e, + 0x26, 0x50, 0x07, 0xb8, 0x08, 0x60, 0x18, 0x17, 0x01, 0xf9, 0x43, 0xa1, 0x39, 0xed, 0x87, 0xaf, + 0xb5, 0xc1, 0x5f, 0x6b, 0x18, 0x22, 0x89, 0x71, 0x22, 0x26, 0x79, 0xd9, 0xa8, 0xe8, 0xc9, 0x6e, + 0x50, 0xd5, 0x47, 0x93, 0xa3, 0x62, 0x33, 0x49, 0xc6, 0x34, 0x3f, 0x59, 0x87, 0x73, 0x61, 0x51, + 0xbc, 0x19, 0x63, 0x1c, 0x27, 0x8c, 
0x59, 0xdb, 0xcc, 0xe0, 0xc1, 0xcc, 0x9a, 0xfc, 0x10, 0x48, + 0xcf, 0xf3, 0xa8, 0x13, 0xdc, 0xd0, 0xfd, 0x1d, 0x19, 0xfc, 0x16, 0x1d, 0x02, 0x89, 0x48, 0x18, + 0xe7, 0x23, 0x73, 0x00, 0x02, 0x8e, 0xd7, 0x9a, 0x48, 0xc6, 0x97, 0x6e, 0x86, 0x14, 0x8c, 0x71, + 0x69, 0x1f, 0xd4, 0xa1, 0x71, 0x5b, 0x0f, 0xac, 0x3d, 0xca, 0xfd, 0x79, 0xa7, 0xe3, 0x54, 0xf9, + 0xeb, 0x05, 0xb8, 0x90, 0x0c, 0xda, 0x3c, 0x45, 0xcf, 0x0a, 0x4f, 0x01, 0x83, 0x99, 0xd2, 0x70, + 0x40, 0x2b, 0xb8, 0x8f, 0xa5, 0x2f, 0x06, 0xf4, 0xb4, 0x7d, 0x2c, 0xad, 0x41, 0x02, 0x71, 0x70, + 0x5b, 0x3e, 0x2d, 0x3e, 0x96, 0x27, 0x3b, 0x67, 0x61, 0xca, 0x03, 0x54, 0x7d, 0x62, 0x3c, 0x40, + 0xb5, 0x27, 0x42, 0xed, 0xee, 0xc6, 0x3c, 0x40, 0xf5, 0x9c, 0x91, 0x48, 0xf2, 0x9c, 0x83, 0x40, + 0x1b, 0xe4, 0x49, 0xe2, 0x29, 0x0a, 0x94, 0x65, 0x9e, 0x69, 0xab, 0x5b, 0xba, 0x6f, 0x19, 0x52, + 0xed, 0xc8, 0x91, 0xa3, 0x55, 0xe5, 0x6e, 0x13, 0x01, 0x0b, 0xfc, 0x16, 0x05, 0x76, 0x94, 0xaa, + 0xae, 0x98, 0x2b, 0x55, 0x1d, 0x59, 0x80, 0xb2, 0xb3, 0x4b, 0xf7, 0x8f, 0x77, 0xd8, 0x9f, 0xef, + 0xc2, 0x6e, 0xdf, 0xa2, 0xfb, 0xc8, 0x2b, 0x6b, 0x3f, 0x28, 0x02, 0xb0, 0xc7, 0x3f, 0x9a, 0x2f, + 0xe6, 0xb7, 0xa1, 0xea, 0xf7, 0xb8, 0xd5, 0x44, 0x2a, 0x4c, 0x51, 0xf8, 0x96, 0x28, 0x46, 0x45, + 0x27, 0xcf, 0x41, 0xe5, 0xbd, 0x1e, 0xed, 0xa9, 0xc0, 0x82, 0x50, 0x71, 0xff, 0x22, 0x2b, 0x44, + 0x41, 0x3b, 0x3d, 0xbb, 0xaa, 0xf2, 0xd9, 0x54, 0x4e, 0xcb, 0x67, 0x53, 0x87, 0xea, 0x6d, 0x97, + 0x47, 0x83, 0x6a, 0xff, 0xb5, 0x08, 0x10, 0x45, 0xdb, 0x91, 0xef, 0x14, 0xe0, 0x7c, 0xf8, 0xc1, + 0x05, 0x62, 0xff, 0xc5, 0xd3, 0x22, 0xe7, 0xf6, 0xdf, 0x64, 0x7d, 0xec, 0x7c, 0x06, 0x5a, 0xcf, + 0x12, 0x87, 0xd9, 0xad, 0x20, 0x08, 0x35, 0xda, 0xe9, 0x06, 0xfb, 0x8b, 0x96, 0x27, 0x47, 0x60, + 0x66, 0x50, 0xe7, 0x75, 0xc9, 0x23, 0xaa, 0x4a, 0x23, 0x01, 0xff, 0x88, 0x14, 0x05, 0x43, 0x1c, + 0xb2, 0x03, 0x35, 0xc7, 0x7d, 0xc7, 0x67, 0xdd, 0x21, 0x87, 0xe3, 0x1b, 0xc3, 0x77, 0xb9, 0xe8, + 0x56, 0x61, 0xef, 0x97, 0x37, 0x58, 0x75, 0x64, 0x67, 0x7f, 0xbb, 0x08, 
0x67, 0x33, 0xfa, 0x81, + 0xbc, 0x01, 0x93, 0x32, 0xb0, 0x31, 0xca, 0x0f, 0x5e, 0x88, 0xf2, 0x83, 0xb7, 0x52, 0x34, 0xec, + 0xe3, 0x26, 0xef, 0x00, 0xe8, 0x86, 0x41, 0x7d, 0x7f, 0xcd, 0x35, 0xd5, 0x7e, 0xe0, 0x75, 0xa6, + 0xbe, 0xcc, 0x87, 0xa5, 0x0f, 0x0e, 0xa6, 0x7f, 0x2f, 0x2b, 0x56, 0x39, 0xd5, 0xcf, 0x51, 0x05, + 0x8c, 0x41, 0x92, 0xaf, 0x00, 0x88, 0x4d, 0x78, 0x98, 0x4e, 0xe1, 0x11, 0x96, 0xab, 0x19, 0x95, + 0xb8, 0x6a, 0xe6, 0x8b, 0x3d, 0xdd, 0x09, 0xac, 0x60, 0x5f, 0x64, 0xaf, 0xb9, 0x1b, 0xa2, 0x60, + 0x0c, 0x51, 0xfb, 0xa7, 0x45, 0xa8, 0x29, 0x9b, 0xf9, 0x63, 0x30, 0x94, 0xb6, 0x13, 0x86, 0xd2, + 0x13, 0x8a, 0x4e, 0xce, 0x32, 0x93, 0xba, 0x29, 0x33, 0xe9, 0x72, 0x7e, 0x51, 0x0f, 0x37, 0x92, + 0x7e, 0xbf, 0x08, 0xe3, 0x8a, 0x35, 0xaf, 0x89, 0xf4, 0xf3, 0x30, 0x21, 0xa2, 0x0a, 0xd6, 0xf4, + 0xfb, 0x22, 0x91, 0x0f, 0xef, 0xb0, 0xb2, 0x08, 0x08, 0x6e, 0x26, 0x49, 0x98, 0xe6, 0x65, 0xc3, + 0x5a, 0x14, 0x6d, 0xb2, 0x4d, 0x98, 0xf0, 0x43, 0x8a, 0xfd, 0x26, 0x1f, 0xd6, 0xcd, 0x14, 0x0d, + 0xfb, 0xb8, 0xd3, 0x36, 0xda, 0xf2, 0x29, 0xd8, 0x68, 0xff, 0x5d, 0x01, 0x46, 0xa3, 0xfe, 0x3a, + 0x75, 0x0b, 0xed, 0x76, 0xd2, 0x42, 0x3b, 0x9f, 0x7b, 0x38, 0x0c, 0xb0, 0xcf, 0xfe, 0xe5, 0x2a, + 0x24, 0x82, 0xe4, 0xc9, 0x16, 0x5c, 0xb2, 0x32, 0x43, 0xfd, 0x62, 0xb3, 0x4d, 0x78, 0xea, 0x7b, + 0x65, 0x20, 0x27, 0x3e, 0x04, 0x85, 0xf4, 0xa0, 0xb6, 0x47, 0xbd, 0xc0, 0x32, 0xa8, 0x7a, 0xbe, + 0xe5, 0xdc, 0x2a, 0x99, 0xb4, 0x42, 0x87, 0x7d, 0x7a, 0x57, 0x0a, 0xc0, 0x50, 0x14, 0xd9, 0x82, + 0x0a, 0x35, 0xdb, 0x54, 0xa5, 0x56, 0xca, 0x99, 0xb8, 0x34, 0xec, 0x4f, 0x76, 0xe7, 0xa3, 0x80, + 0x26, 0x3e, 0xd4, 0x6d, 0xe5, 0x65, 0x94, 0xe3, 0x70, 0x78, 0x05, 0x2b, 0xf4, 0x57, 0x46, 0x59, + 0x17, 0xc2, 0x22, 0x8c, 0xe4, 0x90, 0xdd, 0xd0, 0xdc, 0x59, 0x39, 0xa1, 0xc9, 0xe3, 0x21, 0xc6, + 0x4e, 0x1f, 0xea, 0xf7, 0xf4, 0x80, 0x7a, 0x1d, 0xdd, 0xdb, 0x95, 0xbb, 0x8d, 0xe1, 0x9f, 0xf0, + 0x4d, 0x85, 0x14, 0x3d, 0x61, 0x58, 0x84, 0x91, 0x1c, 0xe2, 0x42, 0x3d, 0x90, 0xea, 0xb3, 0xb2, + 0xe9, 0x0e, 
0x2f, 0x54, 0x29, 0xe2, 0xbe, 0x0c, 0x96, 0x57, 0xb7, 0x18, 0xc9, 0x20, 0x7b, 0x89, + 0x2c, 0xd7, 0x22, 0xb7, 0x79, 0x33, 0x87, 0x6f, 0x40, 0x42, 0x45, 0xcb, 0x4d, 0x76, 0xb6, 0x6c, + 0xed, 0x7f, 0x56, 0xa2, 0x69, 0xf9, 0x71, 0xdb, 0x09, 0x5f, 0x4a, 0xda, 0x09, 0xaf, 0xa4, 0xed, + 0x84, 0x29, 0x67, 0xf5, 0xf1, 0xc3, 0x6b, 0x53, 0xe6, 0xb5, 0xf2, 0x29, 0x98, 0xd7, 0x5e, 0x84, + 0xc6, 0x1e, 0x9f, 0x09, 0x44, 0x9e, 0xa6, 0x0a, 0x5f, 0x46, 0xf8, 0xcc, 0x7e, 0x37, 0x2a, 0xc6, + 0x38, 0x0f, 0xab, 0x22, 0xff, 0xeb, 0x11, 0x26, 0xba, 0x95, 0x55, 0x5a, 0x51, 0x31, 0xc6, 0x79, + 0x78, 0x64, 0x9e, 0xe5, 0xec, 0x8a, 0x0a, 0x55, 0x5e, 0x41, 0x44, 0xe6, 0xa9, 0x42, 0x8c, 0xe8, + 0xe4, 0x1a, 0xd4, 0x7a, 0xe6, 0xb6, 0xe0, 0xad, 0x71, 0x5e, 0xae, 0x61, 0x6e, 0x2e, 0x2e, 0xc9, + 0xbc, 0x51, 0x8a, 0xca, 0x5a, 0xd2, 0xd1, 0xbb, 0x8a, 0xc0, 0xf7, 0x86, 0xb2, 0x25, 0x6b, 0x51, + 0x31, 0xc6, 0x79, 0xc8, 0x1f, 0xc0, 0xb8, 0x47, 0xcd, 0x9e, 0x41, 0xc3, 0x5a, 0xc0, 0x6b, 0x11, + 0xf1, 0x03, 0x93, 0x38, 0x05, 0x53, 0x9c, 0x03, 0x8c, 0x84, 0x8d, 0xa1, 0x8c, 0x84, 0x5f, 0x80, + 0x71, 0xd3, 0xd3, 0x2d, 0x87, 0x9a, 0x77, 0x1c, 0x1e, 0x91, 0x20, 0xe3, 0x03, 0x43, 0x0b, 0xf9, + 0x62, 0x82, 0x8a, 0x29, 0x6e, 0xed, 0xbf, 0x17, 0x80, 0xf4, 0x47, 0xc2, 0x93, 0x1d, 0x18, 0x71, + 0xb8, 0xf5, 0x2c, 0x77, 0x6a, 0xed, 0x98, 0x11, 0x4e, 0x4c, 0x6b, 0xb2, 0x40, 0xe2, 0x13, 0x07, + 0x6a, 0xf4, 0x7e, 0x40, 0x3d, 0x27, 0x3c, 0x19, 0x73, 0x32, 0x69, 0xbc, 0xc5, 0x6e, 0x42, 0x22, + 0x63, 0x28, 0x43, 0xfb, 0x79, 0x11, 0x1a, 0x31, 0xbe, 0x47, 0x6d, 0x4a, 0xf9, 0xe1, 0x7c, 0x61, + 0xb4, 0xda, 0xf4, 0x6c, 0xf9, 0x85, 0xc6, 0x0e, 0xe7, 0x4b, 0x12, 0xae, 0x62, 0x9c, 0x8f, 0xcc, + 0x01, 0x74, 0x74, 0x3f, 0xa0, 0x1e, 0x5f, 0xbd, 0x53, 0x47, 0xe2, 0xd7, 0x42, 0x0a, 0xc6, 0xb8, + 0xc8, 0x55, 0x99, 0x88, 0xbd, 0x9c, 0x4c, 0x61, 0x38, 0x20, 0xcb, 0x7a, 0xe5, 0x04, 0xb2, 0xac, + 0x93, 0x36, 0x4c, 0xaa, 0x56, 0x2b, 0xea, 0xf1, 0x12, 0xdc, 0x89, 0xfd, 0x4f, 0x0a, 0x02, 0xfb, + 0x40, 0xb5, 0x1f, 0x14, 0x60, 0x2c, 0x61, 0x32, 
0x11, 0xc9, 0x07, 0xd5, 0x39, 0x8e, 0x44, 0xf2, + 0xc1, 0xd8, 0xf1, 0x8b, 0x17, 0x60, 0x44, 0x74, 0x50, 0x3a, 0x3c, 0x53, 0x74, 0x21, 0x4a, 0x2a, + 0x9b, 0x0b, 0xa5, 0x51, 0x36, 0x3d, 0x17, 0x4a, 0xab, 0x2d, 0x2a, 0xba, 0xf0, 0x75, 0x88, 0xd6, + 0xc9, 0x9e, 0x8e, 0xf9, 0x3a, 0x44, 0x39, 0x86, 0x1c, 0xda, 0x0f, 0x79, 0xbb, 0x03, 0x6f, 0x3f, + 0xdc, 0x0b, 0xb6, 0xa1, 0x2a, 0x43, 0xf2, 0xe4, 0xa7, 0xf1, 0x46, 0x0e, 0x3b, 0x0e, 0xc7, 0x91, + 0xc1, 0x67, 0xba, 0xb1, 0x7b, 0x67, 0x7b, 0x1b, 0x15, 0x3a, 0xb9, 0x0e, 0x75, 0xd7, 0x59, 0xd2, + 0x2d, 0xbb, 0xe7, 0xa9, 0x95, 0xe1, 0xb7, 0xd8, 0x5c, 0x77, 0x47, 0x15, 0x3e, 0x38, 0x98, 0xbe, + 0x10, 0xde, 0x24, 0x1a, 0x89, 0x51, 0x4d, 0xed, 0xcf, 0x17, 0xe0, 0x3c, 0xba, 0xb6, 0x6d, 0x39, + 0xed, 0xa4, 0xb3, 0x8c, 0xd8, 0x30, 0xde, 0xd1, 0xef, 0x6f, 0x3a, 0xfa, 0x9e, 0x6e, 0xd9, 0xfa, + 0x96, 0x4d, 0x1f, 0xb9, 0x97, 0xeb, 0x05, 0x96, 0x3d, 0x23, 0x7e, 0x4c, 0x37, 0xb3, 0xe2, 0x04, + 0x77, 0xbc, 0x56, 0xe0, 0x59, 0x4e, 0x5b, 0x4c, 0x7a, 0x6b, 0x09, 0x2c, 0x4c, 0x61, 0x6b, 0xbf, + 0x28, 0x01, 0x0f, 0x0b, 0x23, 0xaf, 0x42, 0xbd, 0x43, 0x8d, 0x1d, 0xdd, 0xb1, 0x7c, 0x95, 0xc6, + 0xf5, 0x22, 0x7b, 0xae, 0x35, 0x55, 0xf8, 0x80, 0xbd, 0x8a, 0xf9, 0xd6, 0x2a, 0x3f, 0x79, 0x11, + 0xf1, 0x12, 0x03, 0x46, 0xda, 0xbe, 0xaf, 0x77, 0xad, 0xdc, 0x51, 0x09, 0x22, 0x6d, 0xa6, 0x98, + 0x8e, 0xc4, 0x35, 0x4a, 0x68, 0x62, 0x40, 0xa5, 0x6b, 0xeb, 0x96, 0x93, 0xfb, 0x47, 0x4a, 0xec, + 0x09, 0xd6, 0x19, 0x92, 0x30, 0xae, 0xf1, 0x4b, 0x14, 0xd8, 0xa4, 0x07, 0x0d, 0xdf, 0xf0, 0xf4, + 0x8e, 0xbf, 0xa3, 0xcf, 0xbd, 0xfc, 0x4a, 0x6e, 0x75, 0x35, 0x12, 0x25, 0x56, 0xcf, 0x05, 0x9c, + 0x5f, 0x6b, 0xdd, 0x98, 0x9f, 0x7b, 0xf9, 0x15, 0x8c, 0xcb, 0x89, 0x8b, 0x7d, 0xf9, 0xc5, 0x39, + 0x39, 0x83, 0x9c, 0xb8, 0xd8, 0x97, 0x5f, 0x9c, 0xc3, 0xb8, 0x1c, 0xed, 0x7f, 0x15, 0xa0, 0x1e, + 0xf2, 0x92, 0x4d, 0x00, 0x36, 0x97, 0xc9, 0x44, 0x97, 0xc7, 0xfa, 0xe9, 0x04, 0xb7, 0x4f, 0x6c, + 0x86, 0x95, 0x31, 0x06, 0x94, 0x91, 0x09, 0xb4, 0x78, 0xd2, 0x99, 0x40, 0x67, 0xa1, 
0xbe, 0xa3, + 0x3b, 0xa6, 0xbf, 0xa3, 0xef, 0x8a, 0x29, 0x3d, 0x96, 0x1b, 0xf7, 0x86, 0x22, 0x60, 0xc4, 0xa3, + 0xfd, 0xe3, 0x11, 0x10, 0xa1, 0x04, 0x6c, 0xd2, 0x31, 0x2d, 0x5f, 0xc4, 0xb2, 0x17, 0x78, 0xcd, + 0x70, 0xd2, 0x59, 0x94, 0xe5, 0x18, 0x72, 0x90, 0x8b, 0x50, 0xea, 0x58, 0x8e, 0xf4, 0x3d, 0x71, + 0xd3, 0xe3, 0x9a, 0xe5, 0x20, 0x2b, 0xe3, 0x24, 0xfd, 0xbe, 0x0c, 0x43, 0x14, 0x24, 0xfd, 0x3e, + 0xb2, 0x32, 0xf2, 0x79, 0x98, 0xb0, 0x5d, 0x77, 0x97, 0x4d, 0x1f, 0x2a, 0x5a, 0x51, 0xf8, 0x81, + 0xb9, 0x31, 0x60, 0x35, 0x49, 0xc2, 0x34, 0x2f, 0xd9, 0x84, 0xa7, 0xdf, 0xa7, 0x9e, 0x2b, 0xe7, + 0xcb, 0x96, 0x4d, 0x69, 0x57, 0xc1, 0x08, 0x65, 0x8e, 0x07, 0x3d, 0xfe, 0x71, 0x36, 0x0b, 0x0e, + 0xaa, 0xcb, 0xc3, 0xa7, 0x75, 0xaf, 0x4d, 0x83, 0x75, 0xcf, 0x35, 0xa8, 0xef, 0x5b, 0x4e, 0x5b, + 0xc1, 0x8e, 0x44, 0xb0, 0x1b, 0xd9, 0x2c, 0x38, 0xa8, 0x2e, 0x79, 0x0b, 0xa6, 0x04, 0x49, 0xa8, + 0x2d, 0xf3, 0x62, 0x9a, 0xb1, 0x6c, 0xf5, 0xff, 0xc1, 0x31, 0xe1, 0xe1, 0xd9, 0x18, 0xc0, 0x83, + 0x03, 0x6b, 0x93, 0x9b, 0x30, 0xa9, 0xfc, 0x7b, 0xeb, 0xd4, 0x6b, 0x85, 0xe1, 0x25, 0x63, 0xcd, + 0x2b, 0x6c, 0xe7, 0xbd, 0x48, 0xbb, 0x1e, 0x35, 0xe2, 0x7e, 0x52, 0xc5, 0x85, 0x7d, 0xf5, 0x08, + 0xc2, 0x05, 0x1e, 0x43, 0xb2, 0xd9, 0x5d, 0x70, 0x5d, 0xdb, 0x74, 0xef, 0x39, 0xea, 0xd9, 0x85, + 0x8a, 0xc9, 0x5d, 0x7a, 0xad, 0x4c, 0x0e, 0x1c, 0x50, 0x93, 0x3d, 0x39, 0xa7, 0x2c, 0xba, 0xf7, + 0x9c, 0x34, 0x2a, 0x44, 0x4f, 0xde, 0x1a, 0xc0, 0x83, 0x03, 0x6b, 0x93, 0x25, 0x20, 0xe9, 0x27, + 0xd8, 0xec, 0x4a, 0xa7, 0xf3, 0x05, 0x91, 0xb3, 0x26, 0x4d, 0xc5, 0x8c, 0x1a, 0x64, 0x15, 0xce, + 0xa5, 0x4b, 0x99, 0x38, 0xe9, 0x7f, 0xe6, 0xd9, 0x6a, 0x31, 0x83, 0x8e, 0x99, 0xb5, 0xb4, 0x7f, + 0x52, 0x84, 0xb1, 0x44, 0x92, 0x83, 0x27, 0xee, 0x30, 0x39, 0xdb, 0x0b, 0x74, 0xfc, 0xf6, 0xca, + 0xe2, 0x0d, 0xaa, 0x9b, 0xd4, 0xbb, 0x45, 0x55, 0x42, 0x0a, 0xb1, 0x2c, 0x26, 0x28, 0x98, 0xe2, + 0x24, 0xdb, 0x50, 0x11, 0x96, 0xed, 0xbc, 0xbf, 0x2f, 0x51, 0x7d, 0xc4, 0xcd, 0xdb, 0xf2, 0x9f, + 0x3f, 0xae, 0x47, 0x51, 
0xc0, 0x6b, 0x01, 0x8c, 0xc6, 0x39, 0xd8, 0x44, 0x12, 0xa9, 0xbd, 0xd5, + 0x84, 0xca, 0xbb, 0x02, 0xa5, 0x20, 0x18, 0xf6, 0x98, 0xba, 0xf0, 0x94, 0x6c, 0xac, 0x22, 0xc3, + 0xd0, 0xb6, 0xd9, 0xbb, 0xf3, 0x7d, 0xcb, 0x75, 0x64, 0xce, 0xf2, 0x4d, 0xa8, 0x06, 0xd2, 0x58, + 0x38, 0xdc, 0x31, 0x7b, 0xae, 0x2b, 0x29, 0x43, 0xa1, 0xc2, 0xd2, 0xfe, 0x7d, 0x11, 0xea, 0xe1, + 0xc6, 0xfe, 0x08, 0xb9, 0xc0, 0x5d, 0xa8, 0x87, 0x31, 0x70, 0xb9, 0xff, 0xcd, 0x18, 0x85, 0x66, + 0xf1, 0xbd, 0x68, 0x78, 0x8b, 0x91, 0x8c, 0x78, 0x7c, 0x5d, 0x29, 0x47, 0x7c, 0x5d, 0x17, 0xaa, + 0x81, 0x67, 0xb5, 0xdb, 0x72, 0x97, 0x90, 0x27, 0xc0, 0x2e, 0xec, 0xae, 0x0d, 0x01, 0x28, 0x7b, + 0x56, 0xdc, 0xa0, 0x12, 0xa3, 0xbd, 0x0b, 0x93, 0x69, 0x4e, 0xae, 0x42, 0x1b, 0x3b, 0xd4, 0xec, + 0xd9, 0xaa, 0x8f, 0x23, 0x15, 0x5a, 0x96, 0x63, 0xc8, 0xc1, 0xb6, 0xe1, 0xec, 0x35, 0xbd, 0xef, + 0x3a, 0x4a, 0x8d, 0xe5, 0xbb, 0x91, 0x0d, 0x59, 0x86, 0x21, 0x55, 0xfb, 0x2f, 0x25, 0xb8, 0x18, + 0x99, 0x67, 0xd6, 0x74, 0x47, 0x6f, 0x1f, 0xe1, 0x87, 0x7c, 0x9f, 0x1d, 0x5c, 0x3a, 0xee, 0x0f, + 0x1d, 0x4a, 0x4f, 0xc0, 0x0f, 0x1d, 0xfe, 0x4f, 0x11, 0x78, 0xbc, 0x2e, 0xf9, 0x1a, 0x8c, 0xea, + 0xb1, 0x7f, 0xb1, 0xca, 0xd7, 0x79, 0x3d, 0xf7, 0xeb, 0xe4, 0x61, 0xc1, 0x61, 0xc8, 0x56, 0xbc, + 0x14, 0x13, 0x02, 0x89, 0x0b, 0xb5, 0x6d, 0xdd, 0xb6, 0x99, 0x2e, 0x94, 0xdb, 0xdd, 0x94, 0x10, + 0xce, 0x87, 0xf9, 0x92, 0x84, 0xc6, 0x50, 0x08, 0xf9, 0xa0, 0x00, 0x63, 0x5e, 0x7c, 0xbb, 0x26, + 0x5f, 0x48, 0x9e, 0x60, 0x84, 0x18, 0x5a, 0x3c, 0x40, 0x2c, 0xbe, 0x27, 0x4c, 0xca, 0xd4, 0xfe, + 0x73, 0x01, 0xc6, 0x5a, 0xb6, 0x65, 0x5a, 0x4e, 0xfb, 0x14, 0xff, 0x27, 0x71, 0x07, 0x2a, 0xbe, + 0x6d, 0x99, 0x74, 0xc8, 0xd5, 0x44, 0xac, 0x63, 0x0c, 0x00, 0x05, 0x4e, 0xf2, 0x07, 0x15, 0xa5, + 0x23, 0xfc, 0xa0, 0xe2, 0x57, 0x23, 0x20, 0x23, 0xcf, 0x49, 0x0f, 0xea, 0x6d, 0x95, 0xf7, 0x5e, + 0x3e, 0xe3, 0x8d, 0x1c, 0x39, 0x13, 0x13, 0x19, 0xf4, 0xc5, 0xdc, 0x1f, 0x16, 0x62, 0x24, 0x89, + 0xd0, 0xe4, 0x4f, 0x80, 0x17, 0x73, 0xfe, 0x04, 0x58, 0x88, 
0xeb, 0xff, 0x0d, 0xb0, 0x0e, 0xe5, + 0x9d, 0x20, 0xe8, 0xca, 0xc1, 0x34, 0xfc, 0xd1, 0x82, 0x28, 0x6d, 0x8f, 0xd0, 0x89, 0xd8, 0x3d, + 0x72, 0x68, 0x26, 0xc2, 0xd1, 0xc3, 0x5f, 0xad, 0x2d, 0xe4, 0x0a, 0x7c, 0x88, 0x8b, 0x60, 0xf7, + 0xc8, 0xa1, 0xc9, 0x57, 0xa1, 0x11, 0x78, 0xba, 0xe3, 0x6f, 0xbb, 0x5e, 0x87, 0x7a, 0x72, 0x8f, + 0xba, 0x94, 0xe3, 0x3f, 0xb8, 0x1b, 0x11, 0x9a, 0xf0, 0xa8, 0x26, 0x8a, 0x30, 0x2e, 0x8d, 0xec, + 0x42, 0xad, 0x67, 0x8a, 0x86, 0x49, 0x33, 0xd8, 0x7c, 0x9e, 0x5f, 0x1b, 0xc7, 0xc2, 0x1a, 0xd4, + 0x1d, 0x86, 0x02, 0x92, 0x7f, 0x15, 0xac, 0x9e, 0xd4, 0x5f, 0x05, 0xe3, 0xa3, 0x31, 0x2b, 0xa7, + 0x08, 0xe9, 0x48, 0xbd, 0xd6, 0x69, 0xcb, 0xa8, 0xac, 0xa5, 0xdc, 0x2a, 0xa7, 0x10, 0xd9, 0x08, + 0x75, 0x63, 0xa7, 0x8d, 0x4a, 0x86, 0xd6, 0x01, 0xe9, 0xed, 0x20, 0x46, 0xe2, 0xdf, 0x3b, 0xe2, + 0xa0, 0xdb, 0xec, 0xd1, 0xe6, 0x83, 0xf0, 0x27, 0x30, 0xb1, 0xdc, 0xdf, 0x99, 0x3f, 0xd9, 0xd1, + 0xfe, 0x43, 0x11, 0x4a, 0x1b, 0xab, 0x2d, 0x91, 0xcf, 0x93, 0xff, 0xd8, 0x8a, 0xb6, 0x76, 0xad, + 0xee, 0x5d, 0xea, 0x59, 0xdb, 0xfb, 0x72, 0xeb, 0x1d, 0xcb, 0xe7, 0x99, 0xe6, 0xc0, 0x8c, 0x5a, + 0xe4, 0x6d, 0x18, 0x35, 0xf4, 0x05, 0xea, 0x05, 0xc3, 0x18, 0x16, 0xf8, 0x89, 0xde, 0x85, 0xf9, + 0xa8, 0x3a, 0x26, 0xc0, 0xc8, 0x26, 0x80, 0x11, 0x41, 0x97, 0x8e, 0x6d, 0x0e, 0x89, 0x01, 0xc7, + 0x80, 0x08, 0x42, 0x7d, 0x97, 0xb1, 0x72, 0xd4, 0xf2, 0x71, 0x50, 0xf9, 0xc8, 0xb9, 0xa5, 0xea, + 0x62, 0x04, 0xa3, 0x39, 0x30, 0x96, 0xf8, 0x21, 0x0f, 0xf9, 0x1c, 0xd4, 0xdc, 0x6e, 0x6c, 0x3a, + 0xad, 0xf3, 0xf8, 0xcf, 0xda, 0x1d, 0x59, 0xf6, 0xe0, 0x60, 0x7a, 0x6c, 0xd5, 0x6d, 0x5b, 0x86, + 0x2a, 0xc0, 0x90, 0x9d, 0x68, 0x30, 0xc2, 0x8f, 0xe1, 0xa9, 0xdf, 0xf1, 0xf0, 0xb5, 0x83, 0xff, + 0x31, 0xc3, 0x47, 0x49, 0xd1, 0xbe, 0x5e, 0x86, 0xc8, 0x47, 0x48, 0x7c, 0x18, 0x11, 0xc7, 0x0c, + 0xe4, 0xcc, 0x7d, 0xaa, 0x27, 0x1a, 0xa4, 0x28, 0xd2, 0x86, 0xd2, 0xbb, 0xee, 0x56, 0xee, 0x89, + 0x3b, 0x76, 0xfe, 0x5e, 0xd8, 0xca, 0x62, 0x05, 0xc8, 0x24, 0x90, 0xbf, 0x51, 0x80, 0x33, 0x7e, + 
0x5a, 0xf5, 0x95, 0xc3, 0x01, 0xf3, 0xeb, 0xf8, 0x69, 0x65, 0x5a, 0x06, 0xea, 0x0e, 0x22, 0x63, + 0x7f, 0x5b, 0x58, 0xff, 0x0b, 0xe7, 0x9d, 0x1c, 0x4e, 0xcb, 0x39, 0x7f, 0x22, 0x99, 0xec, 0xff, + 0x64, 0x19, 0x4a, 0x51, 0xda, 0x37, 0x8b, 0xd0, 0x88, 0xcd, 0xd6, 0xb9, 0xff, 0xf2, 0x74, 0x3f, + 0xf5, 0x97, 0xa7, 0xf5, 0xe1, 0x7d, 0xd9, 0x51, 0xab, 0x4e, 0xfb, 0x47, 0x4f, 0xff, 0xbc, 0x08, + 0xa5, 0xcd, 0xc5, 0xa5, 0xe4, 0xa6, 0xb5, 0xf0, 0x18, 0x36, 0xad, 0x3b, 0x50, 0xdd, 0xea, 0x59, + 0x76, 0x60, 0x39, 0xb9, 0x33, 0x84, 0xa8, 0x9f, 0x62, 0x49, 0x5f, 0x87, 0x40, 0x45, 0x05, 0x4f, + 0xda, 0x50, 0x6d, 0x8b, 0x14, 0x8d, 0xb9, 0x23, 0xfc, 0x64, 0xaa, 0x47, 0x21, 0x48, 0xde, 0xa0, + 0x42, 0xd7, 0xf6, 0x61, 0x64, 0x73, 0x51, 0xaa, 0xfd, 0x8f, 0xb7, 0x37, 0xb5, 0xaf, 0x42, 0xa8, + 0x05, 0x3c, 0x7e, 0xe1, 0xff, 0xad, 0x00, 0x49, 0xc5, 0xe7, 0xf1, 0x8f, 0xa6, 0xdd, 0xf4, 0x68, + 0x5a, 0x3c, 0x89, 0x8f, 0x2f, 0x7b, 0x40, 0x69, 0xff, 0xb6, 0x00, 0xa9, 0xb3, 0x61, 0xe4, 0x15, + 0x99, 0xed, 0x2b, 0x19, 0x4a, 0xa5, 0xb2, 0x7d, 0x91, 0x24, 0x77, 0x2c, 0xeb, 0xd7, 0x87, 0x6c, + 0xbb, 0x16, 0x77, 0xa0, 0xc9, 0xe6, 0xdf, 0x1e, 0x7e, 0xbb, 0x96, 0xe5, 0x8e, 0x93, 0xe1, 0x7e, + 0x71, 0x12, 0x26, 0xe5, 0x6a, 0xff, 0xa8, 0x08, 0x23, 0x8f, 0xed, 0xa8, 0x3a, 0x4d, 0x44, 0x60, + 0x2e, 0xe4, 0x9c, 0xed, 0x07, 0xc6, 0x5f, 0x76, 0x52, 0xf1, 0x97, 0x79, 0xff, 0x4d, 0xfc, 0x88, + 0xe8, 0xcb, 0x7f, 0x5d, 0x00, 0xb9, 0xd6, 0xac, 0x38, 0x7e, 0xa0, 0x3b, 0x06, 0x25, 0x46, 0xb8, + 0xb0, 0xe5, 0x0d, 0xf3, 0x91, 0xa1, 0x70, 0x42, 0x97, 0xe1, 0xd7, 0x6a, 0x21, 0x23, 0xbf, 0x0b, + 0xb5, 0x1d, 0xd7, 0x0f, 0xf8, 0xe2, 0x55, 0x4c, 0x9a, 0xcc, 0x6e, 0xc8, 0x72, 0x0c, 0x39, 0xd2, + 0xee, 0xec, 0xca, 0x60, 0x77, 0xb6, 0xf6, 0xbd, 0x22, 0x8c, 0x7e, 0x5a, 0xce, 0xdb, 0x67, 0xc5, + 0xab, 0x96, 0x72, 0xc6, 0xab, 0x96, 0x8f, 0x13, 0xaf, 0xaa, 0xfd, 0xa4, 0x00, 0xf0, 0xd8, 0x0e, + 0xfb, 0x9b, 0xc9, 0x50, 0xd2, 0xdc, 0xe3, 0x2a, 0x3b, 0x90, 0xf4, 0x1f, 0x54, 0xd4, 0x23, 0xf1, + 0x30, 0xd2, 0x0f, 0x0b, 0x30, 0xae, 
0x27, 0x42, 0x33, 0x73, 0xeb, 0xcb, 0xa9, 0x48, 0xcf, 0x30, + 0xb2, 0x28, 0x59, 0x8e, 0x29, 0xb1, 0xe4, 0xb5, 0x28, 0xd1, 0xf4, 0xed, 0x68, 0xd8, 0xf7, 0x65, + 0x88, 0xe6, 0xba, 0x5b, 0x82, 0xf3, 0x11, 0xa1, 0xb0, 0xa5, 0x13, 0x09, 0x85, 0x8d, 0x1f, 0xf2, + 0x2b, 0x3f, 0xf4, 0x90, 0xdf, 0x1e, 0xd4, 0xb7, 0x3d, 0xb7, 0xc3, 0xa3, 0x4d, 0xe5, 0x5f, 0x8d, + 0xaf, 0xe7, 0x58, 0x28, 0xa3, 0xff, 0xf9, 0x47, 0x86, 0xab, 0x25, 0x85, 0x8f, 0x91, 0x28, 0x6e, + 0xeb, 0x77, 0x85, 0xd4, 0x91, 0x93, 0x94, 0x1a, 0xce, 0x25, 0x1b, 0x02, 0x1d, 0x95, 0x98, 0x64, + 0x84, 0x69, 0xf5, 0xf1, 0x44, 0x98, 0x6a, 0x7f, 0xa9, 0xaa, 0x26, 0xb0, 0x27, 0x2e, 0xa7, 0xe9, + 0x67, 0x47, 0xb3, 0xdb, 0xb4, 0xef, 0xdc, 0x74, 0xed, 0x31, 0x9e, 0x9b, 0xae, 0x9f, 0xcc, 0xb9, + 0x69, 0xc8, 0x77, 0x6e, 0xba, 0x71, 0x42, 0xe7, 0xa6, 0x47, 0x4f, 0xea, 0xdc, 0xf4, 0xd8, 0x50, + 0xe7, 0xa6, 0xc7, 0x8f, 0x74, 0x6e, 0xfa, 0xa0, 0x04, 0xa9, 0xcd, 0xf8, 0x67, 0x8e, 0xb7, 0xff, + 0xa7, 0x1c, 0x6f, 0x1f, 0x15, 0x21, 0x9a, 0x88, 0x8f, 0x19, 0x98, 0xf4, 0x16, 0xd4, 0x3a, 0xfa, + 0xfd, 0x45, 0x6a, 0xeb, 0xfb, 0x79, 0x7e, 0x05, 0xbc, 0x26, 0x31, 0x30, 0x44, 0x23, 0x3e, 0x80, + 0x15, 0xa6, 0xe3, 0xcf, 0xed, 0xc2, 0x88, 0x32, 0xfb, 0x0b, 0x23, 0x69, 0x74, 0x8f, 0x31, 0x31, + 0xda, 0xbf, 0x2a, 0x82, 0xfc, 0x6f, 0x03, 0xa1, 0x50, 0xd9, 0xb6, 0xee, 0x53, 0x33, 0x77, 0xb8, + 0x73, 0xec, 0x07, 0xed, 0xc2, 0x47, 0xc3, 0x0b, 0x50, 0xa0, 0x73, 0xe3, 0xbb, 0xf0, 0xb9, 0xc9, + 0xfe, 0xcb, 0x61, 0x7c, 0x8f, 0xfb, 0xee, 0xa4, 0xf1, 0x5d, 0x14, 0xa1, 0x92, 0x21, 0x6c, 0xfd, + 0x3c, 0xfc, 0x22, 0xb7, 0x8b, 0x31, 0x11, 0xc6, 0xa1, 0x6c, 0xfd, 0xbe, 0x48, 0x9c, 0x20, 0x65, + 0x34, 0xbf, 0xfc, 0xe3, 0x9f, 0x5e, 0x79, 0xea, 0x27, 0x3f, 0xbd, 0xf2, 0xd4, 0xc7, 0x3f, 0xbd, + 0xf2, 0xd4, 0xd7, 0x0f, 0xaf, 0x14, 0x7e, 0x7c, 0x78, 0xa5, 0xf0, 0x93, 0xc3, 0x2b, 0x85, 0x8f, + 0x0f, 0xaf, 0x14, 0xfe, 0xe3, 0xe1, 0x95, 0xc2, 0x5f, 0xfd, 0x4f, 0x57, 0x9e, 0xfa, 0xe3, 0x57, + 0xa3, 0x26, 0xcc, 0xaa, 0x26, 0xcc, 0x2a, 0x81, 0xb3, 0xdd, 0xdd, 0xf6, 
0x2c, 0x6b, 0x42, 0x54, + 0xa2, 0x9a, 0xf0, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xc4, 0xe2, 0xd0, 0x0e, 0xda, 0x9a, 0x00, + 0x00, } func (m *AbstractPodTemplate) Marshal() (dAtA []byte, err error) { @@ -3585,6 +3584,18 @@ func (m *AbstractVertex) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + { + size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 if m.SideInputsContainerTemplate != nil { { size, err := m.SideInputsContainerTemplate.MarshalToSizedBuffer(dAtA[:i]) @@ -9142,24 +9153,24 @@ func (m *VertexStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { copy(dAtA[i:], m.UpdateHash) i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateHash))) i-- - dAtA[i] = 0x6a + dAtA[i] = 0x72 i -= len(m.CurrentHash) copy(dAtA[i:], m.CurrentHash) i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentHash))) i-- - dAtA[i] = 0x62 + dAtA[i] = 0x6a + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReadyReplicas)) + i-- + dAtA[i] = 0x60 i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) i-- dAtA[i] = 0x58 - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - i-- - dAtA[i] = 0x50 i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) i-- - dAtA[i] = 0x48 + dAtA[i] = 0x50 i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) i-- - dAtA[i] = 0x40 + dAtA[i] = 0x48 { size, err := m.LastScaledAt.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -9169,22 +9180,25 @@ func (m *VertexStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x3a + dAtA[i] = 0x42 i -= len(m.Message) copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) i-- - dAtA[i] = 0x32 + dAtA[i] = 0x3a i -= len(m.Reason) copy(dAtA[i:], m.Reason) i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Reason))) i-- - dAtA[i] = 0x2a + dAtA[i] = 0x32 i -= len(m.Selector) copy(dAtA[i:], m.Selector) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Selector))) i-- - dAtA[i] = 0x22 + dAtA[i] = 0x2a + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) + i-- + dAtA[i] = 0x20 i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) i-- dAtA[i] = 0x18 @@ -9547,6 +9561,8 @@ func (m *AbstractVertex) Size() (n int) { l = m.SideInputsContainerTemplate.Size() n += 1 + l + sovGenerated(uint64(l)) } + l = m.UpdateStrategy.Size() + n += 2 + l + sovGenerated(uint64(l)) return n } @@ -11540,6 +11556,7 @@ func (m *VertexStatus) Size() (n int) { l = len(m.Phase) n += 1 + l + sovGenerated(uint64(l)) n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.DesiredReplicas)) l = len(m.Selector) n += 1 + l + sovGenerated(uint64(l)) l = len(m.Reason) @@ -11550,8 +11567,8 @@ func (m *VertexStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) n += 1 + sovGenerated(uint64(m.ObservedGeneration)) n += 1 + sovGenerated(uint64(m.ReadyReplicas)) - n += 1 + sovGenerated(uint64(m.CurrentReplicas)) n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + n += 1 + sovGenerated(uint64(m.UpdatedReadyReplicas)) l = len(m.CurrentHash) n += 1 + l + sovGenerated(uint64(l)) l = len(m.UpdateHash) @@ -11719,6 +11736,7 @@ func (this *AbstractVertex) String() string { `Partitions:` + valueToStringGenerated(this.Partitions) + `,`, `SideInputs:` + fmt.Sprintf("%v", this.SideInputs) + `,`, `SideInputsContainerTemplate:` + strings.Replace(this.SideInputsContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, + `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "UpdateStrategy", "UpdateStrategy", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -13099,14 +13117,15 @@ func (this *VertexStatus) String() string { `Status:` + strings.Replace(strings.Replace(this.Status.String(), "Status", "Status", 1), `&`, ``, 1) + `,`, 
`Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `DesiredReplicas:` + fmt.Sprintf("%v", this.DesiredReplicas) + `,`, `Selector:` + fmt.Sprintf("%v", this.Selector) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `LastScaledAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastScaledAt), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, - `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, + `UpdatedReadyReplicas:` + fmt.Sprintf("%v", this.UpdatedReadyReplicas) + `,`, `CurrentHash:` + fmt.Sprintf("%v", this.CurrentHash) + `,`, `UpdateHash:` + fmt.Sprintf("%v", this.UpdateHash) + `,`, `}`, @@ -14477,6 +14496,39 @@ func (m *AbstractVertex) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -31445,6 +31497,25 @@ func (m *VertexStatus) Unmarshal(dAtA []byte) error { } } case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredReplicas", 
wireType) + } + m.DesiredReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DesiredReplicas |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) } @@ -31476,7 +31547,7 @@ func (m *VertexStatus) Unmarshal(dAtA []byte) error { } m.Selector = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } @@ -31508,7 +31579,7 @@ func (m *VertexStatus) Unmarshal(dAtA []byte) error { } m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: + case 7: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } @@ -31540,7 +31611,7 @@ func (m *VertexStatus) Unmarshal(dAtA []byte) error { } m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 7: + case 8: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field LastScaledAt", wireType) } @@ -31573,7 +31644,7 @@ func (m *VertexStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 8: + case 9: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) } @@ -31592,7 +31663,7 @@ func (m *VertexStatus) Unmarshal(dAtA []byte) error { break } } - case 9: + case 10: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) } @@ -31611,11 +31682,11 @@ func (m *VertexStatus) Unmarshal(dAtA []byte) error { break } } - case 10: + case 11: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) } - m.CurrentReplicas = 0 + m.UpdatedReplicas = 0 for shift := uint(0); 
; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31625,16 +31696,16 @@ func (m *VertexStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentReplicas |= uint32(b&0x7F) << shift + m.UpdatedReplicas |= uint32(b&0x7F) << shift if b < 0x80 { break } } - case 11: + case 12: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReadyReplicas", wireType) } - m.UpdatedReplicas = 0 + m.UpdatedReadyReplicas = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31644,12 +31715,12 @@ func (m *VertexStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= uint32(b&0x7F) << shift + m.UpdatedReadyReplicas |= uint32(b&0x7F) << shift if b < 0x80 { break } } - case 12: + case 13: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field CurrentHash", wireType) } @@ -31681,7 +31752,7 @@ func (m *VertexStatus) Unmarshal(dAtA []byte) error { } m.CurrentHash = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 13: + case 14: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field UpdateHash", wireType) } diff --git a/pkg/apis/numaflow/v1alpha1/generated.proto b/pkg/apis/numaflow/v1alpha1/generated.proto index 78e10457a0..c927033e7e 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.proto +++ b/pkg/apis/numaflow/v1alpha1/generated.proto @@ -201,6 +201,11 @@ message AbstractVertex { // Container template for the side inputs watcher container. // +optional optional ContainerTemplate sideInputsContainerTemplate = 15; + + // The strategy to use to replace existing pods with new ones. 
+ // +kubebuilder:default={"type": "RollingUpdate", "rollingUpdate": {"maxUnavailable": "25%"}} + // +optional + optional UpdateStrategy updateStrategy = 16; } message Authorization { @@ -1642,7 +1647,7 @@ message UpdateStrategy { // +kubebuilder:subresource:status // +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector // +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` -// +kubebuilder:printcolumn:name="Desired",type=string,JSONPath=`.spec.replicas` +// +kubebuilder:printcolumn:name="Desired",type=string,JSONPath=`.status.desiredReplicas` // +kubebuilder:printcolumn:name="Current",type=string,JSONPath=`.status.replicas` // +kubebuilder:printcolumn:name="Ready",type=string,JSONPath=`.status.readyReplicas` // +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` @@ -1732,38 +1737,42 @@ message VertexStatus { // +optional optional uint32 replicas = 3; + // The number of desired replicas. + // +optional + optional uint32 desiredReplicas = 4; + // +optional - optional string selector = 4; + optional string selector = 5; // +optional - optional string reason = 5; + optional string reason = 6; // +optional - optional string message = 6; + optional string message = 7; // Time of last scaling operation. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaledAt = 7; + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaledAt = 8; // The generation observed by the Vertex controller. // +optional - optional int64 observedGeneration = 8; + optional int64 observedGeneration = 9; // The number of pods targeted by this Vertex with a Ready Condition. // +optional - optional uint32 readyReplicas = 9; - - // The number of Pods created by the controller from the Vertex version indicated by currentHash. 
- optional uint32 currentReplicas = 10; + optional uint32 readyReplicas = 10; // The number of Pods created by the controller from the Vertex version indicated by updateHash. optional uint32 updatedReplicas = 11; - // If not empty, indicates the version of the Vertex used to generate Pods in the sequence [0,currentReplicas). - optional string currentHash = 12; + // The number of ready Pods created by the controller from the Vertex version indicated by updateHash. + optional uint32 updatedReadyReplicas = 12; + + // If not empty, indicates the current version of the Vertex used to generate Pods. + optional string currentHash = 13; - // If not empty, indicates the version of the Vertx used to generate Pods in the sequence [replicas-updatedReplicas,replicas) - optional string updateHash = 13; + // If not empty, indicates the updated version of the Vertex used to generate Pods. + optional string updateHash = 14; } message VertexTemplate { diff --git a/pkg/apis/numaflow/v1alpha1/mono_vertex_types_test.go b/pkg/apis/numaflow/v1alpha1/mono_vertex_types_test.go index aeb809db05..665acd8b32 100644 --- a/pkg/apis/numaflow/v1alpha1/mono_vertex_types_test.go +++ b/pkg/apis/numaflow/v1alpha1/mono_vertex_types_test.go @@ -18,6 +18,7 @@ package v1alpha1 import ( "testing" + "time" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" @@ -88,6 +89,31 @@ func TestMonoVertex_MarkPhaseRunning(t *testing.T) { } } +func TestMonoVertex_MarkDaemonUnHealthy(t *testing.T) { + mvs := MonoVertexStatus{} + mvs.MarkDaemonUnHealthy("reason", "message") + + for _, condition := range mvs.Conditions { + if condition.Type == string(MonoVertexConditionDaemonHealthy) { + if condition.Status != metav1.ConditionFalse { + t.Errorf("MarkDaemonUnHealthy should set the DaemonHealthy condition to false, got %v", condition.Status) + } + if condition.Reason != "reason" { + t.Errorf("MarkDaemonUnHealthy should set the Reason to 'reason', got %s", condition.Reason) + } + if condition.Message != 
"message" { + t.Errorf("MarkDaemonUnHealthy should set the Message to 'message', got %s", condition.Message) + } + } + } +} + +func TestMonoVertex_SetObservedGeneration(t *testing.T) { + mvs := MonoVertexStatus{} + mvs.SetObservedGeneration(1) + assert.Equal(t, int64(1), mvs.ObservedGeneration) +} + func TestMonoVertex_IsHealthy(t *testing.T) { mvs := MonoVertexStatus{} @@ -171,3 +197,346 @@ func TestMonoVertexGetPodSpec(t *testing.T) { assert.Contains(t, envNames, EnvMonoVertexObject) }) } + +func TestMonoVertexLimits_GetReadBatchSize(t *testing.T) { + t.Run("default value", func(t *testing.T) { + mvl := MonoVertexLimits{} + assert.Equal(t, uint64(DefaultReadBatchSize), mvl.GetReadBatchSize()) + }) + + t.Run("custom value", func(t *testing.T) { + customSize := uint64(1000) + mvl := MonoVertexLimits{ReadBatchSize: &customSize} + assert.Equal(t, customSize, mvl.GetReadBatchSize()) + }) + +} + +func TestMonoVertexLimits_GetReadTimeout(t *testing.T) { + t.Run("default value", func(t *testing.T) { + mvl := MonoVertexLimits{} + assert.Equal(t, DefaultReadTimeout, mvl.GetReadTimeout()) + }) + + t.Run("custom value", func(t *testing.T) { + customTimeout := metav1.Duration{Duration: 5 * time.Second} + mvl := MonoVertexLimits{ReadTimeout: &customTimeout} + assert.Equal(t, 5*time.Second, mvl.GetReadTimeout()) + }) +} + +func TestMonoVertex_GetDaemonDeploymentName(t *testing.T) { + mv := MonoVertex{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-vertex", + }, + } + expected := "test-vertex-mv-daemon" + assert.Equal(t, expected, mv.GetDaemonDeploymentName()) +} + +func TestMonoVertex_GetDaemonServiceURL(t *testing.T) { + mv := MonoVertex{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-vertex", + Namespace: "test-namespace", + }, + } + expected := "test-vertex-mv-daemon-svc.test-namespace.svc:4327" + assert.Equal(t, expected, mv.GetDaemonServiceURL()) +} + +func TestMonoVertex_Scalable(t *testing.T) { + t.Run("scalable when not disabled", func(t *testing.T) { + mv := 
MonoVertex{ + Spec: MonoVertexSpec{ + Scale: Scale{ + Disabled: false, + }, + }, + } + assert.True(t, mv.Scalable()) + }) + + t.Run("not scalable when disabled", func(t *testing.T) { + mv := MonoVertex{ + Spec: MonoVertexSpec{ + Scale: Scale{ + Disabled: true, + }, + }, + } + assert.False(t, mv.Scalable()) + }) +} + +func TestMonoVertex_GetReplicas(t *testing.T) { + t.Run("default replicas", func(t *testing.T) { + mv := MonoVertex{} + assert.Equal(t, 1, mv.getReplicas()) + }) + + t.Run("custom replicas", func(t *testing.T) { + replicas := int32(3) + mv := MonoVertex{ + Spec: MonoVertexSpec{ + Replicas: &replicas, + }, + } + assert.Equal(t, 3, mv.getReplicas()) + }) +} + +func TestMonoVertex_CalculateReplicas(t *testing.T) { + t.Run("auto scaling disabled", func(t *testing.T) { + replicas := int32(5) + mv := MonoVertex{ + Spec: MonoVertexSpec{ + Replicas: &replicas, + Scale: Scale{ + Disabled: true, + }, + }, + } + assert.Equal(t, 5, mv.CalculateReplicas()) + }) + + t.Run("auto scaling enabled, within range", func(t *testing.T) { + replicas := int32(3) + mv := MonoVertex{ + Spec: MonoVertexSpec{ + Replicas: &replicas, + Scale: Scale{ + Disabled: false, + Min: ptr.To[int32](1), + Max: ptr.To[int32](5), + }, + }, + } + assert.Equal(t, 3, mv.CalculateReplicas()) + }) + + t.Run("auto scaling enabled, below min", func(t *testing.T) { + replicas := int32(0) + mv := MonoVertex{ + Spec: MonoVertexSpec{ + Replicas: &replicas, + Scale: Scale{ + Disabled: false, + Min: ptr.To[int32](2), + Max: ptr.To[int32](5), + }, + }, + } + assert.Equal(t, 2, mv.CalculateReplicas()) + }) + + t.Run("auto scaling enabled, above max", func(t *testing.T) { + replicas := int32(10) + mv := MonoVertex{ + Spec: MonoVertexSpec{ + Replicas: &replicas, + Scale: Scale{ + Disabled: false, + Min: ptr.To[int32](2), + Max: ptr.To[int32](5), + }, + }, + } + assert.Equal(t, 5, mv.CalculateReplicas()) + }) +} + +func TestMonoVertex_GetServiceObj(t *testing.T) { + mv := MonoVertex{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "test-vertex", + Namespace: "test-namespace", + }, + } + + t.Run("non-headless service", func(t *testing.T) { + svc := mv.getServiceObj("test-service", false, 8080, "http") + assert.Equal(t, "test-service", svc.Name) + assert.Equal(t, "test-namespace", svc.Namespace) + assert.Equal(t, 1, len(svc.Spec.Ports)) + assert.Equal(t, int32(8080), svc.Spec.Ports[0].Port) + assert.Equal(t, "http", svc.Spec.Ports[0].Name) + assert.NotEqual(t, "None", svc.Spec.ClusterIP) + }) + + t.Run("headless service", func(t *testing.T) { + svc := mv.getServiceObj("test-headless-service", true, 9090, "grpc") + assert.Equal(t, "test-headless-service", svc.Name) + assert.Equal(t, "test-namespace", svc.Namespace) + assert.Equal(t, 1, len(svc.Spec.Ports)) + assert.Equal(t, int32(9090), svc.Spec.Ports[0].Port) + assert.Equal(t, "grpc", svc.Spec.Ports[0].Name) + assert.Equal(t, "None", svc.Spec.ClusterIP) + }) + + t.Run("verify labels", func(t *testing.T) { + svc := mv.getServiceObj("test-label-service", false, 7070, "metrics") + expectedLabels := map[string]string{ + KeyPartOf: Project, + KeyManagedBy: ControllerMonoVertex, + KeyComponent: ComponentMonoVertex, + KeyMonoVertexName: "test-vertex", + } + assert.Equal(t, expectedLabels, svc.Labels) + }) + + t.Run("verify selector", func(t *testing.T) { + svc := mv.getServiceObj("test-selector-service", false, 6060, "admin") + expectedSelector := map[string]string{ + KeyPartOf: Project, + KeyManagedBy: ControllerMonoVertex, + KeyComponent: ComponentMonoVertex, + KeyMonoVertexName: "test-vertex", + } + assert.Equal(t, expectedSelector, svc.Spec.Selector) + }) +} + +func TestMonoVertex_GetServiceObjs(t *testing.T) { + mv := MonoVertex{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-vertex", + Namespace: "test-namespace", + }, + } + + t.Run("verify service objects", func(t *testing.T) { + services := mv.GetServiceObjs() + assert.Equal(t, 1, len(services), "Expected 1 service object") + + headlessService := services[0] + 
assert.Equal(t, mv.GetHeadlessServiceName(), headlessService.Name) + assert.Equal(t, "test-namespace", headlessService.Namespace) + assert.Equal(t, "None", headlessService.Spec.ClusterIP) + assert.Equal(t, 1, len(headlessService.Spec.Ports)) + assert.Equal(t, int32(MonoVertexMetricsPort), headlessService.Spec.Ports[0].Port) + assert.Equal(t, MonoVertexMetricsPortName, headlessService.Spec.Ports[0].Name) + }) +} + +func TestMonoVertex_GetDaemonDeploymentObj(t *testing.T) { + mv := MonoVertex{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-vertex", + Namespace: "test-namespace", + }, + Spec: MonoVertexSpec{}, + } + + t.Run("basic deployment object", func(t *testing.T) { + req := GetMonoVertexDaemonDeploymentReq{ + Image: "test-image:latest", + PullPolicy: corev1.PullAlways, + DefaultResources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("128Mi"), + }, + }, + } + + deployment, err := mv.GetDaemonDeploymentObj(req) + assert.NoError(t, err) + assert.NotNil(t, deployment) + assert.Equal(t, mv.GetDaemonDeploymentName(), deployment.Name) + assert.Equal(t, mv.Namespace, deployment.Namespace) + assert.Equal(t, "test-image:latest", deployment.Spec.Template.Spec.Containers[0].Image) + assert.Equal(t, corev1.PullAlways, deployment.Spec.Template.Spec.Containers[0].ImagePullPolicy) + assert.Equal(t, resource.MustParse("100m"), deployment.Spec.Template.Spec.Containers[0].Resources.Limits[corev1.ResourceCPU]) + assert.Equal(t, resource.MustParse("128Mi"), deployment.Spec.Template.Spec.Containers[0].Resources.Limits[corev1.ResourceMemory]) + }) + + t.Run("with custom environment variables", func(t *testing.T) { + req := GetMonoVertexDaemonDeploymentReq{ + Image: "test-image:v1", + Env: []corev1.EnvVar{ + {Name: "CUSTOM_ENV", Value: "custom_value"}, + }, + } + + deployment, err := mv.GetDaemonDeploymentObj(req) + assert.NoError(t, err) + assert.NotNil(t, deployment) + + 
envVars := deployment.Spec.Template.Spec.Containers[0].Env + assert.Contains(t, envVars, corev1.EnvVar{Name: "CUSTOM_ENV", Value: "custom_value"}) + }) + + t.Run("with daemon template", func(t *testing.T) { + mv.Spec.DaemonTemplate = &DaemonTemplate{ + Replicas: ptr.To[int32](3), + AbstractPodTemplate: AbstractPodTemplate{ + NodeSelector: map[string]string{"node": "special"}, + }, + ContainerTemplate: &ContainerTemplate{ + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("200m"), + }, + }, + }, + } + + req := GetMonoVertexDaemonDeploymentReq{ + Image: "test-image:v2", + } + + deployment, err := mv.GetDaemonDeploymentObj(req) + assert.NoError(t, err) + assert.NotNil(t, deployment) + assert.Equal(t, int32(3), *deployment.Spec.Replicas) + assert.Equal(t, "special", deployment.Spec.Template.Spec.NodeSelector["node"]) + assert.Equal(t, resource.MustParse("200m"), deployment.Spec.Template.Spec.Containers[0].Resources.Limits[corev1.ResourceCPU]) + }) + + t.Run("verify probes", func(t *testing.T) { + req := GetMonoVertexDaemonDeploymentReq{ + Image: "test-image:v3", + } + + deployment, err := mv.GetDaemonDeploymentObj(req) + assert.NoError(t, err) + assert.NotNil(t, deployment) + + container := deployment.Spec.Template.Spec.Containers[0] + assert.NotNil(t, container.ReadinessProbe) + assert.NotNil(t, container.LivenessProbe) + + assert.Equal(t, int32(MonoVertexDaemonServicePort), container.ReadinessProbe.HTTPGet.Port.IntVal) + assert.Equal(t, "/readyz", container.ReadinessProbe.HTTPGet.Path) + assert.Equal(t, corev1.URISchemeHTTPS, container.ReadinessProbe.HTTPGet.Scheme) + + assert.Equal(t, int32(MonoVertexDaemonServicePort), container.LivenessProbe.HTTPGet.Port.IntVal) + assert.Equal(t, "/livez", container.LivenessProbe.HTTPGet.Path) + assert.Equal(t, corev1.URISchemeHTTPS, container.LivenessProbe.HTTPGet.Scheme) + }) + + t.Run("verify labels and owner references", func(t *testing.T) { + req := 
GetMonoVertexDaemonDeploymentReq{ + Image: "test-image:v4", + } + + deployment, err := mv.GetDaemonDeploymentObj(req) + assert.NoError(t, err) + assert.NotNil(t, deployment) + + expectedLabels := map[string]string{ + KeyPartOf: Project, + KeyManagedBy: ControllerMonoVertex, + KeyComponent: ComponentMonoVertexDaemon, + KeyAppName: mv.GetDaemonDeploymentName(), + KeyMonoVertexName: mv.Name, + } + assert.Equal(t, expectedLabels, deployment.Labels) + + assert.Len(t, deployment.OwnerReferences, 1) + assert.Equal(t, mv.Name, deployment.OwnerReferences[0].Name) + assert.Equal(t, MonoVertexGroupVersionKind.Kind, deployment.OwnerReferences[0].Kind) + }) +} diff --git a/pkg/apis/numaflow/v1alpha1/openapi_generated.go b/pkg/apis/numaflow/v1alpha1/openapi_generated.go index 765dd96681..50a03d54a3 100644 --- a/pkg/apis/numaflow/v1alpha1/openapi_generated.go +++ b/pkg/apis/numaflow/v1alpha1/openapi_generated.go @@ -583,12 +583,19 @@ func schema_pkg_apis_numaflow_v1alpha1_AbstractVertex(ref common.ReferenceCallba Ref: ref("github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ContainerTemplate"), }, }, + "updateStrategy": { + SchemaProps: spec.SchemaProps{ + Description: "The strategy to use to replace existing pods with new ones.", + Default: map[string]interface{}{}, + Ref: ref("github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.UpdateStrategy"), + }, + }, }, Required: []string{"name"}, }, }, Dependencies: []string{ - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ContainerTemplate", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Metadata", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Scale", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Sink", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Source", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.UDF", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.VertexLimits", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", 
"k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodResourceClaim", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.Volume"}, + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ContainerTemplate", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Metadata", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Scale", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Sink", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Source", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.UDF", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.UpdateStrategy", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.VertexLimits", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodResourceClaim", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.Volume"}, } } @@ -5731,6 +5738,13 @@ func schema_pkg_apis_numaflow_v1alpha1_VertexSpec(ref common.ReferenceCallback) Ref: ref("github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ContainerTemplate"), }, }, + "updateStrategy": { + SchemaProps: spec.SchemaProps{ + Description: "The strategy to use to replace existing pods with new ones.", + Default: map[string]interface{}{}, + Ref: ref("github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.UpdateStrategy"), + }, + }, "pipelineName": { SchemaProps: spec.SchemaProps{ Default: "", @@ -5789,7 +5803,7 @@ func schema_pkg_apis_numaflow_v1alpha1_VertexSpec(ref common.ReferenceCallback) }, }, Dependencies: []string{ - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.CombinedEdge", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ContainerTemplate", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Metadata", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Scale", 
"github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Sink", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Source", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.UDF", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.VertexLimits", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Watermark", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodResourceClaim", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.Volume"}, + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.CombinedEdge", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ContainerTemplate", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Metadata", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Scale", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Sink", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Source", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.UDF", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.UpdateStrategy", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.VertexLimits", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Watermark", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodResourceClaim", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.Volume"}, } } @@ -5834,6 +5848,14 @@ func schema_pkg_apis_numaflow_v1alpha1_VertexStatus(ref common.ReferenceCallback Format: "int64", }, }, + "desiredReplicas": { + SchemaProps: spec.SchemaProps{ + Description: "The number of desired replicas.", + Default: 0, + Type: []string{"integer"}, + Format: "int64", + }, + }, "selector": { SchemaProps: spec.SchemaProps{ Type: []string{"string"}, @@ -5872,30 +5894,30 @@ 
func schema_pkg_apis_numaflow_v1alpha1_VertexStatus(ref common.ReferenceCallback Format: "int64", }, }, - "currentReplicas": { + "updatedReplicas": { SchemaProps: spec.SchemaProps{ - Description: "The number of Pods created by the controller from the Vertex version indicated by currentHash.", + Description: "The number of Pods created by the controller from the Vertex version indicated by updateHash.", Type: []string{"integer"}, Format: "int64", }, }, - "updatedReplicas": { + "updatedReadyReplicas": { SchemaProps: spec.SchemaProps{ - Description: "The number of Pods created by the controller from the Vertex version indicated by updateHash.", + Description: "The number of ready Pods created by the controller from the Vertex version indicated by updateHash.", Type: []string{"integer"}, Format: "int64", }, }, "currentHash": { SchemaProps: spec.SchemaProps{ - Description: "If not empty, indicates the version of the Vertex used to generate Pods in the sequence [0,currentReplicas).", + Description: "If not empty, indicates the current version of the Vertex used to generate Pods.", Type: []string{"string"}, Format: "", }, }, "updateHash": { SchemaProps: spec.SchemaProps{ - Description: "If not empty, indicates the version of the Vertx used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", + Description: "If not empty, indicates the updated version of the Vertex used to generate Pods.", Type: []string{"string"}, Format: "", }, diff --git a/pkg/apis/numaflow/v1alpha1/vertex_types.go b/pkg/apis/numaflow/v1alpha1/vertex_types.go index 0b5ec7efc9..965e9f4bcc 100644 --- a/pkg/apis/numaflow/v1alpha1/vertex_types.go +++ b/pkg/apis/numaflow/v1alpha1/vertex_types.go @@ -37,6 +37,8 @@ const ( VertexPhaseRunning VertexPhase = "Running" VertexPhaseFailed VertexPhase = "Failed" + // VertexConditionDeployed has the status True when the vertex related sub resources are deployed. 
+ VertexConditionDeployed ConditionType = "Deployed" // VertexConditionPodsHealthy has the status True when all the vertex pods are healthy. VertexConditionPodsHealthy ConditionType = "PodsHealthy" ) @@ -58,7 +60,7 @@ const NumaflowRustBinary = "/bin/numaflow-rs" // +kubebuilder:subresource:status // +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector // +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` -// +kubebuilder:printcolumn:name="Desired",type=string,JSONPath=`.spec.replicas` +// +kubebuilder:printcolumn:name="Desired",type=string,JSONPath=`.status.desiredReplicas` // +kubebuilder:printcolumn:name="Current",type=string,JSONPath=`.status.replicas` // +kubebuilder:printcolumn:name="Ready",type=string,JSONPath=`.status.readyReplicas` // +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` @@ -608,6 +610,10 @@ type AbstractVertex struct { // Container template for the side inputs watcher container. // +optional SideInputsContainerTemplate *ContainerTemplate `json:"sideInputsContainerTemplate,omitempty" protobuf:"bytes,15,opt,name=sideInputsContainerTemplate"` + // The strategy to use to replace existing pods with new ones. + // +kubebuilder:default={"type": "RollingUpdate", "rollingUpdate": {"maxUnavailable": "25%"}} + // +optional + UpdateStrategy UpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,16,opt,name=updateStrategy"` } func (av AbstractVertex) GetVertexType() VertexType { @@ -715,29 +721,32 @@ type VertexStatus struct { // Total number of non-terminated pods targeted by this Vertex (their labels match the selector). // +optional Replicas uint32 `json:"replicas" protobuf:"varint,3,opt,name=replicas"` + // The number of desired replicas. 
+ // +optional + DesiredReplicas uint32 `json:"desiredReplicas" protobuf:"varint,4,opt,name=desiredReplicas"` // +optional - Selector string `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` + Selector string `json:"selector,omitempty" protobuf:"bytes,5,opt,name=selector"` // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` + Reason string `json:"reason,omitempty" protobuf:"bytes,6,opt,name=reason"` // +optional - Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` + Message string `json:"message,omitempty" protobuf:"bytes,7,opt,name=message"` // Time of last scaling operation. // +optional - LastScaledAt metav1.Time `json:"lastScaledAt,omitempty" protobuf:"bytes,7,opt,name=lastScaledAt"` + LastScaledAt metav1.Time `json:"lastScaledAt,omitempty" protobuf:"bytes,8,opt,name=lastScaledAt"` // The generation observed by the Vertex controller. // +optional - ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,8,opt,name=observedGeneration"` + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,9,opt,name=observedGeneration"` // The number of pods targeted by this Vertex with a Ready Condition. // +optional - ReadyReplicas uint32 `json:"readyReplicas,omitempty" protobuf:"varint,9,opt,name=readyReplicas"` - // The number of Pods created by the controller from the Vertex version indicated by currentHash. - CurrentReplicas uint32 `json:"currentReplicas,omitempty" protobuf:"varint,10,opt,name=currentReplicas"` + ReadyReplicas uint32 `json:"readyReplicas,omitempty" protobuf:"varint,10,opt,name=readyReplicas"` // The number of Pods created by the controller from the Vertex version indicated by updateHash. UpdatedReplicas uint32 `json:"updatedReplicas,omitempty" protobuf:"varint,11,opt,name=updatedReplicas"` - // If not empty, indicates the version of the Vertex used to generate Pods in the sequence [0,currentReplicas). 
- CurrentHash string `json:"currentHash,omitempty" protobuf:"bytes,12,opt,name=currentHash"` - // If not empty, indicates the version of the Vertx used to generate Pods in the sequence [replicas-updatedReplicas,replicas) - UpdateHash string `json:"updateHash,omitempty" protobuf:"bytes,13,opt,name=updateHash"` + // The number of ready Pods created by the controller from the Vertex version indicated by updateHash. + UpdatedReadyReplicas uint32 `json:"updatedReadyReplicas,omitempty" protobuf:"varint,12,opt,name=updatedReadyReplicas"` + // If not empty, indicates the current version of the Vertex used to generate Pods. + CurrentHash string `json:"currentHash,omitempty" protobuf:"bytes,13,opt,name=currentHash"` + // If not empty, indicates the updated version of the Vertex used to generate Pods. + UpdateHash string `json:"updateHash,omitempty" protobuf:"bytes,14,opt,name=updateHash"` } func (vs *VertexStatus) MarkPhase(phase VertexPhase, reason, message string) { @@ -756,6 +765,17 @@ func (vs *VertexStatus) MarkPhaseRunning() { vs.MarkPhase(VertexPhaseRunning, "", "") } +// MarkDeployed set the Vertex has it's sub resources deployed. +func (vs *VertexStatus) MarkDeployed() { + vs.MarkTrue(VertexConditionDeployed) +} + +// MarkDeployFailed set the Vertex deployment failed +func (vs *VertexStatus) MarkDeployFailed(reason, message string) { + vs.MarkFalse(VertexConditionDeployed, reason, message) + vs.MarkPhaseFailed(reason, message) +} + // MarkPodNotHealthy marks the pod not healthy with the given reason and message. func (vs *VertexStatus) MarkPodNotHealthy(reason, message string) { vs.MarkFalse(VertexConditionPodsHealthy, reason, message) @@ -770,7 +790,7 @@ func (vs *VertexStatus) MarkPodHealthy(reason, message string) { // InitConditions sets conditions to Unknown state. 
func (vs *VertexStatus) InitConditions() { - vs.InitializeConditions(VertexConditionPodsHealthy) + vs.InitializeConditions(VertexConditionDeployed, VertexConditionPodsHealthy) } // IsHealthy indicates whether the vertex is healthy or not diff --git a/pkg/apis/numaflow/v1alpha1/vertex_types_test.go b/pkg/apis/numaflow/v1alpha1/vertex_types_test.go index 1a4534c3ec..1f5572c424 100644 --- a/pkg/apis/numaflow/v1alpha1/vertex_types_test.go +++ b/pkg/apis/numaflow/v1alpha1/vertex_types_test.go @@ -565,10 +565,37 @@ func Test_VertexMarkPodHealthy(t *testing.T) { } } +func Test_VertexMarkDeployed(t *testing.T) { + s := VertexStatus{} + s.MarkDeployed() + for _, c := range s.Conditions { + if c.Type == string(VertexConditionDeployed) { + assert.Equal(t, metav1.ConditionTrue, c.Status) + assert.Equal(t, "Successful", c.Reason) + assert.Equal(t, "Successful", c.Message) + } + } +} + +func Test_VertexMarkDeployFailed(t *testing.T) { + s := VertexStatus{} + s.MarkDeployFailed("reason", "message") + assert.Equal(t, VertexPhaseFailed, s.Phase) + assert.Equal(t, "reason", s.Reason) + assert.Equal(t, "message", s.Message) + for _, c := range s.Conditions { + if c.Type == string(VertexConditionDeployed) { + assert.Equal(t, metav1.ConditionFalse, c.Status) + assert.Equal(t, "reason", c.Reason) + assert.Equal(t, "message", c.Message) + } + } +} + func Test_VertexInitConditions(t *testing.T) { v := VertexStatus{} v.InitConditions() - assert.Equal(t, 1, len(v.Conditions)) + assert.Equal(t, 2, len(v.Conditions)) for _, c := range v.Conditions { assert.Equal(t, metav1.ConditionUnknown, c.Status) } diff --git a/pkg/reconciler/monovertex/controller.go b/pkg/reconciler/monovertex/controller.go index a7b7a90c40..9aca247a05 100644 --- a/pkg/reconciler/monovertex/controller.go +++ b/pkg/reconciler/monovertex/controller.go @@ -118,6 +118,7 @@ func (mr *monoVertexReconciler) reconcile(ctx context.Context, monoVtx *dfv1.Mon if err := mr.orchestrateFixedResources(ctx, monoVtx); err != nil { 
monoVtx.Status.MarkDeployFailed("OrchestrateFixedResourcesFailed", err.Error()) + mr.recorder.Eventf(monoVtx, corev1.EventTypeWarning, "OrchestrateFixedResourcesFailed", "OrchestrateFixedResourcesFailed: %s", err.Error()) return ctrl.Result{}, err } @@ -125,6 +126,7 @@ func (mr *monoVertexReconciler) reconcile(ctx context.Context, monoVtx *dfv1.Mon if err := mr.orchestratePods(ctx, monoVtx); err != nil { monoVtx.Status.MarkDeployFailed("OrchestratePodsFailed", err.Error()) + mr.recorder.Eventf(monoVtx, corev1.EventTypeWarning, "OrchestratePodsFailed", "OrchestratePodsFailed: %s", err.Error()) return ctrl.Result{}, err } @@ -399,8 +401,8 @@ func (mr *monoVertexReconciler) createOrUpdateMonoVtxServices(ctx context.Contex if existingSvc.GetAnnotations()[dfv1.KeyHash] != svcHash { if err := mr.client.Delete(ctx, &existingSvc); err != nil { if !apierrors.IsNotFound(err) { - mr.markDeploymentFailedAndLogEvent(monoVtx, true, log, "DelSvcFailed", err.Error(), "Failed to delete existing mono vertex service", zap.String("service", existingSvc.Name), zap.Error(err)) - return err + mr.recorder.Eventf(monoVtx, corev1.EventTypeWarning, "DelSvcFailed", "Failed to delete existing mono vertex service %s: %s", existingSvc.Name, err.Error()) + return fmt.Errorf("failed to delete existing mono vertex service %s: %w", existingSvc.Name, err) } } else { log.Infow("Deleted a stale mono vertex service to recreate", zap.String("service", existingSvc.Name)) @@ -417,8 +419,8 @@ func (mr *monoVertexReconciler) createOrUpdateMonoVtxServices(ctx context.Contex if apierrors.IsAlreadyExists(err) { continue } - mr.markDeploymentFailedAndLogEvent(monoVtx, true, log, "CreateSvcFailed", err.Error(), "Failed to create a mono vertex service", zap.String("service", s.Name), zap.Error(err)) - return err + mr.recorder.Eventf(monoVtx, corev1.EventTypeWarning, "CreateSvcFailed", "Failed to create a mono vertex service %s: %s", s.Name, err.Error()) + return fmt.Errorf("failed to create a mono vertex service 
%s: %w", s.Name, err) } else { log.Infow("Succeeded to create a mono vertex service", zap.String("service", s.Name)) mr.recorder.Eventf(monoVtx, corev1.EventTypeNormal, "CreateSvcSuccess", "Succeeded to create mono vertex service %s", s.Name) @@ -428,8 +430,8 @@ func (mr *monoVertexReconciler) createOrUpdateMonoVtxServices(ctx context.Contex for _, v := range existingSvcs { // clean up stale services if err := mr.client.Delete(ctx, &v); err != nil { if !apierrors.IsNotFound(err) { - mr.markDeploymentFailedAndLogEvent(monoVtx, true, log, "DelSvcFailed", err.Error(), "Failed to delete mono vertex service not in use", zap.String("service", v.Name), zap.Error(err)) - return err + mr.recorder.Eventf(monoVtx, corev1.EventTypeWarning, "DelSvcFailed", "Failed to delete mono vertex service %s: %s", v.Name, err.Error()) + return fmt.Errorf("failed to delete mono vertex service %s: %w", v.Name, err) } } else { log.Infow("Deleted a stale mono vertx service", zap.String("service", v.Name)) @@ -463,20 +465,19 @@ func (mr *monoVertexReconciler) createOrUpdateDaemonService(ctx context.Context, if apierrors.IsNotFound(err) { needToCreatDaemonSvc = true } else { - mr.markDeploymentFailedAndLogEvent(monoVtx, false, log, "FindDaemonSvcFailed", err.Error(), "Failed to find existing mono vtx daemon service", zap.String("service", svc.Name), zap.Error(err)) - return err + return fmt.Errorf("failed to find existing mono vertex daemon service: %w", err) } } else if existingSvc.GetAnnotations()[dfv1.KeyHash] != svcHash { if err := mr.client.Delete(ctx, existingSvc); err != nil && !apierrors.IsNotFound(err) { - mr.markDeploymentFailedAndLogEvent(monoVtx, true, log, "DelDaemonSvcFailed", err.Error(), "Failed to delete existing mono vtx daemon service", zap.String("service", existingSvc.Name), zap.Error(err)) - return err + mr.recorder.Eventf(monoVtx, corev1.EventTypeWarning, "DelDaemonSvcFailed", "Failed to delete existing mono vertex daemon service %s: %s", existingSvc.Name, err.Error()) + 
return fmt.Errorf("failed to delete existing mono vertex daemon service %s: %w", existingSvc.Name, err) } needToCreatDaemonSvc = true } if needToCreatDaemonSvc { if err := mr.client.Create(ctx, svc); err != nil { - mr.markDeploymentFailedAndLogEvent(monoVtx, true, log, "CreateDaemonSvcFailed", err.Error(), "Failed to create mono vtx daemon service", zap.String("service", svc.Name), zap.Error(err)) - return err + mr.recorder.Eventf(monoVtx, corev1.EventTypeWarning, "CreateDaemonSvcFailed", "Failed to create a mono vertex daemon service %s: %s", svc.Name, err.Error()) + return fmt.Errorf("failed to create a mono vertex daemon service %s: %w", svc.Name, err) } log.Infow("Succeeded to create a mono vertex daemon service", zap.String("service", svc.Name)) mr.recorder.Eventf(monoVtx, corev1.EventTypeNormal, "CreateMonoVtxDaemonSvcSuccess", "Succeeded to create a mono vertex daemon service %s", svc.Name) @@ -496,8 +497,7 @@ func (mr *monoVertexReconciler) createOrUpdateDaemonDeployment(ctx context.Conte } deploy, err := monoVtx.GetDaemonDeploymentObj(req) if err != nil { - mr.markDeploymentFailedAndLogEvent(monoVtx, false, log, "BuildDaemonDeployFailed", err.Error(), "Failed to build mono verex daemon deployment spec", zap.Error(err)) - return err + return fmt.Errorf("failed to build mono vertex daemon deployment spec: %w", err) } deployHash := sharedutil.MustHash(deploy.Spec) deploy.Annotations = map[string]string{dfv1.KeyHash: deployHash} @@ -505,8 +505,7 @@ func (mr *monoVertexReconciler) createOrUpdateDaemonDeployment(ctx context.Conte needToCreate := false if err := mr.client.Get(ctx, types.NamespacedName{Namespace: monoVtx.Namespace, Name: deploy.Name}, existingDeploy); err != nil { if !apierrors.IsNotFound(err) { - mr.markDeploymentFailedAndLogEvent(monoVtx, false, log, "FindDaemonDeployFailed", err.Error(), "Failed to find existing mono vertex daemon deployment", zap.String("deployment", deploy.Name), zap.Error(err)) - return err + return fmt.Errorf("failed to 
find existing mono vertex daemon deployment: %w", err) } else { needToCreate = true } @@ -514,16 +513,16 @@ func (mr *monoVertexReconciler) createOrUpdateDaemonDeployment(ctx context.Conte if existingDeploy.GetAnnotations()[dfv1.KeyHash] != deployHash { // Delete and recreate, to avoid updating immutable fields problem. if err := mr.client.Delete(ctx, existingDeploy); err != nil { - mr.markDeploymentFailedAndLogEvent(monoVtx, true, log, "DeleteOldDaemonDeployFailed", err.Error(), "Failed to delete the outdated daemon deployment", zap.String("deployment", existingDeploy.Name), zap.Error(err)) - return err + mr.recorder.Eventf(monoVtx, corev1.EventTypeWarning, "DeleteOldDaemonDeployFailed", "Failed to delete the outdated daemon deployment %s: %s", existingDeploy.Name, err.Error()) + return fmt.Errorf("failed to delete the outdated daemon deployment %s: %w", existingDeploy.Name, err) } needToCreate = true } } if needToCreate { if err := mr.client.Create(ctx, deploy); err != nil && !apierrors.IsAlreadyExists(err) { - mr.markDeploymentFailedAndLogEvent(monoVtx, true, log, "CreateDaemonDeployFailed", err.Error(), "Failed to create a mono vertex daemon deployment", zap.String("deployment", deploy.Name), zap.Error(err)) - return err + mr.recorder.Eventf(monoVtx, corev1.EventTypeWarning, "CreateDaemonDeployFailed", "Failed to create a mono vertex daemon deployment %s: %s", deploy.Name, err.Error()) + return fmt.Errorf("failed to create a mono vertex daemon deployment %s: %w", deploy.Name, err) } log.Infow("Succeeded to create/recreate a mono vertex daemon deployment", zap.String("deployment", deploy.Name)) mr.recorder.Eventf(monoVtx, corev1.EventTypeNormal, "CreateDaemonDeploySuccess", "Succeeded to create/recreate a mono vertex daemon deployment %s", deploy.Name) @@ -548,15 +547,6 @@ func (mr *monoVertexReconciler) buildPodSpec(monoVtx *dfv1.MonoVertex) (*corev1. 
return podSpec, nil } -// Helper function for warning event types -func (mr *monoVertexReconciler) markDeploymentFailedAndLogEvent(monoVtx *dfv1.MonoVertex, recordEvent bool, log *zap.SugaredLogger, reason, message, logMsg string, logWith ...interface{}) { - log.Errorw(logMsg, logWith) - monoVtx.Status.MarkDeployFailed(reason, message) - if recordEvent { - mr.recorder.Event(monoVtx, corev1.EventTypeWarning, reason, message) - } -} - // checkChildrenResourceStatus checks the status of the children resources of the mono vertex func (mr *monoVertexReconciler) checkChildrenResourceStatus(ctx context.Context, monoVtx *dfv1.MonoVertex) error { defer func() { diff --git a/pkg/reconciler/monovertex/controller_test.go b/pkg/reconciler/monovertex/controller_test.go index e9c4f9fdea..8e1f179db4 100644 --- a/pkg/reconciler/monovertex/controller_test.go +++ b/pkg/reconciler/monovertex/controller_test.go @@ -20,20 +20,25 @@ import ( "context" "strings" "testing" + "time" "go.uber.org/zap/zaptest" appv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/record" "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" "github.com/numaproj/numaflow/pkg/reconciler" "github.com/numaproj/numaflow/pkg/reconciler/monovertex/scaling" + sharedutil "github.com/numaproj/numaflow/pkg/shared/util" "github.com/stretchr/testify/assert" ) @@ -98,17 +103,62 @@ func Test_NewReconciler(t *testing.T) { assert.True(t, ok) } -func Test_BuildPodSpec(t *testing.T) { - fakeConfig := reconciler.FakeGlobalConfig(t, nil) - cl := fake.NewClientBuilder().Build() - r := &monoVertexReconciler{ +func fakeReconciler(t *testing.T, cl client.WithWatch) *monoVertexReconciler { + t.Helper() 
+ return &monoVertexReconciler{ client: cl, scheme: scheme.Scheme, - config: fakeConfig, + config: reconciler.FakeGlobalConfig(t, nil), image: testFlowImage, logger: zaptest.NewLogger(t).Sugar(), recorder: record.NewFakeRecorder(64), + scaler: scaling.NewScaler(cl), } +} + +func TestReconcile(t *testing.T) { + t.Run("test not found", func(t *testing.T) { + cl := fake.NewClientBuilder().Build() + r := fakeReconciler(t, cl) + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "not-exist", + Namespace: testNamespace, + }, + } + _, err := r.Reconcile(context.TODO(), req) + // Return nil when not found + assert.NoError(t, err) + }) + + t.Run("test found", func(t *testing.T) { + cl := fake.NewClientBuilder().Build() + r := fakeReconciler(t, cl) + testObj := testMonoVtx.DeepCopy() + err := cl.Create(context.TODO(), testObj) + assert.NoError(t, err) + o := &dfv1.MonoVertex{} + err = cl.Get(context.TODO(), types.NamespacedName{ + Namespace: testObj.Namespace, + Name: testObj.Name, + }, o) + assert.NoError(t, err) + assert.Equal(t, testObj.Name, o.Name) + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: testObj.Name, + Namespace: testObj.Namespace, + }, + } + _, err = r.Reconcile(context.TODO(), req) + assert.Error(t, err) + assert.ErrorContains(t, err, "not found") + }) +} + +func Test_BuildPodSpec(t *testing.T) { + cl := fake.NewClientBuilder().Build() + r := fakeReconciler(t, cl) t.Run("test has everything", func(t *testing.T) { testObj := testMonoVtx.DeepCopy() spec, err := r.buildPodSpec(testObj) @@ -137,16 +187,8 @@ func Test_BuildPodSpec(t *testing.T) { } func Test_createOrUpdateDaemonDeployment(t *testing.T) { - fakeConfig := reconciler.FakeGlobalConfig(t, nil) cl := fake.NewClientBuilder().Build() - r := &monoVertexReconciler{ - client: cl, - scheme: scheme.Scheme, - config: fakeConfig, - image: testFlowImage, - logger: zaptest.NewLogger(t).Sugar(), - recorder: record.NewFakeRecorder(64), - } + r := fakeReconciler(t, cl) 
t.Run("test everything from scratch for daemon deployment", func(t *testing.T) { testObj := testMonoVtx.DeepCopy() @@ -163,16 +205,8 @@ func Test_createOrUpdateDaemonDeployment(t *testing.T) { } func Test_createOrUpdateDaemonService(t *testing.T) { - fakeConfig := reconciler.FakeGlobalConfig(t, nil) cl := fake.NewClientBuilder().Build() - r := &monoVertexReconciler{ - client: cl, - scheme: scheme.Scheme, - config: fakeConfig, - image: testFlowImage, - logger: zaptest.NewLogger(t).Sugar(), - recorder: record.NewFakeRecorder(64), - } + r := fakeReconciler(t, cl) t.Run("test everything from scratch for daemon service", func(t *testing.T) { testObj := testMonoVtx.DeepCopy() @@ -189,16 +223,8 @@ func Test_createOrUpdateDaemonService(t *testing.T) { } func Test_createOrUpdateMonoVtxServices(t *testing.T) { - fakeConfig := reconciler.FakeGlobalConfig(t, nil) cl := fake.NewClientBuilder().Build() - r := &monoVertexReconciler{ - client: cl, - scheme: scheme.Scheme, - config: fakeConfig, - image: testFlowImage, - logger: zaptest.NewLogger(t).Sugar(), - recorder: record.NewFakeRecorder(64), - } + r := fakeReconciler(t, cl) t.Run("test everything from scratch for monovtx service", func(t *testing.T) { testObj := testMonoVtx.DeepCopy() @@ -218,17 +244,10 @@ func Test_createOrUpdateMonoVtxServices(t *testing.T) { } func Test_orchestratePods(t *testing.T) { - fakeConfig := reconciler.FakeGlobalConfig(t, nil) - cl := fake.NewClientBuilder().Build() - r := &monoVertexReconciler{ - client: cl, - scheme: scheme.Scheme, - config: fakeConfig, - image: testFlowImage, - logger: zaptest.NewLogger(t).Sugar(), - recorder: record.NewFakeRecorder(64), - } + t.Run("test orchestratePodsFromTo and cleanUpPodsFromTo", func(t *testing.T) { + cl := fake.NewClientBuilder().Build() + r := fakeReconciler(t, cl) testObj := testMonoVtx.DeepCopy() hash := "test-hasssssh" podSpec, err := r.buildPodSpec(testObj) @@ -250,6 +269,8 @@ func Test_orchestratePods(t *testing.T) { }) t.Run("test orchestratePods", 
func(t *testing.T) { + cl := fake.NewClientBuilder().Build() + r := fakeReconciler(t, cl) testObj := testMonoVtx.DeepCopy() err := r.orchestratePods(context.TODO(), testObj) assert.NoError(t, err) @@ -263,16 +284,8 @@ func Test_orchestratePods(t *testing.T) { } func Test_orchestrateFixedResources(t *testing.T) { - fakeConfig := reconciler.FakeGlobalConfig(t, nil) cl := fake.NewClientBuilder().Build() - r := &monoVertexReconciler{ - client: cl, - scheme: scheme.Scheme, - config: fakeConfig, - image: testFlowImage, - logger: zaptest.NewLogger(t).Sugar(), - recorder: record.NewFakeRecorder(64), - } + r := fakeReconciler(t, cl) testObj := testMonoVtx.DeepCopy() err := r.orchestrateFixedResources(context.TODO(), testObj) assert.NoError(t, err) @@ -294,23 +307,87 @@ func Test_orchestrateFixedResources(t *testing.T) { } func Test_reconcile(t *testing.T) { - fakeConfig := reconciler.FakeGlobalConfig(t, nil) - cl := fake.NewClientBuilder().Build() - r := &monoVertexReconciler{ - client: cl, - scheme: scheme.Scheme, - config: fakeConfig, - image: testFlowImage, - logger: zaptest.NewLogger(t).Sugar(), - recorder: record.NewFakeRecorder(64), - scaler: scaling.NewScaler(cl), - } - testObj := testMonoVtx.DeepCopy() - _, err := r.reconcile(context.TODO(), testObj) - assert.NoError(t, err) - var daemonDeployment appv1.Deployment - err = r.client.Get(context.TODO(), client.ObjectKey{Namespace: testObj.GetNamespace(), Name: testObj.GetDaemonDeploymentName()}, - &daemonDeployment) - assert.NoError(t, err) - assert.Equal(t, testObj.GetDaemonDeploymentName(), daemonDeployment.Name) + + t.Run("test deletion", func(t *testing.T) { + cl := fake.NewClientBuilder().Build() + r := fakeReconciler(t, cl) + testObj := testMonoVtx.DeepCopy() + testObj.DeletionTimestamp = &metav1.Time{Time: time.Now()} + _, err := r.reconcile(context.TODO(), testObj) + assert.NoError(t, err) + }) + + t.Run("test okay", func(t *testing.T) { + cl := fake.NewClientBuilder().Build() + r := fakeReconciler(t, cl) + 
testObj := testMonoVtx.DeepCopy() + _, err := r.reconcile(context.TODO(), testObj) + assert.NoError(t, err) + var daemonDeployment appv1.Deployment + err = r.client.Get(context.TODO(), client.ObjectKey{Namespace: testObj.GetNamespace(), Name: testObj.GetDaemonDeploymentName()}, + &daemonDeployment) + assert.NoError(t, err) + assert.Equal(t, testObj.GetDaemonDeploymentName(), daemonDeployment.Name) + }) + + t.Run("test reconcile rolling update", func(t *testing.T) { + cl := fake.NewClientBuilder().Build() + r := fakeReconciler(t, cl) + testObj := testMonoVtx.DeepCopy() + testObj.Spec.Replicas = ptr.To[int32](3) + _, err := r.reconcile(context.TODO(), testObj) + assert.NoError(t, err) + pods := &corev1.PodList{} + selector, _ := labels.Parse(dfv1.KeyComponent + "=" + dfv1.ComponentMonoVertex + "," + dfv1.KeyMonoVertexName + "=" + testObj.Name) + err = r.client.List(context.TODO(), pods, &client.ListOptions{Namespace: testNamespace, LabelSelector: selector}) + assert.NoError(t, err) + assert.Equal(t, 3, len(pods.Items)) + + podSpec, _ := r.buildPodSpec(testObj) + hash := sharedutil.MustHash(podSpec) + testObj.Status.Replicas = 3 + testObj.Status.ReadyReplicas = 3 + testObj.Status.UpdateHash = hash + testObj.Status.CurrentHash = hash + + // Reduce desired replicas + testObj.Spec.Replicas = ptr.To[int32](2) + _, err = r.reconcile(context.TODO(), testObj) + assert.NoError(t, err) + err = r.client.List(context.TODO(), pods, &client.ListOptions{Namespace: testNamespace, LabelSelector: selector}) + assert.NoError(t, err) + assert.Equal(t, 2, len(pods.Items)) + assert.Equal(t, uint32(2), testObj.Status.Replicas) + assert.Equal(t, uint32(2), testObj.Status.UpdatedReplicas) + + // updatedReplicas > desiredReplicas + testObj.Status.UpdatedReplicas = 3 + _, err = r.reconcile(context.TODO(), testObj) + assert.NoError(t, err) + assert.Equal(t, uint32(2), testObj.Status.UpdatedReplicas) + + // Clean up + testObj.Spec.Replicas = ptr.To[int32](0) + testObj.Spec.Scale.Min = 
ptr.To[int32](0) + _, err = r.reconcile(context.TODO(), testObj) + assert.NoError(t, err) + err = r.client.List(context.TODO(), pods, &client.ListOptions{Namespace: testNamespace, LabelSelector: selector}) + assert.NoError(t, err) + assert.Equal(t, 0, len(pods.Items)) + + // rolling update + testObj.Spec.Replicas = ptr.To[int32](20) + testObj.Status.UpdatedReplicas = 20 + testObj.Status.UpdatedReadyReplicas = 20 + testObj.Status.Replicas = 20 + testObj.Status.CurrentHash = "123456" + testObj.Status.UpdateHash = "123456" + _, err = r.reconcile(context.TODO(), testObj) + assert.NoError(t, err) + err = r.client.List(context.TODO(), pods, &client.ListOptions{Namespace: testNamespace, LabelSelector: selector}) + assert.NoError(t, err) + assert.Equal(t, 5, len(pods.Items)) + assert.Equal(t, uint32(20), testObj.Status.Replicas) + assert.Equal(t, uint32(5), testObj.Status.UpdatedReplicas) + }) } diff --git a/pkg/reconciler/pipeline/controller.go b/pkg/reconciler/pipeline/controller.go index 955344a8c1..d8b989f2d6 100644 --- a/pkg/reconciler/pipeline/controller.go +++ b/pkg/reconciler/pipeline/controller.go @@ -145,13 +145,28 @@ func (r *pipelineReconciler) reconcile(ctx context.Context, pl *dfv1.Pipeline) ( pl.Status.InitConditions() pl.Status.SetObservedGeneration(pl.Generation) + + if !controllerutil.ContainsFinalizer(pl, finalizerName) { + controllerutil.AddFinalizer(pl, finalizerName) + } + if err := ValidatePipeline(pl); err != nil { + r.recorder.Eventf(pl, corev1.EventTypeWarning, "ValidatePipelineFailed", "Invalid pipeline: %s", err.Error()) + pl.Status.MarkNotConfigured("InvalidSpec", err.Error()) + return ctrl.Result{}, err + } + pl.Status.SetVertexCounts(pl.Spec.Vertices) + pl.Status.MarkConfigured() + // Orchestrate pipeline sub resources. // This should be happening in all cases to ensure a clean initialization regardless of the lifecycle phase. // Eg: even for a pipeline started with desiredPhase = Pause, we should still create the resources for the pipeline. 
if err := r.reconcileFixedResources(ctx, pl); err != nil { - r.recorder.Eventf(pl, corev1.EventTypeWarning, "ReconcilePipelineFailed", "Failed to reconcile pipeline: %v", err.Error()) + r.recorder.Eventf(pl, corev1.EventTypeWarning, "ReconcileFixedResourcesFailed", "Failed to reconcile pipeline sub resources: %s", err.Error()) + pl.Status.MarkDeployFailed("ReconcileFixedResourcesFailed", err.Error()) return ctrl.Result{}, err } + pl.Status.MarkDeployed() + // If the pipeline has a lifecycle change, then do not update the phase as // this should happen only after the required configs for the lifecycle changes // have been applied. @@ -203,17 +218,6 @@ func isLifecycleChange(pl *dfv1.Pipeline) bool { // reconcileFixedResources do the jobs of creating fixed resources such as daemon service, vertex objects, and ISB management jobs, etc func (r *pipelineReconciler) reconcileFixedResources(ctx context.Context, pl *dfv1.Pipeline) error { log := logging.FromContext(ctx) - if !controllerutil.ContainsFinalizer(pl, finalizerName) { - controllerutil.AddFinalizer(pl, finalizerName) - } - if err := ValidatePipeline(pl); err != nil { - log.Errorw("Validation failed", zap.Error(err)) - pl.Status.MarkNotConfigured("InvalidSpec", err.Error()) - return err - } - pl.Status.SetVertexCounts(pl.Spec.Vertices) - pl.Status.MarkConfigured() - isbSvc := &dfv1.InterStepBufferService{} isbSvcName := dfv1.DefaultISBSvcName if len(pl.Spec.InterStepBufferServiceName) > 0 { @@ -222,16 +226,13 @@ func (r *pipelineReconciler) reconcileFixedResources(ctx context.Context, pl *df err := r.client.Get(ctx, types.NamespacedName{Namespace: pl.Namespace, Name: isbSvcName}, isbSvc) if err != nil { if apierrors.IsNotFound(err) { - pl.Status.MarkDeployFailed("ISBSvcNotFound", "ISB Service not found.") log.Errorw("ISB Service not found", zap.String("isbsvc", isbSvcName), zap.Error(err)) return fmt.Errorf("isbsvc %s not found", isbSvcName) } - pl.Status.MarkDeployFailed("GetISBSvcFailed", err.Error()) 
log.Errorw("Failed to get ISB Service", zap.String("isbsvc", isbSvcName), zap.Error(err)) return err } if !isbSvc.Status.IsHealthy() { - pl.Status.MarkDeployFailed("ISBSvcNotHealthy", "ISB Service not healthy.") log.Errorw("ISB Service is not in healthy status", zap.String("isbsvc", isbSvcName), zap.Error(err)) return fmt.Errorf("isbsvc not healthy") } @@ -239,16 +240,13 @@ func (r *pipelineReconciler) reconcileFixedResources(ctx context.Context, pl *df // Create or update the Side Inputs Manager deployments if err := r.createOrUpdateSIMDeployments(ctx, pl, isbSvc.Status.Config); err != nil { log.Errorw("Failed to create or update Side Inputs Manager deployments", zap.Error(err)) - pl.Status.MarkDeployFailed("CreateOrUpdateSIMDeploymentsFailed", err.Error()) r.recorder.Eventf(pl, corev1.EventTypeWarning, "CreateOrUpdateSIMDeploymentsFailed", "Failed to create or update Side Inputs Manager deployments: %w", err.Error()) - return err + return fmt.Errorf("failed to create or update SIM deployments: %w", err) } existingObjs, err := r.findExistingVertices(ctx, pl) if err != nil { - log.Errorw("Failed to find existing vertices", zap.Error(err)) - pl.Status.MarkDeployFailed("ListVerticesFailed", err.Error()) - return err + return fmt.Errorf("failed to find existing vertices: %w", err) } oldBuffers := make(map[string]string) newBuffers := make(map[string]string) @@ -286,7 +284,6 @@ func (r *pipelineReconciler) reconcileFixedResources(ctx context.Context, pl *df if apierrors.IsAlreadyExists(err) { // probably somebody else already created it continue } else { - pl.Status.MarkDeployFailed("CreateVertexFailed", err.Error()) r.recorder.Eventf(pl, corev1.EventTypeWarning, "CreateVertexFailed", "Failed to create vertex: %w", err.Error()) return fmt.Errorf("failed to create vertex, err: %w", err) } @@ -298,7 +295,6 @@ func (r *pipelineReconciler) reconcileFixedResources(ctx context.Context, pl *df oldObj.Spec = newObj.Spec oldObj.Annotations[dfv1.KeyHash] = 
newObj.GetAnnotations()[dfv1.KeyHash] if err := r.client.Update(ctx, &oldObj); err != nil { - pl.Status.MarkDeployFailed("UpdateVertexFailed", err.Error()) r.recorder.Eventf(pl, corev1.EventTypeWarning, "UpdateVertexFailed", "Failed to update vertex: %w", err.Error()) return fmt.Errorf("failed to update vertex, err: %w", err) } @@ -310,7 +306,6 @@ func (r *pipelineReconciler) reconcileFixedResources(ctx context.Context, pl *df } for _, v := range existingObjs { if err := r.client.Delete(ctx, &v); err != nil { - pl.Status.MarkDeployFailed("DeleteStaleVertexFailed", err.Error()) r.recorder.Eventf(pl, corev1.EventTypeWarning, "DeleteStaleVertexFailed", "Failed to delete vertex: %w", err.Error()) return fmt.Errorf("failed to delete vertex, err: %w", err) } @@ -336,10 +331,11 @@ func (r *pipelineReconciler) reconcileFixedResources(ctx context.Context, pl *df args = append(args, fmt.Sprintf("--serving-source-streams=%s", strings.Join(pl.GetServingSourceStreamNames(), ","))) batchJob := buildISBBatchJob(pl, r.image, isbSvc.Status.Config, "isbsvc-create", args, "cre") if err := r.client.Create(ctx, batchJob); err != nil && !apierrors.IsAlreadyExists(err) { - pl.Status.MarkDeployFailed("CreateISBSvcCreatingJobFailed", err.Error()) - return fmt.Errorf("failed to create ISB Svc creating job, err: %w", err) + r.recorder.Eventf(pl, corev1.EventTypeWarning, "CreateJobForISBCeationFailed", "Failed to create a Job: %w", err.Error()) + return fmt.Errorf("failed to create ISB creating job, err: %w", err) } - log.Infow("Created a job successfully for ISB Svc creating", zap.Any("buffers", bfs), zap.Any("buckets", bks), zap.Any("servingStreams", pl.GetServingSourceStreamNames())) + log.Infow("Created a job successfully for ISB creating", zap.Any("buffers", bfs), zap.Any("buckets", bks), zap.Any("servingStreams", pl.GetServingSourceStreamNames())) + r.recorder.Eventf(pl, corev1.EventTypeNormal, "CreateJobForISBCeationSuccessful", "Create ISB creation job successfully") } if 
len(oldBuffers) > 0 || len(oldBuckets) > 0 { @@ -354,10 +350,11 @@ func (r *pipelineReconciler) reconcileFixedResources(ctx context.Context, pl *df args := []string{fmt.Sprintf("--buffers=%s", strings.Join(bfs, ",")), fmt.Sprintf("--buckets=%s", strings.Join(bks, ","))} batchJob := buildISBBatchJob(pl, r.image, isbSvc.Status.Config, "isbsvc-delete", args, "del") if err := r.client.Create(ctx, batchJob); err != nil && !apierrors.IsAlreadyExists(err) { - pl.Status.MarkDeployFailed("CreateISBSvcDeletingJobFailed", err.Error()) - return fmt.Errorf("failed to create ISB Svc deleting job, err: %w", err) + r.recorder.Eventf(pl, corev1.EventTypeWarning, "CreateJobForISBDeletionFailed", "Failed to create a Job: %w", err.Error()) + return fmt.Errorf("failed to create ISB deleting job, err: %w", err) } log.Infow("Created ISB Svc deleting job successfully", zap.Any("buffers", bfs), zap.Any("buckets", bks)) + r.recorder.Eventf(pl, corev1.EventTypeNormal, "CreateJobForISBDeletionSuccessful", "Create ISB deletion job successfully") } // Daemon service @@ -369,7 +366,6 @@ func (r *pipelineReconciler) reconcileFixedResources(ctx context.Context, pl *df return err } - pl.Status.MarkDeployed() return nil } diff --git a/pkg/reconciler/pipeline/controller_test.go b/pkg/reconciler/pipeline/controller_test.go index 2a1762aa4b..e130f49656 100644 --- a/pkg/reconciler/pipeline/controller_test.go +++ b/pkg/reconciler/pipeline/controller_test.go @@ -30,9 +30,11 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/record" "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -173,6 +175,46 @@ func Test_NewReconciler(t *testing.T) { assert.True(t, ok) } +func 
TestReconcile(t *testing.T) { + t.Run("test not found", func(t *testing.T) { + cl := fake.NewClientBuilder().Build() + r := fakeReconciler(t, cl) + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "not-exist", + Namespace: testNamespace, + }, + } + _, err := r.Reconcile(context.TODO(), req) + // Return nil when not found + assert.NoError(t, err) + }) + + t.Run("test found", func(t *testing.T) { + cl := fake.NewClientBuilder().Build() + r := fakeReconciler(t, cl) + testObj := testPipeline.DeepCopy() + err := cl.Create(context.TODO(), testObj) + assert.NoError(t, err) + o := &dfv1.Pipeline{} + err = cl.Get(context.TODO(), types.NamespacedName{ + Namespace: testObj.Namespace, + Name: testObj.Name, + }, o) + assert.NoError(t, err) + assert.Equal(t, testObj.Name, o.Name) + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: testObj.Name, + Namespace: testObj.Namespace, + }, + } + _, err = r.Reconcile(context.TODO(), req) + assert.Error(t, err) + assert.ErrorContains(t, err, "not found") + }) +} + func Test_reconcile(t *testing.T) { ctx := context.TODO() @@ -249,7 +291,7 @@ func Test_reconcile(t *testing.T) { _, err = r.reconcile(ctx, testObj) assert.Error(t, err) events := getEvents(t, r) - assert.Contains(t, events, "Warning ReconcilePipelineFailed Failed to reconcile pipeline: the length of the pipeline name plus the vertex name is over the max limit. (very-very-very-loooooooooooooooooooooooooooooooooooong-input), [must be no more than 63 characters]") + assert.Contains(t, events, "Warning ValidatePipelineFailed Invalid pipeline: the length of the pipeline name plus the vertex name is over the max limit. 
(very-very-very-loooooooooooooooooooooooooooooooooooong-input), [must be no more than 63 characters]") }) t.Run("test reconcile - duplicate vertex", func(t *testing.T) { @@ -267,7 +309,7 @@ func Test_reconcile(t *testing.T) { _, err = r.reconcile(ctx, testObj) assert.Error(t, err) events := getEvents(t, r) - assert.Contains(t, events, "Warning ReconcilePipelineFailed Failed to reconcile pipeline: duplicate vertex name \"input\"") + assert.Contains(t, events, "Warning ValidatePipelineFailed Invalid pipeline: duplicate vertex name \"input\"") }) } diff --git a/pkg/reconciler/pipeline/validate.go b/pkg/reconciler/pipeline/validate.go index 2a98c2e665..7304147c16 100644 --- a/pkg/reconciler/pipeline/validate.go +++ b/pkg/reconciler/pipeline/validate.go @@ -19,6 +19,7 @@ package pipeline import ( "fmt" + "k8s.io/apimachinery/pkg/util/intstr" k8svalidation "k8s.io/apimachinery/pkg/util/validation" dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" @@ -256,6 +257,13 @@ func validateVertex(v dfv1.AbstractVertex) error { return fmt.Errorf("vertex %q: partitions should not > 1 for source vertices", v.Name) } } + // Validate the update strategy. 
+ maxUvail := v.UpdateStrategy.GetRollingUpdateStrategy().GetMaxUnavailable() + _, err := intstr.GetScaledValueFromIntOrPercent(&maxUvail, 1, true) // maxUnavailable should be an interger or a percentage in string + if err != nil { + return fmt.Errorf("vertex %q: invalid maxUnavailable: %v", v.Name, err) + } + for _, ic := range v.InitContainers { if isReservedContainerName(ic.Name) { return fmt.Errorf("vertex %q: init container name %q is reserved for containers created by numaflow", v.Name, ic.Name) diff --git a/pkg/reconciler/pipeline/validate_test.go b/pkg/reconciler/pipeline/validate_test.go index f116e04825..8f7a272d89 100644 --- a/pkg/reconciler/pipeline/validate_test.go +++ b/pkg/reconciler/pipeline/validate_test.go @@ -25,6 +25,7 @@ import ( "github.com/stretchr/testify/assert" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" ) @@ -642,6 +643,46 @@ func TestValidateVertex(t *testing.T) { assert.Contains(t, err.Error(), "can not be 0") }) + t.Run("rollingUpdateStrategy - invalid maxUnavailable", func(t *testing.T) { + v := dfv1.AbstractVertex{ + Name: "my-vertex", + UpdateStrategy: dfv1.UpdateStrategy{ + RollingUpdate: &dfv1.RollingUpdateStrategy{ + MaxUnavailable: ptr.To[intstr.IntOrString](intstr.FromString("10")), + }, + }, + } + err := validateVertex(v) + assert.Error(t, err) + assert.Contains(t, err.Error(), "string is not a percentage") + }) + + t.Run("rollingUpdateStrategy - good percentage maxUnavailable", func(t *testing.T) { + v := dfv1.AbstractVertex{ + Name: "my-vertex", + UpdateStrategy: dfv1.UpdateStrategy{ + RollingUpdate: &dfv1.RollingUpdateStrategy{ + MaxUnavailable: ptr.To[intstr.IntOrString](intstr.FromString("10%")), + }, + }, + } + err := validateVertex(v) + assert.NoError(t, err) + }) + + t.Run("rollingUpdateStrategy - good integer maxUnavailable", func(t *testing.T) { + v := dfv1.AbstractVertex{ + Name: "my-vertex", + UpdateStrategy: 
dfv1.UpdateStrategy{ + RollingUpdate: &dfv1.RollingUpdateStrategy{ + MaxUnavailable: ptr.To[intstr.IntOrString](intstr.FromInt(3)), + }, + }, + } + err := validateVertex(v) + assert.NoError(t, err) + }) + t.Run("good init container", func(t *testing.T) { v := dfv1.AbstractVertex{Name: "my-vertex", InitContainers: goodContainers} err := validateVertex(v) diff --git a/pkg/reconciler/vertex/controller.go b/pkg/reconciler/vertex/controller.go index c1e6b8febb..8789b5d89a 100644 --- a/pkg/reconciler/vertex/controller.go +++ b/pkg/reconciler/vertex/controller.go @@ -31,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -95,8 +96,11 @@ func (r *vertexReconciler) reconcile(ctx context.Context, vertex *dfv1.Vertex) ( return ctrl.Result{}, nil } + vertex.Status.InitConditions() vertex.Status.SetObservedGeneration(vertex.Generation) + desiredReplicas := vertex.GetReplicas() + isbSvc := &dfv1.InterStepBufferService{} isbSvcName := dfv1.DefaultISBSvcName if len(vertex.Spec.InterStepBufferServiceName) > 0 { @@ -123,132 +127,199 @@ func (r *vertexReconciler) reconcile(ctx context.Context, vertex *dfv1.Vertex) ( r.scaler.StartWatching(vertexKey) } + // Create PVCs for reduce vertex + if vertex.IsReduceUDF() { + if err := r.buildReduceVertexPVCs(ctx, vertex); err != nil { + vertex.Status.MarkDeployFailed("BuildReduceVertexPVCsFailed", err.Error()) + r.recorder.Eventf(vertex, corev1.EventTypeWarning, "BuildReduceVertexPVCsFailed", err.Error()) + return ctrl.Result{}, err + } + } + + // Create services + if err := r.createOrUpdateServices(ctx, vertex); err != nil { + vertex.Status.MarkDeployFailed("CreateOrUpdateServicesFailed", err.Error()) + r.recorder.Eventf(vertex, corev1.EventTypeWarning, "CreateOrUpdateServicesFailed", err.Error()) + return ctrl.Result{}, 
err + } + + pipeline := &dfv1.Pipeline{} + if err := r.client.Get(ctx, types.NamespacedName{Namespace: vertex.Namespace, Name: vertex.Spec.PipelineName}, pipeline); err != nil { + log.Errorw("Failed to get pipeline object", zap.Error(err)) + vertex.Status.MarkDeployFailed("GetPipelineFailed", err.Error()) + return ctrl.Result{}, err + } + + // Create pods + if err := r.orchestratePods(ctx, vertex, pipeline, isbSvc); err != nil { + vertex.Status.MarkDeployFailed("OrchestratePodsFailed", err.Error()) + r.recorder.Eventf(vertex, corev1.EventTypeWarning, "OrchestratePodsFailed", err.Error()) + return ctrl.Result{}, err + } + + vertex.Status.MarkDeployed() + + // Mark it running before checking the status of the pods + vertex.Status.MarkPhaseRunning() + + // Check status of the pods + var podList corev1.PodList + selector, _ := labels.Parse(dfv1.KeyPipelineName + "=" + vertex.Spec.PipelineName + "," + dfv1.KeyVertexName + "=" + vertex.Spec.Name) + if err := r.client.List(ctx, &podList, &client.ListOptions{Namespace: vertex.GetNamespace(), LabelSelector: selector}); err != nil { + vertex.Status.MarkPodNotHealthy("ListVerticesPodsFailed", err.Error()) + return ctrl.Result{}, fmt.Errorf("failed to get pods of a vertex: %w", err) + } + readyPods := reconciler.NumOfReadyPods(podList) + if readyPods > desiredReplicas { // It might happen in some corner cases, such as during rollout + readyPods = desiredReplicas + } + vertex.Status.ReadyReplicas = uint32(readyPods) + if healthy, reason, msg := reconciler.CheckPodsStatus(&podList); healthy { + vertex.Status.MarkPodHealthy(reason, msg) + } else { + // Do not need to explicitly requeue, since the it keeps watching the status change of the pods + vertex.Status.MarkPodNotHealthy(reason, msg) + } + + return ctrl.Result{}, nil +} + +func (r *vertexReconciler) orchestratePods(ctx context.Context, vertex *dfv1.Vertex, pipeline *dfv1.Pipeline, isbSvc *dfv1.InterStepBufferService) error { + log := logging.FromContext(ctx) desiredReplicas 
:= vertex.GetReplicas() + vertex.Status.DesiredReplicas = uint32(desiredReplicas) + // Set metrics defer func() { reconciler.VertexDesiredReplicas.WithLabelValues(vertex.Namespace, vertex.Spec.PipelineName, vertex.Spec.Name).Set(float64(desiredReplicas)) reconciler.VertexCurrentReplicas.WithLabelValues(vertex.Namespace, vertex.Spec.PipelineName, vertex.Spec.Name).Set(float64(vertex.Status.Replicas)) }() - if vertex.IsReduceUDF() { - if x := vertex.Spec.UDF.GroupBy.Storage; x != nil && x.PersistentVolumeClaim != nil { - for i := 0; i < desiredReplicas; i++ { - newPvc, err := r.buildReduceVertexPVCSpec(vertex, i) - if err != nil { - log.Errorw("Error building a PVC spec", zap.Error(err)) - vertex.Status.MarkPhaseFailed("BuildPVCSpecFailed", err.Error()) - return ctrl.Result{}, err - } - hash := sharedutil.MustHash(newPvc.Spec) - newPvc.SetAnnotations(map[string]string{dfv1.KeyHash: hash}) - existingPvc := &corev1.PersistentVolumeClaim{} - if err := r.client.Get(ctx, types.NamespacedName{Namespace: vertex.Namespace, Name: newPvc.Name}, existingPvc); err != nil { - if !apierrors.IsNotFound(err) { - log.Errorw("Error finding existing PVC", zap.Error(err)) - vertex.Status.MarkPhaseFailed("FindExistingPVCFailed", err.Error()) - return ctrl.Result{}, err - } - if err := r.client.Create(ctx, newPvc); err != nil && !apierrors.IsAlreadyExists(err) { - r.markPhaseFailedAndLogEvent(vertex, log, "CreatePVCFailed", err.Error(), "Error creating a PVC", zap.Error(err)) - return ctrl.Result{}, err - } - r.recorder.Eventf(vertex, corev1.EventTypeNormal, "CreatePVCSuccess", "Successfully created PVC %s", newPvc.Name) - } else { - if existingPvc.GetAnnotations()[dfv1.KeyHash] != hash { - // TODO: deal with spec difference - if false { - log.Debug("TODO: check spec difference") - } - } - } - } - } + // Build pod spec of the 1st replica to calculate the hash, which is used to determine whether the pod spec is changed + tmpSpec, err := r.buildPodSpec(vertex, pipeline, 
isbSvc.Status.Config, 0) + if err != nil { + return fmt.Errorf("failed to build a pod spec: %w", err) + } + hash := sharedutil.MustHash(tmpSpec) + if vertex.Status.UpdateHash != hash { // New spec, or still processing last update, while new update is coming + vertex.Status.UpdateHash = hash + vertex.Status.UpdatedReplicas = 0 + vertex.Status.UpdatedReadyReplicas = 0 } - // Create services - // Note: We purposely put service reconciliation before pod, - // to prevent pod reconciliation failure from blocking service creation. - // It's ok to keep failing to scale up/down pods (e.g., due to quota), - // but without services, certain platform functionalities will be broken. - // E.g., the vertex processing rate calculation relies on the headless service to determine the number of active pods. - existingSvcs, err := r.findExistingServices(ctx, vertex) - if err != nil { - log.Errorw("Failed to find existing services", zap.Error(err)) - vertex.Status.MarkPhaseFailed("FindExistingSvcsFailed", err.Error()) - return ctrl.Result{}, err + // Manually or automatically scaled down + if currentReplicas := int(vertex.Status.Replicas); currentReplicas > desiredReplicas { + if err := r.cleanUpPodsFromTo(ctx, vertex, desiredReplicas, currentReplicas); err != nil { + return fmt.Errorf("failed to clean up vertex pods [%v, %v): %w", desiredReplicas, currentReplicas, err) + } + vertex.Status.Replicas = uint32(desiredReplicas) } - for _, s := range vertex.GetServiceObjs() { - svcHash := sharedutil.MustHash(s.Spec) - s.Annotations = map[string]string{dfv1.KeyHash: svcHash} - needToCreate := false - if existingSvc, existing := existingSvcs[s.Name]; existing { - if existingSvc.GetAnnotations()[dfv1.KeyHash] != svcHash { - if err := r.client.Delete(ctx, &existingSvc); err != nil { - if !apierrors.IsNotFound(err) { - r.markPhaseFailedAndLogEvent(vertex, log, "DelSvcFailed", err.Error(), "Failed to delete existing service", zap.String("service", existingSvc.Name), zap.Error(err)) - return 
ctrl.Result{}, err + updatedReplicas := int(vertex.Status.UpdatedReplicas) + if updatedReplicas > desiredReplicas { + updatedReplicas = desiredReplicas + vertex.Status.UpdatedReplicas = uint32(updatedReplicas) + } + + if updatedReplicas > 0 { + // Make sure [0 - updatedReplicas] with hash are in place + if err := r.orchestratePodsFromTo(ctx, vertex, pipeline, isbSvc, 0, updatedReplicas, hash); err != nil { + return fmt.Errorf("failed to orchestrate vertex pods [0, %v): %w", updatedReplicas, err) + } + // Wait for the updated pods to be ready before moving on + if vertex.Status.UpdatedReadyReplicas != vertex.Status.UpdatedReplicas { + updatedReadyReplicas := 0 + existingPods, err := r.findExistingPods(ctx, vertex, 0, updatedReplicas) + if err != nil { + return fmt.Errorf("failed to get pods of a vertex: %w", err) + } + for _, pod := range existingPods { + if pod.GetAnnotations()[dfv1.KeyHash] == vertex.Status.UpdateHash { + if reconciler.IsPodReady(pod) { + updatedReadyReplicas++ } - } else { - log.Infow("Deleted a stale service to recreate", zap.String("service", existingSvc.Name)) - r.recorder.Eventf(vertex, corev1.EventTypeNormal, "DelSvcSuccess", "Deleted stale service %s to recreate", existingSvc.Name) } - needToCreate = true } - delete(existingSvcs, s.Name) - } else { - needToCreate = true - } - if needToCreate { - if err := r.client.Create(ctx, s); err != nil { - if apierrors.IsAlreadyExists(err) { - continue - } - r.markPhaseFailedAndLogEvent(vertex, log, "CreateSvcFailed", err.Error(), "Failed to create a service", zap.String("service", s.Name), zap.Error(err)) - return ctrl.Result{}, err - } else { - log.Infow("Succeeded to create a service", zap.String("service", s.Name)) - r.recorder.Eventf(vertex, corev1.EventTypeNormal, "CreateSvcSuccess", "Succeeded to create service %s", s.Name) + vertex.Status.UpdatedReadyReplicas = uint32(updatedReadyReplicas) + if updatedReadyReplicas < updatedReplicas { + return nil } } } - for _, v := range existingSvcs { // 
clean up stale services - if err := r.client.Delete(ctx, &v); err != nil { - if !apierrors.IsNotFound(err) { - r.markPhaseFailedAndLogEvent(vertex, log, "DelSvcFailed", err.Error(), "Failed to delete service not in use", zap.String("service", v.Name), zap.Error(err)) - return ctrl.Result{}, err + + if vertex.Status.UpdateHash == vertex.Status.CurrentHash || + vertex.Status.CurrentHash == "" { + // 1. Regular scaling operation 2. First time + // create (desiredReplicas-updatedReplicas) pods directly + if desiredReplicas > updatedReplicas { + if err := r.orchestratePodsFromTo(ctx, vertex, pipeline, isbSvc, updatedReplicas, desiredReplicas, hash); err != nil { + return fmt.Errorf("failed to orchestrate vertex pods [%v, %v): %w", updatedReplicas, desiredReplicas, err) } - } else { - log.Infow("Deleted a stale service", zap.String("service", v.Name)) - r.recorder.Eventf(vertex, corev1.EventTypeNormal, "DelSvcSuccess", "Deleted stale service %s", v.Name) + } + vertex.Status.UpdatedReplicas = uint32(desiredReplicas) + vertex.Status.CurrentHash = vertex.Status.UpdateHash + } else { // Update scenario + if updatedReplicas >= desiredReplicas { + return nil + } + + // Create more pods + if vertex.Spec.UpdateStrategy.GetUpdateStrategyType() != dfv1.RollingUpdateStrategyType { + // Revisit later, we only support rolling update for now + return nil + } + + // Calculate the to be updated replicas based on the max unavailable configuration + maxUnavailConf := vertex.Spec.UpdateStrategy.GetRollingUpdateStrategy().GetMaxUnavailable() + toBeUpdated, err := intstr.GetScaledValueFromIntOrPercent(&maxUnavailConf, desiredReplicas, true) + if err != nil { // This should never happen since we have validated the configuration + return fmt.Errorf("invalid max unavailable configuration in rollingUpdate: %w", err) + } + if updatedReplicas+toBeUpdated > desiredReplicas { + toBeUpdated = desiredReplicas - updatedReplicas + } + log.Infof("Rolling update %d replicas, [%d, %d)", toBeUpdated, 
updatedReplicas, updatedReplicas+toBeUpdated) + + // Create pods [updatedReplicas, updatedReplicas+toBeUpdated), and clean up any pods in that range that has a different hash + if err := r.orchestratePodsFromTo(ctx, vertex, pipeline, isbSvc, updatedReplicas, updatedReplicas+toBeUpdated, vertex.Status.UpdateHash); err != nil { + return fmt.Errorf("failed to orchestrate pods [%v, %v)]: %w", updatedReplicas, updatedReplicas+toBeUpdated, err) + } + vertex.Status.UpdatedReplicas = uint32(updatedReplicas + toBeUpdated) + if vertex.Status.UpdatedReplicas == uint32(desiredReplicas) { + vertex.Status.CurrentHash = vertex.Status.UpdateHash } } - pipeline := &dfv1.Pipeline{} - if err := r.client.Get(ctx, types.NamespacedName{Namespace: vertex.Namespace, Name: vertex.Spec.PipelineName}, pipeline); err != nil { - log.Errorw("Failed to get pipeline object", zap.Error(err)) - vertex.Status.MarkPhaseFailed("GetPipelineFailed", err.Error()) - return ctrl.Result{}, err + currentReplicas := int(vertex.Status.Replicas) + if currentReplicas != desiredReplicas { + log.Infow("Pipeline Vertex replicas changed", "currentReplicas", currentReplicas, "desiredReplicas", desiredReplicas) + r.recorder.Eventf(vertex, corev1.EventTypeNormal, "ReplicasScaled", "Replicas changed from %d to %d", currentReplicas, desiredReplicas) + vertex.Status.Replicas = uint32(desiredReplicas) + vertex.Status.LastScaledAt = metav1.Time{Time: time.Now()} } - // Create pods - existingPods, err := r.findExistingPods(ctx, vertex) + if vertex.Status.Selector == "" { + selector, _ := labels.Parse(dfv1.KeyPipelineName + "=" + vertex.Spec.PipelineName + "," + dfv1.KeyVertexName + "=" + vertex.Spec.Name) + vertex.Status.Selector = selector.String() + } + + return nil +} + +func (r *vertexReconciler) orchestratePodsFromTo(ctx context.Context, vertex *dfv1.Vertex, pipeline *dfv1.Pipeline, isbSvc *dfv1.InterStepBufferService, fromReplica, toReplica int, newHash string) error { + log := logging.FromContext(ctx) + existingPods, 
err := r.findExistingPods(ctx, vertex, fromReplica, toReplica) if err != nil { - log.Errorw("Failed to find existing pods", zap.Error(err)) - vertex.Status.MarkPhaseFailed("FindExistingPodFailed", err.Error()) - return ctrl.Result{}, err + return fmt.Errorf("failed to find existing pods: %w", err) } - for replica := 0; replica < desiredReplicas; replica++ { + for replica := fromReplica; replica < toReplica; replica++ { podSpec, err := r.buildPodSpec(vertex, pipeline, isbSvc.Status.Config, replica) if err != nil { - log.Errorw("Failed to generate pod spec", zap.Error(err)) - vertex.Status.MarkPhaseFailed("PodSpecGenFailed", err.Error()) - return ctrl.Result{}, err + return fmt.Errorf("failed to generate pod spec: %w", err) } - hash := sharedutil.MustHash(podSpec) podNamePrefix := fmt.Sprintf("%s-%d-", vertex.Name, replica) needToCreate := true for existingPodName, existingPod := range existingPods { if strings.HasPrefix(existingPodName, podNamePrefix) { - if existingPod.GetAnnotations()[dfv1.KeyHash] == hash && existingPod.Status.Phase != corev1.PodFailed { + if existingPod.GetAnnotations()[dfv1.KeyHash] == newHash && existingPod.Status.Phase != corev1.PodFailed { needToCreate = false delete(existingPods, existingPodName) } @@ -272,7 +343,7 @@ func (r *vertexReconciler) reconcile(ctx context.Context, vertex *dfv1.Vertex) ( labels[dfv1.KeyAppName] = vertex.Name labels[dfv1.KeyPipelineName] = vertex.Spec.PipelineName labels[dfv1.KeyVertexName] = vertex.Spec.Name - annotations[dfv1.KeyHash] = hash + annotations[dfv1.KeyHash] = newHash annotations[dfv1.KeyReplica] = strconv.Itoa(replica) if vertex.IsMapUDF() || vertex.IsReduceUDF() { annotations[dfv1.KeyDefaultContainer] = dfv1.CtrUdf @@ -297,52 +368,73 @@ func (r *vertexReconciler) reconcile(ctx context.Context, vertex *dfv1.Vertex) ( } pod.Spec.Hostname = fmt.Sprintf("%s-%d", vertex.Name, replica) if err := r.client.Create(ctx, pod); err != nil { - r.markPhaseFailedAndLogEvent(vertex, log, "CreatePodFailed", 
err.Error(), "Failed to created pod", zap.Error(err)) - return ctrl.Result{}, err + r.recorder.Eventf(vertex, corev1.EventTypeWarning, "CreatePodFailed", "Failed to create a pod %s", pod.Name) + return fmt.Errorf("failed to create a vertex pod: %w", err) } log.Infow("Succeeded to create a pod", zap.String("pod", pod.Name)) - r.recorder.Eventf(vertex, corev1.EventTypeNormal, "CreatePodSuccess", "Succeeded to create pod %s", pod.Name) + r.recorder.Eventf(vertex, corev1.EventTypeNormal, "CreatePodSuccess", "Succeeded to create a pod %s", pod.Name) } } for _, v := range existingPods { if err := r.client.Delete(ctx, &v); err != nil && !apierrors.IsNotFound(err) { - r.markPhaseFailedAndLogEvent(vertex, log, "DelPodFailed", err.Error(), "Failed to delete pod", zap.Error(err)) - return ctrl.Result{}, err + r.recorder.Eventf(vertex, corev1.EventTypeWarning, "DelPodFailed", "Failed to delete pod %s", v.Name) + return fmt.Errorf("failed to delete a vertex pod %s: %w", v.Name, err) } } + return nil +} - currentReplicas := int(vertex.Status.Replicas) - if currentReplicas != desiredReplicas || vertex.Status.Selector == "" { - log.Infow("Pipeline Vertex replicas changed", "currentReplicas", currentReplicas, "desiredReplicas", desiredReplicas) - r.recorder.Eventf(vertex, corev1.EventTypeNormal, "ReplicasScaled", "Replicas changed from %d to %d", currentReplicas, desiredReplicas) - vertex.Status.Replicas = uint32(desiredReplicas) - vertex.Status.LastScaledAt = metav1.Time{Time: time.Now()} +func (r *vertexReconciler) cleanUpPodsFromTo(ctx context.Context, vertex *dfv1.Vertex, fromReplica, toReplica int) error { + log := logging.FromContext(ctx) + existingPods, err := r.findExistingPods(ctx, vertex, fromReplica, toReplica) + if err != nil { + return fmt.Errorf("failed to find existing pods: %w", err) } - selector, _ := labels.Parse(dfv1.KeyPipelineName + "=" + vertex.Spec.PipelineName + "," + dfv1.KeyVertexName + "=" + vertex.Spec.Name) - vertex.Status.Selector = selector.String() - 
- // Mark it running before checking the status of the pods - vertex.Status.MarkPhaseRunning() - // Check status of the pods - var podList corev1.PodList - if err := r.client.List(ctx, &podList, &client.ListOptions{Namespace: vertex.GetNamespace(), LabelSelector: selector}); err != nil { - vertex.Status.MarkPodNotHealthy("ListVerticesPodsFailed", err.Error()) - return ctrl.Result{}, fmt.Errorf("failed to get pods of a vertex: %w", err) + for _, pod := range existingPods { + if err := r.client.Delete(ctx, &pod); err != nil { + return fmt.Errorf("failed to delete pod %s: %w", pod.Name, err) + } + log.Infof("Deleted Vertx pod %q", pod.Name) + r.recorder.Eventf(vertex, corev1.EventTypeNormal, "DeletePodSuccess", "Succeeded to delete a vertex pod %s", pod.Name) } - readyPods := reconciler.NumOfReadyPods(podList) - if readyPods > desiredReplicas { // It might happen in some corner cases, such as during rollout - readyPods = desiredReplicas + return nil +} + +func (r *vertexReconciler) buildReduceVertexPVCs(ctx context.Context, vertex *dfv1.Vertex) error { + if !vertex.IsReduceUDF() { + return nil } - vertex.Status.ReadyReplicas = uint32(readyPods) - if healthy, reason, msg := reconciler.CheckPodsStatus(&podList); healthy { - vertex.Status.MarkPodHealthy(reason, msg) - } else { - // Do not need to explicitly requeue, since the it keeps watching the status change of the pods - vertex.Status.MarkPodNotHealthy(reason, msg) + if x := vertex.Spec.UDF.GroupBy.Storage; x != nil && x.PersistentVolumeClaim != nil { + log := logging.FromContext(ctx) + for i := 0; i < vertex.GetPartitionCount(); i++ { + newPvc, err := r.buildReduceVertexPVCSpec(vertex, i) + if err != nil { + return fmt.Errorf("failed to build a PVC spec: %w", err) + } + hash := sharedutil.MustHash(newPvc.Spec) + newPvc.SetAnnotations(map[string]string{dfv1.KeyHash: hash}) + existingPvc := &corev1.PersistentVolumeClaim{} + if err := r.client.Get(ctx, types.NamespacedName{Namespace: vertex.Namespace, Name: 
newPvc.Name}, existingPvc); err != nil { + if !apierrors.IsNotFound(err) { + return fmt.Errorf("failed to find existing PVC: %w", err) + } + if err := r.client.Create(ctx, newPvc); err != nil && !apierrors.IsAlreadyExists(err) { + r.recorder.Eventf(vertex, corev1.EventTypeWarning, "CreatePVCFailed", "Error creating a PVC: %s", err.Error()) + return fmt.Errorf("failed to create a PVC: %w", err) + } + r.recorder.Eventf(vertex, corev1.EventTypeNormal, "CreatePVCSuccess", "Successfully created PVC %s", newPvc.Name) + } else { + if existingPvc.GetAnnotations()[dfv1.KeyHash] != hash { + // TODO: deal with spec difference + if false { + log.Debug("TODO: check spec difference") + } + } + } + } } - - return ctrl.Result{}, nil + return nil } func (r *vertexReconciler) buildReduceVertexPVCSpec(vertex *dfv1.Vertex, replicaIndex int) (*corev1.PersistentVolumeClaim, error) { @@ -364,6 +456,60 @@ func (r *vertexReconciler) buildReduceVertexPVCSpec(vertex *dfv1.Vertex, replica return &newPvc, nil } +func (r *vertexReconciler) createOrUpdateServices(ctx context.Context, vertex *dfv1.Vertex) error { + log := logging.FromContext(ctx) + existingSvcs, err := r.findExistingServices(ctx, vertex) + if err != nil { + return fmt.Errorf("failed to find existing services: %w", err) + } + for _, s := range vertex.GetServiceObjs() { + svcHash := sharedutil.MustHash(s.Spec) + s.Annotations = map[string]string{dfv1.KeyHash: svcHash} + needToCreate := false + if existingSvc, existing := existingSvcs[s.Name]; existing { + if existingSvc.GetAnnotations()[dfv1.KeyHash] != svcHash { + if err := r.client.Delete(ctx, &existingSvc); err != nil { + if !apierrors.IsNotFound(err) { + r.recorder.Eventf(vertex, corev1.EventTypeWarning, "DelSvcFailed", "Error deleting existing service: %s", err.Error()) + return fmt.Errorf("failed to delete existing service: %w", err) + } + } else { + log.Infow("Deleted a stale service to recreate", zap.String("service", existingSvc.Name)) + r.recorder.Eventf(vertex, 
corev1.EventTypeNormal, "DelSvcSuccess", "Deleted stale service %s to recreate", existingSvc.Name) + } + needToCreate = true + } + delete(existingSvcs, s.Name) + } else { + needToCreate = true + } + if needToCreate { + if err := r.client.Create(ctx, s); err != nil { + if apierrors.IsAlreadyExists(err) { + continue + } + r.recorder.Eventf(vertex, corev1.EventTypeWarning, "CreateSvcFailed", "Error creating a service: %s", err.Error()) + return fmt.Errorf("failed to create a service: %w", err) + } else { + log.Infow("Succeeded to create a service", zap.String("service", s.Name)) + r.recorder.Eventf(vertex, corev1.EventTypeNormal, "CreateSvcSuccess", "Succeeded to create service %s", s.Name) + } + } + } + for _, v := range existingSvcs { // clean up stale services + if err := r.client.Delete(ctx, &v); err != nil { + if !apierrors.IsNotFound(err) { + r.recorder.Eventf(vertex, corev1.EventTypeWarning, "DelSvcFailed", "Error deleting existing service that is not in use: %s", err.Error()) + return fmt.Errorf("failed to delete existing service that is not in use: %w", err) + } + } else { + log.Infow("Deleted a stale service", zap.String("service", v.Name)) + r.recorder.Eventf(vertex, corev1.EventTypeNormal, "DelSvcSuccess", "Deleted stale service %s", v.Name) + } + } + return nil +} + func (r *vertexReconciler) buildPodSpec(vertex *dfv1.Vertex, pl *dfv1.Pipeline, isbSvcConfig dfv1.BufferServiceConfig, replicaIndex int) (*corev1.PodSpec, error) { isbSvcType, envs := sharedutil.GetIsbSvcEnvVars(isbSvcConfig) podSpec, err := vertex.GetPodSpec(dfv1.GetVertexPodSpecReq{ @@ -425,7 +571,7 @@ func (r *vertexReconciler) buildPodSpec(vertex *dfv1.Vertex, pl *dfv1.Pipeline, return podSpec, nil } -func (r *vertexReconciler) findExistingPods(ctx context.Context, vertex *dfv1.Vertex) (map[string]corev1.Pod, error) { +func (r *vertexReconciler) findExistingPods(ctx context.Context, vertex *dfv1.Vertex, fromReplica, toReplica int) (map[string]corev1.Pod, error) { pods := &corev1.PodList{} 
selector, _ := labels.Parse(dfv1.KeyPipelineName + "=" + vertex.Spec.PipelineName + "," + dfv1.KeyVertexName + "=" + vertex.Spec.Name) if err := r.client.List(ctx, pods, &client.ListOptions{Namespace: vertex.Namespace, LabelSelector: selector}); err != nil { @@ -437,7 +583,11 @@ func (r *vertexReconciler) findExistingPods(ctx context.Context, vertex *dfv1.Ve // Ignore pods being deleted continue } - result[v.Name] = v + replicaStr := v.GetAnnotations()[dfv1.KeyReplica] + replica, _ := strconv.Atoi(replicaStr) + if replica >= fromReplica && replica < toReplica { + result[v.Name] = v + } } return result, nil } @@ -454,10 +604,3 @@ func (r *vertexReconciler) findExistingServices(ctx context.Context, vertex *dfv } return result, nil } - -// Helper function for warning event types -func (r *vertexReconciler) markPhaseFailedAndLogEvent(vertex *dfv1.Vertex, log *zap.SugaredLogger, reason, message, logMsg string, logWith ...interface{}) { - log.Errorw(logMsg, logWith) - vertex.Status.MarkPhaseFailed(reason, message) - r.recorder.Event(vertex, corev1.EventTypeWarning, reason, message) -} diff --git a/pkg/reconciler/vertex/controller_test.go b/pkg/reconciler/vertex/controller_test.go index dedc25898e..4a2faa56b0 100644 --- a/pkg/reconciler/vertex/controller_test.go +++ b/pkg/reconciler/vertex/controller_test.go @@ -20,6 +20,7 @@ import ( "context" "strings" "testing" + "time" "github.com/stretchr/testify/assert" "go.uber.org/zap/zaptest" @@ -28,14 +29,18 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/record" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" "github.com/numaproj/numaflow/pkg/reconciler" 
"github.com/numaproj/numaflow/pkg/reconciler/vertex/scaling" + sharedutil "github.com/numaproj/numaflow/pkg/shared/util" ) const ( @@ -166,6 +171,19 @@ func init() { _ = corev1.AddToScheme(scheme.Scheme) } +func fakeReconciler(t *testing.T, cl client.WithWatch) *vertexReconciler { + t.Helper() + return &vertexReconciler{ + client: cl, + scheme: scheme.Scheme, + config: reconciler.FakeGlobalConfig(t, fakeGlobalISBSvcConfig), + image: testFlowImage, + logger: zaptest.NewLogger(t).Sugar(), + recorder: record.NewFakeRecorder(64), + scaler: scaling.NewScaler(cl), + } +} + func Test_NewReconciler(t *testing.T) { cl := fake.NewClientBuilder().Build() r := NewReconciler(cl, scheme.Scheme, reconciler.FakeGlobalConfig(t, fakeGlobalISBSvcConfig), testFlowImage, scaling.NewScaler(cl), zaptest.NewLogger(t).Sugar(), record.NewFakeRecorder(64)) @@ -173,19 +191,51 @@ func Test_NewReconciler(t *testing.T) { assert.True(t, ok) } +func TestReconcile(t *testing.T) { + t.Run("test not found", func(t *testing.T) { + cl := fake.NewClientBuilder().Build() + r := fakeReconciler(t, cl) + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "not-exist", + Namespace: testNamespace, + }, + } + _, err := r.Reconcile(context.TODO(), req) + // Return nil when not found + assert.NoError(t, err) + }) + + t.Run("test found", func(t *testing.T) { + cl := fake.NewClientBuilder().Build() + r := fakeReconciler(t, cl) + testObj := testVertex.DeepCopy() + err := cl.Create(context.TODO(), testObj) + assert.NoError(t, err) + o := &dfv1.Vertex{} + err = cl.Get(context.TODO(), types.NamespacedName{ + Namespace: testObj.Namespace, + Name: testObj.Name, + }, o) + assert.NoError(t, err) + assert.Equal(t, testObj.Name, o.Name) + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: testObj.Name, + Namespace: testObj.Namespace, + }, + } + _, err = r.Reconcile(context.TODO(), req) + assert.Error(t, err) + assert.ErrorContains(t, err, "not found") + }) +} + func Test_BuildPodSpec(t 
*testing.T) { - fakeConfig := reconciler.FakeGlobalConfig(t, fakeGlobalISBSvcConfig) t.Run("test source", func(t *testing.T) { cl := fake.NewClientBuilder().Build() - r := &vertexReconciler{ - client: cl, - scheme: scheme.Scheme, - config: fakeConfig, - image: testFlowImage, - logger: zaptest.NewLogger(t).Sugar(), - recorder: record.NewFakeRecorder(64), - } + r := fakeReconciler(t, cl) testObj := testSrcVertex.DeepCopy() spec, err := r.buildPodSpec(testObj, testPipeline, fakeIsbSvcConfig, 0) assert.NoError(t, err) @@ -211,14 +261,7 @@ func Test_BuildPodSpec(t *testing.T) { t.Run("test source with transformer", func(t *testing.T) { cl := fake.NewClientBuilder().Build() - r := &vertexReconciler{ - client: cl, - scheme: scheme.Scheme, - config: fakeConfig, - image: testFlowImage, - logger: zaptest.NewLogger(t).Sugar(), - recorder: record.NewFakeRecorder(64), - } + r := fakeReconciler(t, cl) testObj := testSrcVertex.DeepCopy() testObj.Spec.Source = &dfv1.Source{ HTTP: &dfv1.HTTPSource{}, @@ -236,14 +279,7 @@ func Test_BuildPodSpec(t *testing.T) { t.Run("test user-defined source with transformer", func(t *testing.T) { cl := fake.NewClientBuilder().Build() - r := &vertexReconciler{ - client: cl, - scheme: scheme.Scheme, - config: fakeConfig, - image: testFlowImage, - logger: zaptest.NewLogger(t).Sugar(), - recorder: record.NewFakeRecorder(64), - } + r := fakeReconciler(t, cl) testObj := testSrcVertex.DeepCopy() testObj.Spec.Source = &dfv1.Source{ UDSource: &dfv1.UDSource{ @@ -265,14 +301,7 @@ func Test_BuildPodSpec(t *testing.T) { t.Run("test sink", func(t *testing.T) { cl := fake.NewClientBuilder().Build() - r := &vertexReconciler{ - client: cl, - scheme: scheme.Scheme, - config: fakeConfig, - image: testFlowImage, - logger: zaptest.NewLogger(t).Sugar(), - recorder: record.NewFakeRecorder(64), - } + r := fakeReconciler(t, cl) testObj := testVertex.DeepCopy() testObj.Name = "test-pl-output" testObj.Spec.Name = "output" @@ -303,14 +332,7 @@ func Test_BuildPodSpec(t 
*testing.T) { t.Run("test user-defined sink", func(t *testing.T) { cl := fake.NewClientBuilder().Build() - r := &vertexReconciler{ - client: cl, - scheme: scheme.Scheme, - config: fakeConfig, - image: testFlowImage, - logger: zaptest.NewLogger(t).Sugar(), - recorder: record.NewFakeRecorder(64), - } + r := fakeReconciler(t, cl) testObj := testVertex.DeepCopy() testObj.Name = "test-pl-output" testObj.Spec.Name = "output" @@ -340,14 +362,7 @@ func Test_BuildPodSpec(t *testing.T) { t.Run("test map udf", func(t *testing.T) { cl := fake.NewClientBuilder().Build() - r := &vertexReconciler{ - client: cl, - scheme: scheme.Scheme, - config: fakeConfig, - image: testFlowImage, - logger: zaptest.NewLogger(t).Sugar(), - recorder: record.NewFakeRecorder(64), - } + r := fakeReconciler(t, cl) testObj := testVertex.DeepCopy() testObj.Spec.UDF = &dfv1.UDF{ Builtin: &dfv1.Function{ @@ -378,14 +393,7 @@ func Test_BuildPodSpec(t *testing.T) { t.Run("test reduce udf", func(t *testing.T) { cl := fake.NewClientBuilder().Build() - r := &vertexReconciler{ - client: cl, - scheme: scheme.Scheme, - config: fakeConfig, - image: testFlowImage, - logger: zaptest.NewLogger(t).Sugar(), - recorder: record.NewFakeRecorder(64), - } + r := fakeReconciler(t, cl) testObj := testVertex.DeepCopy() volSize, _ := resource.ParseQuantity("1Gi") testObj.Spec.UDF = &dfv1.UDF{ @@ -422,7 +430,54 @@ func Test_BuildPodSpec(t *testing.T) { } func Test_reconcile(t *testing.T) { - fakeConfig := reconciler.FakeGlobalConfig(t, fakeGlobalISBSvcConfig) + + t.Run("test deletion", func(t *testing.T) { + cl := fake.NewClientBuilder().Build() + r := fakeReconciler(t, cl) + testObj := testVertex.DeepCopy() + testObj.DeletionTimestamp = &metav1.Time{ + Time: time.Now(), + } + _, err := r.reconcile(context.TODO(), testObj) + assert.NoError(t, err) + }) + + t.Run("test no isbsvc", func(t *testing.T) { + cl := fake.NewClientBuilder().Build() + r := fakeReconciler(t, cl) + testObj := testVertex.DeepCopy() + _, err := 
r.reconcile(context.TODO(), testObj) + assert.Error(t, err) + assert.ErrorContains(t, err, "not found") + assert.Equal(t, testObj.Status.Phase, dfv1.VertexPhaseFailed) + assert.Equal(t, testObj.Status.Reason, "ISBSvcNotFound") + assert.Contains(t, testObj.Status.Message, "not found") + }) + + t.Run("test isbsvc unhealthy", func(t *testing.T) { + cl := fake.NewClientBuilder().Build() + r := fakeReconciler(t, cl) + testIsbSvc := testNativeRedisIsbSvc.DeepCopy() + testIsbSvc.Status.MarkConfigured() + err := cl.Create(context.TODO(), testIsbSvc) + assert.Nil(t, err) + testPl := testPipeline.DeepCopy() + err = cl.Create(context.TODO(), testPl) + assert.Nil(t, err) + testObj := testVertex.DeepCopy() + testObj.Spec.Source = &dfv1.Source{ + HTTP: &dfv1.HTTPSource{ + Service: true, + }, + } + _, err = r.reconcile(context.TODO(), testObj) + assert.Error(t, err) + assert.ErrorContains(t, err, "not healthy") + assert.Equal(t, testObj.Status.Phase, dfv1.VertexPhaseFailed) + assert.Equal(t, testObj.Status.Reason, "ISBSvcNotHealthy") + assert.Contains(t, testObj.Status.Message, "not healthy") + }) + t.Run("test reconcile source", func(t *testing.T) { cl := fake.NewClientBuilder().Build() ctx := context.TODO() @@ -434,15 +489,7 @@ func Test_reconcile(t *testing.T) { testPl := testPipeline.DeepCopy() err = cl.Create(ctx, testPl) assert.Nil(t, err) - r := &vertexReconciler{ - client: cl, - scheme: scheme.Scheme, - config: fakeConfig, - image: testFlowImage, - scaler: scaling.NewScaler(cl), - logger: zaptest.NewLogger(t).Sugar(), - recorder: record.NewFakeRecorder(64), - } + r := fakeReconciler(t, cl) testObj := testVertex.DeepCopy() testObj.Spec.Source = &dfv1.Source{ HTTP: &dfv1.HTTPSource{ @@ -486,15 +533,7 @@ func Test_reconcile(t *testing.T) { testPl := testPipeline.DeepCopy() err = cl.Create(ctx, testPl) assert.Nil(t, err) - r := &vertexReconciler{ - client: cl, - scheme: scheme.Scheme, - config: fakeConfig, - image: testFlowImage, - scaler: scaling.NewScaler(cl), - logger: 
zaptest.NewLogger(t).Sugar(), - recorder: record.NewFakeRecorder(64), - } + r := fakeReconciler(t, cl) testObj := testVertex.DeepCopy() testObj.Spec.Sink = &dfv1.Sink{} _, err = r.reconcile(ctx, testObj) @@ -519,15 +558,7 @@ func Test_reconcile(t *testing.T) { testPl := testPipeline.DeepCopy() err = cl.Create(ctx, testPl) assert.Nil(t, err) - r := &vertexReconciler{ - client: cl, - scheme: scheme.Scheme, - config: fakeConfig, - image: testFlowImage, - scaler: scaling.NewScaler(cl), - logger: zaptest.NewLogger(t).Sugar(), - recorder: record.NewFakeRecorder(64), - } + r := fakeReconciler(t, cl) testObj := testVertex.DeepCopy() testObj.Spec.UDF = &dfv1.UDF{ Builtin: &dfv1.Function{ @@ -545,7 +576,7 @@ func Test_reconcile(t *testing.T) { assert.Equal(t, 2, len(pods.Items[0].Spec.Containers)) }) - t.Run("test reconcile vertex with customization", func(t *testing.T) { + t.Run("test reconcile reduce udf", func(t *testing.T) { cl := fake.NewClientBuilder().Build() ctx := context.TODO() testIsbSvc := testNativeRedisIsbSvc.DeepCopy() @@ -556,15 +587,54 @@ func Test_reconcile(t *testing.T) { testPl := testPipeline.DeepCopy() err = cl.Create(ctx, testPl) assert.Nil(t, err) - r := &vertexReconciler{ - client: cl, - scheme: scheme.Scheme, - config: fakeConfig, - image: testFlowImage, - scaler: scaling.NewScaler(cl), - logger: zaptest.NewLogger(t).Sugar(), - recorder: record.NewFakeRecorder(64), + r := fakeReconciler(t, cl) + testObj := testVertex.DeepCopy() + testObj.Spec.UDF = &dfv1.UDF{ + Container: &dfv1.Container{ + Image: "my-image", + }, + GroupBy: &dfv1.GroupBy{ + Window: dfv1.Window{ + Fixed: &dfv1.FixedWindow{ + Length: &metav1.Duration{ + Duration: 10 * time.Second, + }, + }, + }, + Storage: &dfv1.PBQStorage{ + PersistentVolumeClaim: &dfv1.PersistenceStrategy{ + AccessMode: ptr.To[corev1.PersistentVolumeAccessMode](corev1.ReadWriteOnce), + }, + }, + }, } + _, err = r.reconcile(ctx, testObj) + assert.NoError(t, err) + pods := &corev1.PodList{} + selector, _ := 
labels.Parse(dfv1.KeyPipelineName + "=" + testPipelineName + "," + dfv1.KeyVertexName + "=" + testVertexSpecName) + err = r.client.List(ctx, pods, &client.ListOptions{Namespace: testNamespace, LabelSelector: selector}) + assert.NoError(t, err) + assert.Equal(t, 1, len(pods.Items)) + assert.True(t, strings.HasPrefix(pods.Items[0].Name, testVertexName+"-0-")) + assert.Equal(t, 2, len(pods.Items[0].Spec.Containers)) + pvc := &corev1.PersistentVolumeClaim{} + err = r.client.Get(ctx, types.NamespacedName{Name: dfv1.GeneratePBQStoragePVCName(testPl.Name, testObj.Spec.Name, 0), Namespace: testNamespace}, pvc) + assert.NoError(t, err) + assert.Equal(t, dfv1.GeneratePBQStoragePVCName(testPl.Name, testObj.Spec.Name, 0), pvc.Name) + }) + + t.Run("test reconcile vertex with customization", func(t *testing.T) { + cl := fake.NewClientBuilder().Build() + ctx := context.TODO() + testIsbSvc := testNativeRedisIsbSvc.DeepCopy() + testIsbSvc.Status.MarkConfigured() + testIsbSvc.Status.MarkDeployed() + err := cl.Create(ctx, testIsbSvc) + assert.Nil(t, err) + testPl := testPipeline.DeepCopy() + err = cl.Create(ctx, testPl) + assert.Nil(t, err) + r := fakeReconciler(t, cl) testObj := testVertex.DeepCopy() testObj.Spec.Sink = &dfv1.Sink{} testObj.Spec.ContainerTemplate = &dfv1.ContainerTemplate{ @@ -629,15 +699,7 @@ func Test_reconcile(t *testing.T) { testPl.Spec.Vertices[1].SideInputs = []string{"s1"} err = cl.Create(ctx, testPl) assert.Nil(t, err) - r := &vertexReconciler{ - client: cl, - scheme: scheme.Scheme, - config: fakeConfig, - image: testFlowImage, - scaler: scaling.NewScaler(cl), - logger: zaptest.NewLogger(t).Sugar(), - recorder: record.NewFakeRecorder(64), - } + r := fakeReconciler(t, cl) testObj := testVertex.DeepCopy() testObj.Spec.UDF = &dfv1.UDF{ Builtin: &dfv1.Function{ @@ -656,10 +718,84 @@ func Test_reconcile(t *testing.T) { assert.Equal(t, 3, len(pods.Items[0].Spec.Containers)) assert.Equal(t, 2, len(pods.Items[0].Spec.InitContainers)) }) + + t.Run("test reconcile 
rolling update", func(t *testing.T) { + cl := fake.NewClientBuilder().Build() + ctx := context.TODO() + testIsbSvc := testNativeRedisIsbSvc.DeepCopy() + testIsbSvc.Status.MarkConfigured() + testIsbSvc.Status.MarkDeployed() + err := cl.Create(ctx, testIsbSvc) + assert.Nil(t, err) + testPl := testPipeline.DeepCopy() + err = cl.Create(ctx, testPl) + assert.Nil(t, err) + r := fakeReconciler(t, cl) + testObj := testVertex.DeepCopy() + testObj.Spec.UDF = &dfv1.UDF{ + Builtin: &dfv1.Function{ + Name: "cat", + }, + } + testObj.Spec.Replicas = ptr.To[int32](3) + _, err = r.reconcile(ctx, testObj) + assert.NoError(t, err) + pods := &corev1.PodList{} + selector, _ := labels.Parse(dfv1.KeyPipelineName + "=" + testPipelineName + "," + dfv1.KeyVertexName + "=" + testVertexSpecName) + err = r.client.List(ctx, pods, &client.ListOptions{Namespace: testNamespace, LabelSelector: selector}) + assert.NoError(t, err) + assert.Equal(t, 3, len(pods.Items)) + + tmpSpec, _ := r.buildPodSpec(testObj, testPl, testIsbSvc.Status.Config, 0) + hash := sharedutil.MustHash(tmpSpec) + testObj.Status.Replicas = 3 + testObj.Status.ReadyReplicas = 3 + testObj.Status.UpdateHash = hash + testObj.Status.CurrentHash = hash + + // Reduce desired replicas + testObj.Spec.Replicas = ptr.To[int32](2) + _, err = r.reconcile(ctx, testObj) + assert.NoError(t, err) + err = r.client.List(ctx, pods, &client.ListOptions{Namespace: testNamespace, LabelSelector: selector}) + assert.NoError(t, err) + assert.Equal(t, 2, len(pods.Items)) + assert.Equal(t, uint32(2), testObj.Status.Replicas) + assert.Equal(t, uint32(2), testObj.Status.UpdatedReplicas) + + // updatedReplicas > desiredReplicas + testObj.Status.UpdatedReplicas = 3 + _, err = r.reconcile(ctx, testObj) + assert.NoError(t, err) + assert.Equal(t, uint32(2), testObj.Status.UpdatedReplicas) + + // Clean up + testObj.Spec.Replicas = ptr.To[int32](0) + _, err = r.reconcile(ctx, testObj) + assert.NoError(t, err) + err = r.client.List(ctx, pods, 
&client.ListOptions{Namespace: testNamespace, LabelSelector: selector}) + assert.NoError(t, err) + assert.Equal(t, 0, len(pods.Items)) + + // rolling update + testObj.Spec.Replicas = ptr.To[int32](20) + testObj.Status.UpdatedReplicas = 20 + testObj.Status.UpdatedReadyReplicas = 20 + testObj.Status.Replicas = 20 + testObj.Status.CurrentHash = "123456" + testObj.Status.UpdateHash = "123456" + _, err = r.reconcile(ctx, testObj) + assert.NoError(t, err) + err = r.client.List(ctx, pods, &client.ListOptions{Namespace: testNamespace, LabelSelector: selector}) + assert.NoError(t, err) + assert.Equal(t, 5, len(pods.Items)) + assert.Equal(t, uint32(20), testObj.Status.Replicas) + assert.Equal(t, uint32(5), testObj.Status.UpdatedReplicas) + }) } func Test_reconcileEvents(t *testing.T) { - t.Run("test reconcile - isbsvc doesn't exist", func(t *testing.T) { + t.Run("test reconcile - events", func(t *testing.T) { cl := fake.NewClientBuilder().Build() ctx := context.TODO() testIsbSvc := testNativeRedisIsbSvc.DeepCopy() @@ -670,15 +806,7 @@ func Test_reconcileEvents(t *testing.T) { testPl := testPipeline.DeepCopy() err = cl.Create(ctx, testPl) assert.Nil(t, err) - r := &vertexReconciler{ - client: cl, - scheme: scheme.Scheme, - config: reconciler.FakeGlobalConfig(t, fakeGlobalISBSvcConfig), - image: testFlowImage, - scaler: scaling.NewScaler(cl), - logger: zaptest.NewLogger(t).Sugar(), - recorder: record.NewFakeRecorder(64), - } + r := fakeReconciler(t, cl) testObj := testVertex.DeepCopy() testObj.Spec.UDF = &dfv1.UDF{ Builtin: &dfv1.Function{ diff --git a/rust/numaflow-models/src/models/abstract_vertex.rs b/rust/numaflow-models/src/models/abstract_vertex.rs index 23fb85c813..6dffd1237a 100644 --- a/rust/numaflow-models/src/models/abstract_vertex.rs +++ b/rust/numaflow-models/src/models/abstract_vertex.rs @@ -95,6 +95,8 @@ pub struct AbstractVertex { pub tolerations: Option>, #[serde(rename = "udf", skip_serializing_if = "Option::is_none")] pub udf: Option>, + #[serde(rename = 
"updateStrategy", skip_serializing_if = "Option::is_none")] + pub update_strategy: Option>, #[serde(rename = "volumes", skip_serializing_if = "Option::is_none")] pub volumes: Option>, } @@ -129,6 +131,7 @@ impl AbstractVertex { source: None, tolerations: None, udf: None, + update_strategy: None, volumes: None, } } diff --git a/rust/numaflow-models/src/models/vertex_spec.rs b/rust/numaflow-models/src/models/vertex_spec.rs index a647ecc7ae..7583c1d6ac 100644 --- a/rust/numaflow-models/src/models/vertex_spec.rs +++ b/rust/numaflow-models/src/models/vertex_spec.rs @@ -108,6 +108,8 @@ pub struct VertexSpec { pub tolerations: Option>, #[serde(rename = "udf", skip_serializing_if = "Option::is_none")] pub udf: Option>, + #[serde(rename = "updateStrategy", skip_serializing_if = "Option::is_none")] + pub update_strategy: Option>, #[serde(rename = "volumes", skip_serializing_if = "Option::is_none")] pub volumes: Option>, #[serde(rename = "watermark", skip_serializing_if = "Option::is_none")] @@ -149,6 +151,7 @@ impl VertexSpec { to_edges: None, tolerations: None, udf: None, + update_strategy: None, volumes: None, watermark: None, } diff --git a/rust/numaflow-models/src/models/vertex_status.rs b/rust/numaflow-models/src/models/vertex_status.rs index 950ffa9ba5..30cb952f6d 100644 --- a/rust/numaflow-models/src/models/vertex_status.rs +++ b/rust/numaflow-models/src/models/vertex_status.rs @@ -21,12 +21,12 @@ pub struct VertexStatus { /// Conditions are the latest available observations of a resource's current state. #[serde(rename = "conditions", skip_serializing_if = "Option::is_none")] pub conditions: Option>, - /// If not empty, indicates the version of the Vertex used to generate Pods in the sequence [0,currentReplicas). + /// If not empty, indicates the current version of the Vertex used to generate Pods. 
#[serde(rename = "currentHash", skip_serializing_if = "Option::is_none")] pub current_hash: Option, - /// The number of Pods created by the controller from the Vertex version indicated by currentHash. - #[serde(rename = "currentReplicas", skip_serializing_if = "Option::is_none")] - pub current_replicas: Option, + /// The number of desired replicas. + #[serde(rename = "desiredReplicas", skip_serializing_if = "Option::is_none")] + pub desired_replicas: Option, #[serde(rename = "lastScaledAt", skip_serializing_if = "Option::is_none")] pub last_scaled_at: Option, #[serde(rename = "message", skip_serializing_if = "Option::is_none")] @@ -46,9 +46,15 @@ pub struct VertexStatus { pub replicas: Option, #[serde(rename = "selector", skip_serializing_if = "Option::is_none")] pub selector: Option, - /// If not empty, indicates the version of the Vertx used to generate Pods in the sequence [replicas-updatedReplicas,replicas) + /// If not empty, indicates the updated version of the Vertex used to generate Pods. #[serde(rename = "updateHash", skip_serializing_if = "Option::is_none")] pub update_hash: Option, + /// The number of ready Pods created by the controller from the Vertex version indicated by updateHash. + #[serde( + rename = "updatedReadyReplicas", + skip_serializing_if = "Option::is_none" + )] + pub updated_ready_replicas: Option, /// The number of Pods created by the controller from the Vertex version indicated by updateHash. 
#[serde(rename = "updatedReplicas", skip_serializing_if = "Option::is_none")] pub updated_replicas: Option, @@ -59,7 +65,7 @@ impl VertexStatus { VertexStatus { conditions: None, current_hash: None, - current_replicas: None, + desired_replicas: None, last_scaled_at: None, message: None, observed_generation: None, @@ -69,6 +75,7 @@ impl VertexStatus { replicas: None, selector: None, update_hash: None, + updated_ready_replicas: None, updated_replicas: None, } } From 24c6553bbf4af02b6660520e51d1e8331b200ecb Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Mon, 9 Sep 2024 12:43:00 -0700 Subject: [PATCH 053/188] chore: pin dependency versions in rust model (#2048) Signed-off-by: Derek Wang --- rust/Cargo.lock | 13 ++--- rust/monovertex/Cargo.toml | 2 +- rust/numaflow-models/Cargo.toml | 2 +- rust/numaflow-models/Makefile | 2 - rust/numaflow-models/templates/Cargo.mustache | 53 ++----------------- 5 files changed, 13 insertions(+), 59 deletions(-) diff --git a/rust/Cargo.lock b/rust/Cargo.lock index 6b34934210..21d6a28a7d 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -1274,9 +1274,9 @@ dependencies = [ [[package]] name = "kube" -version = "0.93.1" +version = "0.94.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0365920075af1a2d23619c1ca801c492f2400157de42627f041a061716e76416" +checksum = "65b8611df85a1a2eed6f47bd8bcca4e2b3dc14fbf83658efd01423ca9a13b72a" dependencies = [ "k8s-openapi", "kube-client", @@ -1285,9 +1285,9 @@ dependencies = [ [[package]] name = "kube-client" -version = "0.93.1" +version = "0.94.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d81336eb3a5b10a40c97a5a97ad66622e92bad942ce05ee789edd730aa4f8603" +checksum = "93c5ee3e48ef9b8d8fdb40ddd935f8addc8a201397e3c7552edae7bc96bc0a78" dependencies = [ "base64 0.22.1", "bytes", @@ -1323,15 +1323,16 @@ dependencies = [ [[package]] name = "kube-core" -version = "0.93.1" +version = "0.94.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cce373a74d787d439063cdefab0f3672860bd7bac01a38e39019177e764a0fe6" +checksum = "7fe6e24d4cc7e32576f363986dc3dfc13e8e90731bd7a467b67fc6c4bfbf8e95" dependencies = [ "chrono", "form_urlencoded", "http 1.1.0", "k8s-openapi", "serde", + "serde-value", "serde_json", "thiserror", ] diff --git a/rust/monovertex/Cargo.toml b/rust/monovertex/Cargo.toml index 90ddc5cbec..01eb5afafd 100644 --- a/rust/monovertex/Cargo.toml +++ b/rust/monovertex/Cargo.toml @@ -33,7 +33,7 @@ pep440_rs = "0.6.6" backoff = { path = "../backoff" } parking_lot = "0.12.3" prometheus-client = "0.22.3" -kube = "0.93.1" +kube = "0.94.0" [dev-dependencies] tempfile = "3.11.0" diff --git a/rust/numaflow-models/Cargo.toml b/rust/numaflow-models/Cargo.toml index 1e133e3b08..ecc0592efe 100644 --- a/rust/numaflow-models/Cargo.toml +++ b/rust/numaflow-models/Cargo.toml @@ -8,7 +8,7 @@ edition = "2021" [dependencies] k8s-openapi = { version = "0.22.0", features = ["v1_29"] } -kube = "0.93.1" +kube = "0.94.0" serde = "^1.0" serde_derive = "^1.0" serde_json = "^1.0" diff --git a/rust/numaflow-models/Makefile b/rust/numaflow-models/Makefile index 069179f746..42cfdda255 100644 --- a/rust/numaflow-models/Makefile +++ b/rust/numaflow-models/Makefile @@ -62,6 +62,4 @@ generate: --type-mappings IntOrString="k8s_openapi::apimachinery::pkg::util::intstr::IntOrString" \ --generate-alias-as-model - cargo add kube - cargo add k8s-openapi --features v1_29 cargo fmt diff --git a/rust/numaflow-models/templates/Cargo.mustache b/rust/numaflow-models/templates/Cargo.mustache index f7d1cdeb7c..ecc0592efe 100644 --- a/rust/numaflow-models/templates/Cargo.mustache +++ b/rust/numaflow-models/templates/Cargo.mustache @@ -1,65 +1,20 @@ [package] -name = "{{{packageName}}}" -version = "{{#lambdaVersion}}{{{packageVersion}}}{{/lambdaVersion}}" -{{#infoEmail}} -authors = ["{{{.}}}"] -{{/infoEmail}} -{{^infoEmail}} +name = "numaflow-models" +version = "0.0.0-pre" authors = 
["The Numaproj Authors"] -{{/infoEmail}} -{{#appDescription}} description = "Numaflow models" -{{/appDescription}} license = "Apache-2.0 license" edition = "2021" -{{#publishRustRegistry}} -publish = ["{{.}}"] -{{/publishRustRegistry}} -{{#repositoryUrl}} -repository = "{{.}}" -{{/repositoryUrl}} -{{#documentationUrl}} -documentation = "{{.}}" -{{/documentationUrl}} -{{#homePageUrl}} -homepage = "{{.}} -{{/homePageUrl}} [dependencies] +k8s-openapi = { version = "0.22.0", features = ["v1_29"] } +kube = "0.94.0" serde = "^1.0" serde_derive = "^1.0" -{{#serdeWith}} -serde_with = "^2.0" -{{/serdeWith}} serde_json = "^1.0" url = "^2.2" uuid = { version = "^1.0", features = ["serde", "v4"] } -{{#hyper}} -hyper = { version = "~0.14", features = ["full"] } -hyper-tls = "~0.5" -http = "~0.2" -base64 = "~0.7.0" -futures = "^0.3" -{{/hyper}} -{{#withAWSV4Signature}} -aws-sigv4 = "0.3.0" -http = "0.2.5" -secrecy = "0.8.0" -{{/withAWSV4Signature}} -{{#reqwest}} -{{^supportAsync}} -[dependencies.reqwest] -version = "^0.11" -default-features = false -features = ["json", "blocking", "multipart", "rustls-tls"] -{{/supportAsync}} -{{#supportAsync}} -{{#supportMiddleware}} -reqwest-middleware = "0.2.0" -{{/supportMiddleware}} [dependencies.reqwest] version = "^0.11" default-features = false features = ["json", "multipart", "rustls-tls"] -{{/supportAsync}} -{{/reqwest}} From ba40b1500416a258fe131273d3cfc4b46a93a88f Mon Sep 17 00:00:00 2001 From: Keran Yang Date: Mon, 9 Sep 2024 17:21:35 -0400 Subject: [PATCH 054/188] fix: builtin transformer should keep the keys (#2047) Signed-off-by: Keran Yang --- .../event_time/event_time_extractor.go | 10 +++--- .../event_time/event_time_extractor_test.go | 34 ++++++++++++++----- .../transformer/builtin/filter/filter.go | 10 +++--- .../transformer/builtin/filter/filter_test.go | 10 ++++-- .../time_extraction_filter.go | 10 +++--- .../time_extraction_filter_test.go | 21 ++++++++---- 6 files changed, 61 insertions(+), 34 deletions(-) diff --git 
a/pkg/sources/transformer/builtin/event_time/event_time_extractor.go b/pkg/sources/transformer/builtin/event_time/event_time_extractor.go index ff143f2c3d..c844c518db 100644 --- a/pkg/sources/transformer/builtin/event_time/event_time_extractor.go +++ b/pkg/sources/transformer/builtin/event_time/event_time_extractor.go @@ -56,7 +56,7 @@ func New(args map[string]string) (sourcetransformer.SourceTransformFunc, error) return func(ctx context.Context, keys []string, datum sourcetransformer.Datum) sourcetransformer.Messages { log := logging.FromContext(ctx) - resultMsg, err := e.apply(datum.Value(), datum.EventTime()) + resultMsg, err := e.apply(datum.Value(), datum.EventTime(), keys) if err != nil { log.Warnf("event time extractor got an error: %v, skip updating event time...", err) } @@ -66,10 +66,10 @@ func New(args map[string]string) (sourcetransformer.SourceTransformFunc, error) // apply compiles the payload to extract the new event time. If there is any error during extraction, // we pass on the original input event time. Otherwise, we assign the new event time to the message. 
-func (e eventTimeExtractor) apply(payload []byte, et time.Time) (sourcetransformer.Message, error) { +func (e eventTimeExtractor) apply(payload []byte, et time.Time, keys []string) (sourcetransformer.Message, error) { timeStr, err := expr.EvalStr(e.expression, payload) if err != nil { - return sourcetransformer.NewMessage(payload, et), err + return sourcetransformer.NewMessage(payload, et).WithKeys(keys), err } var newEventTime time.Time @@ -80,8 +80,8 @@ func (e eventTimeExtractor) apply(payload []byte, et time.Time) (sourcetransform newEventTime, err = dateparse.ParseStrict(timeStr) } if err != nil { - return sourcetransformer.NewMessage(payload, et), err + return sourcetransformer.NewMessage(payload, et).WithKeys(keys), err } else { - return sourcetransformer.NewMessage(payload, newEventTime), nil + return sourcetransformer.NewMessage(payload, newEventTime).WithKeys(keys), nil } } diff --git a/pkg/sources/transformer/builtin/event_time/event_time_extractor_test.go b/pkg/sources/transformer/builtin/event_time/event_time_extractor_test.go index 22fc6b631e..d14e37beed 100644 --- a/pkg/sources/transformer/builtin/event_time/event_time_extractor_test.go +++ b/pkg/sources/transformer/builtin/event_time/event_time_extractor_test.go @@ -24,6 +24,8 @@ import ( "github.com/stretchr/testify/assert" ) +var _keys = []string{"test-key"} + type testDatum struct { value []byte eventTime time.Time @@ -74,7 +76,7 @@ func TestEventTimeExtractor(t *testing.T) { assert.NoError(t, err) testJsonMsg := `{"test": 21, "item": [{"id": 1, "name": "numa", "time": "2022-02-18T21:54:42.123Z"},{"id": 2, "name": "numa", "time": "2021-02-18T21:54:42.123Z"}]}` - result := handle(context.Background(), []string{"test-key"}, &testDatum{ + result := handle(context.Background(), _keys, &testDatum{ value: []byte(testJsonMsg), eventTime: time.Time{}, watermark: time.Time{}, @@ -86,6 +88,8 @@ func TestEventTimeExtractor(t *testing.T) { assert.True(t, expected.Equal(result.Items()[0].EventTime())) // 
Verify the payload remains unchanged. assert.Equal(t, testJsonMsg, string(result.Items()[0].Value())) + // Verify the keys remain unchanged. + assert.Equal(t, _keys, result.Items()[0].Keys()) }) t.Run("Json expression valid, assign a new event time to the message - format specified", func(t *testing.T) { @@ -94,7 +98,7 @@ func TestEventTimeExtractor(t *testing.T) { assert.NoError(t, err) testJsonMsg := `{"test": 21, "item": [{"id": 1, "name": "numa", "time": "2022-02-18T21:54:42.123Z"},{"id": 2, "name": "numa", "time": "2021-02-18T21:54:42.123Z"}]}` - result := handle(context.Background(), []string{"test-key"}, &testDatum{ + result := handle(context.Background(), _keys, &testDatum{ value: []byte(testJsonMsg), eventTime: time.Time{}, watermark: time.Time{}, @@ -106,6 +110,8 @@ func TestEventTimeExtractor(t *testing.T) { assert.True(t, expected.Equal(result.Items()[0].EventTime())) // Verify the payload remains unchanged. assert.Equal(t, testJsonMsg, string(result.Items()[0].Value())) + // Verify the keys remain unchanged. + assert.Equal(t, _keys, result.Items()[0].Keys()) }) t.Run("Time string not matching user-provided format, pass on the message without assigning new event time", func(t *testing.T) { @@ -114,9 +120,9 @@ func TestEventTimeExtractor(t *testing.T) { assert.NoError(t, err) testInputEventTime := time.Date(2022, 1, 4, 2, 3, 4, 5, time.UTC) - // Handler receives format as time.ANSIC but in the message, we use time.RFC3339. Format is not matched. + // Handler receives a format as time.ANSIC but in the message, we use time.RFC3339. Format is not matched. 
testJsonMsg := `{"test": 21, "item": [{"id": 1, "name": "numa", "time": "2022-02-18T21:54:42.123Z"},{"id": 2, "name": "numa", "time": "2021-02-18T21:54:42.123Z"}]}` - result := handle(context.Background(), []string{"test-key"}, &testDatum{ + result := handle(context.Background(), _keys, &testDatum{ value: []byte(testJsonMsg), eventTime: testInputEventTime, watermark: time.Time{}, @@ -126,6 +132,8 @@ func TestEventTimeExtractor(t *testing.T) { assert.Equal(t, testInputEventTime, result.Items()[0].EventTime()) // Verify the payload remains unchanged. assert.Equal(t, testJsonMsg, string(result.Items()[0].Value())) + // Verify the keys remain unchanged. + assert.Equal(t, _keys, result.Items()[0].Keys()) }) t.Run("Cannot compile json expression, pass on the message without assigning new event time", func(t *testing.T) { @@ -135,7 +143,7 @@ func TestEventTimeExtractor(t *testing.T) { testInputEventTime := time.Date(2022, 1, 4, 2, 3, 4, 5, time.UTC) testJsonMsg := `{"test": 21, "item": [{"id": 1, "name": "numa", "time": "2022-02-18T21:54:42.123Z"},{"id": 2, "name": "numa", "time": "2021-02-18T21:54:42.123Z"}]}` - result := handle(context.Background(), []string{"test-key"}, &testDatum{ + result := handle(context.Background(), _keys, &testDatum{ value: []byte(testJsonMsg), eventTime: testInputEventTime, watermark: time.Time{}, @@ -146,6 +154,8 @@ func TestEventTimeExtractor(t *testing.T) { assert.True(t, expected.Equal(result.Items()[0].EventTime())) // Verify the payload remains unchanged. assert.Equal(t, testJsonMsg, string(result.Items()[0].Value())) + // Verify the keys remain unchanged. 
+ assert.Equal(t, _keys, result.Items()[0].Keys()) }) t.Run("The time string is in epoch format with a granularity of seconds, assign a new event time to the message", func(t *testing.T) { @@ -154,9 +164,9 @@ func TestEventTimeExtractor(t *testing.T) { assert.NoError(t, err) testInputEventTime := time.Date(2022, 1, 4, 2, 3, 4, 5, time.UTC) - // Handler receives format as time.ANSIC but in the message, we use time.RFC3339. Format is not matched. + // Handler receives a format as time.ANSIC but in the message, we use time.RFC3339. Format is not matched. testJsonMsg := `{"test": 21, "item": [{"id": 1, "name": "numa", "time": "1673239888"},{"id": 2, "name": "numa", "time": "1673239888"}]}` - result := handle(context.Background(), []string{"test-key"}, &testDatum{ + result := handle(context.Background(), _keys, &testDatum{ value: []byte(testJsonMsg), eventTime: testInputEventTime, watermark: time.Time{}, @@ -168,6 +178,8 @@ func TestEventTimeExtractor(t *testing.T) { assert.True(t, expected.Equal(result.Items()[0].EventTime())) // Verify the payload remains unchanged. assert.Equal(t, testJsonMsg, string(result.Items()[0].Value())) + // Verify the keys remain unchanged. 
+ assert.Equal(t, _keys, result.Items()[0].Keys()) }) t.Run("The time string is in epoch format with a granularity of milliseconds, assign a new event time to the message", func(t *testing.T) { @@ -177,7 +189,7 @@ func TestEventTimeExtractor(t *testing.T) { testInputEventTime := time.Date(2022, 1, 4, 2, 3, 4, 5, time.UTC) testJsonMsg := `{"test": 21, "item": [{"id": 1, "name": "numa", "time": "1673239888123"},{"id": 2, "name": "numa", "time": "1673239888123"}]}` - result := handle(context.Background(), []string{"test-key"}, &testDatum{ + result := handle(context.Background(), _keys, &testDatum{ value: []byte(testJsonMsg), eventTime: testInputEventTime, watermark: time.Time{}, @@ -189,6 +201,8 @@ func TestEventTimeExtractor(t *testing.T) { assert.True(t, expected.Equal(result.Items()[0].EventTime())) // Verify the payload remains unchanged. assert.Equal(t, testJsonMsg, string(result.Items()[0].Value())) + // Verify the keys remain unchanged. + assert.Equal(t, _keys, result.Items()[0].Keys()) }) t.Run("The time string is ambiguous, pass on the message without assigning new event time", func(t *testing.T) { @@ -199,7 +213,7 @@ func TestEventTimeExtractor(t *testing.T) { testInputEventTime := time.Date(2022, 1, 4, 2, 3, 4, 5, time.UTC) // 04/08/2014 is ambiguous because it could be mm/dd/yyyy or dd/mm/yyyy. testJsonMsg := `{"test": 21, "item": [{"id": 1, "name": "numa", "time": "04/08/2014 22:05"},{"id": 2, "name": "numa", "time": "04/08/2014 22:05"}]}` - result := handle(context.Background(), []string{"test-key"}, &testDatum{ + result := handle(context.Background(), _keys, &testDatum{ value: []byte(testJsonMsg), eventTime: testInputEventTime, watermark: time.Time{}, @@ -211,5 +225,7 @@ func TestEventTimeExtractor(t *testing.T) { assert.True(t, expected.Equal(result.Items()[0].EventTime())) // Verify the payload remains unchanged. assert.Equal(t, testJsonMsg, string(result.Items()[0].Value())) + // Verify the keys remain unchanged. 
+ assert.Equal(t, _keys, result.Items()[0].Keys()) }) } diff --git a/pkg/sources/transformer/builtin/filter/filter.go b/pkg/sources/transformer/builtin/filter/filter.go index 08f47795f2..c30b99017f 100644 --- a/pkg/sources/transformer/builtin/filter/filter.go +++ b/pkg/sources/transformer/builtin/filter/filter.go @@ -32,17 +32,17 @@ type filter struct { } func New(args map[string]string) (sourcetransformer.SourceTransformFunc, error) { - expr, existing := args["expression"] + exp, existing := args["expression"] if !existing { return nil, fmt.Errorf(`missing "expression"`) } f := filter{ - expression: expr, + expression: exp, } return func(ctx context.Context, keys []string, datum sourcetransformer.Datum) sourcetransformer.Messages { log := logging.FromContext(ctx) - resultMsg, err := f.apply(datum.EventTime(), datum.Value()) + resultMsg, err := f.apply(datum.EventTime(), datum.Value(), keys) if err != nil { log.Errorf("Filter map function apply got an error: %v", err) } @@ -50,13 +50,13 @@ func New(args map[string]string) (sourcetransformer.SourceTransformFunc, error) }, nil } -func (f filter) apply(et time.Time, msg []byte) (sourcetransformer.Message, error) { +func (f filter) apply(et time.Time, msg []byte, keys []string) (sourcetransformer.Message, error) { result, err := expr.EvalBool(f.expression, msg) if err != nil { return sourcetransformer.MessageToDrop(et), err } if result { - return sourcetransformer.NewMessage(msg, et), nil + return sourcetransformer.NewMessage(msg, et).WithKeys(keys), nil } return sourcetransformer.MessageToDrop(et), nil } diff --git a/pkg/sources/transformer/builtin/filter/filter_test.go b/pkg/sources/transformer/builtin/filter/filter_test.go index 8ac227bdc2..fb0d7a5768 100644 --- a/pkg/sources/transformer/builtin/filter/filter_test.go +++ b/pkg/sources/transformer/builtin/filter/filter_test.go @@ -22,6 +22,7 @@ import ( "testing" "time" + "github.com/numaproj/numaflow-go/pkg/sourcetransformer" "github.com/stretchr/testify/assert" ) 
@@ -86,6 +87,7 @@ func TestExpression(t *testing.T) { watermark: time.Time{}, }) assert.Equal(t, jsonMsg, string(result.Items()[0].Value())) + assert.Equal(t, _keys, result.Items()[0].Keys()) }) t.Run("invalid expression", func(t *testing.T) { @@ -99,7 +101,7 @@ func TestExpression(t *testing.T) { eventTime: time.Time{}, watermark: time.Time{}, }) - assert.Equal(t, "", string(result.Items()[0].Value())) + assert.Equal(t, sourcetransformer.MessageToDrop(time.Time{}), result.Items()[0]) }) t.Run("Json expression invalid", func(t *testing.T) { @@ -113,7 +115,7 @@ func TestExpression(t *testing.T) { eventTime: time.Time{}, watermark: time.Time{}, }) - assert.Equal(t, "", string(result.Items()[0].Value())) + assert.Equal(t, sourcetransformer.MessageToDrop(time.Time{}), result.Items()[0]) }) t.Run("String expression invalid", func(t *testing.T) { @@ -127,7 +129,7 @@ func TestExpression(t *testing.T) { eventTime: time.Time{}, watermark: time.Time{}, }) - assert.Equal(t, "", string(result.Items()[0].Value())) + assert.Equal(t, sourcetransformer.MessageToDrop(time.Time{}), result.Items()[0]) }) t.Run("base64 expression valid", func(t *testing.T) { @@ -142,6 +144,7 @@ func TestExpression(t *testing.T) { watermark: time.Time{}, }) assert.Equal(t, base64Msg, string(result.Items()[0].Value())) + assert.Equal(t, _keys, result.Items()[0].Keys()) }) t.Run("event time unchanged", func(t *testing.T) { @@ -157,5 +160,6 @@ func TestExpression(t *testing.T) { watermark: time.Time{}, }) assert.Equal(t, testEventTime, result.Items()[0].EventTime()) + assert.Equal(t, _keys, result.Items()[0].Keys()) }) } diff --git a/pkg/sources/transformer/builtin/time_extraction_filter/time_extraction_filter.go b/pkg/sources/transformer/builtin/time_extraction_filter/time_extraction_filter.go index 112c5de25c..bb1515cb1e 100644 --- a/pkg/sources/transformer/builtin/time_extraction_filter/time_extraction_filter.go +++ b/pkg/sources/transformer/builtin/time_extraction_filter/time_extraction_filter.go @@ 
-59,7 +59,7 @@ func New(args map[string]string) (sourcetransformer.SourceTransformFunc, error) return func(ctx context.Context, keys []string, datum sourcetransformer.Datum) sourcetransformer.Messages { log := logging.FromContext(ctx) - resultMsg, err := e.apply(datum.EventTime(), datum.Value()) + resultMsg, err := e.apply(datum.EventTime(), datum.Value(), keys) if err != nil { log.Errorf("Filter or event time extractor got an error: %v", err) } @@ -68,7 +68,7 @@ func New(args map[string]string) (sourcetransformer.SourceTransformFunc, error) } -func (e expressions) apply(et time.Time, payload []byte) (sourcetransformer.Message, error) { +func (e expressions) apply(et time.Time, payload []byte, keys []string) (sourcetransformer.Message, error) { result, err := expr.EvalBool(e.filterExpr, payload) if err != nil { return sourcetransformer.MessageToDrop(et), err @@ -76,7 +76,7 @@ func (e expressions) apply(et time.Time, payload []byte) (sourcetransformer.Mess if result { timeStr, err := expr.EvalStr(e.eventTimeExpr, payload) if err != nil { - return sourcetransformer.NewMessage(payload, et), err + return sourcetransformer.NewMessage(payload, et).WithKeys(keys), err } var newEventTime time.Time time.Local, _ = time.LoadLocation("UTC") @@ -86,9 +86,9 @@ func (e expressions) apply(et time.Time, payload []byte) (sourcetransformer.Mess newEventTime, err = dateparse.ParseStrict(timeStr) } if err != nil { - return sourcetransformer.NewMessage(payload, et), err + return sourcetransformer.NewMessage(payload, et).WithKeys(keys), err } else { - return sourcetransformer.NewMessage(payload, newEventTime), nil + return sourcetransformer.NewMessage(payload, newEventTime).WithKeys(keys), nil } } return sourcetransformer.MessageToDrop(et), nil diff --git a/pkg/sources/transformer/builtin/time_extraction_filter/time_extraction_filter_test.go b/pkg/sources/transformer/builtin/time_extraction_filter/time_extraction_filter_test.go index 02631c25cd..a178ce251b 100644 --- 
a/pkg/sources/transformer/builtin/time_extraction_filter/time_extraction_filter_test.go +++ b/pkg/sources/transformer/builtin/time_extraction_filter/time_extraction_filter_test.go @@ -21,9 +21,12 @@ import ( "testing" "time" + "github.com/numaproj/numaflow-go/pkg/sourcetransformer" "github.com/stretchr/testify/assert" ) +var _keys = []string{"test-key"} + type testDatum struct { value []byte eventTime time.Time @@ -66,7 +69,6 @@ var ( ) func TestFilterEventTime(t *testing.T) { - t.Run("Missing both expressions, return error", func(t *testing.T) { _, err := New(map[string]string{}) assert.Error(t, err) @@ -89,14 +91,16 @@ func TestFilterEventTime(t *testing.T) { handle, err := New(map[string]string{"filterExpr": "int(json(payload).item[1].id) == 2", "eventTimeExpr": "json(payload).item[1].time", "eventTimeFormat": time.RFC3339}) assert.NoError(t, err) - result := handle(context.Background(), []string{"test-key"}, &testDatum{ + result := handle(context.Background(), _keys, &testDatum{ value: []byte(testJsonMsg), eventTime: time.Time{}, watermark: time.Time{}, }) - // check that messsage has not changed + // check that message has not changed assert.Equal(t, testJsonMsg, string(result.Items()[0].Value())) + // check that keys have not changed + assert.Equal(t, _keys, result.Items()[0].Keys()) // check that event time has changed time.Local, _ = time.LoadLocation("UTC") @@ -108,13 +112,13 @@ func TestFilterEventTime(t *testing.T) { handle, err := New(map[string]string{"filterExpr": "int(json(payload).item[1].id) == 3", "eventTimeExpr": "json(payload).item[1].time", "eventTimeFormat": time.RFC3339}) assert.NoError(t, err) - result := handle(context.Background(), []string{"test-key"}, &testDatum{ + result := handle(context.Background(), _keys, &testDatum{ value: []byte(testJsonMsg), eventTime: time.Time{}, watermark: time.Time{}, }) - assert.Equal(t, "", string(result.Items()[0].Value())) + assert.Equal(t, sourcetransformer.MessageToDrop(time.Time{}), result.Items()[0]) 
}) t.Run("Valid JSON expression for filter, incorrect format to eventTime", func(t *testing.T) { @@ -122,14 +126,17 @@ func TestFilterEventTime(t *testing.T) { assert.NoError(t, err) testInputEventTime := time.Date(2022, 1, 4, 2, 3, 4, 5, time.UTC) - result := handle(context.Background(), []string{"test-key"}, &testDatum{ + result := handle(context.Background(), _keys, &testDatum{ value: []byte(testJsonMsg), eventTime: testInputEventTime, watermark: time.Time{}, }) + // check that message event time has not changed assert.Equal(t, testInputEventTime, result.Items()[0].EventTime()) + // check that message has not changed assert.Equal(t, testJsonMsg, string(result.Items()[0].Value())) + // check that keys have not been added + assert.Equal(t, _keys, result.Items()[0].Keys()) }) - } From 0811eb4aff59dda8b9143a7420b2beb415143d27 Mon Sep 17 00:00:00 2001 From: Sreekanth Date: Tue, 10 Sep 2024 20:57:25 +0530 Subject: [PATCH 055/188] fix: Fix numaflow-rs binary location in image (#2050) Signed-off-by: Sreekanth --- .github/workflows/nightly-build.yml | 4 ++-- .github/workflows/release.yml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/nightly-build.yml b/.github/workflows/nightly-build.yml index 641bc2d593..51bdb6bfbc 100644 --- a/.github/workflows/nightly-build.yml +++ b/.github/workflows/nightly-build.yml @@ -136,13 +136,13 @@ jobs: uses: actions/download-artifact@v3 with: name: numaflow-rs-linux-amd64 - path: dist/numaflow-rs-linux-amd64 + path: dist/ - name: Download Rust arm64 binaries uses: actions/download-artifact@v3 with: name: numaflow-rs-linux-arm64 - path: dist/numaflow-rs-linux-arm64 + path: dist/ - name: Registry Login uses: docker/login-action@v2 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 5c17591fe8..dcab109e0e 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -120,13 +120,13 @@ jobs: uses: actions/download-artifact@v3 with: name: 
numaflow-rs-linux-amd64 - path: dist/numaflow-rs-linux-amd64 + path: dist/ - name: Download Rust arm64 binaries uses: actions/download-artifact@v3 with: name: numaflow-rs-linux-arm64 - path: dist/numaflow-rs-linux-arm64 + path: dist/ - name: Registry Login uses: docker/login-action@v2 From 49b733e68a895048a94e67fc082bded2de5872f4 Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Wed, 11 Sep 2024 11:47:26 -0700 Subject: [PATCH 056/188] chore: use readyReplicas to calculate desired replicas (#2052) Signed-off-by: Derek Wang --- pkg/reconciler/monovertex/scaling/scaling.go | 28 +++--- pkg/reconciler/vertex/scaling/scaling.go | 24 +++-- pkg/reconciler/vertex/scaling/scaling_test.go | 99 ++++++++++--------- 3 files changed, 86 insertions(+), 65 deletions(-) diff --git a/pkg/reconciler/monovertex/scaling/scaling.go b/pkg/reconciler/monovertex/scaling/scaling.go index d523800cdd..481408672c 100644 --- a/pkg/reconciler/monovertex/scaling/scaling.go +++ b/pkg/reconciler/monovertex/scaling/scaling.go @@ -183,16 +183,6 @@ func (s *Scaler) scaleOneMonoVertex(ctx context.Context, key string, worker int) return nil } - var err error - daemonClient, _ := s.mvtxDaemonClientsCache.Get(monoVtx.GetDaemonServiceURL()) - if daemonClient == nil { - daemonClient, err = mvtxdaemonclient.NewGRPCClient(monoVtx.GetDaemonServiceURL()) - if err != nil { - return fmt.Errorf("failed to get daemon service client for MonoVertex %s, %w", monoVtx.Name, err) - } - s.mvtxDaemonClientsCache.Add(monoVtx.GetDaemonServiceURL(), daemonClient) - } - if monoVtx.Status.Replicas == 0 { // Was scaled to 0 // Periodically wake them up from 0 replicas to 1, to peek for the incoming messages if secondsSinceLastScale >= float64(monoVtx.Spec.Scale.GetZeroReplicaSleepSeconds()) { @@ -204,6 +194,22 @@ func (s *Scaler) scaleOneMonoVertex(ctx context.Context, key string, worker int) } } + // There's no ready pods, skip scaling + if monoVtx.Status.ReadyReplicas == 0 { + log.Infof("MonoVertex has no ready replicas, skip 
scaling.") + return nil + } + + var err error + daemonClient, _ := s.mvtxDaemonClientsCache.Get(monoVtx.GetDaemonServiceURL()) + if daemonClient == nil { + daemonClient, err = mvtxdaemonclient.NewGRPCClient(monoVtx.GetDaemonServiceURL()) + if err != nil { + return fmt.Errorf("failed to get daemon service client for MonoVertex %s, %w", monoVtx.Name, err) + } + s.mvtxDaemonClientsCache.Add(monoVtx.GetDaemonServiceURL(), daemonClient) + } + vMetrics, err := daemonClient.GetMonoVertexMetrics(ctx) if err != nil { return fmt.Errorf("failed to get metrics of mono vertex key %q, %w", key, err) @@ -282,7 +288,7 @@ func (s *Scaler) desiredReplicas(_ context.Context, monoVtx *dfv1.MonoVertex, pr var desired int32 // We calculate the time of finishing processing the pending messages, // and then we know how many replicas are needed to get them done in target seconds. - desired = int32(math.Round(((float64(pending) / processingRate) / float64(monoVtx.Spec.Scale.GetTargetProcessingSeconds())) * float64(monoVtx.Status.Replicas))) + desired = int32(math.Round(((float64(pending) / processingRate) / float64(monoVtx.Spec.Scale.GetTargetProcessingSeconds())) * float64(monoVtx.Status.ReadyReplicas))) // we only scale down to zero when the pending and rate are both zero. if desired == 0 { diff --git a/pkg/reconciler/vertex/scaling/scaling.go b/pkg/reconciler/vertex/scaling/scaling.go index 5ea6b7e6d5..eed5981e89 100644 --- a/pkg/reconciler/vertex/scaling/scaling.go +++ b/pkg/reconciler/vertex/scaling/scaling.go @@ -170,6 +170,14 @@ func (s *Scaler) scaleOneVertex(ctx context.Context, key string, worker int) err s.StopWatching(key) // Remove it in case it's watched. 
return nil } + if vertex.Status.Phase != dfv1.VertexPhaseRunning { + log.Infof("Vertex not in Running phase, skip scaling.") + return nil + } + if vertex.Status.UpdateHash != vertex.Status.CurrentHash && vertex.Status.UpdateHash != "" { + log.Info("Vertex is updating, skip scaling.") + return nil + } secondsSinceLastScale := time.Since(vertex.Status.LastScaledAt.Time).Seconds() scaleDownCooldown := float64(vertex.Spec.Scale.GetScaleDownCooldownSeconds()) scaleUpCooldown := float64(vertex.Spec.Scale.GetScaleUpCooldownSeconds()) @@ -178,10 +186,6 @@ func (s *Scaler) scaleOneVertex(ctx context.Context, key string, worker int) err log.Infof("Cooldown period, skip scaling.") return nil } - if vertex.Status.Phase != dfv1.VertexPhaseRunning { - log.Infof("Vertex not in Running phase, skip scaling.") - return nil - } pl := &dfv1.Pipeline{} if err := s.client.Get(ctx, client.ObjectKey{Namespace: namespace, Name: vertex.Spec.PipelineName}, pl); err != nil { if apierrors.IsNotFound(err) { @@ -246,6 +250,12 @@ func (s *Scaler) scaleOneVertex(ctx context.Context, key string, worker int) err } } + // Vertex pods are not ready yet. + if vertex.Status.ReadyReplicas == 0 { + log.Infof("Vertex %q has no ready replicas, skip scaling.", vertex.Name) + return nil + } + vMetrics, err := daemonClient.GetVertexMetrics(ctx, pl.Name, vertex.Spec.Name) if err != nil { return fmt.Errorf("failed to get metrics of vertex key %q, %w", key, err) @@ -289,7 +299,7 @@ func (s *Scaler) scaleOneVertex(ctx context.Context, key string, worker int) err } var desired int32 - current := int32(vertex.GetReplicas()) + current := int32(vertex.Status.Replicas) // if both totalRate and totalPending are 0, we scale down to 0 // since pending contains the pending acks, we can scale down to 0. 
if totalPending == 0 && totalRate == 0 { @@ -370,7 +380,7 @@ func (s *Scaler) desiredReplicas(_ context.Context, vertex *dfv1.Vertex, partiti if vertex.IsASource() { // For sources, we calculate the time of finishing processing the pending messages, // and then we know how many replicas are needed to get them done in target seconds. - desired = int32(math.Round(((float64(pending) / rate) / float64(vertex.Spec.Scale.GetTargetProcessingSeconds())) * float64(vertex.Status.Replicas))) + desired = int32(math.Round(((float64(pending) / rate) / float64(vertex.Spec.Scale.GetTargetProcessingSeconds())) * float64(vertex.Status.ReadyReplicas))) } else { // For UDF and sinks, we calculate the available buffer length, and consider it is the contribution of current replicas, // then we figure out how many replicas are needed to keep the available buffer length at target level. @@ -378,7 +388,7 @@ func (s *Scaler) desiredReplicas(_ context.Context, vertex *dfv1.Vertex, partiti // Simply return current replica number + max allowed if the pending messages are more than available buffer length desired = int32(vertex.Status.Replicas) + int32(vertex.Spec.Scale.GetReplicasPerScaleUp()) } else { - singleReplicaContribution := float64(partitionBufferLengths[i]-pending) / float64(vertex.Status.Replicas) + singleReplicaContribution := float64(partitionBufferLengths[i]-pending) / float64(vertex.Status.ReadyReplicas) desired = int32(math.Round(float64(partitionAvailableBufferLengths[i]) / singleReplicaContribution)) } } diff --git a/pkg/reconciler/vertex/scaling/scaling_test.go b/pkg/reconciler/vertex/scaling/scaling_test.go index 9a2d14554b..0ea80cef17 100644 --- a/pkg/reconciler/vertex/scaling/scaling_test.go +++ b/pkg/reconciler/vertex/scaling/scaling_test.go @@ -27,6 +27,23 @@ import ( dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" ) +var ( + fakeVertex = &dfv1.Vertex{ + Spec: dfv1.VertexSpec{ + Replicas: ptr.To[int32](3), + AbstractVertex: dfv1.AbstractVertex{ + Scale: 
dfv1.Scale{ + TargetProcessingSeconds: ptr.To[uint32](1), + }, + }, + }, + Status: dfv1.VertexStatus{ + Replicas: uint32(3), + ReadyReplicas: uint32(2), + }, + } +) + func Test_BasicOperations(t *testing.T) { cl := fake.NewClientBuilder().Build() s := NewScaler(cl) @@ -39,53 +56,40 @@ func Test_BasicOperations(t *testing.T) { } func Test_desiredReplicasSinglePartition(t *testing.T) { - cl := fake.NewClientBuilder().Build() - s := NewScaler(cl) - one := uint32(1) - src := &dfv1.Vertex{ - Spec: dfv1.VertexSpec{ - Replicas: ptr.To[int32](2), - AbstractVertex: dfv1.AbstractVertex{ - Source: &dfv1.Source{ - Kafka: &dfv1.KafkaSource{}, - }, - Scale: dfv1.Scale{ - TargetProcessingSeconds: &one, - }, - }, - }, - Status: dfv1.VertexStatus{ - Replicas: uint32(2), - }, - } - assert.Equal(t, int32(1), s.desiredReplicas(context.TODO(), src, []float64{0}, []int64{0}, []int64{10000}, []int64{5000})) - assert.Equal(t, int32(8), s.desiredReplicas(context.TODO(), src, []float64{2500}, []int64{10010}, []int64{30000}, []int64{20000})) - assert.Equal(t, int32(8), s.desiredReplicas(context.TODO(), src, []float64{2500}, []int64{9950}, []int64{30000}, []int64{20000})) - assert.Equal(t, int32(7), s.desiredReplicas(context.TODO(), src, []float64{2500}, []int64{8751}, []int64{30000}, []int64{20000})) - assert.Equal(t, int32(7), s.desiredReplicas(context.TODO(), src, []float64{2500}, []int64{8749}, []int64{30000}, []int64{20000})) - assert.Equal(t, int32(1), s.desiredReplicas(context.TODO(), src, []float64{0}, []int64{9950}, []int64{30000}, []int64{20000})) - assert.Equal(t, int32(1), s.desiredReplicas(context.TODO(), src, []float64{2500}, []int64{2}, []int64{30000}, []int64{20000})) - assert.Equal(t, int32(1), s.desiredReplicas(context.TODO(), src, []float64{2500}, []int64{0}, []int64{30000}, []int64{20000})) - udf := &dfv1.Vertex{ - Spec: dfv1.VertexSpec{ - Replicas: ptr.To[int32](2), - AbstractVertex: dfv1.AbstractVertex{ - UDF: &dfv1.UDF{}, - }, - }, - Status: dfv1.VertexStatus{ - 
Replicas: uint32(2), - }, - } - assert.Equal(t, int32(1), s.desiredReplicas(context.TODO(), udf, []float64{0}, []int64{0}, []int64{10000}, []int64{5000})) - assert.Equal(t, int32(1), s.desiredReplicas(context.TODO(), udf, []float64{250}, []int64{10000}, []int64{20000}, []int64{5000})) - assert.Equal(t, int32(1), s.desiredReplicas(context.TODO(), udf, []float64{250}, []int64{10000}, []int64{20000}, []int64{6000})) - assert.Equal(t, int32(2), s.desiredReplicas(context.TODO(), udf, []float64{250}, []int64{10000}, []int64{20000}, []int64{7500})) - assert.Equal(t, int32(2), s.desiredReplicas(context.TODO(), udf, []float64{250}, []int64{10000}, []int64{20000}, []int64{7900})) - assert.Equal(t, int32(2), s.desiredReplicas(context.TODO(), udf, []float64{250}, []int64{10000}, []int64{20000}, []int64{10000})) - assert.Equal(t, int32(3), s.desiredReplicas(context.TODO(), udf, []float64{250}, []int64{10000}, []int64{20000}, []int64{12500})) - assert.Equal(t, int32(3), s.desiredReplicas(context.TODO(), udf, []float64{250}, []int64{10000}, []int64{20000}, []int64{12550})) + t.Run("test src", func(t *testing.T) { + cl := fake.NewClientBuilder().Build() + s := NewScaler(cl) + src := fakeVertex.DeepCopy() + src.Spec.Source = &dfv1.Source{ + Kafka: &dfv1.KafkaSource{}, + } + assert.Equal(t, int32(1), s.desiredReplicas(context.TODO(), src, []float64{0}, []int64{0}, []int64{10000}, []int64{5000})) + assert.Equal(t, int32(8), s.desiredReplicas(context.TODO(), src, []float64{2500}, []int64{10010}, []int64{30000}, []int64{20000})) + assert.Equal(t, int32(8), s.desiredReplicas(context.TODO(), src, []float64{2500}, []int64{9950}, []int64{30000}, []int64{20000})) + assert.Equal(t, int32(7), s.desiredReplicas(context.TODO(), src, []float64{2500}, []int64{8751}, []int64{30000}, []int64{20000})) + assert.Equal(t, int32(7), s.desiredReplicas(context.TODO(), src, []float64{2500}, []int64{8749}, []int64{30000}, []int64{20000})) + assert.Equal(t, int32(1), s.desiredReplicas(context.TODO(), src, 
[]float64{0}, []int64{9950}, []int64{30000}, []int64{20000})) + assert.Equal(t, int32(1), s.desiredReplicas(context.TODO(), src, []float64{2500}, []int64{2}, []int64{30000}, []int64{20000})) + assert.Equal(t, int32(1), s.desiredReplicas(context.TODO(), src, []float64{2500}, []int64{0}, []int64{30000}, []int64{20000})) + + }) + + t.Run("test udf", func(t *testing.T) { + cl := fake.NewClientBuilder().Build() + s := NewScaler(cl) + udf := fakeVertex.DeepCopy() + udf.Spec.UDF = &dfv1.UDF{} + assert.Equal(t, int32(1), s.desiredReplicas(context.TODO(), udf, []float64{0}, []int64{0}, []int64{10000}, []int64{5000})) + assert.Equal(t, int32(1), s.desiredReplicas(context.TODO(), udf, []float64{250}, []int64{10000}, []int64{20000}, []int64{5000})) + assert.Equal(t, int32(1), s.desiredReplicas(context.TODO(), udf, []float64{250}, []int64{10000}, []int64{20000}, []int64{6000})) + assert.Equal(t, int32(2), s.desiredReplicas(context.TODO(), udf, []float64{250}, []int64{10000}, []int64{20000}, []int64{7500})) + assert.Equal(t, int32(2), s.desiredReplicas(context.TODO(), udf, []float64{250}, []int64{10000}, []int64{20000}, []int64{7900})) + assert.Equal(t, int32(2), s.desiredReplicas(context.TODO(), udf, []float64{250}, []int64{10000}, []int64{20000}, []int64{10000})) + assert.Equal(t, int32(3), s.desiredReplicas(context.TODO(), udf, []float64{250}, []int64{10000}, []int64{20000}, []int64{12500})) + assert.Equal(t, int32(3), s.desiredReplicas(context.TODO(), udf, []float64{250}, []int64{10000}, []int64{20000}, []int64{12550})) + }) + } func Test_desiredReplicasMultiplePartitions(t *testing.T) { @@ -99,7 +103,8 @@ func Test_desiredReplicasMultiplePartitions(t *testing.T) { }, }, Status: dfv1.VertexStatus{ - Replicas: uint32(2), + Replicas: uint32(2), + ReadyReplicas: uint32(2), }, } From f00685a15983330980447113086f31eede200276 Mon Sep 17 00:00:00 2001 From: Keran Yang Date: Fri, 13 Sep 2024 22:58:58 -0400 Subject: [PATCH 057/188] chore: remove server info dependencies on go sdk 
(#2060) Signed-off-by: Keran Yang --- api/json-schema/schema.json | 2 +- api/openapi-spec/swagger.json | 2 +- docs/APIs.md | 8 +- pkg/apis/numaflow/v1alpha1/generated.proto | 2 +- .../numaflow/v1alpha1/mono_vertex_types.go | 2 +- .../numaflow/v1alpha1/openapi_generated.go | 2 +- pkg/sdkclient/batchmapper/client.go | 7 +- pkg/sdkclient/grpc/grpc_utils.go | 7 +- pkg/sdkclient/mapper/client.go | 5 +- pkg/sdkclient/mapstreamer/client.go | 6 +- pkg/sdkclient/reducer/client.go | 6 +- pkg/sdkclient/serverinfo/serverinfo.go | 103 ++++-- pkg/sdkclient/serverinfo/serverinfo_test.go | 136 ++++++- pkg/sdkclient/serverinfo/types.go | 69 ++++ pkg/sdkclient/serverinfo/versions.go | 29 -- pkg/sdkclient/sessionreducer/client.go | 5 +- pkg/sdkclient/sideinput/client.go | 6 +- pkg/sdkclient/sinker/client.go | 6 +- pkg/sdkclient/source/client.go | 6 +- pkg/sdkclient/sourcetransformer/client.go | 5 +- pkg/sideinputs/manager/manager.go | 4 +- pkg/sinks/sink.go | 6 +- pkg/sources/source.go | 6 +- pkg/udf/map_udf.go | 12 +- pkg/udf/reduce_udf.go | 16 +- rust/Cargo.lock | 350 +++++++++--------- rust/monovertex/src/lib.rs | 8 +- rust/monovertex/src/server_info.rs | 34 +- 28 files changed, 531 insertions(+), 319 deletions(-) create mode 100644 pkg/sdkclient/serverinfo/types.go delete mode 100644 pkg/sdkclient/serverinfo/versions.go diff --git a/api/json-schema/schema.json b/api/json-schema/schema.json index bf1f28f594..fefd397d8c 100644 --- a/api/json-schema/schema.json +++ b/api/json-schema/schema.json @@ -19203,7 +19203,7 @@ }, "limits": { "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.MonoVertexLimits", - "description": "Limits define the limitations such as buffer read batch size for all the vertices of a pipeline, will override pipeline level settings" + "description": "Limits define the limitations such as read batch size for the mono vertex." 
}, "metadata": { "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.Metadata", diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 91e6e43fb6..f688e51c8d 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -19198,7 +19198,7 @@ } }, "limits": { - "description": "Limits define the limitations such as buffer read batch size for all the vertices of a pipeline, will override pipeline level settings", + "description": "Limits define the limitations such as read batch size for the mono vertex.", "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.MonoVertexLimits" }, "metadata": { diff --git a/docs/APIs.md b/docs/APIs.md index da6ab6eeb1..d5deaa76b2 100644 --- a/docs/APIs.md +++ b/docs/APIs.md @@ -5720,8 +5720,8 @@ MonoVertexLimits (Optional)

-Limits define the limitations such as buffer read batch size for all the -vertices of a pipeline, will override pipeline level settings +Limits define the limitations such as read batch size for the mono +vertex.

@@ -6109,8 +6109,8 @@ MonoVertexLimits (Optional)

-Limits define the limitations such as buffer read batch size for all the -vertices of a pipeline, will override pipeline level settings +Limits define the limitations such as read batch size for the mono +vertex.

diff --git a/pkg/apis/numaflow/v1alpha1/generated.proto b/pkg/apis/numaflow/v1alpha1/generated.proto index c927033e7e..70936b9d47 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.proto +++ b/pkg/apis/numaflow/v1alpha1/generated.proto @@ -928,7 +928,7 @@ message MonoVertexSpec { // +patchMergeKey=name repeated k8s.io.api.core.v1.Volume volumes = 6; - // Limits define the limitations such as buffer read batch size for all the vertices of a pipeline, will override pipeline level settings + // Limits define the limitations such as read batch size for the mono vertex. // +optional optional MonoVertexLimits limits = 7; diff --git a/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go index 677ec4fc5c..934d497878 100644 --- a/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go +++ b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go @@ -408,7 +408,7 @@ type MonoVertexSpec struct { // +patchStrategy=merge // +patchMergeKey=name Volumes []corev1.Volume `json:"volumes,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,6,rep,name=volumes"` - // Limits define the limitations such as buffer read batch size for all the vertices of a pipeline, will override pipeline level settings + // Limits define the limitations such as read batch size for the mono vertex. 
// +optional Limits *MonoVertexLimits `json:"limits,omitempty" protobuf:"bytes,7,opt,name=limits"` // Settings for autoscaling diff --git a/pkg/apis/numaflow/v1alpha1/openapi_generated.go b/pkg/apis/numaflow/v1alpha1/openapi_generated.go index 50a03d54a3..5987162567 100644 --- a/pkg/apis/numaflow/v1alpha1/openapi_generated.go +++ b/pkg/apis/numaflow/v1alpha1/openapi_generated.go @@ -3296,7 +3296,7 @@ func schema_pkg_apis_numaflow_v1alpha1_MonoVertexSpec(ref common.ReferenceCallba }, "limits": { SchemaProps: spec.SchemaProps{ - Description: "Limits define the limitations such as buffer read batch size for all the vertices of a pipeline, will override pipeline level settings", + Description: "Limits define the limitations such as read batch size for the mono vertex.", Ref: ref("github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.MonoVertexLimits"), }, }, diff --git a/pkg/sdkclient/batchmapper/client.go b/pkg/sdkclient/batchmapper/client.go index 7c6db2f608..5cc1718492 100644 --- a/pkg/sdkclient/batchmapper/client.go +++ b/pkg/sdkclient/batchmapper/client.go @@ -21,15 +21,14 @@ import ( "errors" "io" + batchmappb "github.com/numaproj/numaflow-go/pkg/apis/proto/batchmap/v1" "google.golang.org/grpc" "google.golang.org/protobuf/types/known/emptypb" - batchmappb "github.com/numaproj/numaflow-go/pkg/apis/proto/batchmap/v1" - "github.com/numaproj/numaflow-go/pkg/info" - "github.com/numaproj/numaflow/pkg/sdkclient" sdkerr "github.com/numaproj/numaflow/pkg/sdkclient/error" grpcutil "github.com/numaproj/numaflow/pkg/sdkclient/grpc" + "github.com/numaproj/numaflow/pkg/sdkclient/serverinfo" ) // client contains the grpc connection and the grpc client. @@ -39,7 +38,7 @@ type client struct { } // New creates a new client object. 
-func New(serverInfo *info.ServerInfo, inputOptions ...sdkclient.Option) (Client, error) { +func New(serverInfo *serverinfo.ServerInfo, inputOptions ...sdkclient.Option) (Client, error) { var opts = sdkclient.DefaultOptions(sdkclient.BatchMapAddr) for _, inputOption := range inputOptions { diff --git a/pkg/sdkclient/grpc/grpc_utils.go b/pkg/sdkclient/grpc/grpc_utils.go index 6d3574a290..42fba83e86 100644 --- a/pkg/sdkclient/grpc/grpc_utils.go +++ b/pkg/sdkclient/grpc/grpc_utils.go @@ -21,22 +21,21 @@ import ( "log" "strconv" - "github.com/numaproj/numaflow-go/pkg/info" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" resolver "github.com/numaproj/numaflow/pkg/sdkclient/grpc_resolver" - sdkserverinfo "github.com/numaproj/numaflow/pkg/sdkclient/serverinfo" + "github.com/numaproj/numaflow/pkg/sdkclient/serverinfo" ) // ConnectToServer connects to the server with the given socket address based on the server info protocol. -func ConnectToServer(udsSockAddr string, serverInfo *info.ServerInfo, maxMessageSize int) (*grpc.ClientConn, error) { +func ConnectToServer(udsSockAddr string, serverInfo *serverinfo.ServerInfo, maxMessageSize int) (*grpc.ClientConn, error) { var conn *grpc.ClientConn var err error var sockAddr string // Check if Multiproc server mode is enabled - if multiProcServer, ok := serverInfo.Metadata[sdkserverinfo.MultiProcMetadata]; ok { + if multiProcServer, ok := serverInfo.Metadata[serverinfo.MultiProcKey]; ok { // Extract the server ports from the server info file numServers, _ := strconv.Atoi(multiProcServer) // In Multiprocessing server mode we have multiple servers forks diff --git a/pkg/sdkclient/mapper/client.go b/pkg/sdkclient/mapper/client.go index d22c852906..07ef848a09 100644 --- a/pkg/sdkclient/mapper/client.go +++ b/pkg/sdkclient/mapper/client.go @@ -23,10 +23,11 @@ import ( "google.golang.org/protobuf/types/known/emptypb" mappb "github.com/numaproj/numaflow-go/pkg/apis/proto/map/v1" - 
"github.com/numaproj/numaflow-go/pkg/info" + "github.com/numaproj/numaflow/pkg/sdkclient" sdkerror "github.com/numaproj/numaflow/pkg/sdkclient/error" grpcutil "github.com/numaproj/numaflow/pkg/sdkclient/grpc" + "github.com/numaproj/numaflow/pkg/sdkclient/serverinfo" ) // client contains the grpc connection and the grpc client. @@ -36,7 +37,7 @@ type client struct { } // New creates a new client object. -func New(serverInfo *info.ServerInfo, inputOptions ...sdkclient.Option) (Client, error) { +func New(serverInfo *serverinfo.ServerInfo, inputOptions ...sdkclient.Option) (Client, error) { var opts = sdkclient.DefaultOptions(sdkclient.MapAddr) for _, inputOption := range inputOptions { diff --git a/pkg/sdkclient/mapstreamer/client.go b/pkg/sdkclient/mapstreamer/client.go index 9b512d22ad..ff5d07a7a6 100644 --- a/pkg/sdkclient/mapstreamer/client.go +++ b/pkg/sdkclient/mapstreamer/client.go @@ -21,14 +21,14 @@ import ( "fmt" "io" + mapstreampb "github.com/numaproj/numaflow-go/pkg/apis/proto/mapstream/v1" "google.golang.org/grpc" "google.golang.org/protobuf/types/known/emptypb" - mapstreampb "github.com/numaproj/numaflow-go/pkg/apis/proto/mapstream/v1" - "github.com/numaproj/numaflow-go/pkg/info" "github.com/numaproj/numaflow/pkg/sdkclient" sdkerror "github.com/numaproj/numaflow/pkg/sdkclient/error" grpcutil "github.com/numaproj/numaflow/pkg/sdkclient/grpc" + "github.com/numaproj/numaflow/pkg/sdkclient/serverinfo" ) // client contains the grpc connection and the grpc client. @@ -38,7 +38,7 @@ type client struct { } // New creates a new client object. 
-func New(serverInfo *info.ServerInfo, inputOptions ...sdkclient.Option) (Client, error) { +func New(serverInfo *serverinfo.ServerInfo, inputOptions ...sdkclient.Option) (Client, error) { var opts = sdkclient.DefaultOptions(sdkclient.MapStreamAddr) for _, inputOption := range inputOptions { diff --git a/pkg/sdkclient/reducer/client.go b/pkg/sdkclient/reducer/client.go index 64580c831d..6825fdc4c2 100644 --- a/pkg/sdkclient/reducer/client.go +++ b/pkg/sdkclient/reducer/client.go @@ -21,14 +21,14 @@ import ( "errors" "io" + reducepb "github.com/numaproj/numaflow-go/pkg/apis/proto/reduce/v1" "google.golang.org/grpc" "google.golang.org/protobuf/types/known/emptypb" - reducepb "github.com/numaproj/numaflow-go/pkg/apis/proto/reduce/v1" - "github.com/numaproj/numaflow-go/pkg/info" "github.com/numaproj/numaflow/pkg/sdkclient" sdkerr "github.com/numaproj/numaflow/pkg/sdkclient/error" grpcutil "github.com/numaproj/numaflow/pkg/sdkclient/grpc" + "github.com/numaproj/numaflow/pkg/sdkclient/serverinfo" ) // client contains the grpc connection and the grpc client. @@ -38,7 +38,7 @@ type client struct { } // New creates a new client object. 
-func New(serverInfo *info.ServerInfo, inputOptions ...sdkclient.Option) (Client, error) { +func New(serverInfo *serverinfo.ServerInfo, inputOptions ...sdkclient.Option) (Client, error) { var opts = sdkclient.DefaultOptions(sdkclient.ReduceAddr) for _, inputOption := range inputOptions { diff --git a/pkg/sdkclient/serverinfo/serverinfo.go b/pkg/sdkclient/serverinfo/serverinfo.go index aa1cdde29d..932ab2ff50 100644 --- a/pkg/sdkclient/serverinfo/serverinfo.go +++ b/pkg/sdkclient/serverinfo/serverinfo.go @@ -18,39 +18,23 @@ package serverinfo import ( "context" + "encoding/json" "fmt" "log" + "os" "strings" "time" "github.com/Masterminds/semver/v3" pep440 "github.com/aquasecurity/go-pep440-version" - "github.com/numaproj/numaflow-go/pkg/info" - "github.com/numaproj/numaflow" ) -// Metadata keys used in the server info file -const ( - // MultiProcMetadata is the field used to indicate that MultiProc map mode is enabled - // The value contains the number of servers spawned. - MultiProcMetadata = "MULTIPROC" - // MapModeMetadata field is used to indicate which map mode is enabled - // If none is set, we consider unary map as default - MapModeMetadata = "MAP_MODE" -) - -type MapMode string - -const ( - UnaryMap MapMode = "unary-map" - StreamMap MapMode = "stream-map" - BatchMap MapMode = "batch-map" -) +var END = fmt.Sprintf("%U__END__", '\\') // SDKServerInfo wait for the server to start and return the server info. -func SDKServerInfo(inputOptions ...Option) (*info.ServerInfo, error) { +func SDKServerInfo(inputOptions ...Option) (*ServerInfo, error) { var opts = DefaultOptions() for _, inputOption := range inputOptions { @@ -68,33 +52,32 @@ func SDKServerInfo(inputOptions ...Option) (*info.ServerInfo, error) { } // waitForServerInfo waits until the server info is ready. 
It returns an error if the server info is not ready within the given timeout -func waitForServerInfo(timeout time.Duration, filePath string) (*info.ServerInfo, error) { +func waitForServerInfo(timeout time.Duration, filePath string) (*ServerInfo, error) { ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() - if err := info.WaitUntilReady(ctx, info.WithServerInfoFilePath(filePath)); err != nil { + if err := waitUntilReady(ctx, WithServerInfoFilePath(filePath)); err != nil { return nil, fmt.Errorf("failed to wait until server info is ready: %w", err) } - - serverInfo, err := info.Read(info.WithServerInfoFilePath(filePath)) + serverInfo, err := read(WithServerInfoFilePath(filePath)) if err != nil { return nil, fmt.Errorf("failed to read server info: %w", err) } - sdkVersion := serverInfo.Version minNumaflowVersion := serverInfo.MinimumNumaflowVersion sdkLanguage := serverInfo.Language numaflowVersion := numaflow.GetVersion().Version // If MinimumNumaflowVersion is empty, skip the numaflow compatibility check as there was an - // error writing server info on the SDK side + // error writing server info file on the SDK side if minNumaflowVersion == "" { log.Printf("warning: failed to get the minimum numaflow version, skipping numaflow version compatibility check") // If we are testing locally or in CI, we can skip checking for numaflow compatibility issues - // because both return us a version string that the version check libraries can't properly parse (local: "*latest*" CI: commit SHA) + // because both return us a version string that the version-check libraries can't properly parse, + // local: "*latest*", CI: commit SHA } else if !strings.Contains(numaflowVersion, "latest") && !strings.Contains(numaflowVersion, numaflow.GetVersion().GitCommit) { if err := checkNumaflowCompatibility(numaflowVersion, minNumaflowVersion); err != nil { - return nil, fmt.Errorf("numaflow %s does not satisfy the minimum required by SDK %s: %w", + return nil, 
fmt.Errorf("numaflow version %s does not satisfy the minimum required by SDK version %s: %w", numaflowVersion, sdkVersion, err) } } @@ -105,14 +88,66 @@ func waitForServerInfo(timeout time.Duration, filePath string) (*info.ServerInfo log.Printf("warning: failed to get the SDK version/language, skipping SDK version compatibility check") } else { if err := checkSDKCompatibility(sdkVersion, sdkLanguage, minimumSupportedSDKVersions); err != nil { - return nil, fmt.Errorf("SDK %s does not satisfy the minimum required by numaflow %s: %w", + return nil, fmt.Errorf("SDK version %s does not satisfy the minimum required by numaflow version %s: %w", sdkVersion, numaflowVersion, err) } } - return serverInfo, nil } +// waitUntilReady waits until the server info is ready +func waitUntilReady(ctx context.Context, opts ...Option) error { + options := DefaultOptions() + for _, opt := range opts { + opt(options) + } + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + if fileInfo, err := os.Stat(options.serverInfoFilePath); err != nil { + log.Printf("Server info file %s is not ready...", options.serverInfoFilePath) + time.Sleep(1 * time.Second) + continue + } else { + if fileInfo.Size() > 0 { + return nil + } + } + } + } +} + +// read reads the server info from a file +func read(opts ...Option) (*ServerInfo, error) { + options := DefaultOptions() + for _, opt := range opts { + opt(options) + } + // It takes some time for the server to write the server info file + // TODO: use a better way to wait for the file to be ready + retry := 0 + b, err := os.ReadFile(options.serverInfoFilePath) + for !strings.HasSuffix(string(b), END) && err == nil && retry < 10 { + time.Sleep(100 * time.Millisecond) + b, err = os.ReadFile(options.serverInfoFilePath) + retry++ + } + if err != nil { + return nil, err + } + if !strings.HasSuffix(string(b), END) { + return nil, fmt.Errorf("server info file is not ready") + } + b = b[:len(b)-len([]byte(END))] + info := &ServerInfo{} + if err := 
json.Unmarshal(b, info); err != nil { + return nil, fmt.Errorf("failed to unmarshal server info: %w", err) + } + return info, nil +} + func checkConstraint(version *semver.Version, constraint string) error { if c, err := semver.NewConstraint(constraint); err != nil { return fmt.Errorf("error parsing constraint: %w, constraint string: %s", err, constraint) @@ -128,26 +163,23 @@ func checkNumaflowCompatibility(numaflowVersion string, minNumaflowVersion strin if minNumaflowVersion == "" { return fmt.Errorf("server info does not contain minimum numaflow version. Upgrade to newer SDK version") } - numaflowVersionSemVer, err := semver.NewVersion(numaflowVersion) if err != nil { return fmt.Errorf("error parsing numaflow version: %w", err) } - numaflowConstraint := fmt.Sprintf(">= %s", minNumaflowVersion) if err = checkConstraint(numaflowVersionSemVer, numaflowConstraint); err != nil { return fmt.Errorf("numaflow version %s must be upgraded to at least %s, in order to work with current SDK version: %w", numaflowVersionSemVer.String(), minNumaflowVersion, err) } - return nil } // checkSDKCompatibility checks if the current SDK version is compatible with the numaflow version -func checkSDKCompatibility(sdkVersion string, sdkLanguage info.Language, minSupportedSDKVersions sdkConstraints) error { +func checkSDKCompatibility(sdkVersion string, sdkLanguage Language, minSupportedSDKVersions sdkConstraints) error { if sdkRequiredVersion, ok := minSupportedSDKVersions[sdkLanguage]; ok { sdkConstraint := fmt.Sprintf(">= %s", sdkRequiredVersion) - if sdkLanguage == info.Python { + if sdkLanguage == Python { // Python pre-releases/releases follow PEP440 specification which requires a different library for parsing sdkVersionPEP440, err := pep440.Parse(sdkVersion) if err != nil { @@ -175,6 +207,5 @@ func checkSDKCompatibility(sdkVersion string, sdkLanguage info.Language, minSupp } } } - return nil } diff --git a/pkg/sdkclient/serverinfo/serverinfo_test.go 
b/pkg/sdkclient/serverinfo/serverinfo_test.go index 90a683a81f..e96919e243 100644 --- a/pkg/sdkclient/serverinfo/serverinfo_test.go +++ b/pkg/sdkclient/serverinfo/serverinfo_test.go @@ -17,20 +17,78 @@ limitations under the License. package serverinfo import ( + "context" + "encoding/json" + "errors" + "fmt" + "os" "testing" - - "github.com/numaproj/numaflow-go/pkg/info" + "time" "github.com/stretchr/testify/assert" ) -var testMinimumSupportedSDKVersions = sdkConstraints{ - info.Go: "0.6.0-0", - info.Python: "0.6.0a", - info.Java: "0.6.0-0", +func Test_SDKServerInfo(t *testing.T) { + filepath := os.TempDir() + "/server-info" + defer os.Remove(filepath) + info := &ServerInfo{ + Protocol: TCP, + Language: Java, + MinimumNumaflowVersion: "1.3.0-rc1", + Version: "v0.8.0", + Metadata: map[string]string{"key1": "value1", "key2": "value2"}, + } + err := write(info, WithServerInfoFilePath(filepath)) + assert.NoError(t, err) + got, err := SDKServerInfo(WithServerInfoFilePath(filepath)) + assert.NoError(t, err) + assert.Equal(t, info, got) +} + +func Test_WaitUntilReady(t *testing.T) { + serverInfoFile, err := os.CreateTemp("/tmp", "server-info") + assert.NoError(t, err) + defer os.Remove(serverInfoFile.Name()) + err = os.WriteFile(serverInfoFile.Name(), []byte("test"), 0644) + assert.NoError(t, err) + + t.Run("test timeout", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*2) + defer cancel() + err := waitUntilReady(ctx, WithServerInfoFilePath("/tmp/not-exist")) + assert.True(t, errors.Is(err, context.DeadlineExceeded)) + }) + + t.Run("test success", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*3) + defer cancel() + err = waitUntilReady(ctx, WithServerInfoFilePath(serverInfoFile.Name())) + assert.NoError(t, err) + }) +} + +func Test_ReadServerInfoFile(t *testing.T) { + filepath := os.TempDir() + "/server-info" + defer os.Remove(filepath) + info := &ServerInfo{ + Protocol: TCP, + 
Language: Java, + MinimumNumaflowVersion: "1.3.0-rc1", + Version: "v0.8.0", + Metadata: map[string]string{"key1": "value1", "key2": "value2"}, + } + err := write(info, WithServerInfoFilePath(filepath)) + assert.NoError(t, err) + got, err := read(WithServerInfoFilePath("/tmp/not-exist")) + assert.Error(t, err) + assert.True(t, os.IsNotExist(err)) + assert.Nil(t, got) + got, err = read(WithServerInfoFilePath(filepath)) + assert.NoError(t, err) + assert.Equal(t, info, got) } -func TestCheckNumaflowCompatibility(t *testing.T) { +func Test_CheckNumaflowCompatibility(t *testing.T) { tests := []struct { name string numaflowVersion string @@ -59,7 +117,6 @@ func TestCheckNumaflowCompatibility(t *testing.T) { shouldErr: false, }, } - for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { err := checkNumaflowCompatibility(tt.numaflowVersion, tt.minNumaflowVersion) @@ -73,11 +130,17 @@ func TestCheckNumaflowCompatibility(t *testing.T) { } } -func TestCheckSDKCompatibility(t *testing.T) { +func Test_CheckSDKCompatibility(t *testing.T) { + var testMinimumSupportedSDKVersions = sdkConstraints{ + Go: "0.6.0-0", + Python: "0.6.0a", + Java: "0.6.0-0", + Rust: "0.1.0", + } tests := []struct { name string sdkVersion string - sdkLanguage info.Language + sdkLanguage Language minimumSupportedSDKVersions sdkConstraints shouldErr bool errMessage string @@ -85,7 +148,7 @@ func TestCheckSDKCompatibility(t *testing.T) { { name: "Test with incompatible Python version", sdkVersion: "v0.5.3a1", - sdkLanguage: info.Python, + sdkLanguage: Python, minimumSupportedSDKVersions: testMinimumSupportedSDKVersions, shouldErr: true, errMessage: "SDK version 0.5.3a1 must be upgraded to at least 0.6.0a, in order to work with current numaflow version", @@ -93,14 +156,14 @@ func TestCheckSDKCompatibility(t *testing.T) { { name: "Test with compatible Python version", sdkVersion: "v0.6.0a2", - sdkLanguage: info.Python, + sdkLanguage: Python, minimumSupportedSDKVersions: testMinimumSupportedSDKVersions, 
shouldErr: false, }, { name: "Test with incompatible Java version", sdkVersion: "v0.4.3", - sdkLanguage: info.Java, + sdkLanguage: Java, minimumSupportedSDKVersions: testMinimumSupportedSDKVersions, shouldErr: true, errMessage: "SDK version 0.4.3 must be upgraded to at least 0.6.0-0, in order to work with current numaflow version", @@ -108,12 +171,26 @@ func TestCheckSDKCompatibility(t *testing.T) { { name: "Test with compatible Go version", sdkVersion: "v0.6.0-rc2", - sdkLanguage: info.Go, + sdkLanguage: Go, + minimumSupportedSDKVersions: testMinimumSupportedSDKVersions, + shouldErr: false, + }, + { + name: "Test with incompatible Rust version", + sdkVersion: "v0.0.3", + sdkLanguage: Rust, + minimumSupportedSDKVersions: testMinimumSupportedSDKVersions, + shouldErr: true, + errMessage: "SDK version 0.0.3 must be upgraded to at least 0.1.0, in order to work with current numaflow version", + }, + { + name: "Test with compatible Rust version", + sdkVersion: "v0.1.1", + sdkLanguage: Rust, minimumSupportedSDKVersions: testMinimumSupportedSDKVersions, shouldErr: false, }, } - for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { err := checkSDKCompatibility(tt.sdkVersion, tt.sdkLanguage, tt.minimumSupportedSDKVersions) @@ -126,3 +203,32 @@ func TestCheckSDKCompatibility(t *testing.T) { }) } } + +// write is a test helper function to prepare server info file +func write(svrInfo *ServerInfo, opts ...Option) error { + b, err := json.Marshal(svrInfo) + if err != nil { + return fmt.Errorf("failed to marshal server info: %w", err) + } + options := DefaultOptions() + for _, opt := range opts { + opt(options) + } + if err := os.Remove(options.serverInfoFilePath); !os.IsNotExist(err) && err != nil { + return fmt.Errorf("failed to remove server-info file: %w", err) + } + f, err := os.Create(options.serverInfoFilePath) + if err != nil { + return fmt.Errorf("failed to create server-info file: %w", err) + } + defer f.Close() + _, err = f.Write(b) + if err != nil { + return 
fmt.Errorf("failed to write server-info file: %w", err) + } + _, err = f.WriteString(END) + if err != nil { + return fmt.Errorf("failed to write END server-info file: %w", err) + } + return nil +} diff --git a/pkg/sdkclient/serverinfo/types.go b/pkg/sdkclient/serverinfo/types.go new file mode 100644 index 0000000000..fc8fdd9b81 --- /dev/null +++ b/pkg/sdkclient/serverinfo/types.go @@ -0,0 +1,69 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serverinfo + +type Language string + +const ( + Go Language = "go" + Python Language = "python" + Java Language = "java" + Rust Language = "rust" +) + +type sdkConstraints map[Language]string + +var minimumSupportedSDKVersions = sdkConstraints{ + Go: "0.8.0", + Python: "0.8.0", + Java: "0.8.0", + Rust: "0.1.0", +} + +type Protocol string + +const ( + UDS Protocol = "uds" + TCP Protocol = "tcp" +) + +type MapMode string + +const ( + UnaryMap MapMode = "unary-map" + StreamMap MapMode = "stream-map" + BatchMap MapMode = "batch-map" +) + +// Metadata keys used in the server info file +const ( + // MultiProcKey is the field used to indicate that MultiProc map mode is enabled + // The value contains the number of servers spawned. 
+ MultiProcKey = "MULTIPROC" + // MapModeKey field is used to indicate which map mode is enabled + // If none is set, we consider the unary map as default + MapModeKey = "MAP_MODE" +) + +// ServerInfo is the information about the server +type ServerInfo struct { + Protocol Protocol `json:"protocol"` + Language Language `json:"language"` + MinimumNumaflowVersion string `json:"minimum_numaflow_version"` + Version string `json:"version"` + Metadata map[string]string `json:"metadata"` +} diff --git a/pkg/sdkclient/serverinfo/versions.go b/pkg/sdkclient/serverinfo/versions.go deleted file mode 100644 index aded1c9bd3..0000000000 --- a/pkg/sdkclient/serverinfo/versions.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2022 The Numaproj Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package serverinfo - -import ( - "github.com/numaproj/numaflow-go/pkg/info" -) - -type sdkConstraints map[info.Language]string - -var minimumSupportedSDKVersions = sdkConstraints{ - info.Go: "0.8.0", - info.Python: "0.8.0", - info.Java: "0.8.0", -} diff --git a/pkg/sdkclient/sessionreducer/client.go b/pkg/sdkclient/sessionreducer/client.go index f2ff4d6770..a2c8b12f8e 100644 --- a/pkg/sdkclient/sessionreducer/client.go +++ b/pkg/sdkclient/sessionreducer/client.go @@ -25,10 +25,11 @@ import ( "google.golang.org/protobuf/types/known/emptypb" sessionreducepb "github.com/numaproj/numaflow-go/pkg/apis/proto/sessionreduce/v1" - "github.com/numaproj/numaflow-go/pkg/info" + "github.com/numaproj/numaflow/pkg/sdkclient" sdkerr "github.com/numaproj/numaflow/pkg/sdkclient/error" grpcutil "github.com/numaproj/numaflow/pkg/sdkclient/grpc" + "github.com/numaproj/numaflow/pkg/sdkclient/serverinfo" ) // client contains the grpc connection and the grpc client. @@ -38,7 +39,7 @@ type client struct { } // New creates a new client object. 
-func New(serverInfo *info.ServerInfo, inputOptions ...sdkclient.Option) (Client, error) { +func New(serverInfo *serverinfo.ServerInfo, inputOptions ...sdkclient.Option) (Client, error) { var opts = sdkclient.DefaultOptions(sdkclient.SessionReduceAddr) for _, inputOption := range inputOptions { diff --git a/pkg/sdkclient/sideinput/client.go b/pkg/sdkclient/sideinput/client.go index 8b9f9730f2..48c101f260 100644 --- a/pkg/sdkclient/sideinput/client.go +++ b/pkg/sdkclient/sideinput/client.go @@ -21,13 +21,13 @@ import ( "fmt" "time" + sideinputpb "github.com/numaproj/numaflow-go/pkg/apis/proto/sideinput/v1" "google.golang.org/grpc" "google.golang.org/protobuf/types/known/emptypb" - sideinputpb "github.com/numaproj/numaflow-go/pkg/apis/proto/sideinput/v1" - "github.com/numaproj/numaflow-go/pkg/info" "github.com/numaproj/numaflow/pkg/sdkclient" grpcutil "github.com/numaproj/numaflow/pkg/sdkclient/grpc" + "github.com/numaproj/numaflow/pkg/sdkclient/serverinfo" ) // client contains the grpc connection and the grpc client. @@ -39,7 +39,7 @@ type client struct { var _ Client = (*client)(nil) // New creates a new client object. 
-func New(serverInfo *info.ServerInfo, inputOptions ...sdkclient.Option) (*client, error) { +func New(serverInfo *serverinfo.ServerInfo, inputOptions ...sdkclient.Option) (*client, error) { var opts = sdkclient.DefaultOptions(sdkclient.SideInputAddr) for _, inputOption := range inputOptions { diff --git a/pkg/sdkclient/sinker/client.go b/pkg/sdkclient/sinker/client.go index e8249d1859..67fe08557c 100644 --- a/pkg/sdkclient/sinker/client.go +++ b/pkg/sdkclient/sinker/client.go @@ -20,13 +20,13 @@ import ( "context" "fmt" + sinkpb "github.com/numaproj/numaflow-go/pkg/apis/proto/sink/v1" "google.golang.org/grpc" "google.golang.org/protobuf/types/known/emptypb" - sinkpb "github.com/numaproj/numaflow-go/pkg/apis/proto/sink/v1" - "github.com/numaproj/numaflow-go/pkg/info" "github.com/numaproj/numaflow/pkg/sdkclient" grpcutil "github.com/numaproj/numaflow/pkg/sdkclient/grpc" + "github.com/numaproj/numaflow/pkg/sdkclient/serverinfo" ) // client contains the grpc connection and the grpc client. @@ -37,7 +37,7 @@ type client struct { var _ Client = (*client)(nil) -func New(serverInfo *info.ServerInfo, inputOptions ...sdkclient.Option) (Client, error) { +func New(serverInfo *serverinfo.ServerInfo, inputOptions ...sdkclient.Option) (Client, error) { var opts = sdkclient.DefaultOptions(sdkclient.SinkAddr) for _, inputOption := range inputOptions { diff --git a/pkg/sdkclient/source/client.go b/pkg/sdkclient/source/client.go index c5275b3c77..39b96a13ed 100644 --- a/pkg/sdkclient/source/client.go +++ b/pkg/sdkclient/source/client.go @@ -21,13 +21,13 @@ import ( "fmt" "io" + sourcepb "github.com/numaproj/numaflow-go/pkg/apis/proto/source/v1" "google.golang.org/grpc" "google.golang.org/protobuf/types/known/emptypb" - sourcepb "github.com/numaproj/numaflow-go/pkg/apis/proto/source/v1" - "github.com/numaproj/numaflow-go/pkg/info" "github.com/numaproj/numaflow/pkg/sdkclient" grpcutil "github.com/numaproj/numaflow/pkg/sdkclient/grpc" + 
"github.com/numaproj/numaflow/pkg/sdkclient/serverinfo" ) // client contains the grpc connection and the grpc client. @@ -38,7 +38,7 @@ type client struct { var _ Client = (*client)(nil) -func New(serverInfo *info.ServerInfo, inputOptions ...sdkclient.Option) (Client, error) { +func New(serverInfo *serverinfo.ServerInfo, inputOptions ...sdkclient.Option) (Client, error) { var opts = sdkclient.DefaultOptions(sdkclient.SourceAddr) for _, inputOption := range inputOptions { diff --git a/pkg/sdkclient/sourcetransformer/client.go b/pkg/sdkclient/sourcetransformer/client.go index 1893640a73..d9d47302c0 100644 --- a/pkg/sdkclient/sourcetransformer/client.go +++ b/pkg/sdkclient/sourcetransformer/client.go @@ -23,10 +23,11 @@ import ( "google.golang.org/protobuf/types/known/emptypb" transformpb "github.com/numaproj/numaflow-go/pkg/apis/proto/sourcetransform/v1" - "github.com/numaproj/numaflow-go/pkg/info" + "github.com/numaproj/numaflow/pkg/sdkclient" sdkerr "github.com/numaproj/numaflow/pkg/sdkclient/error" grpcutil "github.com/numaproj/numaflow/pkg/sdkclient/grpc" + "github.com/numaproj/numaflow/pkg/sdkclient/serverinfo" ) // client contains the grpc connection and the grpc client. @@ -36,7 +37,7 @@ type client struct { } // New creates a new client object. 
-func New(serverInfo *info.ServerInfo, inputOptions ...sdkclient.Option) (Client, error) { +func New(serverInfo *serverinfo.ServerInfo, inputOptions ...sdkclient.Option) (Client, error) { var opts = sdkclient.DefaultOptions(sdkclient.SourceTransformerAddr) for _, inputOption := range inputOptions { diff --git a/pkg/sideinputs/manager/manager.go b/pkg/sideinputs/manager/manager.go index 84de31a2c9..5aea876c72 100644 --- a/pkg/sideinputs/manager/manager.go +++ b/pkg/sideinputs/manager/manager.go @@ -28,7 +28,7 @@ import ( dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" "github.com/numaproj/numaflow/pkg/isbsvc" "github.com/numaproj/numaflow/pkg/sdkclient" - sdkserverinfo "github.com/numaproj/numaflow/pkg/sdkclient/serverinfo" + "github.com/numaproj/numaflow/pkg/sdkclient/serverinfo" "github.com/numaproj/numaflow/pkg/sdkclient/sideinput" jsclient "github.com/numaproj/numaflow/pkg/shared/clients/nats" "github.com/numaproj/numaflow/pkg/shared/kvs" @@ -83,7 +83,7 @@ func (sim *sideInputsManager) Start(ctx context.Context) error { } // Wait for server info to be ready - serverInfo, err := sdkserverinfo.SDKServerInfo(sdkserverinfo.WithServerInfoFilePath(sdkclient.SideInputServerInfoFile)) + serverInfo, err := serverinfo.SDKServerInfo(serverinfo.WithServerInfoFilePath(sdkclient.SideInputServerInfoFile)) if err != nil { return err } diff --git a/pkg/sinks/sink.go b/pkg/sinks/sink.go index f18f545127..413d232799 100644 --- a/pkg/sinks/sink.go +++ b/pkg/sinks/sink.go @@ -32,7 +32,7 @@ import ( "github.com/numaproj/numaflow/pkg/isbsvc" "github.com/numaproj/numaflow/pkg/metrics" "github.com/numaproj/numaflow/pkg/sdkclient" - sdkserverinfo "github.com/numaproj/numaflow/pkg/sdkclient/serverinfo" + "github.com/numaproj/numaflow/pkg/sdkclient/serverinfo" sinkclient "github.com/numaproj/numaflow/pkg/sdkclient/sinker" "github.com/numaproj/numaflow/pkg/shared/callback" jsclient "github.com/numaproj/numaflow/pkg/shared/clients/nats" @@ -150,7 +150,7 @@ func (u 
*SinkProcessor) Start(ctx context.Context) error { maxMessageSize := sharedutil.LookupEnvIntOr(dfv1.EnvGRPCMaxMessageSize, sdkclient.DefaultGRPCMaxMessageSize) if udSink := u.VertexInstance.Vertex.Spec.Sink.UDSink; udSink != nil { // Wait for server info to be ready - serverInfo, err := sdkserverinfo.SDKServerInfo(sdkserverinfo.WithServerInfoFilePath(sdkclient.SinkServerInfoFile)) + serverInfo, err := serverinfo.SDKServerInfo(serverinfo.WithServerInfoFilePath(sdkclient.SinkServerInfoFile)) if err != nil { return err } @@ -179,7 +179,7 @@ func (u *SinkProcessor) Start(ctx context.Context) error { if u.VertexInstance.Vertex.HasFallbackUDSink() { // Wait for server info to be ready - serverInfo, err := sdkserverinfo.SDKServerInfo(sdkserverinfo.WithServerInfoFilePath(sdkclient.FbSinkServerInfoFile)) + serverInfo, err := serverinfo.SDKServerInfo(serverinfo.WithServerInfoFilePath(sdkclient.FbSinkServerInfoFile)) if err != nil { return err } diff --git a/pkg/sources/source.go b/pkg/sources/source.go index 8a8e64ffd6..e206d76dbc 100644 --- a/pkg/sources/source.go +++ b/pkg/sources/source.go @@ -34,7 +34,7 @@ import ( "github.com/numaproj/numaflow/pkg/isbsvc" "github.com/numaproj/numaflow/pkg/metrics" "github.com/numaproj/numaflow/pkg/sdkclient" - sdkserverinfo "github.com/numaproj/numaflow/pkg/sdkclient/serverinfo" + "github.com/numaproj/numaflow/pkg/sdkclient/serverinfo" sourceclient "github.com/numaproj/numaflow/pkg/sdkclient/source" "github.com/numaproj/numaflow/pkg/sdkclient/sourcetransformer" "github.com/numaproj/numaflow/pkg/shared/callback" @@ -196,7 +196,7 @@ func (sp *SourceProcessor) Start(ctx context.Context) error { var udsGRPCClient *udsource.GRPCBasedUDSource if sp.VertexInstance.Vertex.IsUDSource() { // Wait for server info to be ready - serverInfo, err := sdkserverinfo.SDKServerInfo(sdkserverinfo.WithServerInfoFilePath(sdkclient.SourceServerInfoFile)) + serverInfo, err := 
serverinfo.SDKServerInfo(serverinfo.WithServerInfoFilePath(sdkclient.SourceServerInfoFile)) if err != nil { return err } @@ -235,7 +235,7 @@ func (sp *SourceProcessor) Start(ctx context.Context) error { if sp.VertexInstance.Vertex.HasUDTransformer() { // Wait for server info to be ready - serverInfo, err := sdkserverinfo.SDKServerInfo(sdkserverinfo.WithServerInfoFilePath(sdkclient.SourceTransformerServerInfoFile)) + serverInfo, err := serverinfo.SDKServerInfo(serverinfo.WithServerInfoFilePath(sdkclient.SourceTransformerServerInfoFile)) if err != nil { return err } diff --git a/pkg/udf/map_udf.go b/pkg/udf/map_udf.go index 44cb1b5aa7..5d926751f0 100644 --- a/pkg/udf/map_udf.go +++ b/pkg/udf/map_udf.go @@ -33,7 +33,7 @@ import ( "github.com/numaproj/numaflow/pkg/sdkclient/batchmapper" "github.com/numaproj/numaflow/pkg/sdkclient/mapper" "github.com/numaproj/numaflow/pkg/sdkclient/mapstreamer" - sdkserverinfo "github.com/numaproj/numaflow/pkg/sdkclient/serverinfo" + "github.com/numaproj/numaflow/pkg/sdkclient/serverinfo" "github.com/numaproj/numaflow/pkg/shared/callback" jsclient "github.com/numaproj/numaflow/pkg/shared/clients/nats" "github.com/numaproj/numaflow/pkg/shared/logging" @@ -138,16 +138,16 @@ func (u *MapUDFProcessor) Start(ctx context.Context) error { maxMessageSize := sharedutil.LookupEnvIntOr(dfv1.EnvGRPCMaxMessageSize, sdkclient.DefaultGRPCMaxMessageSize) // Wait for map server info to be ready, we use the same info file for all the map modes - serverInfo, err := sdkserverinfo.SDKServerInfo(sdkserverinfo.WithServerInfoFilePath(sdkclient.MapServerInfoFile)) + serverInfo, err := serverinfo.SDKServerInfo(serverinfo.WithServerInfoFilePath(sdkclient.MapServerInfoFile)) if err != nil { return err } // Read the server info file to read which map mode is enabled // Based on the value set, we will create the corresponding handler and clients - mapMode, ok := serverInfo.Metadata[sdkserverinfo.MapModeMetadata] + mapMode, ok := 
serverInfo.Metadata[serverinfo.MapModeKey] - if ok && (sdkserverinfo.MapMode(mapMode) == sdkserverinfo.StreamMap) { + if ok && (serverinfo.MapMode(mapMode) == serverinfo.StreamMap) { log.Info("Map mode enabled: Stream Map") // Map Stream mode enableMapUdfStream = true @@ -170,9 +170,9 @@ func (u *MapUDFProcessor) Start(ctx context.Context) error { }() opts = append(opts, forward.WithUDFStreamingMap(mapStreamHandler)) - } else if ok && (sdkserverinfo.MapMode(mapMode) == sdkserverinfo.BatchMap) { + } else if ok && (serverinfo.MapMode(mapMode) == serverinfo.BatchMap) { log.Info("Map mode enabled: Batch Map") - // if Batch Map mode is enabled create the client and handler for that accordingly + // if Batch Map mode is enabled, create the client and handler for that accordingly enableBatchMapUdf = true // create the client and handler for batch map interface diff --git a/pkg/udf/reduce_udf.go b/pkg/udf/reduce_udf.go index eec3df3ff4..a1c9500f2e 100644 --- a/pkg/udf/reduce_udf.go +++ b/pkg/udf/reduce_udf.go @@ -23,12 +23,8 @@ import ( "strings" "sync" - "github.com/numaproj/numaflow-go/pkg/info" "go.uber.org/zap" - alignedfs "github.com/numaproj/numaflow/pkg/reduce/pbq/wal/aligned/fs" - noopwal "github.com/numaproj/numaflow/pkg/reduce/pbq/wal/noop" - dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" "github.com/numaproj/numaflow/pkg/forwarder" "github.com/numaproj/numaflow/pkg/isb" @@ -36,12 +32,14 @@ import ( "github.com/numaproj/numaflow/pkg/reduce" "github.com/numaproj/numaflow/pkg/reduce/applier" "github.com/numaproj/numaflow/pkg/reduce/pbq" + alignedfs "github.com/numaproj/numaflow/pkg/reduce/pbq/wal/aligned/fs" + noopwal "github.com/numaproj/numaflow/pkg/reduce/pbq/wal/noop" "github.com/numaproj/numaflow/pkg/reduce/pbq/wal/unaligned" unalignedfs "github.com/numaproj/numaflow/pkg/reduce/pbq/wal/unaligned/fs" "github.com/numaproj/numaflow/pkg/reduce/pnf" "github.com/numaproj/numaflow/pkg/sdkclient" "github.com/numaproj/numaflow/pkg/sdkclient/reducer" - 
sdkserverinfo "github.com/numaproj/numaflow/pkg/sdkclient/serverinfo" + "github.com/numaproj/numaflow/pkg/sdkclient/serverinfo" "github.com/numaproj/numaflow/pkg/sdkclient/sessionreducer" jsclient "github.com/numaproj/numaflow/pkg/shared/clients/nats" "github.com/numaproj/numaflow/pkg/shared/logging" @@ -94,19 +92,19 @@ func (u *ReduceUDFProcessor) Start(ctx context.Context) error { // create udf handler and wait until it is ready if windowType.Fixed != nil || windowType.Sliding != nil { - var serverInfo *info.ServerInfo + var serverInfo *serverinfo.ServerInfo var client reducer.Client // if streaming is enabled, use the reduceStreaming address if (windowType.Fixed != nil && windowType.Fixed.Streaming) || (windowType.Sliding != nil && windowType.Sliding.Streaming) { // Wait for server info to be ready - serverInfo, err = sdkserverinfo.SDKServerInfo(sdkserverinfo.WithServerInfoFilePath(sdkclient.ReduceStreamServerInfoFile)) + serverInfo, err = serverinfo.SDKServerInfo(serverinfo.WithServerInfoFilePath(sdkclient.ReduceStreamServerInfoFile)) if err != nil { return err } client, err = reducer.New(serverInfo, sdkclient.WithMaxMessageSize(maxMessageSize), sdkclient.WithUdsSockAddr(sdkclient.ReduceStreamAddr)) } else { // Wait for server info to be ready - serverInfo, err = sdkserverinfo.SDKServerInfo(sdkserverinfo.WithServerInfoFilePath(sdkclient.ReduceServerInfoFile)) + serverInfo, err = serverinfo.SDKServerInfo(serverinfo.WithServerInfoFilePath(sdkclient.ReduceServerInfoFile)) if err != nil { return err } @@ -132,7 +130,7 @@ func (u *ReduceUDFProcessor) Start(ctx context.Context) error { healthChecker = reduceHandler } else if windowType.Session != nil { // Wait for server info to be ready - serverInfo, err := sdkserverinfo.SDKServerInfo(sdkserverinfo.WithServerInfoFilePath(sdkclient.SessionReduceServerInfoFile)) + serverInfo, err := serverinfo.SDKServerInfo(serverinfo.WithServerInfoFilePath(sdkclient.SessionReduceServerInfoFile)) if err != nil { return err } diff 
--git a/rust/Cargo.lock b/rust/Cargo.lock index 21d6a28a7d..7748607ca5 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -4,18 +4,18 @@ version = 3 [[package]] name = "addr2line" -version = "0.22.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" +checksum = "f5fb1d8e4442bd405fdfd1dacb42792696b0cf9cb15882e5d097b742a676d375" dependencies = [ "gimli", ] [[package]] -name = "adler" -version = "1.0.2" +name = "adler2" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" +checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" [[package]] name = "aho-corasick" @@ -43,9 +43,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.86" +version = "1.0.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" +checksum = "4e1496f8fb1fbf272686b8d37f523dab3e4a7443300055e74cdaa449f3114356" [[package]] name = "arc-swap" @@ -70,9 +70,9 @@ dependencies = [ "rand", "regex", "ring", - "rustls-native-certs", + "rustls-native-certs 0.7.3", "rustls-pemfile 2.1.3", - "rustls-webpki 0.102.6", + "rustls-webpki 0.102.8", "serde", "serde_json", "serde_nanos", @@ -110,9 +110,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.81" +version = "0.1.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" +checksum = "a27b8a3a6e1a44fa4c8baf1f653e4172e81486d4941f2237e20dc2d0cf4ddff1" dependencies = [ "proc-macro2", "quote", @@ -133,9 +133,9 @@ checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "aws-lc-rs" -version = "1.8.1" +version = "1.9.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ae74d9bd0a7530e8afd1770739ad34b36838829d6ad61818f9230f683f5ad77" +checksum = "2f95446d919226d587817a7d21379e6eb099b97b45110a7f272a444ca5c54070" dependencies = [ "aws-lc-sys", "mirai-annotations", @@ -145,9 +145,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.20.1" +version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f0e249228c6ad2d240c2dc94b714d711629d52bad946075d8e9b2f5391f0703" +checksum = "234314bd569802ec87011d653d6815c6d7b9ffb969e9fee5b8b20ef860e8dce9" dependencies = [ "bindgen", "cc", @@ -240,7 +240,7 @@ dependencies = [ "hyper 1.4.1", "hyper-util", "pin-project-lite", - "rustls 0.23.12", + "rustls 0.23.13", "rustls-pemfile 2.1.3", "rustls-pki-types", "tokio", @@ -259,17 +259,17 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.73" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ "addr2line", - "cc", "cfg-if", "libc", "miniz_oxide", "object", "rustc-demangle", + "windows-targets 0.52.6", ] [[package]] @@ -360,12 +360,13 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.7" +version = "1.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26a5c3fd7bfa1ce3897a3a3501d362b2d87b7f2583ebcb4a949ec25911025cbc" +checksum = "b62ac837cdb5cb22e10a256099b4fc502b1dfe560cb282963a974d7abd80e476" dependencies = [ "jobserver", "libc", + "shlex", ] [[package]] @@ -411,9 +412,9 @@ dependencies = [ [[package]] name = "cmake" -version = "0.1.50" +version = "0.1.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31c789563b815f77f4250caee12365734369f942439b7defd71e18a48197130" +checksum = "fb1e43aa7fd152b1f968787f7dbcdeb306d1867ff373c69955211876c053f91a" 
dependencies = [ "cc", ] @@ -499,15 +500,15 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "cpufeatures" -version = "0.2.12" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" +checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" dependencies = [ "libc", ] @@ -667,9 +668,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" +checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" [[package]] name = "fiat-crypto" @@ -816,9 +817,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" +checksum = "32085ea23f3234fc7846555e85283ba4de91e21016dc0455a16286d87a292d64" [[package]] name = "glob" @@ -838,7 +839,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.3.0", + "indexmap 2.5.0", "slab", "tokio", "tokio-util", @@ -847,9 +848,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa82e28a107a8cc405f0839610bdc9b15f1e25ec7d696aa5cf173edbcb1486ab" +checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" dependencies = [ "atomic-waker", "bytes", @@ -857,7 +858,7 @@ dependencies = [ "futures-core", "futures-sink", "http 
1.1.0", - "indexmap 2.3.0", + "indexmap 2.5.0", "slab", "tokio", "tokio-util", @@ -1034,7 +1035,7 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "h2 0.4.5", + "h2 0.4.6", "http 1.1.0", "http-body 1.0.1", "httparse", @@ -1057,10 +1058,10 @@ dependencies = [ "headers", "http 1.1.0", "hyper 1.4.1", - "hyper-rustls 0.27.2", + "hyper-rustls 0.27.3", "hyper-util", "pin-project-lite", - "rustls-native-certs", + "rustls-native-certs 0.7.3", "tokio", "tokio-rustls 0.26.0", "tower-service", @@ -1082,17 +1083,17 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.2" +version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155" +checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" dependencies = [ "futures-util", "http 1.1.0", "hyper 1.4.1", "hyper-util", "log", - "rustls 0.23.12", - "rustls-native-certs", + "rustls 0.23.13", + "rustls-native-certs 0.8.0", "rustls-pki-types", "tokio", "tokio-rustls 0.26.0", @@ -1114,9 +1115,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.6" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ab92f4f49ee4fb4f997c784b7a2e0fa70050211e0b6a287f898c3c9785ca956" +checksum = "da62f120a8a37763efb0cf8fdf264b884c7b8b9ac8660b900c8661030c00e6ba" dependencies = [ "bytes", "futures-channel", @@ -1177,9 +1178,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.3.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3fc2e30ba82dd1b3911c8de1ffc143c74a914a14e99514d7637e3099df5ea0" +checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" dependencies = [ "equivalent", "hashbrown 0.14.5", @@ -1187,9 +1188,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.9.0" +version = "2.10.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" +checksum = "187674a687eed5fe42285b40c6291f9a01517d415fad1c3cbc6a9f778af7fcd4" [[package]] name = "itertools" @@ -1226,9 +1227,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.69" +version = "0.3.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" +checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" dependencies = [ "wasm-bindgen", ] @@ -1274,9 +1275,9 @@ dependencies = [ [[package]] name = "kube" -version = "0.94.0" +version = "0.94.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65b8611df85a1a2eed6f47bd8bcca4e2b3dc14fbf83658efd01423ca9a13b72a" +checksum = "52ace78a62b361077505f2950bd48aa3e46596fb15350c9c993de15ddfa3cac5" dependencies = [ "k8s-openapi", "kube-client", @@ -1285,9 +1286,9 @@ dependencies = [ [[package]] name = "kube-client" -version = "0.94.0" +version = "0.94.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93c5ee3e48ef9b8d8fdb40ddd935f8addc8a201397e3c7552edae7bc96bc0a78" +checksum = "18ec0fcafd3add30b413b096a61d69b0a37f94d3f95b6f505a57ea3d27cec2a7" dependencies = [ "base64 0.22.1", "bytes", @@ -1300,14 +1301,14 @@ dependencies = [ "http-body-util", "hyper 1.4.1", "hyper-http-proxy", - "hyper-rustls 0.27.2", + "hyper-rustls 0.27.3", "hyper-timeout", "hyper-util", "jsonpath-rust", "k8s-openapi", "kube-core", "pem", - "rustls 0.23.12", + "rustls 0.23.13", "rustls-pemfile 2.1.3", "secrecy", "serde", @@ -1323,9 +1324,9 @@ dependencies = [ [[package]] name = "kube-core" -version = "0.94.0" +version = "0.94.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe6e24d4cc7e32576f363986dc3dfc13e8e90731bd7a467b67fc6c4bfbf8e95" +checksum = 
"a50c095f051dada37740d883b6d47ad0430e95082140718073b773c8a70f231c" dependencies = [ "chrono", "form_urlencoded", @@ -1351,9 +1352,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.155" +version = "0.2.158" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" +checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" [[package]] name = "libloading" @@ -1438,18 +1439,18 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.4" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" +checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" dependencies = [ - "adler", + "adler2", ] [[package]] name = "mio" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4569e456d394deccd22ce1c1913e6ea0e54519f577285001215d33557431afe4" +checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" dependencies = [ "hermit-abi", "libc", @@ -1475,7 +1476,7 @@ dependencies = [ "chrono", "hyper-util", "kube", - "numaflow 0.1.0 (git+https://github.com/numaproj/numaflow-rs.git?branch=main)", + "numaflow 0.1.1", "numaflow-models", "once_cell", "parking_lot", @@ -1484,7 +1485,7 @@ dependencies = [ "prost", "prost-types", "rcgen", - "rustls 0.23.12", + "rustls 0.23.13", "semver", "serde", "serde_json", @@ -1601,8 +1602,8 @@ dependencies = [ [[package]] name = "numaflow" -version = "0.1.0" -source = "git+https://github.com/numaproj/numaflow-rs.git?branch=main#f265a615716ab3ec3adf85e8c24413cc076cd695" +version = "0.1.1" +source = "git+https://github.com/numaproj/numaflow-rs.git?branch=main#d3afabd2fff1d070bb3fd79866c0389f009556b3" 
dependencies = [ "chrono", "futures-util", @@ -1637,9 +1638,9 @@ dependencies = [ [[package]] name = "object" -version = "0.36.2" +version = "0.36.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f203fa8daa7bb185f760ae12bd8e097f63d17041dcdcaf675ac54cdf863170e" +checksum = "084f1a5821ac4c651660a94a7153d27ac9d8a53736203f58b31945ded098070a" dependencies = [ "memchr", ] @@ -1754,9 +1755,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.11" +version = "2.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd53dff83f26735fdc1ca837098ccf133605d794cdae66acfc2bfac3ec809d95" +checksum = "9c73c26c01b8c87956cea613c907c9d6ecffd8d18a2a5908e5de0adfaa185cea" dependencies = [ "memchr", "thiserror", @@ -1765,9 +1766,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.7.11" +version = "2.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a548d2beca6773b1c244554d36fcf8548a8a58e74156968211567250e48e49a" +checksum = "664d22978e2815783adbdd2c588b455b1bd625299ce36b2a99881ac9627e6d8d" dependencies = [ "pest", "pest_generator", @@ -1775,9 +1776,9 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.11" +version = "2.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c93a82e8d145725dcbaf44e5ea887c8a869efdcc28706df2d08c69e17077183" +checksum = "a2d5487022d5d33f4c30d91c22afa240ce2a644e87fe08caad974d4eab6badbe" dependencies = [ "pest", "pest_meta", @@ -1788,9 +1789,9 @@ dependencies = [ [[package]] name = "pest_meta" -version = "2.7.11" +version = "2.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a941429fea7e08bedec25e4f6785b6ffaacc6b755da98df5ef3e7dcf4a124c4f" +checksum = "0091754bbd0ea592c4deb3a122ce8ecbb0753b738aa82bc055fcc2eccc8d8174" dependencies = [ "once_cell", "pest", @@ -1804,7 +1805,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", - "indexmap 2.3.0", + "indexmap 2.5.0", ] [[package]] @@ -1872,9 +1873,9 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.20" +version = "0.2.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" +checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba" dependencies = [ "proc-macro2", "syn", @@ -1914,9 +1915,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.13.1" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13db3d3fde688c61e2446b4d843bc27a7e8af269a69440c0308021dc92333cc" +checksum = "3b2ecbe40f08db5c006b5764a2645f7f3f141ce756412ac9e1dd6087e6d32995" dependencies = [ "bytes", "prost-derive", @@ -1924,9 +1925,9 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.13.1" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bb182580f71dd070f88d01ce3de9f4da5021db7115d2e1c3605a754153b77c1" +checksum = "f8650aabb6c35b860610e9cff5dc1af886c9e25073b7b1712a68972af4281302" dependencies = [ "bytes", "heck 0.5.0", @@ -1945,9 +1946,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.13.1" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18bec9b0adc4eba778b33684b7ba3e7137789434769ee3ce3930463ef904cfca" +checksum = "acf0c195eebb4af52c752bec4f52f645da98b6e92077a04110c7f349477ae5ac" dependencies = [ "anyhow", "itertools 0.13.0", @@ -1958,18 +1959,18 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.13.1" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cee5168b05f49d4b0ca581206eb14a7b22fafd963efe729ac48eb03266e25cc2" +checksum = 
"60caa6738c7369b940c3d49246a8d1749323674c65cb13010134f5c9bad5b519" dependencies = [ "prost", ] [[package]] name = "quote" -version = "1.0.36" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" +checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ "proc-macro2", ] @@ -2044,9 +2045,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4" +checksum = "0884ad60e090bf1345b93da0a5de8923c93884cd03f40dfcfddd3b4bee661853" dependencies = [ "bitflags 2.6.0", ] @@ -2188,18 +2189,18 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustc_version" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ "semver", ] [[package]] name = "rustix" -version = "0.38.34" +version = "0.38.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" +checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" dependencies = [ "bitflags 2.6.0", "errno", @@ -2222,25 +2223,38 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.12" +version = "0.23.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c58f8c84392efc0a126acce10fa59ff7b3d2ac06ab451a33f2741989b806b044" +checksum = "f2dabaac7466917e566adb06783a81ca48944c6898a1b08b9374106dd671f4c8" dependencies = [ "aws-lc-rs", "log", "once_cell", "ring", "rustls-pki-types", - "rustls-webpki 0.102.6", + 
"rustls-webpki 0.102.8", "subtle", "zeroize", ] [[package]] name = "rustls-native-certs" -version = "0.7.1" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5bfb394eeed242e909609f56089eecfe5fda225042e8b171791b9c95f5931e5" +dependencies = [ + "openssl-probe", + "rustls-pemfile 2.1.3", + "rustls-pki-types", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-native-certs" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a88d6d420651b496bdd98684116959239430022a115c1240e6c3993be0b15fba" +checksum = "fcaf18a4f2be7326cd874a5fa579fae794320a0f388d365dca7e480e55f83f8a" dependencies = [ "openssl-probe", "rustls-pemfile 2.1.3", @@ -2270,9 +2284,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" +checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" [[package]] name = "rustls-webpki" @@ -2286,9 +2300,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.6" +version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e6b52d4fda176fd835fdc55a835d4a89b8499cad995885a21149d5ad62f852e" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ "aws-lc-rs", "ring", @@ -2310,11 +2324,11 @@ checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "schannel" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" +checksum = "e9aaafd5a2b6e3d657ff009d82fbd630b6bd54dd4eb06f21693925cdf80f9b8b" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -2374,9 +2388,9 @@ checksum = 
"61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" [[package]] name = "serde" -version = "1.0.204" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] @@ -2393,9 +2407,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.204" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", @@ -2404,9 +2418,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.122" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784b6203951c57ff748476b126ccb5e8e2959a5c19e5c617ab1956be3dbc68da" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ "itoa", "memchr", @@ -2471,7 +2485,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.3.0", + "indexmap 2.5.0", "itoa", "ryu", "serde", @@ -2482,7 +2496,7 @@ dependencies = [ name = "servesink" version = "0.1.0" dependencies = [ - "numaflow 0.1.0 (git+https://github.com/numaproj/numaflow-rs.git?branch=main)", + "numaflow 0.1.1", "reqwest", "tokio", "tonic", @@ -2644,9 +2658,9 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "syn" -version = "2.0.72" +version = "2.0.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc4b9b9bf2add8093d3f2c0204471e951b2285580335de42f9d2534f3ae7a8af" +checksum = 
"9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed" dependencies = [ "proc-macro2", "quote", @@ -2688,15 +2702,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.11.0" +version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8fcd239983515c23a32fb82099f97d0b11b8c72f654ed659363a95c3dad7a53" +checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" dependencies = [ "cfg-if", "fastrand", "once_cell", "rustix", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -2786,9 +2800,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.39.3" +version = "1.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9babc99b9923bfa4804bd74722ff02c0381021eafa4db9949217e3be8e84fff5" +checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" dependencies = [ "backtrace", "bytes", @@ -2840,16 +2854,16 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.12", + "rustls 0.23.13", "rustls-pki-types", "tokio", ] [[package]] name = "tokio-stream" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" +checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" dependencies = [ "futures-core", "pin-project-lite", @@ -2858,9 +2872,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.11" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" +checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" dependencies = [ "bytes", "futures-core", @@ 
-2896,7 +2910,7 @@ version = "0.22.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "583c44c02ad26b0c3f3066fe629275e50627026c51ac2e595cca4c230ce1ce1d" dependencies = [ - "indexmap 2.3.0", + "indexmap 2.5.0", "serde", "serde_spanned", "toml_datetime", @@ -2905,16 +2919,16 @@ dependencies = [ [[package]] name = "tonic" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38659f4a91aba8598d27821589f5db7dddd94601e7a01b1e485a50e5484c7401" +checksum = "c6f6ba989e4b2c58ae83d862d3a3e27690b6e3ae630d0deb59f3697f32aa88ad" dependencies = [ "async-stream", "async-trait", "axum", "base64 0.22.1", "bytes", - "h2 0.4.5", + "h2 0.4.6", "http 1.1.0", "http-body 1.0.1", "http-body-util", @@ -2935,9 +2949,9 @@ dependencies = [ [[package]] name = "tonic-build" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "568392c5a2bd0020723e3f387891176aabafe36fd9fcd074ad309dfa0c8eb964" +checksum = "fe4ee8877250136bd7e3d2331632810a4df4ea5e004656990d8d66d2f5ee8a67" dependencies = [ "prettyplease", "proc-macro2", @@ -2994,9 +3008,9 @@ checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" [[package]] name = "tower-service" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" @@ -3117,9 +3131,9 @@ checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" [[package]] name = "unicode-ident" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" [[package]] name = 
"unicode-normalization" @@ -3132,9 +3146,9 @@ dependencies = [ [[package]] name = "unicode-segmentation" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" +checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" [[package]] name = "unicode-width" @@ -3210,19 +3224,20 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" +checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" dependencies = [ "cfg-if", + "once_cell", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" +checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" dependencies = [ "bumpalo", "log", @@ -3235,9 +3250,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.42" +version = "0.4.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" +checksum = "61e9300f63a621e96ed275155c108eb6f843b6a26d053f122ab69724559dc8ed" dependencies = [ "cfg-if", "js-sys", @@ -3247,9 +3262,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" +checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3257,9 
+3272,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" +checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2", "quote", @@ -3270,15 +3285,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" +checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" [[package]] name = "web-sys" -version = "0.3.69" +version = "0.3.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" +checksum = "26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0" dependencies = [ "js-sys", "wasm-bindgen", @@ -3351,6 +3366,15 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + [[package]] name = "windows-targets" version = "0.48.5" @@ -3535,17 +3559,3 @@ name = "zeroize" version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" -dependencies = [ - "zeroize_derive", -] - -[[package]] -name = "zeroize_derive" -version = "1.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] diff --git a/rust/monovertex/src/lib.rs 
b/rust/monovertex/src/lib.rs index c1d172adf9..af69199be2 100644 --- a/rust/monovertex/src/lib.rs +++ b/rust/monovertex/src/lib.rs @@ -134,7 +134,7 @@ pub async fn init( .await .map_err(|e| { warn!("Error waiting for source server info file: {:?}", e); - Error::ForwarderError("Error waiting for server info file".to_string()) + Error::ForwarderError(format!("Error waiting for source server info file: {}", e)) })?; let mut source_client = SourceClient::connect(source_config).await?; @@ -142,7 +142,7 @@ pub async fn init( .await .map_err(|e| { warn!("Error waiting for sink server info file: {:?}", e); - Error::ForwarderError("Error waiting for server info file".to_string()) + Error::ForwarderError(format!("Error waiting for sink server info file: {}", e)) })?; let mut sink_client = SinkClient::connect(sink_config).await?; @@ -152,7 +152,7 @@ pub async fn init( .await .map_err(|e| { warn!("Error waiting for transformer server info file: {:?}", e); - Error::ForwarderError("Error waiting for server info file".to_string()) + Error::ForwarderError(format!("Error waiting for transformer server info file: {}", e)) })?; Some(TransformerClient::connect(config).await?) } else { @@ -164,7 +164,7 @@ pub async fn init( .await .map_err(|e| { warn!("Error waiting for fallback sink server info file: {:?}", e); - Error::ForwarderError("Error waiting for server info file".to_string()) + Error::ForwarderError(format!("Error waiting for fallback sink server info file: {}", e)) })?; Some(SinkClient::connect(config).await?) 
} else { diff --git a/rust/monovertex/src/server_info.rs b/rust/monovertex/src/server_info.rs index 7412b2ca9d..225218b158 100644 --- a/rust/monovertex/src/server_info.rs +++ b/rust/monovertex/src/server_info.rs @@ -256,11 +256,12 @@ mod version { // MINIMUM_SUPPORTED_SDK_VERSIONS is a HashMap with SDK language as key and minimum supported version as value static MINIMUM_SUPPORTED_SDK_VERSIONS: Lazy = Lazy::new(|| { // TODO: populate this from a static file and make it part of the release process + // the value of the map matches `minimumSupportedSDKVersions` in pkg/sdkclient/serverinfo/types.go let mut m = HashMap::new(); - m.insert("go".to_string(), "0.7.0-rc2".to_string()); - m.insert("python".to_string(), "0.7.0a1".to_string()); - m.insert("java".to_string(), "0.7.2-0".to_string()); - m.insert("rust".to_string(), "0.0.1".to_string()); + m.insert("go".to_string(), "0.8.0".to_string()); + m.insert("python".to_string(), "0.8.0".to_string()); + m.insert("java".to_string(), "0.8.0".to_string()); + m.insert("rust".to_string(), "0.1.0".to_string()); m }); @@ -402,6 +403,7 @@ mod tests { constraints.insert("python".to_string(), "1.2.0".to_string()); constraints.insert("java".to_string(), "2.0.0".to_string()); constraints.insert("go".to_string(), "0.10.0".to_string()); + constraints.insert("rust".to_string(), "0.1.0".to_string()); constraints } @@ -477,6 +479,30 @@ mod tests { assert!(result.is_err()); } + #[tokio::test] + async fn test_sdk_compatibility_rust_valid() { + let sdk_version = "v0.1.0"; + let sdk_language = "rust"; + + let min_supported_sdk_versions = create_sdk_constraints(); + let result = + check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + + assert!(result.is_ok()); + } + + #[tokio::test] + async fn test_sdk_compatibility_rust_invalid() { + let sdk_version = "0.0.9"; + let sdk_language = "rust"; + + let min_supported_sdk_versions = create_sdk_constraints(); + let result = + check_sdk_compatibility(sdk_version, 
sdk_language, &min_supported_sdk_versions); + + assert!(result.is_err()); + } + #[tokio::test] async fn test_numaflow_compatibility_valid() { let numaflow_version = "1.4.0"; From c6003314c8f77905fbd86ddccab12853ca6c63a1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 13 Sep 2024 22:28:28 -0700 Subject: [PATCH 058/188] chore(deps): bump express from 4.19.2 to 4.21.0 in /ui (#2061) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- ui/yarn.lock | 93 +++++++++++++++++++++++++++------------------------- 1 file changed, 49 insertions(+), 44 deletions(-) diff --git a/ui/yarn.lock b/ui/yarn.lock index 36a04c8dd9..d71fb7d1e6 100644 --- a/ui/yarn.lock +++ b/ui/yarn.lock @@ -3857,10 +3857,10 @@ bluebird@^3.7.2: resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.7.2.tgz#9f229c15be272454ffa973ace0dbee79a1b0c36f" integrity sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg== -body-parser@1.20.2: - version "1.20.2" - resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.20.2.tgz#6feb0e21c4724d06de7ff38da36dad4f57a747fd" - integrity sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA== +body-parser@1.20.3: + version "1.20.3" + resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.20.3.tgz#1953431221c6fb5cd63c4b36d53fab0928e548c6" + integrity sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g== dependencies: bytes "3.1.2" content-type "~1.0.5" @@ -3870,7 +3870,7 @@ body-parser@1.20.2: http-errors "2.0.0" iconv-lite "0.4.24" on-finished "2.4.1" - qs "6.11.0" + qs "6.13.0" raw-body "2.5.2" type-is "~1.6.18" unpipe "1.0.0" @@ -5176,6 +5176,11 @@ encodeurl@~1.0.2: resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-1.0.2.tgz#ad3ff4c86ec2d029322f5a02c3a9a606c95b3f59" integrity 
sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w== +encodeurl@~2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-2.0.0.tgz#7b8ea898077d7e409d3ac45474ea38eaf0857a58" + integrity sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg== + enhanced-resolve@^5.17.1: version "5.17.1" resolved "https://registry.yarnpkg.com/enhanced-resolve/-/enhanced-resolve-5.17.1.tgz#67bfbbcc2f81d511be77d686a90267ef7f898a15" @@ -5791,36 +5796,36 @@ expect@^27.5.1: jest-message-util "^27.5.1" express@^4.17.3: - version "4.19.2" - resolved "https://registry.yarnpkg.com/express/-/express-4.19.2.tgz#e25437827a3aa7f2a827bc8171bbbb664a356465" - integrity sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q== + version "4.21.0" + resolved "https://registry.yarnpkg.com/express/-/express-4.21.0.tgz#d57cb706d49623d4ac27833f1cbc466b668eb915" + integrity sha512-VqcNGcj/Id5ZT1LZ/cfihi3ttTn+NJmkli2eZADigjq29qTlWi/hAQ43t/VLPq8+UX06FCEx3ByOYet6ZFblng== dependencies: accepts "~1.3.8" array-flatten "1.1.1" - body-parser "1.20.2" + body-parser "1.20.3" content-disposition "0.5.4" content-type "~1.0.4" cookie "0.6.0" cookie-signature "1.0.6" debug "2.6.9" depd "2.0.0" - encodeurl "~1.0.2" + encodeurl "~2.0.0" escape-html "~1.0.3" etag "~1.8.1" - finalhandler "1.2.0" + finalhandler "1.3.1" fresh "0.5.2" http-errors "2.0.0" - merge-descriptors "1.0.1" + merge-descriptors "1.0.3" methods "~1.1.2" on-finished "2.4.1" parseurl "~1.3.3" - path-to-regexp "0.1.7" + path-to-regexp "0.1.10" proxy-addr "~2.0.7" - qs "6.11.0" + qs "6.13.0" range-parser "~1.2.1" safe-buffer "5.2.1" - send "0.18.0" - serve-static "1.15.0" + send "0.19.0" + serve-static "1.16.2" setprototypeof "1.2.0" statuses "2.0.1" type-is "~1.6.18" @@ -5959,13 +5964,13 @@ fill-range@^7.1.1: dependencies: to-regex-range "^5.0.1" -finalhandler@1.2.0: - version "1.2.0" - resolved 
"https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.2.0.tgz#7d23fe5731b207b4640e4fcd00aec1f9207a7b32" - integrity sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg== +finalhandler@1.3.1: + version "1.3.1" + resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.3.1.tgz#0c575f1d1d324ddd1da35ad7ece3df7d19088019" + integrity sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ== dependencies: debug "2.6.9" - encodeurl "~1.0.2" + encodeurl "~2.0.0" escape-html "~1.0.3" on-finished "2.4.1" parseurl "~1.3.3" @@ -8054,10 +8059,10 @@ memoize-one@^4.0.0: resolved "https://registry.yarnpkg.com/memoize-one/-/memoize-one-4.1.0.tgz#a2387c58c03fff27ca390c31b764a79addf3f906" integrity sha512-2GApq0yI/b22J2j9rhbrAlsHb0Qcz+7yWxeLG8h+95sl1XPUgeLimQSOdur4Vw7cUhrBHwaUZxWFZueojqNRzA== -merge-descriptors@1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/merge-descriptors/-/merge-descriptors-1.0.1.tgz#b00aaa556dd8b44568150ec9d1b953f3f90cbb61" - integrity sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w== +merge-descriptors@1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/merge-descriptors/-/merge-descriptors-1.0.3.tgz#d80319a65f3c7935351e5cfdac8f9318504dbed5" + integrity sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ== merge-stream@^2.0.0: version "2.0.0" @@ -8638,10 +8643,10 @@ path-scurry@^1.11.1: lru-cache "^10.2.0" minipass "^5.0.0 || ^6.0.2 || ^7.0.0" -path-to-regexp@0.1.7: - version "0.1.7" - resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.7.tgz#df604178005f522f15eb4490e7247a1bfaa67f8c" - integrity sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ== +path-to-regexp@0.1.10: + version "0.1.10" + resolved 
"https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.10.tgz#67e9108c5c0551b9e5326064387de4763c4d5f8b" + integrity sha512-7lf7qcQidTku0Gu3YDPc8DJ1q7OOucfa/BSsIwjuh56VU7katFvuM8hULfkwB3Fns/rsVF7PwPKVw1sl5KQS9w== path-to-regexp@^1.7.0: version "1.8.0" @@ -9394,12 +9399,12 @@ q@^1.1.2: resolved "https://registry.yarnpkg.com/q/-/q-1.5.1.tgz#7e32f75b41381291d04611f1bf14109ac00651d7" integrity sha512-kV/CThkXo6xyFEZUugw/+pIOywXcDbFYgSct5cT3gqlbkBE1SJdwy6UQoZvodiWF/ckQLZyDE/Bu1M6gVu5lVw== -qs@6.11.0: - version "6.11.0" - resolved "https://registry.yarnpkg.com/qs/-/qs-6.11.0.tgz#fd0d963446f7a65e1367e01abd85429453f0c37a" - integrity sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q== +qs@6.13.0: + version "6.13.0" + resolved "https://registry.yarnpkg.com/qs/-/qs-6.13.0.tgz#6ca3bd58439f7e245655798997787b0d88a51906" + integrity sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg== dependencies: - side-channel "^1.0.4" + side-channel "^1.0.6" querystringify@^2.1.1: version "2.2.0" @@ -10112,10 +10117,10 @@ semver@^7.2.1, semver@^7.3.2, semver@^7.3.5, semver@^7.3.7, semver@^7.5.3, semve resolved "https://registry.yarnpkg.com/semver/-/semver-7.6.3.tgz#980f7b5550bc175fb4dc09403085627f9eb33143" integrity sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A== -send@0.18.0: - version "0.18.0" - resolved "https://registry.yarnpkg.com/send/-/send-0.18.0.tgz#670167cc654b05f5aa4a767f9113bb371bc706be" - integrity sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg== +send@0.19.0: + version "0.19.0" + resolved "https://registry.yarnpkg.com/send/-/send-0.19.0.tgz#bbc5a388c8ea6c048967049dbeac0e4a3f09d7f8" + integrity sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw== dependencies: debug "2.6.9" depd "2.0.0" @@ -10158,15 +10163,15 @@ serve-index@^1.9.1: mime-types 
"~2.1.17" parseurl "~1.3.2" -serve-static@1.15.0: - version "1.15.0" - resolved "https://registry.yarnpkg.com/serve-static/-/serve-static-1.15.0.tgz#faaef08cffe0a1a62f60cad0c4e513cff0ac9540" - integrity sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g== +serve-static@1.16.2: + version "1.16.2" + resolved "https://registry.yarnpkg.com/serve-static/-/serve-static-1.16.2.tgz#b6a5343da47f6bdd2673848bf45754941e803296" + integrity sha512-VqpjJZKadQB/PEbEwvFdO43Ax5dFBZ2UECszz8bQ7pi7wt//PWe1P6MN7eCnjsatYtBT6EuiClbjSWP2WrIoTw== dependencies: - encodeurl "~1.0.2" + encodeurl "~2.0.0" escape-html "~1.0.3" parseurl "~1.3.3" - send "0.18.0" + send "0.19.0" set-cookie-parser@^2.4.6: version "2.6.0" From 910ff9b4ec15e4a6d0bea0b790a9ec97bbe7e119 Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Fri, 13 Sep 2024 22:51:04 -0700 Subject: [PATCH 059/188] chore: patch instead of update and bugfix (#2059) Signed-off-by: Derek Wang --- pkg/apis/numaflow/v1alpha1/const.go | 2 - pkg/reconciler/isbsvc/controller.go | 26 +++++------- pkg/reconciler/isbsvc/controller_test.go | 24 ----------- pkg/reconciler/monovertex/controller.go | 2 + pkg/reconciler/monovertex/scaling/scaling.go | 10 +---- pkg/reconciler/pipeline/controller.go | 43 +++++++------------- pkg/reconciler/pipeline/controller_test.go | 15 ------- pkg/reconciler/vertex/controller.go | 2 + pkg/reconciler/vertex/scaling/scaling.go | 10 +---- 9 files changed, 32 insertions(+), 102 deletions(-) diff --git a/pkg/apis/numaflow/v1alpha1/const.go b/pkg/apis/numaflow/v1alpha1/const.go index d0e9eb62f0..e36ec9bd34 100644 --- a/pkg/apis/numaflow/v1alpha1/const.go +++ b/pkg/apis/numaflow/v1alpha1/const.go @@ -41,8 +41,6 @@ const ( KeyPauseTimestamp = "numaflow.numaproj.io/pause-timestamp" KeyDefaultContainer = "kubectl.kubernetes.io/default-container" - RemovePauseTimestampPatch = `[{"op": "remove", "path": "/metadata/annotations/numaflow.numaproj.io~1pause-timestamp"}]` - // ID key in the header of 
sources like http KeyMetaID = "X-Numaflow-Id" KeyMetaEventTime = "X-Numaflow-Event-Time" diff --git a/pkg/reconciler/isbsvc/controller.go b/pkg/reconciler/isbsvc/controller.go index 1ab7d4e79b..d94e14424d 100644 --- a/pkg/reconciler/isbsvc/controller.go +++ b/pkg/reconciler/isbsvc/controller.go @@ -18,17 +18,20 @@ package isbsvc import ( "context" + "strings" "go.uber.org/zap" "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/yaml" dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" "github.com/numaproj/numaflow/pkg/reconciler" @@ -58,7 +61,7 @@ func (r *interStepBufferServiceReconciler) Reconcile(ctx context.Context, req ct isbSvc := &dfv1.InterStepBufferService{} if err := r.client.Get(ctx, req.NamespacedName, isbSvc); err != nil { if apierrors.IsNotFound(err) { - return reconcile.Result{}, nil + return ctrl.Result{}, nil } r.logger.Errorw("Unable to get ISB Service", zap.Any("request", req), zap.Error(err)) return ctrl.Result{}, err @@ -69,14 +72,15 @@ func (r *interStepBufferServiceReconciler) Reconcile(ctx context.Context, req ct if reconcileErr != nil { log.Errorw("Reconcile error", zap.Error(reconcileErr)) } - if r.needsUpdate(isbSvc, isbSvcCopy) { - // Update with a DeepCopy because .Status will be cleaned up. 
- if err := r.client.Update(ctx, isbSvcCopy.DeepCopy()); err != nil { - return reconcile.Result{}, err + if !equality.Semantic.DeepEqual(isbSvc.Finalizers, isbSvcCopy.Finalizers) { + patchYaml := "metadata:\n finalizers: [" + strings.Join(isbSvcCopy.Finalizers, ",") + "]" + patchJson, _ := yaml.YAMLToJSON([]byte(patchYaml)) + if err := r.client.Patch(ctx, isbSvc, client.RawPatch(types.MergePatchType, []byte(patchJson))); err != nil { + return ctrl.Result{}, err } } if err := r.client.Status().Update(ctx, isbSvcCopy); err != nil { - return reconcile.Result{}, err + return ctrl.Result{}, err } return ctrl.Result{}, reconcileErr } @@ -122,16 +126,6 @@ func (r *interStepBufferServiceReconciler) reconcile(ctx context.Context, isbSvc return installer.Install(ctx, isbSvc, r.client, r.kubeClient, r.config, log, r.recorder) } -func (r *interStepBufferServiceReconciler) needsUpdate(old, new *dfv1.InterStepBufferService) bool { - if old == nil { - return true - } - if !equality.Semantic.DeepEqual(old.Finalizers, new.Finalizers) { - return true - } - return false -} - func needsFinalizer(isbSvc *dfv1.InterStepBufferService) bool { if isbSvc.Spec.Redis != nil && isbSvc.Spec.Redis.Native != nil && isbSvc.Spec.Redis.Native.Persistence != nil { return true diff --git a/pkg/reconciler/isbsvc/controller_test.go b/pkg/reconciler/isbsvc/controller_test.go index 82aee9d90f..2a24ec7c69 100644 --- a/pkg/reconciler/isbsvc/controller_test.go +++ b/pkg/reconciler/isbsvc/controller_test.go @@ -202,42 +202,18 @@ func TestReconcileJetStream(t *testing.T) { func TestNeedsUpdate(t *testing.T) { t.Run("needs redis update", func(t *testing.T) { testIsbs := nativeRedisIsbs.DeepCopy() - cl := fake.NewClientBuilder().Build() - r := &interStepBufferServiceReconciler{ - client: cl, - scheme: scheme.Scheme, - config: reconciler.FakeGlobalConfig(t, fakeGlobalISBSvcConfig), - logger: zaptest.NewLogger(t).Sugar(), - } - assert.False(t, r.needsUpdate(nativeRedisIsbs, testIsbs)) 
controllerutil.AddFinalizer(testIsbs, finalizerName) assert.True(t, contains(testIsbs.Finalizers, finalizerName)) - assert.True(t, r.needsUpdate(nativeRedisIsbs, testIsbs)) controllerutil.RemoveFinalizer(testIsbs, finalizerName) assert.False(t, contains(testIsbs.Finalizers, finalizerName)) - assert.False(t, r.needsUpdate(nativeRedisIsbs, testIsbs)) - testIsbs.Status.MarkConfigured() - assert.False(t, r.needsUpdate(nativeRedisIsbs, testIsbs)) }) t.Run("needs jetstream update", func(t *testing.T) { testIsbs := jetStreamIsbs.DeepCopy() - cl := fake.NewClientBuilder().Build() - r := &interStepBufferServiceReconciler{ - client: cl, - scheme: scheme.Scheme, - config: reconciler.FakeGlobalConfig(t, fakeGlobalISBSvcConfig), - logger: zaptest.NewLogger(t).Sugar(), - } - assert.False(t, r.needsUpdate(nativeRedisIsbs, testIsbs)) controllerutil.AddFinalizer(testIsbs, finalizerName) assert.True(t, contains(testIsbs.Finalizers, finalizerName)) - assert.True(t, r.needsUpdate(nativeRedisIsbs, testIsbs)) controllerutil.RemoveFinalizer(testIsbs, finalizerName) assert.False(t, contains(testIsbs.Finalizers, finalizerName)) - assert.False(t, r.needsUpdate(nativeRedisIsbs, testIsbs)) - testIsbs.Status.MarkConfigured() - assert.False(t, r.needsUpdate(nativeRedisIsbs, testIsbs)) }) } diff --git a/pkg/reconciler/monovertex/controller.go b/pkg/reconciler/monovertex/controller.go index 9aca247a05..3fbfb3c1ab 100644 --- a/pkg/reconciler/monovertex/controller.go +++ b/pkg/reconciler/monovertex/controller.go @@ -236,6 +236,8 @@ func (mr *monoVertexReconciler) orchestratePods(ctx context.Context, monoVtx *df monoVtx.Status.CurrentHash = monoVtx.Status.UpdateHash } else { // Update scenario if updatedReplicas >= desiredReplicas { + monoVtx.Status.UpdatedReplicas = uint32(desiredReplicas) + monoVtx.Status.CurrentHash = monoVtx.Status.UpdateHash return nil } diff --git a/pkg/reconciler/monovertex/scaling/scaling.go b/pkg/reconciler/monovertex/scaling/scaling.go index 481408672c..0b35265190 100644 
--- a/pkg/reconciler/monovertex/scaling/scaling.go +++ b/pkg/reconciler/monovertex/scaling/scaling.go @@ -19,7 +19,6 @@ package scaling import ( "container/list" "context" - "encoding/json" "fmt" "math" "strings" @@ -30,7 +29,6 @@ import ( "go.uber.org/zap" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" - "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" @@ -359,12 +357,8 @@ func (s *Scaler) Start(ctx context.Context) error { func (s *Scaler) patchMonoVertexReplicas(ctx context.Context, monoVtx *dfv1.MonoVertex, desiredReplicas int32) error { log := logging.FromContext(ctx) origin := monoVtx.Spec.Replicas - monoVtx.Spec.Replicas = ptr.To[int32](desiredReplicas) - body, err := json.Marshal(monoVtx) - if err != nil { - return fmt.Errorf("failed to marshal MonoVertex object to json, %w", err) - } - if err := s.client.Patch(ctx, monoVtx, client.RawPatch(types.MergePatchType, body)); err != nil && !apierrors.IsNotFound(err) { + patchJson := fmt.Sprintf(`{"spec":{"replicas":%d}}`, desiredReplicas) + if err := s.client.Patch(ctx, monoVtx, client.RawPatch(types.MergePatchType, []byte(patchJson))); err != nil && !apierrors.IsNotFound(err) { return fmt.Errorf("failed to patch MonoVertex replicas, %w", err) } log.Infow("Auto scaling - mono vertex replicas changed.", zap.Int32p("from", origin), zap.Int32("to", desiredReplicas), zap.String("namespace", monoVtx.Namespace), zap.String("vertex", monoVtx.Name)) diff --git a/pkg/reconciler/pipeline/controller.go b/pkg/reconciler/pipeline/controller.go index d8b989f2d6..b2f99e7b1d 100644 --- a/pkg/reconciler/pipeline/controller.go +++ b/pkg/reconciler/pipeline/controller.go @@ -18,7 +18,6 @@ package pipeline import ( "context" - "encoding/json" "fmt" "strings" "time" @@ -40,6 +39,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 
"sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/yaml" dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" daemonclient "github.com/numaproj/numaflow/pkg/daemon/client" @@ -51,6 +51,8 @@ import ( const ( finalizerName = dfv1.ControllerPipeline + + pauseTimestampPath = `/metadata/annotations/numaflow.numaproj.io~1pause-timestamp` ) // pipelineReconciler reconciles a pipeline object. @@ -85,9 +87,10 @@ func (r *pipelineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c log.Errorw("Reconcile error", zap.Error(reconcileErr)) } plCopy.Status.LastUpdated = metav1.Now() - if needsUpdate(pl, plCopy) { - // Update with a DeepCopy because .Status will be cleaned up. - if err := r.client.Update(ctx, plCopy.DeepCopy()); err != nil { + if !equality.Semantic.DeepEqual(pl.Finalizers, plCopy.Finalizers) { + patchYaml := "metadata:\n finalizers: [" + strings.Join(plCopy.Finalizers, ",") + "]" + patchJson, _ := yaml.YAMLToJSON([]byte(patchYaml)) + if err := r.client.Patch(ctx, pl, client.RawPatch(types.MergePatchType, []byte(patchJson))); err != nil { return result, err } } @@ -292,7 +295,9 @@ func (r *pipelineReconciler) reconcileFixedResources(ctx context.Context, pl *df r.recorder.Eventf(pl, corev1.EventTypeNormal, "CreateVertexSuccess", "Created vertex %s successfully", vertexName) } else { if oldObj.GetAnnotations()[dfv1.KeyHash] != newObj.GetAnnotations()[dfv1.KeyHash] { // need to update + originalReplicas := oldObj.Spec.Replicas oldObj.Spec = newObj.Spec + oldObj.Spec.Replicas = originalReplicas oldObj.Annotations[dfv1.KeyHash] = newObj.GetAnnotations()[dfv1.KeyHash] if err := r.client.Update(ctx, &oldObj); err != nil { r.recorder.Eventf(pl, corev1.EventTypeWarning, "UpdateVertexFailed", "Failed to update vertex: %w", err.Error()) @@ -588,17 +593,6 @@ func (r *pipelineReconciler) cleanUpBuffers(ctx context.Context, pl *dfv1.Pipeli return nil } -func needsUpdate(old, new *dfv1.Pipeline) bool { - if old == nil { - return true - } - if 
!equality.Semantic.DeepEqual(old.Finalizers, new.Finalizers) { - return true - } - - return false -} - func buildVertices(pl *dfv1.Pipeline) map[string]dfv1.Vertex { result := make(map[string]dfv1.Vertex) for _, v := range pl.Spec.Vertices { @@ -814,7 +808,7 @@ func (r *pipelineReconciler) updateDesiredState(ctx context.Context, pl *dfv1.Pi func (r *pipelineReconciler) resumePipeline(ctx context.Context, pl *dfv1.Pipeline) (bool, error) { // reset pause timestamp if pl.GetAnnotations()[dfv1.KeyPauseTimestamp] != "" { - err := r.client.Patch(ctx, pl, client.RawPatch(types.JSONPatchType, []byte(dfv1.RemovePauseTimestampPatch))) + err := r.client.Patch(ctx, pl, client.RawPatch(types.JSONPatchType, []byte(`[{"op": "remove", "path": "`+pauseTimestampPath+`"}]`))) if err != nil { if apierrors.IsNotFound(err) { return false, nil // skip pipeline if it can't be found @@ -837,13 +831,8 @@ func (r *pipelineReconciler) resumePipeline(ctx context.Context, pl *dfv1.Pipeli func (r *pipelineReconciler) pausePipeline(ctx context.Context, pl *dfv1.Pipeline) (bool, error) { // check that annotations / pause timestamp annotation exist if pl.GetAnnotations() == nil || pl.GetAnnotations()[dfv1.KeyPauseTimestamp] == "" { - pl.SetAnnotations(map[string]string{dfv1.KeyPauseTimestamp: time.Now().Format(time.RFC3339)}) - body, err := json.Marshal(pl) - if err != nil { - return false, err - } - err = r.client.Patch(ctx, pl, client.RawPatch(types.MergePatchType, body)) - if err != nil && !apierrors.IsNotFound(err) { + patchJson := `[{"op": "add", "path": "` + pauseTimestampPath + `", "value": "` + time.Now().Format(time.RFC3339) + `"}]` + if err := r.client.Patch(ctx, pl, client.RawPatch(types.JSONPatchType, []byte(patchJson))); err != nil && !apierrors.IsNotFound(err) { return true, err } } @@ -924,12 +913,8 @@ func (r *pipelineReconciler) scaleVertex(ctx context.Context, pl *dfv1.Pipeline, } } } - vertex.Spec.Replicas = ptr.To[int32](scaleTo) - body, err := json.Marshal(vertex) - if err != 
nil { - return false, err - } - err = r.client.Patch(ctx, &vertex, client.RawPatch(types.MergePatchType, body)) + patchJson := fmt.Sprintf(`{"spec":{"replicas":%d}}`, scaleTo) + err = r.client.Patch(ctx, &vertex, client.RawPatch(types.MergePatchType, []byte(patchJson))) if err != nil && !apierrors.IsNotFound(err) { return false, err } diff --git a/pkg/reconciler/pipeline/controller_test.go b/pkg/reconciler/pipeline/controller_test.go index e130f49656..aafff27cc3 100644 --- a/pkg/reconciler/pipeline/controller_test.go +++ b/pkg/reconciler/pipeline/controller_test.go @@ -37,7 +37,6 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" "github.com/numaproj/numaflow/pkg/reconciler" @@ -352,8 +351,6 @@ func Test_pauseAndResumePipeline(t *testing.T) { v, err := r.findExistingVertices(ctx, testObj) assert.NoError(t, err) assert.Equal(t, int32(0), *v[testObj.Name+"-"+testObj.Spec.Vertices[0].Name].Spec.Replicas) - assert.NotNil(t, testObj.Annotations[dfv1.KeyPauseTimestamp]) - testObj.Annotations[dfv1.KeyPauseTimestamp] = "" _, err = r.resumePipeline(ctx, testObj) assert.NoError(t, err) v, err = r.findExistingVertices(ctx, testObj) @@ -380,8 +377,6 @@ func Test_pauseAndResumePipeline(t *testing.T) { assert.NoError(t, err) _, err = r.findExistingVertices(ctx, testObj) assert.NoError(t, err) - assert.NotNil(t, testObj.Annotations[dfv1.KeyPauseTimestamp]) - testObj.Annotations[dfv1.KeyPauseTimestamp] = "" _, err = r.resumePipeline(ctx, testObj) assert.NoError(t, err) v, err := r.findExistingVertices(ctx, testObj) @@ -560,16 +555,6 @@ func Test_buildISBBatchJob(t *testing.T) { }) } -func Test_needsUpdate(t *testing.T) { - testObj := testPipeline.DeepCopy() - assert.True(t, needsUpdate(nil, testObj)) - assert.False(t, needsUpdate(testPipeline, testObj)) - 
controllerutil.AddFinalizer(testObj, finalizerName) - assert.True(t, needsUpdate(testPipeline, testObj)) - testobj1 := testObj.DeepCopy() - assert.False(t, needsUpdate(testObj, testobj1)) -} - func Test_cleanupBuffers(t *testing.T) { cl := fake.NewClientBuilder().Build() ctx := context.TODO() diff --git a/pkg/reconciler/vertex/controller.go b/pkg/reconciler/vertex/controller.go index 8789b5d89a..20945639ab 100644 --- a/pkg/reconciler/vertex/controller.go +++ b/pkg/reconciler/vertex/controller.go @@ -259,6 +259,8 @@ func (r *vertexReconciler) orchestratePods(ctx context.Context, vertex *dfv1.Ver vertex.Status.CurrentHash = vertex.Status.UpdateHash } else { // Update scenario if updatedReplicas >= desiredReplicas { + vertex.Status.UpdatedReplicas = uint32(desiredReplicas) + vertex.Status.CurrentHash = vertex.Status.UpdateHash return nil } diff --git a/pkg/reconciler/vertex/scaling/scaling.go b/pkg/reconciler/vertex/scaling/scaling.go index eed5981e89..139e189f5f 100644 --- a/pkg/reconciler/vertex/scaling/scaling.go +++ b/pkg/reconciler/vertex/scaling/scaling.go @@ -19,7 +19,6 @@ package scaling import ( "container/list" "context" - "encoding/json" "fmt" "math" "strings" @@ -30,7 +29,6 @@ import ( "go.uber.org/zap" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" - "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" @@ -499,12 +497,8 @@ loop: func (s *Scaler) patchVertexReplicas(ctx context.Context, vertex *dfv1.Vertex, desiredReplicas int32) error { log := logging.FromContext(ctx) origin := vertex.Spec.Replicas - vertex.Spec.Replicas = ptr.To[int32](desiredReplicas) - body, err := json.Marshal(vertex) - if err != nil { - return fmt.Errorf("failed to marshal vertex object to json, %w", err) - } - if err := s.client.Patch(ctx, vertex, client.RawPatch(types.MergePatchType, body)); err != nil && !apierrors.IsNotFound(err) { + patchJson := 
fmt.Sprintf(`{"spec":{"replicas":%d}}`, desiredReplicas) + if err := s.client.Patch(ctx, vertex, client.RawPatch(types.MergePatchType, []byte(patchJson))); err != nil && !apierrors.IsNotFound(err) { return fmt.Errorf("failed to patch vertex replicas, %w", err) } log.Infow("Auto scaling - vertex replicas changed.", zap.Int32p("from", origin), zap.Int32("to", desiredReplicas), zap.String("namespace", vertex.Namespace), zap.String("pipeline", vertex.Spec.PipelineName), zap.String("vertex", vertex.Spec.Name)) From 692fbeec1b94d8ff66a82b9c3fe5d8242962750b Mon Sep 17 00:00:00 2001 From: Sidhant Kohli Date: Mon, 16 Sep 2024 23:52:54 -0700 Subject: [PATCH 060/188] fix: skip updating phase for resource check (#2065) Signed-off-by: Sidhant Kohli --- pkg/reconciler/pipeline/controller.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/reconciler/pipeline/controller.go b/pkg/reconciler/pipeline/controller.go index b2f99e7b1d..5776c2873c 100644 --- a/pkg/reconciler/pipeline/controller.go +++ b/pkg/reconciler/pipeline/controller.go @@ -610,7 +610,7 @@ func buildVertices(pl *dfv1.Pipeline) map[string]dfv1.Vertex { copyVertexTemplate(pl, vCopy) copyVertexLimits(pl, vCopy) replicas := int32(1) - // If the desired phase is pause or we are in the middle of pausing we should not start any vertex replicas + // If the desired phase is paused or we are in the middle of pausing we should not start any vertex replicas if isLifecycleChange(pl) { replicas = int32(0) } else if v.IsReduceUDF() { @@ -952,7 +952,7 @@ func (r *pipelineReconciler) checkChildrenResourceStatus(ctx context.Context, pi defer func() { for _, c := range pipeline.Status.Conditions { if c.Status != metav1.ConditionTrue { - pipeline.Status.SetPhase(pipeline.Spec.Lifecycle.GetDesiredPhase(), "Degraded: "+c.Message) + pipeline.Status.Message = "Degraded: " + c.Message return } } From cbe9054f8507639dac3a48b7b8eeb9e236ce706e Mon Sep 17 00:00:00 2001 From: Vigith Maurice Date: Tue, 17 Sep 2024 
10:37:07 -0700 Subject: [PATCH 061/188] doc: example for PVC (#2067) Signed-off-by: Vigith Maurice --- .../reference/configuration/volumes.md | 25 +++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/docs/user-guide/reference/configuration/volumes.md b/docs/user-guide/reference/configuration/volumes.md index 3e34adb123..4ac3affbf6 100644 --- a/docs/user-guide/reference/configuration/volumes.md +++ b/docs/user-guide/reference/configuration/volumes.md @@ -47,3 +47,28 @@ spec: - mountPath: /path/to/my-sink-config name: my-udsink-config ``` + +## PVC Example + +Example to show how to attach a Persistent Volume Claim (PVC) to a container. + +```yaml +apiVersion: numaflow.numaproj.io/v1alpha1 +kind: Pipeline +metadata: + name: my-pipeline +spec: + vertices: + - name: my-source + volumes: + - name: mypd + persistentVolumeClaim: + claimName: myclaim + source: + udsource: + container: + image: my-source:latest + volumeMounts: + - mountPath: /path/to/my-source-config + name: mypd +``` \ No newline at end of file From 9995ff813d39489d22c94e574adae9e6a8a4ebe8 Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Thu, 19 Sep 2024 00:01:26 -0700 Subject: [PATCH 062/188] feat: allow customization on readyz and livez config (#2068) Signed-off-by: Derek Wang --- api/json-schema/schema.json | 43 + api/openapi-spec/swagger.json | 43 + ...w.numaproj.io_interstepbufferservices.yaml | 252 +++ .../numaflow.numaproj.io_monovertices.yaml | 252 +++ .../full/numaflow.numaproj.io_pipelines.yaml | 576 ++++++ .../full/numaflow.numaproj.io_vertices.yaml | 288 +++ config/install.yaml | 1368 +++++++++++++++ config/namespace-install.yaml | 1368 +++++++++++++++ docs/APIs.md | 209 +++ .../configuration/liveness-and-readiness.md | 117 ++ go.mod | 20 +- go.sum | 40 +- mkdocs.yml | 1 + pkg/apis/numaflow/v1alpha1/const.go | 14 + .../numaflow/v1alpha1/container_template.go | 38 + .../v1alpha1/container_template_test.go | 146 ++ pkg/apis/numaflow/v1alpha1/generated.pb.go | 1548 +++++++++++------ 
pkg/apis/numaflow/v1alpha1/generated.proto | 39 + .../numaflow/v1alpha1/mono_vertex_types.go | 26 +- .../v1alpha1/mono_vertex_types_test.go | 24 + .../numaflow/v1alpha1/openapi_generated.go | 73 +- pkg/apis/numaflow/v1alpha1/probe.go | 74 + pkg/apis/numaflow/v1alpha1/probe_test.go | 199 +++ pkg/apis/numaflow/v1alpha1/sink.go | 14 +- pkg/apis/numaflow/v1alpha1/sink_test.go | 22 + pkg/apis/numaflow/v1alpha1/source.go | 30 +- pkg/apis/numaflow/v1alpha1/source_test.go | 62 +- pkg/apis/numaflow/v1alpha1/udf.go | 15 +- pkg/apis/numaflow/v1alpha1/udf_test.go | 11 + .../v1alpha1/user_defined_container.go | 4 + pkg/apis/numaflow/v1alpha1/vertex_types.go | 27 +- .../numaflow/v1alpha1/vertex_types_test.go | 24 + pkg/shared/clients/nats/test/server.go | 1 + pkg/sources/nats/nats_test.go | 4 +- rust/numaflow-models/src/models/container.rs | 6 + .../src/models/container_template.rs | 6 + rust/numaflow-models/src/models/mod.rs | 2 + rust/numaflow-models/src/models/probe.rs | 54 + test/e2e/functional_test.go | 6 +- test/fixtures/util.go | 14 +- 40 files changed, 6463 insertions(+), 597 deletions(-) create mode 100644 docs/user-guide/reference/configuration/liveness-and-readiness.md create mode 100644 pkg/apis/numaflow/v1alpha1/probe.go create mode 100644 pkg/apis/numaflow/v1alpha1/probe_test.go create mode 100644 rust/numaflow-models/src/models/probe.rs diff --git a/api/json-schema/schema.json b/api/json-schema/schema.json index fefd397d8c..24ca429580 100644 --- a/api/json-schema/schema.json +++ b/api/json-schema/schema.json @@ -17939,6 +17939,12 @@ "imagePullPolicy": { "type": "string" }, + "livenessProbe": { + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.Probe" + }, + "readinessProbe": { + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.Probe" + }, "resources": { "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements" }, @@ -17972,6 +17978,12 @@ "imagePullPolicy": { "type": "string" }, + "livenessProbe": { + "$ref": 
"#/definitions/io.numaproj.numaflow.v1alpha1.Probe" + }, + "readinessProbe": { + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.Probe" + }, "resources": { "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements" }, @@ -19739,6 +19751,37 @@ }, "type": "object" }, + "io.numaproj.numaflow.v1alpha1.Probe": { + "description": "Probe is used to customize the configuration for Readiness and Liveness probes.", + "properties": { + "failureThreshold": { + "description": "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.", + "format": "int32", + "type": "integer" + }, + "initialDelaySeconds": { + "description": "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "format": "int32", + "type": "integer" + }, + "periodSeconds": { + "description": "How often (in seconds) to perform the probe.", + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "description": "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.", + "format": "int32", + "type": "integer" + }, + "timeoutSeconds": { + "description": "Number of seconds after which the probe times out. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, "io.numaproj.numaflow.v1alpha1.RedisBufferService": { "properties": { "external": { diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index f688e51c8d..e2e29fbabf 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -17944,6 +17944,12 @@ "imagePullPolicy": { "type": "string" }, + "livenessProbe": { + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.Probe" + }, + "readinessProbe": { + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.Probe" + }, "resources": { "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements" }, @@ -17977,6 +17983,12 @@ "imagePullPolicy": { "type": "string" }, + "livenessProbe": { + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.Probe" + }, + "readinessProbe": { + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.Probe" + }, "resources": { "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements" }, @@ -19725,6 +19737,37 @@ } } }, + "io.numaproj.numaflow.v1alpha1.Probe": { + "description": "Probe is used to customize the configuration for Readiness and Liveness probes.", + "type": "object", + "properties": { + "failureThreshold": { + "description": "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.", + "type": "integer", + "format": "int32" + }, + "initialDelaySeconds": { + "description": "Number of seconds after the container has started before liveness probes are initiated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "type": "integer", + "format": "int32" + }, + "periodSeconds": { + "description": "How often (in seconds) to perform the probe.", + "type": "integer", + "format": "int32" + }, + "successThreshold": { + "description": "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.", + "type": "integer", + "format": "int32" + }, + "timeoutSeconds": { + "description": "Number of seconds after which the probe times out. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "type": "integer", + "format": "int32" + } + } + }, "io.numaproj.numaflow.v1alpha1.RedisBufferService": { "type": "object", "properties": { diff --git a/config/base/crds/full/numaflow.numaproj.io_interstepbufferservices.yaml b/config/base/crds/full/numaflow.numaproj.io_interstepbufferservices.yaml index acc540cd10..0fb6ff2583 100644 --- a/config/base/crds/full/numaflow.numaproj.io_interstepbufferservices.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_interstepbufferservices.yaml @@ -527,6 +527,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -747,6 +783,42 @@ spec: type: array imagePullPolicy: type: string + 
livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -947,6 +1019,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -1702,6 +1810,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + 
type: integer + type: object resources: properties: claims: @@ -1891,6 +2035,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -2091,6 +2271,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -2355,6 +2571,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + 
periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: diff --git a/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml b/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml index 02ae281ebd..4d8a23ba14 100644 --- a/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml @@ -532,6 +532,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -1108,6 +1144,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -1315,6 +1387,42 @@ spec: type: array imagePullPolicy: type: string + 
livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -3290,6 +3398,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -3727,6 +3871,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 
+ type: integer + type: object resources: properties: claims: @@ -4446,6 +4626,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -4657,6 +4873,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: diff --git a/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml b/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml index 40db1f403e..d262a8664b 100644 --- a/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml @@ -246,6 +246,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + 
periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -1581,6 +1617,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -1788,6 +1860,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -2491,6 +2599,42 @@ spec: type: array imagePullPolicy: type: string + 
livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -3220,6 +3364,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -3427,6 +3607,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + 
type: integer + type: object resources: properties: claims: @@ -4124,6 +4340,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -4331,6 +4583,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -5030,6 +5318,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + 
periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -5237,6 +5561,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -6210,6 +6570,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -7246,6 +7642,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + 
readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -7683,6 +8115,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -8402,6 +8870,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -8613,6 +9117,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: 
int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -8861,6 +9401,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: diff --git a/config/base/crds/full/numaflow.numaproj.io_vertices.yaml b/config/base/crds/full/numaflow.numaproj.io_vertices.yaml index 1f02fd2b35..a1756ec313 100644 --- a/config/base/crds/full/numaflow.numaproj.io_vertices.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_vertices.yaml @@ -532,6 +532,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + 
periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -816,6 +852,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -1797,6 +1869,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -2833,6 +2941,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + 
readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -3270,6 +3414,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -3989,6 +4169,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -4200,6 +4416,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: 
int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -4525,6 +4777,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: diff --git a/config/install.yaml b/config/install.yaml index c3db75767a..c6551b513a 100644 --- a/config/install.yaml +++ b/config/install.yaml @@ -526,6 +526,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + 
type: object resources: properties: claims: @@ -746,6 +782,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -946,6 +1018,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -1701,6 +1809,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + 
format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -1890,6 +2034,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -2090,6 +2270,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -2354,6 +2570,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + 
properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -3176,6 +3428,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -3752,6 +4040,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -3959,6 +4283,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + 
successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -5934,6 +6294,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -6371,6 +6767,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -7090,6 +7522,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + 
format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -7301,6 +7769,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -8519,6 +9023,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: 
properties: claims: @@ -9854,6 +10394,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -10061,6 +10637,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -10764,6 +11376,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + 
type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -11493,6 +12141,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -11700,6 +12384,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -12397,6 +13117,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + 
failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -12604,6 +13360,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -13303,6 +14095,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -13510,6 +14338,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + 
successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -14483,6 +15347,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -15519,6 +16419,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -15956,6 +16892,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + 
failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -16675,6 +17647,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -16886,6 +17894,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: 
object resources: properties: claims: @@ -17134,6 +18178,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -18696,6 +19776,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -18980,6 +20096,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + 
format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -19961,6 +21113,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -20997,6 +22185,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -21434,6 +22658,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + 
readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -22153,6 +23413,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -22364,6 +23660,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -22689,6 +24021,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + 
format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: diff --git a/config/namespace-install.yaml b/config/namespace-install.yaml index 5162dc6ad7..07fc13628d 100644 --- a/config/namespace-install.yaml +++ b/config/namespace-install.yaml @@ -526,6 +526,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -746,6 +782,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + 
timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -946,6 +1018,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -1701,6 +1809,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -1890,6 +2034,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + 
format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -2090,6 +2270,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -2354,6 +2570,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -3176,6 +3428,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + 
type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -3752,6 +4040,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -3959,6 +4283,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -5934,6 +6294,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + 
periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -6371,6 +6767,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -7090,6 +7522,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -7301,6 +7769,42 @@ spec: type: string imagePullPolicy: type: string + 
livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -8519,6 +9023,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -9854,6 +10394,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 
+ type: integer + type: object resources: properties: claims: @@ -10061,6 +10637,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -10764,6 +11376,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -11493,6 +12141,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: 
integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -11700,6 +12384,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -12397,6 +13117,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -12604,6 +13360,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + 
type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -13303,6 +14095,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -13510,6 +14338,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -14483,6 +15347,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + 
periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -15519,6 +16419,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -15956,6 +16892,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -16675,6 +17647,42 @@ spec: type: string imagePullPolicy: type: 
string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -16886,6 +17894,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -17134,6 +18178,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + 
format: int32 + type: integer + type: object resources: properties: claims: @@ -18696,6 +19776,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -18980,6 +20096,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -19961,6 +21113,42 @@ spec: type: array imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: 
int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -20997,6 +22185,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -21434,6 +22658,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -22153,6 +23413,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + 
type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -22364,6 +23660,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: @@ -22689,6 +24021,42 @@ spec: type: string imagePullPolicy: type: string + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object resources: properties: claims: diff --git a/docs/APIs.md b/docs/APIs.md index d5deaa76b2..7b698883e7 100644 --- a/docs/APIs.md +++ b/docs/APIs.md @@ -1597,6 +1597,36 @@ Kubernetes core/v1.PullPolicy + + + + +readinessProbe
+ Probe + + + + +(Optional) + + + + + + + + +livenessProbe
+ Probe + + + + +(Optional) + + + + @@ -1730,6 +1760,36 @@ Kubernetes core/v1.SecurityContext + + + + +readinessProbe
+ Probe + + + + +(Optional) + + + + + + + + +livenessProbe
+ Probe + + + + +(Optional) + + + + @@ -8048,6 +8108,155 @@ successfully drained. +

+ +Probe +

+ +

+ +(Appears on: +Container, +ContainerTemplate) +

+ +

+ +

+ +Probe is used to customize the configuration for Readiness and Liveness +probes. +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +initialDelaySeconds
int32 +
+ +(Optional) +

+ +Number of seconds after the container has started before liveness probes +are initiated. More info: +https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +

+ +
+ +timeoutSeconds
int32 +
+ +(Optional) +

+ +Number of seconds after which the probe times out. More info: +https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +

+ +
+ +periodSeconds
int32 +
+ +(Optional) +

+ +How often (in seconds) to perform the probe. +

+ +
+ +successThreshold
int32 +
+ +(Optional) +

+ +Minimum consecutive successes for the probe to be considered successful +after having failed. Defaults to 1. Must be 1 for liveness and startup. +Minimum value is 1. +

+ +
+ +failureThreshold
int32 +
+ +(Optional) +

+ +Minimum consecutive failures for the probe to be considered failed after +having succeeded. Defaults to 3. Minimum value is 1. +

+ +
+

RedisBufferService diff --git a/docs/user-guide/reference/configuration/liveness-and-readiness.md b/docs/user-guide/reference/configuration/liveness-and-readiness.md new file mode 100644 index 0000000000..f4a3dd5d0f --- /dev/null +++ b/docs/user-guide/reference/configuration/liveness-and-readiness.md @@ -0,0 +1,117 @@ +# Liveness and Readiness + +`Liveness` and `Readiness` probes have been pre-configured in the pods orchestrated in Numaflow, including the containers of `Vertex` and `MonoVertex` pods. For these probes, the probe handlers are not allowed to be customized, but the other configurations are. + +- `initialDelaySeconds` +- `timeoutSeconds` +- `periodSeconds` +- `successThreshold` +- `failureThreshold` + +Here is an example for `Pipeline` customization, similar configuration can be applied to containers including `udf`, `udsource`, `transformer`, `udsink` and `fb-udsink`. + +```yaml +apiVersion: numaflow.numaproj.io/v1alpha1 +kind: Pipeline +metadata: + name: my-pipeline +spec: + vertices: + - name: my-source + containerTemplate: # For "numa" container + readinessProbe: + initialDelaySeconds: 30 + periodSeconds: 60 + livenessProbe: + initialDelaySeconds: 60 + periodSeconds: 120 + volumes: + - name: my-udsource-config + configMap: + name: udsource-config + source: + udsource: + container: + image: my-source:latest + volumeMounts: + - mountPath: /path/to/my-source-config + name: my-udsource-config + # For User-Defined source + livenessProbe: + initialDelaySeconds: 40 + failureThreshold: 5 + - name: my-udf + containerTemplate: # For "numa" container + readinessProbe: + initialDelaySeconds: 20 + periodSeconds: 60 + livenessProbe: + initialDelaySeconds: 180 + periodSeconds: 60 + timeoutSeconds: 50 + volumes: + - name: my-udf-config + configMap: + name: udf-config + udf: + container: + image: my-function:latest + volumeMounts: + - mountPath: /path/to/my-function-config + name: my-udf-config + # For "udf" + livenessProbe: + initialDelaySeconds: 40 + 
failureThreshold: 5 +``` + +The customization for `numa` container is also available with a [Vertex Template](./pipeline-customization.md#vertices) defined in `spec.templates.vertex`, which is going to be applied to all the vertices of a pipeline. + +A `MonoVertex` example is as below. + +```yaml +apiVersion: numaflow.numaproj.io/v1alpha1 +kind: MonoVertex +metadata: + name: simple-mono-vertex +spec: + containerTemplate: # For "numa" container + readinessProbe: + initialDelaySeconds: 20 + periodSeconds: 60 + livenessProbe: + initialDelaySeconds: 180 + periodSeconds: 60 + source: + udsource: + container: + image: quay.io/numaio/numaflow-java/source-simple-source:stable + # For User-Defined source + livenessProbe: + initialDelaySeconds: 40 + failureThreshold: 5 + timeoutSeconds: 40 + transformer: + container: + image: quay.io/numaio/numaflow-rs/source-transformer-now:stable + # For transformer + livenessProbe: + initialDelaySeconds: 40 + failureThreshold: 5 + sink: + udsink: + container: + image: quay.io/numaio/numaflow-java/simple-sink:stable + # For User-Defined Sink + livenessProbe: + initialDelaySeconds: 40 + failureThreshold: 5 + fallback: + udsink: + container: + image: my-sink:latest + # # For Fallback Sink + livenessProbe: + initialDelaySeconds: 40 + failureThreshold: 5 +``` diff --git a/go.mod b/go.mod index f0dd236bd7..9f54c88017 100644 --- a/go.mod +++ b/go.mod @@ -30,8 +30,8 @@ require ( github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/imdario/mergo v0.3.16 github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe - github.com/nats-io/nats-server/v2 v2.10.17 - github.com/nats-io/nats.go v1.36.0 + github.com/nats-io/nats-server/v2 v2.10.20 + github.com/nats-io/nats.go v1.37.0 github.com/numaproj/numaflow-go v0.8.0 github.com/prometheus/client_golang v1.18.0 github.com/prometheus/client_model v0.5.0 @@ -48,10 +48,10 @@ require ( go.uber.org/goleak v1.3.0 go.uber.org/multierr v1.11.0 go.uber.org/zap v1.26.0 - golang.org/x/crypto v0.24.0 + 
golang.org/x/crypto v0.26.0 golang.org/x/net v0.25.0 golang.org/x/oauth2 v0.20.0 - golang.org/x/sync v0.7.0 + golang.org/x/sync v0.8.0 golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 google.golang.org/grpc v1.59.0 @@ -148,7 +148,7 @@ require ( github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect - github.com/minio/highwayhash v1.0.2 // indirect + github.com/minio/highwayhash v1.0.3 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect @@ -158,7 +158,7 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect - github.com/nats-io/jwt/v2 v2.5.7 // indirect + github.com/nats-io/jwt/v2 v2.5.8 // indirect github.com/nats-io/nkeys v0.4.7 // indirect github.com/nats-io/nuid v1.0.1 // indirect github.com/oklog/ulid v1.3.1 // indirect @@ -201,10 +201,10 @@ require ( golang.org/x/arch v0.7.0 // indirect golang.org/x/exp v0.0.0-20240531132922-fd00a4e0eefc // indirect golang.org/x/mod v0.17.0 // indirect - golang.org/x/sys v0.21.0 // indirect - golang.org/x/term v0.21.0 // indirect - golang.org/x/text v0.16.0 // indirect - golang.org/x/time v0.5.0 // indirect + golang.org/x/sys v0.24.0 // indirect + golang.org/x/term v0.23.0 // indirect + golang.org/x/text v0.17.0 // indirect + golang.org/x/time v0.6.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 // indirect diff --git a/go.sum b/go.sum index 1e46c54600..77784a6a8d 100644 --- a/go.sum +++ b/go.sum @@ -446,8 +446,8 @@ 
github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= -github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= -github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= +github.com/minio/highwayhash v1.0.3 h1:kbnuUMoHYyVl7szWjSxJnxw11k2U709jqFPPmIUyD6Q= +github.com/minio/highwayhash v1.0.3/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= @@ -474,12 +474,12 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/nats-io/jwt/v2 v2.5.7 h1:j5lH1fUXCnJnY8SsQeB/a/z9Azgu2bYIDvtPVNdxe2c= -github.com/nats-io/jwt/v2 v2.5.7/go.mod h1:ZdWS1nZa6WMZfFwwgpEaqBV8EPGVgOTDHN/wTbz0Y5A= -github.com/nats-io/nats-server/v2 v2.10.17 h1:PTVObNBD3TZSNUDgzFb1qQsQX4mOgFmOuG9vhT+KBUY= -github.com/nats-io/nats-server/v2 v2.10.17/go.mod h1:5OUyc4zg42s/p2i92zbbqXvUNsbF0ivdTLKshVMn2YQ= -github.com/nats-io/nats.go v1.36.0 h1:suEUPuWzTSse/XhESwqLxXGuj8vGRuPRoG7MoRN/qyU= -github.com/nats-io/nats.go v1.36.0/go.mod 
h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8= +github.com/nats-io/jwt/v2 v2.5.8 h1:uvdSzwWiEGWGXf+0Q+70qv6AQdvcvxrv9hPM0RiPamE= +github.com/nats-io/jwt/v2 v2.5.8/go.mod h1:ZdWS1nZa6WMZfFwwgpEaqBV8EPGVgOTDHN/wTbz0Y5A= +github.com/nats-io/nats-server/v2 v2.10.20 h1:CXDTYNHeBiAKBTAIP2gjpgbWap2GhATnTLgP8etyvEI= +github.com/nats-io/nats-server/v2 v2.10.20/go.mod h1:hgcPnoUtMfxz1qVOvLZGurVypQ+Cg6GXVXjG53iHk+M= +github.com/nats-io/nats.go v1.37.0 h1:07rauXbVnnJvv1gfIyghFEo6lUcYRY0WXc3x7x0vUxE= +github.com/nats-io/nats.go v1.37.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8= github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI= github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= @@ -687,8 +687,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= +golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -802,11 +802,10 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync 
v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -863,15 +862,16 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod 
h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= +golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -884,14 +884,14 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= +golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= diff --git a/mkdocs.yml b/mkdocs.yml index 55dac8b1c1..1118c1ff43 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -101,6 +101,7 @@ nav: - user-guide/reference/configuration/labels-and-annotations.md - user-guide/reference/configuration/init-containers.md - user-guide/reference/configuration/sidecar-containers.md + - user-guide/reference/configuration/liveness-and-readiness.md - user-guide/reference/configuration/pipeline-customization.md - user-guide/reference/configuration/istio.md - user-guide/reference/configuration/max-message-size.md diff --git a/pkg/apis/numaflow/v1alpha1/const.go b/pkg/apis/numaflow/v1alpha1/const.go index e36ec9bd34..f65e2a5bd7 100644 --- a/pkg/apis/numaflow/v1alpha1/const.go +++ b/pkg/apis/numaflow/v1alpha1/const.go @@ -265,6 +265,20 @@ const ( // This strategy argues for robustness in operations, aiming // to minimize the chances of data loss or failed deliveries in transient failure scenarios. 
DefaultOnFailureRetryStrategy = OnFailureRetry + + // Defeault values for readiness and liveness probes + NumaContainerReadyzInitialDelaySeconds = 5 + NumaContainerReadyzPeriodSeconds = 10 + NumaContainerReadyzTimeoutSeconds = 30 + NumaContainerReadyzFailureThreshold = 6 + NumaContainerLivezInitialDelaySeconds = 20 + NumaContainerLivezPeriodSeconds = 60 + NumaContainerLivezTimeoutSeconds = 30 + NumaContainerLivezFailureThreshold = 5 + UDContainerLivezInitialDelaySeconds = 30 + UDContainerLivezPeriodSeconds = 60 + UDContainerLivezTimeoutSeconds = 30 + UDContainerLivezFailureThreshold = 5 ) var ( diff --git a/pkg/apis/numaflow/v1alpha1/container_template.go b/pkg/apis/numaflow/v1alpha1/container_template.go index 071229114c..5b3ad3270c 100644 --- a/pkg/apis/numaflow/v1alpha1/container_template.go +++ b/pkg/apis/numaflow/v1alpha1/container_template.go @@ -33,6 +33,10 @@ type ContainerTemplate struct { Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,4,rep,name=env"` // +optional EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,5,rep,name=envFrom"` + // +optional + ReadinessProbe *Probe `json:"readinessProbe,omitempty" protobuf:"bytes,6,opt,name=readinessProbe"` + // +optional + LivenessProbe *Probe `json:"livenessProbe,omitempty" protobuf:"bytes,7,opt,name=livenessProbe"` } // ApplyToContainer updates the Container with the values from the ContainerTemplate @@ -48,6 +52,40 @@ func (ct *ContainerTemplate) ApplyToContainer(c *corev1.Container) { if len(ct.EnvFrom) > 0 { c.EnvFrom = append(c.EnvFrom, ct.EnvFrom...) 
} + if rp := ct.ReadinessProbe; rp != nil && c.ReadinessProbe != nil { + if rp.InitialDelaySeconds != nil { + c.ReadinessProbe.InitialDelaySeconds = *rp.InitialDelaySeconds + } + if rp.TimeoutSeconds != nil { + c.ReadinessProbe.TimeoutSeconds = *rp.TimeoutSeconds + } + if rp.PeriodSeconds != nil { + c.ReadinessProbe.PeriodSeconds = *rp.PeriodSeconds + } + if rp.FailureThreshold != nil { + c.ReadinessProbe.FailureThreshold = *rp.FailureThreshold + } + if rp.SuccessThreshold != nil { + c.ReadinessProbe.SuccessThreshold = *rp.SuccessThreshold + } + } + if lp := ct.LivenessProbe; lp != nil && c.LivenessProbe != nil { + if lp.InitialDelaySeconds != nil { + c.LivenessProbe.InitialDelaySeconds = *lp.InitialDelaySeconds + } + if lp.TimeoutSeconds != nil { + c.LivenessProbe.TimeoutSeconds = *lp.TimeoutSeconds + } + if lp.PeriodSeconds != nil { + c.LivenessProbe.PeriodSeconds = *lp.PeriodSeconds + } + if lp.FailureThreshold != nil { + c.LivenessProbe.FailureThreshold = *lp.FailureThreshold + } + if lp.SuccessThreshold != nil { + c.LivenessProbe.SuccessThreshold = *lp.SuccessThreshold + } + } } // ApplyToNumaflowContainers updates any numa or init containers with the values from the ContainerTemplate diff --git a/pkg/apis/numaflow/v1alpha1/container_template_test.go b/pkg/apis/numaflow/v1alpha1/container_template_test.go index a1e8b8f479..5b835cda72 100644 --- a/pkg/apis/numaflow/v1alpha1/container_template_test.go +++ b/pkg/apis/numaflow/v1alpha1/container_template_test.go @@ -22,6 +22,7 @@ import ( "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/utils/ptr" ) var ( @@ -81,3 +82,148 @@ func Test_ApplyToNumaflowContainers(t *testing.T) { assert.Equal(t, testContainerTemplate.Resources, cs[0].Resources) assert.NotEqual(t, testContainerTemplate.Resources, cs[1].Resources) } + +func TestApplyProbes(t *testing.T) { + tests := []struct { + name string + template *ContainerTemplate + input *corev1.Container + expected 
*corev1.Container + }{ + { + name: "Apply ReadinessProbe", + template: &ContainerTemplate{ + ReadinessProbe: &Probe{ + InitialDelaySeconds: ptr.To[int32](5), + TimeoutSeconds: ptr.To[int32](10), + PeriodSeconds: ptr.To[int32](15), + FailureThreshold: ptr.To[int32](3), + SuccessThreshold: ptr.To[int32](1), + }, + }, + input: &corev1.Container{ + ReadinessProbe: &corev1.Probe{}, + }, + expected: &corev1.Container{ + ReadinessProbe: &corev1.Probe{ + InitialDelaySeconds: 5, + TimeoutSeconds: 10, + PeriodSeconds: 15, + FailureThreshold: 3, + SuccessThreshold: 1, + }, + }, + }, + { + name: "Apply LivenessProbe", + template: &ContainerTemplate{ + LivenessProbe: &Probe{ + InitialDelaySeconds: ptr.To[int32](10), + TimeoutSeconds: ptr.To[int32](5), + PeriodSeconds: ptr.To[int32](20), + FailureThreshold: ptr.To[int32](5), + SuccessThreshold: ptr.To[int32](1), + }, + }, + input: &corev1.Container{ + LivenessProbe: &corev1.Probe{}, + }, + expected: &corev1.Container{ + LivenessProbe: &corev1.Probe{ + InitialDelaySeconds: 10, + TimeoutSeconds: 5, + PeriodSeconds: 20, + FailureThreshold: 5, + SuccessThreshold: 1, + }, + }, + }, + { + name: "Apply Both Probes", + template: &ContainerTemplate{ + ReadinessProbe: &Probe{ + InitialDelaySeconds: ptr.To[int32](5), + TimeoutSeconds: ptr.To[int32](10), + }, + LivenessProbe: &Probe{ + PeriodSeconds: ptr.To[int32](20), + FailureThreshold: ptr.To[int32](5), + }, + }, + input: &corev1.Container{ + ReadinessProbe: &corev1.Probe{}, + LivenessProbe: &corev1.Probe{}, + }, + expected: &corev1.Container{ + ReadinessProbe: &corev1.Probe{ + InitialDelaySeconds: 5, + TimeoutSeconds: 10, + }, + LivenessProbe: &corev1.Probe{ + PeriodSeconds: 20, + FailureThreshold: 5, + }, + }, + }, + { + name: "No Probes in Template", + template: &ContainerTemplate{}, + input: &corev1.Container{ + ReadinessProbe: &corev1.Probe{InitialDelaySeconds: 30}, + LivenessProbe: &corev1.Probe{TimeoutSeconds: 15}, + }, + expected: &corev1.Container{ + ReadinessProbe: 
&corev1.Probe{InitialDelaySeconds: 30}, + LivenessProbe: &corev1.Probe{TimeoutSeconds: 15}, + }, + }, + { + name: "No Probes in Container", + template: &ContainerTemplate{ + ReadinessProbe: &Probe{ + InitialDelaySeconds: ptr.To[int32](5), + TimeoutSeconds: ptr.To[int32](10), + }, + LivenessProbe: &Probe{ + PeriodSeconds: ptr.To[int32](20), + FailureThreshold: ptr.To[int32](5), + }, + }, + input: &corev1.Container{}, + expected: &corev1.Container{}, + }, + { + name: "Partial Probe Updates", + template: &ContainerTemplate{ + ReadinessProbe: &Probe{ + InitialDelaySeconds: ptr.To[int32](25), + }, + LivenessProbe: &Probe{ + FailureThreshold: ptr.To[int32](4), + }, + }, + input: &corev1.Container{ + ReadinessProbe: &corev1.Probe{TimeoutSeconds: 5}, + LivenessProbe: &corev1.Probe{PeriodSeconds: 10}, + }, + expected: &corev1.Container{ + ReadinessProbe: &corev1.Probe{ + InitialDelaySeconds: 25, + TimeoutSeconds: 5, + }, + LivenessProbe: &corev1.Probe{ + PeriodSeconds: 10, + FailureThreshold: 4, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.template.ApplyToContainer(tt.input) + assert.Equal(t, tt.expected.ReadinessProbe, tt.input.ReadinessProbe) + assert.Equal(t, tt.expected.LivenessProbe, tt.input.LivenessProbe) + }) + } +} diff --git a/pkg/apis/numaflow/v1alpha1/generated.pb.go b/pkg/apis/numaflow/v1alpha1/generated.pb.go index 65ac7601e5..138962c4c2 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.pb.go +++ b/pkg/apis/numaflow/v1alpha1/generated.pb.go @@ -1702,10 +1702,38 @@ func (m *PipelineStatus) XXX_DiscardUnknown() { var xxx_messageInfo_PipelineStatus proto.InternalMessageInfo +func (m *Probe) Reset() { *m = Probe{} } +func (*Probe) ProtoMessage() {} +func (*Probe) Descriptor() ([]byte, []int) { + return fileDescriptor_9d0d1b17d3865563, []int{59} +} +func (m *Probe) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Probe) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + 
n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Probe) XXX_Merge(src proto.Message) { + xxx_messageInfo_Probe.Merge(m, src) +} +func (m *Probe) XXX_Size() int { + return m.Size() +} +func (m *Probe) XXX_DiscardUnknown() { + xxx_messageInfo_Probe.DiscardUnknown(m) +} + +var xxx_messageInfo_Probe proto.InternalMessageInfo + func (m *RedisBufferService) Reset() { *m = RedisBufferService{} } func (*RedisBufferService) ProtoMessage() {} func (*RedisBufferService) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{59} + return fileDescriptor_9d0d1b17d3865563, []int{60} } func (m *RedisBufferService) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1733,7 +1761,7 @@ var xxx_messageInfo_RedisBufferService proto.InternalMessageInfo func (m *RedisConfig) Reset() { *m = RedisConfig{} } func (*RedisConfig) ProtoMessage() {} func (*RedisConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{60} + return fileDescriptor_9d0d1b17d3865563, []int{61} } func (m *RedisConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1761,7 +1789,7 @@ var xxx_messageInfo_RedisConfig proto.InternalMessageInfo func (m *RedisSettings) Reset() { *m = RedisSettings{} } func (*RedisSettings) ProtoMessage() {} func (*RedisSettings) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{61} + return fileDescriptor_9d0d1b17d3865563, []int{62} } func (m *RedisSettings) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1789,7 +1817,7 @@ var xxx_messageInfo_RedisSettings proto.InternalMessageInfo func (m *RetryStrategy) Reset() { *m = RetryStrategy{} } func (*RetryStrategy) ProtoMessage() {} func (*RetryStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{62} + return fileDescriptor_9d0d1b17d3865563, []int{63} } func (m *RetryStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1817,7 
+1845,7 @@ var xxx_messageInfo_RetryStrategy proto.InternalMessageInfo func (m *RollingUpdateStrategy) Reset() { *m = RollingUpdateStrategy{} } func (*RollingUpdateStrategy) ProtoMessage() {} func (*RollingUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{63} + return fileDescriptor_9d0d1b17d3865563, []int{64} } func (m *RollingUpdateStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1845,7 +1873,7 @@ var xxx_messageInfo_RollingUpdateStrategy proto.InternalMessageInfo func (m *SASL) Reset() { *m = SASL{} } func (*SASL) ProtoMessage() {} func (*SASL) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{64} + return fileDescriptor_9d0d1b17d3865563, []int{65} } func (m *SASL) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1873,7 +1901,7 @@ var xxx_messageInfo_SASL proto.InternalMessageInfo func (m *SASLPlain) Reset() { *m = SASLPlain{} } func (*SASLPlain) ProtoMessage() {} func (*SASLPlain) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{65} + return fileDescriptor_9d0d1b17d3865563, []int{66} } func (m *SASLPlain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1901,7 +1929,7 @@ var xxx_messageInfo_SASLPlain proto.InternalMessageInfo func (m *Scale) Reset() { *m = Scale{} } func (*Scale) ProtoMessage() {} func (*Scale) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{66} + return fileDescriptor_9d0d1b17d3865563, []int{67} } func (m *Scale) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1929,7 +1957,7 @@ var xxx_messageInfo_Scale proto.InternalMessageInfo func (m *ServingSource) Reset() { *m = ServingSource{} } func (*ServingSource) ProtoMessage() {} func (*ServingSource) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{67} + return fileDescriptor_9d0d1b17d3865563, []int{68} } func (m *ServingSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ 
-1957,7 +1985,7 @@ var xxx_messageInfo_ServingSource proto.InternalMessageInfo func (m *ServingStore) Reset() { *m = ServingStore{} } func (*ServingStore) ProtoMessage() {} func (*ServingStore) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{68} + return fileDescriptor_9d0d1b17d3865563, []int{69} } func (m *ServingStore) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1985,7 +2013,7 @@ var xxx_messageInfo_ServingStore proto.InternalMessageInfo func (m *SessionWindow) Reset() { *m = SessionWindow{} } func (*SessionWindow) ProtoMessage() {} func (*SessionWindow) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{69} + return fileDescriptor_9d0d1b17d3865563, []int{70} } func (m *SessionWindow) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2013,7 +2041,7 @@ var xxx_messageInfo_SessionWindow proto.InternalMessageInfo func (m *SideInput) Reset() { *m = SideInput{} } func (*SideInput) ProtoMessage() {} func (*SideInput) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{70} + return fileDescriptor_9d0d1b17d3865563, []int{71} } func (m *SideInput) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2041,7 +2069,7 @@ var xxx_messageInfo_SideInput proto.InternalMessageInfo func (m *SideInputTrigger) Reset() { *m = SideInputTrigger{} } func (*SideInputTrigger) ProtoMessage() {} func (*SideInputTrigger) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{71} + return fileDescriptor_9d0d1b17d3865563, []int{72} } func (m *SideInputTrigger) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2069,7 +2097,7 @@ var xxx_messageInfo_SideInputTrigger proto.InternalMessageInfo func (m *SideInputsManagerTemplate) Reset() { *m = SideInputsManagerTemplate{} } func (*SideInputsManagerTemplate) ProtoMessage() {} func (*SideInputsManagerTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{72} + return 
fileDescriptor_9d0d1b17d3865563, []int{73} } func (m *SideInputsManagerTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2097,7 +2125,7 @@ var xxx_messageInfo_SideInputsManagerTemplate proto.InternalMessageInfo func (m *Sink) Reset() { *m = Sink{} } func (*Sink) ProtoMessage() {} func (*Sink) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{73} + return fileDescriptor_9d0d1b17d3865563, []int{74} } func (m *Sink) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2125,7 +2153,7 @@ var xxx_messageInfo_Sink proto.InternalMessageInfo func (m *SlidingWindow) Reset() { *m = SlidingWindow{} } func (*SlidingWindow) ProtoMessage() {} func (*SlidingWindow) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{74} + return fileDescriptor_9d0d1b17d3865563, []int{75} } func (m *SlidingWindow) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2153,7 +2181,7 @@ var xxx_messageInfo_SlidingWindow proto.InternalMessageInfo func (m *Source) Reset() { *m = Source{} } func (*Source) ProtoMessage() {} func (*Source) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{75} + return fileDescriptor_9d0d1b17d3865563, []int{76} } func (m *Source) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2181,7 +2209,7 @@ var xxx_messageInfo_Source proto.InternalMessageInfo func (m *Status) Reset() { *m = Status{} } func (*Status) ProtoMessage() {} func (*Status) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{76} + return fileDescriptor_9d0d1b17d3865563, []int{77} } func (m *Status) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2209,7 +2237,7 @@ var xxx_messageInfo_Status proto.InternalMessageInfo func (m *TLS) Reset() { *m = TLS{} } func (*TLS) ProtoMessage() {} func (*TLS) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{77} + return fileDescriptor_9d0d1b17d3865563, []int{78} } func (m *TLS) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2237,7 +2265,7 @@ var xxx_messageInfo_TLS proto.InternalMessageInfo func (m *TagConditions) Reset() { *m = TagConditions{} } func (*TagConditions) ProtoMessage() {} func (*TagConditions) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{78} + return fileDescriptor_9d0d1b17d3865563, []int{79} } func (m *TagConditions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2265,7 +2293,7 @@ var xxx_messageInfo_TagConditions proto.InternalMessageInfo func (m *Templates) Reset() { *m = Templates{} } func (*Templates) ProtoMessage() {} func (*Templates) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{79} + return fileDescriptor_9d0d1b17d3865563, []int{80} } func (m *Templates) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2293,7 +2321,7 @@ var xxx_messageInfo_Templates proto.InternalMessageInfo func (m *Transformer) Reset() { *m = Transformer{} } func (*Transformer) ProtoMessage() {} func (*Transformer) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{80} + return fileDescriptor_9d0d1b17d3865563, []int{81} } func (m *Transformer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2321,7 +2349,7 @@ var xxx_messageInfo_Transformer proto.InternalMessageInfo func (m *UDF) Reset() { *m = UDF{} } func (*UDF) ProtoMessage() {} func (*UDF) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{81} + return fileDescriptor_9d0d1b17d3865563, []int{82} } func (m *UDF) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2349,7 +2377,7 @@ var xxx_messageInfo_UDF proto.InternalMessageInfo func (m *UDSink) Reset() { *m = UDSink{} } func (*UDSink) ProtoMessage() {} func (*UDSink) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{82} + return fileDescriptor_9d0d1b17d3865563, []int{83} } func (m *UDSink) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ 
-2377,7 +2405,7 @@ var xxx_messageInfo_UDSink proto.InternalMessageInfo func (m *UDSource) Reset() { *m = UDSource{} } func (*UDSource) ProtoMessage() {} func (*UDSource) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{83} + return fileDescriptor_9d0d1b17d3865563, []int{84} } func (m *UDSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2405,7 +2433,7 @@ var xxx_messageInfo_UDSource proto.InternalMessageInfo func (m *UDTransformer) Reset() { *m = UDTransformer{} } func (*UDTransformer) ProtoMessage() {} func (*UDTransformer) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{84} + return fileDescriptor_9d0d1b17d3865563, []int{85} } func (m *UDTransformer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2433,7 +2461,7 @@ var xxx_messageInfo_UDTransformer proto.InternalMessageInfo func (m *UpdateStrategy) Reset() { *m = UpdateStrategy{} } func (*UpdateStrategy) ProtoMessage() {} func (*UpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{85} + return fileDescriptor_9d0d1b17d3865563, []int{86} } func (m *UpdateStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2461,7 +2489,7 @@ var xxx_messageInfo_UpdateStrategy proto.InternalMessageInfo func (m *Vertex) Reset() { *m = Vertex{} } func (*Vertex) ProtoMessage() {} func (*Vertex) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{86} + return fileDescriptor_9d0d1b17d3865563, []int{87} } func (m *Vertex) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2489,7 +2517,7 @@ var xxx_messageInfo_Vertex proto.InternalMessageInfo func (m *VertexInstance) Reset() { *m = VertexInstance{} } func (*VertexInstance) ProtoMessage() {} func (*VertexInstance) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{87} + return fileDescriptor_9d0d1b17d3865563, []int{88} } func (m *VertexInstance) XXX_Unmarshal(b []byte) error { return 
m.Unmarshal(b) @@ -2517,7 +2545,7 @@ var xxx_messageInfo_VertexInstance proto.InternalMessageInfo func (m *VertexLimits) Reset() { *m = VertexLimits{} } func (*VertexLimits) ProtoMessage() {} func (*VertexLimits) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{88} + return fileDescriptor_9d0d1b17d3865563, []int{89} } func (m *VertexLimits) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2545,7 +2573,7 @@ var xxx_messageInfo_VertexLimits proto.InternalMessageInfo func (m *VertexList) Reset() { *m = VertexList{} } func (*VertexList) ProtoMessage() {} func (*VertexList) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{89} + return fileDescriptor_9d0d1b17d3865563, []int{90} } func (m *VertexList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2573,7 +2601,7 @@ var xxx_messageInfo_VertexList proto.InternalMessageInfo func (m *VertexSpec) Reset() { *m = VertexSpec{} } func (*VertexSpec) ProtoMessage() {} func (*VertexSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{90} + return fileDescriptor_9d0d1b17d3865563, []int{91} } func (m *VertexSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2601,7 +2629,7 @@ var xxx_messageInfo_VertexSpec proto.InternalMessageInfo func (m *VertexStatus) Reset() { *m = VertexStatus{} } func (*VertexStatus) ProtoMessage() {} func (*VertexStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{91} + return fileDescriptor_9d0d1b17d3865563, []int{92} } func (m *VertexStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2629,7 +2657,7 @@ var xxx_messageInfo_VertexStatus proto.InternalMessageInfo func (m *VertexTemplate) Reset() { *m = VertexTemplate{} } func (*VertexTemplate) ProtoMessage() {} func (*VertexTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{92} + return fileDescriptor_9d0d1b17d3865563, []int{93} } func (m *VertexTemplate) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2657,7 +2685,7 @@ var xxx_messageInfo_VertexTemplate proto.InternalMessageInfo func (m *Watermark) Reset() { *m = Watermark{} } func (*Watermark) ProtoMessage() {} func (*Watermark) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{93} + return fileDescriptor_9d0d1b17d3865563, []int{94} } func (m *Watermark) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2685,7 +2713,7 @@ var xxx_messageInfo_Watermark proto.InternalMessageInfo func (m *Window) Reset() { *m = Window{} } func (*Window) ProtoMessage() {} func (*Window) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{94} + return fileDescriptor_9d0d1b17d3865563, []int{95} } func (m *Window) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2778,6 +2806,7 @@ func init() { proto.RegisterType((*PipelineList)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.PipelineList") proto.RegisterType((*PipelineSpec)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.PipelineSpec") proto.RegisterType((*PipelineStatus)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.PipelineStatus") + proto.RegisterType((*Probe)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.Probe") proto.RegisterType((*RedisBufferService)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.RedisBufferService") proto.RegisterType((*RedisConfig)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.RedisConfig") proto.RegisterType((*RedisSettings)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.RedisSettings") @@ -2822,501 +2851,510 @@ func init() { } var fileDescriptor_9d0d1b17d3865563 = []byte{ - // 7889 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x6b, 0x6c, 0x25, 0xd9, - 0xb5, 0xd6, 0x9c, 0x97, 0xcf, 0x39, 0xeb, 0xf8, 0xd5, 0xbb, 0x1f, 0xe3, 0xee, 0xe9, 0x69, 0xf7, - 0xad, 
0xb9, 0x33, 0xb7, 0x2f, 0xf7, 0x5e, 0x9b, 0xf1, 0x9d, 0x57, 0xee, 0xbd, 0xc9, 0x8c, 0x8f, - 0xdd, 0x76, 0xbb, 0xdb, 0xee, 0x76, 0xd6, 0xb1, 0x7b, 0x26, 0x77, 0x48, 0x86, 0x72, 0xd5, 0xf6, - 0x71, 0x8d, 0xeb, 0x54, 0x9d, 0xa9, 0xaa, 0xe3, 0x6e, 0x4f, 0x40, 0x79, 0x0c, 0x68, 0x06, 0x01, - 0x02, 0xe5, 0x57, 0x24, 0x14, 0x10, 0x08, 0x29, 0x3f, 0xa2, 0xf0, 0x03, 0x29, 0xfc, 0x40, 0x82, - 0x10, 0x84, 0x20, 0x20, 0x1e, 0x11, 0x42, 0x62, 0xf8, 0x63, 0x11, 0x23, 0x7e, 0x80, 0x04, 0x8a, - 0x88, 0x20, 0xa1, 0x15, 0x11, 0xb4, 0x5f, 0xf5, 0x3a, 0x75, 0xba, 0xed, 0x53, 0x76, 0x4f, 0x0f, - 0xcc, 0xbf, 0xaa, 0xbd, 0xd6, 0xfe, 0xd6, 0xae, 0x5d, 0xbb, 0xf6, 0x5e, 0x7b, 0xad, 0xb5, 0x57, - 0xc1, 0x72, 0xdb, 0x0a, 0x76, 0x7a, 0x5b, 0x33, 0x86, 0xdb, 0x99, 0x75, 0x7a, 0x1d, 0xbd, 0xeb, - 0xb9, 0xef, 0xf2, 0x8b, 0x6d, 0xdb, 0xbd, 0x37, 0xdb, 0xdd, 0x6d, 0xcf, 0xea, 0x5d, 0xcb, 0x8f, - 0x4a, 0xf6, 0x5e, 0xd4, 0xed, 0xee, 0x8e, 0xfe, 0xe2, 0x6c, 0x9b, 0x3a, 0xd4, 0xd3, 0x03, 0x6a, - 0xce, 0x74, 0x3d, 0x37, 0x70, 0xc9, 0xab, 0x11, 0xd0, 0x8c, 0x02, 0x9a, 0x51, 0xd5, 0x66, 0xba, - 0xbb, 0xed, 0x19, 0x06, 0x14, 0x95, 0x28, 0xa0, 0x4b, 0xbf, 0x17, 0x6b, 0x41, 0xdb, 0x6d, 0xbb, - 0xb3, 0x1c, 0x6f, 0xab, 0xb7, 0xcd, 0xef, 0xf8, 0x0d, 0xbf, 0x12, 0x72, 0x2e, 0x69, 0xbb, 0xaf, - 0xf9, 0x33, 0x96, 0xcb, 0x9a, 0x35, 0x6b, 0xb8, 0x1e, 0x9d, 0xdd, 0xeb, 0x6b, 0xcb, 0xa5, 0x97, - 0x22, 0x9e, 0x8e, 0x6e, 0xec, 0x58, 0x0e, 0xf5, 0xf6, 0xd5, 0xb3, 0xcc, 0x7a, 0xd4, 0x77, 0x7b, - 0x9e, 0x41, 0x8f, 0x55, 0xcb, 0x9f, 0xed, 0xd0, 0x40, 0xcf, 0x92, 0x35, 0x3b, 0xa8, 0x96, 0xd7, - 0x73, 0x02, 0xab, 0xd3, 0x2f, 0xe6, 0x95, 0x47, 0x55, 0xf0, 0x8d, 0x1d, 0xda, 0xd1, 0xfb, 0xea, - 0xfd, 0xfe, 0xa0, 0x7a, 0xbd, 0xc0, 0xb2, 0x67, 0x2d, 0x27, 0xf0, 0x03, 0x2f, 0x5d, 0x49, 0xfb, - 0x11, 0xc0, 0xd9, 0xf9, 0x2d, 0x3f, 0xf0, 0x74, 0x23, 0x58, 0x77, 0xcd, 0x0d, 0xda, 0xe9, 0xda, - 0x7a, 0x40, 0xc9, 0x2e, 0xd4, 0xd8, 0x03, 0x99, 0x7a, 0xa0, 0x4f, 0x15, 0xae, 0x16, 0xae, 0x35, - 0xe6, 0xe6, 0x67, 0x86, 0x7c, 0x81, 0x33, 
0x6b, 0x12, 0xa8, 0x39, 0x7a, 0x78, 0x30, 0x5d, 0x53, - 0x77, 0x18, 0x0a, 0x20, 0xdf, 0x2e, 0xc0, 0xa8, 0xe3, 0x9a, 0xb4, 0x45, 0x6d, 0x6a, 0x04, 0xae, - 0x37, 0x55, 0xbc, 0x5a, 0xba, 0xd6, 0x98, 0xfb, 0xca, 0xd0, 0x12, 0x33, 0x9e, 0x68, 0xe6, 0x76, - 0x4c, 0xc0, 0x75, 0x27, 0xf0, 0xf6, 0x9b, 0xe7, 0x7e, 0x7c, 0x30, 0xfd, 0xd4, 0xe1, 0xc1, 0xf4, - 0x68, 0x9c, 0x84, 0x89, 0x96, 0x90, 0x4d, 0x68, 0x04, 0xae, 0xcd, 0xba, 0xcc, 0x72, 0x1d, 0x7f, - 0xaa, 0xc4, 0x1b, 0x76, 0x65, 0x46, 0x74, 0x35, 0x13, 0x3f, 0xc3, 0xc6, 0xd8, 0xcc, 0xde, 0x8b, - 0x33, 0x1b, 0x21, 0x5b, 0xf3, 0xac, 0x04, 0x6e, 0x44, 0x65, 0x3e, 0xc6, 0x71, 0x08, 0x85, 0x09, - 0x9f, 0x1a, 0x3d, 0xcf, 0x0a, 0xf6, 0x17, 0x5c, 0x27, 0xa0, 0xf7, 0x83, 0xa9, 0x32, 0xef, 0xe5, - 0x17, 0xb2, 0xa0, 0xd7, 0x5d, 0xb3, 0x95, 0xe4, 0x6e, 0x9e, 0x3d, 0x3c, 0x98, 0x9e, 0x48, 0x15, - 0x62, 0x1a, 0x93, 0x38, 0x30, 0x69, 0x75, 0xf4, 0x36, 0x5d, 0xef, 0xd9, 0x76, 0x8b, 0x1a, 0x1e, - 0x0d, 0xfc, 0xa9, 0x0a, 0x7f, 0x84, 0x6b, 0x59, 0x72, 0x56, 0x5d, 0x43, 0xb7, 0xef, 0x6c, 0xbd, - 0x4b, 0x8d, 0x00, 0xe9, 0x36, 0xf5, 0xa8, 0x63, 0xd0, 0xe6, 0x94, 0x7c, 0x98, 0xc9, 0x95, 0x14, - 0x12, 0xf6, 0x61, 0x93, 0x65, 0x38, 0xd3, 0xf5, 0x2c, 0x97, 0x37, 0xc1, 0xd6, 0x7d, 0xff, 0xb6, - 0xde, 0xa1, 0x53, 0x23, 0x57, 0x0b, 0xd7, 0xea, 0xcd, 0x8b, 0x12, 0xe6, 0xcc, 0x7a, 0x9a, 0x01, - 0xfb, 0xeb, 0x90, 0x6b, 0x50, 0x53, 0x85, 0x53, 0xd5, 0xab, 0x85, 0x6b, 0x15, 0x31, 0x76, 0x54, - 0x5d, 0x0c, 0xa9, 0x64, 0x09, 0x6a, 0xfa, 0xf6, 0xb6, 0xe5, 0x30, 0xce, 0x1a, 0xef, 0xc2, 0xcb, - 0x59, 0x8f, 0x36, 0x2f, 0x79, 0x04, 0x8e, 0xba, 0xc3, 0xb0, 0x2e, 0xb9, 0x09, 0xc4, 0xa7, 0xde, - 0x9e, 0x65, 0xd0, 0x79, 0xc3, 0x70, 0x7b, 0x4e, 0xc0, 0xdb, 0x5e, 0xe7, 0x6d, 0xbf, 0x24, 0xdb, - 0x4e, 0x5a, 0x7d, 0x1c, 0x98, 0x51, 0x8b, 0xbc, 0x01, 0x93, 0xf2, 0x5b, 0x8d, 0x7a, 0x01, 0x38, - 0xd2, 0x39, 0xd6, 0x91, 0x98, 0xa2, 0x61, 0x1f, 0x37, 0x31, 0xe1, 0xb2, 0xde, 0x0b, 0xdc, 0x0e, - 0x83, 0x4c, 0x0a, 0xdd, 0x70, 0x77, 0xa9, 0x33, 0xd5, 0xb8, 0x5a, 0xb8, 0x56, 
0x6b, 0x5e, 0x3d, - 0x3c, 0x98, 0xbe, 0x3c, 0xff, 0x10, 0x3e, 0x7c, 0x28, 0x0a, 0xb9, 0x03, 0x75, 0xd3, 0xf1, 0xd7, - 0x5d, 0xdb, 0x32, 0xf6, 0xa7, 0x46, 0x79, 0x03, 0x5f, 0x94, 0x8f, 0x5a, 0x5f, 0xbc, 0xdd, 0x12, - 0x84, 0x07, 0x07, 0xd3, 0x97, 0xfb, 0xa7, 0xd4, 0x99, 0x90, 0x8e, 0x11, 0x06, 0x59, 0xe3, 0x80, - 0x0b, 0xae, 0xb3, 0x6d, 0xb5, 0xa7, 0xc6, 0xf8, 0xdb, 0xb8, 0x3a, 0x60, 0x40, 0x2f, 0xde, 0x6e, - 0x09, 0xbe, 0xe6, 0x98, 0x14, 0x27, 0x6e, 0x31, 0x42, 0x20, 0x26, 0x8c, 0xab, 0xc9, 0x78, 0xc1, - 0xd6, 0xad, 0x8e, 0x3f, 0x35, 0xce, 0x07, 0xef, 0x6f, 0x0e, 0xc0, 0xc4, 0x38, 0x73, 0xf3, 0x82, - 0x7c, 0x94, 0xf1, 0x44, 0xb1, 0x8f, 0x29, 0xcc, 0x4b, 0xaf, 0xc3, 0x99, 0xbe, 0xb9, 0x81, 0x4c, - 0x42, 0x69, 0x97, 0xee, 0xf3, 0xa9, 0xaf, 0x8e, 0xec, 0x92, 0x9c, 0x83, 0xca, 0x9e, 0x6e, 0xf7, - 0xe8, 0x54, 0x91, 0x97, 0x89, 0x9b, 0x3f, 0x28, 0xbe, 0x56, 0xd0, 0xfe, 0x56, 0x09, 0x46, 0xd5, - 0x8c, 0xd3, 0xb2, 0x9c, 0x5d, 0xf2, 0x26, 0x94, 0x6c, 0xb7, 0x2d, 0xe7, 0xcd, 0x3f, 0x1a, 0x7a, - 0x16, 0x5b, 0x75, 0xdb, 0xcd, 0xea, 0xe1, 0xc1, 0x74, 0x69, 0xd5, 0x6d, 0x23, 0x43, 0x24, 0x06, - 0x54, 0x76, 0xf5, 0xed, 0x5d, 0x9d, 0xb7, 0xa1, 0x31, 0xd7, 0x1c, 0x1a, 0xfa, 0x16, 0x43, 0x61, - 0x6d, 0x6d, 0xd6, 0x0f, 0x0f, 0xa6, 0x2b, 0xfc, 0x16, 0x05, 0x36, 0x71, 0xa1, 0xbe, 0x65, 0xeb, - 0xc6, 0xee, 0x8e, 0x6b, 0xd3, 0xa9, 0x52, 0x4e, 0x41, 0x4d, 0x85, 0x24, 0x5e, 0x73, 0x78, 0x8b, - 0x91, 0x0c, 0x62, 0xc0, 0x48, 0xcf, 0xf4, 0x2d, 0x67, 0x57, 0xce, 0x81, 0xaf, 0x0f, 0x2d, 0x6d, - 0x73, 0x91, 0x3f, 0x13, 0x1c, 0x1e, 0x4c, 0x8f, 0x88, 0x6b, 0x94, 0xd0, 0xda, 0x2f, 0x47, 0x61, - 0x5c, 0xbd, 0xa4, 0xbb, 0xd4, 0x0b, 0xe8, 0x7d, 0x72, 0x15, 0xca, 0x0e, 0xfb, 0x34, 0xf9, 0x4b, - 0x6e, 0x8e, 0xca, 0xe1, 0x52, 0xe6, 0x9f, 0x24, 0xa7, 0xb0, 0x96, 0x89, 0xa1, 0x22, 0x3b, 0x7c, - 0xf8, 0x96, 0xb5, 0x38, 0x8c, 0x68, 0x99, 0xb8, 0x46, 0x09, 0x4d, 0xde, 0x86, 0x32, 0x7f, 0x78, - 0xd1, 0xd5, 0x9f, 0x1f, 0x5e, 0x04, 0x7b, 0xf4, 0x1a, 0x7b, 0x02, 0xfe, 0xe0, 0x1c, 0x94, 0x0d, - 0xc5, 0x9e, 0xb9, 
0x2d, 0x3b, 0xf6, 0x8f, 0x72, 0x74, 0xec, 0x92, 0x18, 0x8a, 0x9b, 0x8b, 0x4b, - 0xc8, 0x10, 0xc9, 0x5f, 0x29, 0xc0, 0x19, 0xc3, 0x75, 0x02, 0x9d, 0xe9, 0x19, 0x6a, 0x91, 0x9d, - 0xaa, 0x70, 0x39, 0x37, 0x87, 0x96, 0xb3, 0x90, 0x46, 0x6c, 0x9e, 0x67, 0x6b, 0x46, 0x5f, 0x31, - 0xf6, 0xcb, 0x26, 0x7f, 0xad, 0x00, 0xe7, 0xd9, 0x5c, 0xde, 0xc7, 0xcc, 0x57, 0xa0, 0x93, 0x6d, - 0xd5, 0xc5, 0xc3, 0x83, 0xe9, 0xf3, 0x2b, 0x59, 0xc2, 0x30, 0xbb, 0x0d, 0xac, 0x75, 0x67, 0xf5, - 0x7e, 0xb5, 0x84, 0xaf, 0x6e, 0x8d, 0xb9, 0xd5, 0x93, 0x54, 0x75, 0x9a, 0xcf, 0xc8, 0xa1, 0x9c, - 0xa5, 0xd9, 0x61, 0x56, 0x2b, 0xc8, 0x75, 0xa8, 0xee, 0xb9, 0x76, 0xaf, 0x43, 0xfd, 0xa9, 0x1a, - 0x9f, 0x62, 0x2f, 0x65, 0x4d, 0xb1, 0x77, 0x39, 0x4b, 0x73, 0x42, 0xc2, 0x57, 0xc5, 0xbd, 0x8f, - 0xaa, 0x2e, 0xb1, 0x60, 0xc4, 0xb6, 0x3a, 0x56, 0xe0, 0xf3, 0x85, 0xb3, 0x31, 0x77, 0x7d, 0xe8, - 0xc7, 0x12, 0x9f, 0xe8, 0x2a, 0x07, 0x13, 0x5f, 0x8d, 0xb8, 0x46, 0x29, 0x80, 0x4d, 0x85, 0xbe, - 0xa1, 0xdb, 0x62, 0x61, 0x6d, 0xcc, 0x7d, 0x61, 0xf8, 0xcf, 0x86, 0xa1, 0x34, 0xc7, 0xe4, 0x33, - 0x55, 0xf8, 0x2d, 0x0a, 0x6c, 0xf2, 0x65, 0x18, 0x4f, 0xbc, 0x4d, 0x7f, 0xaa, 0xc1, 0x7b, 0xe7, - 0xd9, 0xac, 0xde, 0x09, 0xb9, 0xa2, 0x95, 0x27, 0x31, 0x42, 0x7c, 0x4c, 0x81, 0x91, 0x5b, 0x50, - 0xf3, 0x2d, 0x93, 0x1a, 0xba, 0xe7, 0x4f, 0x8d, 0x1e, 0x05, 0x78, 0x52, 0x02, 0xd7, 0x5a, 0xb2, - 0x1a, 0x86, 0x00, 0x64, 0x06, 0xa0, 0xab, 0x7b, 0x81, 0x25, 0x14, 0xd5, 0x31, 0xae, 0x34, 0x8d, - 0x1f, 0x1e, 0x4c, 0xc3, 0x7a, 0x58, 0x8a, 0x31, 0x0e, 0xc6, 0xcf, 0xea, 0xae, 0x38, 0xdd, 0x5e, - 0x20, 0x16, 0xd6, 0xba, 0xe0, 0x6f, 0x85, 0xa5, 0x18, 0xe3, 0x20, 0xdf, 0x2f, 0xc0, 0x33, 0xd1, - 0x6d, 0xff, 0x47, 0x36, 0x71, 0xe2, 0x1f, 0xd9, 0xf4, 0xe1, 0xc1, 0xf4, 0x33, 0xad, 0xc1, 0x22, - 0xf1, 0x61, 0xed, 0x21, 0x1f, 0x16, 0x60, 0xbc, 0xd7, 0x35, 0xf5, 0x80, 0xb6, 0x02, 0xb6, 0xe3, - 0x69, 0xef, 0x4f, 0x4d, 0xf2, 0x26, 0x2e, 0x0f, 0x3f, 0x0b, 0x26, 0xe0, 0xa2, 0xd7, 0x9c, 0x2c, - 0xc7, 0x94, 0x58, 0xed, 0x4d, 0x18, 0x9b, 0xef, 0x05, 
0x3b, 0xae, 0x67, 0xbd, 0xcf, 0xd5, 0x7f, - 0xb2, 0x04, 0x95, 0x80, 0xab, 0x71, 0x42, 0x43, 0x78, 0x3e, 0xeb, 0xa5, 0x0b, 0x95, 0xfa, 0x16, - 0xdd, 0x57, 0x7a, 0x89, 0x58, 0xa9, 0x85, 0x5a, 0x27, 0xaa, 0x6b, 0x7f, 0xae, 0x00, 0xd5, 0xa6, - 0x6e, 0xec, 0xba, 0xdb, 0xdb, 0xe4, 0x2d, 0xa8, 0x59, 0x4e, 0x40, 0xbd, 0x3d, 0xdd, 0x96, 0xb0, - 0x33, 0x31, 0xd8, 0x70, 0x43, 0x18, 0x3d, 0x1e, 0xdb, 0x7d, 0x31, 0x41, 0x8b, 0x3d, 0xb9, 0x6b, - 0xe1, 0x9a, 0xf1, 0x8a, 0xc4, 0xc0, 0x10, 0x8d, 0x4c, 0x43, 0xc5, 0x0f, 0x68, 0xd7, 0xe7, 0x6b, - 0xe0, 0x98, 0x68, 0x46, 0x8b, 0x15, 0xa0, 0x28, 0xd7, 0xfe, 0x66, 0x01, 0xea, 0x4d, 0xdd, 0xb7, - 0x0c, 0xf6, 0x94, 0x64, 0x01, 0xca, 0x3d, 0x9f, 0x7a, 0xc7, 0x7b, 0x36, 0xbe, 0x6c, 0x6d, 0xfa, - 0xd4, 0x43, 0x5e, 0x99, 0xdc, 0x81, 0x5a, 0x57, 0xf7, 0xfd, 0x7b, 0xae, 0x67, 0xca, 0xa5, 0xf7, - 0x88, 0x40, 0x62, 0x9b, 0x20, 0xab, 0x62, 0x08, 0xa2, 0x35, 0x20, 0xd2, 0x3d, 0xb4, 0x9f, 0x17, - 0xe0, 0x6c, 0xb3, 0xb7, 0xbd, 0x4d, 0x3d, 0xa9, 0x15, 0x4b, 0x7d, 0x93, 0x42, 0xc5, 0xa3, 0xa6, - 0xe5, 0xcb, 0xb6, 0x2f, 0x0e, 0x3d, 0x50, 0x90, 0xa1, 0x48, 0xf5, 0x96, 0xf7, 0x17, 0x2f, 0x40, - 0x81, 0x4e, 0x7a, 0x50, 0x7f, 0x97, 0xb2, 0xdd, 0x38, 0xd5, 0x3b, 0xf2, 0xe9, 0x6e, 0x0c, 0x2d, - 0xea, 0x26, 0x0d, 0x5a, 0x1c, 0x29, 0xae, 0x4d, 0x87, 0x85, 0x18, 0x49, 0xd2, 0x7e, 0x54, 0x81, - 0xd1, 0x05, 0xb7, 0xb3, 0x65, 0x39, 0xd4, 0xbc, 0x6e, 0xb6, 0x29, 0x79, 0x07, 0xca, 0xd4, 0x6c, - 0x53, 0xf9, 0xb4, 0xc3, 0x2b, 0x1e, 0x0c, 0x2c, 0x52, 0x9f, 0xd8, 0x1d, 0x72, 0x60, 0xb2, 0x0a, - 0xe3, 0xdb, 0x9e, 0xdb, 0x11, 0x73, 0xf9, 0xc6, 0x7e, 0x57, 0xea, 0xce, 0xcd, 0xdf, 0x54, 0x1f, - 0xce, 0x52, 0x82, 0xfa, 0xe0, 0x60, 0x1a, 0xa2, 0x3b, 0x4c, 0xd5, 0x25, 0x6f, 0xc1, 0x54, 0x54, - 0x12, 0x4e, 0x6a, 0x0b, 0x6c, 0x3b, 0xc3, 0x75, 0xa7, 0x4a, 0xf3, 0xf2, 0xe1, 0xc1, 0xf4, 0xd4, - 0xd2, 0x00, 0x1e, 0x1c, 0x58, 0x9b, 0x4d, 0x15, 0x93, 0x11, 0x51, 0x2c, 0x34, 0x52, 0x65, 0x3a, - 0xa1, 0x15, 0x8c, 0xef, 0xfb, 0x96, 0x52, 0x22, 0xb0, 0x4f, 0x28, 0x59, 0x82, 0xd1, 0xc0, 
0x8d, - 0xf5, 0x57, 0x85, 0xf7, 0x97, 0xa6, 0x0c, 0x15, 0x1b, 0xee, 0xc0, 0xde, 0x4a, 0xd4, 0x23, 0x08, - 0x17, 0xd4, 0x7d, 0xaa, 0xa7, 0x46, 0x78, 0x4f, 0x5d, 0x3a, 0x3c, 0x98, 0xbe, 0xb0, 0x91, 0xc9, - 0x81, 0x03, 0x6a, 0x92, 0x6f, 0x14, 0x60, 0x5c, 0x91, 0x64, 0x1f, 0x55, 0x4f, 0xb2, 0x8f, 0x08, - 0x1b, 0x11, 0x1b, 0x09, 0x01, 0x98, 0x12, 0xa8, 0xfd, 0xb2, 0x0c, 0xf5, 0x70, 0xaa, 0x27, 0xcf, - 0x41, 0x85, 0x9b, 0x20, 0xa4, 0x06, 0x1f, 0xae, 0xe1, 0xdc, 0x52, 0x81, 0x82, 0x46, 0x9e, 0x87, - 0xaa, 0xe1, 0x76, 0x3a, 0xba, 0x63, 0x72, 0xb3, 0x52, 0xbd, 0xd9, 0x60, 0xaa, 0xcb, 0x82, 0x28, - 0x42, 0x45, 0x23, 0x97, 0xa1, 0xac, 0x7b, 0x6d, 0x61, 0xe1, 0xa9, 0x8b, 0xf9, 0x68, 0xde, 0x6b, - 0xfb, 0xc8, 0x4b, 0xc9, 0xe7, 0xa0, 0x44, 0x9d, 0xbd, 0xa9, 0xf2, 0x60, 0xdd, 0xe8, 0xba, 0xb3, - 0x77, 0x57, 0xf7, 0x9a, 0x0d, 0xd9, 0x86, 0xd2, 0x75, 0x67, 0x0f, 0x59, 0x1d, 0xb2, 0x0a, 0x55, - 0xea, 0xec, 0xb1, 0x77, 0x2f, 0x4d, 0x2f, 0xbf, 0x31, 0xa0, 0x3a, 0x63, 0x91, 0xdb, 0x84, 0x50, - 0xc3, 0x92, 0xc5, 0xa8, 0x20, 0xc8, 0x97, 0x60, 0x54, 0x28, 0x5b, 0x6b, 0xec, 0x9d, 0xf8, 0x53, - 0x23, 0x1c, 0x72, 0x7a, 0xb0, 0xb6, 0xc6, 0xf9, 0x22, 0x53, 0x57, 0xac, 0xd0, 0xc7, 0x04, 0x14, - 0xf9, 0x12, 0xd4, 0xd5, 0xce, 0x58, 0xbd, 0xd9, 0x4c, 0x2b, 0x91, 0xda, 0x4e, 0x23, 0x7d, 0xaf, - 0x67, 0x79, 0xb4, 0x43, 0x9d, 0xc0, 0x6f, 0x9e, 0x51, 0x76, 0x03, 0x45, 0xf5, 0x31, 0x42, 0x23, - 0x5b, 0xfd, 0xe6, 0x2e, 0x61, 0xab, 0x79, 0x6e, 0xc0, 0xac, 0x3e, 0x84, 0xad, 0xeb, 0x2b, 0x30, - 0x11, 0xda, 0xa3, 0xa4, 0x49, 0x43, 0x58, 0x6f, 0x5e, 0x62, 0xd5, 0x57, 0x92, 0xa4, 0x07, 0x07, - 0xd3, 0xcf, 0x66, 0x18, 0x35, 0x22, 0x06, 0x4c, 0x83, 0x69, 0x3f, 0x2c, 0x41, 0xff, 0x3e, 0x24, - 0xd9, 0x69, 0x85, 0x93, 0xee, 0xb4, 0xf4, 0x03, 0x89, 0xe9, 0xf3, 0x35, 0x59, 0x2d, 0xff, 0x43, - 0x65, 0xbd, 0x98, 0xd2, 0x49, 0xbf, 0x98, 0x27, 0xe5, 0xdb, 0xd1, 0x3e, 0x2a, 0xc3, 0xf8, 0xa2, - 0x4e, 0x3b, 0xae, 0xf3, 0xc8, 0x5d, 0x59, 0xe1, 0x89, 0xd8, 0x95, 0x5d, 0x83, 0x9a, 0x47, 0xbb, - 0xb6, 0x65, 0xe8, 0x42, 0xf9, 
0x92, 0x56, 0x50, 0x94, 0x65, 0x18, 0x52, 0x07, 0xec, 0xc6, 0x4b, - 0x4f, 0xe4, 0x6e, 0xbc, 0xfc, 0xc9, 0xef, 0xc6, 0xb5, 0x6f, 0x14, 0x81, 0x2b, 0x2a, 0xe4, 0x2a, - 0x94, 0xd9, 0x22, 0x9c, 0xb6, 0x01, 0xf1, 0x81, 0xc3, 0x29, 0xe4, 0x12, 0x14, 0x03, 0x57, 0x7e, - 0x79, 0x20, 0xe9, 0xc5, 0x0d, 0x17, 0x8b, 0x81, 0x4b, 0xde, 0x07, 0x30, 0x5c, 0xc7, 0xb4, 0x94, - 0x73, 0x20, 0xdf, 0x83, 0x2d, 0xb9, 0xde, 0x3d, 0xdd, 0x33, 0x17, 0x42, 0x44, 0xb1, 0x1f, 0x8b, - 0xee, 0x31, 0x26, 0x8d, 0xbc, 0x0e, 0x23, 0xae, 0xb3, 0xd4, 0xb3, 0x6d, 0xde, 0xa1, 0xf5, 0xe6, - 0x6f, 0xb1, 0x4d, 0xf2, 0x1d, 0x5e, 0xf2, 0xe0, 0x60, 0xfa, 0xa2, 0xd0, 0x6f, 0xd9, 0xdd, 0x9b, - 0x9e, 0x15, 0x58, 0x4e, 0x3b, 0xdc, 0x9e, 0xc8, 0x6a, 0xda, 0xb7, 0x0a, 0xd0, 0x58, 0xb2, 0xee, - 0x53, 0xf3, 0x4d, 0xcb, 0x31, 0xdd, 0x7b, 0x04, 0x61, 0xc4, 0xa6, 0x4e, 0x3b, 0xd8, 0x19, 0x72, - 0xff, 0x20, 0x76, 0xe9, 0x1c, 0x01, 0x25, 0x12, 0x99, 0x85, 0xba, 0xd0, 0x3e, 0x2d, 0xa7, 0xcd, - 0xfb, 0xb0, 0x16, 0x4d, 0x7a, 0x2d, 0x45, 0xc0, 0x88, 0x47, 0xdb, 0x87, 0x33, 0x7d, 0xdd, 0x40, - 0x4c, 0x28, 0x07, 0x7a, 0x5b, 0xcd, 0xaf, 0x4b, 0x43, 0x77, 0xf0, 0x86, 0xde, 0x8e, 0x75, 0x2e, - 0x5f, 0xe3, 0x37, 0x74, 0xb6, 0xc6, 0x33, 0x74, 0xed, 0x57, 0x05, 0xa8, 0x2d, 0xf5, 0x1c, 0x83, - 0x6f, 0xd1, 0x1e, 0x6d, 0x1b, 0x54, 0x0a, 0x43, 0x31, 0x53, 0x61, 0xe8, 0xc1, 0xc8, 0xee, 0xbd, - 0x50, 0xa1, 0x68, 0xcc, 0xad, 0x0d, 0x3f, 0x2a, 0x64, 0x93, 0x66, 0x6e, 0x71, 0x3c, 0xe1, 0xba, - 0x1a, 0x97, 0x0d, 0x1a, 0xb9, 0xf5, 0x26, 0x17, 0x2a, 0x85, 0x5d, 0xfa, 0x1c, 0x34, 0x62, 0x6c, - 0xc7, 0xb2, 0x62, 0xff, 0xbd, 0x32, 0x8c, 0x2c, 0xb7, 0x5a, 0xf3, 0xeb, 0x2b, 0xe4, 0x65, 0x68, - 0x48, 0xaf, 0xc6, 0xed, 0xa8, 0x0f, 0x42, 0xa7, 0x56, 0x2b, 0x22, 0x61, 0x9c, 0x8f, 0xa9, 0x63, - 0x1e, 0xd5, 0xed, 0x8e, 0xfc, 0x58, 0x42, 0x75, 0x0c, 0x59, 0x21, 0x0a, 0x1a, 0xd1, 0x61, 0x9c, - 0xed, 0xf0, 0x58, 0x17, 0x8a, 0xdd, 0x9b, 0xfc, 0x6c, 0x8e, 0xb8, 0xbf, 0xe3, 0x4a, 0xe2, 0x66, - 0x02, 0x00, 0x53, 0x80, 0xe4, 0x35, 0xa8, 0xe9, 0xbd, 0x60, 0x87, 
0x2b, 0xd0, 0xe2, 0xdb, 0xb8, - 0xcc, 0x9d, 0x3e, 0xb2, 0xec, 0xc1, 0xc1, 0xf4, 0xe8, 0x2d, 0x6c, 0xbe, 0xac, 0xee, 0x31, 0xe4, - 0x66, 0x8d, 0x53, 0x3b, 0x46, 0xd9, 0xb8, 0xca, 0xb1, 0x1b, 0xb7, 0x9e, 0x00, 0xc0, 0x14, 0x20, - 0x79, 0x1b, 0x46, 0x77, 0xe9, 0x7e, 0xa0, 0x6f, 0x49, 0x01, 0x23, 0xc7, 0x11, 0x30, 0xc9, 0x54, - 0xb8, 0x5b, 0xb1, 0xea, 0x98, 0x00, 0x23, 0x3e, 0x9c, 0xdb, 0xa5, 0xde, 0x16, 0xf5, 0x5c, 0xb9, - 0xfb, 0x94, 0x42, 0xaa, 0xc7, 0x11, 0x32, 0x75, 0x78, 0x30, 0x7d, 0xee, 0x56, 0x06, 0x0c, 0x66, - 0x82, 0x6b, 0xff, 0xbb, 0x08, 0x13, 0xcb, 0xc2, 0xad, 0xec, 0x7a, 0x62, 0x11, 0x26, 0x17, 0xa1, - 0xe4, 0x75, 0x7b, 0x7c, 0xe4, 0x94, 0x84, 0xe1, 0x18, 0xd7, 0x37, 0x91, 0x95, 0x91, 0xb7, 0xa0, - 0x66, 0xca, 0x29, 0x43, 0x6e, 0x7e, 0x87, 0x32, 0x54, 0xa8, 0x3b, 0x0c, 0xd1, 0x98, 0xa6, 0xdf, - 0xf1, 0xdb, 0x2d, 0xeb, 0x7d, 0x2a, 0xf7, 0x83, 0x5c, 0xd3, 0x5f, 0x13, 0x45, 0xa8, 0x68, 0x6c, - 0x55, 0xdd, 0xa5, 0xfb, 0x62, 0x37, 0x54, 0x8e, 0x56, 0xd5, 0x5b, 0xb2, 0x0c, 0x43, 0x2a, 0x99, - 0x56, 0x1f, 0x0b, 0x1b, 0x05, 0x65, 0xb1, 0x93, 0xbf, 0xcb, 0x0a, 0xe4, 0x77, 0xc3, 0xa6, 0xcc, - 0x77, 0xad, 0x20, 0xa0, 0x9e, 0x7c, 0x8d, 0x43, 0x4d, 0x99, 0x37, 0x39, 0x02, 0x4a, 0x24, 0xf2, - 0x3b, 0x50, 0xe7, 0xe0, 0x4d, 0xdb, 0xdd, 0xe2, 0x2f, 0xae, 0x2e, 0xf6, 0xf4, 0x77, 0x55, 0x21, - 0x46, 0x74, 0xed, 0xd7, 0x45, 0xb8, 0xb0, 0x4c, 0x03, 0xa1, 0xd5, 0x2c, 0xd2, 0xae, 0xed, 0xee, - 0x33, 0xd5, 0x12, 0xe9, 0x7b, 0xe4, 0x0d, 0x00, 0xcb, 0xdf, 0x6a, 0xed, 0x19, 0xfc, 0x3b, 0x10, - 0xdf, 0xf0, 0x55, 0xf9, 0x49, 0xc2, 0x4a, 0xab, 0x29, 0x29, 0x0f, 0x12, 0x77, 0x18, 0xab, 0x13, - 0x6d, 0xaf, 0x8a, 0x0f, 0xd9, 0x5e, 0xb5, 0x00, 0xba, 0x91, 0x82, 0x5a, 0xe2, 0x9c, 0xbf, 0xaf, - 0xc4, 0x1c, 0x47, 0x37, 0x8d, 0xc1, 0xe4, 0x51, 0x19, 0x1d, 0x98, 0x34, 0xe9, 0xb6, 0xde, 0xb3, - 0x83, 0x50, 0xa9, 0x96, 0x1f, 0xf1, 0xd1, 0xf5, 0xf2, 0xd0, 0xe5, 0xbd, 0x98, 0x42, 0xc2, 0x3e, - 0x6c, 0xed, 0xef, 0x97, 0xe0, 0xd2, 0x32, 0x0d, 0x42, 0x8b, 0x8b, 0x9c, 0x1d, 0x5b, 0x5d, 0x6a, - 0xb0, 
0xb7, 0xf0, 0x61, 0x01, 0x46, 0x6c, 0x7d, 0x8b, 0xda, 0x6c, 0xf5, 0x62, 0x4f, 0xf3, 0xce, - 0xd0, 0x0b, 0xc1, 0x60, 0x29, 0x33, 0xab, 0x5c, 0x42, 0x6a, 0x69, 0x10, 0x85, 0x28, 0xc5, 0xb3, - 0x49, 0xdd, 0xb0, 0x7b, 0x7e, 0x40, 0xbd, 0x75, 0xd7, 0x0b, 0xa4, 0x3e, 0x19, 0x4e, 0xea, 0x0b, - 0x11, 0x09, 0xe3, 0x7c, 0x64, 0x0e, 0xc0, 0xb0, 0x2d, 0xea, 0x04, 0xbc, 0x96, 0xf8, 0xae, 0x88, - 0x7a, 0xbf, 0x0b, 0x21, 0x05, 0x63, 0x5c, 0x4c, 0x54, 0xc7, 0x75, 0xac, 0xc0, 0x15, 0xa2, 0xca, - 0x49, 0x51, 0x6b, 0x11, 0x09, 0xe3, 0x7c, 0xbc, 0x1a, 0x0d, 0x3c, 0xcb, 0xf0, 0x79, 0xb5, 0x4a, - 0xaa, 0x5a, 0x44, 0xc2, 0x38, 0x1f, 0x5b, 0xf3, 0x62, 0xcf, 0x7f, 0xac, 0x35, 0xef, 0x7b, 0x75, - 0xb8, 0x92, 0xe8, 0xd6, 0x40, 0x0f, 0xe8, 0x76, 0xcf, 0x6e, 0xd1, 0x40, 0xbd, 0xc0, 0x21, 0xd7, - 0xc2, 0xbf, 0x18, 0xbd, 0x77, 0x11, 0xcc, 0x62, 0x9c, 0xcc, 0x7b, 0xef, 0x6b, 0xe0, 0x91, 0xde, - 0xfd, 0x2c, 0xd4, 0x1d, 0x3d, 0xf0, 0xf9, 0x87, 0x2b, 0xbf, 0xd1, 0x50, 0x0d, 0xbb, 0xad, 0x08, - 0x18, 0xf1, 0x90, 0x75, 0x38, 0x27, 0xbb, 0xf8, 0xfa, 0xfd, 0xae, 0xeb, 0x05, 0xd4, 0x13, 0x75, - 0xe5, 0x72, 0x2a, 0xeb, 0x9e, 0x5b, 0xcb, 0xe0, 0xc1, 0xcc, 0x9a, 0x64, 0x0d, 0xce, 0x1a, 0xc2, - 0xc1, 0x4f, 0x6d, 0x57, 0x37, 0x15, 0xa0, 0x30, 0x70, 0x85, 0x5b, 0xa3, 0x85, 0x7e, 0x16, 0xcc, - 0xaa, 0x97, 0x1e, 0xcd, 0x23, 0x43, 0x8d, 0xe6, 0xea, 0x30, 0xa3, 0xb9, 0x36, 0xdc, 0x68, 0xae, - 0x1f, 0x6d, 0x34, 0xb3, 0x9e, 0x67, 0xe3, 0x88, 0x7a, 0x4c, 0x3d, 0x11, 0x2b, 0x6c, 0x2c, 0x7e, - 0x24, 0xec, 0xf9, 0x56, 0x06, 0x0f, 0x66, 0xd6, 0x24, 0x5b, 0x70, 0x49, 0x94, 0x5f, 0x77, 0x0c, - 0x6f, 0xbf, 0xcb, 0x16, 0x9e, 0x18, 0x6e, 0x23, 0x61, 0x61, 0xbc, 0xd4, 0x1a, 0xc8, 0x89, 0x0f, - 0x41, 0x21, 0x7f, 0x08, 0x63, 0xe2, 0x2d, 0xad, 0xe9, 0x5d, 0x0e, 0x2b, 0xa2, 0x49, 0xce, 0x4b, - 0xd8, 0xb1, 0x85, 0x38, 0x11, 0x93, 0xbc, 0x64, 0x1e, 0x26, 0xba, 0x7b, 0x06, 0xbb, 0x5c, 0xd9, - 0xbe, 0x4d, 0xa9, 0x49, 0x4d, 0xee, 0xbe, 0xaa, 0x37, 0x9f, 0x56, 0x86, 0x8e, 0xf5, 0x24, 0x19, - 0xd3, 0xfc, 0xe4, 0x35, 0x18, 0xf5, 0x03, 
0xdd, 0x0b, 0xa4, 0x59, 0x6f, 0x6a, 0x5c, 0x44, 0xdb, - 0x28, 0xab, 0x57, 0x2b, 0x46, 0xc3, 0x04, 0x67, 0xe6, 0x7a, 0x31, 0x71, 0x7a, 0xeb, 0x45, 0x9e, - 0xd9, 0xea, 0x9f, 0x15, 0xe1, 0xea, 0x32, 0x0d, 0xd6, 0x5c, 0x47, 0x1a, 0x45, 0xb3, 0x96, 0xfd, - 0x23, 0xd9, 0x44, 0x93, 0x8b, 0x76, 0xf1, 0x44, 0x17, 0xed, 0xd2, 0x09, 0x2d, 0xda, 0xe5, 0x53, - 0x5c, 0xb4, 0xff, 0x61, 0x11, 0x9e, 0x4e, 0xf4, 0xe4, 0xba, 0x6b, 0xaa, 0x09, 0xff, 0xb3, 0x0e, - 0x3c, 0x42, 0x07, 0x3e, 0x10, 0x7a, 0x27, 0x77, 0x6b, 0xa5, 0x34, 0x9e, 0x0f, 0xd2, 0x1a, 0xcf, - 0xdb, 0x79, 0x56, 0xbe, 0x0c, 0x09, 0x47, 0x5a, 0xf1, 0x6e, 0x02, 0xf1, 0xa4, 0x13, 0x4e, 0x98, - 0x7e, 0x62, 0x4a, 0x4f, 0x18, 0xce, 0x87, 0x7d, 0x1c, 0x98, 0x51, 0x8b, 0xb4, 0xe0, 0xbc, 0x4f, - 0x9d, 0xc0, 0x72, 0xa8, 0x9d, 0x84, 0x13, 0xda, 0xd0, 0xb3, 0x12, 0xee, 0x7c, 0x2b, 0x8b, 0x09, - 0xb3, 0xeb, 0xe6, 0x99, 0x07, 0xfe, 0x25, 0x70, 0x95, 0x53, 0x74, 0xcd, 0x89, 0x69, 0x2c, 0x1f, - 0xa6, 0x35, 0x96, 0x77, 0xf2, 0xbf, 0xb7, 0xe1, 0xb4, 0x95, 0x39, 0x00, 0xfe, 0x16, 0xe2, 0xea, - 0x4a, 0xb8, 0x48, 0x63, 0x48, 0xc1, 0x18, 0x17, 0x5b, 0x80, 0x54, 0x3f, 0xc7, 0x35, 0x95, 0x70, - 0x01, 0x6a, 0xc5, 0x89, 0x98, 0xe4, 0x1d, 0xa8, 0xed, 0x54, 0x86, 0xd6, 0x76, 0x6e, 0x02, 0x49, - 0x18, 0x1e, 0x05, 0xde, 0x48, 0x32, 0x9a, 0x74, 0xa5, 0x8f, 0x03, 0x33, 0x6a, 0x0d, 0x18, 0xca, - 0xd5, 0x93, 0x1d, 0xca, 0xb5, 0xe1, 0x87, 0x32, 0x79, 0x07, 0x2e, 0x72, 0x51, 0xb2, 0x7f, 0x92, - 0xc0, 0x42, 0xef, 0xf9, 0x0d, 0x09, 0x7c, 0x11, 0x07, 0x31, 0xe2, 0x60, 0x0c, 0xf6, 0x7e, 0x0c, - 0x8f, 0x9a, 0x4c, 0xb8, 0x6e, 0x0f, 0xd6, 0x89, 0x16, 0x32, 0x78, 0x30, 0xb3, 0x26, 0x1b, 0x62, - 0x01, 0x1b, 0x86, 0xfa, 0x96, 0x4d, 0x4d, 0x19, 0x4d, 0x1b, 0x0e, 0xb1, 0x8d, 0xd5, 0x96, 0xa4, - 0x60, 0x8c, 0x2b, 0x4b, 0x4d, 0x19, 0x3d, 0xa6, 0x9a, 0xb2, 0xcc, 0xad, 0xf4, 0xdb, 0x09, 0x6d, - 0x48, 0xea, 0x3a, 0x61, 0x7c, 0xf4, 0x42, 0x9a, 0x01, 0xfb, 0xeb, 0x70, 0x2d, 0xd1, 0xf0, 0xac, - 0x6e, 0xe0, 0x27, 0xb1, 0xc6, 0x53, 0x5a, 0x62, 0x06, 0x0f, 0x66, 0xd6, 0x64, 
0xfa, 0xf9, 0x0e, - 0xd5, 0xed, 0x60, 0x27, 0x09, 0x38, 0x91, 0xd4, 0xcf, 0x6f, 0xf4, 0xb3, 0x60, 0x56, 0xbd, 0xcc, - 0x05, 0x69, 0xf2, 0xc9, 0x54, 0xab, 0xbe, 0x59, 0x82, 0x8b, 0xcb, 0x34, 0x08, 0x03, 0x8d, 0x3e, - 0x33, 0xa3, 0x7c, 0x02, 0x66, 0x94, 0xef, 0x56, 0xe0, 0xec, 0x32, 0x0d, 0xfa, 0xb4, 0xb1, 0xff, - 0x4f, 0xbb, 0x7f, 0x0d, 0xce, 0x46, 0xb1, 0x6d, 0xad, 0xc0, 0xf5, 0xc4, 0x5a, 0x9e, 0xda, 0x2d, - 0xb7, 0xfa, 0x59, 0x30, 0xab, 0x1e, 0xf9, 0x12, 0x3c, 0xcd, 0x97, 0x7a, 0xa7, 0x2d, 0xec, 0xb3, - 0xc2, 0x98, 0x10, 0x3b, 0x9d, 0x31, 0x2d, 0x21, 0x9f, 0x6e, 0x65, 0xb3, 0xe1, 0xa0, 0xfa, 0xe4, - 0x6b, 0x30, 0xda, 0xb5, 0xba, 0xd4, 0xb6, 0x1c, 0xae, 0x9f, 0xe5, 0x0e, 0x09, 0x59, 0x8f, 0x81, - 0x45, 0x1b, 0xb8, 0x78, 0x29, 0x26, 0x04, 0x66, 0x8e, 0xd4, 0xda, 0x29, 0x8e, 0xd4, 0xff, 0x51, - 0x84, 0xea, 0xb2, 0xe7, 0xf6, 0xba, 0xcd, 0x7d, 0xd2, 0x86, 0x91, 0x7b, 0xdc, 0x79, 0x26, 0x5d, - 0x53, 0xc3, 0xc7, 0x87, 0x0b, 0x1f, 0x5c, 0xa4, 0x12, 0x89, 0x7b, 0x94, 0xf0, 0x6c, 0x10, 0xef, - 0xd2, 0x7d, 0x6a, 0x4a, 0x1f, 0x5a, 0x38, 0x88, 0x6f, 0xb1, 0x42, 0x14, 0x34, 0xd2, 0x81, 0x09, - 0xdd, 0xb6, 0xdd, 0x7b, 0xd4, 0x5c, 0xd5, 0x03, 0xea, 0x50, 0x5f, 0xb9, 0x24, 0x8f, 0x6b, 0x96, - 0xe6, 0x7e, 0xfd, 0xf9, 0x24, 0x14, 0xa6, 0xb1, 0xc9, 0xbb, 0x50, 0xf5, 0x03, 0xd7, 0x53, 0xca, - 0x56, 0x63, 0x6e, 0x61, 0xf8, 0x97, 0xde, 0xfc, 0x62, 0x4b, 0x40, 0x09, 0x9b, 0xbd, 0xbc, 0x41, - 0x25, 0x40, 0xfb, 0x4e, 0x01, 0xe0, 0xc6, 0xc6, 0xc6, 0xba, 0x74, 0x2f, 0x98, 0x50, 0xd6, 0x7b, - 0xa1, 0xa3, 0x72, 0x78, 0x87, 0x60, 0x22, 0x2c, 0x53, 0xfa, 0xf0, 0x7a, 0xc1, 0x0e, 0x72, 0x74, - 0xf2, 0xdb, 0x50, 0x95, 0x0a, 0xb2, 0xec, 0xf6, 0x30, 0xb4, 0x40, 0x2a, 0xd1, 0xa8, 0xe8, 0xda, - 0xdf, 0x2d, 0x02, 0xac, 0x98, 0x36, 0x6d, 0xa9, 0x90, 0xfe, 0x7a, 0xb0, 0xe3, 0x51, 0x7f, 0xc7, - 0xb5, 0xcd, 0x21, 0xbd, 0xa9, 0xdc, 0xe6, 0xbf, 0xa1, 0x40, 0x30, 0xc2, 0x23, 0x26, 0x8c, 0xfa, - 0x01, 0xed, 0xaa, 0x48, 0xcd, 0x21, 0x9d, 0x28, 0x93, 0xc2, 0x2e, 0x12, 0xe1, 0x60, 0x02, 0x95, - 0xe8, 0xd0, 0xb0, 
0x1c, 0x43, 0x7c, 0x20, 0xcd, 0xfd, 0x21, 0x07, 0xd2, 0x04, 0xdb, 0x71, 0xac, - 0x44, 0x30, 0x18, 0xc7, 0xd4, 0x7e, 0x56, 0x84, 0x0b, 0x5c, 0x1e, 0x6b, 0x46, 0x22, 0x1e, 0x93, - 0xfc, 0xe9, 0xbe, 0xe3, 0x87, 0x7f, 0xf2, 0x68, 0xa2, 0xc5, 0xe9, 0xb5, 0x35, 0x1a, 0xe8, 0x91, - 0x3e, 0x17, 0x95, 0xc5, 0xce, 0x1c, 0xf6, 0xa0, 0xec, 0xb3, 0xf9, 0x4a, 0xf4, 0x5e, 0x6b, 0xe8, - 0x21, 0x94, 0xfd, 0x00, 0x7c, 0xf6, 0x0a, 0xbd, 0xc6, 0x7c, 0xd6, 0xe2, 0xe2, 0xc8, 0x9f, 0x85, - 0x11, 0x3f, 0xd0, 0x83, 0x9e, 0xfa, 0x34, 0x37, 0x4f, 0x5a, 0x30, 0x07, 0x8f, 0xe6, 0x11, 0x71, - 0x8f, 0x52, 0xa8, 0xf6, 0xb3, 0x02, 0x5c, 0xca, 0xae, 0xb8, 0x6a, 0xf9, 0x01, 0xf9, 0x53, 0x7d, - 0xdd, 0x7e, 0xc4, 0x37, 0xce, 0x6a, 0xf3, 0x4e, 0x0f, 0x23, 0xd4, 0x55, 0x49, 0xac, 0xcb, 0x03, - 0xa8, 0x58, 0x01, 0xed, 0xa8, 0xfd, 0xe5, 0x9d, 0x13, 0x7e, 0xf4, 0xd8, 0xd2, 0xce, 0xa4, 0xa0, - 0x10, 0xa6, 0x7d, 0x54, 0x1c, 0xf4, 0xc8, 0x7c, 0xf9, 0xb0, 0x93, 0x31, 0xbf, 0xb7, 0xf2, 0xc5, - 0xfc, 0x26, 0x1b, 0xd4, 0x1f, 0xfa, 0xfb, 0x67, 0xfa, 0x43, 0x7f, 0xef, 0xe4, 0x0f, 0xfd, 0x4d, - 0x75, 0xc3, 0xc0, 0x08, 0xe0, 0x8f, 0x4b, 0x70, 0xf9, 0x61, 0xc3, 0x86, 0xad, 0x67, 0x72, 0x74, - 0xe6, 0x5d, 0xcf, 0x1e, 0x3e, 0x0e, 0xc9, 0x1c, 0x54, 0xba, 0x3b, 0xba, 0xaf, 0x94, 0x32, 0xb5, - 0x61, 0xa9, 0xac, 0xb3, 0xc2, 0x07, 0x6c, 0xd2, 0xe0, 0xca, 0x1c, 0xbf, 0x45, 0xc1, 0xca, 0xa6, - 0xe3, 0x0e, 0xf5, 0xfd, 0xc8, 0x26, 0x10, 0x4e, 0xc7, 0x6b, 0xa2, 0x18, 0x15, 0x9d, 0x04, 0x30, - 0x22, 0x4c, 0xcc, 0x72, 0x65, 0x1a, 0x3e, 0x90, 0x2b, 0x23, 0x4c, 0x3c, 0x7a, 0x28, 0xe9, 0xad, - 0x90, 0xb2, 0xc8, 0x0c, 0x94, 0x83, 0x28, 0x68, 0x57, 0x6d, 0xcd, 0xcb, 0x19, 0xfa, 0x29, 0xe7, - 0x63, 0x1b, 0x7b, 0x77, 0x8b, 0x1b, 0xd5, 0x4d, 0xe9, 0x3f, 0xb7, 0x5c, 0x87, 0x2b, 0x64, 0xa5, - 0x68, 0x63, 0x7f, 0xa7, 0x8f, 0x03, 0x33, 0x6a, 0x69, 0xff, 0xa6, 0x06, 0x17, 0xb2, 0xc7, 0x03, - 0xeb, 0xb7, 0x3d, 0xea, 0xf9, 0x0c, 0xbb, 0x90, 0xec, 0xb7, 0xbb, 0xa2, 0x18, 0x15, 0xfd, 0x53, - 0x1d, 0x70, 0xf6, 0xdd, 0x02, 0x5c, 0xf4, 0xa4, 0x8f, 
0xe8, 0x71, 0x04, 0x9d, 0x3d, 0x2b, 0xcc, - 0x19, 0x03, 0x04, 0xe2, 0xe0, 0xb6, 0x90, 0xbf, 0x5d, 0x80, 0xa9, 0x4e, 0xca, 0xce, 0x71, 0x8a, - 0x27, 0xe8, 0x78, 0x54, 0xfc, 0xda, 0x00, 0x79, 0x38, 0xb0, 0x25, 0xe4, 0x6b, 0xd0, 0xe8, 0xb2, - 0x71, 0xe1, 0x07, 0xd4, 0x31, 0xd4, 0x21, 0xba, 0xe1, 0xbf, 0xa4, 0xf5, 0x08, 0x2b, 0x3c, 0x41, - 0xc3, 0xf5, 0x83, 0x18, 0x01, 0xe3, 0x12, 0x9f, 0xf0, 0x23, 0x73, 0xd7, 0xa0, 0xe6, 0xd3, 0x20, - 0xb0, 0x9c, 0xb6, 0xd8, 0x6f, 0xd4, 0xc5, 0xb7, 0xd2, 0x92, 0x65, 0x18, 0x52, 0xc9, 0xef, 0x40, - 0x9d, 0xbb, 0x9c, 0xe6, 0xbd, 0xb6, 0x3f, 0x55, 0xe7, 0xe1, 0x62, 0x63, 0x22, 0x00, 0x4e, 0x16, - 0x62, 0x44, 0x27, 0x2f, 0xc1, 0xe8, 0x16, 0xff, 0x7c, 0xe5, 0x29, 0x6a, 0x61, 0xe3, 0xe2, 0xda, - 0x5a, 0x33, 0x56, 0x8e, 0x09, 0x2e, 0x32, 0x07, 0x40, 0x43, 0xbf, 0x5c, 0xda, 0x9e, 0x15, 0x79, - 0xec, 0x30, 0xc6, 0x45, 0x9e, 0x85, 0x52, 0x60, 0xfb, 0xdc, 0x86, 0x55, 0x8b, 0xb6, 0xa0, 0x1b, - 0xab, 0x2d, 0x64, 0xe5, 0xda, 0xaf, 0x0b, 0x30, 0x91, 0x3a, 0x5c, 0xc2, 0xaa, 0xf4, 0x3c, 0x5b, - 0x4e, 0x23, 0x61, 0x95, 0x4d, 0x5c, 0x45, 0x56, 0x4e, 0xde, 0x91, 0x6a, 0x79, 0x31, 0x67, 0xc2, - 0x88, 0xdb, 0x7a, 0xe0, 0x33, 0x3d, 0xbc, 0x4f, 0x23, 0xe7, 0x6e, 0xbe, 0xa8, 0x3d, 0x72, 0x1d, - 0x88, 0xb9, 0xf9, 0x22, 0x1a, 0x26, 0x38, 0x53, 0x06, 0xbf, 0xf2, 0x51, 0x0c, 0x7e, 0xda, 0xb7, - 0x8a, 0xb1, 0x1e, 0x90, 0x9a, 0xfd, 0x23, 0x7a, 0xe0, 0x05, 0xb6, 0x80, 0x86, 0x8b, 0x7b, 0x3d, - 0xbe, 0xfe, 0xf1, 0xc5, 0x58, 0x52, 0xc9, 0x9b, 0xa2, 0xef, 0x4b, 0x39, 0x8f, 0xe5, 0x6e, 0xac, - 0xb6, 0x44, 0x74, 0x95, 0x7a, 0x6b, 0xe1, 0x2b, 0x28, 0x9f, 0xd2, 0x2b, 0xd0, 0xfe, 0x45, 0x09, - 0x1a, 0x37, 0xdd, 0xad, 0x4f, 0x49, 0x04, 0x75, 0xf6, 0x32, 0x55, 0xfc, 0x04, 0x97, 0xa9, 0x4d, - 0x78, 0x3a, 0x08, 0xec, 0x16, 0x35, 0x5c, 0xc7, 0xf4, 0xe7, 0xb7, 0x03, 0xea, 0x2d, 0x59, 0x8e, - 0xe5, 0xef, 0x50, 0x53, 0xba, 0x93, 0x9e, 0x39, 0x3c, 0x98, 0x7e, 0x7a, 0x63, 0x63, 0x35, 0x8b, - 0x05, 0x07, 0xd5, 0xe5, 0xd3, 0x86, 0x38, 0x09, 0xc8, 0x4f, 0xca, 0xc8, 0x98, 0x1b, 0x31, 
0x6d, - 0xc4, 0xca, 0x31, 0xc1, 0xa5, 0xfd, 0xa0, 0x08, 0xf5, 0x30, 0x15, 0x00, 0x79, 0x1e, 0xaa, 0x5b, - 0x9e, 0xbb, 0x4b, 0x3d, 0xe1, 0xb9, 0x93, 0x27, 0x65, 0x9a, 0xa2, 0x08, 0x15, 0x8d, 0x3c, 0x07, - 0x95, 0xc0, 0xed, 0x5a, 0x46, 0xda, 0xa0, 0xb6, 0xc1, 0x0a, 0x51, 0xd0, 0x4e, 0x6f, 0x80, 0xbf, - 0x90, 0x50, 0xed, 0xea, 0x03, 0x95, 0xb1, 0xb7, 0xa1, 0xec, 0xeb, 0xbe, 0x2d, 0xd7, 0xd3, 0x1c, - 0xa7, 0xea, 0xe7, 0x5b, 0xab, 0xf2, 0x54, 0xfd, 0x7c, 0x6b, 0x15, 0x39, 0xa8, 0xf6, 0xcb, 0x22, - 0x34, 0x44, 0xbf, 0x89, 0x59, 0xe1, 0x24, 0x7b, 0xee, 0x75, 0x1e, 0x4a, 0xe1, 0xf7, 0x3a, 0xd4, - 0xe3, 0x66, 0x26, 0x39, 0xc9, 0xc5, 0xfd, 0x03, 0x11, 0x31, 0x0c, 0xa7, 0x88, 0x8a, 0x54, 0xd7, - 0x97, 0x4f, 0xb1, 0xeb, 0x2b, 0x47, 0xea, 0xfa, 0x91, 0xd3, 0xe8, 0xfa, 0x0f, 0x8b, 0x50, 0x5f, - 0xb5, 0xb6, 0xa9, 0xb1, 0x6f, 0xd8, 0xfc, 0x4c, 0xa0, 0x49, 0x6d, 0x1a, 0xd0, 0x65, 0x4f, 0x37, - 0xe8, 0x3a, 0xf5, 0x2c, 0x9e, 0x2a, 0x87, 0x7d, 0x1f, 0x7c, 0x06, 0x92, 0x67, 0x02, 0x17, 0x07, - 0xf0, 0xe0, 0xc0, 0xda, 0x64, 0x05, 0x46, 0x4d, 0xea, 0x5b, 0x1e, 0x35, 0xd7, 0x63, 0x1b, 0x95, - 0xe7, 0xd5, 0x52, 0xb3, 0x18, 0xa3, 0x3d, 0x38, 0x98, 0x1e, 0x53, 0x06, 0x4a, 0xb1, 0x63, 0x49, - 0x54, 0x65, 0x9f, 0x7c, 0x57, 0xef, 0xf9, 0x59, 0x6d, 0x8c, 0x7d, 0xf2, 0xeb, 0xd9, 0x2c, 0x38, - 0xa8, 0xae, 0x56, 0x81, 0xd2, 0xaa, 0xdb, 0xd6, 0x3e, 0x2a, 0x41, 0x98, 0x53, 0x89, 0xfc, 0x85, - 0x02, 0x34, 0x74, 0xc7, 0x71, 0x03, 0x99, 0xaf, 0x48, 0x78, 0xe0, 0x31, 0x77, 0xea, 0xa6, 0x99, - 0xf9, 0x08, 0x54, 0x38, 0x6f, 0x43, 0x87, 0x72, 0x8c, 0x82, 0x71, 0xd9, 0xa4, 0x97, 0xf2, 0x27, - 0xaf, 0xe5, 0x6f, 0xc5, 0x11, 0xbc, 0xc7, 0x97, 0xbe, 0x00, 0x93, 0xe9, 0xc6, 0x1e, 0xc7, 0x1d, - 0x94, 0xcb, 0x31, 0x5f, 0x04, 0x88, 0x62, 0x4a, 0x1e, 0x83, 0x11, 0xcb, 0x4a, 0x18, 0xb1, 0x86, - 0x3f, 0xd8, 0x1e, 0x35, 0x7a, 0xa0, 0xe1, 0xea, 0xbd, 0x94, 0xe1, 0x6a, 0xe5, 0x24, 0x84, 0x3d, - 0xdc, 0x58, 0xf5, 0x77, 0x0a, 0x30, 0x19, 0x31, 0xcb, 0x13, 0xb2, 0xaf, 0xc2, 0x98, 0x47, 0x75, - 0xb3, 0xa9, 0x07, 0xc6, 0x0e, 
0x0f, 0xf5, 0x2e, 0xf0, 0xd8, 0xec, 0x33, 0x87, 0x07, 0xd3, 0x63, - 0x18, 0x27, 0x60, 0x92, 0x8f, 0xe8, 0xd0, 0x60, 0x05, 0x1b, 0x56, 0x87, 0xba, 0xbd, 0x60, 0x48, - 0xab, 0x29, 0xdf, 0xb0, 0x60, 0x04, 0x83, 0x71, 0x4c, 0xed, 0xe3, 0x02, 0x8c, 0xc7, 0x1b, 0x7c, - 0xea, 0x16, 0xb5, 0x9d, 0xa4, 0x45, 0x6d, 0xe1, 0x04, 0xde, 0xc9, 0x00, 0x2b, 0xda, 0x07, 0x10, - 0x7f, 0x34, 0x6e, 0x39, 0x8b, 0x1b, 0x0b, 0x0a, 0x0f, 0x35, 0x16, 0x7c, 0xfa, 0xd3, 0xe8, 0x0c, - 0xd2, 0x72, 0xcb, 0x4f, 0xb0, 0x96, 0xfb, 0x49, 0xe6, 0xe2, 0x89, 0xe5, 0x93, 0x19, 0xc9, 0x91, - 0x4f, 0xa6, 0x13, 0xe6, 0x93, 0xa9, 0x9e, 0xd8, 0xa4, 0x73, 0x94, 0x9c, 0x32, 0xb5, 0xc7, 0x9a, - 0x53, 0xa6, 0x7e, 0x5a, 0x39, 0x65, 0x20, 0x6f, 0x4e, 0x99, 0x0f, 0x0a, 0x30, 0x6e, 0x26, 0x4e, - 0xcc, 0x72, 0xdb, 0x42, 0x9e, 0xa5, 0x26, 0x79, 0x00, 0x57, 0x1c, 0x99, 0x4a, 0x96, 0x61, 0x4a, - 0x64, 0x56, 0x26, 0x97, 0xd1, 0x4f, 0x26, 0x93, 0xcb, 0x2f, 0xaa, 0xf1, 0x15, 0xe9, 0x71, 0x1b, - 0xcd, 0x5f, 0x49, 0x1a, 0xcd, 0xaf, 0xa6, 0x8d, 0xe6, 0x13, 0xb1, 0x78, 0xd6, 0xb8, 0xe1, 0xfc, - 0x77, 0x63, 0x13, 0x75, 0x89, 0xe7, 0x70, 0x09, 0xdf, 0x79, 0xc6, 0x64, 0x3d, 0x0f, 0x13, 0x52, - 0x7b, 0x55, 0x44, 0x3e, 0xcb, 0x8d, 0x45, 0x61, 0x4e, 0x8b, 0x49, 0x32, 0xa6, 0xf9, 0x99, 0x40, - 0x5f, 0xa5, 0xf2, 0x14, 0x5b, 0x85, 0x68, 0x90, 0xa9, 0x34, 0x9b, 0x21, 0x07, 0xdb, 0x56, 0x78, - 0x54, 0xf7, 0xa5, 0xe9, 0x3b, 0xb6, 0xad, 0x40, 0x5e, 0x8a, 0x92, 0x1a, 0xb7, 0xff, 0x57, 0x1f, - 0x61, 0xff, 0xd7, 0xa1, 0x61, 0xeb, 0x7e, 0x20, 0xde, 0xa6, 0x29, 0x3f, 0xe7, 0x3f, 0x71, 0xb4, - 0x85, 0x97, 0x2d, 0xe6, 0x91, 0x76, 0xbb, 0x1a, 0xc1, 0x60, 0x1c, 0x93, 0x98, 0x30, 0xca, 0x6e, - 0xf9, 0xa7, 0x6d, 0xce, 0x07, 0x32, 0xe1, 0xd5, 0x71, 0x64, 0x84, 0x66, 0xab, 0xd5, 0x18, 0x0e, - 0x26, 0x50, 0x07, 0xb8, 0x08, 0x60, 0x18, 0x17, 0x01, 0xf9, 0x43, 0xa1, 0x39, 0xed, 0x87, 0xaf, - 0xb5, 0xc1, 0x5f, 0x6b, 0x18, 0x22, 0x89, 0x71, 0x22, 0x26, 0x79, 0xd9, 0xa8, 0xe8, 0xc9, 0x6e, - 0x50, 0xd5, 0x47, 0x93, 0xa3, 0x62, 0x33, 0x49, 0xc6, 0x34, 0x3f, 
0x59, 0x87, 0x73, 0x61, 0x51, - 0xbc, 0x19, 0x63, 0x1c, 0x27, 0x8c, 0x59, 0xdb, 0xcc, 0xe0, 0xc1, 0xcc, 0x9a, 0xfc, 0x10, 0x48, - 0xcf, 0xf3, 0xa8, 0x13, 0xdc, 0xd0, 0xfd, 0x1d, 0x19, 0xfc, 0x16, 0x1d, 0x02, 0x89, 0x48, 0x18, - 0xe7, 0x23, 0x73, 0x00, 0x02, 0x8e, 0xd7, 0x9a, 0x48, 0xc6, 0x97, 0x6e, 0x86, 0x14, 0x8c, 0x71, - 0x69, 0x1f, 0xd4, 0xa1, 0x71, 0x5b, 0x0f, 0xac, 0x3d, 0xca, 0xfd, 0x79, 0xa7, 0xe3, 0x54, 0xf9, - 0xeb, 0x05, 0xb8, 0x90, 0x0c, 0xda, 0x3c, 0x45, 0xcf, 0x0a, 0x4f, 0x01, 0x83, 0x99, 0xd2, 0x70, - 0x40, 0x2b, 0xb8, 0x8f, 0xa5, 0x2f, 0x06, 0xf4, 0xb4, 0x7d, 0x2c, 0xad, 0x41, 0x02, 0x71, 0x70, - 0x5b, 0x3e, 0x2d, 0x3e, 0x96, 0x27, 0x3b, 0x67, 0x61, 0xca, 0x03, 0x54, 0x7d, 0x62, 0x3c, 0x40, - 0xb5, 0x27, 0x42, 0xed, 0xee, 0xc6, 0x3c, 0x40, 0xf5, 0x9c, 0x91, 0x48, 0xf2, 0x9c, 0x83, 0x40, - 0x1b, 0xe4, 0x49, 0xe2, 0x29, 0x0a, 0x94, 0x65, 0x9e, 0x69, 0xab, 0x5b, 0xba, 0x6f, 0x19, 0x52, - 0xed, 0xc8, 0x91, 0xa3, 0x55, 0xe5, 0x6e, 0x13, 0x01, 0x0b, 0xfc, 0x16, 0x05, 0x76, 0x94, 0xaa, - 0xae, 0x98, 0x2b, 0x55, 0x1d, 0x59, 0x80, 0xb2, 0xb3, 0x4b, 0xf7, 0x8f, 0x77, 0xd8, 0x9f, 0xef, - 0xc2, 0x6e, 0xdf, 0xa2, 0xfb, 0xc8, 0x2b, 0x6b, 0x3f, 0x28, 0x02, 0xb0, 0xc7, 0x3f, 0x9a, 0x2f, - 0xe6, 0xb7, 0xa1, 0xea, 0xf7, 0xb8, 0xd5, 0x44, 0x2a, 0x4c, 0x51, 0xf8, 0x96, 0x28, 0x46, 0x45, - 0x27, 0xcf, 0x41, 0xe5, 0xbd, 0x1e, 0xed, 0xa9, 0xc0, 0x82, 0x50, 0x71, 0xff, 0x22, 0x2b, 0x44, - 0x41, 0x3b, 0x3d, 0xbb, 0xaa, 0xf2, 0xd9, 0x54, 0x4e, 0xcb, 0x67, 0x53, 0x87, 0xea, 0x6d, 0x97, - 0x47, 0x83, 0x6a, 0xff, 0xb5, 0x08, 0x10, 0x45, 0xdb, 0x91, 0xef, 0x14, 0xe0, 0x7c, 0xf8, 0xc1, - 0x05, 0x62, 0xff, 0xc5, 0xd3, 0x22, 0xe7, 0xf6, 0xdf, 0x64, 0x7d, 0xec, 0x7c, 0x06, 0x5a, 0xcf, - 0x12, 0x87, 0xd9, 0xad, 0x20, 0x08, 0x35, 0xda, 0xe9, 0x06, 0xfb, 0x8b, 0x96, 0x27, 0x47, 0x60, - 0x66, 0x50, 0xe7, 0x75, 0xc9, 0x23, 0xaa, 0x4a, 0x23, 0x01, 0xff, 0x88, 0x14, 0x05, 0x43, 0x1c, - 0xb2, 0x03, 0x35, 0xc7, 0x7d, 0xc7, 0x67, 0xdd, 0x21, 0x87, 0xe3, 0x1b, 0xc3, 0x77, 0xb9, 0xe8, - 0x56, 
0x61, 0xef, 0x97, 0x37, 0x58, 0x75, 0x64, 0x67, 0x7f, 0xbb, 0x08, 0x67, 0x33, 0xfa, 0x81, - 0xbc, 0x01, 0x93, 0x32, 0xb0, 0x31, 0xca, 0x0f, 0x5e, 0x88, 0xf2, 0x83, 0xb7, 0x52, 0x34, 0xec, - 0xe3, 0x26, 0xef, 0x00, 0xe8, 0x86, 0x41, 0x7d, 0x7f, 0xcd, 0x35, 0xd5, 0x7e, 0xe0, 0x75, 0xa6, - 0xbe, 0xcc, 0x87, 0xa5, 0x0f, 0x0e, 0xa6, 0x7f, 0x2f, 0x2b, 0x56, 0x39, 0xd5, 0xcf, 0x51, 0x05, - 0x8c, 0x41, 0x92, 0xaf, 0x00, 0x88, 0x4d, 0x78, 0x98, 0x4e, 0xe1, 0x11, 0x96, 0xab, 0x19, 0x95, - 0xb8, 0x6a, 0xe6, 0x8b, 0x3d, 0xdd, 0x09, 0xac, 0x60, 0x5f, 0x64, 0xaf, 0xb9, 0x1b, 0xa2, 0x60, - 0x0c, 0x51, 0xfb, 0xa7, 0x45, 0xa8, 0x29, 0x9b, 0xf9, 0x63, 0x30, 0x94, 0xb6, 0x13, 0x86, 0xd2, - 0x13, 0x8a, 0x4e, 0xce, 0x32, 0x93, 0xba, 0x29, 0x33, 0xe9, 0x72, 0x7e, 0x51, 0x0f, 0x37, 0x92, - 0x7e, 0xbf, 0x08, 0xe3, 0x8a, 0x35, 0xaf, 0x89, 0xf4, 0xf3, 0x30, 0x21, 0xa2, 0x0a, 0xd6, 0xf4, - 0xfb, 0x22, 0x91, 0x0f, 0xef, 0xb0, 0xb2, 0x08, 0x08, 0x6e, 0x26, 0x49, 0x98, 0xe6, 0x65, 0xc3, - 0x5a, 0x14, 0x6d, 0xb2, 0x4d, 0x98, 0xf0, 0x43, 0x8a, 0xfd, 0x26, 0x1f, 0xd6, 0xcd, 0x14, 0x0d, - 0xfb, 0xb8, 0xd3, 0x36, 0xda, 0xf2, 0x29, 0xd8, 0x68, 0xff, 0x5d, 0x01, 0x46, 0xa3, 0xfe, 0x3a, - 0x75, 0x0b, 0xed, 0x76, 0xd2, 0x42, 0x3b, 0x9f, 0x7b, 0x38, 0x0c, 0xb0, 0xcf, 0xfe, 0xe5, 0x2a, - 0x24, 0x82, 0xe4, 0xc9, 0x16, 0x5c, 0xb2, 0x32, 0x43, 0xfd, 0x62, 0xb3, 0x4d, 0x78, 0xea, 0x7b, - 0x65, 0x20, 0x27, 0x3e, 0x04, 0x85, 0xf4, 0xa0, 0xb6, 0x47, 0xbd, 0xc0, 0x32, 0xa8, 0x7a, 0xbe, - 0xe5, 0xdc, 0x2a, 0x99, 0xb4, 0x42, 0x87, 0x7d, 0x7a, 0x57, 0x0a, 0xc0, 0x50, 0x14, 0xd9, 0x82, - 0x0a, 0x35, 0xdb, 0x54, 0xa5, 0x56, 0xca, 0x99, 0xb8, 0x34, 0xec, 0x4f, 0x76, 0xe7, 0xa3, 0x80, - 0x26, 0x3e, 0xd4, 0x6d, 0xe5, 0x65, 0x94, 0xe3, 0x70, 0x78, 0x05, 0x2b, 0xf4, 0x57, 0x46, 0x59, - 0x17, 0xc2, 0x22, 0x8c, 0xe4, 0x90, 0xdd, 0xd0, 0xdc, 0x59, 0x39, 0xa1, 0xc9, 0xe3, 0x21, 0xc6, - 0x4e, 0x1f, 0xea, 0xf7, 0xf4, 0x80, 0x7a, 0x1d, 0xdd, 0xdb, 0x95, 0xbb, 0x8d, 0xe1, 0x9f, 0xf0, - 0x4d, 0x85, 0x14, 0x3d, 0x61, 0x58, 0x84, 
0x91, 0x1c, 0xe2, 0x42, 0x3d, 0x90, 0xea, 0xb3, 0xb2, - 0xe9, 0x0e, 0x2f, 0x54, 0x29, 0xe2, 0xbe, 0x0c, 0x96, 0x57, 0xb7, 0x18, 0xc9, 0x20, 0x7b, 0x89, - 0x2c, 0xd7, 0x22, 0xb7, 0x79, 0x33, 0x87, 0x6f, 0x40, 0x42, 0x45, 0xcb, 0x4d, 0x76, 0xb6, 0x6c, - 0xed, 0x7f, 0x56, 0xa2, 0x69, 0xf9, 0x71, 0xdb, 0x09, 0x5f, 0x4a, 0xda, 0x09, 0xaf, 0xa4, 0xed, - 0x84, 0x29, 0x67, 0xf5, 0xf1, 0xc3, 0x6b, 0x53, 0xe6, 0xb5, 0xf2, 0x29, 0x98, 0xd7, 0x5e, 0x84, - 0xc6, 0x1e, 0x9f, 0x09, 0x44, 0x9e, 0xa6, 0x0a, 0x5f, 0x46, 0xf8, 0xcc, 0x7e, 0x37, 0x2a, 0xc6, - 0x38, 0x0f, 0xab, 0x22, 0xff, 0xeb, 0x11, 0x26, 0xba, 0x95, 0x55, 0x5a, 0x51, 0x31, 0xc6, 0x79, - 0x78, 0x64, 0x9e, 0xe5, 0xec, 0x8a, 0x0a, 0x55, 0x5e, 0x41, 0x44, 0xe6, 0xa9, 0x42, 0x8c, 0xe8, - 0xe4, 0x1a, 0xd4, 0x7a, 0xe6, 0xb6, 0xe0, 0xad, 0x71, 0x5e, 0xae, 0x61, 0x6e, 0x2e, 0x2e, 0xc9, - 0xbc, 0x51, 0x8a, 0xca, 0x5a, 0xd2, 0xd1, 0xbb, 0x8a, 0xc0, 0xf7, 0x86, 0xb2, 0x25, 0x6b, 0x51, - 0x31, 0xc6, 0x79, 0xc8, 0x1f, 0xc0, 0xb8, 0x47, 0xcd, 0x9e, 0x41, 0xc3, 0x5a, 0xc0, 0x6b, 0x11, - 0xf1, 0x03, 0x93, 0x38, 0x05, 0x53, 0x9c, 0x03, 0x8c, 0x84, 0x8d, 0xa1, 0x8c, 0x84, 0x5f, 0x80, - 0x71, 0xd3, 0xd3, 0x2d, 0x87, 0x9a, 0x77, 0x1c, 0x1e, 0x91, 0x20, 0xe3, 0x03, 0x43, 0x0b, 0xf9, - 0x62, 0x82, 0x8a, 0x29, 0x6e, 0xed, 0xbf, 0x17, 0x80, 0xf4, 0x47, 0xc2, 0x93, 0x1d, 0x18, 0x71, - 0xb8, 0xf5, 0x2c, 0x77, 0x6a, 0xed, 0x98, 0x11, 0x4e, 0x4c, 0x6b, 0xb2, 0x40, 0xe2, 0x13, 0x07, - 0x6a, 0xf4, 0x7e, 0x40, 0x3d, 0x27, 0x3c, 0x19, 0x73, 0x32, 0x69, 0xbc, 0xc5, 0x6e, 0x42, 0x22, - 0x63, 0x28, 0x43, 0xfb, 0x79, 0x11, 0x1a, 0x31, 0xbe, 0x47, 0x6d, 0x4a, 0xf9, 0xe1, 0x7c, 0x61, - 0xb4, 0xda, 0xf4, 0x6c, 0xf9, 0x85, 0xc6, 0x0e, 0xe7, 0x4b, 0x12, 0xae, 0x62, 0x9c, 0x8f, 0xcc, - 0x01, 0x74, 0x74, 0x3f, 0xa0, 0x1e, 0x5f, 0xbd, 0x53, 0x47, 0xe2, 0xd7, 0x42, 0x0a, 0xc6, 0xb8, - 0xc8, 0x55, 0x99, 0x88, 0xbd, 0x9c, 0x4c, 0x61, 0x38, 0x20, 0xcb, 0x7a, 0xe5, 0x04, 0xb2, 0xac, - 0x93, 0x36, 0x4c, 0xaa, 0x56, 0x2b, 0xea, 0xf1, 0x12, 0xdc, 0x89, 0xfd, 0x4f, 
0x0a, 0x02, 0xfb, - 0x40, 0xb5, 0x1f, 0x14, 0x60, 0x2c, 0x61, 0x32, 0x11, 0xc9, 0x07, 0xd5, 0x39, 0x8e, 0x44, 0xf2, - 0xc1, 0xd8, 0xf1, 0x8b, 0x17, 0x60, 0x44, 0x74, 0x50, 0x3a, 0x3c, 0x53, 0x74, 0x21, 0x4a, 0x2a, - 0x9b, 0x0b, 0xa5, 0x51, 0x36, 0x3d, 0x17, 0x4a, 0xab, 0x2d, 0x2a, 0xba, 0xf0, 0x75, 0x88, 0xd6, - 0xc9, 0x9e, 0x8e, 0xf9, 0x3a, 0x44, 0x39, 0x86, 0x1c, 0xda, 0x0f, 0x79, 0xbb, 0x03, 0x6f, 0x3f, - 0xdc, 0x0b, 0xb6, 0xa1, 0x2a, 0x43, 0xf2, 0xe4, 0xa7, 0xf1, 0x46, 0x0e, 0x3b, 0x0e, 0xc7, 0x91, - 0xc1, 0x67, 0xba, 0xb1, 0x7b, 0x67, 0x7b, 0x1b, 0x15, 0x3a, 0xb9, 0x0e, 0x75, 0xd7, 0x59, 0xd2, - 0x2d, 0xbb, 0xe7, 0xa9, 0x95, 0xe1, 0xb7, 0xd8, 0x5c, 0x77, 0x47, 0x15, 0x3e, 0x38, 0x98, 0xbe, - 0x10, 0xde, 0x24, 0x1a, 0x89, 0x51, 0x4d, 0xed, 0xcf, 0x17, 0xe0, 0x3c, 0xba, 0xb6, 0x6d, 0x39, - 0xed, 0xa4, 0xb3, 0x8c, 0xd8, 0x30, 0xde, 0xd1, 0xef, 0x6f, 0x3a, 0xfa, 0x9e, 0x6e, 0xd9, 0xfa, - 0x96, 0x4d, 0x1f, 0xb9, 0x97, 0xeb, 0x05, 0x96, 0x3d, 0x23, 0x7e, 0x4c, 0x37, 0xb3, 0xe2, 0x04, - 0x77, 0xbc, 0x56, 0xe0, 0x59, 0x4e, 0x5b, 0x4c, 0x7a, 0x6b, 0x09, 0x2c, 0x4c, 0x61, 0x6b, 0xbf, - 0x28, 0x01, 0x0f, 0x0b, 0x23, 0xaf, 0x42, 0xbd, 0x43, 0x8d, 0x1d, 0xdd, 0xb1, 0x7c, 0x95, 0xc6, - 0xf5, 0x22, 0x7b, 0xae, 0x35, 0x55, 0xf8, 0x80, 0xbd, 0x8a, 0xf9, 0xd6, 0x2a, 0x3f, 0x79, 0x11, - 0xf1, 0x12, 0x03, 0x46, 0xda, 0xbe, 0xaf, 0x77, 0xad, 0xdc, 0x51, 0x09, 0x22, 0x6d, 0xa6, 0x98, - 0x8e, 0xc4, 0x35, 0x4a, 0x68, 0x62, 0x40, 0xa5, 0x6b, 0xeb, 0x96, 0x93, 0xfb, 0x47, 0x4a, 0xec, - 0x09, 0xd6, 0x19, 0x92, 0x30, 0xae, 0xf1, 0x4b, 0x14, 0xd8, 0xa4, 0x07, 0x0d, 0xdf, 0xf0, 0xf4, - 0x8e, 0xbf, 0xa3, 0xcf, 0xbd, 0xfc, 0x4a, 0x6e, 0x75, 0x35, 0x12, 0x25, 0x56, 0xcf, 0x05, 0x9c, - 0x5f, 0x6b, 0xdd, 0x98, 0x9f, 0x7b, 0xf9, 0x15, 0x8c, 0xcb, 0x89, 0x8b, 0x7d, 0xf9, 0xc5, 0x39, - 0x39, 0x83, 0x9c, 0xb8, 0xd8, 0x97, 0x5f, 0x9c, 0xc3, 0xb8, 0x1c, 0xed, 0x7f, 0x15, 0xa0, 0x1e, - 0xf2, 0x92, 0x4d, 0x00, 0x36, 0x97, 0xc9, 0x44, 0x97, 0xc7, 0xfa, 0xe9, 0x04, 0xb7, 0x4f, 0x6c, - 0x86, 0x95, 0x31, 
0x06, 0x94, 0x91, 0x09, 0xb4, 0x78, 0xd2, 0x99, 0x40, 0x67, 0xa1, 0xbe, 0xa3, - 0x3b, 0xa6, 0xbf, 0xa3, 0xef, 0x8a, 0x29, 0x3d, 0x96, 0x1b, 0xf7, 0x86, 0x22, 0x60, 0xc4, 0xa3, - 0xfd, 0xe3, 0x11, 0x10, 0xa1, 0x04, 0x6c, 0xd2, 0x31, 0x2d, 0x5f, 0xc4, 0xb2, 0x17, 0x78, 0xcd, - 0x70, 0xd2, 0x59, 0x94, 0xe5, 0x18, 0x72, 0x90, 0x8b, 0x50, 0xea, 0x58, 0x8e, 0xf4, 0x3d, 0x71, - 0xd3, 0xe3, 0x9a, 0xe5, 0x20, 0x2b, 0xe3, 0x24, 0xfd, 0xbe, 0x0c, 0x43, 0x14, 0x24, 0xfd, 0x3e, - 0xb2, 0x32, 0xf2, 0x79, 0x98, 0xb0, 0x5d, 0x77, 0x97, 0x4d, 0x1f, 0x2a, 0x5a, 0x51, 0xf8, 0x81, - 0xb9, 0x31, 0x60, 0x35, 0x49, 0xc2, 0x34, 0x2f, 0xd9, 0x84, 0xa7, 0xdf, 0xa7, 0x9e, 0x2b, 0xe7, - 0xcb, 0x96, 0x4d, 0x69, 0x57, 0xc1, 0x08, 0x65, 0x8e, 0x07, 0x3d, 0xfe, 0x71, 0x36, 0x0b, 0x0e, - 0xaa, 0xcb, 0xc3, 0xa7, 0x75, 0xaf, 0x4d, 0x83, 0x75, 0xcf, 0x35, 0xa8, 0xef, 0x5b, 0x4e, 0x5b, - 0xc1, 0x8e, 0x44, 0xb0, 0x1b, 0xd9, 0x2c, 0x38, 0xa8, 0x2e, 0x79, 0x0b, 0xa6, 0x04, 0x49, 0xa8, - 0x2d, 0xf3, 0x62, 0x9a, 0xb1, 0x6c, 0xf5, 0xff, 0xc1, 0x31, 0xe1, 0xe1, 0xd9, 0x18, 0xc0, 0x83, - 0x03, 0x6b, 0x93, 0x9b, 0x30, 0xa9, 0xfc, 0x7b, 0xeb, 0xd4, 0x6b, 0x85, 0xe1, 0x25, 0x63, 0xcd, - 0x2b, 0x6c, 0xe7, 0xbd, 0x48, 0xbb, 0x1e, 0x35, 0xe2, 0x7e, 0x52, 0xc5, 0x85, 0x7d, 0xf5, 0x08, - 0xc2, 0x05, 0x1e, 0x43, 0xb2, 0xd9, 0x5d, 0x70, 0x5d, 0xdb, 0x74, 0xef, 0x39, 0xea, 0xd9, 0x85, - 0x8a, 0xc9, 0x5d, 0x7a, 0xad, 0x4c, 0x0e, 0x1c, 0x50, 0x93, 0x3d, 0x39, 0xa7, 0x2c, 0xba, 0xf7, - 0x9c, 0x34, 0x2a, 0x44, 0x4f, 0xde, 0x1a, 0xc0, 0x83, 0x03, 0x6b, 0x93, 0x25, 0x20, 0xe9, 0x27, - 0xd8, 0xec, 0x4a, 0xa7, 0xf3, 0x05, 0x91, 0xb3, 0x26, 0x4d, 0xc5, 0x8c, 0x1a, 0x64, 0x15, 0xce, - 0xa5, 0x4b, 0x99, 0x38, 0xe9, 0x7f, 0xe6, 0xd9, 0x6a, 0x31, 0x83, 0x8e, 0x99, 0xb5, 0xb4, 0x7f, - 0x52, 0x84, 0xb1, 0x44, 0x92, 0x83, 0x27, 0xee, 0x30, 0x39, 0xdb, 0x0b, 0x74, 0xfc, 0xf6, 0xca, - 0xe2, 0x0d, 0xaa, 0x9b, 0xd4, 0xbb, 0x45, 0x55, 0x42, 0x0a, 0xb1, 0x2c, 0x26, 0x28, 0x98, 0xe2, - 0x24, 0xdb, 0x50, 0x11, 0x96, 0xed, 0xbc, 0xbf, 0x2f, 
0x51, 0x7d, 0xc4, 0xcd, 0xdb, 0xf2, 0x9f, - 0x3f, 0xae, 0x47, 0x51, 0xc0, 0x6b, 0x01, 0x8c, 0xc6, 0x39, 0xd8, 0x44, 0x12, 0xa9, 0xbd, 0xd5, - 0x84, 0xca, 0xbb, 0x02, 0xa5, 0x20, 0x18, 0xf6, 0x98, 0xba, 0xf0, 0x94, 0x6c, 0xac, 0x22, 0xc3, - 0xd0, 0xb6, 0xd9, 0xbb, 0xf3, 0x7d, 0xcb, 0x75, 0x64, 0xce, 0xf2, 0x4d, 0xa8, 0x06, 0xd2, 0x58, - 0x38, 0xdc, 0x31, 0x7b, 0xae, 0x2b, 0x29, 0x43, 0xa1, 0xc2, 0xd2, 0xfe, 0x7d, 0x11, 0xea, 0xe1, - 0xc6, 0xfe, 0x08, 0xb9, 0xc0, 0x5d, 0xa8, 0x87, 0x31, 0x70, 0xb9, 0xff, 0xcd, 0x18, 0x85, 0x66, - 0xf1, 0xbd, 0x68, 0x78, 0x8b, 0x91, 0x8c, 0x78, 0x7c, 0x5d, 0x29, 0x47, 0x7c, 0x5d, 0x17, 0xaa, - 0x81, 0x67, 0xb5, 0xdb, 0x72, 0x97, 0x90, 0x27, 0xc0, 0x2e, 0xec, 0xae, 0x0d, 0x01, 0x28, 0x7b, - 0x56, 0xdc, 0xa0, 0x12, 0xa3, 0xbd, 0x0b, 0x93, 0x69, 0x4e, 0xae, 0x42, 0x1b, 0x3b, 0xd4, 0xec, - 0xd9, 0xaa, 0x8f, 0x23, 0x15, 0x5a, 0x96, 0x63, 0xc8, 0xc1, 0xb6, 0xe1, 0xec, 0x35, 0xbd, 0xef, - 0x3a, 0x4a, 0x8d, 0xe5, 0xbb, 0x91, 0x0d, 0x59, 0x86, 0x21, 0x55, 0xfb, 0x2f, 0x25, 0xb8, 0x18, - 0x99, 0x67, 0xd6, 0x74, 0x47, 0x6f, 0x1f, 0xe1, 0x87, 0x7c, 0x9f, 0x1d, 0x5c, 0x3a, 0xee, 0x0f, - 0x1d, 0x4a, 0x4f, 0xc0, 0x0f, 0x1d, 0xfe, 0x4f, 0x11, 0x78, 0xbc, 0x2e, 0xf9, 0x1a, 0x8c, 0xea, - 0xb1, 0x7f, 0xb1, 0xca, 0xd7, 0x79, 0x3d, 0xf7, 0xeb, 0xe4, 0x61, 0xc1, 0x61, 0xc8, 0x56, 0xbc, - 0x14, 0x13, 0x02, 0x89, 0x0b, 0xb5, 0x6d, 0xdd, 0xb6, 0x99, 0x2e, 0x94, 0xdb, 0xdd, 0x94, 0x10, - 0xce, 0x87, 0xf9, 0x92, 0x84, 0xc6, 0x50, 0x08, 0xf9, 0xa0, 0x00, 0x63, 0x5e, 0x7c, 0xbb, 0x26, - 0x5f, 0x48, 0x9e, 0x60, 0x84, 0x18, 0x5a, 0x3c, 0x40, 0x2c, 0xbe, 0x27, 0x4c, 0xca, 0xd4, 0xfe, - 0x73, 0x01, 0xc6, 0x5a, 0xb6, 0x65, 0x5a, 0x4e, 0xfb, 0x14, 0xff, 0x27, 0x71, 0x07, 0x2a, 0xbe, - 0x6d, 0x99, 0x74, 0xc8, 0xd5, 0x44, 0xac, 0x63, 0x0c, 0x00, 0x05, 0x4e, 0xf2, 0x07, 0x15, 0xa5, - 0x23, 0xfc, 0xa0, 0xe2, 0x57, 0x23, 0x20, 0x23, 0xcf, 0x49, 0x0f, 0xea, 0x6d, 0x95, 0xf7, 0x5e, - 0x3e, 0xe3, 0x8d, 0x1c, 0x39, 0x13, 0x13, 0x19, 0xf4, 0xc5, 0xdc, 0x1f, 0x16, 0x62, 0x24, 
0x89, - 0xd0, 0xe4, 0x4f, 0x80, 0x17, 0x73, 0xfe, 0x04, 0x58, 0x88, 0xeb, 0xff, 0x0d, 0xb0, 0x0e, 0xe5, - 0x9d, 0x20, 0xe8, 0xca, 0xc1, 0x34, 0xfc, 0xd1, 0x82, 0x28, 0x6d, 0x8f, 0xd0, 0x89, 0xd8, 0x3d, - 0x72, 0x68, 0x26, 0xc2, 0xd1, 0xc3, 0x5f, 0xad, 0x2d, 0xe4, 0x0a, 0x7c, 0x88, 0x8b, 0x60, 0xf7, - 0xc8, 0xa1, 0xc9, 0x57, 0xa1, 0x11, 0x78, 0xba, 0xe3, 0x6f, 0xbb, 0x5e, 0x87, 0x7a, 0x72, 0x8f, - 0xba, 0x94, 0xe3, 0x3f, 0xb8, 0x1b, 0x11, 0x9a, 0xf0, 0xa8, 0x26, 0x8a, 0x30, 0x2e, 0x8d, 0xec, - 0x42, 0xad, 0x67, 0x8a, 0x86, 0x49, 0x33, 0xd8, 0x7c, 0x9e, 0x5f, 0x1b, 0xc7, 0xc2, 0x1a, 0xd4, - 0x1d, 0x86, 0x02, 0x92, 0x7f, 0x15, 0xac, 0x9e, 0xd4, 0x5f, 0x05, 0xe3, 0xa3, 0x31, 0x2b, 0xa7, - 0x08, 0xe9, 0x48, 0xbd, 0xd6, 0x69, 0xcb, 0xa8, 0xac, 0xa5, 0xdc, 0x2a, 0xa7, 0x10, 0xd9, 0x08, - 0x75, 0x63, 0xa7, 0x8d, 0x4a, 0x86, 0xd6, 0x01, 0xe9, 0xed, 0x20, 0x46, 0xe2, 0xdf, 0x3b, 0xe2, - 0xa0, 0xdb, 0xec, 0xd1, 0xe6, 0x83, 0xf0, 0x27, 0x30, 0xb1, 0xdc, 0xdf, 0x99, 0x3f, 0xd9, 0xd1, - 0xfe, 0x43, 0x11, 0x4a, 0x1b, 0xab, 0x2d, 0x91, 0xcf, 0x93, 0xff, 0xd8, 0x8a, 0xb6, 0x76, 0xad, - 0xee, 0x5d, 0xea, 0x59, 0xdb, 0xfb, 0x72, 0xeb, 0x1d, 0xcb, 0xe7, 0x99, 0xe6, 0xc0, 0x8c, 0x5a, - 0xe4, 0x6d, 0x18, 0x35, 0xf4, 0x05, 0xea, 0x05, 0xc3, 0x18, 0x16, 0xf8, 0x89, 0xde, 0x85, 0xf9, - 0xa8, 0x3a, 0x26, 0xc0, 0xc8, 0x26, 0x80, 0x11, 0x41, 0x97, 0x8e, 0x6d, 0x0e, 0x89, 0x01, 0xc7, - 0x80, 0x08, 0x42, 0x7d, 0x97, 0xb1, 0x72, 0xd4, 0xf2, 0x71, 0x50, 0xf9, 0xc8, 0xb9, 0xa5, 0xea, - 0x62, 0x04, 0xa3, 0x39, 0x30, 0x96, 0xf8, 0x21, 0x0f, 0xf9, 0x1c, 0xd4, 0xdc, 0x6e, 0x6c, 0x3a, - 0xad, 0xf3, 0xf8, 0xcf, 0xda, 0x1d, 0x59, 0xf6, 0xe0, 0x60, 0x7a, 0x6c, 0xd5, 0x6d, 0x5b, 0x86, - 0x2a, 0xc0, 0x90, 0x9d, 0x68, 0x30, 0xc2, 0x8f, 0xe1, 0xa9, 0xdf, 0xf1, 0xf0, 0xb5, 0x83, 0xff, - 0x31, 0xc3, 0x47, 0x49, 0xd1, 0xbe, 0x5e, 0x86, 0xc8, 0x47, 0x48, 0x7c, 0x18, 0x11, 0xc7, 0x0c, - 0xe4, 0xcc, 0x7d, 0xaa, 0x27, 0x1a, 0xa4, 0x28, 0xd2, 0x86, 0xd2, 0xbb, 0xee, 0x56, 0xee, 0x89, - 0x3b, 0x76, 0xfe, 0x5e, 0xd8, 
0xca, 0x62, 0x05, 0xc8, 0x24, 0x90, 0xbf, 0x51, 0x80, 0x33, 0x7e, - 0x5a, 0xf5, 0x95, 0xc3, 0x01, 0xf3, 0xeb, 0xf8, 0x69, 0x65, 0x5a, 0x06, 0xea, 0x0e, 0x22, 0x63, - 0x7f, 0x5b, 0x58, 0xff, 0x0b, 0xe7, 0x9d, 0x1c, 0x4e, 0xcb, 0x39, 0x7f, 0x22, 0x99, 0xec, 0xff, - 0x64, 0x19, 0x4a, 0x51, 0xda, 0x37, 0x8b, 0xd0, 0x88, 0xcd, 0xd6, 0xb9, 0xff, 0xf2, 0x74, 0x3f, - 0xf5, 0x97, 0xa7, 0xf5, 0xe1, 0x7d, 0xd9, 0x51, 0xab, 0x4e, 0xfb, 0x47, 0x4f, 0xff, 0xbc, 0x08, - 0xa5, 0xcd, 0xc5, 0xa5, 0xe4, 0xa6, 0xb5, 0xf0, 0x18, 0x36, 0xad, 0x3b, 0x50, 0xdd, 0xea, 0x59, - 0x76, 0x60, 0x39, 0xb9, 0x33, 0x84, 0xa8, 0x9f, 0x62, 0x49, 0x5f, 0x87, 0x40, 0x45, 0x05, 0x4f, - 0xda, 0x50, 0x6d, 0x8b, 0x14, 0x8d, 0xb9, 0x23, 0xfc, 0x64, 0xaa, 0x47, 0x21, 0x48, 0xde, 0xa0, - 0x42, 0xd7, 0xf6, 0x61, 0x64, 0x73, 0x51, 0xaa, 0xfd, 0x8f, 0xb7, 0x37, 0xb5, 0xaf, 0x42, 0xa8, - 0x05, 0x3c, 0x7e, 0xe1, 0xff, 0xad, 0x00, 0x49, 0xc5, 0xe7, 0xf1, 0x8f, 0xa6, 0xdd, 0xf4, 0x68, - 0x5a, 0x3c, 0x89, 0x8f, 0x2f, 0x7b, 0x40, 0x69, 0xff, 0xb6, 0x00, 0xa9, 0xb3, 0x61, 0xe4, 0x15, - 0x99, 0xed, 0x2b, 0x19, 0x4a, 0xa5, 0xb2, 0x7d, 0x91, 0x24, 0x77, 0x2c, 0xeb, 0xd7, 0x87, 0x6c, - 0xbb, 0x16, 0x77, 0xa0, 0xc9, 0xe6, 0xdf, 0x1e, 0x7e, 0xbb, 0x96, 0xe5, 0x8e, 0x93, 0xe1, 0x7e, - 0x71, 0x12, 0x26, 0xe5, 0x6a, 0xff, 0xa8, 0x08, 0x23, 0x8f, 0xed, 0xa8, 0x3a, 0x4d, 0x44, 0x60, - 0x2e, 0xe4, 0x9c, 0xed, 0x07, 0xc6, 0x5f, 0x76, 0x52, 0xf1, 0x97, 0x79, 0xff, 0x4d, 0xfc, 0x88, - 0xe8, 0xcb, 0x7f, 0x5d, 0x00, 0xb9, 0xd6, 0xac, 0x38, 0x7e, 0xa0, 0x3b, 0x06, 0x25, 0x46, 0xb8, - 0xb0, 0xe5, 0x0d, 0xf3, 0x91, 0xa1, 0x70, 0x42, 0x97, 0xe1, 0xd7, 0x6a, 0x21, 0x23, 0xbf, 0x0b, - 0xb5, 0x1d, 0xd7, 0x0f, 0xf8, 0xe2, 0x55, 0x4c, 0x9a, 0xcc, 0x6e, 0xc8, 0x72, 0x0c, 0x39, 0xd2, - 0xee, 0xec, 0xca, 0x60, 0x77, 0xb6, 0xf6, 0xbd, 0x22, 0x8c, 0x7e, 0x5a, 0xce, 0xdb, 0x67, 0xc5, - 0xab, 0x96, 0x72, 0xc6, 0xab, 0x96, 0x8f, 0x13, 0xaf, 0xaa, 0xfd, 0xa4, 0x00, 0xf0, 0xd8, 0x0e, - 0xfb, 0x9b, 0xc9, 0x50, 0xd2, 0xdc, 0xe3, 0x2a, 0x3b, 0x90, 0xf4, 
0x1f, 0x54, 0xd4, 0x23, 0xf1, - 0x30, 0xd2, 0x0f, 0x0b, 0x30, 0xae, 0x27, 0x42, 0x33, 0x73, 0xeb, 0xcb, 0xa9, 0x48, 0xcf, 0x30, - 0xb2, 0x28, 0x59, 0x8e, 0x29, 0xb1, 0xe4, 0xb5, 0x28, 0xd1, 0xf4, 0xed, 0x68, 0xd8, 0xf7, 0x65, - 0x88, 0xe6, 0xba, 0x5b, 0x82, 0xf3, 0x11, 0xa1, 0xb0, 0xa5, 0x13, 0x09, 0x85, 0x8d, 0x1f, 0xf2, - 0x2b, 0x3f, 0xf4, 0x90, 0xdf, 0x1e, 0xd4, 0xb7, 0x3d, 0xb7, 0xc3, 0xa3, 0x4d, 0xe5, 0x5f, 0x8d, - 0xaf, 0xe7, 0x58, 0x28, 0xa3, 0xff, 0xf9, 0x47, 0x86, 0xab, 0x25, 0x85, 0x8f, 0x91, 0x28, 0x6e, - 0xeb, 0x77, 0x85, 0xd4, 0x91, 0x93, 0x94, 0x1a, 0xce, 0x25, 0x1b, 0x02, 0x1d, 0x95, 0x98, 0x64, - 0x84, 0x69, 0xf5, 0xf1, 0x44, 0x98, 0x6a, 0x7f, 0xa9, 0xaa, 0x26, 0xb0, 0x27, 0x2e, 0xa7, 0xe9, - 0x67, 0x47, 0xb3, 0xdb, 0xb4, 0xef, 0xdc, 0x74, 0xed, 0x31, 0x9e, 0x9b, 0xae, 0x9f, 0xcc, 0xb9, - 0x69, 0xc8, 0x77, 0x6e, 0xba, 0x71, 0x42, 0xe7, 0xa6, 0x47, 0x4f, 0xea, 0xdc, 0xf4, 0xd8, 0x50, - 0xe7, 0xa6, 0xc7, 0x8f, 0x74, 0x6e, 0xfa, 0xa0, 0x04, 0xa9, 0xcd, 0xf8, 0x67, 0x8e, 0xb7, 0xff, - 0xa7, 0x1c, 0x6f, 0x1f, 0x15, 0x21, 0x9a, 0x88, 0x8f, 0x19, 0x98, 0xf4, 0x16, 0xd4, 0x3a, 0xfa, - 0xfd, 0x45, 0x6a, 0xeb, 0xfb, 0x79, 0x7e, 0x05, 0xbc, 0x26, 0x31, 0x30, 0x44, 0x23, 0x3e, 0x80, - 0x15, 0xa6, 0xe3, 0xcf, 0xed, 0xc2, 0x88, 0x32, 0xfb, 0x0b, 0x23, 0x69, 0x74, 0x8f, 0x31, 0x31, - 0xda, 0xbf, 0x2a, 0x82, 0xfc, 0x6f, 0x03, 0xa1, 0x50, 0xd9, 0xb6, 0xee, 0x53, 0x33, 0x77, 0xb8, - 0x73, 0xec, 0x07, 0xed, 0xc2, 0x47, 0xc3, 0x0b, 0x50, 0xa0, 0x73, 0xe3, 0xbb, 0xf0, 0xb9, 0xc9, - 0xfe, 0xcb, 0x61, 0x7c, 0x8f, 0xfb, 0xee, 0xa4, 0xf1, 0x5d, 0x14, 0xa1, 0x92, 0x21, 0x6c, 0xfd, - 0x3c, 0xfc, 0x22, 0xb7, 0x8b, 0x31, 0x11, 0xc6, 0xa1, 0x6c, 0xfd, 0xbe, 0x48, 0x9c, 0x20, 0x65, - 0x34, 0xbf, 0xfc, 0xe3, 0x9f, 0x5e, 0x79, 0xea, 0x27, 0x3f, 0xbd, 0xf2, 0xd4, 0xc7, 0x3f, 0xbd, - 0xf2, 0xd4, 0xd7, 0x0f, 0xaf, 0x14, 0x7e, 0x7c, 0x78, 0xa5, 0xf0, 0x93, 0xc3, 0x2b, 0x85, 0x8f, - 0x0f, 0xaf, 0x14, 0xfe, 0xe3, 0xe1, 0x95, 0xc2, 0x5f, 0xfd, 0x4f, 0x57, 0x9e, 0xfa, 0xe3, 0x57, - 0xa3, 
0x26, 0xcc, 0xaa, 0x26, 0xcc, 0x2a, 0x81, 0xb3, 0xdd, 0xdd, 0xf6, 0x2c, 0x6b, 0x42, 0x54, - 0xa2, 0x9a, 0xf0, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xc4, 0xe2, 0xd0, 0x0e, 0xda, 0x9a, 0x00, - 0x00, + // 8046 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x5d, 0x6c, 0x24, 0x57, + 0x76, 0x9e, 0xfa, 0xbf, 0xfb, 0x34, 0xff, 0x74, 0x67, 0x34, 0xe2, 0xcc, 0x4a, 0xd3, 0xe3, 0x5a, + 0xef, 0x7a, 0x1c, 0xdb, 0x64, 0x44, 0xaf, 0xb4, 0x5a, 0xdb, 0xbb, 0x12, 0x9b, 0x1c, 0x72, 0x38, + 0x43, 0xce, 0x70, 0x4f, 0x93, 0x23, 0xad, 0x15, 0xaf, 0x52, 0xac, 0xba, 0x6c, 0x96, 0x58, 0x5d, + 0xd5, 0x5b, 0x55, 0xcd, 0x19, 0xca, 0x09, 0xd6, 0xb6, 0x12, 0x68, 0x83, 0x24, 0x48, 0xe0, 0x27, + 0x03, 0x81, 0x13, 0x24, 0x08, 0xe0, 0x07, 0xc3, 0x79, 0x08, 0xb2, 0x79, 0x08, 0x90, 0x1f, 0x07, + 0x41, 0xb2, 0xf9, 0x5f, 0x04, 0x01, 0xb2, 0x79, 0x21, 0xb2, 0x0c, 0xf2, 0x90, 0x00, 0x0e, 0x8c, + 0x18, 0x89, 0x9d, 0x81, 0x11, 0x07, 0xf7, 0xaf, 0xfe, 0xba, 0x7a, 0x86, 0xec, 0x6a, 0x8e, 0x46, + 0x89, 0xde, 0xba, 0xef, 0x39, 0xf7, 0x3b, 0xb7, 0x6e, 0xdd, 0xba, 0xf7, 0xdc, 0x73, 0xce, 0x3d, + 0x17, 0xd6, 0xbb, 0x56, 0x70, 0x30, 0xd8, 0x5b, 0x30, 0xdc, 0xde, 0xa2, 0x33, 0xe8, 0xe9, 0x7d, + 0xcf, 0xfd, 0x80, 0xff, 0xd8, 0xb7, 0xdd, 0x87, 0x8b, 0xfd, 0xc3, 0xee, 0xa2, 0xde, 0xb7, 0xfc, + 0xa8, 0xe4, 0xe8, 0x35, 0xdd, 0xee, 0x1f, 0xe8, 0xaf, 0x2d, 0x76, 0xa9, 0x43, 0x3d, 0x3d, 0xa0, + 0xe6, 0x42, 0xdf, 0x73, 0x03, 0x97, 0x7c, 0x39, 0x02, 0x5a, 0x50, 0x40, 0x0b, 0xaa, 0xda, 0x42, + 0xff, 0xb0, 0xbb, 0xc0, 0x80, 0xa2, 0x12, 0x05, 0x74, 0xed, 0xa7, 0x62, 0x2d, 0xe8, 0xba, 0x5d, + 0x77, 0x91, 0xe3, 0xed, 0x0d, 0xf6, 0xf9, 0x3f, 0xfe, 0x87, 0xff, 0x12, 0x72, 0xae, 0x69, 0x87, + 0x6f, 0xfa, 0x0b, 0x96, 0xcb, 0x9a, 0xb5, 0x68, 0xb8, 0x1e, 0x5d, 0x3c, 0x1a, 0x6a, 0xcb, 0xb5, + 0x2f, 0x45, 0x3c, 0x3d, 0xdd, 0x38, 0xb0, 0x1c, 0xea, 0x1d, 0xab, 0x67, 0x59, 0xf4, 0xa8, 0xef, + 0x0e, 0x3c, 0x83, 0x9e, 0xab, 0x96, 0xbf, 0xd8, 0xa3, 0x81, 0x9e, 0x25, 0x6b, 0x71, 
0x54, 0x2d, + 0x6f, 0xe0, 0x04, 0x56, 0x6f, 0x58, 0xcc, 0x1b, 0x4f, 0xab, 0xe0, 0x1b, 0x07, 0xb4, 0xa7, 0x0f, + 0xd5, 0xfb, 0xe9, 0x51, 0xf5, 0x06, 0x81, 0x65, 0x2f, 0x5a, 0x4e, 0xe0, 0x07, 0x5e, 0xba, 0x92, + 0xf6, 0xdb, 0x00, 0x97, 0x96, 0xf7, 0xfc, 0xc0, 0xd3, 0x8d, 0x60, 0xdb, 0x35, 0x77, 0x68, 0xaf, + 0x6f, 0xeb, 0x01, 0x25, 0x87, 0x50, 0x67, 0x0f, 0x64, 0xea, 0x81, 0x3e, 0x5f, 0xb8, 0x51, 0xb8, + 0xd9, 0x5c, 0x5a, 0x5e, 0x18, 0xf3, 0x05, 0x2e, 0x6c, 0x49, 0xa0, 0xf6, 0xd4, 0xe9, 0x49, 0xab, + 0xae, 0xfe, 0x61, 0x28, 0x80, 0xfc, 0x5a, 0x01, 0xa6, 0x1c, 0xd7, 0xa4, 0x1d, 0x6a, 0x53, 0x23, + 0x70, 0xbd, 0xf9, 0xe2, 0x8d, 0xd2, 0xcd, 0xe6, 0xd2, 0x37, 0xc7, 0x96, 0x98, 0xf1, 0x44, 0x0b, + 0xf7, 0x62, 0x02, 0x6e, 0x39, 0x81, 0x77, 0xdc, 0xbe, 0xfc, 0xbd, 0x93, 0xd6, 0x0b, 0xa7, 0x27, + 0xad, 0xa9, 0x38, 0x09, 0x13, 0x2d, 0x21, 0xbb, 0xd0, 0x0c, 0x5c, 0x9b, 0x75, 0x99, 0xe5, 0x3a, + 0xfe, 0x7c, 0x89, 0x37, 0xec, 0xfa, 0x82, 0xe8, 0x6a, 0x26, 0x7e, 0x81, 0x8d, 0xb1, 0x85, 0xa3, + 0xd7, 0x16, 0x76, 0x42, 0xb6, 0xf6, 0x25, 0x09, 0xdc, 0x8c, 0xca, 0x7c, 0x8c, 0xe3, 0x10, 0x0a, + 0xb3, 0x3e, 0x35, 0x06, 0x9e, 0x15, 0x1c, 0xaf, 0xb8, 0x4e, 0x40, 0x1f, 0x05, 0xf3, 0x65, 0xde, + 0xcb, 0x5f, 0xcc, 0x82, 0xde, 0x76, 0xcd, 0x4e, 0x92, 0xbb, 0x7d, 0xe9, 0xf4, 0xa4, 0x35, 0x9b, + 0x2a, 0xc4, 0x34, 0x26, 0x71, 0x60, 0xce, 0xea, 0xe9, 0x5d, 0xba, 0x3d, 0xb0, 0xed, 0x0e, 0x35, + 0x3c, 0x1a, 0xf8, 0xf3, 0x15, 0xfe, 0x08, 0x37, 0xb3, 0xe4, 0x6c, 0xba, 0x86, 0x6e, 0xdf, 0xdf, + 0xfb, 0x80, 0x1a, 0x01, 0xd2, 0x7d, 0xea, 0x51, 0xc7, 0xa0, 0xed, 0x79, 0xf9, 0x30, 0x73, 0x1b, + 0x29, 0x24, 0x1c, 0xc2, 0x26, 0xeb, 0xf0, 0x62, 0xdf, 0xb3, 0x5c, 0xde, 0x04, 0x5b, 0xf7, 0xfd, + 0x7b, 0x7a, 0x8f, 0xce, 0x57, 0x6f, 0x14, 0x6e, 0x36, 0xda, 0x57, 0x25, 0xcc, 0x8b, 0xdb, 0x69, + 0x06, 0x1c, 0xae, 0x43, 0x6e, 0x42, 0x5d, 0x15, 0xce, 0xd7, 0x6e, 0x14, 0x6e, 0x56, 0xc4, 0xd8, + 0x51, 0x75, 0x31, 0xa4, 0x92, 0x35, 0xa8, 0xeb, 0xfb, 0xfb, 0x96, 0xc3, 0x38, 0xeb, 0xbc, 0x0b, + 0x5f, 0xc9, 0x7a, 0xb4, 
0x65, 0xc9, 0x23, 0x70, 0xd4, 0x3f, 0x0c, 0xeb, 0x92, 0x3b, 0x40, 0x7c, + 0xea, 0x1d, 0x59, 0x06, 0x5d, 0x36, 0x0c, 0x77, 0xe0, 0x04, 0xbc, 0xed, 0x0d, 0xde, 0xf6, 0x6b, + 0xb2, 0xed, 0xa4, 0x33, 0xc4, 0x81, 0x19, 0xb5, 0xc8, 0xdb, 0x30, 0x27, 0xbf, 0xd5, 0xa8, 0x17, + 0x80, 0x23, 0x5d, 0x66, 0x1d, 0x89, 0x29, 0x1a, 0x0e, 0x71, 0x13, 0x13, 0x5e, 0xd1, 0x07, 0x81, + 0xdb, 0x63, 0x90, 0x49, 0xa1, 0x3b, 0xee, 0x21, 0x75, 0xe6, 0x9b, 0x37, 0x0a, 0x37, 0xeb, 0xed, + 0x1b, 0xa7, 0x27, 0xad, 0x57, 0x96, 0x9f, 0xc0, 0x87, 0x4f, 0x44, 0x21, 0xf7, 0xa1, 0x61, 0x3a, + 0xfe, 0xb6, 0x6b, 0x5b, 0xc6, 0xf1, 0xfc, 0x14, 0x6f, 0xe0, 0x6b, 0xf2, 0x51, 0x1b, 0xab, 0xf7, + 0x3a, 0x82, 0xf0, 0xf8, 0xa4, 0xf5, 0xca, 0xf0, 0x94, 0xba, 0x10, 0xd2, 0x31, 0xc2, 0x20, 0x5b, + 0x1c, 0x70, 0xc5, 0x75, 0xf6, 0xad, 0xee, 0xfc, 0x34, 0x7f, 0x1b, 0x37, 0x46, 0x0c, 0xe8, 0xd5, + 0x7b, 0x1d, 0xc1, 0xd7, 0x9e, 0x96, 0xe2, 0xc4, 0x5f, 0x8c, 0x10, 0x88, 0x09, 0x33, 0x6a, 0x32, + 0x5e, 0xb1, 0x75, 0xab, 0xe7, 0xcf, 0xcf, 0xf0, 0xc1, 0xfb, 0xa3, 0x23, 0x30, 0x31, 0xce, 0xdc, + 0xbe, 0x22, 0x1f, 0x65, 0x26, 0x51, 0xec, 0x63, 0x0a, 0xf3, 0xda, 0x5b, 0xf0, 0xe2, 0xd0, 0xdc, + 0x40, 0xe6, 0xa0, 0x74, 0x48, 0x8f, 0xf9, 0xd4, 0xd7, 0x40, 0xf6, 0x93, 0x5c, 0x86, 0xca, 0x91, + 0x6e, 0x0f, 0xe8, 0x7c, 0x91, 0x97, 0x89, 0x3f, 0x3f, 0x53, 0x7c, 0xb3, 0xa0, 0xfd, 0x8d, 0x12, + 0x4c, 0xa9, 0x19, 0xa7, 0x63, 0x39, 0x87, 0xe4, 0x1d, 0x28, 0xd9, 0x6e, 0x57, 0xce, 0x9b, 0x3f, + 0x37, 0xf6, 0x2c, 0xb6, 0xe9, 0x76, 0xdb, 0xb5, 0xd3, 0x93, 0x56, 0x69, 0xd3, 0xed, 0x22, 0x43, + 0x24, 0x06, 0x54, 0x0e, 0xf5, 0xfd, 0x43, 0x9d, 0xb7, 0xa1, 0xb9, 0xd4, 0x1e, 0x1b, 0xfa, 0x2e, + 0x43, 0x61, 0x6d, 0x6d, 0x37, 0x4e, 0x4f, 0x5a, 0x15, 0xfe, 0x17, 0x05, 0x36, 0x71, 0xa1, 0xb1, + 0x67, 0xeb, 0xc6, 0xe1, 0x81, 0x6b, 0xd3, 0xf9, 0x52, 0x4e, 0x41, 0x6d, 0x85, 0x24, 0x5e, 0x73, + 0xf8, 0x17, 0x23, 0x19, 0xc4, 0x80, 0xea, 0xc0, 0xf4, 0x2d, 0xe7, 0x50, 0xce, 0x81, 0x6f, 0x8d, + 0x2d, 0x6d, 0x77, 0x95, 0x3f, 0x13, 0x9c, 0x9e, 0xb4, 0xaa, 
0xe2, 0x37, 0x4a, 0x68, 0xed, 0x0f, + 0xa6, 0x60, 0x46, 0xbd, 0xa4, 0x07, 0xd4, 0x0b, 0xe8, 0x23, 0x72, 0x03, 0xca, 0x0e, 0xfb, 0x34, + 0xf9, 0x4b, 0x6e, 0x4f, 0xc9, 0xe1, 0x52, 0xe6, 0x9f, 0x24, 0xa7, 0xb0, 0x96, 0x89, 0xa1, 0x22, + 0x3b, 0x7c, 0xfc, 0x96, 0x75, 0x38, 0x8c, 0x68, 0x99, 0xf8, 0x8d, 0x12, 0x9a, 0xbc, 0x07, 0x65, + 0xfe, 0xf0, 0xa2, 0xab, 0xbf, 0x3a, 0xbe, 0x08, 0xf6, 0xe8, 0x75, 0xf6, 0x04, 0xfc, 0xc1, 0x39, + 0x28, 0x1b, 0x8a, 0x03, 0x73, 0x5f, 0x76, 0xec, 0xcf, 0xe5, 0xe8, 0xd8, 0x35, 0x31, 0x14, 0x77, + 0x57, 0xd7, 0x90, 0x21, 0x92, 0xbf, 0x54, 0x80, 0x17, 0x0d, 0xd7, 0x09, 0x74, 0xa6, 0x67, 0xa8, + 0x45, 0x76, 0xbe, 0xc2, 0xe5, 0xdc, 0x19, 0x5b, 0xce, 0x4a, 0x1a, 0xb1, 0xfd, 0x12, 0x5b, 0x33, + 0x86, 0x8a, 0x71, 0x58, 0x36, 0xf9, 0x2b, 0x05, 0x78, 0x89, 0xcd, 0xe5, 0x43, 0xcc, 0x7c, 0x05, + 0x9a, 0x6c, 0xab, 0xae, 0x9e, 0x9e, 0xb4, 0x5e, 0xda, 0xc8, 0x12, 0x86, 0xd9, 0x6d, 0x60, 0xad, + 0xbb, 0xa4, 0x0f, 0xab, 0x25, 0x7c, 0x75, 0x6b, 0x2e, 0x6d, 0x4e, 0x52, 0xd5, 0x69, 0x7f, 0x4e, + 0x0e, 0xe5, 0x2c, 0xcd, 0x0e, 0xb3, 0x5a, 0x41, 0x6e, 0x41, 0xed, 0xc8, 0xb5, 0x07, 0x3d, 0xea, + 0xcf, 0xd7, 0xf9, 0x14, 0x7b, 0x2d, 0x6b, 0x8a, 0x7d, 0xc0, 0x59, 0xda, 0xb3, 0x12, 0xbe, 0x26, + 0xfe, 0xfb, 0xa8, 0xea, 0x12, 0x0b, 0xaa, 0xb6, 0xd5, 0xb3, 0x02, 0x9f, 0x2f, 0x9c, 0xcd, 0xa5, + 0x5b, 0x63, 0x3f, 0x96, 0xf8, 0x44, 0x37, 0x39, 0x98, 0xf8, 0x6a, 0xc4, 0x6f, 0x94, 0x02, 0xd8, + 0x54, 0xe8, 0x1b, 0xba, 0x2d, 0x16, 0xd6, 0xe6, 0xd2, 0xd7, 0xc6, 0xff, 0x6c, 0x18, 0x4a, 0x7b, + 0x5a, 0x3e, 0x53, 0x85, 0xff, 0x45, 0x81, 0x4d, 0x7e, 0x01, 0x66, 0x12, 0x6f, 0xd3, 0x9f, 0x6f, + 0xf2, 0xde, 0x79, 0x35, 0xab, 0x77, 0x42, 0xae, 0x68, 0xe5, 0x49, 0x8c, 0x10, 0x1f, 0x53, 0x60, + 0xe4, 0x2e, 0xd4, 0x7d, 0xcb, 0xa4, 0x86, 0xee, 0xf9, 0xf3, 0x53, 0x67, 0x01, 0x9e, 0x93, 0xc0, + 0xf5, 0x8e, 0xac, 0x86, 0x21, 0x00, 0x59, 0x00, 0xe8, 0xeb, 0x5e, 0x60, 0x09, 0x45, 0x75, 0x9a, + 0x2b, 0x4d, 0x33, 0xa7, 0x27, 0x2d, 0xd8, 0x0e, 0x4b, 0x31, 0xc6, 0xc1, 0xf8, 0x59, 0xdd, 0x0d, + 
0xa7, 0x3f, 0x08, 0xc4, 0xc2, 0xda, 0x10, 0xfc, 0x9d, 0xb0, 0x14, 0x63, 0x1c, 0xe4, 0xb7, 0x0a, + 0xf0, 0xb9, 0xe8, 0xef, 0xf0, 0x47, 0x36, 0x3b, 0xf1, 0x8f, 0xac, 0x75, 0x7a, 0xd2, 0xfa, 0x5c, + 0x67, 0xb4, 0x48, 0x7c, 0x52, 0x7b, 0xc8, 0xc7, 0x05, 0x98, 0x19, 0xf4, 0x4d, 0x3d, 0xa0, 0x9d, + 0x80, 0xed, 0x78, 0xba, 0xc7, 0xf3, 0x73, 0xbc, 0x89, 0xeb, 0xe3, 0xcf, 0x82, 0x09, 0xb8, 0xe8, + 0x35, 0x27, 0xcb, 0x31, 0x25, 0x56, 0x7b, 0x07, 0xa6, 0x97, 0x07, 0xc1, 0x81, 0xeb, 0x59, 0x1f, + 0x72, 0xf5, 0x9f, 0xac, 0x41, 0x25, 0xe0, 0x6a, 0x9c, 0xd0, 0x10, 0xbe, 0x90, 0xf5, 0xd2, 0x85, + 0x4a, 0x7d, 0x97, 0x1e, 0x2b, 0xbd, 0x44, 0xac, 0xd4, 0x42, 0xad, 0x13, 0xd5, 0xb5, 0x3f, 0x53, + 0x80, 0x5a, 0x5b, 0x37, 0x0e, 0xdd, 0xfd, 0x7d, 0xf2, 0x2e, 0xd4, 0x2d, 0x27, 0xa0, 0xde, 0x91, + 0x6e, 0x4b, 0xd8, 0x85, 0x18, 0x6c, 0xb8, 0x21, 0x8c, 0x1e, 0x8f, 0xed, 0xbe, 0x98, 0xa0, 0xd5, + 0x81, 0xdc, 0xb5, 0x70, 0xcd, 0x78, 0x43, 0x62, 0x60, 0x88, 0x46, 0x5a, 0x50, 0xf1, 0x03, 0xda, + 0xf7, 0xf9, 0x1a, 0x38, 0x2d, 0x9a, 0xd1, 0x61, 0x05, 0x28, 0xca, 0xb5, 0xbf, 0x5e, 0x80, 0x46, + 0x5b, 0xf7, 0x2d, 0x83, 0x3d, 0x25, 0x59, 0x81, 0xf2, 0xc0, 0xa7, 0xde, 0xf9, 0x9e, 0x8d, 0x2f, + 0x5b, 0xbb, 0x3e, 0xf5, 0x90, 0x57, 0x26, 0xf7, 0xa1, 0xde, 0xd7, 0x7d, 0xff, 0xa1, 0xeb, 0x99, + 0x72, 0xe9, 0x3d, 0x23, 0x90, 0xd8, 0x26, 0xc8, 0xaa, 0x18, 0x82, 0x68, 0x4d, 0x88, 0x74, 0x0f, + 0xed, 0xf7, 0x0a, 0x70, 0xa9, 0x3d, 0xd8, 0xdf, 0xa7, 0x9e, 0xd4, 0x8a, 0xa5, 0xbe, 0x49, 0xa1, + 0xe2, 0x51, 0xd3, 0xf2, 0x65, 0xdb, 0x57, 0xc7, 0x1e, 0x28, 0xc8, 0x50, 0xa4, 0x7a, 0xcb, 0xfb, + 0x8b, 0x17, 0xa0, 0x40, 0x27, 0x03, 0x68, 0x7c, 0x40, 0xd9, 0x6e, 0x9c, 0xea, 0x3d, 0xf9, 0x74, + 0xb7, 0xc7, 0x16, 0x75, 0x87, 0x06, 0x1d, 0x8e, 0x14, 0xd7, 0xa6, 0xc3, 0x42, 0x8c, 0x24, 0x69, + 0xbf, 0x5d, 0x81, 0xa9, 0x15, 0xb7, 0xb7, 0x67, 0x39, 0xd4, 0xbc, 0x65, 0x76, 0x29, 0x79, 0x1f, + 0xca, 0xd4, 0xec, 0x52, 0xf9, 0xb4, 0xe3, 0x2b, 0x1e, 0x0c, 0x2c, 0x52, 0x9f, 0xd8, 0x3f, 0xe4, + 0xc0, 0x64, 0x13, 0x66, 0xf6, 0x3d, 
0xb7, 0x27, 0xe6, 0xf2, 0x9d, 0xe3, 0xbe, 0xd4, 0x9d, 0xdb, + 0x3f, 0xaa, 0x3e, 0x9c, 0xb5, 0x04, 0xf5, 0xf1, 0x49, 0x0b, 0xa2, 0x7f, 0x98, 0xaa, 0x4b, 0xde, + 0x85, 0xf9, 0xa8, 0x24, 0x9c, 0xd4, 0x56, 0xd8, 0x76, 0x86, 0xeb, 0x4e, 0x95, 0xf6, 0x2b, 0xa7, + 0x27, 0xad, 0xf9, 0xb5, 0x11, 0x3c, 0x38, 0xb2, 0x36, 0x9b, 0x2a, 0xe6, 0x22, 0xa2, 0x58, 0x68, + 0xa4, 0xca, 0x34, 0xa1, 0x15, 0x8c, 0xef, 0xfb, 0xd6, 0x52, 0x22, 0x70, 0x48, 0x28, 0x59, 0x83, + 0xa9, 0xc0, 0x8d, 0xf5, 0x57, 0x85, 0xf7, 0x97, 0xa6, 0x0c, 0x15, 0x3b, 0xee, 0xc8, 0xde, 0x4a, + 0xd4, 0x23, 0x08, 0x57, 0xd4, 0xff, 0x54, 0x4f, 0x55, 0x79, 0x4f, 0x5d, 0x3b, 0x3d, 0x69, 0x5d, + 0xd9, 0xc9, 0xe4, 0xc0, 0x11, 0x35, 0xc9, 0x2f, 0x17, 0x60, 0x46, 0x91, 0x64, 0x1f, 0xd5, 0x26, + 0xd9, 0x47, 0x84, 0x8d, 0x88, 0x9d, 0x84, 0x00, 0x4c, 0x09, 0xd4, 0x7e, 0xa7, 0x0a, 0x8d, 0x70, + 0xaa, 0x27, 0x9f, 0x87, 0x0a, 0x37, 0x41, 0x48, 0x0d, 0x3e, 0x5c, 0xc3, 0xb9, 0xa5, 0x02, 0x05, + 0x8d, 0x7c, 0x01, 0x6a, 0x86, 0xdb, 0xeb, 0xe9, 0x8e, 0xc9, 0xcd, 0x4a, 0x8d, 0x76, 0x93, 0xa9, + 0x2e, 0x2b, 0xa2, 0x08, 0x15, 0x8d, 0xbc, 0x02, 0x65, 0xdd, 0xeb, 0x0a, 0x0b, 0x4f, 0x43, 0xcc, + 0x47, 0xcb, 0x5e, 0xd7, 0x47, 0x5e, 0x4a, 0xbe, 0x02, 0x25, 0xea, 0x1c, 0xcd, 0x97, 0x47, 0xeb, + 0x46, 0xb7, 0x9c, 0xa3, 0x07, 0xba, 0xd7, 0x6e, 0xca, 0x36, 0x94, 0x6e, 0x39, 0x47, 0xc8, 0xea, + 0x90, 0x4d, 0xa8, 0x51, 0xe7, 0x88, 0xbd, 0x7b, 0x69, 0x7a, 0xf9, 0x91, 0x11, 0xd5, 0x19, 0x8b, + 0xdc, 0x26, 0x84, 0x1a, 0x96, 0x2c, 0x46, 0x05, 0x41, 0xbe, 0x01, 0x53, 0x42, 0xd9, 0xda, 0x62, + 0xef, 0xc4, 0x9f, 0xaf, 0x72, 0xc8, 0xd6, 0x68, 0x6d, 0x8d, 0xf3, 0x45, 0xa6, 0xae, 0x58, 0xa1, + 0x8f, 0x09, 0x28, 0xf2, 0x0d, 0x68, 0xa8, 0x9d, 0xb1, 0x7a, 0xb3, 0x99, 0x56, 0x22, 0xb5, 0x9d, + 0x46, 0xfa, 0xad, 0x81, 0xe5, 0xd1, 0x1e, 0x75, 0x02, 0xbf, 0xfd, 0xa2, 0xb2, 0x1b, 0x28, 0xaa, + 0x8f, 0x11, 0x1a, 0xd9, 0x1b, 0x36, 0x77, 0x09, 0x5b, 0xcd, 0xe7, 0x47, 0xcc, 0xea, 0x63, 0xd8, + 0xba, 0xbe, 0x09, 0xb3, 0xa1, 0x3d, 0x4a, 0x9a, 0x34, 0x84, 0xf5, 0xe6, 
0x4b, 0xac, 0xfa, 0x46, + 0x92, 0xf4, 0xf8, 0xa4, 0xf5, 0x6a, 0x86, 0x51, 0x23, 0x62, 0xc0, 0x34, 0x18, 0xf9, 0x10, 0x66, + 0x3c, 0xaa, 0x9b, 0x96, 0x43, 0x7d, 0x7f, 0xdb, 0x73, 0xf7, 0xf2, 0x6b, 0x9e, 0x1c, 0x45, 0x0c, + 0x7b, 0x4c, 0x20, 0x63, 0x4a, 0x12, 0x79, 0x08, 0xd3, 0xb6, 0x75, 0x44, 0x23, 0xd1, 0xcd, 0x89, + 0x88, 0x7e, 0xf1, 0xf4, 0xa4, 0x35, 0xbd, 0x19, 0x07, 0xc6, 0xa4, 0x1c, 0xed, 0xef, 0x54, 0x60, + 0x78, 0xf3, 0x95, 0x1c, 0x29, 0x85, 0x49, 0x8f, 0x94, 0xf4, 0x5b, 0x14, 0x6b, 0xc6, 0x9b, 0xb2, + 0xda, 0x04, 0xde, 0x64, 0xc6, 0x68, 0x2c, 0x4d, 0x7a, 0x34, 0x3e, 0x37, 0x13, 0xc6, 0xf0, 0xb0, + 0xad, 0x7e, 0x72, 0xc3, 0xb6, 0xf6, 0x8c, 0x86, 0xed, 0x77, 0xca, 0x30, 0xb3, 0xaa, 0xd3, 0x9e, + 0xeb, 0x3c, 0x75, 0xff, 0x5d, 0x78, 0x2e, 0xf6, 0xdf, 0x37, 0xa1, 0xee, 0xd1, 0xbe, 0x6d, 0x19, + 0xba, 0x50, 0xb3, 0xa5, 0xbd, 0x1b, 0x65, 0x19, 0x86, 0xd4, 0x11, 0x76, 0x97, 0xd2, 0x73, 0x69, + 0x77, 0x29, 0x7f, 0xf2, 0x76, 0x17, 0xed, 0x97, 0x8b, 0xc0, 0x55, 0x52, 0x72, 0x03, 0xca, 0x4c, + 0xdd, 0x4a, 0x5b, 0xfb, 0xf8, 0xd7, 0xc2, 0x29, 0xe4, 0x1a, 0x14, 0x03, 0x57, 0x4e, 0x37, 0x20, + 0xe9, 0xc5, 0x1d, 0x17, 0x8b, 0x81, 0x4b, 0x3e, 0x04, 0x30, 0x5c, 0xc7, 0xb4, 0x94, 0x1b, 0x28, + 0xdf, 0x83, 0xad, 0xb9, 0xde, 0x43, 0xdd, 0x33, 0x57, 0x42, 0x44, 0xb1, 0xf3, 0x8e, 0xfe, 0x63, + 0x4c, 0x1a, 0x79, 0x0b, 0xaa, 0xae, 0xb3, 0x36, 0xb0, 0x6d, 0xde, 0xa1, 0x8d, 0xf6, 0x8f, 0x9d, + 0x9e, 0xb4, 0xaa, 0xf7, 0x79, 0xc9, 0xe3, 0x93, 0xd6, 0x55, 0xb1, 0x93, 0x61, 0xff, 0xde, 0xf1, + 0xac, 0xc0, 0x72, 0xba, 0xe1, 0x46, 0x54, 0x56, 0xd3, 0x7e, 0xb5, 0x00, 0xcd, 0x35, 0xeb, 0x11, + 0x35, 0xdf, 0xb1, 0x1c, 0xd3, 0x7d, 0x48, 0x10, 0xaa, 0x36, 0x75, 0xba, 0xc1, 0xc1, 0x98, 0x3b, + 0x45, 0x61, 0x8f, 0xe1, 0x08, 0x28, 0x91, 0xc8, 0x22, 0x34, 0xc4, 0x3e, 0xc3, 0x72, 0xba, 0xbc, + 0x0f, 0xeb, 0xd1, 0x4c, 0xdf, 0x51, 0x04, 0x8c, 0x78, 0xb4, 0x63, 0x78, 0x71, 0xa8, 0x1b, 0x88, + 0x09, 0xe5, 0x40, 0xef, 0xaa, 0x45, 0x65, 0x6d, 0xec, 0x0e, 0xde, 0xd1, 0xbb, 0xb1, 0xce, 0xe5, + 0xda, 0xdc, 
0x8e, 0xce, 0xb4, 0x39, 0x86, 0xae, 0xfd, 0x61, 0x01, 0xea, 0x6b, 0x03, 0xc7, 0xe0, + 0x9b, 0xf1, 0xa7, 0x5b, 0x81, 0x95, 0x6a, 0x58, 0xcc, 0x54, 0x0d, 0x07, 0x50, 0x3d, 0x7c, 0x18, + 0xaa, 0x8e, 0xcd, 0xa5, 0xad, 0xf1, 0x47, 0x85, 0x6c, 0xd2, 0xc2, 0x5d, 0x8e, 0x27, 0x9c, 0x94, + 0x33, 0xb2, 0x41, 0xd5, 0xbb, 0xef, 0x70, 0xa1, 0x52, 0xd8, 0xb5, 0xaf, 0x40, 0x33, 0xc6, 0x76, + 0x2e, 0x7f, 0xc5, 0xdf, 0x2d, 0x43, 0x75, 0xbd, 0xd3, 0x59, 0xde, 0xde, 0x20, 0xaf, 0x43, 0x53, + 0xfa, 0xaf, 0xee, 0x45, 0x7d, 0x10, 0xba, 0x2f, 0x3b, 0x11, 0x09, 0xe3, 0x7c, 0x4c, 0xf1, 0xf6, + 0xa8, 0x6e, 0xf7, 0xe4, 0xc7, 0x12, 0x2a, 0xde, 0xc8, 0x0a, 0x51, 0xd0, 0x88, 0x0e, 0x33, 0x6c, + 0x2f, 0xcf, 0xba, 0x50, 0xec, 0xd3, 0xe5, 0x67, 0x73, 0xc6, 0x9d, 0x3c, 0x5f, 0x60, 0x76, 0x13, + 0x00, 0x98, 0x02, 0x24, 0x6f, 0x42, 0x5d, 0x1f, 0x04, 0x07, 0x7c, 0xab, 0x24, 0xbe, 0x8d, 0x57, + 0xb8, 0x7b, 0x4f, 0x96, 0x3d, 0x3e, 0x69, 0x4d, 0xdd, 0xc5, 0xf6, 0xeb, 0xea, 0x3f, 0x86, 0xdc, + 0xac, 0x71, 0xca, 0x36, 0x20, 0x1b, 0x57, 0x39, 0x77, 0xe3, 0xb6, 0x13, 0x00, 0x98, 0x02, 0x24, + 0xef, 0xc1, 0xd4, 0x21, 0x3d, 0x0e, 0xf4, 0x3d, 0x29, 0xa0, 0x7a, 0x1e, 0x01, 0x73, 0x4c, 0x59, + 0xbf, 0x1b, 0xab, 0x8e, 0x09, 0x30, 0xe2, 0xc3, 0xe5, 0x43, 0xea, 0xed, 0x51, 0xcf, 0x95, 0x76, + 0x06, 0x29, 0xa4, 0x76, 0x1e, 0x21, 0xf3, 0xa7, 0x27, 0xad, 0xcb, 0x77, 0x33, 0x60, 0x30, 0x13, + 0x5c, 0xfb, 0xdf, 0x45, 0x98, 0x5d, 0x17, 0x01, 0x04, 0xae, 0x27, 0x34, 0x0f, 0x72, 0x15, 0x4a, + 0x5e, 0x7f, 0xc0, 0x47, 0x4e, 0x49, 0xb8, 0x08, 0x70, 0x7b, 0x17, 0x59, 0x19, 0x79, 0x17, 0xea, + 0xa6, 0x9c, 0x32, 0xa4, 0x99, 0x63, 0x2c, 0x93, 0x94, 0xfa, 0x87, 0x21, 0x1a, 0xdb, 0xd3, 0xf5, + 0xfc, 0x6e, 0xc7, 0xfa, 0x90, 0xca, 0x9d, 0x3f, 0xdf, 0xd3, 0x6d, 0x89, 0x22, 0x54, 0x34, 0xb6, + 0xaa, 0x1e, 0xd2, 0x63, 0xb1, 0xef, 0x2d, 0x47, 0xab, 0xea, 0x5d, 0x59, 0x86, 0x21, 0x95, 0xb4, + 0xd4, 0xc7, 0xc2, 0x46, 0x41, 0x59, 0xd8, 0x6c, 0x1e, 0xb0, 0x02, 0xf9, 0xdd, 0xb0, 0x29, 0xf3, + 0x03, 0x2b, 0x08, 0xa8, 0x27, 0x5f, 0xe3, 0x58, 
0x53, 0xe6, 0x1d, 0x8e, 0x80, 0x12, 0x89, 0xfc, + 0x04, 0x34, 0x38, 0x78, 0xdb, 0x76, 0xf7, 0xf8, 0x8b, 0x6b, 0x08, 0xeb, 0xcd, 0x03, 0x55, 0x88, + 0x11, 0x5d, 0xfb, 0xa3, 0x22, 0x5c, 0x59, 0xa7, 0x81, 0xd0, 0x6a, 0x56, 0x69, 0xdf, 0x76, 0x8f, + 0x99, 0x3e, 0x8d, 0xf4, 0x5b, 0xe4, 0x6d, 0x00, 0xcb, 0xdf, 0xeb, 0x1c, 0x19, 0xfc, 0x3b, 0x10, + 0xdf, 0xf0, 0x0d, 0xf9, 0x49, 0xc2, 0x46, 0xa7, 0x2d, 0x29, 0x8f, 0x13, 0xff, 0x30, 0x56, 0x27, + 0xda, 0x48, 0x17, 0x9f, 0xb0, 0x91, 0xee, 0x00, 0xf4, 0x23, 0xad, 0xbc, 0xc4, 0x39, 0x7f, 0x5a, + 0x89, 0x39, 0x8f, 0x42, 0x1e, 0x83, 0xc9, 0xa3, 0x27, 0x3b, 0x30, 0x67, 0xd2, 0x7d, 0x7d, 0x60, + 0x07, 0xe1, 0x4e, 0x42, 0x7e, 0xc4, 0x67, 0xdf, 0x8c, 0x84, 0xc1, 0x0d, 0xab, 0x29, 0x24, 0x1c, + 0xc2, 0xd6, 0xfe, 0x5e, 0x09, 0xae, 0xad, 0xd3, 0x20, 0xb4, 0xad, 0xc9, 0xd9, 0xb1, 0xd3, 0xa7, + 0x06, 0x7b, 0x0b, 0x1f, 0x17, 0xa0, 0x6a, 0xeb, 0x7b, 0xd4, 0x66, 0xab, 0x17, 0x7b, 0x9a, 0xf7, + 0xc7, 0x5e, 0x08, 0x46, 0x4b, 0x59, 0xd8, 0xe4, 0x12, 0x52, 0x4b, 0x83, 0x28, 0x44, 0x29, 0x9e, + 0x4d, 0xea, 0x86, 0x3d, 0xf0, 0x03, 0xea, 0x6d, 0xbb, 0x5e, 0x20, 0xf5, 0xc9, 0x70, 0x52, 0x5f, + 0x89, 0x48, 0x18, 0xe7, 0x23, 0x4b, 0x00, 0x86, 0x6d, 0x51, 0x27, 0xe0, 0xb5, 0xc4, 0x77, 0x45, + 0xd4, 0xfb, 0x5d, 0x09, 0x29, 0x18, 0xe3, 0x62, 0xa2, 0x7a, 0xae, 0x63, 0x05, 0xae, 0x10, 0x55, + 0x4e, 0x8a, 0xda, 0x8a, 0x48, 0x18, 0xe7, 0xe3, 0xd5, 0x68, 0xe0, 0x59, 0x86, 0xcf, 0xab, 0x55, + 0x52, 0xd5, 0x22, 0x12, 0xc6, 0xf9, 0xd8, 0x9a, 0x17, 0x7b, 0xfe, 0x73, 0xad, 0x79, 0xbf, 0xd9, + 0x80, 0xeb, 0x89, 0x6e, 0x0d, 0xf4, 0x80, 0xee, 0x0f, 0xec, 0x0e, 0x0d, 0xd4, 0x0b, 0x1c, 0x73, + 0x2d, 0xfc, 0xf3, 0xd1, 0x7b, 0x17, 0x61, 0x4b, 0xc6, 0x64, 0xde, 0xfb, 0x50, 0x03, 0xcf, 0xf4, + 0xee, 0x17, 0xa1, 0xe1, 0xe8, 0x81, 0xcf, 0x3f, 0x5c, 0xf9, 0x8d, 0x86, 0x6a, 0xd8, 0x3d, 0x45, + 0xc0, 0x88, 0x87, 0x6c, 0xc3, 0x65, 0xd9, 0xc5, 0xb7, 0x1e, 0xf5, 0x5d, 0x2f, 0xa0, 0x9e, 0xa8, + 0x2b, 0x97, 0x53, 0x59, 0xf7, 0xf2, 0x56, 0x06, 0x0f, 0x66, 0xd6, 0x24, 0x5b, 0x70, 
0xc9, 0x10, + 0xa1, 0x1c, 0xd4, 0x76, 0x75, 0x53, 0x01, 0x0a, 0x53, 0x66, 0xb8, 0x35, 0x5a, 0x19, 0x66, 0xc1, + 0xac, 0x7a, 0xe9, 0xd1, 0x5c, 0x1d, 0x6b, 0x34, 0xd7, 0xc6, 0x19, 0xcd, 0xf5, 0xf1, 0x46, 0x73, + 0xe3, 0x6c, 0xa3, 0x99, 0xf5, 0x3c, 0x1b, 0x47, 0xd4, 0x63, 0xea, 0x89, 0x58, 0x61, 0x63, 0x91, + 0x42, 0x61, 0xcf, 0x77, 0x32, 0x78, 0x30, 0xb3, 0x26, 0xd9, 0x83, 0x6b, 0xa2, 0xfc, 0x96, 0x63, + 0x78, 0xc7, 0x7d, 0xb6, 0xf0, 0xc4, 0x70, 0x9b, 0x09, 0x5b, 0xf2, 0xb5, 0xce, 0x48, 0x4e, 0x7c, + 0x02, 0x0a, 0xf9, 0x59, 0x98, 0x16, 0x6f, 0x69, 0x4b, 0xef, 0x73, 0x58, 0x11, 0x37, 0xf4, 0x92, + 0x84, 0x9d, 0x5e, 0x89, 0x13, 0x31, 0xc9, 0x4b, 0x96, 0x61, 0xb6, 0x7f, 0x64, 0xb0, 0x9f, 0x1b, + 0xfb, 0xf7, 0x28, 0x35, 0xa9, 0xc9, 0x1d, 0x95, 0x8d, 0xf6, 0xcb, 0xca, 0xba, 0xb3, 0x9d, 0x24, + 0x63, 0x9a, 0x9f, 0xbc, 0x09, 0x53, 0x7e, 0xa0, 0x7b, 0x81, 0x34, 0xe0, 0xce, 0xcf, 0x88, 0xb8, + 0x2a, 0x65, 0xdf, 0xec, 0xc4, 0x68, 0x98, 0xe0, 0xcc, 0x5c, 0x2f, 0x66, 0x2f, 0x6e, 0xbd, 0xc8, + 0x33, 0x5b, 0xfd, 0xb3, 0x22, 0xdc, 0x58, 0xa7, 0xc1, 0x96, 0xeb, 0x48, 0xf3, 0x77, 0xd6, 0xb2, + 0x7f, 0x26, 0xeb, 0x77, 0x72, 0xd1, 0x2e, 0x4e, 0x74, 0xd1, 0x2e, 0x4d, 0x68, 0xd1, 0x2e, 0x5f, + 0xe0, 0xa2, 0xfd, 0x0f, 0x8a, 0xf0, 0x72, 0xa2, 0x27, 0xb7, 0x5d, 0x53, 0x4d, 0xf8, 0x9f, 0x75, + 0xe0, 0x19, 0x3a, 0xf0, 0xb1, 0xd0, 0x3b, 0xb9, 0x03, 0x33, 0xa5, 0xf1, 0x7c, 0x94, 0xd6, 0x78, + 0xde, 0xcb, 0xb3, 0xf2, 0x65, 0x48, 0x38, 0xd3, 0x8a, 0x77, 0x07, 0x88, 0x27, 0xdd, 0xad, 0xc2, + 0xf4, 0x13, 0x53, 0x7a, 0xc2, 0xc0, 0x4d, 0x1c, 0xe2, 0xc0, 0x8c, 0x5a, 0xa4, 0x03, 0x2f, 0xf9, + 0xd4, 0x09, 0x2c, 0x87, 0xda, 0x49, 0x38, 0xa1, 0x0d, 0xbd, 0x2a, 0xe1, 0x5e, 0xea, 0x64, 0x31, + 0x61, 0x76, 0xdd, 0x3c, 0xf3, 0xc0, 0xbf, 0x02, 0xae, 0x72, 0x8a, 0xae, 0x99, 0x98, 0xc6, 0xf2, + 0x71, 0x5a, 0x63, 0x79, 0x3f, 0xff, 0x7b, 0x1b, 0x4f, 0x5b, 0x59, 0x02, 0xe0, 0x6f, 0x21, 0xae, + 0xae, 0x84, 0x8b, 0x34, 0x86, 0x14, 0x8c, 0x71, 0xb1, 0x05, 0x48, 0xf5, 0x73, 0x5c, 0x53, 0x09, + 0x17, 0xa0, 0x4e, 0x9c, 
0x88, 0x49, 0xde, 0x91, 0xda, 0x4e, 0x65, 0x6c, 0x6d, 0xe7, 0x0e, 0x90, + 0x84, 0xe1, 0x51, 0xe0, 0x55, 0x93, 0x71, 0xc3, 0x1b, 0x43, 0x1c, 0x98, 0x51, 0x6b, 0xc4, 0x50, + 0xae, 0x4d, 0x76, 0x28, 0xd7, 0xc7, 0x1f, 0xca, 0xe4, 0x7d, 0xb8, 0xca, 0x45, 0xc9, 0xfe, 0x49, + 0x02, 0x0b, 0xbd, 0xe7, 0x47, 0x24, 0xf0, 0x55, 0x1c, 0xc5, 0x88, 0xa3, 0x31, 0xd8, 0xfb, 0x31, + 0x3c, 0x6a, 0x32, 0xe1, 0xba, 0x3d, 0x5a, 0x27, 0x5a, 0xc9, 0xe0, 0xc1, 0xcc, 0x9a, 0x6c, 0x88, + 0x05, 0x6c, 0x18, 0xea, 0x7b, 0x36, 0x35, 0x65, 0xdc, 0x74, 0x38, 0xc4, 0x76, 0x36, 0x3b, 0x92, + 0x82, 0x31, 0xae, 0x2c, 0x35, 0x65, 0xea, 0x9c, 0x6a, 0xca, 0x3a, 0xb7, 0xd2, 0xef, 0x27, 0xb4, + 0x21, 0xa9, 0xeb, 0x84, 0x91, 0xf0, 0x2b, 0x69, 0x06, 0x1c, 0xae, 0xc3, 0xb5, 0x44, 0xc3, 0xb3, + 0xfa, 0x81, 0x9f, 0xc4, 0x9a, 0x49, 0x69, 0x89, 0x19, 0x3c, 0x98, 0x59, 0x93, 0xe9, 0xe7, 0x07, + 0x54, 0xb7, 0x83, 0x83, 0x24, 0xe0, 0x6c, 0x52, 0x3f, 0xbf, 0x3d, 0xcc, 0x82, 0x59, 0xf5, 0x32, + 0x17, 0xa4, 0xb9, 0xe7, 0x53, 0xad, 0xfa, 0x95, 0x12, 0x5c, 0x5d, 0xa7, 0x41, 0x18, 0x52, 0xf6, + 0x99, 0x19, 0xe5, 0x13, 0x30, 0xa3, 0xfc, 0x46, 0x05, 0x2e, 0xad, 0xd3, 0x60, 0x48, 0x1b, 0xfb, + 0xff, 0xb4, 0xfb, 0xb7, 0xe0, 0x52, 0x14, 0xc5, 0xd8, 0x09, 0x5c, 0x4f, 0xac, 0xe5, 0xa9, 0xdd, + 0x72, 0x67, 0x98, 0x05, 0xb3, 0xea, 0x91, 0x6f, 0xc0, 0xcb, 0x7c, 0xa9, 0x77, 0xba, 0xc2, 0x3e, + 0x2b, 0x8c, 0x09, 0xb1, 0x73, 0x38, 0x2d, 0x09, 0xf9, 0x72, 0x27, 0x9b, 0x0d, 0x47, 0xd5, 0x27, + 0xdf, 0x86, 0xa9, 0xbe, 0xd5, 0xa7, 0xb6, 0xe5, 0x70, 0xfd, 0x2c, 0x77, 0xf0, 0xcf, 0x76, 0x0c, + 0x2c, 0xda, 0xc0, 0xc5, 0x4b, 0x31, 0x21, 0x30, 0x73, 0xa4, 0xd6, 0x2f, 0x70, 0xa4, 0xfe, 0x8f, + 0x22, 0xd4, 0xd6, 0x3d, 0x77, 0xd0, 0x6f, 0x1f, 0x93, 0x2e, 0x54, 0x1f, 0x72, 0xe7, 0x99, 0x74, + 0x4d, 0x8d, 0x7f, 0x12, 0x40, 0xf8, 0xe0, 0x22, 0x95, 0x48, 0xfc, 0x47, 0x09, 0xcf, 0x06, 0xf1, + 0x21, 0x3d, 0xa6, 0xa6, 0xf4, 0xa1, 0x85, 0x83, 0xf8, 0x2e, 0x2b, 0x44, 0x41, 0x23, 0x3d, 0x98, + 0xd5, 0x6d, 0xdb, 0x7d, 0x48, 0xcd, 0x4d, 0x3d, 0xe0, 0x7e, 
0x6f, 0xe9, 0x5b, 0x39, 0xaf, 0x59, + 0x9a, 0x07, 0x33, 0x2c, 0x27, 0xa1, 0x30, 0x8d, 0x4d, 0x3e, 0x80, 0x9a, 0x1f, 0xb8, 0x9e, 0x52, + 0xb6, 0x9a, 0x4b, 0x2b, 0xe3, 0xbf, 0xf4, 0xf6, 0xd7, 0x3b, 0x02, 0x4a, 0xd8, 0xec, 0xe5, 0x1f, + 0x54, 0x02, 0xb4, 0x5f, 0x2f, 0x00, 0xdc, 0xde, 0xd9, 0xd9, 0x96, 0xee, 0x05, 0x13, 0xca, 0xfa, + 0x20, 0x74, 0x54, 0x8e, 0xef, 0x10, 0x4c, 0x04, 0xe0, 0x4a, 0x1f, 0xde, 0x20, 0x38, 0x40, 0x8e, + 0x4e, 0x7e, 0x1c, 0x6a, 0x52, 0x41, 0x96, 0xdd, 0x1e, 0xc6, 0x53, 0x48, 0x25, 0x1a, 0x15, 0x5d, + 0xfb, 0xdb, 0x45, 0x80, 0x0d, 0xd3, 0xa6, 0x1d, 0x75, 0x78, 0xa3, 0x11, 0x1c, 0x78, 0xd4, 0x3f, + 0x70, 0x6d, 0x73, 0x4c, 0x6f, 0x2a, 0xb7, 0xf9, 0xef, 0x28, 0x10, 0x8c, 0xf0, 0x88, 0x09, 0x53, + 0x7e, 0x40, 0xfb, 0x2a, 0x26, 0x77, 0x4c, 0x27, 0xca, 0x9c, 0xb0, 0x8b, 0x44, 0x38, 0x98, 0x40, + 0x25, 0x3a, 0x34, 0x2d, 0xc7, 0x10, 0x1f, 0x48, 0xfb, 0x78, 0xcc, 0x81, 0x34, 0xcb, 0x76, 0x1c, + 0x1b, 0x11, 0x0c, 0xc6, 0x31, 0xb5, 0xdf, 0x2d, 0xc2, 0x15, 0x2e, 0x8f, 0x35, 0x23, 0x11, 0x79, + 0x4b, 0xfe, 0xe4, 0xd0, 0x41, 0xd3, 0x3f, 0x7e, 0x36, 0xd1, 0xe2, 0x9c, 0xe2, 0x16, 0x0d, 0xf4, + 0x48, 0x9f, 0x8b, 0xca, 0x62, 0xa7, 0x4b, 0x07, 0x50, 0xf6, 0xd9, 0x7c, 0x25, 0x7a, 0xaf, 0x33, + 0xf6, 0x10, 0xca, 0x7e, 0x00, 0x3e, 0x7b, 0x85, 0x5e, 0x63, 0x3e, 0x6b, 0x71, 0x71, 0xe4, 0x4f, + 0x43, 0xd5, 0x0f, 0xf4, 0x60, 0xa0, 0x3e, 0xcd, 0xdd, 0x49, 0x0b, 0xe6, 0xe0, 0xd1, 0x3c, 0x22, + 0xfe, 0xa3, 0x14, 0xaa, 0xfd, 0x6e, 0x01, 0xae, 0x65, 0x57, 0xdc, 0xb4, 0xfc, 0x80, 0xfc, 0x89, + 0xa1, 0x6e, 0x3f, 0xe3, 0x1b, 0x67, 0xb5, 0x79, 0xa7, 0x87, 0x67, 0x11, 0x54, 0x49, 0xac, 0xcb, + 0x03, 0xa8, 0x58, 0x01, 0xed, 0xa9, 0xfd, 0xe5, 0xfd, 0x09, 0x3f, 0x7a, 0x6c, 0x69, 0x67, 0x52, + 0x50, 0x08, 0xd3, 0xbe, 0x53, 0x1c, 0xf5, 0xc8, 0x7c, 0xf9, 0xb0, 0x93, 0xd1, 0xdd, 0x77, 0xf3, + 0x45, 0x77, 0x27, 0x1b, 0x34, 0x1c, 0xe4, 0xfd, 0xa7, 0x86, 0x83, 0xbc, 0xef, 0xe7, 0x0f, 0xf2, + 0x4e, 0x75, 0xc3, 0xc8, 0x58, 0xef, 0x1f, 0x94, 0xe0, 0x95, 0x27, 0x0d, 0x1b, 0xb6, 0x9e, 0xc9, + 
0xd1, 0x99, 0x77, 0x3d, 0x7b, 0xf2, 0x38, 0x24, 0x4b, 0x50, 0xe9, 0x1f, 0xe8, 0xbe, 0x52, 0xca, + 0xd4, 0x86, 0xa5, 0xb2, 0xcd, 0x0a, 0x1f, 0xb3, 0x49, 0x83, 0x2b, 0x73, 0xfc, 0x2f, 0x0a, 0x56, + 0x36, 0x1d, 0xf7, 0xa8, 0xef, 0x47, 0x36, 0x81, 0x70, 0x3a, 0xde, 0x12, 0xc5, 0xa8, 0xe8, 0x24, + 0x80, 0xaa, 0x30, 0x31, 0xcb, 0x95, 0x69, 0xfc, 0x40, 0xae, 0x8c, 0x03, 0x01, 0xd1, 0x43, 0x49, + 0x6f, 0x85, 0x94, 0x45, 0x16, 0xa0, 0x1c, 0x44, 0xe1, 0xd9, 0x6a, 0x6b, 0x5e, 0xce, 0xd0, 0x4f, + 0x39, 0x1f, 0xdb, 0xd8, 0xbb, 0x7b, 0xdc, 0xa8, 0x6e, 0x4a, 0xff, 0xb9, 0xe5, 0x3a, 0x5c, 0x21, + 0x2b, 0x45, 0x1b, 0xfb, 0xfb, 0x43, 0x1c, 0x98, 0x51, 0x4b, 0xfb, 0xb7, 0x75, 0xb8, 0x92, 0x3d, + 0x1e, 0x58, 0xbf, 0x1d, 0x51, 0xcf, 0x67, 0xd8, 0x85, 0x64, 0xbf, 0x3d, 0x10, 0xc5, 0xa8, 0xe8, + 0x9f, 0xea, 0x80, 0xb3, 0xdf, 0x28, 0xc0, 0x55, 0x4f, 0xfa, 0x88, 0x9e, 0x45, 0xd0, 0xd9, 0xab, + 0xc2, 0x9c, 0x31, 0x42, 0x20, 0x8e, 0x6e, 0x0b, 0xf9, 0x9b, 0x05, 0x98, 0xef, 0xa5, 0xec, 0x1c, + 0x17, 0x78, 0x56, 0x92, 0x9f, 0x7f, 0xd8, 0x1a, 0x21, 0x0f, 0x47, 0xb6, 0x84, 0x7c, 0x1b, 0x9a, + 0x7d, 0x36, 0x2e, 0xfc, 0x80, 0x3a, 0x86, 0x0a, 0x10, 0x1d, 0xff, 0x4b, 0xda, 0x8e, 0xb0, 0xc2, + 0xb3, 0x52, 0x5c, 0x3f, 0x88, 0x11, 0x30, 0x2e, 0xf1, 0x39, 0x3f, 0x1c, 0x79, 0x13, 0xea, 0x3e, + 0x0d, 0x02, 0xcb, 0xe9, 0x8a, 0xfd, 0x46, 0x43, 0x7c, 0x2b, 0x1d, 0x59, 0x86, 0x21, 0x95, 0xfc, + 0x04, 0x34, 0xb8, 0xcb, 0x69, 0xd9, 0xeb, 0xfa, 0xf3, 0x0d, 0x1e, 0x2e, 0x36, 0x2d, 0x02, 0xe0, + 0x64, 0x21, 0x46, 0x74, 0xf2, 0x25, 0x98, 0xda, 0xe3, 0x9f, 0xaf, 0x3c, 0x2f, 0x2f, 0x6c, 0x5c, + 0x5c, 0x5b, 0x6b, 0xc7, 0xca, 0x31, 0xc1, 0x45, 0x96, 0x00, 0x68, 0xe8, 0x97, 0x4b, 0xdb, 0xb3, + 0x22, 0x8f, 0x1d, 0xc6, 0xb8, 0xc8, 0xab, 0x50, 0x0a, 0x6c, 0x9f, 0xdb, 0xb0, 0xea, 0xd1, 0x16, + 0x74, 0x67, 0xb3, 0x83, 0xac, 0x5c, 0xfb, 0xa3, 0x02, 0xcc, 0xa6, 0x8e, 0x11, 0xb1, 0x2a, 0x03, + 0xcf, 0x96, 0xd3, 0x48, 0x58, 0x65, 0x17, 0x37, 0x91, 0x95, 0x93, 0xf7, 0xa5, 0x5a, 0x5e, 0xcc, + 0x99, 0x1a, 0xe4, 0x9e, 0x1e, 0xf8, 
0x4c, 0x0f, 0x1f, 0xd2, 0xc8, 0xb9, 0x9b, 0x2f, 0x6a, 0x8f, + 0x5c, 0x07, 0x62, 0x6e, 0xbe, 0x88, 0x86, 0x09, 0xce, 0x94, 0xc1, 0xaf, 0x7c, 0x16, 0x83, 0x9f, + 0xf6, 0xab, 0xc5, 0x58, 0x0f, 0x48, 0xcd, 0xfe, 0x29, 0x3d, 0xf0, 0x45, 0xb6, 0x80, 0x86, 0x8b, + 0x7b, 0x23, 0xbe, 0xfe, 0xf1, 0xc5, 0x58, 0x52, 0xc9, 0x3b, 0xa2, 0xef, 0x4b, 0x39, 0x0f, 0x60, + 0xef, 0x6c, 0x76, 0x44, 0x74, 0x95, 0x7a, 0x6b, 0xe1, 0x2b, 0x28, 0x5f, 0xd0, 0x2b, 0xd0, 0xfe, + 0x45, 0x09, 0x9a, 0x77, 0xdc, 0xbd, 0x4f, 0x49, 0x04, 0x75, 0xf6, 0x32, 0x55, 0xfc, 0x04, 0x97, + 0xa9, 0x5d, 0x78, 0x39, 0x08, 0xec, 0x0e, 0x35, 0x5c, 0xc7, 0xf4, 0x97, 0xf7, 0x03, 0xea, 0xad, + 0x59, 0x8e, 0xe5, 0x1f, 0x50, 0x53, 0xba, 0x93, 0x3e, 0x77, 0x7a, 0xd2, 0x7a, 0x79, 0x67, 0x67, + 0x33, 0x8b, 0x05, 0x47, 0xd5, 0xe5, 0xd3, 0x86, 0x38, 0xf3, 0xc9, 0xcf, 0x44, 0xc9, 0x98, 0x1b, + 0x31, 0x6d, 0xc4, 0xca, 0x31, 0xc1, 0xa5, 0x7d, 0xb7, 0x08, 0x8d, 0x30, 0xe9, 0x03, 0xf9, 0x02, + 0xd4, 0xf6, 0x3c, 0xf7, 0x90, 0x7a, 0xc2, 0x73, 0x27, 0xcf, 0x44, 0xb5, 0x45, 0x11, 0x2a, 0x1a, + 0xf9, 0x3c, 0x54, 0x02, 0xb7, 0x6f, 0x19, 0x69, 0x83, 0xda, 0x0e, 0x2b, 0x44, 0x41, 0xbb, 0xb8, + 0x01, 0xfe, 0xc5, 0x84, 0x6a, 0xd7, 0x18, 0xa9, 0x8c, 0xbd, 0x07, 0x65, 0x5f, 0xf7, 0x6d, 0xb9, + 0x9e, 0xe6, 0xc8, 0x9f, 0xb0, 0xdc, 0xd9, 0x94, 0xf9, 0x13, 0x96, 0x3b, 0x9b, 0xc8, 0x41, 0xb5, + 0x3f, 0x28, 0x42, 0x53, 0xf4, 0x9b, 0x98, 0x15, 0x26, 0xd9, 0x73, 0x6f, 0xf1, 0x50, 0x0a, 0x7f, + 0xd0, 0xa3, 0x1e, 0x37, 0x33, 0xc9, 0x49, 0x2e, 0xee, 0x1f, 0x88, 0x88, 0x61, 0x38, 0x45, 0x54, + 0xa4, 0xba, 0xbe, 0x7c, 0x81, 0x5d, 0x5f, 0x39, 0x53, 0xd7, 0x57, 0x2f, 0xa2, 0xeb, 0x3f, 0x2e, + 0x42, 0x63, 0xd3, 0xda, 0xa7, 0xc6, 0xb1, 0x61, 0xf3, 0xd3, 0x9f, 0x26, 0xb5, 0x69, 0x40, 0xd7, + 0x3d, 0xdd, 0xa0, 0xdb, 0xd4, 0xb3, 0x78, 0x52, 0x24, 0xf6, 0x7d, 0xf0, 0x19, 0x48, 0x9e, 0xfe, + 0x5c, 0x1d, 0xc1, 0x83, 0x23, 0x6b, 0x93, 0x0d, 0x98, 0x32, 0xa9, 0x6f, 0x79, 0xd4, 0xdc, 0x8e, + 0x6d, 0x54, 0xbe, 0xa0, 0x96, 0x9a, 0xd5, 0x18, 0xed, 0xf1, 0x49, 0x6b, 
0x5a, 0x19, 0x28, 0xc5, + 0x8e, 0x25, 0x51, 0x95, 0x7d, 0xf2, 0x7d, 0x7d, 0xe0, 0x67, 0xb5, 0x31, 0xf6, 0xc9, 0x6f, 0x67, + 0xb3, 0xe0, 0xa8, 0xba, 0x5a, 0x05, 0x4a, 0x9b, 0x6e, 0x57, 0xfb, 0x4e, 0x09, 0xc2, 0xec, 0x59, + 0xe4, 0xcf, 0x15, 0xa0, 0xa9, 0x3b, 0x8e, 0x1b, 0xc8, 0xcc, 0x54, 0xc2, 0x03, 0x8f, 0xb9, 0x93, + 0x74, 0x2d, 0x2c, 0x47, 0xa0, 0xc2, 0x79, 0x1b, 0x3a, 0x94, 0x63, 0x14, 0x8c, 0xcb, 0x26, 0x83, + 0x94, 0x3f, 0x79, 0x2b, 0x7f, 0x2b, 0xce, 0xe0, 0x3d, 0xbe, 0xf6, 0x35, 0x98, 0x4b, 0x37, 0xf6, + 0x3c, 0xee, 0xa0, 0x5c, 0x8e, 0xf9, 0x22, 0x40, 0x14, 0x53, 0xf2, 0x0c, 0x8c, 0x58, 0x56, 0xc2, + 0x88, 0x35, 0x7e, 0x0a, 0x83, 0xa8, 0xd1, 0x23, 0x0d, 0x57, 0xdf, 0x4a, 0x19, 0xae, 0x36, 0x26, + 0x21, 0xec, 0xc9, 0xc6, 0xaa, 0xbf, 0x55, 0x80, 0xb9, 0x88, 0x59, 0x9e, 0x85, 0xfe, 0x32, 0x4c, + 0x7b, 0x54, 0x37, 0xdb, 0x7a, 0x60, 0x1c, 0xf0, 0x50, 0xef, 0x02, 0x8f, 0xcd, 0xe6, 0xa7, 0xbf, + 0x30, 0x4e, 0xc0, 0x24, 0x1f, 0xd1, 0xa1, 0xc9, 0x0a, 0x76, 0xac, 0x1e, 0x75, 0x07, 0xc1, 0x98, + 0x56, 0x53, 0xbe, 0x61, 0xc1, 0x08, 0x06, 0xe3, 0x98, 0xda, 0x0f, 0x0a, 0x30, 0x13, 0x6f, 0xf0, + 0x85, 0x5b, 0xd4, 0x0e, 0x92, 0x16, 0xb5, 0x95, 0x09, 0xbc, 0x93, 0x11, 0x56, 0xb4, 0x8f, 0x20, + 0xfe, 0x68, 0xdc, 0x72, 0x16, 0x37, 0x16, 0x14, 0x9e, 0x68, 0x2c, 0xf8, 0xf4, 0x27, 0x4c, 0x1a, + 0xa5, 0xe5, 0x96, 0x9f, 0x63, 0x2d, 0xf7, 0x93, 0xcc, 0xba, 0x14, 0xcb, 0x1c, 0x54, 0xcd, 0x91, + 0x39, 0xa8, 0x17, 0x66, 0x0e, 0xaa, 0x4d, 0x6c, 0xd2, 0x39, 0x4b, 0xf6, 0xa0, 0xfa, 0x33, 0xcd, + 0x1e, 0xd4, 0xb8, 0xa8, 0xec, 0x41, 0x90, 0x37, 0x7b, 0xd0, 0x47, 0x05, 0x98, 0x31, 0x13, 0x27, + 0x66, 0xe5, 0x19, 0xf3, 0xf1, 0x97, 0x9a, 0xe4, 0x01, 0x5c, 0x71, 0x64, 0x2a, 0x59, 0x86, 0x29, + 0x91, 0x59, 0x39, 0x7b, 0xa6, 0x3e, 0x99, 0x9c, 0x3d, 0xbf, 0x5f, 0x8b, 0xaf, 0x48, 0xcf, 0xda, + 0x68, 0xfe, 0x46, 0xd2, 0x68, 0x7e, 0x23, 0x6d, 0x34, 0x9f, 0x8d, 0xc5, 0xb3, 0xc6, 0x0d, 0xe7, + 0x3f, 0x19, 0x9b, 0xa8, 0x4b, 0x3c, 0x5b, 0x4f, 0xf8, 0xce, 0x33, 0x26, 0xeb, 0x65, 0x98, 0x95, + 0xda, 0xab, 
0x22, 0xf2, 0x59, 0x6e, 0x3a, 0x0a, 0x73, 0x5a, 0x4d, 0x92, 0x31, 0xcd, 0xcf, 0x04, + 0xfa, 0x2a, 0x69, 0xab, 0xd8, 0x2a, 0x44, 0x83, 0x4c, 0x25, 0x54, 0x0d, 0x39, 0xd8, 0xb6, 0xc2, + 0xa3, 0xba, 0x2f, 0x4d, 0xdf, 0xb1, 0x6d, 0x05, 0xf2, 0x52, 0x94, 0xd4, 0xb8, 0xfd, 0xbf, 0xf6, + 0x14, 0xfb, 0xbf, 0x0e, 0x4d, 0x5b, 0xf7, 0x03, 0xf1, 0x36, 0x4d, 0xf9, 0x39, 0xff, 0xb1, 0xb3, + 0x2d, 0xbc, 0x6c, 0x31, 0x8f, 0xb4, 0xdb, 0xcd, 0x08, 0x06, 0xe3, 0x98, 0xc4, 0x84, 0x29, 0xf6, + 0x97, 0x7f, 0xda, 0xe6, 0x72, 0x20, 0x53, 0x9b, 0x9d, 0x47, 0x46, 0x68, 0xb6, 0xda, 0x8c, 0xe1, + 0x60, 0x02, 0x75, 0x84, 0x8b, 0x00, 0xc6, 0x71, 0x11, 0x90, 0x9f, 0x15, 0x9a, 0xd3, 0x71, 0xf8, + 0x5a, 0x9b, 0xfc, 0xb5, 0x86, 0x21, 0x92, 0x18, 0x27, 0x62, 0x92, 0x97, 0x8d, 0x8a, 0x81, 0xec, + 0x06, 0x55, 0x7d, 0x2a, 0x39, 0x2a, 0x76, 0x93, 0x64, 0x4c, 0xf3, 0x93, 0x6d, 0xb8, 0x1c, 0x16, + 0xc5, 0x9b, 0x31, 0xcd, 0x71, 0xc2, 0x98, 0xb5, 0xdd, 0x0c, 0x1e, 0xcc, 0xac, 0xc9, 0x0f, 0x81, + 0x0c, 0x3c, 0x8f, 0x3a, 0xc1, 0x6d, 0xdd, 0x3f, 0x90, 0xc1, 0x6f, 0xd1, 0x21, 0x90, 0x88, 0x84, + 0x71, 0x3e, 0xb2, 0x04, 0x20, 0xe0, 0x78, 0xad, 0xd9, 0x64, 0x7c, 0xe9, 0x6e, 0x48, 0xc1, 0x18, + 0x97, 0xf6, 0x51, 0x03, 0x9a, 0xf7, 0xf4, 0xc0, 0x3a, 0xa2, 0xdc, 0x9f, 0x77, 0x31, 0x4e, 0x95, + 0xbf, 0x5a, 0x80, 0x2b, 0xc9, 0xa0, 0xcd, 0x0b, 0xf4, 0xac, 0xf0, 0x64, 0x3f, 0x98, 0x29, 0x0d, + 0x47, 0xb4, 0x82, 0xfb, 0x58, 0x86, 0x62, 0x40, 0x2f, 0xda, 0xc7, 0xd2, 0x19, 0x25, 0x10, 0x47, + 0xb7, 0xe5, 0xd3, 0xe2, 0x63, 0x79, 0xbe, 0xb3, 0x53, 0xa6, 0x3c, 0x40, 0xb5, 0xe7, 0xc6, 0x03, + 0x54, 0x7f, 0x2e, 0xd4, 0xee, 0x7e, 0xcc, 0x03, 0xd4, 0xc8, 0x19, 0x89, 0x24, 0xcf, 0x39, 0x08, + 0xb4, 0x51, 0x9e, 0x24, 0x9e, 0xa2, 0x40, 0x59, 0xe6, 0x99, 0xb6, 0xba, 0xa7, 0xfb, 0x96, 0x21, + 0xd5, 0x8e, 0x1c, 0xd9, 0x78, 0x55, 0x96, 0x3e, 0x11, 0xb0, 0xc0, 0xff, 0xa2, 0xc0, 0x8e, 0x92, + 0x12, 0x16, 0x73, 0x25, 0x25, 0x24, 0x2b, 0x50, 0x76, 0x0e, 0xe9, 0xf1, 0xf9, 0x0e, 0xfb, 0xf3, + 0x5d, 0xd8, 0xbd, 0xbb, 0xf4, 0x18, 0x79, 0x65, 
0xed, 0xbb, 0x45, 0x00, 0xf6, 0xf8, 0x67, 0xf3, + 0xc5, 0xfc, 0x38, 0xd4, 0xfc, 0x01, 0xb7, 0x9a, 0x48, 0x85, 0x29, 0x0a, 0xdf, 0x12, 0xc5, 0xa8, + 0xe8, 0xe4, 0xf3, 0x50, 0xf9, 0xd6, 0x80, 0x0e, 0x54, 0x60, 0x41, 0xa8, 0xb8, 0x7f, 0x9d, 0x15, + 0xa2, 0xa0, 0x5d, 0x9c, 0x5d, 0x55, 0xf9, 0x6c, 0x2a, 0x17, 0xe5, 0xb3, 0x69, 0x40, 0xed, 0x9e, + 0xcb, 0xa3, 0x41, 0xb5, 0xff, 0x56, 0x04, 0x88, 0xa2, 0xed, 0xc8, 0xaf, 0x17, 0xe0, 0xa5, 0xf0, + 0x83, 0x0b, 0xc4, 0xfe, 0x8b, 0x27, 0xc0, 0xce, 0xed, 0xbf, 0xc9, 0xfa, 0xd8, 0xf9, 0x0c, 0xb4, + 0x9d, 0x25, 0x0e, 0xb3, 0x5b, 0x41, 0x10, 0xea, 0xb4, 0xd7, 0x0f, 0x8e, 0x57, 0x2d, 0x4f, 0x8e, + 0xc0, 0xcc, 0xa0, 0xce, 0x5b, 0x92, 0x47, 0x54, 0x95, 0x46, 0x02, 0xfe, 0x11, 0x29, 0x0a, 0x86, + 0x38, 0xe4, 0x00, 0xea, 0x8e, 0xfb, 0xbe, 0xcf, 0xba, 0x43, 0x0e, 0xc7, 0xb7, 0xc7, 0xef, 0x72, + 0xd1, 0xad, 0xc2, 0xde, 0x2f, 0xff, 0x60, 0xcd, 0x91, 0x9d, 0xfd, 0x6b, 0x45, 0xb8, 0x94, 0xd1, + 0x0f, 0xe4, 0x6d, 0x98, 0x93, 0x81, 0x8d, 0x51, 0x26, 0xf8, 0x42, 0x94, 0x09, 0xbe, 0x93, 0xa2, + 0xe1, 0x10, 0x37, 0x79, 0x1f, 0x40, 0x37, 0x0c, 0xea, 0xfb, 0x5b, 0xae, 0xa9, 0xf6, 0x03, 0x6f, + 0x31, 0xf5, 0x65, 0x39, 0x2c, 0x7d, 0x7c, 0xd2, 0xfa, 0xa9, 0xac, 0x58, 0xe5, 0x54, 0x3f, 0x47, + 0x15, 0x30, 0x06, 0x49, 0xbe, 0x09, 0x20, 0x36, 0xe1, 0x61, 0x3a, 0x85, 0xa7, 0x58, 0xae, 0x16, + 0x54, 0xb6, 0xae, 0x85, 0xaf, 0x0f, 0x74, 0x27, 0xb0, 0x82, 0x63, 0x91, 0xbd, 0xe6, 0x41, 0x88, + 0x82, 0x31, 0x44, 0xed, 0x9f, 0x16, 0xa1, 0xae, 0x6c, 0xe6, 0xcf, 0xc0, 0x50, 0xda, 0x4d, 0x18, + 0x4a, 0x27, 0x14, 0x9d, 0x9c, 0x65, 0x26, 0x75, 0x53, 0x66, 0xd2, 0xf5, 0xfc, 0xa2, 0x9e, 0x6c, + 0x24, 0xfd, 0xad, 0x22, 0xcc, 0x28, 0xd6, 0xbc, 0x26, 0xd2, 0xaf, 0xc2, 0xac, 0x88, 0x2a, 0xd8, + 0xd2, 0x1f, 0x89, 0x44, 0x3e, 0xbc, 0xc3, 0xca, 0x22, 0x20, 0xb8, 0x9d, 0x24, 0x61, 0x9a, 0x97, + 0x0d, 0x6b, 0x51, 0xb4, 0xcb, 0x36, 0x61, 0xc2, 0x0f, 0x29, 0xf6, 0x9b, 0x7c, 0x58, 0xb7, 0x53, + 0x34, 0x1c, 0xe2, 0x4e, 0xdb, 0x68, 0xcb, 0x17, 0x60, 0xa3, 0xfd, 0xf7, 0x05, 0x98, 
0x8a, 0xfa, + 0xeb, 0xc2, 0x2d, 0xb4, 0xfb, 0x49, 0x0b, 0xed, 0x72, 0xee, 0xe1, 0x30, 0xc2, 0x3e, 0xfb, 0x17, + 0x6b, 0x90, 0x08, 0x92, 0x27, 0x7b, 0x70, 0xcd, 0xca, 0x0c, 0xf5, 0x8b, 0xcd, 0x36, 0xe1, 0xa9, + 0xef, 0x8d, 0x91, 0x9c, 0xf8, 0x04, 0x14, 0x32, 0x80, 0xfa, 0x11, 0xf5, 0x02, 0xcb, 0xa0, 0xea, + 0xf9, 0xd6, 0x73, 0xab, 0x64, 0xd2, 0x0a, 0x1d, 0xf6, 0xe9, 0x03, 0x29, 0x00, 0x43, 0x51, 0x64, + 0x0f, 0x2a, 0xd4, 0xec, 0x52, 0x95, 0x5a, 0x29, 0x67, 0x8a, 0xda, 0xb0, 0x3f, 0xd9, 0x3f, 0x1f, + 0x05, 0x34, 0xf1, 0xa1, 0x61, 0x2b, 0x2f, 0xa3, 0x1c, 0x87, 0xe3, 0x2b, 0x58, 0xa1, 0xbf, 0x32, + 0xca, 0xba, 0x10, 0x16, 0x61, 0x24, 0x87, 0x1c, 0x86, 0xe6, 0xce, 0xca, 0x84, 0x26, 0x8f, 0x27, + 0x18, 0x3b, 0x7d, 0x68, 0x3c, 0xd4, 0x03, 0xea, 0xf5, 0x74, 0xef, 0x50, 0xee, 0x36, 0xc6, 0x7f, + 0xc2, 0x77, 0x14, 0x52, 0xf4, 0x84, 0x61, 0x11, 0x46, 0x72, 0x88, 0x0b, 0x8d, 0x40, 0xaa, 0xcf, + 0xca, 0xa6, 0x3b, 0xbe, 0x50, 0xa5, 0x88, 0xfb, 0x32, 0x58, 0x5e, 0xfd, 0xc5, 0x48, 0x06, 0x39, + 0x4a, 0xe4, 0x33, 0x17, 0x59, 0xec, 0xdb, 0x39, 0x7c, 0x03, 0x12, 0x2a, 0x5a, 0x6e, 0xb2, 0xf3, + 0xa2, 0x6b, 0xff, 0xb3, 0x12, 0x4d, 0xcb, 0xcf, 0xda, 0x4e, 0xf8, 0xa5, 0xa4, 0x9d, 0xf0, 0x7a, + 0xda, 0x4e, 0x98, 0x72, 0x56, 0x9f, 0x3f, 0xbc, 0x36, 0x65, 0x5e, 0x2b, 0x5f, 0x80, 0x79, 0xed, + 0x35, 0x68, 0x1e, 0xf1, 0x99, 0x40, 0xe4, 0x69, 0xaa, 0xf0, 0x65, 0x84, 0xcf, 0xec, 0x0f, 0xa2, + 0x62, 0x8c, 0xf3, 0xb0, 0x2a, 0xf2, 0x06, 0x97, 0x30, 0xa5, 0xb1, 0xac, 0xd2, 0x89, 0x8a, 0x31, + 0xce, 0xc3, 0x23, 0xf3, 0x2c, 0xe7, 0x50, 0x54, 0xa8, 0xf1, 0x0a, 0x22, 0x32, 0x4f, 0x15, 0x62, + 0x44, 0x27, 0x37, 0xa1, 0x3e, 0x30, 0xf7, 0x05, 0x6f, 0x9d, 0xf3, 0x72, 0x0d, 0x73, 0x77, 0x75, + 0x4d, 0xe6, 0x8d, 0x52, 0x54, 0xd6, 0x92, 0x9e, 0xde, 0x57, 0x04, 0xbe, 0x37, 0x94, 0x2d, 0xd9, + 0x8a, 0x8a, 0x31, 0xce, 0x43, 0x7e, 0x06, 0x66, 0x3c, 0x6a, 0x0e, 0x0c, 0x1a, 0xd6, 0x02, 0x5e, + 0x4b, 0x26, 0xd4, 0x8c, 0x53, 0x30, 0xc5, 0x39, 0xc2, 0x48, 0xd8, 0x1c, 0xcb, 0x48, 0xf8, 0x35, + 0x98, 0x31, 0x3d, 0xdd, 
0x72, 0xa8, 0x79, 0xdf, 0xe1, 0x11, 0x09, 0x32, 0x3e, 0x30, 0xb4, 0x90, + 0xaf, 0x26, 0xa8, 0x98, 0xe2, 0xd6, 0xfe, 0x65, 0x11, 0x2a, 0x22, 0xcd, 0xe7, 0x06, 0x5c, 0xb2, + 0x1c, 0x2b, 0xb0, 0x74, 0x7b, 0x95, 0xda, 0xfa, 0x71, 0x32, 0x2a, 0xe3, 0x65, 0xb6, 0xd1, 0xde, + 0x18, 0x26, 0x63, 0x56, 0x1d, 0xd6, 0x39, 0x81, 0x58, 0xbe, 0x15, 0x8a, 0xb0, 0xa3, 0x89, 0xdc, + 0xd0, 0x09, 0x0a, 0xa6, 0x38, 0x99, 0x32, 0xd4, 0xcf, 0x08, 0xb9, 0xe0, 0xca, 0x50, 0x32, 0xd0, + 0x22, 0xc9, 0xc7, 0x95, 0xf4, 0x01, 0x57, 0x88, 0xc3, 0x53, 0x38, 0x32, 0xaa, 0x4a, 0x28, 0xe9, + 0x29, 0x1a, 0x0e, 0x71, 0x33, 0x84, 0x7d, 0xdd, 0xb2, 0x07, 0x1e, 0x8d, 0x10, 0x2a, 0x11, 0xc2, + 0x5a, 0x8a, 0x86, 0x43, 0xdc, 0xda, 0x7f, 0x2f, 0x00, 0x19, 0x3e, 0x57, 0x40, 0x0e, 0xa0, 0xea, + 0x70, 0x5b, 0x64, 0xee, 0x94, 0xf4, 0x31, 0x93, 0xa6, 0x58, 0x24, 0x64, 0x81, 0xc4, 0x27, 0x0e, + 0xd4, 0xe9, 0xa3, 0x80, 0x7a, 0x4e, 0x78, 0xce, 0x68, 0x32, 0xe9, 0xef, 0xc5, 0xde, 0x4c, 0x22, + 0x63, 0x28, 0x43, 0xfb, 0xbd, 0x22, 0x34, 0x63, 0x7c, 0x4f, 0xdb, 0xe2, 0xf3, 0x54, 0x07, 0xc2, + 0x04, 0xb8, 0xeb, 0xd9, 0x72, 0xbe, 0x8b, 0xa5, 0x3a, 0x90, 0x24, 0xdc, 0xc4, 0x38, 0x1f, 0x59, + 0x02, 0xe8, 0xe9, 0x7e, 0x40, 0x3d, 0xae, 0x0b, 0xa5, 0x12, 0x0c, 0x6c, 0x85, 0x14, 0x8c, 0x71, + 0x91, 0x1b, 0xf2, 0x02, 0x83, 0x72, 0x32, 0x21, 0xe4, 0x88, 0xdb, 0x09, 0x2a, 0x13, 0xb8, 0x9d, + 0x80, 0x74, 0x61, 0x4e, 0xb5, 0x5a, 0x51, 0xcf, 0x97, 0x2e, 0x50, 0x0c, 0xd4, 0x14, 0x04, 0x0e, + 0x81, 0x6a, 0xdf, 0x2d, 0xc0, 0x74, 0xc2, 0x00, 0x25, 0x52, 0x39, 0xaa, 0x53, 0x31, 0x89, 0x54, + 0x8e, 0xb1, 0xc3, 0x2c, 0x5f, 0x84, 0xaa, 0xe8, 0xa0, 0x74, 0xb0, 0xab, 0xe8, 0x42, 0x94, 0x54, + 0xb6, 0xb2, 0x48, 0x13, 0x77, 0x7a, 0x65, 0x91, 0x36, 0x70, 0x54, 0x74, 0xe1, 0x39, 0x12, 0xad, + 0x93, 0x3d, 0x1d, 0xf3, 0x1c, 0x89, 0x72, 0x0c, 0x39, 0xb4, 0x7f, 0xc8, 0xdb, 0x1d, 0x78, 0xc7, + 0xe1, 0xce, 0xba, 0x0b, 0x35, 0x19, 0xe0, 0x28, 0x3f, 0x8d, 0xb7, 0x73, 0x58, 0xc5, 0x38, 0x8e, + 0x0c, 0xe5, 0xd3, 0x8d, 0xc3, 0xfb, 0xfb, 0xfb, 0xa8, 0xd0, 
0xc9, 0x2d, 0x68, 0xb8, 0x8e, 0xfc, + 0x82, 0xe5, 0xe3, 0xff, 0x18, 0x5b, 0x39, 0xee, 0xab, 0xc2, 0xc7, 0x27, 0xad, 0x2b, 0xe1, 0x9f, + 0x44, 0x23, 0x31, 0xaa, 0xa9, 0xfd, 0xd9, 0x02, 0xbc, 0x84, 0xae, 0x6d, 0x5b, 0x4e, 0x37, 0xe9, + 0x7a, 0x24, 0x36, 0xcc, 0xf4, 0xf4, 0x47, 0xbb, 0x8e, 0x7e, 0xa4, 0x5b, 0xb6, 0xbe, 0x67, 0xd3, + 0xa7, 0xee, 0x8c, 0x07, 0x81, 0x65, 0x2f, 0x88, 0x0b, 0x1d, 0x17, 0x36, 0x9c, 0xe0, 0xbe, 0xd7, + 0x09, 0x3c, 0xcb, 0xe9, 0x8a, 0x59, 0x72, 0x2b, 0x81, 0x85, 0x29, 0x6c, 0xed, 0xf7, 0x4b, 0xc0, + 0x83, 0xec, 0xc8, 0x97, 0xa1, 0xd1, 0xa3, 0xc6, 0x81, 0xee, 0x58, 0xbe, 0x4a, 0x8a, 0x7b, 0x95, + 0x3d, 0xd7, 0x96, 0x2a, 0x7c, 0xcc, 0x5e, 0xc5, 0x72, 0x67, 0x93, 0x9f, 0x63, 0x89, 0x78, 0x89, + 0x01, 0xd5, 0xae, 0xef, 0xeb, 0x7d, 0x2b, 0x77, 0x8c, 0x87, 0x48, 0x42, 0x2a, 0xa6, 0x23, 0xf1, + 0x1b, 0x25, 0x34, 0x31, 0xa0, 0xd2, 0xb7, 0x75, 0xcb, 0xc9, 0x7d, 0x01, 0x19, 0x7b, 0x82, 0x6d, + 0x86, 0x24, 0x4c, 0x95, 0xfc, 0x27, 0x0a, 0x6c, 0x32, 0x80, 0xa6, 0x6f, 0x78, 0x7a, 0xcf, 0x3f, + 0xd0, 0x97, 0x5e, 0x7f, 0x23, 0xb7, 0xf2, 0x1f, 0x89, 0x12, 0xba, 0xc8, 0x0a, 0x2e, 0x6f, 0x75, + 0x6e, 0x2f, 0x2f, 0xbd, 0xfe, 0x06, 0xc6, 0xe5, 0xc4, 0xc5, 0xbe, 0xfe, 0xda, 0x92, 0x9c, 0x41, + 0x26, 0x2e, 0xf6, 0xf5, 0xd7, 0x96, 0x30, 0x2e, 0x47, 0xfb, 0x5f, 0x05, 0x68, 0x84, 0xbc, 0x64, + 0x17, 0x80, 0xcd, 0x65, 0x32, 0x6d, 0xe8, 0xb9, 0x2e, 0x6b, 0xe1, 0xd6, 0x9e, 0xdd, 0xb0, 0x32, + 0xc6, 0x80, 0x32, 0xf2, 0xaa, 0x16, 0x27, 0x9d, 0x57, 0x75, 0x11, 0x1a, 0x07, 0xba, 0x63, 0xfa, + 0x07, 0xfa, 0xa1, 0x98, 0xd2, 0x63, 0x99, 0x86, 0x6f, 0x2b, 0x02, 0x46, 0x3c, 0xda, 0x3f, 0xae, + 0x82, 0x08, 0xcc, 0x60, 0x93, 0x8e, 0x69, 0xf9, 0xe2, 0x64, 0x40, 0x81, 0xd7, 0x0c, 0x27, 0x9d, + 0x55, 0x59, 0x8e, 0x21, 0x07, 0xb9, 0x0a, 0xa5, 0x9e, 0xe5, 0x48, 0x0d, 0x84, 0x1b, 0x72, 0xb7, + 0x2c, 0x07, 0x59, 0x19, 0x27, 0xe9, 0x8f, 0xa4, 0x86, 0x21, 0x48, 0xfa, 0x23, 0x64, 0x65, 0xe4, + 0xab, 0x30, 0x6b, 0xbb, 0xee, 0x21, 0x9b, 0x3e, 0x94, 0x22, 0x22, 0xbc, 0xea, 0xdc, 0xb4, 0xb2, + 
0x99, 0x24, 0x61, 0x9a, 0x97, 0xec, 0xc2, 0xcb, 0x1f, 0x52, 0xcf, 0x95, 0xf3, 0x65, 0xc7, 0xa6, + 0xb4, 0xaf, 0x60, 0x84, 0x6a, 0xcc, 0x43, 0x48, 0x7f, 0x3e, 0x9b, 0x05, 0x47, 0xd5, 0xe5, 0xc1, + 0xe8, 0xba, 0xd7, 0xa5, 0xc1, 0xb6, 0xe7, 0x32, 0xdd, 0xc5, 0x72, 0xba, 0x0a, 0xb6, 0x1a, 0xc1, + 0xee, 0x64, 0xb3, 0xe0, 0xa8, 0xba, 0xe4, 0x5d, 0x98, 0x17, 0x24, 0xa1, 0xb6, 0x2c, 0x8b, 0x69, + 0xc6, 0xb2, 0xd5, 0xbd, 0x9d, 0xd3, 0xc2, 0x5f, 0xb6, 0x33, 0x82, 0x07, 0x47, 0xd6, 0x26, 0x77, + 0x60, 0x4e, 0x79, 0x4b, 0xb7, 0xa9, 0xd7, 0x09, 0x83, 0x75, 0xa6, 0xdb, 0xd7, 0x4f, 0x4f, 0x5a, + 0xd7, 0x56, 0x69, 0xdf, 0xa3, 0x46, 0xdc, 0xeb, 0xac, 0xb8, 0x70, 0xa8, 0x1e, 0x41, 0xb8, 0xc2, + 0x23, 0x72, 0x76, 0xfb, 0x2b, 0xae, 0x6b, 0x9b, 0xee, 0x43, 0x47, 0x3d, 0xbb, 0x50, 0xd8, 0xb9, + 0x83, 0xb4, 0x93, 0xc9, 0x81, 0x23, 0x6a, 0xb2, 0x27, 0xe7, 0x94, 0x55, 0xf7, 0xa1, 0x93, 0x46, + 0x85, 0xe8, 0xc9, 0x3b, 0x23, 0x78, 0x70, 0x64, 0x6d, 0xb2, 0x06, 0x24, 0xfd, 0x04, 0xbb, 0x7d, + 0xe9, 0xc2, 0xbf, 0x22, 0x32, 0x00, 0xa5, 0xa9, 0x98, 0x51, 0x83, 0x6c, 0xc2, 0xe5, 0x74, 0x29, + 0x13, 0x27, 0xbd, 0xf9, 0x3c, 0xf7, 0x2f, 0x66, 0xd0, 0x31, 0xb3, 0x96, 0xf6, 0x4f, 0x8a, 0x30, + 0x9d, 0x48, 0x19, 0xf1, 0xdc, 0x1d, 0xcd, 0x67, 0x9b, 0x87, 0x9e, 0xdf, 0xdd, 0x58, 0xbd, 0x4d, + 0x75, 0x93, 0x7a, 0x77, 0xa9, 0x4a, 0xef, 0x21, 0x96, 0xc5, 0x04, 0x05, 0x53, 0x9c, 0x64, 0x1f, + 0x2a, 0xc2, 0x4f, 0x90, 0xf7, 0xda, 0x1f, 0xd5, 0x47, 0xdc, 0x59, 0x20, 0xef, 0xca, 0x72, 0x3d, + 0x8a, 0x02, 0x5e, 0x0b, 0x60, 0x2a, 0xce, 0xc1, 0x26, 0x92, 0x48, 0xed, 0xad, 0x25, 0x54, 0xde, + 0x0d, 0x28, 0x05, 0xc1, 0xb8, 0x87, 0xfe, 0x85, 0xdf, 0x69, 0x67, 0x13, 0x19, 0x86, 0xb6, 0xcf, + 0xde, 0x9d, 0xef, 0x5b, 0xae, 0x23, 0x33, 0xc0, 0xef, 0x42, 0x4d, 0xee, 0x9e, 0xc6, 0x4c, 0x5a, + 0xc0, 0x75, 0x25, 0x65, 0x76, 0x55, 0x58, 0xda, 0x7f, 0x28, 0x42, 0x23, 0x34, 0x93, 0x9c, 0x21, + 0xb3, 0xba, 0x0b, 0x8d, 0x30, 0xa2, 0x30, 0xf7, 0x9d, 0xa6, 0x51, 0xa0, 0x1b, 0xdf, 0xd9, 0x87, + 0x7f, 0x31, 0x92, 0x11, 0x8f, 0x56, 
0x2c, 0xe5, 0x88, 0x56, 0xec, 0x43, 0x2d, 0xf0, 0xac, 0x6e, + 0x57, 0xee, 0x12, 0xf2, 0x84, 0x2b, 0x86, 0xdd, 0xb5, 0x23, 0x00, 0x65, 0xcf, 0x8a, 0x3f, 0xa8, + 0xc4, 0x68, 0x1f, 0xc0, 0x5c, 0x9a, 0x93, 0xab, 0xd0, 0xc6, 0x01, 0x35, 0x07, 0xb6, 0xea, 0xe3, + 0x48, 0x85, 0x96, 0xe5, 0x18, 0x72, 0x90, 0x9b, 0x50, 0x67, 0xaf, 0xe9, 0x43, 0xd7, 0x51, 0x6a, + 0x2c, 0xdf, 0x8d, 0xec, 0xc8, 0x32, 0x0c, 0xa9, 0xda, 0x7f, 0x2d, 0xc1, 0xd5, 0xc8, 0xd8, 0xb5, + 0xa5, 0x3b, 0x7a, 0xf7, 0x0c, 0x17, 0x59, 0x7e, 0x76, 0x0c, 0xec, 0xbc, 0xd7, 0x63, 0x94, 0x9e, + 0x83, 0xeb, 0x31, 0xfe, 0x4f, 0x11, 0x78, 0xf4, 0x33, 0xf9, 0x36, 0x4c, 0xe9, 0xb1, 0x3b, 0x8c, + 0xe5, 0xeb, 0xbc, 0x95, 0xfb, 0x75, 0xf2, 0x20, 0xeb, 0x30, 0x00, 0x2e, 0x5e, 0x8a, 0x09, 0x81, + 0xc4, 0x85, 0xfa, 0xbe, 0x6e, 0xdb, 0x4c, 0x17, 0xca, 0xed, 0xbc, 0x4b, 0x08, 0xe7, 0xc3, 0x7c, + 0x4d, 0x42, 0x63, 0x28, 0x84, 0x7c, 0x54, 0x80, 0x69, 0x2f, 0xbe, 0x5d, 0x93, 0x2f, 0x24, 0x4f, + 0x68, 0x47, 0x0c, 0x2d, 0x1e, 0x6e, 0x17, 0xdf, 0x13, 0x26, 0x65, 0x6a, 0xff, 0xa5, 0x00, 0xd3, + 0x1d, 0xdb, 0x32, 0x2d, 0xa7, 0x7b, 0x81, 0xb7, 0x73, 0xdc, 0x87, 0x8a, 0x6f, 0x5b, 0x26, 0x1d, + 0x73, 0x35, 0x11, 0xeb, 0x18, 0x03, 0x40, 0x81, 0x93, 0xbc, 0xee, 0xa3, 0x74, 0x86, 0xeb, 0x3e, + 0xfe, 0xb0, 0x0a, 0x32, 0x8e, 0x9f, 0x0c, 0xa0, 0xd1, 0x55, 0xb7, 0x08, 0xc8, 0x67, 0xbc, 0x9d, + 0x23, 0x03, 0x65, 0xe2, 0x3e, 0x02, 0x31, 0xf7, 0x87, 0x85, 0x18, 0x49, 0x22, 0x34, 0x79, 0x79, + 0xf6, 0x6a, 0xce, 0xcb, 0xb3, 0x85, 0xb8, 0xe1, 0xeb, 0xb3, 0x75, 0x28, 0x1f, 0x04, 0x41, 0x5f, + 0x0e, 0xa6, 0xf1, 0x0f, 0x6a, 0x44, 0x49, 0x90, 0x84, 0x4e, 0xc4, 0xfe, 0x23, 0x87, 0x66, 0x22, + 0x1c, 0x3d, 0xbc, 0xa2, 0x70, 0x25, 0x57, 0x18, 0x49, 0x5c, 0x04, 0xfb, 0x8f, 0x1c, 0x9a, 0xfc, + 0x22, 0x34, 0x03, 0x4f, 0x77, 0xfc, 0x7d, 0xd7, 0xeb, 0x51, 0x4f, 0xee, 0x51, 0xd7, 0x72, 0xdc, + 0x1f, 0xbd, 0x13, 0xa1, 0x09, 0x93, 0x6c, 0xa2, 0x08, 0xe3, 0xd2, 0xc8, 0x21, 0xd4, 0x07, 0xa6, + 0x68, 0x98, 0x34, 0x83, 0x2d, 0xe7, 0xb9, 0x12, 0x3c, 0x16, 0x24, 0xa2, 
0xfe, 0x61, 0x28, 0x20, + 0x79, 0x1b, 0x67, 0x6d, 0x52, 0xb7, 0x71, 0xc6, 0x47, 0x63, 0x56, 0x86, 0x16, 0xd2, 0x93, 0x7a, + 0xad, 0xd3, 0x95, 0x31, 0x6e, 0x6b, 0xb9, 0x55, 0x4e, 0x21, 0xb2, 0x19, 0xea, 0xc6, 0x4e, 0x17, + 0x95, 0x0c, 0xad, 0x07, 0xd2, 0x77, 0x44, 0x8c, 0xc4, 0x4d, 0x46, 0xe2, 0xd8, 0xe0, 0xe2, 0xd9, + 0xe6, 0x83, 0xf0, 0x4a, 0x9d, 0x58, 0x26, 0xf5, 0xcc, 0x2b, 0x8b, 0xb4, 0xff, 0x58, 0x84, 0xd2, + 0xce, 0x66, 0x47, 0x64, 0x47, 0xe5, 0x77, 0xa3, 0xd1, 0xce, 0xa1, 0xd5, 0x7f, 0x40, 0x3d, 0x6b, + 0xff, 0x58, 0x6e, 0xbd, 0x63, 0xd9, 0x51, 0xd3, 0x1c, 0x98, 0x51, 0x8b, 0xbc, 0x07, 0x53, 0x86, + 0xbe, 0x42, 0xbd, 0x60, 0x1c, 0xc3, 0x02, 0x3f, 0x1f, 0xbd, 0xb2, 0x1c, 0x55, 0xc7, 0x04, 0x18, + 0xd9, 0x05, 0x30, 0x22, 0xe8, 0xd2, 0xb9, 0xcd, 0x21, 0x31, 0xe0, 0x18, 0x10, 0x41, 0x68, 0x1c, + 0x32, 0x56, 0x8e, 0x5a, 0x3e, 0x0f, 0x2a, 0x1f, 0x39, 0x77, 0x55, 0x5d, 0x8c, 0x60, 0x34, 0x07, + 0xa6, 0x13, 0xd7, 0x1b, 0x91, 0xaf, 0x40, 0xdd, 0xed, 0xc7, 0xa6, 0xd3, 0x06, 0x8f, 0xa6, 0xad, + 0xdf, 0x97, 0x65, 0x8f, 0x4f, 0x5a, 0xd3, 0x9b, 0x6e, 0xd7, 0x32, 0x54, 0x01, 0x86, 0xec, 0x44, + 0x83, 0x2a, 0x3f, 0xd4, 0xa8, 0x2e, 0x37, 0xe2, 0x6b, 0x07, 0xbf, 0x7f, 0xc4, 0x47, 0x49, 0xd1, + 0x7e, 0xa9, 0x0c, 0x91, 0xc7, 0x95, 0xf8, 0x50, 0x15, 0x87, 0x36, 0xe4, 0xcc, 0x7d, 0xa1, 0xe7, + 0x43, 0xa4, 0x28, 0xd2, 0x85, 0xd2, 0x07, 0xee, 0x5e, 0xee, 0x89, 0x3b, 0x96, 0xcd, 0x40, 0xd8, + 0xca, 0x62, 0x05, 0xc8, 0x24, 0x90, 0xbf, 0x56, 0x80, 0x17, 0xfd, 0xb4, 0xea, 0x2b, 0x87, 0x03, + 0xe6, 0xd7, 0xf1, 0xd3, 0xca, 0xb4, 0x0c, 0x7b, 0x1e, 0x45, 0xc6, 0xe1, 0xb6, 0xb0, 0xfe, 0x17, + 0xae, 0x50, 0x39, 0x9c, 0xd6, 0x73, 0x5e, 0xbe, 0x9a, 0xec, 0xff, 0x64, 0x19, 0x4a, 0x51, 0xda, + 0xaf, 0x14, 0xa1, 0x19, 0x9b, 0xad, 0x73, 0xdf, 0x99, 0xf5, 0x28, 0x75, 0x67, 0xd6, 0xf6, 0xf8, + 0x91, 0x01, 0x51, 0xab, 0x2e, 0xfa, 0xda, 0xac, 0x7f, 0x5e, 0x84, 0xd2, 0xee, 0xea, 0x5a, 0x72, + 0xd3, 0x5a, 0x78, 0x06, 0x9b, 0xd6, 0x03, 0xa8, 0xed, 0x0d, 0x2c, 0x3b, 0xb0, 0x9c, 0xdc, 0xf9, + 0x56, 0xd4, 
0x15, 0x63, 0xd2, 0xd7, 0x21, 0x50, 0x51, 0xc1, 0x93, 0x2e, 0xd4, 0xba, 0x22, 0xe1, + 0x65, 0xee, 0x78, 0x49, 0x99, 0x38, 0x53, 0x08, 0x92, 0x7f, 0x50, 0xa1, 0x6b, 0xc7, 0x50, 0xdd, + 0x5d, 0x95, 0x6a, 0xff, 0xb3, 0xed, 0x4d, 0xed, 0x17, 0x21, 0xd4, 0x02, 0x9e, 0xbd, 0xf0, 0xdf, + 0x29, 0x40, 0x52, 0xf1, 0x79, 0xf6, 0xa3, 0xe9, 0x30, 0x3d, 0x9a, 0x56, 0x27, 0xf1, 0xf1, 0x65, + 0x0f, 0x28, 0xed, 0xdf, 0x15, 0x20, 0x75, 0xd2, 0x8e, 0xbc, 0x21, 0x73, 0xa7, 0x25, 0x03, 0xd3, + 0x54, 0xee, 0x34, 0x92, 0xe4, 0x8e, 0xe5, 0x50, 0xfb, 0x98, 0x6d, 0xd7, 0xe2, 0x0e, 0x34, 0xd9, + 0xfc, 0x7b, 0xe3, 0x6f, 0xd7, 0xb2, 0xdc, 0x71, 0x32, 0x78, 0x32, 0x4e, 0xc2, 0xa4, 0x5c, 0xed, + 0x1f, 0x15, 0xa1, 0xfa, 0xcc, 0x0e, 0xfe, 0xd3, 0x44, 0x3c, 0xeb, 0x4a, 0xce, 0xd9, 0x7e, 0x64, + 0x34, 0x6b, 0x2f, 0x15, 0xcd, 0x9a, 0xf7, 0x4e, 0xef, 0xa7, 0xc4, 0xb2, 0xfe, 0x9b, 0x02, 0xc8, + 0xb5, 0x66, 0xc3, 0xf1, 0x03, 0xdd, 0x31, 0x28, 0x31, 0xc2, 0x85, 0x2d, 0x6f, 0xd0, 0x94, 0x0c, + 0x2c, 0x14, 0xba, 0x0c, 0xff, 0xad, 0x16, 0x32, 0xf2, 0x93, 0x50, 0x3f, 0x70, 0xfd, 0x80, 0x2f, + 0x5e, 0xc5, 0xa4, 0xc9, 0xec, 0xb6, 0x2c, 0xc7, 0x90, 0x23, 0xed, 0xce, 0xae, 0x8c, 0x76, 0x67, + 0x6b, 0xbf, 0x59, 0x84, 0xa9, 0x4f, 0x4b, 0xf6, 0x82, 0xac, 0xe8, 0xdf, 0x52, 0xce, 0xe8, 0xdf, + 0xf2, 0x79, 0xa2, 0x7f, 0xb5, 0xef, 0x17, 0x00, 0x9e, 0x59, 0xea, 0x04, 0x33, 0x19, 0x98, 0x9b, + 0x7b, 0x5c, 0x65, 0x87, 0xe5, 0xfe, 0xfd, 0x8a, 0x7a, 0x24, 0x1e, 0x94, 0xfb, 0x71, 0x01, 0x66, + 0xf4, 0x44, 0xa0, 0x6b, 0x6e, 0x7d, 0x39, 0x15, 0x37, 0x1b, 0xc6, 0x69, 0x25, 0xcb, 0x31, 0x25, + 0x96, 0xbc, 0x19, 0xa5, 0xed, 0xbe, 0x17, 0x0d, 0xfb, 0xa1, 0x7c, 0xdb, 0x5c, 0x77, 0x4b, 0x70, + 0x3e, 0x25, 0xb0, 0xb8, 0x34, 0x91, 0xc0, 0xe2, 0xf8, 0x91, 0xc9, 0xf2, 0x13, 0x8f, 0x4c, 0x1e, + 0x41, 0x63, 0xdf, 0x73, 0x7b, 0x3c, 0x76, 0x57, 0x5e, 0x8c, 0x7d, 0x2b, 0xc7, 0x42, 0xd9, 0xdb, + 0xb3, 0x1c, 0x6a, 0xf2, 0xb8, 0xe0, 0xd0, 0x70, 0xb5, 0xa6, 0xf0, 0x31, 0x12, 0xc5, 0x6d, 0xfd, + 0xae, 0x90, 0x5a, 0x9d, 0xa4, 0xd4, 0x70, 0x2e, 
0xd9, 0x11, 0xe8, 0xa8, 0xc4, 0x24, 0xe3, 0x75, + 0x6b, 0xcf, 0x26, 0x5e, 0x57, 0xfb, 0x0b, 0x35, 0x35, 0x81, 0x3d, 0x77, 0x19, 0x62, 0x3f, 0x3b, + 0xe8, 0xde, 0xa5, 0x43, 0xa7, 0xd0, 0xeb, 0xcf, 0xf0, 0x14, 0x7a, 0x63, 0x32, 0xa7, 0xd0, 0x21, + 0xdf, 0x29, 0xf4, 0xe6, 0x84, 0x4e, 0xa1, 0x4f, 0x4d, 0xea, 0x14, 0xfa, 0xf4, 0x58, 0xa7, 0xd0, + 0x67, 0xce, 0x74, 0x0a, 0xfd, 0xa4, 0x04, 0xa9, 0xcd, 0xf8, 0x67, 0x8e, 0xb7, 0xff, 0xa7, 0x1c, + 0x6f, 0xdf, 0x29, 0x42, 0x34, 0x11, 0x9f, 0x33, 0x30, 0xe9, 0x5d, 0xa8, 0xf7, 0xf4, 0x47, 0x3c, + 0x70, 0x3a, 0xcf, 0xc5, 0xca, 0x5b, 0x12, 0x03, 0x43, 0x34, 0xe2, 0x03, 0x58, 0xe1, 0xe5, 0x06, + 0xb9, 0x5d, 0x18, 0xd1, 0x3d, 0x09, 0xc2, 0x48, 0x1a, 0xfd, 0xc7, 0x98, 0x18, 0xed, 0x5f, 0x17, + 0x41, 0xde, 0x82, 0x41, 0x28, 0x54, 0xf6, 0xad, 0x47, 0xd4, 0xcc, 0x1d, 0xee, 0x1c, 0xbb, 0xee, + 0x5e, 0xf8, 0x68, 0x78, 0x01, 0x0a, 0x74, 0x6e, 0x7c, 0x17, 0x3e, 0x37, 0xd9, 0x7f, 0x39, 0x8c, + 0xef, 0x71, 0xdf, 0x9d, 0x34, 0xbe, 0x8b, 0x22, 0x54, 0x32, 0x84, 0xad, 0x9f, 0x87, 0x5f, 0xe4, + 0x76, 0x31, 0x26, 0xc2, 0x38, 0x94, 0xad, 0xdf, 0x17, 0x69, 0x28, 0xa4, 0x8c, 0xf6, 0x2f, 0x7c, + 0xef, 0x87, 0xd7, 0x5f, 0xf8, 0xfe, 0x0f, 0xaf, 0xbf, 0xf0, 0x83, 0x1f, 0x5e, 0x7f, 0xe1, 0x97, + 0x4e, 0xaf, 0x17, 0xbe, 0x77, 0x7a, 0xbd, 0xf0, 0xfd, 0xd3, 0xeb, 0x85, 0x1f, 0x9c, 0x5e, 0x2f, + 0xfc, 0xa7, 0xd3, 0xeb, 0x85, 0xbf, 0xfc, 0x9f, 0xaf, 0xbf, 0xf0, 0xf3, 0x5f, 0x8e, 0x9a, 0xb0, + 0xa8, 0x9a, 0xb0, 0xa8, 0x04, 0x2e, 0xf6, 0x0f, 0xbb, 0x8b, 0xac, 0x09, 0x51, 0x89, 0x6a, 0xc2, + 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xea, 0x57, 0x1d, 0x48, 0x12, 0x9e, 0x00, 0x00, } func (m *AbstractPodTemplate) Marshal() (dAtA []byte, err error) { @@ -4053,6 +4091,30 @@ func (m *Container) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.LivenessProbe != nil { + { + size, err := m.LivenessProbe.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + 
dAtA[i] = 0x5a + } + if m.ReadinessProbe != nil { + { + size, err := m.ReadinessProbe.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } if m.ImagePullPolicy != nil { i -= len(*m.ImagePullPolicy) copy(dAtA[i:], *m.ImagePullPolicy) @@ -4170,6 +4232,30 @@ func (m *ContainerTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.LivenessProbe != nil { + { + size, err := m.LivenessProbe.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.ReadinessProbe != nil { + { + size, err := m.ReadinessProbe.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } if len(m.EnvFrom) > 0 { for iNdEx := len(m.EnvFrom) - 1; iNdEx >= 0; iNdEx-- { { @@ -7380,6 +7466,54 @@ func (m *PipelineStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *Probe) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Probe) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Probe) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.FailureThreshold != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.FailureThreshold)) + i-- + dAtA[i] = 0x28 + } + if m.SuccessThreshold != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.SuccessThreshold)) + i-- + dAtA[i] = 0x20 + } + if m.PeriodSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.PeriodSeconds)) + i-- + dAtA[i] = 0x18 + } + if m.TimeoutSeconds != nil { + i = 
encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x10 + } + if m.InitialDelaySeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.InitialDelaySeconds)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func (m *RedisBufferService) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -9715,6 +9849,14 @@ func (m *Container) Size() (n int) { l = len(*m.ImagePullPolicy) n += 1 + l + sovGenerated(uint64(l)) } + if m.ReadinessProbe != nil { + l = m.ReadinessProbe.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LivenessProbe != nil { + l = m.LivenessProbe.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -9744,6 +9886,14 @@ func (m *ContainerTemplate) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.ReadinessProbe != nil { + l = m.ReadinessProbe.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LivenessProbe != nil { + l = m.LivenessProbe.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -10892,6 +11042,30 @@ func (m *PipelineStatus) Size() (n int) { return n } +func (m *Probe) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.InitialDelaySeconds != nil { + n += 1 + sovGenerated(uint64(*m.InitialDelaySeconds)) + } + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + if m.PeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.PeriodSeconds)) + } + if m.SuccessThreshold != nil { + n += 1 + sovGenerated(uint64(*m.SuccessThreshold)) + } + if m.FailureThreshold != nil { + n += 1 + sovGenerated(uint64(*m.FailureThreshold)) + } + return n +} + func (m *RedisBufferService) Size() (n int) { if m == nil { return 0 @@ -11838,6 +12012,8 @@ func (this *Container) String() string { `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "ResourceRequirements", "v1.ResourceRequirements", 1), `&`, ``, 1) + `,`, `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", 
this.SecurityContext), "SecurityContext", "v1.SecurityContext", 1) + `,`, `ImagePullPolicy:` + valueToStringGenerated(this.ImagePullPolicy) + `,`, + `ReadinessProbe:` + strings.Replace(this.ReadinessProbe.String(), "Probe", "Probe", 1) + `,`, + `LivenessProbe:` + strings.Replace(this.LivenessProbe.String(), "Probe", "Probe", 1) + `,`, `}`, }, "") return s @@ -11862,6 +12038,8 @@ func (this *ContainerTemplate) String() string { `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "SecurityContext", "v1.SecurityContext", 1) + `,`, `Env:` + repeatedStringForEnv + `,`, `EnvFrom:` + repeatedStringForEnvFrom + `,`, + `ReadinessProbe:` + strings.Replace(this.ReadinessProbe.String(), "Probe", "Probe", 1) + `,`, + `LivenessProbe:` + strings.Replace(this.LivenessProbe.String(), "Probe", "Probe", 1) + `,`, `}`, }, "") return s @@ -12679,6 +12857,20 @@ func (this *PipelineStatus) String() string { }, "") return s } +func (this *Probe) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Probe{`, + `InitialDelaySeconds:` + valueToStringGenerated(this.InitialDelaySeconds) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `PeriodSeconds:` + valueToStringGenerated(this.PeriodSeconds) + `,`, + `SuccessThreshold:` + valueToStringGenerated(this.SuccessThreshold) + `,`, + `FailureThreshold:` + valueToStringGenerated(this.FailureThreshold) + `,`, + `}`, + }, "") + return s +} func (this *RedisBufferService) String() string { if this == nil { return "nil" @@ -15624,6 +15816,78 @@ func (m *Container) Unmarshal(dAtA []byte) error { s := k8s_io_api_core_v1.PullPolicy(dAtA[iNdEx:postIndex]) m.ImagePullPolicy = &s iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadinessProbe", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ReadinessProbe == nil { + m.ReadinessProbe = &Probe{} + } + if err := m.ReadinessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LivenessProbe", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LivenessProbe == nil { + m.LivenessProbe = &Probe{} + } + if err := m.LivenessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -15843,6 +16107,78 @@ func (m *ContainerTemplate) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadinessProbe", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.ReadinessProbe == nil { + m.ReadinessProbe = &Probe{} + } + if err := m.ReadinessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LivenessProbe", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LivenessProbe == nil { + m.LivenessProbe = &Probe{} + } + if err := m.LivenessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -26300,6 +26636,156 @@ func (m *PipelineStatus) Unmarshal(dAtA []byte) error { } return nil } +func (m *Probe) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Probe: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Probe: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InitialDelaySeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.InitialDelaySeconds = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PeriodSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PeriodSeconds = &v + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SuccessThreshold", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SuccessThreshold = &v + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FailureThreshold", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.FailureThreshold = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > 
l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *RedisBufferService) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/pkg/apis/numaflow/v1alpha1/generated.proto b/pkg/apis/numaflow/v1alpha1/generated.proto index 70936b9d47..035f4cf46b 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.proto +++ b/pkg/apis/numaflow/v1alpha1/generated.proto @@ -304,6 +304,12 @@ message Container { // +optional optional string imagePullPolicy = 9; + + // +optional + optional Probe readinessProbe = 10; + + // +optional + optional Probe livenessProbe = 11; } // ContainerTemplate defines customized spec for a container @@ -322,6 +328,12 @@ message ContainerTemplate { // +optional repeated k8s.io.api.core.v1.EnvFromSource envFrom = 5; + + // +optional + optional Probe readinessProbe = 6; + + // +optional + optional Probe livenessProbe = 7; } message DaemonTemplate { @@ -1240,6 +1252,33 @@ message PipelineStatus { optional bool drainedOnPause = 12; } +// Probe is used to customize the configuration for Readiness and Liveness probes. +message Probe { + // Number of seconds after the container has started before liveness probes are initiated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + optional int32 initialDelaySeconds = 1; + + // Number of seconds after which the probe times out. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + optional int32 timeoutSeconds = 2; + + // How often (in seconds) to perform the probe. + // +optional + optional int32 periodSeconds = 3; + + // Minimum consecutive successes for the probe to be considered successful after having failed. + // Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
+ // +optional + optional int32 successThreshold = 4; + + // Minimum consecutive failures for the probe to be considered failed after having succeeded. + // Defaults to 3. Minimum value is 1. + // +optional + optional int32 failureThreshold = 5; +} + message RedisBufferService { // Native brings up a native Redis service optional NativeRedis native = 1; diff --git a/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go index 934d497878..e3b8e28f64 100644 --- a/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go +++ b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go @@ -345,6 +345,18 @@ func (mv MonoVertex) GetPodSpec(req GetMonoVertexPodSpecReq) (*corev1.PodSpec, e volumeMounts: volumeMounts, }) + var readyzInitDeploy, readyzPeriodSeconds, readyzTimeoutSeconds, readyzFailureThreshold int32 = NumaContainerReadyzInitialDelaySeconds, NumaContainerReadyzPeriodSeconds, NumaContainerReadyzTimeoutSeconds, NumaContainerReadyzFailureThreshold + var liveZInitDeploy, liveZPeriodSeconds, liveZTimeoutSeconds, liveZFailureThreshold int32 = NumaContainerLivezInitialDelaySeconds, NumaContainerLivezPeriodSeconds, NumaContainerLivezTimeoutSeconds, NumaContainerLivezFailureThreshold + if x := mv.Spec.ContainerTemplate; x != nil { + readyzInitDeploy = GetProbeInitialDelaySecondsOr(x.ReadinessProbe, readyzInitDeploy) + readyzPeriodSeconds = GetProbePeriodSecondsOr(x.ReadinessProbe, readyzPeriodSeconds) + readyzTimeoutSeconds = GetProbeTimeoutSecondsOr(x.ReadinessProbe, readyzTimeoutSeconds) + readyzFailureThreshold = GetProbeFailureThresholdOr(x.ReadinessProbe, readyzFailureThreshold) + liveZInitDeploy = GetProbeInitialDelaySecondsOr(x.LivenessProbe, liveZInitDeploy) + liveZPeriodSeconds = GetProbePeriodSecondsOr(x.LivenessProbe, liveZPeriodSeconds) + liveZTimeoutSeconds = GetProbeTimeoutSecondsOr(x.LivenessProbe, liveZTimeoutSeconds) + liveZFailureThreshold = GetProbeFailureThresholdOr(x.LivenessProbe, liveZFailureThreshold) + } 
containers[0].ReadinessProbe = &corev1.Probe{ ProbeHandler: corev1.ProbeHandler{ HTTPGet: &corev1.HTTPGetAction{ @@ -353,9 +365,10 @@ func (mv MonoVertex) GetPodSpec(req GetMonoVertexPodSpecReq) (*corev1.PodSpec, e Scheme: corev1.URISchemeHTTPS, }, }, - InitialDelaySeconds: 3, - PeriodSeconds: 3, - TimeoutSeconds: 1, + InitialDelaySeconds: readyzInitDeploy, + PeriodSeconds: readyzPeriodSeconds, + TimeoutSeconds: readyzTimeoutSeconds, + FailureThreshold: readyzFailureThreshold, } containers[0].LivenessProbe = &corev1.Probe{ ProbeHandler: corev1.ProbeHandler{ @@ -365,9 +378,10 @@ func (mv MonoVertex) GetPodSpec(req GetMonoVertexPodSpecReq) (*corev1.PodSpec, e Scheme: corev1.URISchemeHTTPS, }, }, - InitialDelaySeconds: 20, - PeriodSeconds: 60, - TimeoutSeconds: 30, + InitialDelaySeconds: liveZInitDeploy, + PeriodSeconds: liveZPeriodSeconds, + TimeoutSeconds: liveZTimeoutSeconds, + FailureThreshold: liveZFailureThreshold, } containers[0].Ports = []corev1.ContainerPort{ {Name: MonoVertexMetricsPortName, ContainerPort: MonoVertexMetricsPort}, diff --git a/pkg/apis/numaflow/v1alpha1/mono_vertex_types_test.go b/pkg/apis/numaflow/v1alpha1/mono_vertex_types_test.go index 665acd8b32..ae6a62c09d 100644 --- a/pkg/apis/numaflow/v1alpha1/mono_vertex_types_test.go +++ b/pkg/apis/numaflow/v1alpha1/mono_vertex_types_test.go @@ -34,6 +34,20 @@ var ( Namespace: "default", }, Spec: MonoVertexSpec{ + ContainerTemplate: &ContainerTemplate{ + ReadinessProbe: &Probe{ + InitialDelaySeconds: ptr.To[int32](24), + PeriodSeconds: ptr.To[int32](25), + FailureThreshold: ptr.To[int32](2), + TimeoutSeconds: ptr.To[int32](21), + }, + LivenessProbe: &Probe{ + InitialDelaySeconds: ptr.To[int32](14), + PeriodSeconds: ptr.To[int32](15), + FailureThreshold: ptr.To[int32](1), + TimeoutSeconds: ptr.To[int32](11), + }, + }, Scale: Scale{ Min: ptr.To[int32](2), Max: ptr.To[int32](4), @@ -195,6 +209,16 @@ func TestMonoVertexGetPodSpec(t *testing.T) { } assert.Contains(t, envNames, "ENV_VAR_NAME") 
assert.Contains(t, envNames, EnvMonoVertexObject) + assert.NotNil(t, podSpec.Containers[0].ReadinessProbe) + assert.Equal(t, int32(24), podSpec.Containers[0].ReadinessProbe.InitialDelaySeconds) + assert.Equal(t, int32(25), podSpec.Containers[0].ReadinessProbe.PeriodSeconds) + assert.Equal(t, int32(2), podSpec.Containers[0].ReadinessProbe.FailureThreshold) + assert.Equal(t, int32(21), podSpec.Containers[0].ReadinessProbe.TimeoutSeconds) + assert.NotNil(t, podSpec.Containers[0].LivenessProbe) + assert.Equal(t, int32(14), podSpec.Containers[0].LivenessProbe.InitialDelaySeconds) + assert.Equal(t, int32(15), podSpec.Containers[0].LivenessProbe.PeriodSeconds) + assert.Equal(t, int32(1), podSpec.Containers[0].LivenessProbe.FailureThreshold) + assert.Equal(t, int32(11), podSpec.Containers[0].LivenessProbe.TimeoutSeconds) }) } diff --git a/pkg/apis/numaflow/v1alpha1/openapi_generated.go b/pkg/apis/numaflow/v1alpha1/openapi_generated.go index 5987162567..bd59769897 100644 --- a/pkg/apis/numaflow/v1alpha1/openapi_generated.go +++ b/pkg/apis/numaflow/v1alpha1/openapi_generated.go @@ -89,6 +89,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.PipelineList": schema_pkg_apis_numaflow_v1alpha1_PipelineList(ref), "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.PipelineSpec": schema_pkg_apis_numaflow_v1alpha1_PipelineSpec(ref), "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.PipelineStatus": schema_pkg_apis_numaflow_v1alpha1_PipelineStatus(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Probe": schema_pkg_apis_numaflow_v1alpha1_Probe(ref), "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.RedisBufferService": schema_pkg_apis_numaflow_v1alpha1_RedisBufferService(ref), "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.RedisConfig": schema_pkg_apis_numaflow_v1alpha1_RedisConfig(ref), 
"github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.RedisSettings": schema_pkg_apis_numaflow_v1alpha1_RedisSettings(ref), @@ -890,11 +891,21 @@ func schema_pkg_apis_numaflow_v1alpha1_Container(ref common.ReferenceCallback) c Format: "", }, }, + "readinessProbe": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Probe"), + }, + }, + "livenessProbe": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Probe"), + }, + }, }, }, }, Dependencies: []string{ - "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeMount"}, + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Probe", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeMount"}, } } @@ -948,11 +959,21 @@ func schema_pkg_apis_numaflow_v1alpha1_ContainerTemplate(ref common.ReferenceCal }, }, }, + "readinessProbe": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Probe"), + }, + }, + "livenessProbe": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Probe"), + }, + }, }, }, }, Dependencies: []string{ - "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext"}, + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Probe", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext"}, } } @@ -4171,6 +4192,54 @@ func schema_pkg_apis_numaflow_v1alpha1_PipelineStatus(ref common.ReferenceCallba } } +func schema_pkg_apis_numaflow_v1alpha1_Probe(ref common.ReferenceCallback) 
common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Probe is used to customize the configuration for Readiness and Liveness probes.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "initialDelaySeconds": { + SchemaProps: spec.SchemaProps{ + Description: "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "timeoutSeconds": { + SchemaProps: spec.SchemaProps{ + Description: "Number of seconds after which the probe times out. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "periodSeconds": { + SchemaProps: spec.SchemaProps{ + Description: "How often (in seconds) to perform the probe.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "successThreshold": { + SchemaProps: spec.SchemaProps{ + Description: "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "failureThreshold": { + SchemaProps: spec.SchemaProps{ + Description: "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. 
Minimum value is 1.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + }, + }, + } +} + func schema_pkg_apis_numaflow_v1alpha1_RedisBufferService(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ diff --git a/pkg/apis/numaflow/v1alpha1/probe.go b/pkg/apis/numaflow/v1alpha1/probe.go new file mode 100644 index 0000000000..44c2801f94 --- /dev/null +++ b/pkg/apis/numaflow/v1alpha1/probe.go @@ -0,0 +1,74 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// Probe is used to customize the configuration for Readiness and Liveness probes. +type Probe struct { + // Number of seconds after the container has started before liveness probes are initiated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + InitialDelaySeconds *int32 `json:"initialDelaySeconds,omitempty" protobuf:"varint,1,opt,name=initialDelaySeconds"` + // Number of seconds after which the probe times out. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,2,opt,name=timeoutSeconds"` + // How often (in seconds) to perform the probe. 
+ // +optional + PeriodSeconds *int32 `json:"periodSeconds,omitempty" protobuf:"varint,3,opt,name=periodSeconds"` + // Minimum consecutive successes for the probe to be considered successful after having failed. + // Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + // +optional + SuccessThreshold *int32 `json:"successThreshold,omitempty" protobuf:"varint,4,opt,name=successThreshold"` + // Minimum consecutive failures for the probe to be considered failed after having succeeded. + // Defaults to 3. Minimum value is 1. + // +optional + FailureThreshold *int32 `json:"failureThreshold,omitempty" protobuf:"varint,5,opt,name=failureThreshold"` +} + +func GetProbeInitialDelaySecondsOr(probe *Probe, defaultValue int32) int32 { + if probe == nil || probe.InitialDelaySeconds == nil { + return defaultValue + } + return *probe.InitialDelaySeconds +} + +func GetProbeTimeoutSecondsOr(probe *Probe, defaultValue int32) int32 { + if probe == nil || probe.TimeoutSeconds == nil { + return defaultValue + } + return *probe.TimeoutSeconds +} + +func GetProbePeriodSecondsOr(probe *Probe, defaultValue int32) int32 { + if probe == nil || probe.PeriodSeconds == nil { + return defaultValue + } + return *probe.PeriodSeconds +} + +func GetProbeSuccessThresholdOr(probe *Probe, defaultValue int32) int32 { + if probe == nil || probe.SuccessThreshold == nil { + return defaultValue + } + return *probe.SuccessThreshold +} +func GetProbeFailureThresholdOr(probe *Probe, defaultValue int32) int32 { + if probe == nil || probe.FailureThreshold == nil { + return defaultValue + } + return *probe.FailureThreshold +} diff --git a/pkg/apis/numaflow/v1alpha1/probe_test.go b/pkg/apis/numaflow/v1alpha1/probe_test.go new file mode 100644 index 0000000000..8383944451 --- /dev/null +++ b/pkg/apis/numaflow/v1alpha1/probe_test.go @@ -0,0 +1,199 @@ +/* +Copyright 2022 The Numaproj Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "k8s.io/utils/ptr" +) + +func TestGetProbeInitialDelaySecondsOr(t *testing.T) { + tests := []struct { + name string + probe *Probe + defaultValue int32 + expected int32 + }{ + { + name: "nil probe", + probe: nil, + defaultValue: 10, + expected: 10, + }, + { + name: "nil InitialDelaySeconds", + probe: &Probe{}, + defaultValue: 5, + expected: 5, + }, + { + name: "non-nil InitialDelaySeconds", + probe: &Probe{InitialDelaySeconds: ptr.To[int32](15)}, + defaultValue: 10, + expected: 15, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := GetProbeInitialDelaySecondsOr(tt.probe, tt.defaultValue) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestGetProbeTimeoutSeconds(t *testing.T) { + tests := []struct { + name string + probe *Probe + defaultValue int32 + expected int32 + }{ + { + name: "nil probe", + probe: nil, + defaultValue: 5, + expected: 5, + }, + { + name: "nil TimeoutSeconds", + probe: &Probe{}, + defaultValue: 3, + expected: 3, + }, + { + name: "non-nil TimeoutSeconds", + probe: &Probe{TimeoutSeconds: ptr.To[int32](8)}, + defaultValue: 5, + expected: 8, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := GetProbeTimeoutSecondsOr(tt.probe, tt.defaultValue) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestGetProbePeriodSeconds(t *testing.T) { + tests 
:= []struct { + name string + probe *Probe + defaultValue int32 + expected int32 + }{ + { + name: "nil probe", + probe: nil, + defaultValue: 10, + expected: 10, + }, + { + name: "nil PeriodSeconds", + probe: &Probe{}, + defaultValue: 15, + expected: 15, + }, + { + name: "non-nil PeriodSeconds", + probe: &Probe{PeriodSeconds: ptr.To[int32](20)}, + defaultValue: 10, + expected: 20, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := GetProbePeriodSecondsOr(tt.probe, tt.defaultValue) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestGetProbeSuccessThreshold(t *testing.T) { + tests := []struct { + name string + probe *Probe + defaultValue int32 + expected int32 + }{ + { + name: "nil probe", + probe: nil, + defaultValue: 1, + expected: 1, + }, + { + name: "nil SuccessThreshold", + probe: &Probe{}, + defaultValue: 2, + expected: 2, + }, + { + name: "non-nil SuccessThreshold", + probe: &Probe{SuccessThreshold: ptr.To[int32](3)}, + defaultValue: 1, + expected: 3, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := GetProbeSuccessThresholdOr(tt.probe, tt.defaultValue) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestGetProbeFailureThreshold(t *testing.T) { + tests := []struct { + name string + probe *Probe + defaultValue int32 + expected int32 + }{ + { + name: "nil probe", + probe: nil, + defaultValue: 3, + expected: 3, + }, + { + name: "nil FailureThreshold", + probe: &Probe{}, + defaultValue: 5, + expected: 5, + }, + { + name: "non-nil FailureThreshold", + probe: &Probe{FailureThreshold: ptr.To[int32](7)}, + defaultValue: 3, + expected: 7, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := GetProbeFailureThresholdOr(tt.probe, tt.defaultValue) + assert.Equal(t, tt.expected, result) + }) + } +} diff --git a/pkg/apis/numaflow/v1alpha1/sink.go b/pkg/apis/numaflow/v1alpha1/sink.go index d596a079e6..d35f323e9e 100644 --- 
a/pkg/apis/numaflow/v1alpha1/sink.go +++ b/pkg/apis/numaflow/v1alpha1/sink.go @@ -92,9 +92,10 @@ func (s Sink) getUDSinkContainer(mainContainerReq getContainerReq) corev1.Contai Scheme: corev1.URISchemeHTTPS, }, }, - InitialDelaySeconds: 30, - PeriodSeconds: 60, - TimeoutSeconds: 30, + InitialDelaySeconds: GetProbeInitialDelaySecondsOr(x.LivenessProbe, UDContainerLivezInitialDelaySeconds), + PeriodSeconds: GetProbePeriodSecondsOr(x.LivenessProbe, UDContainerLivezPeriodSeconds), + TimeoutSeconds: GetProbeTimeoutSecondsOr(x.LivenessProbe, UDContainerLivezTimeoutSeconds), + FailureThreshold: GetProbeFailureThresholdOr(x.LivenessProbe, UDContainerLivezFailureThreshold), } return container } @@ -126,9 +127,10 @@ func (s Sink) getFallbackUDSinkContainer(mainContainerReq getContainerReq) corev Scheme: corev1.URISchemeHTTPS, }, }, - InitialDelaySeconds: 30, - PeriodSeconds: 60, - TimeoutSeconds: 30, + InitialDelaySeconds: GetProbeInitialDelaySecondsOr(x.LivenessProbe, UDContainerLivezInitialDelaySeconds), + PeriodSeconds: GetProbePeriodSecondsOr(x.LivenessProbe, UDContainerLivezPeriodSeconds), + TimeoutSeconds: GetProbeTimeoutSecondsOr(x.LivenessProbe, UDContainerLivezTimeoutSeconds), + FailureThreshold: GetProbeFailureThresholdOr(x.LivenessProbe, UDContainerLivezFailureThreshold), } return container } diff --git a/pkg/apis/numaflow/v1alpha1/sink_test.go b/pkg/apis/numaflow/v1alpha1/sink_test.go index 0fe7f002af..f63cb751bd 100644 --- a/pkg/apis/numaflow/v1alpha1/sink_test.go +++ b/pkg/apis/numaflow/v1alpha1/sink_test.go @@ -22,6 +22,7 @@ import ( "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" resource "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/utils/ptr" ) func Test_Sink_getContainers(t *testing.T) { @@ -52,6 +53,12 @@ func Test_Sink_getUDSinkContainer(t *testing.T) { EnvFrom: []corev1.EnvFromSource{{ConfigMapRef: &corev1.ConfigMapEnvSource{ LocalObjectReference: corev1.LocalObjectReference{Name: "test-cm"}, }}}, + LivenessProbe: &Probe{ + 
InitialDelaySeconds: ptr.To[int32](10), + TimeoutSeconds: ptr.To[int32](15), + PeriodSeconds: ptr.To[int32](14), + FailureThreshold: ptr.To[int32](5), + }, }, }, }, @@ -78,6 +85,10 @@ func Test_Sink_getUDSinkContainer(t *testing.T) { }) assert.Equal(t, testImagePullPolicy, c.ImagePullPolicy) assert.True(t, c.LivenessProbe != nil) + assert.Equal(t, int32(10), c.LivenessProbe.InitialDelaySeconds) + assert.Equal(t, int32(15), c.LivenessProbe.TimeoutSeconds) + assert.Equal(t, int32(14), c.LivenessProbe.PeriodSeconds) + assert.Equal(t, int32(5), c.LivenessProbe.FailureThreshold) } func Test_Sink_getFallbackUDSinkContainer(t *testing.T) { @@ -103,6 +114,12 @@ func Test_Sink_getFallbackUDSinkContainer(t *testing.T) { EnvFrom: []corev1.EnvFromSource{{ConfigMapRef: &corev1.ConfigMapEnvSource{ LocalObjectReference: corev1.LocalObjectReference{Name: "test-cm"}, }}}, + LivenessProbe: &Probe{ + InitialDelaySeconds: ptr.To[int32](20), + TimeoutSeconds: ptr.To[int32](25), + PeriodSeconds: ptr.To[int32](24), + FailureThreshold: ptr.To[int32](10), + }, }, }, }, @@ -122,6 +139,11 @@ func Test_Sink_getFallbackUDSinkContainer(t *testing.T) { envs[e.Name] = e.Value } assert.Equal(t, envs[EnvUDContainerType], UDContainerFallbackSink) + assert.True(t, c.LivenessProbe != nil) + assert.Equal(t, int32(20), c.LivenessProbe.InitialDelaySeconds) + assert.Equal(t, int32(25), c.LivenessProbe.TimeoutSeconds) + assert.Equal(t, int32(24), c.LivenessProbe.PeriodSeconds) + assert.Equal(t, int32(10), c.LivenessProbe.FailureThreshold) x.UDSink.Container.ImagePullPolicy = &testImagePullPolicy c = x.getUDSinkContainer(getContainerReq{ image: "main-image", diff --git a/pkg/apis/numaflow/v1alpha1/source.go b/pkg/apis/numaflow/v1alpha1/source.go index bc78831ff6..b89016d9c2 100644 --- a/pkg/apis/numaflow/v1alpha1/source.go +++ b/pkg/apis/numaflow/v1alpha1/source.go @@ -102,6 +102,14 @@ func (s Source) getUDTransformerContainer(mainContainerReq getContainerReq) core } } container := c.build() + + var 
initialDelaySeconds, periodSeconds, timeoutSeconds, failureThreshold int32 = UDContainerLivezInitialDelaySeconds, UDContainerLivezPeriodSeconds, UDContainerLivezTimeoutSeconds, UDContainerLivezFailureThreshold + if x := s.UDTransformer.Container; x != nil { + initialDelaySeconds = GetProbeInitialDelaySecondsOr(x.LivenessProbe, initialDelaySeconds) + periodSeconds = GetProbePeriodSecondsOr(x.LivenessProbe, periodSeconds) + timeoutSeconds = GetProbeTimeoutSecondsOr(x.LivenessProbe, timeoutSeconds) + failureThreshold = GetProbeFailureThresholdOr(x.LivenessProbe, failureThreshold) + } container.LivenessProbe = &corev1.Probe{ ProbeHandler: corev1.ProbeHandler{ HTTPGet: &corev1.HTTPGetAction{ @@ -110,9 +118,10 @@ func (s Source) getUDTransformerContainer(mainContainerReq getContainerReq) core Scheme: corev1.URISchemeHTTPS, }, }, - InitialDelaySeconds: 30, - PeriodSeconds: 60, - TimeoutSeconds: 30, + InitialDelaySeconds: initialDelaySeconds, + PeriodSeconds: periodSeconds, + TimeoutSeconds: timeoutSeconds, + FailureThreshold: failureThreshold, } return container } @@ -139,6 +148,14 @@ func (s Source) getUDSourceContainer(mainContainerReq getContainerReq) corev1.Co } } container := c.build() + + var initialDelaySeconds, periodSeconds, timeoutSeconds, failureThreshold int32 = UDContainerLivezInitialDelaySeconds, UDContainerLivezPeriodSeconds, UDContainerLivezTimeoutSeconds, UDContainerLivezFailureThreshold + if x := s.UDSource.Container; x != nil { + initialDelaySeconds = GetProbeInitialDelaySecondsOr(x.LivenessProbe, initialDelaySeconds) + periodSeconds = GetProbePeriodSecondsOr(x.LivenessProbe, periodSeconds) + timeoutSeconds = GetProbeTimeoutSecondsOr(x.LivenessProbe, timeoutSeconds) + failureThreshold = GetProbeFailureThresholdOr(x.LivenessProbe, failureThreshold) + } container.LivenessProbe = &corev1.Probe{ ProbeHandler: corev1.ProbeHandler{ HTTPGet: &corev1.HTTPGetAction{ @@ -147,9 +164,10 @@ func (s Source) getUDSourceContainer(mainContainerReq getContainerReq) 
corev1.Co Scheme: corev1.URISchemeHTTPS, }, }, - InitialDelaySeconds: 30, - PeriodSeconds: 60, - TimeoutSeconds: 30, + InitialDelaySeconds: initialDelaySeconds, + PeriodSeconds: periodSeconds, + TimeoutSeconds: timeoutSeconds, + FailureThreshold: failureThreshold, } return container } diff --git a/pkg/apis/numaflow/v1alpha1/source_test.go b/pkg/apis/numaflow/v1alpha1/source_test.go index d762eef84a..01f26f2cd6 100644 --- a/pkg/apis/numaflow/v1alpha1/source_test.go +++ b/pkg/apis/numaflow/v1alpha1/source_test.go @@ -23,12 +23,36 @@ import ( "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/utils/ptr" ) var testImagePullPolicy = corev1.PullNever func TestSource_getContainers(t *testing.T) { x := Source{ + UDSource: &UDSource{ + Container: &Container{ + Image: "my-image-s", + VolumeMounts: []corev1.VolumeMount{{Name: "my-vm"}}, + Command: []string{"my-cmd-s"}, + Args: []string{"my-arg-s"}, + Env: []corev1.EnvVar{{Name: "my-envvar-s"}}, + EnvFrom: []corev1.EnvFromSource{{ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "test-cm"}, + }}}, + Resources: corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + "cpu": resource.MustParse("2"), + }, + }, + LivenessProbe: &Probe{ + InitialDelaySeconds: ptr.To[int32](10), + TimeoutSeconds: ptr.To[int32](15), + PeriodSeconds: ptr.To[int32](14), + FailureThreshold: ptr.To[int32](5), + }, + }, + }, UDTransformer: &UDTransformer{ Container: &Container{ Image: "my-image", @@ -44,6 +68,12 @@ func TestSource_getContainers(t *testing.T) { "cpu": resource.MustParse("2"), }, }, + LivenessProbe: &Probe{ + InitialDelaySeconds: ptr.To[int32](20), + TimeoutSeconds: ptr.To[int32](25), + PeriodSeconds: ptr.To[int32](24), + FailureThreshold: ptr.To[int32](5), + }, }, }, } @@ -51,8 +81,33 @@ func TestSource_getContainers(t *testing.T) { image: "main-image", }) assert.NoError(t, err) - assert.Equal(t, 2, 
len(c)) + assert.Equal(t, 3, len(c)) assert.Equal(t, "main-image", c[0].Image) + + assert.Equal(t, x.UDSource.Container.Image, c[2].Image) + assert.Contains(t, c[2].VolumeMounts, c[2].VolumeMounts[0]) + assert.Equal(t, x.UDSource.Container.Command, c[2].Command) + assert.Equal(t, x.UDSource.Container.Args, c[2].Args) + envsUDSource := map[string]string{} + for _, e := range c[2].Env { + envsUDSource[e.Name] = e.Value + } + assert.Equal(t, envsUDSource[EnvUDContainerType], UDContainerSource) + assert.Equal(t, x.UDSource.Container.EnvFrom, c[2].EnvFrom) + assert.Equal(t, corev1.ResourceRequirements{Requests: map[corev1.ResourceName]resource.Quantity{"cpu": resource.MustParse("2")}}, c[2].Resources) + assert.Equal(t, c[0].ImagePullPolicy, c[2].ImagePullPolicy) + assert.NotNil(t, c[1].LivenessProbe) + assert.Equal(t, int32(10), c[2].LivenessProbe.InitialDelaySeconds) + assert.Equal(t, int32(15), c[2].LivenessProbe.TimeoutSeconds) + assert.Equal(t, int32(14), c[2].LivenessProbe.PeriodSeconds) + assert.Equal(t, int32(5), c[2].LivenessProbe.FailureThreshold) + x.UDSource.Container.ImagePullPolicy = &testImagePullPolicy + c, _ = x.getContainers(getContainerReq{ + image: "main-image", + imagePullPolicy: corev1.PullAlways, + }) + assert.Equal(t, testImagePullPolicy, c[2].ImagePullPolicy) + assert.Equal(t, x.UDTransformer.Container.Image, c[1].Image) assert.Contains(t, c[1].VolumeMounts, c[1].VolumeMounts[0]) assert.Equal(t, x.UDTransformer.Container.Command, c[1].Command) @@ -65,6 +120,11 @@ func TestSource_getContainers(t *testing.T) { assert.Equal(t, x.UDTransformer.Container.EnvFrom, c[1].EnvFrom) assert.Equal(t, corev1.ResourceRequirements{Requests: map[corev1.ResourceName]resource.Quantity{"cpu": resource.MustParse("2")}}, c[1].Resources) assert.Equal(t, c[0].ImagePullPolicy, c[1].ImagePullPolicy) + assert.NotNil(t, c[1].LivenessProbe) + assert.Equal(t, int32(20), c[1].LivenessProbe.InitialDelaySeconds) + assert.Equal(t, int32(25), c[1].LivenessProbe.TimeoutSeconds) + 
assert.Equal(t, int32(24), c[1].LivenessProbe.PeriodSeconds) + assert.Equal(t, int32(5), c[1].LivenessProbe.FailureThreshold) x.UDTransformer.Container.ImagePullPolicy = &testImagePullPolicy c, _ = x.getContainers(getContainerReq{ image: "main-image", diff --git a/pkg/apis/numaflow/v1alpha1/udf.go b/pkg/apis/numaflow/v1alpha1/udf.go index c29d54b2cb..23e9bcf085 100644 --- a/pkg/apis/numaflow/v1alpha1/udf.go +++ b/pkg/apis/numaflow/v1alpha1/udf.go @@ -98,6 +98,14 @@ func (in UDF) getUDFContainer(mainContainerReq getContainerReq) corev1.Container } c = c.appendEnv(corev1.EnvVar{Name: EnvUDContainerType, Value: UDContainerFunction}) container := c.build() + + var initialDelaySeconds, periodSeconds, timeoutSeconds, failureThreshold int32 = UDContainerLivezInitialDelaySeconds, UDContainerLivezPeriodSeconds, UDContainerLivezTimeoutSeconds, UDContainerLivezFailureThreshold + if x := in.Container; x != nil { + initialDelaySeconds = GetProbeInitialDelaySecondsOr(x.LivenessProbe, initialDelaySeconds) + periodSeconds = GetProbePeriodSecondsOr(x.LivenessProbe, periodSeconds) + timeoutSeconds = GetProbeTimeoutSecondsOr(x.LivenessProbe, timeoutSeconds) + failureThreshold = GetProbeFailureThresholdOr(x.LivenessProbe, failureThreshold) + } container.LivenessProbe = &corev1.Probe{ ProbeHandler: corev1.ProbeHandler{ HTTPGet: &corev1.HTTPGetAction{ @@ -106,9 +114,10 @@ func (in UDF) getUDFContainer(mainContainerReq getContainerReq) corev1.Container Scheme: corev1.URISchemeHTTPS, }, }, - InitialDelaySeconds: 30, - PeriodSeconds: 60, - TimeoutSeconds: 30, + InitialDelaySeconds: initialDelaySeconds, + PeriodSeconds: periodSeconds, + TimeoutSeconds: timeoutSeconds, + FailureThreshold: failureThreshold, } return container } diff --git a/pkg/apis/numaflow/v1alpha1/udf_test.go b/pkg/apis/numaflow/v1alpha1/udf_test.go index f96e9c3756..0bb8e3862a 100644 --- a/pkg/apis/numaflow/v1alpha1/udf_test.go +++ b/pkg/apis/numaflow/v1alpha1/udf_test.go @@ -22,6 +22,7 @@ import ( 
"github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/utils/ptr" ) func TestUDF_getContainers(t *testing.T) { @@ -40,6 +41,12 @@ func TestUDF_getContainers(t *testing.T) { "cpu": resource.MustParse("2"), }, }, + LivenessProbe: &Probe{ + InitialDelaySeconds: ptr.To[int32](10), + TimeoutSeconds: ptr.To[int32](15), + PeriodSeconds: ptr.To[int32](14), + FailureThreshold: ptr.To[int32](5), + }, }, } c, err := x.getContainers(getContainerReq{ @@ -68,6 +75,10 @@ func TestUDF_getContainers(t *testing.T) { }) assert.Equal(t, testImagePullPolicy, c[1].ImagePullPolicy) assert.True(t, c[1].LivenessProbe != nil) + assert.Equal(t, int32(10), c[1].LivenessProbe.InitialDelaySeconds) + assert.Equal(t, int32(15), c[1].LivenessProbe.TimeoutSeconds) + assert.Equal(t, int32(14), c[1].LivenessProbe.PeriodSeconds) + assert.Equal(t, int32(5), c[1].LivenessProbe.FailureThreshold) } func Test_getUDFContainer(t *testing.T) { diff --git a/pkg/apis/numaflow/v1alpha1/user_defined_container.go b/pkg/apis/numaflow/v1alpha1/user_defined_container.go index 369a6d3eb3..5e8746097e 100644 --- a/pkg/apis/numaflow/v1alpha1/user_defined_container.go +++ b/pkg/apis/numaflow/v1alpha1/user_defined_container.go @@ -38,4 +38,8 @@ type Container struct { SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,8,opt,name=securityContext"` // +optional ImagePullPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,9,opt,name=imagePullPolicy,casttype=PullPolicy"` + // +optional + ReadinessProbe *Probe `json:"readinessProbe,omitempty" protobuf:"bytes,10,opt,name=readinessProbe"` + // +optional + LivenessProbe *Probe `json:"livenessProbe,omitempty" protobuf:"bytes,11,opt,name=livenessProbe"` } diff --git a/pkg/apis/numaflow/v1alpha1/vertex_types.go b/pkg/apis/numaflow/v1alpha1/vertex_types.go index 965e9f4bcc..f403d074c3 100644 --- a/pkg/apis/numaflow/v1alpha1/vertex_types.go +++ 
b/pkg/apis/numaflow/v1alpha1/vertex_types.go @@ -253,6 +253,18 @@ func (v Vertex) GetPodSpec(req GetVertexPodSpecReq) (*corev1.PodSpec, error) { return nil, err } + var readyzInitDeploy, readyzPeriodSeconds, readyzTimeoutSeconds, readyzFailureThreshold int32 = NumaContainerReadyzInitialDelaySeconds, NumaContainerReadyzPeriodSeconds, NumaContainerReadyzTimeoutSeconds, NumaContainerReadyzFailureThreshold + var liveZInitDeploy, liveZPeriodSeconds, liveZTimeoutSeconds, liveZFailureThreshold int32 = NumaContainerLivezInitialDelaySeconds, NumaContainerLivezPeriodSeconds, NumaContainerLivezTimeoutSeconds, NumaContainerLivezFailureThreshold + if x := v.Spec.ContainerTemplate; x != nil { + readyzInitDeploy = GetProbeInitialDelaySecondsOr(x.ReadinessProbe, readyzInitDeploy) + readyzPeriodSeconds = GetProbePeriodSecondsOr(x.ReadinessProbe, readyzPeriodSeconds) + readyzTimeoutSeconds = GetProbeTimeoutSecondsOr(x.ReadinessProbe, readyzTimeoutSeconds) + readyzFailureThreshold = GetProbeFailureThresholdOr(x.ReadinessProbe, readyzFailureThreshold) + liveZInitDeploy = GetProbeInitialDelaySecondsOr(x.LivenessProbe, liveZInitDeploy) + liveZPeriodSeconds = GetProbePeriodSecondsOr(x.LivenessProbe, liveZPeriodSeconds) + liveZTimeoutSeconds = GetProbeTimeoutSecondsOr(x.LivenessProbe, liveZTimeoutSeconds) + liveZFailureThreshold = GetProbeFailureThresholdOr(x.LivenessProbe, liveZFailureThreshold) + } containers[0].ReadinessProbe = &corev1.Probe{ ProbeHandler: corev1.ProbeHandler{ HTTPGet: &corev1.HTTPGetAction{ @@ -261,10 +273,12 @@ func (v Vertex) GetPodSpec(req GetVertexPodSpecReq) (*corev1.PodSpec, error) { Scheme: corev1.URISchemeHTTPS, }, }, - InitialDelaySeconds: 3, - PeriodSeconds: 3, - TimeoutSeconds: 1, + InitialDelaySeconds: readyzInitDeploy, + PeriodSeconds: readyzPeriodSeconds, + TimeoutSeconds: readyzTimeoutSeconds, + FailureThreshold: readyzFailureThreshold, } + containers[0].LivenessProbe = &corev1.Probe{ ProbeHandler: corev1.ProbeHandler{ HTTPGet: &corev1.HTTPGetAction{ @@ 
-273,9 +287,10 @@ func (v Vertex) GetPodSpec(req GetVertexPodSpecReq) (*corev1.PodSpec, error) { Scheme: corev1.URISchemeHTTPS, }, }, - InitialDelaySeconds: 20, - PeriodSeconds: 60, - TimeoutSeconds: 30, + InitialDelaySeconds: liveZInitDeploy, + PeriodSeconds: liveZPeriodSeconds, + TimeoutSeconds: liveZTimeoutSeconds, + FailureThreshold: liveZFailureThreshold, } containers[0].Ports = []corev1.ContainerPort{ {Name: VertexMetricsPortName, ContainerPort: VertexMetricsPort}, diff --git a/pkg/apis/numaflow/v1alpha1/vertex_types_test.go b/pkg/apis/numaflow/v1alpha1/vertex_types_test.go index 1f5572c424..83384cf624 100644 --- a/pkg/apis/numaflow/v1alpha1/vertex_types_test.go +++ b/pkg/apis/numaflow/v1alpha1/vertex_types_test.go @@ -286,6 +286,20 @@ func TestGetPodSpec(t *testing.T) { t.Run("test sink", func(t *testing.T) { testObj := testVertex.DeepCopy() testObj.Spec.Sink = &Sink{} + testObj.Spec.ContainerTemplate = &ContainerTemplate{ + ReadinessProbe: &Probe{ + InitialDelaySeconds: ptr.To[int32](24), + PeriodSeconds: ptr.To[int32](25), + FailureThreshold: ptr.To[int32](2), + TimeoutSeconds: ptr.To[int32](21), + }, + LivenessProbe: &Probe{ + InitialDelaySeconds: ptr.To[int32](14), + PeriodSeconds: ptr.To[int32](15), + FailureThreshold: ptr.To[int32](2), + TimeoutSeconds: ptr.To[int32](11), + }, + } s, err := testObj.GetPodSpec(req) assert.NoError(t, err) assert.Equal(t, 1, len(s.Containers)) @@ -294,10 +308,20 @@ func TestGetPodSpec(t *testing.T) { assert.Equal(t, corev1.PullIfNotPresent, s.Containers[0].ImagePullPolicy) assert.NotNil(t, s.Containers[0].ReadinessProbe) assert.NotNil(t, s.Containers[0].ReadinessProbe.HTTPGet) + assert.Equal(t, "/readyz", s.Containers[0].ReadinessProbe.HTTPGet.Path) + assert.Equal(t, int32(24), s.Containers[0].ReadinessProbe.InitialDelaySeconds) + assert.Equal(t, int32(25), s.Containers[0].ReadinessProbe.PeriodSeconds) + assert.Equal(t, int32(2), s.Containers[0].ReadinessProbe.FailureThreshold) + assert.Equal(t, int32(21), 
s.Containers[0].ReadinessProbe.TimeoutSeconds) assert.Equal(t, corev1.URISchemeHTTPS, s.Containers[0].ReadinessProbe.HTTPGet.Scheme) assert.Equal(t, VertexMetricsPort, s.Containers[0].ReadinessProbe.HTTPGet.Port.IntValue()) assert.NotNil(t, s.Containers[0].LivenessProbe) assert.NotNil(t, s.Containers[0].LivenessProbe.HTTPGet) + assert.Equal(t, "/livez", s.Containers[0].LivenessProbe.HTTPGet.Path) + assert.Equal(t, int32(14), s.Containers[0].LivenessProbe.InitialDelaySeconds) + assert.Equal(t, int32(15), s.Containers[0].LivenessProbe.PeriodSeconds) + assert.Equal(t, int32(2), s.Containers[0].LivenessProbe.FailureThreshold) + assert.Equal(t, int32(11), s.Containers[0].LivenessProbe.TimeoutSeconds) assert.Equal(t, corev1.URISchemeHTTPS, s.Containers[0].LivenessProbe.HTTPGet.Scheme) assert.Equal(t, VertexMetricsPort, s.Containers[0].LivenessProbe.HTTPGet.Port.IntValue()) assert.Equal(t, 1, len(s.Containers[0].Ports)) diff --git a/pkg/shared/clients/nats/test/server.go b/pkg/shared/clients/nats/test/server.go index af3c25036e..556a7390c2 100644 --- a/pkg/shared/clients/nats/test/server.go +++ b/pkg/shared/clients/nats/test/server.go @@ -27,6 +27,7 @@ import ( func RunNatsServer(t *testing.T) *server.Server { t.Helper() opts := natstestserver.DefaultTestOptions + opts.Port = 4223 return natstestserver.RunServer(&opts) } diff --git a/pkg/sources/nats/nats_test.go b/pkg/sources/nats/nats_test.go index 739d215018..db4a0c25b7 100644 --- a/pkg/sources/nats/nats_test.go +++ b/pkg/sources/nats/nats_test.go @@ -68,7 +68,7 @@ func Test_Single(t *testing.T) { server := natstest.RunNatsServer(t) defer server.Shutdown() - url := "127.0.0.1" + url := server.ClientURL() testSubject := "test-single" testQueue := "test-queue-single" vi := testVertex(t, url, testSubject, testQueue, "test-host", 0) @@ -112,9 +112,9 @@ func Test_Multiple(t *testing.T) { server := natstest.RunNatsServer(t) defer server.Shutdown() - url := "127.0.0.1" testSubject := "test-multiple" testQueue := 
"test-queue-multiple" + url := server.ClientURL() v1 := testVertex(t, url, testSubject, testQueue, "test-host1", 0) ns1, err := newInstance(t, v1) assert.NoError(t, err) diff --git a/rust/numaflow-models/src/models/container.rs b/rust/numaflow-models/src/models/container.rs index 69f3206a00..3599d6a243 100644 --- a/rust/numaflow-models/src/models/container.rs +++ b/rust/numaflow-models/src/models/container.rs @@ -32,6 +32,10 @@ pub struct Container { pub image: Option, #[serde(rename = "imagePullPolicy", skip_serializing_if = "Option::is_none")] pub image_pull_policy: Option, + #[serde(rename = "livenessProbe", skip_serializing_if = "Option::is_none")] + pub liveness_probe: Option>, + #[serde(rename = "readinessProbe", skip_serializing_if = "Option::is_none")] + pub readiness_probe: Option>, #[serde(rename = "resources", skip_serializing_if = "Option::is_none")] pub resources: Option, #[serde(rename = "securityContext", skip_serializing_if = "Option::is_none")] @@ -50,6 +54,8 @@ impl Container { env_from: None, image: None, image_pull_policy: None, + liveness_probe: None, + readiness_probe: None, resources: None, security_context: None, volume_mounts: None, diff --git a/rust/numaflow-models/src/models/container_template.rs b/rust/numaflow-models/src/models/container_template.rs index 71bd4e068c..2b478cadf1 100644 --- a/rust/numaflow-models/src/models/container_template.rs +++ b/rust/numaflow-models/src/models/container_template.rs @@ -26,6 +26,10 @@ pub struct ContainerTemplate { pub env_from: Option>, #[serde(rename = "imagePullPolicy", skip_serializing_if = "Option::is_none")] pub image_pull_policy: Option, + #[serde(rename = "livenessProbe", skip_serializing_if = "Option::is_none")] + pub liveness_probe: Option>, + #[serde(rename = "readinessProbe", skip_serializing_if = "Option::is_none")] + pub readiness_probe: Option>, #[serde(rename = "resources", skip_serializing_if = "Option::is_none")] pub resources: Option, #[serde(rename = "securityContext", 
skip_serializing_if = "Option::is_none")] @@ -39,6 +43,8 @@ impl ContainerTemplate { env: None, env_from: None, image_pull_policy: None, + liveness_probe: None, + readiness_probe: None, resources: None, security_context: None, } diff --git a/rust/numaflow-models/src/models/mod.rs b/rust/numaflow-models/src/models/mod.rs index 29846575d2..423e21fd9c 100644 --- a/rust/numaflow-models/src/models/mod.rs +++ b/rust/numaflow-models/src/models/mod.rs @@ -120,6 +120,8 @@ pub mod pipeline_spec; pub use self::pipeline_spec::PipelineSpec; pub mod pipeline_status; pub use self::pipeline_status::PipelineStatus; +pub mod probe; +pub use self::probe::Probe; pub mod redis_buffer_service; pub use self::redis_buffer_service::RedisBufferService; pub mod redis_config; diff --git a/rust/numaflow-models/src/models/probe.rs b/rust/numaflow-models/src/models/probe.rs new file mode 100644 index 0000000000..9980e9ea79 --- /dev/null +++ b/rust/numaflow-models/src/models/probe.rs @@ -0,0 +1,54 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. + +/// Probe : Probe is used to customize the configuration for Readiness and Liveness probes. + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Probe { + /// Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. 
+ #[serde(rename = "failureThreshold", skip_serializing_if = "Option::is_none")] + pub failure_threshold: Option, + /// Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + #[serde( + rename = "initialDelaySeconds", + skip_serializing_if = "Option::is_none" + )] + pub initial_delay_seconds: Option, + /// How often (in seconds) to perform the probe. + #[serde(rename = "periodSeconds", skip_serializing_if = "Option::is_none")] + pub period_seconds: Option, + /// Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + #[serde(rename = "successThreshold", skip_serializing_if = "Option::is_none")] + pub success_threshold: Option, + /// Number of seconds after which the probe times out. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + #[serde(rename = "timeoutSeconds", skip_serializing_if = "Option::is_none")] + pub timeout_seconds: Option, +} + +impl Probe { + /// Probe is used to customize the configuration for Readiness and Liveness probes. 
+ pub fn new() -> Probe { + Probe { + failure_threshold: None, + initial_delay_seconds: None, + period_seconds: None, + success_threshold: None, + timeout_seconds: None, + } + } +} diff --git a/test/e2e/functional_test.go b/test/e2e/functional_test.go index 2d0989ac7e..1c72af9229 100644 --- a/test/e2e/functional_test.go +++ b/test/e2e/functional_test.go @@ -316,12 +316,12 @@ func (s *FunctionalSuite) TestFallbackSink() { defer w.DeletePipelineAndWait() pipelineName := "simple-fallback" - // send a message to the pipeline - w.SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("fallback-message"))) - // wait for all the pods to come up w.Expect().VertexPodsRunning() + // send a message to the pipeline + w.SendMessageTo(pipelineName, "in", NewHttpPostRequest().WithBody([]byte("fallback-message"))) + w.Expect().RedisSinkContains("simple-fallback-output", "fallback-message") } diff --git a/test/fixtures/util.go b/test/fixtures/util.go index c88d28d1af..13a4026384 100644 --- a/test/fixtures/util.go +++ b/test/fixtures/util.go @@ -310,7 +310,7 @@ func WaitForVertexPodRunning(kubeClient kubernetes.Interface, vertexClient flowp } ok = ok && len(podList.Items) > 0 && len(podList.Items) == vertexList.Items[0].GetReplicas() // pod number should equal to desired replicas for _, p := range podList.Items { - ok = ok && p.Status.Phase == corev1.PodRunning + ok = ok && isPodReady(p) } if ok { return nil @@ -319,6 +319,18 @@ func WaitForVertexPodRunning(kubeClient kubernetes.Interface, vertexClient flowp } } +func isPodReady(pod corev1.Pod) bool { + if pod.Status.Phase != corev1.PodRunning { + return false + } + for _, c := range pod.Status.ContainerStatuses { + if !c.Ready { + return false + } + } + return true +} + func WaitForVertexPodScalingTo(kubeClient kubernetes.Interface, vertexClient flowpkg.VertexInterface, namespace, pipelineName, vertexName string, timeout time.Duration, size int) error { ctx, cancel := context.WithTimeout(context.Background(), 
timeout) defer cancel() From 69143417de0b18a9645ae70af0eb6e2dbb7aa914 Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Thu, 19 Sep 2024 13:35:31 -0700 Subject: [PATCH 063/188] chore: suppress some misleading logs from raters (#2075) Signed-off-by: Derek Wang --- pkg/daemon/server/service/rater/rater.go | 5 ++--- pkg/mvtxdaemon/server/service/rater/rater.go | 10 +++++----- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/pkg/daemon/server/service/rater/rater.go b/pkg/daemon/server/service/rater/rater.go index b858cc968f..86b2ad10de 100644 --- a/pkg/daemon/server/service/rater/rater.go +++ b/pkg/daemon/server/service/rater/rater.go @@ -218,12 +218,11 @@ func sleep(ctx context.Context, duration time.Duration) { // since a pod can read from multiple partitions, we will return a map of partition to read count. func (r *Rater) getPodReadCounts(vertexName, podName string) *PodReadCount { readTotalMetricName := "forwarder_data_read_total" - // scrape the read total metric from pod metric port url := fmt.Sprintf("https://%s.%s.%s.svc:%v/metrics", podName, r.pipeline.Name+"-"+vertexName+"-headless", r.pipeline.Namespace, v1alpha1.VertexMetricsPort) resp, err := r.httpClient.Get(url) if err != nil { - r.log.Errorf("[vertex name %s, pod name %s]: failed reading the metrics endpoint, %v", vertexName, podName, err.Error()) + r.log.Warnf("[vertex name %s, pod name %s]: failed reading the metrics endpoint, the pod might have been scaled down: %v", vertexName, podName, err.Error()) return nil } defer resp.Body.Close() @@ -255,7 +254,7 @@ func (r *Rater) getPodReadCounts(vertexName, podName string) *PodReadCount { podReadCount := &PodReadCount{podName, partitionReadCount} return podReadCount } else { - r.log.Errorf("[vertex name %s, pod name %s]: failed getting the read total metric, the metric is not available.", vertexName, podName) + r.log.Infof("[vertex name %s, pod name %s]: Metric %q is unavailable, the pod might haven't started processing data", vertexName, 
podName, readTotalMetricName) return nil } } diff --git a/pkg/mvtxdaemon/server/service/rater/rater.go b/pkg/mvtxdaemon/server/service/rater/rater.go index d160edd838..19a3ee87dc 100644 --- a/pkg/mvtxdaemon/server/service/rater/rater.go +++ b/pkg/mvtxdaemon/server/service/rater/rater.go @@ -33,7 +33,7 @@ import ( ) const CountWindow = time.Second * 10 -const MonoVtxReadMetricName = "monovtx_read_total" +const monoVtxReadMetricName = "monovtx_read_total" // MonoVtxRatable is the interface for the Rater struct. type MonoVtxRatable interface { @@ -161,7 +161,7 @@ func (r *Rater) getPodReadCounts(podName string) *PodReadCount { url := fmt.Sprintf("https://%s.%s.%s.svc:%v/metrics", podName, headlessServiceName, r.monoVertex.Namespace, v1alpha1.MonoVertexMetricsPort) resp, err := r.httpClient.Get(url) if err != nil { - r.log.Errorf("[MonoVertex name %s, pod name %s]: failed reading the metrics endpoint, %v", r.monoVertex.Name, podName, err.Error()) + r.log.Warnf("[Pod name %s]: failed reading the metrics endpoint, the pod might have been scaled down: %v", podName, err.Error()) return nil } defer resp.Body.Close() @@ -169,11 +169,11 @@ func (r *Rater) getPodReadCounts(podName string) *PodReadCount { textParser := expfmt.TextParser{} result, err := textParser.TextToMetricFamilies(resp.Body) if err != nil { - r.log.Errorf("[MonoVertex name %s, pod name %s]: failed parsing to prometheus metric families, %v", r.monoVertex.Name, podName, err.Error()) + r.log.Errorf("[Pod name %s]: failed parsing to prometheus metric families, %v", podName, err.Error()) return nil } - if value, ok := result[MonoVtxReadMetricName]; ok && value != nil && len(value.GetMetric()) > 0 { + if value, ok := result[monoVtxReadMetricName]; ok && value != nil && len(value.GetMetric()) > 0 { metricsList := value.GetMetric() // Each pod should be emitting only one metric with this name, so we should be able to take the first value // from the results safely. 
@@ -182,7 +182,7 @@ func (r *Rater) getPodReadCounts(podName string) *PodReadCount { podReadCount := &PodReadCount{podName, metricsList[0].Untyped.GetValue()} return podReadCount } else { - r.log.Errorf("[MonoVertex name %s, pod name %s]: failed getting the read total metric, the metric is not available.", r.monoVertex.Name, podName) + r.log.Infof("[Pod name %s]: Metric %q is unavailable, the pod might haven't started processing data", podName, monoVtxReadMetricName) return nil } } From ed543ad2e7824f3e6b508de5b07ba08e1d7d9b66 Mon Sep 17 00:00:00 2001 From: Keran Yang Date: Thu, 19 Sep 2024 17:06:19 -0400 Subject: [PATCH 064/188] fix: support version compatibility check for pre-release versions (#2069) Signed-off-by: Keran Yang --- pkg/sdkclient/serverinfo/serverinfo.go | 6 +- pkg/sdkclient/serverinfo/serverinfo_test.go | 234 ++++++++- pkg/sdkclient/serverinfo/types.go | 75 ++- pkg/shared/clients/nats/test/server.go | 4 +- rust/monovertex/src/server_info.rs | 508 +++++++++++++++----- 5 files changed, 677 insertions(+), 150 deletions(-) diff --git a/pkg/sdkclient/serverinfo/serverinfo.go b/pkg/sdkclient/serverinfo/serverinfo.go index 932ab2ff50..d01acd1cda 100644 --- a/pkg/sdkclient/serverinfo/serverinfo.go +++ b/pkg/sdkclient/serverinfo/serverinfo.go @@ -170,7 +170,7 @@ func checkNumaflowCompatibility(numaflowVersion string, minNumaflowVersion strin numaflowConstraint := fmt.Sprintf(">= %s", minNumaflowVersion) if err = checkConstraint(numaflowVersionSemVer, numaflowConstraint); err != nil { return fmt.Errorf("numaflow version %s must be upgraded to at least %s, in order to work with current SDK version: %w", - numaflowVersionSemVer.String(), minNumaflowVersion, err) + numaflowVersionSemVer.String(), humanReadable(minNumaflowVersion), err) } return nil } @@ -193,7 +193,7 @@ func checkSDKCompatibility(sdkVersion string, sdkLanguage Language, minSupported if !c.Check(sdkVersionPEP440) { return fmt.Errorf("SDK version %s must be upgraded to at least %s, in order to 
work with current numaflow version: %w", - sdkVersionPEP440.String(), sdkRequiredVersion, err) + sdkVersionPEP440.String(), humanReadable(sdkRequiredVersion), err) } } else { sdkVersionSemVer, err := semver.NewVersion(sdkVersion) @@ -203,7 +203,7 @@ func checkSDKCompatibility(sdkVersion string, sdkLanguage Language, minSupported if err := checkConstraint(sdkVersionSemVer, sdkConstraint); err != nil { return fmt.Errorf("SDK version %s must be upgraded to at least %s, in order to work with current numaflow version: %w", - sdkVersionSemVer.String(), sdkRequiredVersion, err) + sdkVersionSemVer.String(), humanReadable(sdkRequiredVersion), err) } } } diff --git a/pkg/sdkclient/serverinfo/serverinfo_test.go b/pkg/sdkclient/serverinfo/serverinfo_test.go index e96919e243..ad7f06e690 100644 --- a/pkg/sdkclient/serverinfo/serverinfo_test.go +++ b/pkg/sdkclient/serverinfo/serverinfo_test.go @@ -97,12 +97,57 @@ func Test_CheckNumaflowCompatibility(t *testing.T) { errMessage string }{ { - name: "Test with incompatible numaflow version", + name: "Test with incompatible numaflow version, min is a stable version 1.1.7", numaflowVersion: "v1.1.6", - minNumaflowVersion: "1.1.7", + minNumaflowVersion: "1.1.7-z", shouldErr: true, errMessage: "numaflow version 1.1.6 must be upgraded to at least 1.1.7, in order to work with current SDK version", }, + { + name: "Test with compatible numaflow version - min is a stable version 1.1.6", + numaflowVersion: "1.1.7", + minNumaflowVersion: "1.1.6-z", + shouldErr: false, + }, + { + name: "Test with incompatible numaflow version - min is a stable version 1.1.7, numaflow version is a pre-release version", + numaflowVersion: "v1.1.7-rc1", + minNumaflowVersion: "1.1.7-z", + shouldErr: true, + errMessage: "numaflow version 1.1.7-rc1 must be upgraded to at least 1.1.7, in order to work with current SDK version", + }, + { + name: "Test with compatible numaflow version - min is a stable version 1.1.6, numaflow version is a pre-release version", + 
numaflowVersion: "1.1.7-rc1", + minNumaflowVersion: "1.1.6-z", + shouldErr: false, + }, + { + name: "Test with incompatible numaflow version, min is a rc version 1.1.7-rc1", + numaflowVersion: "v1.1.6", + minNumaflowVersion: "1.1.7-rc1", + shouldErr: true, + errMessage: "numaflow version 1.1.6 must be upgraded to at least 1.1.7-rc1, in order to work with current SDK version", + }, + { + name: "Test with compatible numaflow version - min is a rc version 1.1.6-rc1", + numaflowVersion: "1.1.7", + minNumaflowVersion: "1.1.6-rc1", + shouldErr: false, + }, + { + name: "Test with incompatible numaflow version - min is a rc version 1.1.7-rc2, numaflow version is a pre-release version", + numaflowVersion: "v1.1.7-rc1", + minNumaflowVersion: "1.1.7-rc2", + shouldErr: true, + errMessage: "numaflow version 1.1.7-rc1 must be upgraded to at least 1.1.7-rc2, in order to work with current SDK version", + }, + { + name: "Test with compatible numaflow version - min is a rc version 1.1.6-rc2, numaflow version is a pre-release version", + numaflowVersion: "1.1.6-rc2", + minNumaflowVersion: "1.1.6-rc2", + shouldErr: false, + }, { name: "Test with empty MinimumNumaflowVersion field", numaflowVersion: "1.1.7", @@ -111,10 +156,17 @@ func Test_CheckNumaflowCompatibility(t *testing.T) { errMessage: "server info does not contain minimum numaflow version. Upgrade to newer SDK version", }, { - name: "Test with compatible numaflow version", - numaflowVersion: "1.1.7", - minNumaflowVersion: "1.1.6", - shouldErr: false, + name: "Test with invalid numaflow version", + numaflowVersion: "", + minNumaflowVersion: "1.1.7", + shouldErr: true, + errMessage: "error parsing numaflow version: Invalid Semantic Version", + }, + { + name: "Test with empty min numaflow version", + numaflowVersion: "1.1.7", + shouldErr: true, + errMessage: "server info does not contain minimum numaflow version. 
Upgrade to newer SDK version", }, } for _, tt := range tests { @@ -130,12 +182,13 @@ func Test_CheckNumaflowCompatibility(t *testing.T) { } } -func Test_CheckSDKCompatibility(t *testing.T) { +// this test suite is to test SDK compatibility check when all the minimum-supported versions are stable releases +func Test_CheckSDKCompatibility_MinimumBeingStableReleases(t *testing.T) { var testMinimumSupportedSDKVersions = sdkConstraints{ - Go: "0.6.0-0", - Python: "0.6.0a", - Java: "0.6.0-0", - Rust: "0.1.0", + Python: "0.6.0rc100", + Go: "0.6.0-z", + Java: "0.6.0-z", + Rust: "0.1.0-z", } tests := []struct { name string @@ -146,37 +199,57 @@ func Test_CheckSDKCompatibility(t *testing.T) { errMessage string }{ { - name: "Test with incompatible Python version", + name: "python pre-release version is lower than minimum supported version", sdkVersion: "v0.5.3a1", sdkLanguage: Python, minimumSupportedSDKVersions: testMinimumSupportedSDKVersions, shouldErr: true, - errMessage: "SDK version 0.5.3a1 must be upgraded to at least 0.6.0a, in order to work with current numaflow version", + errMessage: "SDK version 0.5.3a1 must be upgraded to at least 0.6.0, in order to work with current numaflow version", }, { - name: "Test with compatible Python version", - sdkVersion: "v0.6.0a2", + name: "python pre-release version is compatible with minimum supported version", + sdkVersion: "v0.6.3a1", sdkLanguage: Python, minimumSupportedSDKVersions: testMinimumSupportedSDKVersions, shouldErr: false, }, { - name: "Test with incompatible Java version", - sdkVersion: "v0.4.3", - sdkLanguage: Java, + name: "python stable release version is compatible with minimum supported version", + sdkVersion: "v0.6.0", + sdkLanguage: Python, + minimumSupportedSDKVersions: testMinimumSupportedSDKVersions, + shouldErr: false, + }, + { + name: "python stable release version is lower than minimum supported version", + sdkVersion: "v0.5.3", + sdkLanguage: Python, minimumSupportedSDKVersions: 
testMinimumSupportedSDKVersions, shouldErr: true, - errMessage: "SDK version 0.4.3 must be upgraded to at least 0.6.0-0, in order to work with current numaflow version", + errMessage: "SDK version 0.5.3 must be upgraded to at least 0.6.0, in order to work with current numaflow version", + }, + { + name: "java release version is compatible with minimum supported version", + sdkVersion: "v0.7.3", + sdkLanguage: Java, + minimumSupportedSDKVersions: testMinimumSupportedSDKVersions, + shouldErr: false, }, { - name: "Test with compatible Go version", - sdkVersion: "v0.6.0-rc2", + name: "golang rc release version is compatible with minimum supported version", + sdkVersion: "v0.6.2-rc2", sdkLanguage: Go, minimumSupportedSDKVersions: testMinimumSupportedSDKVersions, shouldErr: false, + }, { + name: "rust pre-release version is compatible with minimum supported version", + sdkVersion: "v0.1.2-0.20240913163521-4910018031a7", + sdkLanguage: Rust, + minimumSupportedSDKVersions: testMinimumSupportedSDKVersions, + shouldErr: false, }, { - name: "Test with incompatible Rust version", + name: "rust release version is lower than minimum supported version", sdkVersion: "v0.0.3", sdkLanguage: Rust, minimumSupportedSDKVersions: testMinimumSupportedSDKVersions, @@ -184,12 +257,125 @@ func Test_CheckSDKCompatibility(t *testing.T) { errMessage: "SDK version 0.0.3 must be upgraded to at least 0.1.0, in order to work with current numaflow version", }, { - name: "Test with compatible Rust version", - sdkVersion: "v0.1.1", + name: "java rc release version is lower than minimum supported version", + sdkVersion: "v0.6.0-rc1", + sdkLanguage: Java, + minimumSupportedSDKVersions: testMinimumSupportedSDKVersions, + shouldErr: true, + errMessage: "SDK version 0.6.0-rc1 must be upgraded to at least 0.6.0, in order to work with current numaflow version", + }, + { + name: "golang pre-release version is lower than minimum supported version", + sdkVersion: "v0.6.0-0.20240913163521-4910018031a7", + 
sdkLanguage: Go, + minimumSupportedSDKVersions: testMinimumSupportedSDKVersions, + shouldErr: true, + errMessage: "SDK version 0.6.0-0.20240913163521-4910018031a7 must be upgraded to at least 0.6.0, in order to work with current numaflow version", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := checkSDKCompatibility(tt.sdkVersion, tt.sdkLanguage, tt.minimumSupportedSDKVersions) + if tt.shouldErr { + assert.Error(t, err, "Expected error") + assert.Contains(t, err.Error(), tt.errMessage) + } else { + assert.NoError(t, err, "Expected no error") + } + }) + } +} + +// this test suite is to test SDK compatibility check when all the minimum-supported versions are pre-releases +func Test_CheckSDKCompatibility_MinimumBeingPreReleases(t *testing.T) { + var testMinimumSupportedSDKVersions = sdkConstraints{ + Python: "0.6.0b1", + Go: "0.6.0-rc2", + Java: "0.6.0-rc2", + Rust: "0.1.0-rc3", + } + tests := []struct { + name string + sdkVersion string + sdkLanguage Language + minimumSupportedSDKVersions sdkConstraints + shouldErr bool + errMessage string + }{ + { + name: "python pre-release version is lower than minimum supported version", + sdkVersion: "v0.5.3a1", + sdkLanguage: Python, + minimumSupportedSDKVersions: testMinimumSupportedSDKVersions, + shouldErr: true, + errMessage: "SDK version 0.5.3a1 must be upgraded to at least 0.6.0b1, in order to work with current numaflow version", + }, + { + name: "python pre-release version is compatible with minimum supported version", + sdkVersion: "v0.6.3a1", + sdkLanguage: Python, + minimumSupportedSDKVersions: testMinimumSupportedSDKVersions, + shouldErr: false, + }, + { + name: "python stable release version is compatible with minimum supported version", + sdkVersion: "v0.6.0", + sdkLanguage: Python, + minimumSupportedSDKVersions: testMinimumSupportedSDKVersions, + shouldErr: false, + }, + { + name: "python stable release version is lower than minimum supported version", + sdkVersion: "v0.5.3", + 
sdkLanguage: Python, + minimumSupportedSDKVersions: testMinimumSupportedSDKVersions, + shouldErr: true, + errMessage: "SDK version 0.5.3 must be upgraded to at least 0.6.0b1, in order to work with current numaflow version", + }, + { + name: "java release version is compatible with minimum supported version", + sdkVersion: "v0.7.3", + sdkLanguage: Java, + minimumSupportedSDKVersions: testMinimumSupportedSDKVersions, + shouldErr: false, + }, + { + name: "golang rc release version is compatible with minimum supported version", + sdkVersion: "v0.6.2-rc2", + sdkLanguage: Go, + minimumSupportedSDKVersions: testMinimumSupportedSDKVersions, + shouldErr: false, + }, { + name: "rust pre-release version is compatible with minimum supported version", + sdkVersion: "v0.1.2-0.20240913163521-4910018031a7", sdkLanguage: Rust, minimumSupportedSDKVersions: testMinimumSupportedSDKVersions, shouldErr: false, }, + { + name: "rust release version is lower than minimum supported version", + sdkVersion: "v0.0.3", + sdkLanguage: Rust, + minimumSupportedSDKVersions: testMinimumSupportedSDKVersions, + shouldErr: true, + errMessage: "SDK version 0.0.3 must be upgraded to at least 0.1.0-rc3, in order to work with current numaflow version", + }, + { + name: "java rc release version is lower than minimum supported version", + sdkVersion: "v0.6.0-rc1", + sdkLanguage: Java, + minimumSupportedSDKVersions: testMinimumSupportedSDKVersions, + shouldErr: true, + errMessage: "SDK version 0.6.0-rc1 must be upgraded to at least 0.6.0-rc2, in order to work with current numaflow version", + }, + { + name: "golang pre-release version is lower than minimum supported version", + sdkVersion: "v0.6.0-0.20240913163521-4910018031a7", + sdkLanguage: Go, + minimumSupportedSDKVersions: testMinimumSupportedSDKVersions, + shouldErr: true, + errMessage: "SDK version 0.6.0-0.20240913163521-4910018031a7 must be upgraded to at least 0.6.0-rc2, in order to work with current numaflow version", + }, } for _, tt := range tests 
{ t.Run(tt.name, func(t *testing.T) { diff --git a/pkg/sdkclient/serverinfo/types.go b/pkg/sdkclient/serverinfo/types.go index fc8fdd9b81..9e4a152d03 100644 --- a/pkg/sdkclient/serverinfo/types.go +++ b/pkg/sdkclient/serverinfo/types.go @@ -16,6 +16,8 @@ limitations under the License. package serverinfo +import "strings" + type Language string const ( @@ -27,11 +29,76 @@ const ( type sdkConstraints map[Language]string +/* +minimumSupportedSDKVersions is the minimum supported version of each SDK for the current numaflow version. +It is used to check if the SDK is compatible with the current numaflow version. + +NOTE: when updating it, please also update MINIMUM_SUPPORTED_SDK_VERSIONS for mono vertex at rust/monovertex/server_info.rs + +Python SDK versioning follows PEP 440 (https://www.python.org/dev/peps/pep-0440/). +The other SDKs follow the semver versioning scheme (https://semver.org/). + +How to update this map: + +There are two types of releases, one is the stable release and the other is the pre-release. +Below are the typical formats of the versioning scheme: + + +------------------+-------------------------+-----------------------------+ + | | PEP 440 | semver | + +------------------+-------------------------+-----------------------------+ + | stable | 0.8.0 | 0.8.0 | + +------------------+-------------------------+-----------------------------+ + | pre-release | 0.8.0a1, | 0.8.0-rc1, | + | | 0.8.0b3, | 0.8.0-0.20240913163521, | + | | or 0.8.0rc1 | etc. | + +------------------+-------------------------+-----------------------------+ + +There are two cases to consider when updating the map: + +1. The minimum supported version is a pre-release version. +In this case, directly put the exact pre-release version in the map. +E.g., if the minimum supported version is "0.8.0-rc1", then put "0.8.0-rc1" for java, go, rust. +"0.8.0b1", "0.8.0b1" for python. +2. The minimum supported version is a stable version. 
+In this case, put (almost) the largest available pre-release version of the stable version in the map. +This is because the go semver library considers pre-releases to be invalid if the constraint range does not include pre-releases. +Therefore, we have to put a pre-release version of the stable version in the map and choose the largest one. +For python, we use "rc100" as the largest pre-release version. For go, rust, we use "-z" as the largest pre-release version. +E.g., if the minimum supported version is "0.8.0", then put "0.8.0-z" for java, go, rust, "0.8.0rc100" for python. +A constraint ">=0.8.0-z" will match any pre-release version of 0.8.0, including "0.8.0-rc1", "0.8.0-rc2", etc. + +More details about version comparison can be found in the PEP 440 and semver documentation. +*/ var minimumSupportedSDKVersions = sdkConstraints{ - Go: "0.8.0", - Python: "0.8.0", - Java: "0.8.0", - Rust: "0.1.0", + // meaning the minimum supported python SDK version is 0.8.0 + Python: "0.8.0rc100", + // meaning the minimum supported go SDK version is 0.8.0 + Go: "0.8.0-z", + // meaning the minimum supported java SDK version is 0.8.0 + Java: "0.8.0-z", + // meaning the minimum supported rust SDK version is 0.1.0 + Rust: "0.1.0-z", +} + +// humanReadable returns the human-readable minimum supported version. +// it's used for logging purposes. +// it translates the version we used in the constraints to the real minimum supported version. +// e.g., if the given version is "0.8.0rc100", human-readable version is "0.8.0". +// if the given version is "0.8.0-z", "0.8.0". +// if "0.8.0-rc1", "0.8.0-rc1". 
+func humanReadable(ver string) string { + if ver == "" { + return "" + } + // semver + if strings.HasSuffix(ver, "-z") { + return ver[:len(ver)-2] + } + // PEP 440 + if strings.HasSuffix(ver, "rc100") { + return ver[:len(ver)-5] + } + return ver } type Protocol string diff --git a/pkg/shared/clients/nats/test/server.go b/pkg/shared/clients/nats/test/server.go index 556a7390c2..b64db98645 100644 --- a/pkg/shared/clients/nats/test/server.go +++ b/pkg/shared/clients/nats/test/server.go @@ -27,7 +27,7 @@ import ( func RunNatsServer(t *testing.T) *server.Server { t.Helper() opts := natstestserver.DefaultTestOptions - opts.Port = 4223 + opts.Port = -1 // Use random port to avoid conflicts return natstestserver.RunServer(&opts) } @@ -35,7 +35,7 @@ func RunNatsServer(t *testing.T) *server.Server { func RunJetStreamServer(t *testing.T) *server.Server { t.Helper() opts := natstestserver.DefaultTestOptions - opts.Port = -1 // Random port + opts.Port = -1 // Use random port to avoid conflicts opts.JetStream = true storeDir, err := os.MkdirTemp("", "") if err != nil { diff --git a/rust/monovertex/src/server_info.rs b/rust/monovertex/src/server_info.rs index 225218b158..98484869fd 100644 --- a/rust/monovertex/src/server_info.rs +++ b/rust/monovertex/src/server_info.rs @@ -77,24 +77,6 @@ pub async fn check_for_server_compatibility( Ok(()) } -/// Checks if the given version meets the specified constraint. 
-fn check_constraint(version: &Version, constraint: &str) -> error::Result<()> { - // Parse the given constraint as a semantic version requirement - let version_req = VersionReq::parse(constraint).map_err(|e| { - Error::ServerInfoError(format!( - "Error parsing constraint: {}, constraint string: {}", - e, constraint - )) - })?; - - // Check if the provided version satisfies the parsed constraint - if !version_req.matches(version) { - return Err(Error::ServerInfoError("invalid version".to_string())); - } - - Ok(()) -} - /// Checks if the current numaflow version is compatible with the given minimum numaflow version. fn check_numaflow_compatibility( numaflow_version: &str, @@ -105,8 +87,11 @@ fn check_numaflow_compatibility( return Err(Error::ServerInfoError("invalid version".to_string())); } + // Strip the 'v' prefix if present. + let numaflow_version_stripped = numaflow_version.trim_start_matches('v'); + // Parse the provided numaflow version as a semantic version - let numaflow_version_semver = Version::parse(numaflow_version) + let numaflow_version_semver = Version::parse(numaflow_version_stripped) .map_err(|e| Error::ServerInfoError(format!("Error parsing Numaflow version: {}", e)))?; // Create a version constraint based on the minimum numaflow version @@ -114,7 +99,7 @@ fn check_numaflow_compatibility( check_constraint(&numaflow_version_semver, &numaflow_constraint).map_err(|e| { Error::ServerInfoError(format!( "numaflow version {} must be upgraded to at least {}, in order to work with current SDK version {}", - numaflow_version_semver, min_numaflow_version, e + numaflow_version_semver, human_readable(min_numaflow_version), e )) }) } @@ -141,7 +126,7 @@ fn check_sdk_compatibility( if !specifiers.contains(&sdk_version_pep440) { return Err(Error::ServerInfoError(format!( "SDK version {} must be upgraded to at least {}, in order to work with the current numaflow version", - sdk_version_pep440, sdk_required_version + sdk_version_pep440, 
human_readable(sdk_required_version) ))); } } else { @@ -156,7 +141,7 @@ fn check_sdk_compatibility( check_constraint(&sdk_version_semver, &sdk_constraint).map_err(|_| { Error::ServerInfoError(format!( "SDK version {} must be upgraded to at least {}, in order to work with the current numaflow version", - sdk_version_semver, sdk_required_version + sdk_version_semver, human_readable(sdk_required_version) )) })?; } @@ -176,6 +161,86 @@ fn check_sdk_compatibility( Ok(()) } +// human_readable returns the human-readable minimum supported version. +// it's used for logging purposes. +// it translates the version we used in the constraints to the real minimum supported version. +// e.g., if the given version is "0.8.0rc100", human-readable version is "0.8.0". +// if the given version is "0.8.0-z", "0.8.0". +// if "0.8.0-rc1", "0.8.0-rc1". +fn human_readable(ver: &str) -> String { + if ver.is_empty() { + return String::new(); + } + // semver + if ver.ends_with("-z") { + return ver[..ver.len() - 2].to_string(); + } + // PEP 440 + if ver.ends_with("rc100") { + return ver[..ver.len() - 5].to_string(); + } + ver.to_string() +} + +/// Checks if the given version meets the specified constraint. +fn check_constraint(version: &Version, constraint: &str) -> error::Result<()> { + let binding = version.to_string(); + // extract the major.minor.patch version + let mmp_version = Version::parse(binding.split('-').next().unwrap_or_default()).map_err(|e| { + Error::ServerInfoError(format!("Error parsing version: {}, version string: {}", e, binding)) + })?; + let mmp_ver_str_constraint = trim_after_dash(constraint.trim_start_matches(">=")); + let mmp_ver_constraint = format!(">={}", mmp_ver_str_constraint); + + // "-z" is used to indicate the minimum supported version is a stable version + // the reason why we choose the letter z is that it can represent the largest pre-release version. + // e.g., 0.8.0-z means the minimum supported version is 0.8.0. 
+ if constraint.contains("-z") { + if !version.to_string().starts_with(mmp_ver_str_constraint) { + // if the version is prefixed with a different mmp version, + // rust semver lib can't figure out the correct order. + // to work around, we compare the mmp version only. + // e.g., rust semver doesn't treat 0.9.0-rc* as larger than 0.8.0. + // to work around, instead of comparing 0.9.0-rc* with 0.8.0, + // we compare 0.9.0 with 0.8.0. + return check_constraint(&mmp_version, &mmp_ver_constraint); + } + return check_constraint(version, &mmp_ver_constraint); + } else if constraint.contains("-") { + // if the constraint doesn't contain "-z", but contains "-", it's a pre-release version. + if !version.to_string().starts_with(mmp_ver_str_constraint) { + // similar reason as above, we compare the mmp version only. + return check_constraint(&mmp_version, &mmp_ver_constraint); + } + } + + // TODO - remove all the extra check above once rust semver handles pre-release comparison the same way as golang. + // https://github.com/dtolnay/semver/issues/323 + + // Parse the given constraint as a semantic version requirement + let version_req = VersionReq::parse(constraint).map_err(|e| { + Error::ServerInfoError(format!( + "Error parsing constraint: {}, constraint string: {}", + e, constraint + )) + })?; + + // Check if the provided version satisfies the parsed constraint + if !version_req.matches(version) { + return Err(Error::ServerInfoError("invalid version".to_string())); + } + + Ok(()) +} + +fn trim_after_dash(input: &str) -> &str { + if let Some(pos) = input.find('-') { + &input[..pos] + } else { + input + } +} + /// Reads the server info file and returns the parsed ServerInfo struct. /// The cancellation token is used to stop ready-check of server_info file in case it is missing. /// This cancellation token is closed via the global shutdown handler. 
@@ -257,11 +322,12 @@ mod version { static MINIMUM_SUPPORTED_SDK_VERSIONS: Lazy = Lazy::new(|| { // TODO: populate this from a static file and make it part of the release process // the value of the map matches `minimumSupportedSDKVersions` in pkg/sdkclient/serverinfo/types.go + // please follow the instruction there to update the value let mut m = HashMap::new(); - m.insert("go".to_string(), "0.8.0".to_string()); - m.insert("python".to_string(), "0.8.0".to_string()); - m.insert("java".to_string(), "0.8.0".to_string()); - m.insert("rust".to_string(), "0.1.0".to_string()); + m.insert("go".to_string(), "0.8.0-z".to_string()); + m.insert("python".to_string(), "0.8.0rc100".to_string()); + m.insert("java".to_string(), "0.8.0-z".to_string()); + m.insert("rust".to_string(), "0.1.0-z".to_string()); m }); @@ -397,22 +463,32 @@ mod tests { Ok(()) } - // Helper function to create a SdkConstraints struct - fn create_sdk_constraints() -> version::SdkConstraints { + // Helper function to create a SdkConstraints struct with minimum supported SDK versions all being stable releases + fn create_sdk_constraints_stable_versions() -> SdkConstraints { let mut constraints = HashMap::new(); - constraints.insert("python".to_string(), "1.2.0".to_string()); - constraints.insert("java".to_string(), "2.0.0".to_string()); - constraints.insert("go".to_string(), "0.10.0".to_string()); - constraints.insert("rust".to_string(), "0.1.0".to_string()); + constraints.insert("python".to_string(), "1.2.0rc100".to_string()); + constraints.insert("java".to_string(), "2.0.0-z".to_string()); + constraints.insert("go".to_string(), "0.10.0-z".to_string()); + constraints.insert("rust".to_string(), "0.1.0-z".to_string()); + constraints + } + + // Helper function to create a SdkConstraints struct with minimum supported SDK versions all being pre-releases + fn create_sdk_constraints_pre_release_versions() -> SdkConstraints { + let mut constraints = HashMap::new(); + constraints.insert("python".to_string(), 
"1.2.0b2".to_string()); + constraints.insert("java".to_string(), "2.0.0-rc2".to_string()); + constraints.insert("go".to_string(), "0.10.0-rc2".to_string()); + constraints.insert("rust".to_string(), "0.1.0-rc3".to_string()); constraints } #[tokio::test] - async fn test_sdk_compatibility_python_valid() { - let sdk_version = "v1.3.0"; + async fn test_sdk_compatibility_min_stable_python_stable_release_valid() { + let sdk_version = "1.3.0"; let sdk_language = "python"; - let min_supported_sdk_versions = create_sdk_constraints(); + let min_supported_sdk_versions = create_sdk_constraints_stable_versions(); let result = check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); @@ -420,23 +496,53 @@ mod tests { } #[tokio::test] - async fn test_sdk_compatibility_python_invalid() { + async fn test_sdk_compatibility_min_stable_python_stable_release_invalid() { let sdk_version = "1.1.0"; let sdk_language = "python"; - let min_supported_sdk_versions = create_sdk_constraints(); + let min_supported_sdk_versions = create_sdk_constraints_stable_versions(); let result = check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); assert!(result.is_err()); + assert!( + result.unwrap_err().to_string().contains( + "SDK version 1.1.0 must be upgraded to at least 1.2.0, in order to work with the current numaflow version")); + } + + #[tokio::test] + async fn test_sdk_compatibility_min_stable_python_pre_release_valid() { + let sdk_version = "v1.3.0a1"; + let sdk_language = "python"; + + let min_supported_sdk_versions = create_sdk_constraints_stable_versions(); + let result = + check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + + assert!(result.is_ok()); } #[tokio::test] - async fn test_sdk_compatibility_java_valid() { + async fn test_sdk_compatibility_min_stable_python_pre_release_invalid() { + let sdk_version = "1.1.0a1"; + let sdk_language = "python"; + + let min_supported_sdk_versions = 
create_sdk_constraints_stable_versions(); + let result = + check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + + assert!(result.is_err()); + assert!( + result.unwrap_err().to_string().contains( + "SDK version 1.1.0a1 must be upgraded to at least 1.2.0, in order to work with the current numaflow version")); + } + + #[tokio::test] + async fn test_sdk_compatibility_min_stable_java_stable_release_valid() { let sdk_version = "v2.1.0"; let sdk_language = "java"; - let min_supported_sdk_versions = create_sdk_constraints(); + let min_supported_sdk_versions = create_sdk_constraints_stable_versions(); let result = check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); @@ -444,23 +550,26 @@ mod tests { } #[tokio::test] - async fn test_sdk_compatibility_java_invalid() { - let sdk_version = "1.5.0"; + async fn test_sdk_compatibility_min_stable_java_rc_release_invalid() { + let sdk_version = "2.0.0-rc1"; let sdk_language = "java"; - let min_supported_sdk_versions = create_sdk_constraints(); + let min_supported_sdk_versions = create_sdk_constraints_stable_versions(); let result = check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); assert!(result.is_err()); + assert!( + result.unwrap_err().to_string().contains( + "SDK version 2.0.0-rc1 must be upgraded to at least 2.0.0, in order to work with the current numaflow version")); } #[tokio::test] - async fn test_sdk_compatibility_go_valid() { - let sdk_version = "0.11.0"; + async fn test_sdk_compatibility_min_stable_go_rc_release_valid() { + let sdk_version = "0.11.0-rc2"; let sdk_language = "go"; - let min_supported_sdk_versions = create_sdk_constraints(); + let min_supported_sdk_versions = create_sdk_constraints_stable_versions(); let result = check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); @@ -468,23 +577,26 @@ mod tests { } #[tokio::test] - async fn test_sdk_compatibility_go_invalid() { - let sdk_version = "0.9.0"; 
+ async fn test_sdk_compatibility_min_stable_go_pre_release_invalid() { + let sdk_version = "0.10.0-0.20240913163521-4910018031a7"; let sdk_language = "go"; - let min_supported_sdk_versions = create_sdk_constraints(); + let min_supported_sdk_versions = create_sdk_constraints_stable_versions(); let result = check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); assert!(result.is_err()); + assert!( + result.unwrap_err().to_string().contains( + "SDK version 0.10.0-0.20240913163521-4910018031a7 must be upgraded to at least 0.10.0, in order to work with the current numaflow version")); } #[tokio::test] - async fn test_sdk_compatibility_rust_valid() { - let sdk_version = "v0.1.0"; + async fn test_sdk_compatibility_min_stable_rust_pre_release_valid() { + let sdk_version = "v0.1.1-0.20240913163521-4910018031a7"; let sdk_language = "rust"; - let min_supported_sdk_versions = create_sdk_constraints(); + let min_supported_sdk_versions = create_sdk_constraints_stable_versions(); let result = check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); @@ -492,21 +604,227 @@ mod tests { } #[tokio::test] - async fn test_sdk_compatibility_rust_invalid() { + async fn test_sdk_compatibility_min_stable_rust_stable_release_invalid() { let sdk_version = "0.0.9"; let sdk_language = "rust"; - let min_supported_sdk_versions = create_sdk_constraints(); + let min_supported_sdk_versions = create_sdk_constraints_stable_versions(); + let result = + check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + + assert!(result.is_err()); + assert!( + result.unwrap_err().to_string().contains( + "ServerInfoError Error - SDK version 0.0.9 must be upgraded to at least 0.1.0, in order to work with the current numaflow version")); + } + + #[tokio::test] + async fn test_sdk_compatibility_min_pre_release_python_stable_release_valid() { + let sdk_version = "1.3.0"; + let sdk_language = "python"; + + let min_supported_sdk_versions = 
create_sdk_constraints_pre_release_versions(); + let result = + check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + + assert!(result.is_ok()); + } + + #[tokio::test] + async fn test_sdk_compatibility_min_pre_release_python_stable_release_invalid() { + let sdk_version = "1.1.0"; + let sdk_language = "python"; + + let min_supported_sdk_versions = create_sdk_constraints_pre_release_versions(); + let result = + check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + + assert!(result.is_err()); + assert!( + result.unwrap_err().to_string().contains( + "SDK version 1.1.0 must be upgraded to at least 1.2.0b2, in order to work with the current numaflow version")); + } + + #[tokio::test] + async fn test_sdk_compatibility_min_pre_release_python_pre_release_valid() { + let sdk_version = "v1.3.0a1"; + let sdk_language = "python"; + + let min_supported_sdk_versions = create_sdk_constraints_pre_release_versions(); + let result = + check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + + assert!(result.is_ok()); + } + + #[tokio::test] + async fn test_sdk_compatibility_min_pre_release_python_pre_release_invalid() { + let sdk_version = "1.2.0a1"; + let sdk_language = "python"; + + let min_supported_sdk_versions = create_sdk_constraints_pre_release_versions(); let result = check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); assert!(result.is_err()); + assert!( + result.unwrap_err().to_string().contains( + "SDK version 1.2.0a1 must be upgraded to at least 1.2.0b2, in order to work with the current numaflow version")); } #[tokio::test] - async fn test_numaflow_compatibility_valid() { - let numaflow_version = "1.4.0"; - let min_numaflow_version = "1.3.0"; + async fn test_sdk_compatibility_min_pre_release_java_stable_release_valid() { + let sdk_version = "v2.1.0"; + let sdk_language = "java"; + + let min_supported_sdk_versions = create_sdk_constraints_pre_release_versions(); 
+ let result = + check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + + assert!(result.is_ok()); + } + + #[tokio::test] + async fn test_sdk_compatibility_min_pre_release_java_rc_release_invalid() { + let sdk_version = "2.0.0-rc1"; + let sdk_language = "java"; + + let min_supported_sdk_versions = create_sdk_constraints_pre_release_versions(); + let result = + check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + + assert!(result.is_err()); + assert!( + result.unwrap_err().to_string().contains( + "SDK version 2.0.0-rc1 must be upgraded to at least 2.0.0-rc2, in order to work with the current numaflow version")); + } + + #[tokio::test] + async fn test_sdk_compatibility_min_pre_release_go_rc_release_valid() { + let sdk_version = "0.11.0-rc2"; + let sdk_language = "go"; + + let min_supported_sdk_versions = create_sdk_constraints_pre_release_versions(); + let result = + check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + + assert!(result.is_ok()); + } + + #[tokio::test] + async fn test_sdk_compatibility_min_pre_release_go_pre_release_invalid() { + let sdk_version = "0.10.0-0.20240913163521-4910018031a7"; + let sdk_language = "go"; + + let min_supported_sdk_versions = create_sdk_constraints_pre_release_versions(); + let result = + check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + + assert!(result.is_err()); + assert!( + result.unwrap_err().to_string().contains( + "SDK version 0.10.0-0.20240913163521-4910018031a7 must be upgraded to at least 0.10.0-rc2, in order to work with the current numaflow version")); + } + + #[tokio::test] + async fn test_sdk_compatibility_min_pre_release_rust_pre_release_valid() { + let sdk_version = "v0.1.1-0.20240913163521-4910018031a7"; + let sdk_language = "rust"; + + let min_supported_sdk_versions = create_sdk_constraints_pre_release_versions(); + let result = + check_sdk_compatibility(sdk_version, sdk_language, 
&min_supported_sdk_versions); + + assert!(result.is_ok()); + } + + #[tokio::test] + async fn test_sdk_compatibility_min_pre_release_rust_stable_release_invalid() { + let sdk_version = "0.0.9"; + let sdk_language = "rust"; + + let min_supported_sdk_versions = create_sdk_constraints_pre_release_versions(); + let result = + check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + + assert!(result.is_err()); + assert!( + result.unwrap_err().to_string().contains( + "ServerInfoError Error - SDK version 0.0.9 must be upgraded to at least 0.1.0-rc3, in order to work with the current numaflow version")); + } + + #[tokio::test] + async fn test_numaflow_compatibility_invalid_version_string() { + let numaflow_version = "v1.abc.7"; + let min_numaflow_version = "1.1.6-z"; + + let result = check_numaflow_compatibility(numaflow_version, min_numaflow_version); + + assert!(result.is_err()); + assert!(result.unwrap_err().to_string().contains( + "Error parsing Numaflow version: unexpected character 'a' while parsing minor version number")); + } + + #[tokio::test] + async fn test_numaflow_compatibility_min_stable_version_stable_valid() { + let numaflow_version = "v1.1.7"; + let min_numaflow_version = "1.1.6-z"; + + let result = check_numaflow_compatibility(numaflow_version, min_numaflow_version); + + assert!(result.is_ok()); + } + + #[tokio::test] + async fn test_numaflow_compatibility_min_stable_version_stable_invalid() { + let numaflow_version = "v1.1.6"; + let min_numaflow_version = "1.1.7-z"; + + let result = check_numaflow_compatibility(numaflow_version, min_numaflow_version); + + assert!(result.is_err()); + assert!(result.unwrap_err().to_string().contains( + "numaflow version 1.1.6 must be upgraded to at least 1.1.7, in order to work with current SDK version")); + } + + #[tokio::test] + async fn test_numaflow_compatibility_min_stable_version_pre_release_valid() { + let numaflow_version = "1.1.7-rc1"; + let min_numaflow_version = "1.1.6-z"; + + let result 
= check_numaflow_compatibility(numaflow_version, min_numaflow_version); + + assert!(result.is_ok()); + } + + #[tokio::test] + async fn test_numaflow_compatibility_min_stable_version_pre_release_invalid() { + let numaflow_version = "v1.1.6-rc1"; + let min_numaflow_version = "1.1.6-z"; + + let result = check_numaflow_compatibility(numaflow_version, min_numaflow_version); + + assert!(result.is_err()); + assert!(result.unwrap_err().to_string().contains( + "numaflow version 1.1.6-rc1 must be upgraded to at least 1.1.6, in order to work with current SDK version")); + } + + #[tokio::test] + async fn test_numaflow_compatibility_min_rc_version_stable_invalid() { + let numaflow_version = "v1.1.6"; + let min_numaflow_version = "1.1.7-rc1"; + + let result = check_numaflow_compatibility(numaflow_version, min_numaflow_version); + + assert!(result.is_err()); + assert!(result.unwrap_err().to_string().contains( + "numaflow version 1.1.6 must be upgraded to at least 1.1.7-rc1, in order to work with current SDK version")); + } + + #[tokio::test] + async fn test_numaflow_compatibility_min_rc_version_stable_valid() { + let numaflow_version = "1.1.7"; + let min_numaflow_version = "1.1.6-rc1"; let result = check_numaflow_compatibility(numaflow_version, min_numaflow_version); @@ -514,13 +832,25 @@ mod tests { } #[tokio::test] - async fn test_numaflow_compatibility_invalid() { - let numaflow_version = "1.2.0"; - let min_numaflow_version = "1.3.0"; + async fn test_numaflow_compatibility_min_rc_version_pre_release_valid() { + let numaflow_version = "1.1.7-rc3"; + let min_numaflow_version = "1.1.7-rc2"; + + let result = check_numaflow_compatibility(numaflow_version, min_numaflow_version); + + assert!(result.is_ok()); + } + + #[tokio::test] + async fn test_numaflow_compatibility_min_rc_version_pre_release_invalid() { + let numaflow_version = "v1.1.6-rc1"; + let min_numaflow_version = "1.1.6-rc2"; let result = check_numaflow_compatibility(numaflow_version, min_numaflow_version); 
assert!(result.is_err()); + assert!(result.unwrap_err().to_string().contains( + "numaflow version 1.1.6-rc1 must be upgraded to at least 1.1.6-rc2, in order to work with current SDK version")); } #[tokio::test] @@ -591,7 +921,7 @@ mod tests { #[tokio::test] async fn test_read_server_info_success() { // Create a temporary directory - let dir = tempfile::tempdir().unwrap(); + let dir = tempdir().unwrap(); let file_path = dir.path().join("server_info.txt"); let cln_token = CancellationToken::new(); @@ -632,7 +962,7 @@ mod tests { #[tokio::test] async fn test_read_server_info_retry_limit() { // Create a temporary directory - let dir = tempfile::tempdir().unwrap(); + let dir = tempdir().unwrap(); let file_path = dir.path().join("server_info.txt"); // Write a partial test file not ending with END marker @@ -676,60 +1006,4 @@ mod tests { let _parsed_server_info: ServerInfo = serde_json::from_str(&json_data).expect("Failed to parse JSON"); } - - #[test] - fn test_sdk_compatibility_go_version_with_v_prefix() { - let sdk_version = "v0.11.0"; - let sdk_language = "go"; - - let mut min_supported_sdk_versions = HashMap::new(); - min_supported_sdk_versions.insert("go".to_string(), "0.10.0".to_string()); - - let result = - check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); - - assert!(result.is_ok()); - } - - #[test] - fn test_sdk_compatibility_go_version_without_v_prefix() { - let sdk_version = "0.11.0"; - let sdk_language = "go"; - - let mut min_supported_sdk_versions = HashMap::new(); - min_supported_sdk_versions.insert("go".to_string(), "0.10.0".to_string()); - - let result = - check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); - - assert!(result.is_ok()); - } - - #[test] - fn test_sdk_compatibility_go_version_with_v_prefix_invalid() { - let sdk_version = "v0.9.0"; - let sdk_language = "go"; - - let mut min_supported_sdk_versions = HashMap::new(); - min_supported_sdk_versions.insert("go".to_string(), 
"0.10.0".to_string()); - - let result = - check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); - - assert!(result.is_err()); - } - - #[test] - fn test_sdk_compatibility_go_version_without_v_prefix_invalid() { - let sdk_version = "0.9.0"; - let sdk_language = "go"; - - let mut min_supported_sdk_versions = HashMap::new(); - min_supported_sdk_versions.insert("go".to_string(), "0.10.0".to_string()); - - let result = - check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); - - assert!(result.is_err()); - } } From 669dc186a0d885df92716b627ded236fab7476e7 Mon Sep 17 00:00:00 2001 From: Julie Vogelman Date: Fri, 20 Sep 2024 14:09:44 -0700 Subject: [PATCH 065/188] Fix: Use Merge patch rather than json patch for `pause-timestamp` annotation apply (#2078) Signed-off-by: Julie Vogelman --- pkg/reconciler/pipeline/controller.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/reconciler/pipeline/controller.go b/pkg/reconciler/pipeline/controller.go index 5776c2873c..29dab84526 100644 --- a/pkg/reconciler/pipeline/controller.go +++ b/pkg/reconciler/pipeline/controller.go @@ -831,8 +831,8 @@ func (r *pipelineReconciler) resumePipeline(ctx context.Context, pl *dfv1.Pipeli func (r *pipelineReconciler) pausePipeline(ctx context.Context, pl *dfv1.Pipeline) (bool, error) { // check that annotations / pause timestamp annotation exist if pl.GetAnnotations() == nil || pl.GetAnnotations()[dfv1.KeyPauseTimestamp] == "" { - patchJson := `[{"op": "add", "path": "` + pauseTimestampPath + `", "value": "` + time.Now().Format(time.RFC3339) + `"}]` - if err := r.client.Patch(ctx, pl, client.RawPatch(types.JSONPatchType, []byte(patchJson))); err != nil && !apierrors.IsNotFound(err) { + patchJson := `{"metadata":{"annotations":{"` + dfv1.KeyPauseTimestamp + `":"` + time.Now().Format(time.RFC3339) + `"}}}` + if err := r.client.Patch(ctx, pl, client.RawPatch(types.MergePatchType, []byte(patchJson))); err != nil && 
!apierrors.IsNotFound(err) { return true, err } } From 40e960a44184c876173e6bdf69b216df6296bf73 Mon Sep 17 00:00:00 2001 From: Yashash H L Date: Mon, 23 Sep 2024 08:25:36 +0530 Subject: [PATCH 066/188] feat: Bidirectional Streaming for User Defined Source (#2056) Signed-off-by: Yashash H L Signed-off-by: Vigith Maurice Co-authored-by: Vigith Maurice --- .github/workflows/ci.yaml | 8 +- .github/workflows/nightly-build.yml | 5 +- .github/workflows/release.yml | 4 +- go.mod | 27 +- go.sum | 54 ++- pkg/apis/proto/source/v1/source.proto | 59 ++- pkg/daemon/client/grpc_daemon_client.go | 2 +- pkg/daemon/client/grpc_daemon_client_test.go | 7 - pkg/mvtxdaemon/client/grpc_client.go | 2 +- .../pbq/wal/unaligned/fs/compactor_test.go | 3 - pkg/sdkclient/grpc/grpc_utils.go | 6 +- pkg/sdkclient/reducer/client.go | 2 +- pkg/sdkclient/source/client.go | 162 ++++++-- pkg/sdkclient/source/client_test.go | 105 +++-- pkg/sdkclient/source/interface.go | 2 +- pkg/sources/source.go | 2 +- pkg/sources/udsource/grpc_udsource.go | 14 +- pkg/sources/udsource/grpc_udsource_test.go | 113 +++--- pkg/sources/udsource/user_defined_source.go | 4 - rust/Cargo.lock | 174 +++++++- rust/monovertex/Cargo.toml | 7 +- rust/monovertex/proto/source.proto | 59 ++- rust/monovertex/src/config.rs | 66 ++- rust/monovertex/src/forwarder.rs | 369 +++++++++-------- rust/monovertex/src/lib.rs | 376 ++++++++---------- rust/monovertex/src/message.rs | 25 +- rust/monovertex/src/metrics.rs | 222 ++++++++++- rust/monovertex/src/server_info.rs | 37 +- rust/monovertex/src/shared.rs | 20 +- rust/monovertex/src/sink.rs | 104 +---- rust/monovertex/src/source.rs | 282 +++++++------ rust/monovertex/src/startup.rs | 349 ++++++++++++++++ rust/monovertex/src/transformer.rs | 88 +--- rust/servesink/Cargo.toml | 4 +- rust/serving/src/app/tracker.rs | 2 +- rust/src/bin/main.rs | 4 +- .../mono-vertex-with-transformer.yaml | 2 + .../testdata/simple-source-go.yaml | 1 - .../testdata/simple-source-java.yaml | 5 +- 
.../testdata/simple-source-python.yaml | 3 +- .../testdata/simple-source-rust.yaml | 26 ++ test/udsource-e2e/udsource_test.go | 12 +- 42 files changed, 1832 insertions(+), 986 deletions(-) create mode 100644 rust/monovertex/src/startup.rs create mode 100644 test/udsource-e2e/testdata/simple-source-rust.yaml diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 9eaa4de89b..e9861ca408 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -113,8 +113,8 @@ jobs: with: tool: grcov - - name: Install Protobuf Compiler - run: sudo apt-get install -y protobuf-compiler + - name: Install Protoc + uses: arduino/setup-protoc@v3 - name: Test Rust working-directory: ./rust @@ -150,7 +150,7 @@ jobs: - run: git diff --exit-code build-rust-amd64: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 defaults: run: working-directory: ./rust @@ -169,6 +169,8 @@ jobs: uses: mozilla-actions/sccache-action@v0.0.5 - name: Install dependencies run: sudo apt-get install -y protobuf-compiler + - name: Print Protoc version + run: protoc --version - name: Build binary run: RUSTFLAGS='-C target-feature=+crt-static' cargo build --release --target x86_64-unknown-linux-gnu - name: Rename binary diff --git a/.github/workflows/nightly-build.yml b/.github/workflows/nightly-build.yml index 51bdb6bfbc..ed6a898b83 100644 --- a/.github/workflows/nightly-build.yml +++ b/.github/workflows/nightly-build.yml @@ -41,7 +41,7 @@ jobs: path: dist build-rust-amd64: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 defaults: run: working-directory: ./rust @@ -71,7 +71,7 @@ jobs: path: rust/numaflow-rs-linux-amd64 build-rust-arm64: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 defaults: run: working-directory: ./rust @@ -160,4 +160,3 @@ jobs: - name: Container build and push with arm64/amd64 run: | IMAGE_NAMESPACE=${{ secrets.QUAYIO_ORG }} VERSION=${{ steps.version.outputs.VERSION }} DOCKER_PUSH=true DOCKER_BUILD_ARGS="--label \"quay.expires-after=30d\"" make image-multi - diff 
--git a/.github/workflows/release.yml b/.github/workflows/release.yml index dcab109e0e..84cd8f533d 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -39,7 +39,7 @@ jobs: path: dist build-rust-amd64: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 defaults: run: working-directory: ./rust @@ -62,7 +62,7 @@ jobs: path: rust/numaflow-rs-linux-amd64 build-rust-arm64: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 defaults: run: working-directory: ./rust diff --git a/go.mod b/go.mod index 9f54c88017..c251e8216b 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.22 require ( github.com/IBM/sarama v1.43.2 - github.com/Masterminds/semver/v3 v3.2.1 + github.com/Masterminds/semver/v3 v3.3.0 github.com/Masterminds/sprig/v3 v3.2.3 github.com/ahmetb/gen-crd-api-reference-docs v0.3.0 github.com/antonmedv/expr v1.9.0 @@ -32,7 +32,7 @@ require ( github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe github.com/nats-io/nats-server/v2 v2.10.20 github.com/nats-io/nats.go v1.37.0 - github.com/numaproj/numaflow-go v0.8.0 + github.com/numaproj/numaflow-go v0.8.2-0.20240918054944-0fd13d430793 github.com/prometheus/client_golang v1.18.0 github.com/prometheus/client_model v0.5.0 github.com/prometheus/common v0.45.0 @@ -48,13 +48,13 @@ require ( go.uber.org/goleak v1.3.0 go.uber.org/multierr v1.11.0 go.uber.org/zap v1.26.0 - golang.org/x/crypto v0.26.0 - golang.org/x/net v0.25.0 - golang.org/x/oauth2 v0.20.0 + golang.org/x/crypto v0.27.0 + golang.org/x/net v0.29.0 + golang.org/x/oauth2 v0.21.0 golang.org/x/sync v0.8.0 golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d - google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 - google.golang.org/grpc v1.59.0 + google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 + google.golang.org/grpc v1.66.0 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 google.golang.org/protobuf v1.34.2 k8s.io/api v0.29.2 @@ -80,7 +80,7 @@ require ( 
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bytedance/sonic v1.11.3 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect github.com/chenzhuoyu/iasm v0.9.1 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect @@ -113,7 +113,7 @@ require ( github.com/go-playground/validator/v10 v10.19.0 // indirect github.com/gobuffalo/flect v0.2.3 // indirect github.com/gobwas/glob v0.2.3 // indirect - github.com/golang/glog v1.1.2 // indirect + github.com/golang/glog v1.2.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.4 // indirect @@ -201,14 +201,13 @@ require ( golang.org/x/arch v0.7.0 // indirect golang.org/x/exp v0.0.0-20240531132922-fd00a4e0eefc // indirect golang.org/x/mod v0.17.0 // indirect - golang.org/x/sys v0.24.0 // indirect - golang.org/x/term v0.23.0 // indirect - golang.org/x/text v0.17.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/term v0.24.0 // indirect + golang.org/x/text v0.18.0 // indirect golang.org/x/time v0.6.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 77784a6a8d..722bd72ee3 100644 --- a/go.sum +++ b/go.sum @@ -53,8 +53,8 @@ github.com/Knetic/govaluate 
v3.0.1-0.20171022003610-9aa49832a739+incompatible/go github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= -github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0= +github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= @@ -94,8 +94,8 @@ github.com/bytedance/sonic v1.11.3/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf5 github.com/casbin/casbin/v2 v2.77.2 h1:yQinn/w9x8AswiwqwtrXz93VU48R1aYTXdHEx4RI3jM= github.com/casbin/casbin/v2 v2.77.2/go.mod h1:mzGx0hYW9/ksOSpw3wNjk3NRAroq5VMFYUQ6G43iGPk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= 
github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ0g/qfRdp61a3Uu/AWrgIq2s0ClJV1g0= @@ -249,8 +249,8 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= -github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= +github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4= +github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -485,8 +485,8 @@ github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDm github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/numaproj/numaflow-go v0.8.0 h1:1Pp0AMLXkmUPlvFjKeY3a9X+OLU8oN1OQWxD9jLg8Uo= -github.com/numaproj/numaflow-go v0.8.0/go.mod h1:WoMt31+h3up202zTRI8c/qe42B8UbvwLe2mJH0MAlhI= +github.com/numaproj/numaflow-go v0.8.2-0.20240918054944-0fd13d430793 h1:kUQw1LsUvmTjqFfcia6DZOxy8qCQwvdY0TpOnR8w3Xg= +github.com/numaproj/numaflow-go v0.8.2-0.20240918054944-0fd13d430793/go.mod h1:g4JZOyUPhjfhv+kR0sX5d8taw/dasgKPXLvQBi39mJ4= 
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= @@ -687,8 +687,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= -golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -774,8 +774,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= -golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod 
h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -788,8 +788,8 @@ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= -golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -863,15 +863,15 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -884,8 +884,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time 
v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1025,12 +1025,10 @@ google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ= -google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY= -google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 h1:JpwMPBpFN3uKhdaekDpiNlImDdkUAyiJ6ez/uxGaUSo= -google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f h1:ultW7fxlIvee4HYrtnaRPon9HpEgFk5zYpmfMgtKB5I= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc= +google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 h1:+rdxYoE3E5htTEWIe15GlN6IfvbURM//Jt0mmkmm6ZU= +google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117/go.mod h1:OimBR/bc1wPO9iV4NC2bpyjy3VnAwZh5EBPQdtaE5oo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= 
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1049,8 +1047,8 @@ google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA5 google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= -google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= +google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c= +google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 h1:rNBFJjBCOgVr9pWD7rs/knKL4FRTKgpZmsRfV214zcA= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0/go.mod h1:Dk1tviKTvMCz5tvh7t+fh94dhmQVHuCt2OzJB3CTW9Y= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= diff --git a/pkg/apis/proto/source/v1/source.proto b/pkg/apis/proto/source/v1/source.proto index 0fd0bdbb37..7dc1a67412 100644 --- a/pkg/apis/proto/source/v1/source.proto +++ b/pkg/apis/proto/source/v1/source.proto @@ -26,16 +26,19 @@ package source.v1; service Source { // Read returns a stream of datum responses. - // The size of the returned ReadResponse is less than or equal to the num_records specified in ReadRequest. - // If the request timeout is reached on server side, the returned ReadResponse will contain all the datum that have been read (which could be an empty list). - rpc ReadFn(ReadRequest) returns (stream ReadResponse); + // The size of the returned responses is less than or equal to the num_records specified in each ReadRequest. 
+ // If the request timeout is reached on the server side, the returned responses will contain all the datum that have been read (which could be an empty list). + // The server will continue to read and respond to subsequent ReadRequests until the client closes the stream. + // Once it has sent all the datum, the server will send a ReadResponse with the end of transmission flag set to true. + rpc ReadFn(stream ReadRequest) returns (stream ReadResponse); - // AckFn acknowledges a list of datum offsets. + // AckFn acknowledges a stream of datum offsets. // When AckFn is called, it implicitly indicates that the datum stream has been processed by the source vertex. // The caller (numa) expects the AckFn to be successful, and it does not expect any errors. // If there are some irrecoverable errors when the callee (UDSource) is processing the AckFn request, - // then it is best to crash because there are no other retry mechanisms possible. - rpc AckFn(AckRequest) returns (AckResponse); + // then it is best to crash because there are no other retry mechanisms possible. + // Clients sends n requests and expects n responses. + rpc AckFn(stream AckRequest) returns (stream AckResponse); // PendingFn returns the number of pending records at the user defined source. rpc PendingFn(google.protobuf.Empty) returns (PendingResponse); @@ -47,6 +50,14 @@ service Source { rpc IsReady(google.protobuf.Empty) returns (ReadyResponse); } +/* + * Handshake message between client and server to indicate the start of transmission. + */ +message Handshake { + // Required field indicating the start of transmission. + bool sot = 1; +} + /* * ReadRequest is the request for reading datum stream from user defined source. */ @@ -61,6 +72,7 @@ message ReadRequest { } // Required field indicating the request. Request request = 1; + optional Handshake handshake = 2; } /* @@ -84,8 +96,31 @@ message ReadResponse { // e.g. Kafka and Redis Stream message usually include information about the headers. 
map headers = 5; } + message Status { + // Code to indicate the status of the response. + enum Code { + SUCCESS = 0; + FAILURE = 1; + } + + // Error to indicate the error type. If the code is FAILURE, then the error field will be populated. + enum Error { + UNACKED = 0; + OTHER = 1; + } + + // End of transmission flag. + bool eot = 1; + Code code = 2; + optional Error error = 3; + optional string msg = 4; + } // Required field holding the result. Result result = 1; + // Status of the response. Holds the end of transmission flag and the status code. + Status status = 2; + // Handshake message between client and server to indicate the start of transmission. + optional Handshake handshake = 3; } /* @@ -94,14 +129,12 @@ message ReadResponse { */ message AckRequest { message Request { - // Required field holding a list of offsets to be acknowledged. - // The offsets must be strictly corresponding to the previously read batch, - // meaning the offsets must be in the same order as the datum responses in the ReadResponse. - // By enforcing ordering, we can save deserialization effort on the server side, assuming the server keeps a local copy of the raw/un-serialized offsets. - repeated Offset offsets = 1; + // Required field holding the offset to be acked + Offset offset = 1; } // Required field holding the request. The list will be ordered and will have the same order as the original Read response. Request request = 1; + optional Handshake handshake = 2; } /* @@ -121,6 +154,8 @@ message AckResponse { } // Required field holding the result. Result result = 1; + // Handshake message between client and server to indicate the start of transmission. + optional Handshake handshake = 2; } /* @@ -169,4 +204,4 @@ message Offset { // It is useful for sources that have multiple partitions. e.g. Kafka. // If the partition_id is not specified, it is assumed that the source has a single partition. 
int32 partition_id = 2; -} +} \ No newline at end of file diff --git a/pkg/daemon/client/grpc_daemon_client.go b/pkg/daemon/client/grpc_daemon_client.go index fa30ec0c4e..6b1d0040db 100644 --- a/pkg/daemon/client/grpc_daemon_client.go +++ b/pkg/daemon/client/grpc_daemon_client.go @@ -37,7 +37,7 @@ func NewGRPCDaemonServiceClient(address string) (DaemonClient, error) { config := &tls.Config{ InsecureSkipVerify: true, } - conn, err := grpc.Dial(address, grpc.WithTransportCredentials(credentials.NewTLS(config))) + conn, err := grpc.NewClient(address, grpc.WithTransportCredentials(credentials.NewTLS(config))) if err != nil { return nil, err } diff --git a/pkg/daemon/client/grpc_daemon_client_test.go b/pkg/daemon/client/grpc_daemon_client_test.go index d1bca6e18a..905d963822 100644 --- a/pkg/daemon/client/grpc_daemon_client_test.go +++ b/pkg/daemon/client/grpc_daemon_client_test.go @@ -467,11 +467,4 @@ func TestNewGRPCDaemonServiceClient(t *testing.T) { err = client.Close() assert.NoError(t, err) }) - - t.Run("empty address", func(t *testing.T) { - address := "" - client, err := NewGRPCDaemonServiceClient(address) - assert.Error(t, err) - assert.Nil(t, client) - }) } diff --git a/pkg/mvtxdaemon/client/grpc_client.go b/pkg/mvtxdaemon/client/grpc_client.go index cd6fdbb455..c398b138c2 100644 --- a/pkg/mvtxdaemon/client/grpc_client.go +++ b/pkg/mvtxdaemon/client/grpc_client.go @@ -38,7 +38,7 @@ func NewGRPCClient(address string) (MonoVertexDaemonClient, error) { config := &tls.Config{ InsecureSkipVerify: true, } - conn, err := grpc.Dial(address, grpc.WithTransportCredentials(credentials.NewTLS(config))) + conn, err := grpc.NewClient(address, grpc.WithTransportCredentials(credentials.NewTLS(config))) if err != nil { return nil, err } diff --git a/pkg/reduce/pbq/wal/unaligned/fs/compactor_test.go b/pkg/reduce/pbq/wal/unaligned/fs/compactor_test.go index 1c451507cb..a811921efd 100644 --- a/pkg/reduce/pbq/wal/unaligned/fs/compactor_test.go +++ 
b/pkg/reduce/pbq/wal/unaligned/fs/compactor_test.go @@ -392,9 +392,6 @@ func TestCompactor_ContextClose(t *testing.T) { } time.Sleep(3 * time.Second) err = c.Stop() - if err != nil { - println(err.Error()) - } assert.NoError(t, err) } diff --git a/pkg/sdkclient/grpc/grpc_utils.go b/pkg/sdkclient/grpc/grpc_utils.go index 42fba83e86..293ba8e8d7 100644 --- a/pkg/sdkclient/grpc/grpc_utils.go +++ b/pkg/sdkclient/grpc/grpc_utils.go @@ -48,7 +48,7 @@ func ConnectToServer(udsSockAddr string, serverInfo *serverinfo.ServerInfo, maxM return nil, fmt.Errorf("failed to start Multiproc Client: %w", err) } - conn, err = grpc.Dial( + conn, err = grpc.NewClient( fmt.Sprintf("%s:///%s", resolver.CustScheme, resolver.CustServiceName), grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`), grpc.WithTransportCredentials(insecure.NewCredentials()), @@ -58,12 +58,12 @@ func ConnectToServer(udsSockAddr string, serverInfo *serverinfo.ServerInfo, maxM sockAddr = getUdsSockAddr(udsSockAddr) log.Println("UDS Client:", sockAddr) - conn, err = grpc.Dial(sockAddr, grpc.WithTransportCredentials(insecure.NewCredentials()), + conn, err = grpc.NewClient(sockAddr, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMessageSize), grpc.MaxCallSendMsgSize(maxMessageSize))) } if err != nil { - return nil, fmt.Errorf("failed to execute grpc.Dial(%q): %w", sockAddr, err) + return nil, fmt.Errorf("failed to execute grpc.NewClient(%q): %w", sockAddr, err) } return conn, nil diff --git a/pkg/sdkclient/reducer/client.go b/pkg/sdkclient/reducer/client.go index 6825fdc4c2..b728a715c1 100644 --- a/pkg/sdkclient/reducer/client.go +++ b/pkg/sdkclient/reducer/client.go @@ -64,7 +64,7 @@ func NewFromClient(c reducepb.ReduceClient) (Client, error) { } // CloseConn closes the grpc client connection. 
-func (c *client) CloseConn(ctx context.Context) error { +func (c *client) CloseConn(context.Context) error { return c.conn.Close() } diff --git a/pkg/sdkclient/source/client.go b/pkg/sdkclient/source/client.go index 39b96a13ed..550c888d66 100644 --- a/pkg/sdkclient/source/client.go +++ b/pkg/sdkclient/source/client.go @@ -18,8 +18,9 @@ package source import ( "context" + "errors" "fmt" - "io" + "time" sourcepb "github.com/numaproj/numaflow-go/pkg/apis/proto/source/v1" "google.golang.org/grpc" @@ -28,19 +29,22 @@ import ( "github.com/numaproj/numaflow/pkg/sdkclient" grpcutil "github.com/numaproj/numaflow/pkg/sdkclient/grpc" "github.com/numaproj/numaflow/pkg/sdkclient/serverinfo" + "github.com/numaproj/numaflow/pkg/shared/logging" ) // client contains the grpc connection and the grpc client. type client struct { - conn *grpc.ClientConn - grpcClt sourcepb.SourceClient + conn *grpc.ClientConn + grpcClt sourcepb.SourceClient + readStream sourcepb.Source_ReadFnClient + ackStream sourcepb.Source_AckFnClient } var _ Client = (*client)(nil) -func New(serverInfo *serverinfo.ServerInfo, inputOptions ...sdkclient.Option) (Client, error) { +func New(ctx context.Context, serverInfo *serverinfo.ServerInfo, inputOptions ...sdkclient.Option) (Client, error) { var opts = sdkclient.DefaultOptions(sdkclient.SourceAddr) - + var logger = logging.FromContext(ctx) for _, inputOption := range inputOptions { inputOption(opts) } @@ -54,16 +58,102 @@ func New(serverInfo *serverinfo.ServerInfo, inputOptions ...sdkclient.Option) (C c.conn = conn c.grpcClt = sourcepb.NewSourceClient(conn) + + // wait until the server is ready +waitUntilReady: + for { + select { + case <-ctx.Done(): + return nil, fmt.Errorf("failed to connect to the server: %v", ctx.Err()) + default: + ready, _ := c.IsReady(ctx, &emptypb.Empty{}) + if ready { + break waitUntilReady + } else { + logger.Warnw("source client is not ready") + time.Sleep(100 * time.Millisecond) + } + } + } + + c.readStream, err = 
c.grpcClt.ReadFn(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create read stream: %v", err) + } + + c.ackStream, err = c.grpcClt.AckFn(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create ack stream: %v", err) + } + + // Send handshake request for read stream + readHandshakeRequest := &sourcepb.ReadRequest{ + Handshake: &sourcepb.Handshake{ + Sot: true, + }, + } + if err := c.readStream.Send(readHandshakeRequest); err != nil { + return nil, fmt.Errorf("failed to send read handshake request: %v", err) + } + + // Wait for handshake response for read stream + readHandshakeResponse, err := c.readStream.Recv() + if err != nil { + return nil, fmt.Errorf("failed to receive read handshake response: %v", err) + } + if readHandshakeResponse.GetHandshake() == nil || !readHandshakeResponse.GetHandshake().GetSot() { + return nil, fmt.Errorf("invalid read handshake response") + } + + // Send handshake request for ack stream + ackHandshakeRequest := &sourcepb.AckRequest{ + Handshake: &sourcepb.Handshake{ + Sot: true, + }, + } + if err := c.ackStream.Send(ackHandshakeRequest); err != nil { + return nil, fmt.Errorf("failed to send ack handshake request: %v", err) + } + + // Wait for handshake response for ack stream + ackHandshakeResponse, err := c.ackStream.Recv() + if err != nil { + return nil, fmt.Errorf("failed to receive ack handshake response: %v", err) + } + if ackHandshakeResponse.GetHandshake() == nil || !ackHandshakeResponse.GetHandshake().GetSot() { + return nil, fmt.Errorf("invalid ack handshake response") + } + return c, nil } // NewFromClient creates a new client object from the grpc client. This is used for testing. 
-func NewFromClient(c sourcepb.SourceClient) (Client, error) { - return &client{grpcClt: c}, nil +func NewFromClient(ctx context.Context, srcClient sourcepb.SourceClient, inputOptions ...sdkclient.Option) (Client, error) { + var opts = sdkclient.DefaultOptions(sdkclient.SourceAddr) + + for _, inputOption := range inputOptions { + inputOption(opts) + } + + c := new(client) + c.grpcClt = srcClient + + c.readStream, _ = c.grpcClt.ReadFn(ctx) + c.ackStream, _ = c.grpcClt.AckFn(ctx) + + return c, nil } // CloseConn closes the grpc client connection. -func (c *client) CloseConn(ctx context.Context) error { +func (c *client) CloseConn(context.Context) error { + err := c.readStream.CloseSend() + if err != nil { + return err + } + err = c.ackStream.CloseSend() + if err != nil { + return err + } return c.conn.Close() } @@ -76,33 +166,51 @@ func (c *client) IsReady(ctx context.Context, in *emptypb.Empty) (bool, error) { return resp.GetReady(), nil } -// ReadFn reads data from the source. -func (c *client) ReadFn(ctx context.Context, req *sourcepb.ReadRequest, datumCh chan<- *sourcepb.ReadResponse) error { - stream, err := c.grpcClt.ReadFn(ctx, req) +func (c *client) ReadFn(_ context.Context, req *sourcepb.ReadRequest, datumCh chan<- *sourcepb.ReadResponse) error { + err := c.readStream.Send(req) if err != nil { - return fmt.Errorf("failed to execute c.grpcClt.ReadFn(): %w", err) + return fmt.Errorf("failed to send read request: %v", err) } + for { - select { - case <-ctx.Done(): - return ctx.Err() - default: - var resp *sourcepb.ReadResponse - resp, err = stream.Recv() - if err == io.EOF { - return nil - } - if err != nil { - return err - } - datumCh <- resp + resp, err := c.readStream.Recv() + // we don't need an EOF check because we only close the stream during shutdown. 
+ if errors.Is(err, context.Canceled) { + break + } + if err != nil { + return fmt.Errorf("failed to receive read response: %v", err) } + if resp.GetStatus().GetEot() { + break + } + datumCh <- resp } + return nil } // AckFn acknowledges the data from the source. -func (c *client) AckFn(ctx context.Context, req *sourcepb.AckRequest) (*sourcepb.AckResponse, error) { - return c.grpcClt.AckFn(ctx, req) +func (c *client) AckFn(_ context.Context, reqs []*sourcepb.AckRequest) ([]*sourcepb.AckResponse, error) { + // Send the ack request + for _, req := range reqs { + err := c.ackStream.Send(req) + if err != nil { + return nil, fmt.Errorf("failed to send ack request: %v", err) + } + } + + responses := make([]*sourcepb.AckResponse, len(reqs)) + for i := 0; i < len(reqs); i++ { + // Wait for the ack response + resp, err := c.ackStream.Recv() + // we don't need an EOF check because we only close the stream during shutdown. + if err != nil { + return nil, fmt.Errorf("failed to receive ack response: %v", err) + } + responses[i] = resp + } + + return responses, nil } // PendingFn returns the number of pending data from the source. 
diff --git a/pkg/sdkclient/source/client_test.go b/pkg/sdkclient/source/client_test.go index 450394b877..d19e3e8737 100644 --- a/pkg/sdkclient/source/client_test.go +++ b/pkg/sdkclient/source/client_test.go @@ -19,9 +19,7 @@ package source import ( "context" "fmt" - "io" "reflect" - "sync" "testing" "time" @@ -61,11 +59,7 @@ func TestIsReady(t *testing.T) { mockClient.EXPECT().IsReady(gomock.Any(), gomock.Any()).Return(&sourcepb.ReadyResponse{Ready: true}, nil) mockClient.EXPECT().IsReady(gomock.Any(), gomock.Any()).Return(&sourcepb.ReadyResponse{Ready: false}, fmt.Errorf("mock connection refused")) - testClient, err := NewFromClient(mockClient) - assert.NoError(t, err) - reflect.DeepEqual(testClient, &client{ - grpcClt: mockClient, - }) + testClient := client{grpcClt: mockClient} ready, err := testClient.IsReady(ctx, &emptypb.Empty{}) assert.True(t, ready) @@ -100,16 +94,28 @@ func TestReadFn(t *testing.T) { for i := 0; i < numRecords; i++ { mockStreamClient.EXPECT().Recv().Return(expectedResp, nil) } - mockStreamClient.EXPECT().Recv().Return(expectedResp, io.EOF) + + eotResponse := &sourcepb.ReadResponse{ + Status: &sourcepb.ReadResponse_Status{ + Eot: true, + Code: 0, + }, + } + mockStreamClient.EXPECT().Recv().Return(eotResponse, nil) mockStreamClient.EXPECT().CloseSend().Return(nil).AnyTimes() - mockClient.EXPECT().ReadFn(gomock.Any(), gomock.Any()).Return(mockStreamClient, nil) - testClient, err := NewFromClient(mockClient) - assert.NoError(t, err) - assert.True(t, reflect.DeepEqual(testClient, &client{ - grpcClt: mockClient, - })) + request := &sourcepb.ReadRequest{ + Request: &sourcepb.ReadRequest_Request{ + NumRecords: uint64(numRecords), + }, + } + mockStreamClient.EXPECT().Send(request).Return(nil) + + testClient := &client{ + grpcClt: mockClient, + readStream: mockStreamClient, + } responseCh := make(chan *sourcepb.ReadResponse) @@ -127,18 +133,12 @@ func TestReadFn(t *testing.T) { } }() - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer 
wg.Done() - err = testClient.ReadFn(ctx, &sourcepb.ReadRequest{ - Request: &sourcepb.ReadRequest_Request{ - NumRecords: uint64(numRecords), - }, - }, responseCh) - assert.NoError(t, err) - }() - wg.Wait() + err := testClient.ReadFn(ctx, &sourcepb.ReadRequest{ + Request: &sourcepb.ReadRequest_Request{ + NumRecords: uint64(numRecords), + }, + }, responseCh) + assert.NoError(t, err) close(responseCh) } @@ -150,22 +150,47 @@ func TestAckFn(t *testing.T) { defer ctrl.Finish() mockClient := sourcemock.NewMockSourceClient(ctrl) - mockClient.EXPECT().AckFn(gomock.Any(), gomock.Any()).Return(&sourcepb.AckResponse{}, nil) - mockClient.EXPECT().AckFn(gomock.Any(), gomock.Any()).Return(&sourcepb.AckResponse{}, fmt.Errorf("mock connection refused")) + mockStream := sourcemock.NewMockSource_AckFnClient(ctrl) - testClient, err := NewFromClient(mockClient) + // Handshake request and response + mockStream.EXPECT().Send(&sourcepb.AckRequest{ + Handshake: &sourcepb.Handshake{ + Sot: true, + }, + }).Return(nil) + mockStream.EXPECT().Recv().Return(&sourcepb.AckResponse{ + Handshake: &sourcepb.Handshake{ + Sot: true, + }, + }, nil) + + // Ack request and response + mockStream.EXPECT().Send(gomock.Any()).Return(nil) + mockStream.EXPECT().Recv().Return(&sourcepb.AckResponse{}, nil) + + testClient := client{ + grpcClt: mockClient, + ackStream: mockStream, + } + + // Perform handshake + ackHandshakeRequest := &sourcepb.AckRequest{ + Handshake: &sourcepb.Handshake{ + Sot: true, + }, + } + err := testClient.ackStream.Send(ackHandshakeRequest) assert.NoError(t, err) - reflect.DeepEqual(testClient, &client{ - grpcClt: mockClient, - }) - ack, err := testClient.AckFn(ctx, &sourcepb.AckRequest{}) + ackHandshakeResponse, err := testClient.ackStream.Recv() assert.NoError(t, err) - assert.Equal(t, &sourcepb.AckResponse{}, ack) + assert.NotNil(t, ackHandshakeResponse.GetHandshake()) + assert.True(t, ackHandshakeResponse.GetHandshake().GetSot()) - ack, err = testClient.AckFn(ctx, 
&sourcepb.AckRequest{}) - assert.EqualError(t, err, "mock connection refused") - assert.Equal(t, &sourcepb.AckResponse{}, ack) + // Test AckFn + ack, err := testClient.AckFn(ctx, []*sourcepb.AckRequest{{}}) + assert.NoError(t, err) + assert.Equal(t, []*sourcepb.AckResponse{{}}, ack) } func TestPendingFn(t *testing.T) { @@ -183,11 +208,9 @@ func TestPendingFn(t *testing.T) { }, nil) mockClient.EXPECT().PendingFn(gomock.Any(), gomock.Any()).Return(&sourcepb.PendingResponse{}, fmt.Errorf("mock connection refused")) - testClient, err := NewFromClient(mockClient) - assert.NoError(t, err) - reflect.DeepEqual(testClient, &client{ + testClient := client{ grpcClt: mockClient, - }) + } pending, err := testClient.PendingFn(ctx, &emptypb.Empty{}) assert.NoError(t, err) diff --git a/pkg/sdkclient/source/interface.go b/pkg/sdkclient/source/interface.go index ea897b8207..cc26f2cd95 100644 --- a/pkg/sdkclient/source/interface.go +++ b/pkg/sdkclient/source/interface.go @@ -32,7 +32,7 @@ type Client interface { // ReadFn reads messages from the udsource. ReadFn(ctx context.Context, req *sourcepb.ReadRequest, datumCh chan<- *sourcepb.ReadResponse) error // AckFn acknowledges messages from the udsource. - AckFn(ctx context.Context, req *sourcepb.AckRequest) (*sourcepb.AckResponse, error) + AckFn(ctx context.Context, req []*sourcepb.AckRequest) ([]*sourcepb.AckResponse, error) // PendingFn returns the number of pending messages from the udsource. PendingFn(ctx context.Context, req *emptypb.Empty) (*sourcepb.PendingResponse, error) // PartitionsFn returns the list of partitions from the udsource. 
diff --git a/pkg/sources/source.go b/pkg/sources/source.go index e206d76dbc..0b3e23a94b 100644 --- a/pkg/sources/source.go +++ b/pkg/sources/source.go @@ -201,7 +201,7 @@ func (sp *SourceProcessor) Start(ctx context.Context) error { return err } - srcClient, err := sourceclient.New(serverInfo, sdkclient.WithMaxMessageSize(maxMessageSize)) + srcClient, err := sourceclient.New(ctx, serverInfo, sdkclient.WithMaxMessageSize(maxMessageSize)) if err != nil { return fmt.Errorf("failed to create a new gRPC client: %w", err) } diff --git a/pkg/sources/udsource/grpc_udsource.go b/pkg/sources/udsource/grpc_udsource.go index 525326651f..8d0389a2ee 100644 --- a/pkg/sources/udsource/grpc_udsource.go +++ b/pkg/sources/udsource/grpc_udsource.go @@ -175,12 +175,16 @@ func (u *GRPCBasedUDSource) ApplyAckFn(ctx context.Context, offsets []isb.Offset for i, offset := range offsets { rOffsets[i] = ConvertToUserDefinedSourceOffset(offset) } - var r = &sourcepb.AckRequest{ - Request: &sourcepb.AckRequest_Request{ - Offsets: rOffsets, - }, + ackRequests := make([]*sourcepb.AckRequest, len(rOffsets)) + for i, offset := range rOffsets { + var r = &sourcepb.AckRequest{ + Request: &sourcepb.AckRequest_Request{ + Offset: offset, + }, + } + ackRequests[i] = r } - _, err := u.client.AckFn(ctx, r) + _, err := u.client.AckFn(ctx, ackRequests) return err } diff --git a/pkg/sources/udsource/grpc_udsource_test.go b/pkg/sources/udsource/grpc_udsource_test.go index bf7a486fad..e0a0ab4ca5 100644 --- a/pkg/sources/udsource/grpc_udsource_test.go +++ b/pkg/sources/udsource/grpc_udsource_test.go @@ -20,7 +20,6 @@ import ( "context" "errors" "fmt" - "io" "testing" "time" @@ -31,8 +30,6 @@ import ( "go.uber.org/goleak" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/emptypb" "google.golang.org/protobuf/types/known/timestamppb" "github.com/numaproj/numaflow/pkg/isb" @@ -43,24 +40,8 @@ func TestMain(m *testing.M) { 
goleak.VerifyTestMain(m) } -type rpcMsg struct { - msg proto.Message -} - -func (r *rpcMsg) Matches(msg interface{}) bool { - m, ok := msg.(proto.Message) - if !ok { - return false - } - return proto.Equal(m, r.msg) -} - -func (r *rpcMsg) String() string { - return fmt.Sprintf("is %s", r.msg) -} - -func NewMockUDSgRPCBasedUDSource(mockClient *sourcemock.MockSourceClient) *GRPCBasedUDSource { - c, _ := sourceclient.NewFromClient(mockClient) +func NewMockUDSgRPCBasedUDSource(ctx context.Context, mockClient *sourcemock.MockSourceClient) *GRPCBasedUDSource { + c, _ := sourceclient.NewFromClient(ctx, mockClient) return &GRPCBasedUDSource{ vertexName: "testVertex", pipelineName: "testPipeline", @@ -75,6 +56,8 @@ func Test_gRPCBasedUDSource_WaitUntilReadyWithMockClient(t *testing.T) { mockClient := sourcemock.NewMockSourceClient(ctrl) mockClient.EXPECT().IsReady(gomock.Any(), gomock.Any()).Return(&sourcepb.ReadyResponse{Ready: true}, nil) + mockClient.EXPECT().ReadFn(gomock.Any(), gomock.Any()).Return(nil, nil) + mockClient.EXPECT().AckFn(gomock.Any(), gomock.Any()).Return(nil, nil) ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() @@ -85,7 +68,7 @@ func Test_gRPCBasedUDSource_WaitUntilReadyWithMockClient(t *testing.T) { } }() - u := NewMockUDSgRPCBasedUDSource(mockClient) + u := NewMockUDSgRPCBasedUDSource(ctx, mockClient) err := u.WaitUntilReady(ctx) assert.NoError(t, err) } @@ -103,6 +86,8 @@ func Test_gRPCBasedUDSource_ApplyPendingWithMockClient(t *testing.T) { mockSourceClient := sourcemock.NewMockSourceClient(ctrl) mockSourceClient.EXPECT().PendingFn(gomock.Any(), gomock.Any()).Return(testResponse, nil).AnyTimes() + mockSourceClient.EXPECT().ReadFn(gomock.Any(), gomock.Any()).Return(nil, nil) + mockSourceClient.EXPECT().AckFn(gomock.Any(), gomock.Any()).Return(nil, nil) ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() @@ -113,7 +98,7 @@ func Test_gRPCBasedUDSource_ApplyPendingWithMockClient(t 
*testing.T) { } }() - u := NewMockUDSgRPCBasedUDSource(mockSourceClient) + u := NewMockUDSgRPCBasedUDSource(ctx, mockSourceClient) count, err := u.ApplyPendingFn(ctx) assert.NoError(t, err) assert.Equal(t, int64(123), count) @@ -131,6 +116,8 @@ func Test_gRPCBasedUDSource_ApplyPendingWithMockClient(t *testing.T) { mockSourceClient := sourcemock.NewMockSourceClient(ctrl) mockSourceClient.EXPECT().PendingFn(gomock.Any(), gomock.Any()).Return(testResponse, nil).AnyTimes() + mockSourceClient.EXPECT().ReadFn(gomock.Any(), gomock.Any()).Return(nil, nil) + mockSourceClient.EXPECT().AckFn(gomock.Any(), gomock.Any()).Return(nil, nil) ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() @@ -141,7 +128,7 @@ func Test_gRPCBasedUDSource_ApplyPendingWithMockClient(t *testing.T) { } }() - u := NewMockUDSgRPCBasedUDSource(mockSourceClient) + u := NewMockUDSgRPCBasedUDSource(ctx, mockSourceClient) count, err := u.ApplyPendingFn(ctx) assert.NoError(t, err) assert.Equal(t, isb.PendingNotAvailable, count) @@ -159,6 +146,8 @@ func Test_gRPCBasedUDSource_ApplyPendingWithMockClient(t *testing.T) { mockSourceErrClient := sourcemock.NewMockSourceClient(ctrl) mockSourceErrClient.EXPECT().PendingFn(gomock.Any(), gomock.Any()).Return(testResponse, fmt.Errorf("mock udsource pending error")).AnyTimes() + mockSourceErrClient.EXPECT().ReadFn(gomock.Any(), gomock.Any()).Return(nil, nil) + mockSourceErrClient.EXPECT().AckFn(gomock.Any(), gomock.Any()).Return(nil, nil) ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() @@ -169,7 +158,7 @@ func Test_gRPCBasedUDSource_ApplyPendingWithMockClient(t *testing.T) { } }() - u := NewMockUDSgRPCBasedUDSource(mockSourceErrClient) + u := NewMockUDSgRPCBasedUDSource(ctx, mockSourceErrClient) count, err := u.ApplyPendingFn(ctx) assert.Equal(t, isb.PendingNotAvailable, count) @@ -183,13 +172,8 @@ func Test_gRPCBasedUDSource_ApplyReadWithMockClient(t *testing.T) { defer ctrl.Finish() mockClient := 
sourcemock.NewMockSourceClient(ctrl) mockReadClient := sourcemock.NewMockSource_ReadFnClient(ctrl) - - req := &sourcepb.ReadRequest{ - Request: &sourcepb.ReadRequest_Request{ - NumRecords: 1, - TimeoutInMs: 1000, - }, - } + mockClient.EXPECT().ReadFn(gomock.Any(), gomock.Any()).Return(mockReadClient, nil) + mockClient.EXPECT().AckFn(gomock.Any(), gomock.Any()).Return(nil, nil) offset := &sourcepb.Offset{Offset: []byte(`test_offset`), PartitionId: 0} @@ -202,10 +186,18 @@ func Test_gRPCBasedUDSource_ApplyReadWithMockClient(t *testing.T) { Keys: []string{"test_key"}, }, } - mockReadClient.EXPECT().Recv().Return(expectedResponse, nil).Times(1) - mockReadClient.EXPECT().Recv().Return(nil, io.EOF).Times(1) - mockClient.EXPECT().ReadFn(gomock.Any(), &rpcMsg{msg: req}).Return(mockReadClient, nil) + + eotResponse := &sourcepb.ReadResponse{Status: &sourcepb.ReadResponse_Status{Eot: true}} + mockReadClient.EXPECT().Recv().Return(eotResponse, nil).Times(1) + + req := &sourcepb.ReadRequest{ + Request: &sourcepb.ReadRequest_Request{ + NumRecords: 1, + TimeoutInMs: 1000, + }, + } + mockReadClient.EXPECT().Send(req).Return(nil).Times(1) ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() @@ -216,7 +208,7 @@ func Test_gRPCBasedUDSource_ApplyReadWithMockClient(t *testing.T) { } }() - u := NewMockUDSgRPCBasedUDSource(mockClient) + u := NewMockUDSgRPCBasedUDSource(ctx, mockClient) readMessages, err := u.ApplyReadFn(ctx, 1, time.Millisecond*1000) assert.NoError(t, err) assert.Equal(t, 1, len(readMessages)) @@ -232,6 +224,8 @@ func Test_gRPCBasedUDSource_ApplyReadWithMockClient(t *testing.T) { mockClient := sourcemock.NewMockSourceClient(ctrl) mockReadClient := sourcemock.NewMockSource_ReadFnClient(ctrl) + mockClient.EXPECT().ReadFn(gomock.Any(), gomock.Any()).Return(mockReadClient, nil) + mockClient.EXPECT().AckFn(gomock.Any(), gomock.Any()).Return(nil, nil) req := &sourcepb.ReadRequest{ Request: &sourcepb.ReadRequest_Request{ @@ -239,6 +233,7 @@ func 
Test_gRPCBasedUDSource_ApplyReadWithMockClient(t *testing.T) { TimeoutInMs: 1000, }, } + mockReadClient.EXPECT().Send(req).Return(nil).Times(1) var TestEventTime = time.Unix(1661169600, 0).UTC() expectedResponse := &sourcepb.ReadResponse{ @@ -249,10 +244,7 @@ func Test_gRPCBasedUDSource_ApplyReadWithMockClient(t *testing.T) { Keys: []string{"test_key"}, }, } - mockReadClient.EXPECT().Recv().Return(expectedResponse, errors.New("mock error for read")).AnyTimes() - mockClient.EXPECT().ReadFn(gomock.Any(), &rpcMsg{msg: req}).Return(mockReadClient, nil) - ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() go func() { @@ -262,7 +254,7 @@ func Test_gRPCBasedUDSource_ApplyReadWithMockClient(t *testing.T) { } }() - u := NewMockUDSgRPCBasedUDSource(mockClient) + u := NewMockUDSgRPCBasedUDSource(ctx, mockClient) readMessages, err := u.ApplyReadFn(ctx, 1, time.Millisecond*1000) assert.Error(t, err) assert.Equal(t, 0, len(readMessages)) @@ -278,16 +270,25 @@ func Test_gRPCBasedUDSource_ApplyAckWithMockClient(t *testing.T) { offset2 := &sourcepb.Offset{Offset: []byte("test-offset-2"), PartitionId: 0} mockClient := sourcemock.NewMockSourceClient(ctrl) - req := &sourcepb.AckRequest{ + mockAckClient := sourcemock.NewMockSource_AckFnClient(ctrl) + mockClient.EXPECT().ReadFn(gomock.Any(), gomock.Any()).Return(nil, nil) + mockClient.EXPECT().AckFn(gomock.Any(), gomock.Any()).Return(mockAckClient, nil) + + req1 := &sourcepb.AckRequest{ + Request: &sourcepb.AckRequest_Request{ + Offset: offset1, + }, + } + + req2 := &sourcepb.AckRequest{ Request: &sourcepb.AckRequest_Request{ - Offsets: []*sourcepb.Offset{ - offset1, - offset2, - }, + Offset: offset2, }, } - mockClient.EXPECT().AckFn(gomock.Any(), &rpcMsg{msg: req}).Return(&sourcepb.AckResponse{Result: &sourcepb.AckResponse_Result{Success: &emptypb.Empty{}}}, nil).AnyTimes() + mockAckClient.EXPECT().Send(req1).Return(nil).Times(1) + mockAckClient.EXPECT().Send(req2).Return(nil).Times(1) + 
mockAckClient.EXPECT().Recv().Return(&sourcepb.AckResponse{}, nil).Times(2) ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() @@ -298,7 +299,7 @@ func Test_gRPCBasedUDSource_ApplyAckWithMockClient(t *testing.T) { } }() - u := NewMockUDSgRPCBasedUDSource(mockClient) + u := NewMockUDSgRPCBasedUDSource(ctx, mockClient) err := u.ApplyAckFn(ctx, []isb.Offset{ NewUserDefinedSourceOffset(offset1), NewUserDefinedSourceOffset(offset2), @@ -314,16 +315,18 @@ func Test_gRPCBasedUDSource_ApplyAckWithMockClient(t *testing.T) { offset2 := &sourcepb.Offset{Offset: []byte("test-offset-2"), PartitionId: 0} mockClient := sourcemock.NewMockSourceClient(ctrl) - req := &sourcepb.AckRequest{ + mockAckClient := sourcemock.NewMockSource_AckFnClient(ctrl) + mockClient.EXPECT().ReadFn(gomock.Any(), gomock.Any()).Return(nil, nil) + mockClient.EXPECT().AckFn(gomock.Any(), gomock.Any()).Return(mockAckClient, nil) + + req1 := &sourcepb.AckRequest{ Request: &sourcepb.AckRequest_Request{ - Offsets: []*sourcepb.Offset{ - offset1, - offset2, - }, + Offset: offset1, }, } - mockClient.EXPECT().AckFn(gomock.Any(), &rpcMsg{msg: req}).Return(nil, status.New(codes.DeadlineExceeded, "mock test err").Err()) + mockAckClient.EXPECT().Send(req1).Return(status.New(codes.DeadlineExceeded, "mock test err").Err()).Times(1) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() go func() { @@ -333,11 +336,11 @@ func Test_gRPCBasedUDSource_ApplyAckWithMockClient(t *testing.T) { } }() - u := NewMockUDSgRPCBasedUDSource(mockClient) + u := NewMockUDSgRPCBasedUDSource(ctx, mockClient) err := u.ApplyAckFn(ctx, []isb.Offset{ NewUserDefinedSourceOffset(offset1), NewUserDefinedSourceOffset(offset2), }) - assert.ErrorIs(t, err, status.New(codes.DeadlineExceeded, "mock test err").Err()) + assert.Equal(t, err.Error(), fmt.Sprintf("failed to send ack request: %s", status.New(codes.DeadlineExceeded, "mock test err").Err())) }) } diff --git 
a/pkg/sources/udsource/user_defined_source.go b/pkg/sources/udsource/user_defined_source.go index ac1b2debb5..5ba77019a1 100644 --- a/pkg/sources/udsource/user_defined_source.go +++ b/pkg/sources/udsource/user_defined_source.go @@ -62,10 +62,6 @@ func NewUserDefinedSource(ctx context.Context, vertexInstance *dfv1.VertexInstan } } - if err != nil { - u.logger.Errorw("Error instantiating the forwarder", zap.Error(err)) - return nil, err - } return u, nil } diff --git a/rust/Cargo.lock b/rust/Cargo.lock index 7748607ca5..624d5f14a8 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -43,9 +43,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.88" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e1496f8fb1fbf272686b8d37f523dab3e4a7443300055e74cdaa449f3114356" +checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" [[package]] name = "arc-swap" @@ -307,7 +307,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "rustc-hash", + "rustc-hash 1.1.0", "shlex", "syn", "which", @@ -351,18 +351,18 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.7.1" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" +checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" dependencies = [ "serde", ] [[package]] name = "cc" -version = "1.1.18" +version = "1.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b62ac837cdb5cb22e10a256099b4fc502b1dfe560cb282963a974d7abd80e476" +checksum = "07b1695e2c7e8fc85310cde85aeaab7e3097f593c91d209d3f9df76c928100f0" dependencies = [ "jobserver", "libc", @@ -1098,6 +1098,7 @@ dependencies = [ "tokio", "tokio-rustls 0.26.0", "tower-service", + "webpki-roots 0.26.5", ] [[package]] @@ -1135,9 +1136,9 @@ dependencies = [ [[package]] name 
= "iana-time-zone" -version = "0.1.60" +version = "0.1.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -1476,6 +1477,7 @@ dependencies = [ "chrono", "hyper-util", "kube", + "log", "numaflow 0.1.1", "numaflow-models", "once_cell", @@ -1603,7 +1605,7 @@ dependencies = [ [[package]] name = "numaflow" version = "0.1.1" -source = "git+https://github.com/numaproj/numaflow-rs.git?branch=main#d3afabd2fff1d070bb3fd79866c0389f009556b3" +source = "git+https://github.com/numaproj/numaflow-rs.git?branch=handshake#baecc88456f317b08bc869f82596e2b746cf798b" dependencies = [ "chrono", "futures-util", @@ -1628,7 +1630,7 @@ version = "0.0.0-pre" dependencies = [ "k8s-openapi", "kube", - "reqwest", + "reqwest 0.11.27", "serde", "serde_derive", "serde_json", @@ -1966,6 +1968,54 @@ dependencies = [ "prost", ] +[[package]] +name = "quinn" +version = "0.11.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c7c5fdde3cdae7203427dc4f0a68fe0ed09833edc525a03456b153b79828684" +dependencies = [ + "bytes", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash 2.0.0", + "rustls 0.23.13", + "socket2", + "thiserror", + "tokio", + "tracing", +] + +[[package]] +name = "quinn-proto" +version = "0.11.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6" +dependencies = [ + "bytes", + "rand", + "ring", + "rustc-hash 2.0.0", + "rustls 0.23.13", + "slab", + "thiserror", + "tinyvec", + "tracing", +] + +[[package]] +name = "quinn-udp" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fe68c2e9e1a1234e218683dbdf9f9dfcb094113c5ac2b938dfcb9bab4c4140b" +dependencies = [ + "libc", + 
"once_cell", + "socket2", + "tracing", + "windows-sys 0.59.0", +] + [[package]] name = "quote" version = "1.0.37" @@ -2134,10 +2184,52 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "webpki-roots", + "webpki-roots 0.25.4", "winreg", ] +[[package]] +name = "reqwest" +version = "0.12.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8f4955649ef5c38cc7f9e8aa41761d48fb9677197daea9984dc54f56aad5e63" +dependencies = [ + "base64 0.22.1", + "bytes", + "futures-core", + "futures-util", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.4.1", + "hyper-rustls 0.27.3", + "hyper-util", + "ipnet", + "js-sys", + "log", + "mime", + "once_cell", + "percent-encoding", + "pin-project-lite", + "quinn", + "rustls 0.23.13", + "rustls-pemfile 2.1.3", + "rustls-pki-types", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper 1.0.1", + "tokio", + "tokio-rustls 0.26.0", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "webpki-roots 0.26.5", + "windows-registry", +] + [[package]] name = "ring" version = "0.17.8" @@ -2187,6 +2279,12 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +[[package]] +name = "rustc-hash" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" + [[package]] name = "rustc_version" version = "0.4.1" @@ -2497,7 +2595,7 @@ name = "servesink" version = "0.1.0" dependencies = [ "numaflow 0.1.1", - "reqwest", + "reqwest 0.12.7", "tokio", "tonic", "tracing", @@ -2678,6 +2776,9 @@ name = "sync_wrapper" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" +dependencies = [ + "futures-core", +] [[package]] name = 
"system-configuration" @@ -2906,9 +3007,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.20" +version = "0.22.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "583c44c02ad26b0c3f3066fe629275e50627026c51ac2e595cca4c230ce1ce1d" +checksum = "3b072cee73c449a636ffd6f32bd8de3a9f7119139aff882f44943ce2986dc5cf" dependencies = [ "indexmap 2.5.0", "serde", @@ -3137,9 +3238,9 @@ checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" [[package]] name = "unicode-normalization" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" dependencies = [ "tinyvec", ] @@ -3305,6 +3406,15 @@ version = "0.25.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" +[[package]] +name = "webpki-roots" +version = "0.26.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bd24728e5af82c6c4ec1b66ac4844bdf8156257fccda846ec58b42cd0cdbe6a" +dependencies = [ + "rustls-pki-types", +] + [[package]] name = "which" version = "4.4.2" @@ -3348,6 +3458,36 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-registry" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" +dependencies = [ + "windows-result", + "windows-strings", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-result" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-strings" +version = "0.1.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +dependencies = [ + "windows-result", + "windows-targets 0.52.6", +] + [[package]] name = "windows-sys" version = "0.48.0" diff --git a/rust/monovertex/Cargo.toml b/rust/monovertex/Cargo.toml index 01eb5afafd..da75a4c8c8 100644 --- a/rust/monovertex/Cargo.toml +++ b/rust/monovertex/Cargo.toml @@ -6,14 +6,14 @@ edition = "2021" [dependencies] axum = "0.7.5" axum-server = { version = "0.7.1", features = ["tls-rustls"] } -tonic = "0.12.1" +tonic = "0.12.2" bytes = "1.7.1" thiserror = "1.0.63" tokio = { version = "1.39.3", features = ["full"] } tracing = "0.1.40" tokio-util = "0.7.11" tokio-stream = "0.1.15" -prost = "0.13.1" +prost = "0.13.2" prost-types = "0.13.1" chrono = "0.4.31" base64 = "0.22.1" @@ -34,10 +34,11 @@ backoff = { path = "../backoff" } parking_lot = "0.12.3" prometheus-client = "0.22.3" kube = "0.94.0" +log = "0.4.22" [dev-dependencies] tempfile = "3.11.0" -numaflow = { git = "https://github.com/numaproj/numaflow-rs.git", branch = "main" } +numaflow = { git = "https://github.com/numaproj/numaflow-rs.git", branch = "handshake" } [build-dependencies] tonic-build = "0.12.1" diff --git a/rust/monovertex/proto/source.proto b/rust/monovertex/proto/source.proto index 131cc36d30..69ff154127 100644 --- a/rust/monovertex/proto/source.proto +++ b/rust/monovertex/proto/source.proto @@ -7,16 +7,19 @@ package source.v1; service Source { // Read returns a stream of datum responses. - // The size of the returned ReadResponse is less than or equal to the num_records specified in ReadRequest. - // If the request timeout is reached on server side, the returned ReadResponse will contain all the datum that have been read (which could be an empty list). - rpc ReadFn(ReadRequest) returns (stream ReadResponse); + // The size of the returned responses is less than or equal to the num_records specified in each ReadRequest. 
+ // If the request timeout is reached on the server side, the returned responses will contain all the datum that have been read (which could be an empty list). + // The server will continue to read and respond to subsequent ReadRequests until the client closes the stream. + // Once it has sent all the datum, the server will send a ReadResponse with the end of transmission flag set to true. + rpc ReadFn(stream ReadRequest) returns (stream ReadResponse); - // AckFn acknowledges a list of datum offsets. + // AckFn acknowledges a stream of datum offsets. // When AckFn is called, it implicitly indicates that the datum stream has been processed by the source vertex. // The caller (numa) expects the AckFn to be successful, and it does not expect any errors. // If there are some irrecoverable errors when the callee (UDSource) is processing the AckFn request, - // then it is best to crash because there are no other retry mechanisms possible. - rpc AckFn(AckRequest) returns (AckResponse); + // then it is best to crash because there are no other retry mechanisms possible. + // Clients sends n requests and expects n responses. + rpc AckFn(stream AckRequest) returns (stream AckResponse); // PendingFn returns the number of pending records at the user defined source. rpc PendingFn(google.protobuf.Empty) returns (PendingResponse); @@ -28,6 +31,14 @@ service Source { rpc IsReady(google.protobuf.Empty) returns (ReadyResponse); } +/* + * Handshake message between client and server to indicate the start of transmission. + */ +message Handshake { + // Required field indicating the start of transmission. + bool sot = 1; +} + /* * ReadRequest is the request for reading datum stream from user defined source. */ @@ -42,6 +53,7 @@ message ReadRequest { } // Required field indicating the request. Request request = 1; + optional Handshake handshake = 2; } /* @@ -65,8 +77,31 @@ message ReadResponse { // e.g. Kafka and Redis Stream message usually include information about the headers. 
map headers = 5; } + message Status { + // Code to indicate the status of the response. + enum Code { + SUCCESS = 0; + FAILURE = 1; + } + + // Error to indicate the error type. If the code is FAILURE, then the error field will be populated. + enum Error { + UNACKED = 0; + OTHER = 1; + } + + // End of transmission flag. + bool eot = 1; + Code code = 2; + optional Error error = 3; + optional string msg = 4; + } // Required field holding the result. Result result = 1; + // Status of the response. Holds the end of transmission flag and the status code. + Status status = 2; + // Handshake message between client and server to indicate the start of transmission. + optional Handshake handshake = 3; } /* @@ -75,14 +110,12 @@ message ReadResponse { */ message AckRequest { message Request { - // Required field holding a list of offsets to be acknowledged. - // The offsets must be strictly corresponding to the previously read batch, - // meaning the offsets must be in the same order as the datum responses in the ReadResponse. - // By enforcing ordering, we can save deserialization effort on the server side, assuming the server keeps a local copy of the raw/un-serialized offsets. - repeated Offset offsets = 1; + // Required field holding the offset to be acked + Offset offset = 1; } // Required field holding the request. The list will be ordered and will have the same order as the original Read response. Request request = 1; + optional Handshake handshake = 2; } /* @@ -102,6 +135,8 @@ message AckResponse { } // Required field holding the result. Result result = 1; + // Handshake message between client and server to indicate the start of transmission. + optional Handshake handshake = 2; } /* @@ -150,4 +185,4 @@ message Offset { // It is useful for sources that have multiple partitions. e.g. Kafka. // If the partition_id is not specified, it is assumed that the source has a single partition. 
int32 partition_id = 2; -} \ No newline at end of file +} diff --git a/rust/monovertex/src/config.rs b/rust/monovertex/src/config.rs index 81b115422f..5d245ed397 100644 --- a/rust/monovertex/src/config.rs +++ b/rust/monovertex/src/config.rs @@ -1,13 +1,20 @@ -use std::env; -use std::sync::OnceLock; - +use crate::error::Error; use base64::prelude::BASE64_STANDARD; use base64::Engine; - use numaflow_models::models::{Backoff, MonoVertex, RetryStrategy}; +use std::env; +use std::sync::OnceLock; -use crate::error::Error; +const DEFAULT_SOURCE_SOCKET: &str = "/var/run/numaflow/source.sock"; +const DEFAULT_SOURCE_SERVER_INFO_FILE: &str = "/var/run/numaflow/sourcer-server-info"; +const DEFAULT_SINK_SOCKET: &str = "/var/run/numaflow/sink.sock"; +const DEFAULT_FB_SINK_SOCKET: &str = "/var/run/numaflow/fb-sink.sock"; +const DEFAULT_SINK_SERVER_INFO_FILE: &str = "/var/run/numaflow/sinker-server-info"; +const DEFAULT_FB_SINK_SERVER_INFO_FILE: &str = "/var/run/numaflow/fb-sinker-server-info"; +const DEFAULT_TRANSFORMER_SOCKET: &str = "/var/run/numaflow/sourcetransform.sock"; +const DEFAULT_TRANSFORMER_SERVER_INFO_FILE: &str = + "/var/run/numaflow/sourcetransformer-server-info"; const ENV_MONO_VERTEX_OBJ: &str = "NUMAFLOW_MONO_VERTEX_OBJECT"; const ENV_GRPC_MAX_MESSAGE_SIZE: &str = "NUMAFLOW_GRPC_MAX_MESSAGE_SIZE"; const ENV_POD_REPLICA: &str = "NUMAFLOW_REPLICA"; @@ -77,15 +84,46 @@ pub struct Settings { pub batch_size: u64, pub timeout_in_ms: u32, pub metrics_server_listen_port: u16, - pub grpc_max_message_size: usize, - pub is_transformer_enabled: bool, - pub is_fallback_enabled: bool, pub lag_check_interval_in_secs: u16, pub lag_refresh_interval_in_secs: u16, pub sink_max_retry_attempts: u16, pub sink_retry_interval_in_ms: u32, pub sink_retry_on_fail_strategy: OnFailureStrategy, pub sink_default_retry_strategy: RetryStrategy, + pub sdk_config: SDKConfig, +} + +#[derive(Debug, Clone)] +pub struct SDKConfig { + pub grpc_max_message_size: usize, + pub is_transformer_enabled: 
bool, + pub is_fallback_enabled: bool, + pub source_socket_path: String, + pub sink_socket_path: String, + pub transformer_socket_path: String, + pub fallback_socket_path: String, + pub source_server_info_path: String, + pub sink_server_info_path: String, + pub transformer_server_info_path: String, + pub fallback_server_info_path: String, +} + +impl Default for SDKConfig { + fn default() -> Self { + Self { + grpc_max_message_size: DEFAULT_GRPC_MAX_MESSAGE_SIZE, + is_transformer_enabled: false, + is_fallback_enabled: false, + source_socket_path: DEFAULT_SOURCE_SOCKET.to_string(), + sink_socket_path: DEFAULT_SINK_SOCKET.to_string(), + transformer_socket_path: DEFAULT_TRANSFORMER_SOCKET.to_string(), + fallback_socket_path: DEFAULT_FB_SINK_SOCKET.to_string(), + source_server_info_path: DEFAULT_SOURCE_SERVER_INFO_FILE.to_string(), + sink_server_info_path: DEFAULT_SINK_SERVER_INFO_FILE.to_string(), + transformer_server_info_path: DEFAULT_TRANSFORMER_SERVER_INFO_FILE.to_string(), + fallback_server_info_path: DEFAULT_FB_SINK_SERVER_INFO_FILE.to_string(), + } + } } impl Default for Settings { @@ -106,15 +144,13 @@ impl Default for Settings { batch_size: DEFAULT_BATCH_SIZE, timeout_in_ms: DEFAULT_TIMEOUT_IN_MS, metrics_server_listen_port: DEFAULT_METRICS_PORT, - grpc_max_message_size: DEFAULT_GRPC_MAX_MESSAGE_SIZE, - is_transformer_enabled: false, - is_fallback_enabled: false, lag_check_interval_in_secs: DEFAULT_LAG_CHECK_INTERVAL_IN_SECS, lag_refresh_interval_in_secs: DEFAULT_LAG_REFRESH_INTERVAL_IN_SECS, sink_max_retry_attempts: DEFAULT_MAX_SINK_RETRY_ATTEMPTS, sink_retry_interval_in_ms: DEFAULT_SINK_RETRY_INTERVAL_IN_MS, sink_retry_on_fail_strategy: DEFAULT_SINK_RETRY_ON_FAIL_STRATEGY, sink_default_retry_strategy: default_retry_strategy, + sdk_config: Default::default(), } } } @@ -158,14 +194,14 @@ impl Settings { .and_then(|metadata| metadata.name) .ok_or_else(|| Error::ConfigError("Mono vertex name not found".to_string()))?; - settings.is_transformer_enabled = 
mono_vertex_obj + settings.sdk_config.is_transformer_enabled = mono_vertex_obj .spec .source .ok_or(Error::ConfigError("Source not found".to_string()))? .transformer .is_some(); - settings.is_fallback_enabled = mono_vertex_obj + settings.sdk_config.is_fallback_enabled = mono_vertex_obj .spec .sink .as_deref() @@ -211,7 +247,7 @@ impl Settings { // check if the sink retry strategy is set to fallback and there is no fallback sink configured // then we should return an error if settings.sink_retry_on_fail_strategy == OnFailureStrategy::Fallback - && !settings.is_fallback_enabled + && !settings.sdk_config.is_fallback_enabled { return Err(Error::ConfigError( "Retry Strategy given as fallback but Fallback sink not configured" @@ -221,7 +257,7 @@ impl Settings { } } - settings.grpc_max_message_size = env::var(ENV_GRPC_MAX_MESSAGE_SIZE) + settings.sdk_config.grpc_max_message_size = env::var(ENV_GRPC_MAX_MESSAGE_SIZE) .unwrap_or_else(|_| DEFAULT_GRPC_MAX_MESSAGE_SIZE.to_string()) .parse() .map_err(|e| { diff --git a/rust/monovertex/src/forwarder.rs b/rust/monovertex/src/forwarder.rs index 1ba928123c..d60644b338 100644 --- a/rust/monovertex/src/forwarder.rs +++ b/rust/monovertex/src/forwarder.rs @@ -5,9 +5,10 @@ use crate::error::{Error, Result}; use crate::message::{Message, Offset}; use crate::metrics; use crate::metrics::forward_metrics; -use crate::sink::{proto, SinkClient}; -use crate::source::SourceClient; -use crate::transformer::TransformerClient; +use crate::sink::SinkWriter; +use crate::sink_pb::Status::{Failure, Fallback, Success}; +use crate::source::{SourceAcker, SourceReader}; +use crate::transformer::SourceTransformer; use chrono::Utc; use tokio::task::JoinSet; use tokio::time::sleep; @@ -19,48 +20,52 @@ use tracing::{debug, info}; /// transformer is present, writing the messages to the sink, and then acknowledging the messages /// back to the source. 
pub(crate) struct Forwarder { - source_client: SourceClient, - sink_client: SinkClient, - transformer_client: Option, - fallback_client: Option, + source_reader: SourceReader, + source_acker: SourceAcker, + sink_writer: SinkWriter, + source_transformer: Option, + fb_sink_writer: Option, cln_token: CancellationToken, common_labels: Vec<(String, String)>, } /// ForwarderBuilder is used to build a Forwarder instance with optional fields. pub(crate) struct ForwarderBuilder { - source_client: SourceClient, - sink_client: SinkClient, + source_reader: SourceReader, + source_acker: SourceAcker, + sink_writer: SinkWriter, cln_token: CancellationToken, - transformer_client: Option, - fb_sink_client: Option, + source_transformer: Option, + fb_sink_writer: Option, } impl ForwarderBuilder { /// Create a new builder with mandatory fields pub(crate) fn new( - source_client: SourceClient, - sink_client: SinkClient, + source_reader: SourceReader, + source_acker: SourceAcker, + sink_writer: SinkWriter, cln_token: CancellationToken, ) -> Self { Self { - source_client, - sink_client, + source_reader, + source_acker, + sink_writer, cln_token, - transformer_client: None, - fb_sink_client: None, + source_transformer: None, + fb_sink_writer: None, } } /// Set the optional transformer client - pub(crate) fn transformer_client(mut self, transformer_client: TransformerClient) -> Self { - self.transformer_client = Some(transformer_client); + pub(crate) fn source_transformer(mut self, transformer_client: SourceTransformer) -> Self { + self.source_transformer = Some(transformer_client); self } /// Set the optional fallback client - pub(crate) fn fb_sink_client(mut self, fallback_client: SinkClient) -> Self { - self.fb_sink_client = Some(fallback_client); + pub(crate) fn fallback_sink_writer(mut self, fallback_client: SinkWriter) -> Self { + self.fb_sink_writer = Some(fallback_client); self } @@ -69,10 +74,11 @@ impl ForwarderBuilder { pub(crate) fn build(self) -> Forwarder { let common_labels = 
metrics::forward_metrics_labels().clone(); Forwarder { - source_client: self.source_client, - sink_client: self.sink_client, - transformer_client: self.transformer_client, - fallback_client: self.fb_sink_client, + source_reader: self.source_reader, + source_acker: self.source_acker, + sink_writer: self.sink_writer, + source_transformer: self.source_transformer, + fb_sink_writer: self.fb_sink_writer, cln_token: self.cln_token, common_labels, } @@ -86,6 +92,7 @@ impl Forwarder { pub(crate) async fn start(&mut self) -> Result<()> { let mut processed_msgs_count: usize = 0; let mut last_forwarded_at = std::time::Instant::now(); + info!("Forwarder has started"); loop { let start_time = tokio::time::Instant::now(); if self.cln_token.is_cancelled() { @@ -120,14 +127,19 @@ impl Forwarder { async fn read_and_process_messages(&mut self) -> Result { let start_time = tokio::time::Instant::now(); let messages = self - .source_client - .read_fn(config().batch_size, config().timeout_in_ms) - .await?; + .source_reader + .read(config().batch_size, config().timeout_in_ms) + .await + .map_err(|e| { + Error::ForwarderError(format!("Failed to read messages from source {:?}", e)) + })?; + debug!( "Read batch size: {} and latency - {}ms", messages.len(), start_time.elapsed().as_millis() ); + forward_metrics() .read_time .get_or_create(&self.common_labels) @@ -159,13 +171,27 @@ impl Forwarder { .inc_by(bytes_count); // Apply transformation if transformer is present - let transformed_messages = self.apply_transformer(messages).await?; + let transformed_messages = self.apply_transformer(messages).await.map_err(|e| { + Error::ForwarderError(format!( + "Failed to apply transformation to messages {:?}", + e + )) + })?; // Write the messages to the sink - self.write_to_sink(transformed_messages).await?; + self.write_to_sink(transformed_messages) + .await + .map_err(|e| { + Error::ForwarderError(format!("Failed to write messages to sink {:?}", e)) + })?; // Acknowledge the messages back to the 
source - self.acknowledge_messages(offsets).await?; + self.acknowledge_messages(offsets).await.map_err(|e| { + Error::ForwarderError(format!( + "Failed to acknowledge messages back to source {:?}", + e + )) + })?; Ok(msg_count as usize) } @@ -173,7 +199,7 @@ impl Forwarder { // Applies transformation to the messages if transformer is present // we concurrently apply transformation to all the messages. async fn apply_transformer(&self, messages: Vec) -> Result> { - let Some(transformer_client) = &self.transformer_client else { + let Some(transformer_client) = &self.source_transformer else { // return early if there is no transformer return Ok(messages); }; @@ -241,6 +267,13 @@ impl Forwarder { } Err(e) => Err(e)?, } + + // if we are shutting down, stop the retry + if self.cln_token.is_cancelled() { + return Err(Error::SinkError( + "Cancellation token triggered during retry".to_string(), + )); + } } // If after the retries we still have messages to process, handle the post retry failures @@ -342,7 +375,7 @@ impl Forwarder { messages_to_send: &mut Vec, ) -> Result { let start_time = tokio::time::Instant::now(); - match self.sink_client.sink_fn(messages_to_send.clone()).await { + match self.sink_writer.sink_fn(messages_to_send.clone()).await { Ok(response) => { debug!("Sink latency - {}ms", start_time.elapsed().as_millis()); @@ -360,9 +393,9 @@ impl Forwarder { // construct the error map for the failed messages messages_to_send.retain(|msg| { if let Some(result) = result_map.get(&msg.id) { - return if result.status == proto::Status::Success as i32 { + return if result.status == Success as i32 { false - } else if result.status == proto::Status::Fallback as i32 { + } else if result.status == Fallback as i32 { fallback_msgs.push(msg.clone()); // add to fallback messages false } else { @@ -384,22 +417,22 @@ impl Forwarder { .await; // we need to retry - return Ok(false); + Ok(false) } - Err(e) => return Err(e), + Err(e) => Err(e), } } // Writes the fallback messages to the 
fallback sink async fn handle_fallback_messages(&mut self, fallback_msgs: Vec) -> Result<()> { - if self.fallback_client.is_none() { + if self.fb_sink_writer.is_none() { return Err(Error::SinkError( "Response contains fallback messages but no fallback sink is configured" .to_string(), )); } - let fallback_client = self.fallback_client.as_mut().unwrap(); + let fallback_client = self.fb_sink_writer.as_mut().unwrap(); let mut attempts = 0; let mut fallback_error_map = HashMap::new(); // start with the original set of message to be sent. @@ -440,12 +473,12 @@ impl Forwarder { // construct the error map for the failed messages messages_to_send.retain(|msg| { if let Some(result) = result_map.get(&msg.id) { - if result.status == proto::Status::Failure as i32 { + if result.status == Failure as i32 { *fallback_error_map .entry(result.err_msg.clone()) .or_insert(0) += 1; true - } else if result.status == proto::Status::Fallback as i32 { + } else if result.status == Fallback as i32 { contains_fallback_status = true; false } else { @@ -497,7 +530,7 @@ impl Forwarder { let n = offsets.len(); let start_time = tokio::time::Instant::now(); - self.source_client.ack_fn(offsets).await?; + self.source_acker.ack(offsets).await?; debug!("Ack latency - {}ms", start_time.elapsed().as_millis()); @@ -521,14 +554,18 @@ mod tests { use chrono::Utc; use numaflow::source::{Message, Offset, SourceReadRequest}; use numaflow::{sink, source, sourcetransform}; + use tokio::sync::mpsc; use tokio::sync::mpsc::Sender; use tokio_util::sync::CancellationToken; - use crate::error::Result; use crate::forwarder::ForwarderBuilder; - use crate::sink::{SinkClient, SinkConfig}; - use crate::source::{SourceClient, SourceConfig}; - use crate::transformer::{TransformerClient, TransformerConfig}; + use crate::shared::create_rpc_channel; + use crate::sink::SinkWriter; + use crate::sink_pb::sink_client::SinkClient; + use crate::source::{SourceAcker, SourceReader}; + use crate::source_pb::source_client::SourceClient; 
+ use crate::sourcetransform_pb::source_transform_client::SourceTransformClient; + use crate::transformer::SourceTransformer; struct SimpleSource { yet_to_be_acked: std::sync::RwLock>, @@ -570,13 +607,11 @@ mod tests { .extend(message_offsets) } - async fn ack(&self, offsets: Vec) { - for offset in offsets { - self.yet_to_be_acked - .write() - .unwrap() - .remove(&String::from_utf8(offset.offset).unwrap()); - } + async fn ack(&self, offset: Offset) { + self.yet_to_be_acked + .write() + .unwrap() + .remove(&String::from_utf8(offset.offset).unwrap()); } async fn pending(&self) -> usize { @@ -619,10 +654,7 @@ mod tests { #[tonic::async_trait] impl sink::Sinker for InMemorySink { - async fn sink( - &self, - mut input: tokio::sync::mpsc::Receiver, - ) -> Vec { + async fn sink(&self, mut input: mpsc::Receiver) -> Vec { let mut responses: Vec = Vec::new(); while let Some(datum) = input.recv().await { let response = match std::str::from_utf8(&datum.value) { @@ -654,7 +686,7 @@ mod tests { #[tokio::test] async fn test_forwarder_source_sink() { - let (sink_tx, mut sink_rx) = tokio::sync::mpsc::channel(10); + let (sink_tx, mut sink_rx) = mpsc::channel(10); // Start the source server let (source_shutdown_tx, source_shutdown_rx) = tokio::sync::oneshot::channel(); @@ -672,11 +704,6 @@ mod tests { .await .unwrap(); }); - let source_config = SourceConfig { - socket_path: source_sock_file.to_str().unwrap().to_string(), - server_info_file: server_info_file.to_str().unwrap().to_string(), - max_message_size: 4 * 1024 * 1024, - }; // Start the sink server let (sink_shutdown_tx, sink_shutdown_rx) = tokio::sync::oneshot::channel(); @@ -694,11 +721,6 @@ mod tests { .await .unwrap(); }); - let sink_config = SinkConfig { - socket_path: sink_sock_file.to_str().unwrap().to_string(), - server_info_file: server_info_file.to_str().unwrap().to_string(), - max_message_size: 4 * 1024 * 1024, - }; // Start the transformer server let (transformer_shutdown_tx, transformer_shutdown_rx) = 
tokio::sync::oneshot::channel(); @@ -716,51 +738,58 @@ mod tests { .await .unwrap(); }); - let transformer_config = TransformerConfig { - socket_path: transformer_sock_file.to_str().unwrap().to_string(), - server_info_file: server_info_file.to_str().unwrap().to_string(), - max_message_size: 4 * 1024 * 1024, - }; // Wait for the servers to start tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; let cln_token = CancellationToken::new(); - let source_client = SourceClient::connect(source_config) - .await - .expect("failed to connect to source server"); - - let sink_client = SinkClient::connect(sink_config) - .await - .expect("failed to connect to sink server"); - - let transformer_client = TransformerClient::connect(transformer_config) - .await - .expect("failed to connect to transformer server"); - - let mut forwarder = ForwarderBuilder::new(source_client, sink_client, cln_token.clone()) - .transformer_client(transformer_client) - .build(); + let source_reader = SourceReader::new(SourceClient::new( + create_rpc_channel(source_sock_file.clone()).await.unwrap(), + )) + .await + .expect("failed to connect to source server"); + + let source_acker = SourceAcker::new(SourceClient::new( + create_rpc_channel(source_sock_file).await.unwrap(), + )) + .await + .expect("failed to connect to source server"); + + let sink_writer = SinkWriter::new(SinkClient::new( + create_rpc_channel(sink_sock_file).await.unwrap(), + )) + .await + .expect("failed to connect to sink server"); + + let transformer_client = SourceTransformer::new(SourceTransformClient::new( + create_rpc_channel(transformer_sock_file).await.unwrap(), + )) + .await + .expect("failed to connect to transformer server"); - let forwarder_handle = tokio::spawn(async move { - forwarder.start().await.unwrap(); + let mut forwarder = + ForwarderBuilder::new(source_reader, source_acker, sink_writer, cln_token.clone()) + .source_transformer(transformer_client) + .build(); + + // Assert the received message in a 
different task + let assert_handle = tokio::spawn(async move { + let received_message = sink_rx.recv().await.unwrap(); + assert_eq!(received_message.value, "test-message".as_bytes()); + assert_eq!( + received_message.keys, + vec!["test-key-transformed".to_string()] + ); + cln_token.cancel(); }); - // Receive messages from the sink - let received_message = sink_rx.recv().await.unwrap(); - assert_eq!(received_message.value, "test-message".as_bytes()); - assert_eq!( - received_message.keys, - vec!["test-key-transformed".to_string()] - ); + forwarder.start().await.unwrap(); - // stop the forwarder - cln_token.cancel(); - forwarder_handle - .await - .expect("failed to join forwarder task"); + // Wait for the assertion task to complete + assert_handle.await.unwrap(); + drop(forwarder); // stop the servers source_shutdown_tx .send(()) @@ -821,11 +850,6 @@ mod tests { .await .unwrap(); }); - let source_config = SourceConfig { - socket_path: source_sock_file.to_str().unwrap().to_string(), - server_info_file: server_info_file.to_str().unwrap().to_string(), - max_message_size: 4 * 1024 * 1024, - }; // Start the sink server let (sink_shutdown_tx, sink_shutdown_rx) = tokio::sync::oneshot::channel(); @@ -843,40 +867,45 @@ mod tests { .await .unwrap(); }); - let sink_config = SinkConfig { - socket_path: sink_sock_file.to_str().unwrap().to_string(), - server_info_file: server_info_file.to_str().unwrap().to_string(), - max_message_size: 4 * 1024 * 1024, - }; // Wait for the servers to start tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; let cln_token = CancellationToken::new(); - let source_client = SourceClient::connect(source_config) - .await - .expect("failed to connect to source server"); + let source_reader = SourceReader::new(SourceClient::new( + create_rpc_channel(source_sock_file.clone()).await.unwrap(), + )) + .await + .expect("failed to connect to source server"); - let sink_client = SinkClient::connect(sink_config) - .await - .expect("failed to 
connect to sink server"); + let source_acker = SourceAcker::new(SourceClient::new( + create_rpc_channel(source_sock_file).await.unwrap(), + )) + .await + .expect("failed to connect to source server"); + + let sink_writer = SinkWriter::new(SinkClient::new( + create_rpc_channel(sink_sock_file).await.unwrap(), + )) + .await + .expect("failed to connect to sink server"); let mut forwarder = - ForwarderBuilder::new(source_client, sink_client, cln_token.clone()).build(); + ForwarderBuilder::new(source_reader, source_acker, sink_writer, cln_token.clone()) + .build(); - let forwarder_handle = tokio::spawn(async move { - forwarder.start().await?; - Result::<()>::Ok(()) + let cancel_handle = tokio::spawn(async move { + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + cln_token.cancel(); }); - // Set a timeout for the forwarder - let timeout_duration = tokio::time::Duration::from_secs(1); - // The future should not complete as we should be retrying - let result = tokio::time::timeout(timeout_duration, forwarder_handle).await; - assert!(result.is_err()); + let forwarder_result = forwarder.start().await; + assert!(forwarder_result.is_err()); + cancel_handle.await.unwrap(); // stop the servers + drop(forwarder); source_shutdown_tx .send(()) .expect("failed to send shutdown signal"); @@ -886,7 +915,7 @@ mod tests { sink_shutdown_tx .send(()) - .expect("failed to send shutdown signal"); + .expect("failed to send sink shutdown signal"); sink_server_handle .await .expect("failed to join sink server task"); @@ -897,10 +926,7 @@ mod tests { #[tonic::async_trait] impl sink::Sinker for FallbackSender { - async fn sink( - &self, - mut input: tokio::sync::mpsc::Receiver, - ) -> Vec { + async fn sink(&self, mut input: mpsc::Receiver) -> Vec { let mut responses = vec![]; while let Some(datum) = input.recv().await { responses.append(&mut vec![sink::Response::fallback(datum.id)]); @@ -911,7 +937,7 @@ mod tests { #[tokio::test] async fn test_fb_sink() { - let (sink_tx, mut 
sink_rx) = tokio::sync::mpsc::channel(10); + let (sink_tx, mut sink_rx) = mpsc::channel(10); // Start the source server let (source_shutdown_tx, source_shutdown_rx) = tokio::sync::oneshot::channel(); @@ -929,11 +955,6 @@ mod tests { .await .unwrap(); }); - let source_config = SourceConfig { - socket_path: source_sock_file.to_str().unwrap().to_string(), - server_info_file: server_info_file.to_str().unwrap().to_string(), - max_message_size: 4 * 1024 * 1024, - }; // Start the primary sink server (which returns status fallback) let (sink_shutdown_tx, sink_shutdown_rx) = tokio::sync::oneshot::channel(); @@ -951,11 +972,6 @@ mod tests { .await .unwrap(); }); - let sink_config = SinkConfig { - socket_path: sink_sock_file.to_str().unwrap().to_string(), - server_info_file: server_info_file.to_str().unwrap().to_string(), - max_message_size: 4 * 1024 * 1024, - }; // Start the fb sink server let (fb_sink_shutdown_tx, fb_sink_shutdown_rx) = tokio::sync::oneshot::channel(); @@ -973,48 +989,53 @@ mod tests { .await .unwrap(); }); - let fb_sink_config = SinkConfig { - socket_path: fb_sink_sock_file.to_str().unwrap().to_string(), - server_info_file: server_info_file.to_str().unwrap().to_string(), - max_message_size: 4 * 1024 * 1024, - }; // Wait for the servers to start tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; let cln_token = CancellationToken::new(); - let source_client = SourceClient::connect(source_config) - .await - .expect("failed to connect to source server"); - - let sink_client = SinkClient::connect(sink_config) - .await - .expect("failed to connect to sink server"); - - let fb_sink_client = SinkClient::connect(fb_sink_config) - .await - .expect("failed to connect to fb sink server"); + let source_reader = SourceReader::new(SourceClient::new( + create_rpc_channel(source_sock_file.clone()).await.unwrap(), + )) + .await + .expect("failed to connect to source server"); + + let source_acker = SourceAcker::new(SourceClient::new( + 
create_rpc_channel(source_sock_file).await.unwrap(), + )) + .await + .expect("failed to connect to source server"); + + let sink_writer = SinkWriter::new(SinkClient::new( + create_rpc_channel(sink_sock_file).await.unwrap(), + )) + .await + .expect("failed to connect to sink server"); + + let fb_sink_writer = SinkWriter::new(SinkClient::new( + create_rpc_channel(fb_sink_sock_file).await.unwrap(), + )) + .await + .expect("failed to connect to fb sink server"); - let mut forwarder = ForwarderBuilder::new(source_client, sink_client, cln_token.clone()) - .fb_sink_client(fb_sink_client) - .build(); - - let forwarder_handle = tokio::spawn(async move { - forwarder.start().await.unwrap(); + let mut forwarder = + ForwarderBuilder::new(source_reader, source_acker, sink_writer, cln_token.clone()) + .fallback_sink_writer(fb_sink_writer) + .build(); + + let assert_handle = tokio::spawn(async move { + let received_message = sink_rx.recv().await.unwrap(); + assert_eq!(received_message.value, "test-message".as_bytes()); + assert_eq!(received_message.keys, vec!["test-key".to_string()]); + cln_token.cancel(); }); - // We should receive the message in the fallback sink, since the primary sink returns status fallback - let received_message = sink_rx.recv().await.unwrap(); - assert_eq!(received_message.value, "test-message".as_bytes()); - assert_eq!(received_message.keys, vec!["test-key".to_string()]); + forwarder.start().await.unwrap(); - // stop the forwarder - cln_token.cancel(); - forwarder_handle - .await - .expect("failed to join forwarder task"); + assert_handle.await.unwrap(); + drop(forwarder); // stop the servers source_shutdown_tx .send(()) diff --git a/rust/monovertex/src/lib.rs b/rust/monovertex/src/lib.rs index af69199be2..01b07498a2 100644 --- a/rust/monovertex/src/lib.rs +++ b/rust/monovertex/src/lib.rs @@ -1,18 +1,23 @@ -pub(crate) use self::error::Result; -use crate::config::config; -pub(crate) use crate::error::Error; -use crate::forwarder::ForwarderBuilder; -use 
crate::metrics::{start_metrics_https_server, LagReaderBuilder, MetricsState}; -use crate::sink::{SinkClient, SinkConfig}; -use crate::source::{SourceClient, SourceConfig}; -use crate::transformer::{TransformerClient, TransformerConfig}; -use std::net::SocketAddr; -use std::time::Duration; +extern crate core; + use tokio::signal; use tokio::task::JoinHandle; -use tokio::time::sleep; use tokio_util::sync::CancellationToken; -use tracing::{error, info, warn}; +use tracing::{error, info}; + +use crate::config::{config, SDKConfig}; + +use crate::forwarder::ForwarderBuilder; +use crate::metrics::MetricsState; +use crate::shared::create_rpc_channel; +use crate::sink::SinkWriter; +use crate::sink_pb::sink_client::SinkClient; +use crate::source::{SourceAcker, SourceReader}; +use crate::source_pb::source_client::SourceClient; +use crate::sourcetransform_pb::source_transform_client::SourceTransformClient; +use crate::transformer::SourceTransformer; + +pub(crate) use self::error::Result; /// SourcerSinker orchestrates data movement from the Source to the Sink via the optional SourceTransformer. 
/// The forward-a-chunk executes the following in an infinite loop till a shutdown signal is received: @@ -20,54 +25,33 @@ use tracing::{error, info, warn}; /// - Invokes the SourceTransformer concurrently /// - Calls the Sinker to write the batch to the Sink /// - Send Acknowledgement back to the Source -pub mod error; - -pub(crate) mod source; - -pub(crate) mod sink; - -pub(crate) mod transformer; - -pub(crate) mod forwarder; - -pub(crate) mod config; - -pub(crate) mod message; - -pub(crate) mod shared; - -mod server_info; +mod error; +pub(crate) use crate::error::Error; +mod config; +mod forwarder; +mod message; mod metrics; +mod server_info; +mod shared; +mod sink; +mod source; +mod startup; +mod transformer; + +pub(crate) mod source_pb { + tonic::include_proto!("source.v1"); +} -pub async fn mono_vertex() { - // Initialize the source, sink and transformer configurations - // We are using the default configurations for now. - let source_config = SourceConfig { - max_message_size: config().grpc_max_message_size, - ..Default::default() - }; - - let sink_config = SinkConfig { - max_message_size: config().grpc_max_message_size, - ..Default::default() - }; - - let transformer_config = if config().is_transformer_enabled { - Some(TransformerConfig { - max_message_size: config().grpc_max_message_size, - ..Default::default() - }) - } else { - None - }; +pub(crate) mod sink_pb { + tonic::include_proto!("sink.v1"); +} - let fb_sink_config = if config().is_fallback_enabled { - Some(SinkConfig::fallback_default()) - } else { - None - }; +pub(crate) mod sourcetransform_pb { + tonic::include_proto!("sourcetransformer.v1"); +} +pub async fn mono_vertex() -> Result<()> { let cln_token = CancellationToken::new(); let shutdown_cln_token = cln_token.clone(); @@ -79,15 +63,7 @@ pub async fn mono_vertex() { }); // Run the forwarder with cancellation token. 
- if let Err(e) = init( - source_config, - sink_config, - transformer_config, - fb_sink_config, - cln_token, - ) - .await - { + if let Err(e) = start_forwarder(cln_token, config().sdk_config.clone()).await { error!("Application error: {:?}", e); // abort the signal handler task since we have an error and we are shutting down @@ -97,6 +73,7 @@ pub async fn mono_vertex() { } info!("Gracefully Exiting..."); + Ok(()) } async fn shutdown_signal() { @@ -121,105 +98,104 @@ async fn shutdown_signal() { } } -/// forwards a chunk of data from the source to the sink via an optional transformer. -/// It takes an optional custom_shutdown_rx for shutting down the forwarder, useful for testing. -pub async fn init( - source_config: SourceConfig, - sink_config: SinkConfig, - transformer_config: Option, - fb_sink_config: Option, - cln_token: CancellationToken, -) -> Result<()> { - server_info::check_for_server_compatibility(&source_config.server_info_file, cln_token.clone()) - .await - .map_err(|e| { - warn!("Error waiting for source server info file: {:?}", e); - Error::ForwarderError(format!("Error waiting for source server info file: {}", e)) - })?; - let mut source_client = SourceClient::connect(source_config).await?; - - server_info::check_for_server_compatibility(&sink_config.server_info_file, cln_token.clone()) - .await - .map_err(|e| { - warn!("Error waiting for sink server info file: {:?}", e); - Error::ForwarderError(format!("Error waiting for sink server info file: {}", e)) - })?; - - let mut sink_client = SinkClient::connect(sink_config).await?; - - let mut transformer_client = if let Some(config) = transformer_config { - server_info::check_for_server_compatibility(&config.server_info_file, cln_token.clone()) - .await - .map_err(|e| { - warn!("Error waiting for transformer server info file: {:?}", e); - Error::ForwarderError(format!("Error waiting for transformer server info file: {}", e)) - })?; - Some(TransformerClient::connect(config).await?) 
+async fn start_forwarder(cln_token: CancellationToken, sdk_config: SDKConfig) -> Result<()> { + // make sure that we have compatibility with the server + startup::check_compatibility( + &cln_token, + sdk_config.source_server_info_path.into(), + sdk_config.sink_server_info_path.into(), + if sdk_config.is_transformer_enabled { + Some(sdk_config.transformer_server_info_path.into()) + } else { + None + }, + if sdk_config.is_fallback_enabled { + Some(sdk_config.fallback_server_info_path.into()) + } else { + None + }, + ) + .await?; + + let mut source_grpc_client = + SourceClient::new(create_rpc_channel(sdk_config.source_socket_path.into()).await?) + .max_encoding_message_size(sdk_config.grpc_max_message_size) + .max_encoding_message_size(sdk_config.grpc_max_message_size); + + let mut sink_grpc_client = + SinkClient::new(create_rpc_channel(sdk_config.sink_socket_path.into()).await?) + .max_encoding_message_size(sdk_config.grpc_max_message_size) + .max_encoding_message_size(sdk_config.grpc_max_message_size); + + let mut transformer_grpc_client = if sdk_config.is_transformer_enabled { + let transformer_grpc_client = SourceTransformClient::new( + create_rpc_channel(sdk_config.transformer_socket_path.into()).await?, + ) + .max_encoding_message_size(sdk_config.grpc_max_message_size) + .max_encoding_message_size(sdk_config.grpc_max_message_size); + + Some(transformer_grpc_client.clone()) } else { None }; - let mut fb_sink_client = if let Some(config) = fb_sink_config { - server_info::check_for_server_compatibility(&config.server_info_file, cln_token.clone()) - .await - .map_err(|e| { - warn!("Error waiting for fallback sink server info file: {:?}", e); - Error::ForwarderError(format!("Error waiting for fallback sink server info file: {}", e)) - })?; - Some(SinkClient::connect(config).await?) + let mut fb_sink_grpc_client = if sdk_config.is_fallback_enabled { + let fb_sink_grpc_client = + SinkClient::new(create_rpc_channel(sdk_config.fallback_socket_path.into()).await?) 
+ .max_encoding_message_size(sdk_config.grpc_max_message_size) + .max_encoding_message_size(sdk_config.grpc_max_message_size); + + Some(fb_sink_grpc_client.clone()) } else { None }; // readiness check for all the ud containers - wait_until_ready( - &mut source_client, - &mut sink_client, - &mut transformer_client, - &mut fb_sink_client, + startup::wait_until_ready( + cln_token.clone(), + &mut source_grpc_client, + &mut sink_grpc_client, + &mut transformer_grpc_client, + &mut fb_sink_grpc_client, ) .await?; - // Start the metrics server, which server the prometheus metrics. - let metrics_addr: SocketAddr = format!("0.0.0.0:{}", &config().metrics_server_listen_port) - .parse() - .expect("Invalid address"); - // Start the metrics server in a separate background async spawn, // This should be running throughout the lifetime of the application, hence the handle is not // joined. let metrics_state = MetricsState { - source_client: source_client.clone(), - sink_client: sink_client.clone(), - transformer_client: transformer_client.clone(), - fb_sink_client: fb_sink_client.clone(), + source_client: source_grpc_client.clone(), + sink_client: sink_grpc_client.clone(), + transformer_client: transformer_grpc_client.clone(), + fb_sink_client: fb_sink_grpc_client.clone(), }; - tokio::spawn(async move { - if let Err(e) = start_metrics_https_server(metrics_addr, metrics_state).await { - error!("Metrics server error: {:?}", e); - } - }); + + // start the metrics server + // FIXME: what to do with the handle + startup::start_metrics_server(metrics_state).await; // start the lag reader to publish lag metrics - let mut lag_reader = LagReaderBuilder::new(source_client.clone()) - .lag_checking_interval(Duration::from_secs( - config().lag_check_interval_in_secs.into(), - )) - .refresh_interval(Duration::from_secs( - config().lag_refresh_interval_in_secs.into(), - )) - .build(); + let mut lag_reader = startup::create_lag_reader(source_grpc_client.clone()).await; lag_reader.start().await; 
// build the forwarder - let mut forwarder_builder = ForwarderBuilder::new(source_client, sink_client, cln_token); + let source_reader = SourceReader::new(source_grpc_client.clone()).await?; + let source_acker = SourceAcker::new(source_grpc_client.clone()).await?; + let sink_writer = SinkWriter::new(sink_grpc_client.clone()).await?; + + let mut forwarder_builder = + ForwarderBuilder::new(source_reader, source_acker, sink_writer, cln_token); + // add transformer if exists - if let Some(transformer_client) = transformer_client { - forwarder_builder = forwarder_builder.transformer_client(transformer_client); + if let Some(transformer_grpc_client) = transformer_grpc_client { + let transformer = SourceTransformer::new(transformer_grpc_client).await?; + forwarder_builder = forwarder_builder.source_transformer(transformer); } + // add fallback sink if exists - if let Some(fb_sink_client) = fb_sink_client { - forwarder_builder = forwarder_builder.fb_sink_client(fb_sink_client); + if let Some(fb_sink_grpc_client) = fb_sink_grpc_client { + let fallback_writer = SinkWriter::new(fb_sink_grpc_client).await?; + forwarder_builder = forwarder_builder.fallback_sink_writer(fallback_writer); } // build the final forwarder let mut forwarder = forwarder_builder.build(); @@ -231,71 +207,24 @@ pub async fn init( Ok(()) } -async fn wait_until_ready( - source_client: &mut SourceClient, - sink_client: &mut SinkClient, - transformer_client: &mut Option, - fb_sink_client: &mut Option, -) -> Result<()> { - loop { - let source_ready = source_client.is_ready().await; - if !source_ready { - info!("UDSource is not ready, waiting..."); - } - - let sink_ready = sink_client.is_ready().await; - if !sink_ready { - info!("UDSink is not ready, waiting..."); - } - - let transformer_ready = if let Some(client) = transformer_client { - let ready = client.is_ready().await; - if !ready { - info!("UDTransformer is not ready, waiting..."); - } - ready - } else { - true - }; - - let fb_sink_ready = if let 
Some(client) = fb_sink_client { - let ready = client.is_ready().await; - if !ready { - info!("Fallback Sink is not ready, waiting..."); - } - ready - } else { - true - }; - - if source_ready && sink_ready && transformer_ready && fb_sink_ready { - break; - } - - sleep(Duration::from_secs(1)).await; - } - - Ok(()) -} - #[cfg(test)] mod tests { - use std::env; - + use crate::config::SDKConfig; + use crate::server_info::ServerInfo; + use crate::{error, start_forwarder}; use numaflow::source::{Message, Offset, SourceReadRequest}; use numaflow::{sink, source}; + use std::fs::File; + use std::io::Write; use tokio::sync::mpsc::Sender; use tokio_util::sync::CancellationToken; - use crate::sink::SinkConfig; - use crate::source::SourceConfig; - struct SimpleSource; #[tonic::async_trait] impl source::Sourcer for SimpleSource { async fn read(&self, _: SourceReadRequest, _: Sender) {} - async fn ack(&self, _: Vec) {} + async fn ack(&self, _: Offset) {} async fn pending(&self) -> usize { 0 @@ -317,12 +246,32 @@ mod tests { vec![] } } + + async fn write_server_info(file_path: &str, server_info: &ServerInfo) -> error::Result<()> { + let serialized = serde_json::to_string(server_info).unwrap(); + let mut file = File::create(file_path).unwrap(); + file.write_all(serialized.as_bytes()).unwrap(); + file.write_all(b"U+005C__END__").unwrap(); + Ok(()) + } + #[tokio::test] async fn run_forwarder() { let (src_shutdown_tx, src_shutdown_rx) = tokio::sync::oneshot::channel(); let tmp_dir = tempfile::TempDir::new().unwrap(); let src_sock_file = tmp_dir.path().join("source.sock"); let src_info_file = tmp_dir.path().join("source-server-info"); + let server_info_obj = ServerInfo { + protocol: "uds".to_string(), + language: "rust".to_string(), + minimum_numaflow_version: "0.1.0".to_string(), + version: "0.1.0".to_string(), + metadata: None, + }; + + write_server_info(src_info_file.to_str().unwrap(), &server_info_obj) + .await + .unwrap(); let server_info = src_info_file.clone(); let server_socket 
= src_sock_file.clone(); @@ -334,17 +283,16 @@ mod tests { .await .unwrap(); }); - let source_config = SourceConfig { - socket_path: src_sock_file.to_str().unwrap().to_string(), - server_info_file: src_info_file.to_str().unwrap().to_string(), - max_message_size: 100, - }; let (sink_shutdown_tx, sink_shutdown_rx) = tokio::sync::oneshot::channel(); let tmp_dir = tempfile::TempDir::new().unwrap(); let sink_sock_file = tmp_dir.path().join("sink.sock"); let sink_server_info = tmp_dir.path().join("sink-server-info"); + write_server_info(sink_server_info.to_str().unwrap(), &server_info_obj) + .await + .unwrap(); + let server_socket = sink_sock_file.clone(); let server_info = sink_server_info.clone(); let sink_server_handle = tokio::spawn(async move { @@ -355,37 +303,31 @@ mod tests { .await .unwrap(); }); - let sink_config = SinkConfig { - socket_path: sink_sock_file.to_str().unwrap().to_string(), - server_info_file: sink_server_info.to_str().unwrap().to_string(), - max_message_size: 100, - }; // wait for the servers to start // FIXME: we need to have a better way, this is flaky tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; - unsafe { - env::set_var("SOURCE_SOCKET", src_sock_file.to_str().unwrap()); - env::set_var("SINK_SOCKET", sink_sock_file.to_str().unwrap()); - } - let cln_token = CancellationToken::new(); - let forwarder_cln_token = cln_token.clone(); - let forwarder_handle = tokio::spawn(async move { - let result = - super::init(source_config, sink_config, None, None, forwarder_cln_token).await; - assert!(result.is_ok()); + let token_clone = cln_token.clone(); + tokio::spawn(async move { + // FIXME: we need to have a better way, this is flaky + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + token_clone.cancel(); }); - // wait for the forwarder to start - // FIXME: we need to have a better way, this is flaky - tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + let sdk_config = SDKConfig { + 
source_socket_path: src_sock_file.to_str().unwrap().to_string(), + sink_socket_path: sink_sock_file.to_str().unwrap().to_string(), + source_server_info_path: src_info_file.to_str().unwrap().to_string(), + sink_server_info_path: sink_server_info.to_str().unwrap().to_string(), + grpc_max_message_size: 1024, + ..Default::default() + }; - // stop the forwarder - cln_token.cancel(); - forwarder_handle.await.unwrap(); + let result = start_forwarder(cln_token.clone(), sdk_config).await; + assert!(result.is_ok()); // stop the source and sink servers src_shutdown_tx.send(()).unwrap(); diff --git a/rust/monovertex/src/message.rs b/rust/monovertex/src/message.rs index 6df0874948..403c377ec4 100644 --- a/rust/monovertex/src/message.rs +++ b/rust/monovertex/src/message.rs @@ -6,9 +6,10 @@ use chrono::{DateTime, Utc}; use crate::error::Error; use crate::shared::{prost_timestamp_from_utc, utc_from_timestamp}; -use crate::sink::proto; -use crate::source::proto::read_response; -use crate::transformer::proto::SourceTransformRequest; +use crate::sink_pb::SinkRequest; +use crate::source_pb; +use crate::source_pb::{AckRequest, read_response}; +use crate::sourcetransform_pb::SourceTransformRequest; /// A message that is sent from the source to the sink. 
#[derive(Debug, Clone)] @@ -36,6 +37,22 @@ pub(crate) struct Offset { pub(crate) partition_id: i32, } +impl From for AckRequest { + fn from(offset: Offset) -> Self { + Self { + request: Some(source_pb::ack_request::Request { + offset: Some(source_pb::Offset { + offset: BASE64_STANDARD + .decode(offset.offset) + .expect("we control the encoding, so this should never fail"), + partition_id: offset.partition_id, + }), + }), + handshake: None, + } + } +} + /// Convert the [`Message`] to [`SourceTransformRequest`] impl From for SourceTransformRequest { fn from(message: Message) -> Self { @@ -74,7 +91,7 @@ impl TryFrom for Message { } /// Convert [`Message`] to [`proto::SinkRequest`] -impl From for proto::SinkRequest { +impl From for SinkRequest { fn from(message: Message) -> Self { Self { keys: message.keys, diff --git a/rust/monovertex/src/metrics.rs b/rust/monovertex/src/metrics.rs index 375e7c071c..fc6ab7a0b0 100644 --- a/rust/monovertex/src/metrics.rs +++ b/rust/monovertex/src/metrics.rs @@ -9,12 +9,6 @@ use axum::http::{Response, StatusCode}; use axum::response::IntoResponse; use axum::{routing::get, Router}; use axum_server::tls_rustls::RustlsConfig; -use prometheus_client::encoding::text::encode; -use prometheus_client::metrics::counter::Counter; -use prometheus_client::metrics::family::Family; -use prometheus_client::metrics::gauge::Gauge; -use prometheus_client::metrics::histogram::{exponential_buckets, Histogram}; -use prometheus_client::registry::Registry; use rcgen::{generate_simple_self_signed, CertifiedKey}; use tokio::net::{TcpListener, ToSocketAddrs}; use tokio::sync::Mutex; @@ -24,9 +18,17 @@ use tracing::{debug, error, info}; use crate::config::config; use crate::error::Error; -use crate::sink::SinkClient; -use crate::source::SourceClient; -use crate::transformer::TransformerClient; +use crate::sink_pb::sink_client::SinkClient; +use crate::source_pb::source_client::SourceClient; +use 
crate::sourcetransform_pb::source_transform_client::SourceTransformClient; +use prometheus_client::encoding::text::encode; +use prometheus_client::metrics::counter::Counter; +use prometheus_client::metrics::family::Family; +use prometheus_client::metrics::gauge::Gauge; +use prometheus_client::metrics::histogram::{exponential_buckets, Histogram}; +use prometheus_client::registry::Registry; +use tonic::transport::Channel; +use tonic::Request; // Define the labels for the metrics // Note: Please keep consistent with the definitions in MonoVertex daemon @@ -60,10 +62,10 @@ const SINK_TIME: &str = "monovtx_sink_time"; #[derive(Clone)] pub(crate) struct MetricsState { - pub source_client: SourceClient, - pub sink_client: SinkClient, - pub transformer_client: Option, - pub fb_sink_client: Option, + pub source_client: SourceClient, + pub sink_client: SinkClient, + pub transformer_client: Option>, + pub fb_sink_client: Option>, } /// The global register of all metrics. @@ -324,22 +326,27 @@ async fn livez() -> impl IntoResponse { } async fn sidecar_livez(State(mut state): State) -> impl IntoResponse { - if !state.source_client.is_ready().await { + if state + .source_client + .is_ready(Request::new(())) + .await + .is_err() + { error!("Source client is not available"); return StatusCode::SERVICE_UNAVAILABLE; } - if !state.sink_client.is_ready().await { + if state.sink_client.is_ready(Request::new(())).await.is_err() { error!("Sink client is not available"); return StatusCode::SERVICE_UNAVAILABLE; } if let Some(mut transformer_client) = state.transformer_client { - if !transformer_client.is_ready().await { + if transformer_client.is_ready(Request::new(())).await.is_err() { error!("Transformer client is not available"); return StatusCode::SERVICE_UNAVAILABLE; } } if let Some(mut fb_sink_client) = state.fb_sink_client { - if !fb_sink_client.is_ready().await { + if fb_sink_client.is_ready(Request::new(())).await.is_err() { error!("Fallback sink client is not available"); return 
StatusCode::SERVICE_UNAVAILABLE; } @@ -359,7 +366,7 @@ struct TimestampedPending { /// and exposing the metrics. It maintains a list of pending stats and ensures that /// only the most recent entries are kept. pub(crate) struct LagReader { - source_client: SourceClient, + source_client: SourceClient, lag_checking_interval: Duration, refresh_interval: Duration, buildup_handle: Option>, @@ -369,13 +376,13 @@ pub(crate) struct LagReader { /// LagReaderBuilder is used to build a `LagReader` instance. pub(crate) struct LagReaderBuilder { - source_client: SourceClient, + source_client: SourceClient, lag_checking_interval: Option, refresh_interval: Option, } impl LagReaderBuilder { - pub(crate) fn new(source_client: SourceClient) -> Self { + pub(crate) fn new(source_client: SourceClient) -> Self { Self { source_client, lag_checking_interval: None, @@ -448,14 +455,14 @@ impl Drop for LagReader { /// Periodically checks the pending messages from the source client and build the pending stats. async fn build_pending_info( - mut source_client: SourceClient, + mut source_client: SourceClient, lag_checking_interval: Duration, pending_stats: Arc>>, ) { let mut ticker = time::interval(lag_checking_interval); loop { ticker.tick().await; - match source_client.pending_fn().await { + match fetch_pending(&mut source_client).await { Ok(pending) => { if pending != -1 { let mut stats = pending_stats.lock().await; @@ -477,6 +484,17 @@ async fn build_pending_info( } } +async fn fetch_pending(source_client: &mut SourceClient) -> crate::error::Result { + let request = Request::new(()); + let response = source_client + .pending_fn(request) + .await? + .into_inner() + .result + .map_or(-1, |r| r.count); // default to -1(unavailable) + Ok(response) +} + // Periodically exposes the pending metrics by calculating the average pending messages over different intervals. 
async fn expose_pending_metrics( refresh_interval: Duration, @@ -539,3 +557,163 @@ async fn calculate_pending( } // TODO add tests + +#[cfg(test)] +mod tests { + use super::*; + use crate::metrics::MetricsState; + use crate::shared::create_rpc_channel; + use numaflow::source::{Message, Offset, SourceReadRequest}; + use numaflow::{sink, source, sourcetransform}; + use std::net::SocketAddr; + use tokio::sync::mpsc::Sender; + + struct SimpleSource; + #[tonic::async_trait] + impl source::Sourcer for SimpleSource { + async fn read(&self, _: SourceReadRequest, _: Sender) {} + + async fn ack(&self, _: Offset) {} + + async fn pending(&self) -> usize { + 0 + } + + async fn partitions(&self) -> Option> { + None + } + } + + struct SimpleSink; + + #[tonic::async_trait] + impl sink::Sinker for SimpleSink { + async fn sink( + &self, + _input: tokio::sync::mpsc::Receiver, + ) -> Vec { + vec![] + } + } + + struct NowCat; + + #[tonic::async_trait] + impl sourcetransform::SourceTransformer for NowCat { + async fn transform( + &self, + _input: sourcetransform::SourceTransformRequest, + ) -> Vec { + vec![] + } + } + + #[tokio::test] + async fn test_start_metrics_https_server() { + let (src_shutdown_tx, src_shutdown_rx) = tokio::sync::oneshot::channel(); + let tmp_dir = tempfile::TempDir::new().unwrap(); + let src_sock_file = tmp_dir.path().join("source.sock"); + let src_info_file = tmp_dir.path().join("source-server-info"); + + let server_info = src_info_file.clone(); + let server_socket = src_sock_file.clone(); + let src_server_handle = tokio::spawn(async move { + source::Server::new(SimpleSource) + .with_socket_file(server_socket) + .with_server_info_file(server_info) + .start_with_shutdown(src_shutdown_rx) + .await + .unwrap(); + }); + + let (sink_shutdown_tx, sink_shutdown_rx) = tokio::sync::oneshot::channel(); + let (fb_sink_shutdown_tx, fb_sink_shutdown_rx) = tokio::sync::oneshot::channel(); + let tmp_dir = tempfile::TempDir::new().unwrap(); + let sink_sock_file = 
tmp_dir.path().join("sink.sock"); + let sink_server_info = tmp_dir.path().join("sink-server-info"); + let fb_sink_sock_file = tmp_dir.path().join("fallback-sink.sock"); + let fb_sink_server_info = tmp_dir.path().join("fallback-sink-server-info"); + + let server_socket = sink_sock_file.clone(); + let server_info = sink_server_info.clone(); + let sink_server_handle = tokio::spawn(async move { + sink::Server::new(SimpleSink) + .with_socket_file(server_socket) + .with_server_info_file(server_info) + .start_with_shutdown(sink_shutdown_rx) + .await + .unwrap(); + }); + let fb_server_socket = fb_sink_sock_file.clone(); + let fb_server_info = fb_sink_server_info.clone(); + let fb_sink_server_handle = tokio::spawn(async move { + sink::Server::new(SimpleSink) + .with_socket_file(fb_server_socket) + .with_server_info_file(fb_server_info) + .start_with_shutdown(fb_sink_shutdown_rx) + .await + .unwrap(); + }); + + // start the transformer server + let (transformer_shutdown_tx, transformer_shutdown_rx) = tokio::sync::oneshot::channel(); + let sock_file = tmp_dir.path().join("sourcetransform.sock"); + let server_info_file = tmp_dir.path().join("sourcetransformer-server-info"); + + let server_info = server_info_file.clone(); + let server_socket = sock_file.clone(); + let transformer_handle = tokio::spawn(async move { + sourcetransform::Server::new(NowCat) + .with_socket_file(server_socket) + .with_server_info_file(server_info) + .start_with_shutdown(transformer_shutdown_rx) + .await + .expect("server failed"); + }); + + // wait for the servers to start + // FIXME: we need to have a better way, this is flaky + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + let metrics_state = MetricsState { + source_client: SourceClient::new(create_rpc_channel(src_sock_file).await.unwrap()), + sink_client: SinkClient::new(create_rpc_channel(sink_sock_file).await.unwrap()), + transformer_client: Some(SourceTransformClient::new( + create_rpc_channel(sock_file).await.unwrap(), + 
)), + fb_sink_client: Some(SinkClient::new( + create_rpc_channel(fb_sink_sock_file).await.unwrap(), + )), + }; + + let addr: SocketAddr = "127.0.0.1:9091".parse().unwrap(); + let metrics_state_clone = metrics_state.clone(); + let server_handle = tokio::spawn(async move { + start_metrics_https_server(addr, metrics_state_clone) + .await + .unwrap(); + }); + + // invoke the sidecar-livez endpoint + let response = sidecar_livez(State(metrics_state)).await; + assert_eq!(response.into_response().status(), StatusCode::NO_CONTENT); + + // invoke the livez endpoint + let response = livez().await; + assert_eq!(response.into_response().status(), StatusCode::NO_CONTENT); + + // invoke the metrics endpoint + let response = metrics_handler().await; + assert_eq!(response.into_response().status(), StatusCode::OK); + + // Stop the servers + server_handle.abort(); + src_shutdown_tx.send(()).unwrap(); + sink_shutdown_tx.send(()).unwrap(); + fb_sink_shutdown_tx.send(()).unwrap(); + transformer_shutdown_tx.send(()).unwrap(); + src_server_handle.await.unwrap(); + sink_server_handle.await.unwrap(); + fb_sink_server_handle.await.unwrap(); + transformer_handle.await.unwrap(); + } +} diff --git a/rust/monovertex/src/server_info.rs b/rust/monovertex/src/server_info.rs index 98484869fd..35495097b8 100644 --- a/rust/monovertex/src/server_info.rs +++ b/rust/monovertex/src/server_info.rs @@ -1,5 +1,6 @@ use std::collections::HashMap; use std::fs; +use std::path::PathBuf; use std::str::FromStr; use std::time::Duration; @@ -21,21 +22,21 @@ const END: &str = "U+005C__END__"; #[derive(Serialize, Deserialize, Debug)] pub(crate) struct ServerInfo { #[serde(default)] - protocol: String, + pub(crate) protocol: String, #[serde(default)] - language: String, + pub(crate) language: String, #[serde(default)] - minimum_numaflow_version: String, + pub(crate) minimum_numaflow_version: String, #[serde(default)] - version: String, + pub(crate) version: String, #[serde(default)] - metadata: Option>, // Metadata is 
optional + pub(crate) metadata: Option>, // Metadata is optional } /// check_for_server_compatibility waits until the server info file is ready and check whether the /// server is compatible with Numaflow. -pub async fn check_for_server_compatibility( - file_path: &str, +pub(crate) async fn check_for_server_compatibility( + file_path: PathBuf, cln_token: CancellationToken, ) -> error::Result<()> { // Read the server info file @@ -186,9 +187,13 @@ fn human_readable(ver: &str) -> String { fn check_constraint(version: &Version, constraint: &str) -> error::Result<()> { let binding = version.to_string(); // extract the major.minor.patch version - let mmp_version = Version::parse(binding.split('-').next().unwrap_or_default()).map_err(|e| { - Error::ServerInfoError(format!("Error parsing version: {}, version string: {}", e, binding)) - })?; + let mmp_version = + Version::parse(binding.split('-').next().unwrap_or_default()).map_err(|e| { + Error::ServerInfoError(format!( + "Error parsing version: {}, version string: {}", + e, binding + )) + })?; let mmp_ver_str_constraint = trim_after_dash(constraint.trim_start_matches(">=")); let mmp_ver_constraint = format!(">={}", mmp_ver_str_constraint); @@ -245,7 +250,7 @@ fn trim_after_dash(input: &str) -> &str { /// The cancellation token is used to stop ready-check of server_info file in case it is missing. /// This cancellation token is closed via the global shutdown handler. 
async fn read_server_info( - file_path: &str, + file_path: PathBuf, cln_token: CancellationToken, ) -> error::Result { // Infinite loop to keep checking until the file is ready @@ -255,14 +260,14 @@ async fn read_server_info( } // Check if the file exists and has content - if let Ok(metadata) = fs::metadata(file_path) { + if let Ok(metadata) = fs::metadata(file_path.as_path()) { if metadata.len() > 0 { // Break out of the loop if the file is ready (has content) break; } } // Log message indicating the file is not ready and sleep for 1 second before checking again - info!("Server info file {} is not ready, waiting...", file_path); + info!("Server info file {:?} is not ready, waiting...", file_path); sleep(Duration::from_secs(1)).await; } @@ -271,7 +276,7 @@ async fn read_server_info( let contents; loop { // Attempt to read the file - match fs::read_to_string(file_path) { + match fs::read_to_string(file_path.as_path()) { Ok(data) => { if data.ends_with(END) { // If the file ends with the END marker, trim it and break out of the loop @@ -944,7 +949,7 @@ mod tests { let _ = write_server_info(&server_info, file_path.to_str().unwrap()).await; // Call the read_server_info function - let result = read_server_info(file_path.to_str().unwrap(), cln_token).await; + let result = read_server_info(file_path, cln_token).await; assert!(result.is_ok(), "Expected Ok, got {:?}", result); let server_info = result.unwrap(); @@ -973,7 +978,7 @@ mod tests { let _drop_guard = cln_token.clone().drop_guard(); // Call the read_server_info function - let result = read_server_info(file_path.to_str().unwrap(), cln_token).await; + let result = read_server_info(file_path, cln_token).await; assert!(result.is_err(), "Expected Err, got {:?}", result); let error = result.unwrap_err(); diff --git a/rust/monovertex/src/shared.rs b/rust/monovertex/src/shared.rs index 2c63244647..2ce22ba803 100644 --- a/rust/monovertex/src/shared.rs +++ b/rust/monovertex/src/shared.rs @@ -1,13 +1,14 @@ use 
std::path::PathBuf; +use crate::error::Error; +use backoff::retry::Retry; +use backoff::strategy::fixed; use chrono::{DateTime, TimeZone, Timelike, Utc}; use prost_types::Timestamp; use tokio::net::UnixStream; use tonic::transport::{Channel, Endpoint, Uri}; use tower::service_fn; -use crate::error::Error; - pub(crate) fn utc_from_timestamp(t: Option) -> DateTime { t.map_or(Utc.timestamp_nanos(-1), |t| { DateTime::from_timestamp(t.seconds, t.nanos as u32).unwrap_or(Utc.timestamp_nanos(-1)) @@ -21,6 +22,21 @@ pub(crate) fn prost_timestamp_from_utc(t: DateTime) -> Option { }) } +pub(crate) async fn create_rpc_channel(socket_path: PathBuf) -> crate::error::Result { + const RECONNECT_INTERVAL: u64 = 1000; + const MAX_RECONNECT_ATTEMPTS: usize = 5; + + let interval = fixed::Interval::from_millis(RECONNECT_INTERVAL).take(MAX_RECONNECT_ATTEMPTS); + + let channel = Retry::retry( + interval, + || async { connect_with_uds(socket_path.clone()).await }, + |_: &Error| true, + ) + .await?; + Ok(channel) +} + pub(crate) async fn connect_with_uds(uds_path: PathBuf) -> Result { let channel = Endpoint::try_from("http://[::]:50051") .map_err(|e| Error::ConnectionError(format!("Failed to create endpoint: {:?}", e)))? 
diff --git a/rust/monovertex/src/sink.rs b/rust/monovertex/src/sink.rs index fb82273fb6..a2088a8c2f 100644 --- a/rust/monovertex/src/sink.rs +++ b/rust/monovertex/src/sink.rs @@ -1,78 +1,21 @@ -use crate::config::config; -use crate::error::{Error, Result}; +use crate::error::Result; use crate::message::Message; -use crate::shared::connect_with_uds; -use backoff::retry::Retry; -use backoff::strategy::fixed; +use crate::sink_pb::sink_client::SinkClient; +use crate::sink_pb::{SinkRequest, SinkResponse}; use tonic::transport::Channel; -use tonic::Request; - -pub mod proto { - tonic::include_proto!("sink.v1"); -} - -const RECONNECT_INTERVAL: u64 = 1000; -const MAX_RECONNECT_ATTEMPTS: usize = 5; -const SINK_SOCKET: &str = "/var/run/numaflow/sink.sock"; -const FB_SINK_SOCKET: &str = "/var/run/numaflow/fb-sink.sock"; - -const SINK_SERVER_INFO_FILE: &str = "/var/run/numaflow/sinker-server-info"; -const FB_SINK_SERVER_INFO_FILE: &str = "/var/run/numaflow/fb-sinker-server-info"; - -/// SinkConfig is the configuration for the sink server. -#[derive(Debug, Clone)] -pub struct SinkConfig { - pub socket_path: String, - pub server_info_file: String, - pub max_message_size: usize, -} - -impl Default for SinkConfig { - fn default() -> Self { - SinkConfig { - socket_path: SINK_SOCKET.to_string(), - server_info_file: SINK_SERVER_INFO_FILE.to_string(), - max_message_size: config().grpc_max_message_size, - } - } -} - -impl SinkConfig { - /// default config for fallback sink - pub(crate) fn fallback_default() -> Self { - SinkConfig { - max_message_size: config().grpc_max_message_size, - socket_path: FB_SINK_SOCKET.to_string(), - server_info_file: FB_SINK_SERVER_INFO_FILE.to_string(), - } - } -} +/// SinkWriter writes messages to a sink. #[derive(Clone)] -/// SinkClient is a client to interact with the sink server. 
-pub struct SinkClient { - client: proto::sink_client::SinkClient, +pub struct SinkWriter { + client: SinkClient, } -impl SinkClient { - pub(crate) async fn connect(config: SinkConfig) -> Result { - let interval = - fixed::Interval::from_millis(RECONNECT_INTERVAL).take(MAX_RECONNECT_ATTEMPTS); - - let channel = Retry::retry( - interval, - || async { connect_with_uds(config.socket_path.clone().into()).await }, - |_: &Error| true, - ) - .await?; - - let client = proto::sink_client::SinkClient::new(channel) - .max_decoding_message_size(config.max_message_size) - .max_encoding_message_size(config.max_message_size); +impl SinkWriter { + pub(crate) async fn new(client: SinkClient) -> Result { Ok(Self { client }) } - pub(crate) async fn sink_fn(&mut self, messages: Vec) -> Result { + pub(crate) async fn sink_fn(&mut self, messages: Vec) -> Result { // create a channel with at least size let (tx, rx) = tokio::sync::mpsc::channel(if messages.is_empty() { 1 @@ -80,7 +23,7 @@ impl SinkClient { messages.len() }); - let requests: Vec = + let requests: Vec = messages.into_iter().map(|message| message.into()).collect(); tokio::spawn(async move { @@ -99,10 +42,6 @@ impl SinkClient { Ok(response) } - - pub(crate) async fn is_ready(&mut self) -> bool { - self.client.is_ready(Request::new(())).await.is_ok() - } } #[cfg(test)] @@ -111,9 +50,9 @@ mod tests { use numaflow::sink; use tracing::info; - use crate::message::Offset; - use super::*; + use crate::message::Offset; + use crate::shared::create_rpc_channel; struct Logger; #[tonic::async_trait] @@ -139,7 +78,7 @@ mod tests { } } #[tokio::test] - async fn sink_operations() { + async fn sink_operations() -> Result<()> { // start the server let (shutdown_tx, shutdown_rx) = tokio::sync::oneshot::channel(); let tmp_dir = tempfile::TempDir::new().unwrap(); @@ -160,13 +99,10 @@ mod tests { // wait for the server to start tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; - let mut sink_client = 
SinkClient::connect(SinkConfig { - socket_path: sock_file.to_str().unwrap().to_string(), - server_info_file: server_info_file.to_str().unwrap().to_string(), - max_message_size: 4 * 1024 * 1024, - }) - .await - .expect("failed to connect to sink server"); + let mut sink_client = + SinkWriter::new(SinkClient::new(create_rpc_channel(sock_file).await?)) + .await + .expect("failed to connect to sink server"); let messages = vec![ Message { @@ -193,15 +129,13 @@ mod tests { }, ]; - let ready_response = sink_client.is_ready().await; - assert!(ready_response); - - let response = sink_client.sink_fn(messages).await.unwrap(); + let response = sink_client.sink_fn(messages).await?; assert_eq!(response.results.len(), 2); shutdown_tx .send(()) .expect("failed to send shutdown signal"); server_handle.await.expect("failed to join server task"); + Ok(()) } } diff --git a/rust/monovertex/src/source.rs b/rust/monovertex/src/source.rs index 681b0beb58..fdfde1b6d4 100644 --- a/rust/monovertex/src/source.rs +++ b/rust/monovertex/src/source.rs @@ -1,146 +1,176 @@ -use crate::error::{Error, Result}; +use crate::config::config; +use crate::error::Error::SourceError; +use crate::error::Result; use crate::message::{Message, Offset}; -use crate::shared::connect_with_uds; -use backoff::retry::Retry; -use backoff::strategy::fixed; -use base64::prelude::BASE64_STANDARD; -use base64::Engine; -use tokio_stream::StreamExt; +use crate::source_pb; +use crate::source_pb::source_client::SourceClient; +use crate::source_pb::{ + ack_response, read_request, AckRequest, AckResponse, ReadRequest, ReadResponse, +}; +use log::info; +use tokio::sync::mpsc; +use tokio_stream::wrappers::ReceiverStream; use tonic::transport::Channel; -use tonic::Request; +use tonic::{Request, Streaming}; -pub mod proto { - tonic::include_proto!("source.v1"); -} -const RECONNECT_INTERVAL: u64 = 1000; -const MAX_RECONNECT_ATTEMPTS: usize = 5; -const SOURCE_SOCKET: &str = "/var/run/numaflow/source.sock"; -const 
SOURCE_SERVER_INFO_FILE: &str = "/var/run/numaflow/sourcer-server-info"; - -/// SourceConfig is the configuration for the source server. -#[derive(Debug, Clone)] -pub struct SourceConfig { - pub socket_path: String, - pub server_info_file: String, - pub max_message_size: usize, -} - -impl Default for SourceConfig { - fn default() -> Self { - SourceConfig { - socket_path: SOURCE_SOCKET.to_string(), - server_info_file: SOURCE_SERVER_INFO_FILE.to_string(), - max_message_size: 64 * 1024 * 1024, // 64 MB - } - } +/// SourceReader reads messages from a source. +#[derive(Debug)] +pub(crate) struct SourceReader { + read_tx: mpsc::Sender, + resp_stream: Streaming, } -/// SourceClient is a client to interact with the source server. -#[derive(Debug, Clone)] -pub(crate) struct SourceClient { - client: proto::source_client::SourceClient, -} - -impl SourceClient { - pub(crate) async fn connect(config: SourceConfig) -> Result { - let interval = - fixed::Interval::from_millis(RECONNECT_INTERVAL).take(MAX_RECONNECT_ATTEMPTS); - - let channel = Retry::retry( - interval, - || async { connect_with_uds(config.socket_path.clone().into()).await }, - |_: &Error| true, - ) - .await?; +impl SourceReader { + pub(crate) async fn new(mut client: SourceClient) -> Result { + let (read_tx, read_rx) = mpsc::channel(config().batch_size as usize); + let read_stream = ReceiverStream::new(read_rx); + + // do a handshake for read with the server before we start sending read requests + let handshake_request = ReadRequest { + request: None, + handshake: Some(source_pb::Handshake { sot: true }), + }; + read_tx + .send(handshake_request) + .await + .map_err(|e| SourceError(format!("failed to send handshake request: {}", e)))?; - let client = proto::source_client::SourceClient::new(channel) - .max_encoding_message_size(config.max_message_size) - .max_decoding_message_size(config.max_message_size); + let mut resp_stream = client + .read_fn(Request::new(read_stream)) + .await? 
+ .into_inner(); + + // first response from the server will be the handshake response. We need to check if the + // server has accepted the handshake. + let handshake_response = resp_stream.message().await?.ok_or(SourceError( + "failed to receive handshake response".to_string(), + ))?; + // handshake cannot to None during the initial phase and it has to set `sot` to true. + if handshake_response.handshake.map_or(true, |h| !h.sot) { + return Err(SourceError("invalid handshake response".to_string())); + } - Ok(Self { client }) + Ok(Self { + read_tx, + resp_stream, + }) } - pub(crate) async fn read_fn( + pub(crate) async fn read( &mut self, num_records: u64, timeout_in_ms: u32, ) -> Result> { - let request = Request::new(proto::ReadRequest { - request: Some(proto::read_request::Request { + let request = ReadRequest { + request: Some(read_request::Request { num_records, timeout_in_ms, }), - }); + handshake: None, + }; + + self.read_tx + .send(request) + .await + .map_err(|e| SourceError(e.to_string()))?; - let mut stream = self.client.read_fn(request).await?.into_inner(); let mut messages = Vec::with_capacity(num_records as usize); - while let Some(response) = stream.next().await { - let result = response? + while let Some(response) = self.resp_stream.message().await? 
{ + if response.status.map_or(false, |status| status.eot) { + break; + } + + let result = response .result - .ok_or_else(|| Error::SourceError("Empty message".to_string()))?; + .ok_or_else(|| SourceError("Empty message".to_string()))?; messages.push(result.try_into()?); } - Ok(messages) } +} - pub(crate) async fn ack_fn(&mut self, offsets: Vec) -> Result { - let offsets = offsets - .into_iter() - .map(|offset| proto::Offset { - offset: BASE64_STANDARD - .decode(offset.offset) - .expect("we control the encoding, so this should never fail"), - partition_id: offset.partition_id, - }) - .collect(); - - let request = Request::new(proto::AckRequest { - request: Some(proto::ack_request::Request { offsets }), - }); +/// SourceAcker acks the messages from a source. +#[derive(Debug)] +pub(crate) struct SourceAcker { + ack_tx: mpsc::Sender, + ack_resp_stream: Streaming, +} - Ok(self.client.ack_fn(request).await?.into_inner()) - } +impl SourceAcker { + pub(crate) async fn new(mut client: SourceClient) -> Result { + let (ack_tx, ack_rx) = mpsc::channel(config().batch_size as usize); + let ack_stream = ReceiverStream::new(ack_rx); + + // do a handshake for ack with the server before we start sending ack requests + let ack_handshake_request = AckRequest { + request: None, + handshake: Some(source_pb::Handshake { sot: true }), + }; + ack_tx + .send(ack_handshake_request) + .await + .map_err(|e| SourceError(format!("failed to send ack handshake request: {}", e)))?; + + let mut ack_resp_stream = client.ack_fn(Request::new(ack_stream)).await?.into_inner(); + + // first response from the server will be the handshake response. We need to check if the + // server has accepted the handshake. + let ack_handshake_response = ack_resp_stream.message().await?.ok_or(SourceError( + "failed to receive ack handshake response".to_string(), + ))?; + // handshake cannot to None during the initial phase and it has to set `sot` to true. 
+ if ack_handshake_response.handshake.map_or(true, |h| !h.sot) { + return Err(SourceError("invalid ack handshake response".to_string())); + } - pub(crate) async fn pending_fn(&mut self) -> Result { - let request = Request::new(()); - let response = self - .client - .pending_fn(request) - .await? - .into_inner() - .result - .map_or(-1, |r| r.count); // default to -1(unavailable) - Ok(response) + Ok(Self { + ack_tx, + ack_resp_stream, + }) } - #[allow(dead_code)] - // TODO: remove dead_code - pub(crate) async fn partitions_fn(&mut self) -> Result> { - let request = Request::new(()); - let response = self.client.partitions_fn(request).await?.into_inner(); - Ok(response.result.map_or(vec![], |r| r.partitions)) - } + pub(crate) async fn ack(&mut self, offsets: Vec) -> Result { + let n = offsets.len(); + + // send n ack requests + for offset in offsets { + let request = offset.into(); + self.ack_tx + .send(request) + .await + .map_err(|e| SourceError(e.to_string()))?; + } - pub(crate) async fn is_ready(&mut self) -> bool { - self.client.is_ready(Request::new(())).await.is_ok() + // make sure we get n responses for the n requests. + for _ in 0..n { + let _ = self + .ack_resp_stream + .message() + .await? 
+ .ok_or(SourceError("failed to receive ack response".to_string()))?; + } + + Ok(AckResponse { + result: Some(ack_response::Result { success: Some(()) }), + handshake: None, + }) } } #[cfg(test)] mod tests { use std::collections::HashSet; - use std::error::Error; + use crate::shared::create_rpc_channel; + use crate::source::{SourceAcker, SourceReader}; + use crate::source_pb::source_client::SourceClient; use chrono::Utc; use numaflow::source; use numaflow::source::{Message, Offset, SourceReadRequest}; use tokio::sync::mpsc::Sender; - use crate::source::{SourceClient, SourceConfig}; - struct SimpleSource { num: usize, yet_to_ack: std::sync::RwLock>, @@ -180,13 +210,11 @@ mod tests { self.yet_to_ack.write().unwrap().extend(message_offsets) } - async fn ack(&self, offsets: Vec) { - for offset in offsets { - self.yet_to_ack - .write() - .unwrap() - .remove(&String::from_utf8(offset.offset).unwrap()); - } + async fn ack(&self, offset: Offset) { + self.yet_to_ack + .write() + .unwrap() + .remove(&String::from_utf8(offset.offset).unwrap()); } async fn pending(&self) -> usize { @@ -199,7 +227,7 @@ mod tests { } #[tokio::test] - async fn source_operations() -> Result<(), Box> { + async fn source_operations() { // start the server let (shutdown_tx, shutdown_rx) = tokio::sync::oneshot::channel(); let tmp_dir = tempfile::TempDir::new().unwrap(); @@ -214,43 +242,43 @@ mod tests { .with_server_info_file(server_info) .start_with_shutdown(shutdown_rx) .await - .unwrap(); + .unwrap() }); // wait for the server to start // TODO: flaky tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; - let mut source_client = SourceClient::connect(SourceConfig { - socket_path: sock_file.to_str().unwrap().to_string(), - server_info_file: server_info_file.to_str().unwrap().to_string(), - max_message_size: 4 * 1024 * 1024, - }) + let mut source_reader = SourceReader::new(SourceClient::new( + create_rpc_channel(sock_file.clone()).await.unwrap(), + )) .await - .expect("failed to connect 
to source server"); + .map_err(|e| panic!("failed to create source reader: {:?}", e)) + .unwrap(); - let response = source_client.is_ready().await; - assert!(response); + let mut source_acker = SourceAcker::new(SourceClient::new( + create_rpc_channel(sock_file).await.unwrap(), + )) + .await + .map_err(|e| panic!("failed to create source acker: {:?}", e)) + .unwrap(); - let messages = source_client.read_fn(5, 1000).await.unwrap(); + let messages = source_reader.read(5, 1000).await.unwrap(); assert_eq!(messages.len(), 5); - let response = source_client - .ack_fn(messages.iter().map(|m| m.offset.clone()).collect()) + let response = source_acker + .ack(messages.iter().map(|m| m.offset.clone()).collect()) .await .unwrap(); assert!(response.result.unwrap().success.is_some()); - let pending = source_client.pending_fn().await.unwrap(); - assert_eq!(pending, 0); - - let partitions = source_client.partitions_fn().await.unwrap(); - assert_eq!(partitions, vec![2]); - + // we need to drop the client, because if there are any in-flight requests + // server fails to shut down. 
https://github.com/numaproj/numaflow-rs/issues/85 + drop(source_reader); + drop(source_acker); shutdown_tx .send(()) .expect("failed to send shutdown signal"); server_handle.await.expect("failed to join server task"); - Ok(()) } } diff --git a/rust/monovertex/src/startup.rs b/rust/monovertex/src/startup.rs new file mode 100644 index 0000000000..2614d045b7 --- /dev/null +++ b/rust/monovertex/src/startup.rs @@ -0,0 +1,349 @@ +use std::net::SocketAddr; +use std::path::PathBuf; +use std::time::Duration; + +use crate::config::config; +use crate::error::Error; +use crate::metrics::{start_metrics_https_server, LagReader, LagReaderBuilder, MetricsState}; +use crate::sink_pb::sink_client::SinkClient; +use crate::source_pb::source_client::SourceClient; +use crate::sourcetransform_pb::source_transform_client::SourceTransformClient; +use crate::{error, server_info}; + +use tokio::task::JoinHandle; +use tokio::time::sleep; +use tokio_util::sync::CancellationToken; +use tonic::transport::Channel; +use tonic::Request; +use tracing::{info, warn}; + +pub(crate) async fn check_compatibility( + cln_token: &CancellationToken, + source_file_path: PathBuf, + sink_file_path: PathBuf, + transformer_file_path: Option, + fb_sink_file_path: Option, +) -> error::Result<()> { + server_info::check_for_server_compatibility(source_file_path, cln_token.clone()) + .await + .map_err(|e| { + warn!("Error waiting for source server info file: {:?}", e); + Error::ForwarderError("Error waiting for server info file".to_string()) + })?; + + server_info::check_for_server_compatibility(sink_file_path, cln_token.clone()) + .await + .map_err(|e| { + error!("Error waiting for sink server info file: {:?}", e); + Error::ForwarderError("Error waiting for server info file".to_string()) + })?; + + if let Some(transformer_path) = transformer_file_path { + server_info::check_for_server_compatibility(transformer_path, cln_token.clone()) + .await + .map_err(|e| { + error!("Error waiting for transformer server info file: 
{:?}", e); + Error::ForwarderError("Error waiting for server info file".to_string()) + })?; + } + + if let Some(fb_sink_path) = fb_sink_file_path { + server_info::check_for_server_compatibility(fb_sink_path, cln_token.clone()) + .await + .map_err(|e| { + warn!("Error waiting for fallback sink server info file: {:?}", e); + Error::ForwarderError("Error waiting for server info file".to_string()) + })?; + } + Ok(()) +} + +pub(crate) async fn start_metrics_server(metrics_state: MetricsState) -> JoinHandle<()> { + tokio::spawn(async { + // Start the metrics server, which server the prometheus metrics. + let metrics_addr: SocketAddr = format!("0.0.0.0:{}", &config().metrics_server_listen_port) + .parse() + .expect("Invalid address"); + + if let Err(e) = start_metrics_https_server(metrics_addr, metrics_state).await { + error!("Metrics server error: {:?}", e); + } + }) +} + +pub(crate) async fn create_lag_reader(lag_reader_grpc_client: SourceClient) -> LagReader { + LagReaderBuilder::new(lag_reader_grpc_client) + .lag_checking_interval(Duration::from_secs( + config().lag_check_interval_in_secs.into(), + )) + .refresh_interval(Duration::from_secs( + config().lag_refresh_interval_in_secs.into(), + )) + .build() +} + +pub(crate) async fn wait_until_ready( + cln_token: CancellationToken, + source_client: &mut SourceClient, + sink_client: &mut SinkClient, + transformer_client: &mut Option>, + fb_sink_client: &mut Option>, +) -> error::Result<()> { + loop { + if cln_token.is_cancelled() { + return Err(Error::ForwarderError( + "Cancellation token is cancelled".to_string(), + )); + } + let source_ready = source_client.is_ready(Request::new(())).await.is_ok(); + if !source_ready { + info!("UDSource is not ready, waiting..."); + } + + let sink_ready = sink_client.is_ready(Request::new(())).await.is_ok(); + if !sink_ready { + info!("UDSink is not ready, waiting..."); + } + + let transformer_ready = if let Some(client) = transformer_client { + let ready = 
client.is_ready(Request::new(())).await.is_ok(); + if !ready { + info!("UDTransformer is not ready, waiting..."); + } + ready + } else { + true + }; + + let fb_sink_ready = if let Some(client) = fb_sink_client { + let ready = client.is_ready(Request::new(())).await.is_ok(); + if !ready { + info!("Fallback Sink is not ready, waiting..."); + } + ready + } else { + true + }; + + if source_ready && sink_ready && transformer_ready && fb_sink_ready { + break; + } + + sleep(Duration::from_secs(1)).await; + } + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::server_info::ServerInfo; + use crate::shared::create_rpc_channel; + use numaflow::source::{Message, Offset, SourceReadRequest}; + use numaflow::{sink, source, sourcetransform}; + use std::fs::File; + use std::io::Write; + use tempfile::tempdir; + use tokio::sync::mpsc; + use tokio::sync::mpsc::Sender; + use tokio_util::sync::CancellationToken; + + async fn write_server_info(file_path: &str, server_info: &ServerInfo) -> error::Result<()> { + let serialized = serde_json::to_string(server_info).unwrap(); + let mut file = File::create(file_path).unwrap(); + file.write_all(serialized.as_bytes()).unwrap(); + file.write_all(b"U+005C__END__").unwrap(); + Ok(()) + } + + #[tokio::test] + async fn test_check_compatibility_success() { + let dir = tempdir().unwrap(); + let source_file_path = dir.path().join("source_server_info.json"); + let sink_file_path = dir.path().join("sink_server_info.json"); + let transformer_file_path = dir.path().join("transformer_server_info.json"); + let fb_sink_file_path = dir.path().join("fb_sink_server_info.json"); + + let server_info = ServerInfo { + protocol: "uds".to_string(), + language: "rust".to_string(), + minimum_numaflow_version: "0.1.0".to_string(), + version: "0.1.0".to_string(), + metadata: None, + }; + + write_server_info(source_file_path.to_str().unwrap(), &server_info) + .await + .unwrap(); + write_server_info(sink_file_path.to_str().unwrap(), &server_info) + 
.await + .unwrap(); + write_server_info(transformer_file_path.to_str().unwrap(), &server_info) + .await + .unwrap(); + write_server_info(fb_sink_file_path.to_str().unwrap(), &server_info) + .await + .unwrap(); + + let cln_token = CancellationToken::new(); + let result = + check_compatibility(&cln_token, source_file_path, sink_file_path, None, None).await; + + assert!(result.is_ok()); + } + + #[tokio::test] + async fn test_check_compatibility_failure() { + let cln_token = CancellationToken::new(); + let dir = tempdir().unwrap(); + let source_file_path = dir.path().join("source_server_info.json"); + let sink_file_path = dir.path().join("sink_server_info.json"); + let transformer_file_path = dir.path().join("transformer_server_info.json"); + let fb_sink_file_path = dir.path().join("fb_sink_server_info.json"); + + // do not write server info files to simulate failure + // cancel the token after 100ms to simulate cancellation + let token = cln_token.clone(); + let handle = tokio::spawn(async move { + sleep(Duration::from_millis(100)).await; + token.cancel(); + }); + let result = check_compatibility( + &cln_token, + source_file_path, + sink_file_path, + Some(transformer_file_path), + Some(fb_sink_file_path), + ) + .await; + + assert!(result.is_err()); + handle.await.unwrap(); + } + + struct SimpleSource {} + + #[tonic::async_trait] + impl source::Sourcer for SimpleSource { + async fn read(&self, _request: SourceReadRequest, _transmitter: Sender) {} + + async fn ack(&self, _offset: Offset) {} + + async fn pending(&self) -> usize { + 0 + } + + async fn partitions(&self) -> Option> { + Some(vec![0]) + } + } + + struct SimpleTransformer; + #[tonic::async_trait] + impl sourcetransform::SourceTransformer for SimpleTransformer { + async fn transform( + &self, + _input: sourcetransform::SourceTransformRequest, + ) -> Vec { + vec![] + } + } + + struct InMemorySink {} + + #[tonic::async_trait] + impl sink::Sinker for InMemorySink { + async fn sink(&self, mut _input: 
mpsc::Receiver) -> Vec { + vec![] + } + } + + #[tokio::test] + async fn test_wait_until_ready() { + // Start the source server + let (source_shutdown_tx, source_shutdown_rx) = tokio::sync::oneshot::channel(); + let tmp_dir = tempfile::TempDir::new().unwrap(); + let source_sock_file = tmp_dir.path().join("source.sock"); + let server_info_file = tmp_dir.path().join("source-server-info"); + + let server_info = server_info_file.clone(); + let source_socket = source_sock_file.clone(); + let source_server_handle = tokio::spawn(async move { + source::Server::new(SimpleSource {}) + .with_socket_file(source_socket) + .with_server_info_file(server_info) + .start_with_shutdown(source_shutdown_rx) + .await + .unwrap(); + }); + + // Start the sink server + let (sink_shutdown_tx, sink_shutdown_rx) = tokio::sync::oneshot::channel(); + let sink_tmp_dir = tempfile::TempDir::new().unwrap(); + let sink_sock_file = sink_tmp_dir.path().join("sink.sock"); + let server_info_file = sink_tmp_dir.path().join("sink-server-info"); + + let server_info = server_info_file.clone(); + let sink_socket = sink_sock_file.clone(); + let sink_server_handle = tokio::spawn(async move { + sink::Server::new(InMemorySink {}) + .with_socket_file(sink_socket) + .with_server_info_file(server_info) + .start_with_shutdown(sink_shutdown_rx) + .await + .unwrap(); + }); + + // Start the transformer server + let (transformer_shutdown_tx, transformer_shutdown_rx) = tokio::sync::oneshot::channel(); + let tmp_dir = tempfile::TempDir::new().unwrap(); + let transformer_sock_file = tmp_dir.path().join("transformer.sock"); + let server_info_file = tmp_dir.path().join("transformer-server-info"); + + let server_info = server_info_file.clone(); + let transformer_socket = transformer_sock_file.clone(); + let transformer_server_handle = tokio::spawn(async move { + sourcetransform::Server::new(SimpleTransformer {}) + .with_socket_file(transformer_socket) + .with_server_info_file(server_info) + 
.start_with_shutdown(transformer_shutdown_rx) + .await + .unwrap(); + }); + + // Wait for the servers to start + sleep(Duration::from_millis(100)).await; + + let mut source_grpc_client = + SourceClient::new(create_rpc_channel(source_sock_file.clone()).await.unwrap()); + let mut sink_grpc_client = + SinkClient::new(create_rpc_channel(sink_sock_file.clone()).await.unwrap()); + let mut transformer_grpc_client = Some(SourceTransformClient::new( + create_rpc_channel(transformer_sock_file.clone()) + .await + .unwrap(), + )); + + let mut fb_sink_grpc_client = None; + + let cln_token = CancellationToken::new(); + let result = wait_until_ready( + cln_token, + &mut source_grpc_client, + &mut sink_grpc_client, + &mut transformer_grpc_client, + &mut fb_sink_grpc_client, + ) + .await; + assert!(result.is_ok()); + + source_shutdown_tx.send(()).unwrap(); + sink_shutdown_tx.send(()).unwrap(); + transformer_shutdown_tx.send(()).unwrap(); + + source_server_handle.await.unwrap(); + sink_server_handle.await.unwrap(); + transformer_server_handle.await.unwrap(); + } +} diff --git a/rust/monovertex/src/transformer.rs b/rust/monovertex/src/transformer.rs index f891a851fc..f7797b5d7d 100644 --- a/rust/monovertex/src/transformer.rs +++ b/rust/monovertex/src/transformer.rs @@ -1,61 +1,20 @@ -use crate::error::{Error, Result}; +use crate::error::Result; use crate::message::Message; -use crate::shared::{connect_with_uds, utc_from_timestamp}; -use crate::transformer::proto::SourceTransformRequest; -use backoff::retry::Retry; -use backoff::strategy::fixed; +use crate::shared::utc_from_timestamp; +use crate::sourcetransform_pb::source_transform_client::SourceTransformClient; +use crate::sourcetransform_pb::SourceTransformRequest; use tonic::transport::Channel; -use tonic::Request; - -pub mod proto { - tonic::include_proto!("sourcetransformer.v1"); -} const DROP: &str = "U+005C__DROP__"; -const RECONNECT_INTERVAL: u64 = 1000; -const MAX_RECONNECT_ATTEMPTS: usize = 5; -const TRANSFORMER_SOCKET: 
&str = "/var/run/numaflow/sourcetransform.sock"; -const TRANSFORMER_SERVER_INFO_FILE: &str = "/var/run/numaflow/sourcetransformer-server-info"; - -/// TransformerConfig is the configuration for the transformer server. -#[derive(Debug, Clone)] -pub struct TransformerConfig { - pub socket_path: String, - pub server_info_file: String, - pub max_message_size: usize, -} - -impl Default for TransformerConfig { - fn default() -> Self { - TransformerConfig { - socket_path: TRANSFORMER_SOCKET.to_string(), - server_info_file: TRANSFORMER_SERVER_INFO_FILE.to_string(), - max_message_size: 64 * 1024 * 1024, // 64 MB - } - } -} /// TransformerClient is a client to interact with the transformer server. #[derive(Clone)] -pub struct TransformerClient { - client: proto::source_transform_client::SourceTransformClient, +pub struct SourceTransformer { + client: SourceTransformClient, } -impl TransformerClient { - pub(crate) async fn connect(config: TransformerConfig) -> Result { - let interval = - fixed::Interval::from_millis(RECONNECT_INTERVAL).take(MAX_RECONNECT_ATTEMPTS); - - let channel = Retry::retry( - interval, - || async { connect_with_uds(config.socket_path.clone().into()).await }, - |_: &Error| true, - ) - .await?; - - let client = proto::source_transform_client::SourceTransformClient::new(channel) - .max_decoding_message_size(config.max_message_size) - .max_encoding_message_size(config.max_message_size); +impl SourceTransformer { + pub(crate) async fn new(client: SourceTransformClient) -> Result { Ok(Self { client }) } @@ -92,21 +51,18 @@ impl TransformerClient { Ok(Some(messages)) } - - pub(crate) async fn is_ready(&mut self) -> bool { - self.client.is_ready(Request::new(())).await.is_ok() - } } #[cfg(test)] mod tests { use std::error::Error; + use crate::shared::create_rpc_channel; + use crate::sourcetransform_pb::source_transform_client::SourceTransformClient; + use crate::transformer::SourceTransformer; use numaflow::sourcetransform; use tempfile::TempDir; - use 
crate::transformer::{TransformerClient, TransformerConfig}; - struct NowCat; #[tonic::async_trait] @@ -143,11 +99,9 @@ mod tests { // wait for the server to start tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; - let mut client = TransformerClient::connect(TransformerConfig { - socket_path: sock_file.to_str().unwrap().to_string(), - server_info_file: server_info_file.to_str().unwrap().to_string(), - max_message_size: 4 * 1024 * 1024, - }) + let mut client = SourceTransformer::new(SourceTransformClient::new( + create_rpc_channel(sock_file).await?, + )) .await?; let message = crate::message::Message { @@ -162,9 +116,6 @@ mod tests { headers: Default::default(), }; - let resp = client.is_ready().await; - assert!(resp); - let resp = client.transform_fn(message).await?; assert!(resp.is_some()); assert_eq!(resp.unwrap().len(), 1); @@ -212,11 +163,9 @@ mod tests { // wait for the server to start tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; - let mut client = TransformerClient::connect(TransformerConfig { - socket_path: sock_file.to_str().unwrap().to_string(), - server_info_file: server_info_file.to_str().unwrap().to_string(), - max_message_size: 4 * 1024 * 1024, - }) + let mut client = SourceTransformer::new(SourceTransformClient::new( + create_rpc_channel(sock_file).await?, + )) .await?; let message = crate::message::Message { @@ -231,9 +180,6 @@ mod tests { headers: Default::default(), }; - let resp = client.is_ready().await; - assert!(resp); - let resp = client.transform_fn(message).await?; assert!(resp.is_none()); diff --git a/rust/servesink/Cargo.toml b/rust/servesink/Cargo.toml index e820030494..90a7c44696 100644 --- a/rust/servesink/Cargo.toml +++ b/rust/servesink/Cargo.toml @@ -6,11 +6,11 @@ edition = "2021" [dependencies] tonic = "0.12.0" tokio = { version = "1.0", features = ["macros", "rt-multi-thread"] } -numaflow = { git = "https://github.com/numaproj/numaflow-rs.git", branch = "main" } +numaflow = { git = 
"https://github.com/numaproj/numaflow-rs.git", branch = "handshake" } tracing = "0.1.40" tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } [dependencies.reqwest] -version = "^0.11" +version = "0.12.7" default-features = false features = ["rustls-tls"] \ No newline at end of file diff --git a/rust/serving/src/app/tracker.rs b/rust/serving/src/app/tracker.rs index 85d3c2b76d..12420f948c 100644 --- a/rust/serving/src/app/tracker.rs +++ b/rust/serving/src/app/tracker.rs @@ -5,7 +5,7 @@ use std::sync::Arc; use serde::{Deserialize, Serialize}; use crate::app::callback::CallbackRequest; -use crate::pipeline::{Edge, PipelineDCG, OperatorType}; +use crate::pipeline::{Edge, OperatorType, PipelineDCG}; use crate::Error; fn compare_slice(operator: &OperatorType, a: &[String], b: &[String]) -> bool { diff --git a/rust/src/bin/main.rs b/rust/src/bin/main.rs index fdbd58a6a0..0b000dc032 100644 --- a/rust/src/bin/main.rs +++ b/rust/src/bin/main.rs @@ -30,7 +30,9 @@ async fn main() { info!("Error running servesink: {}", e); } } else if args.contains(&"--monovertex".to_string()) { - monovertex::mono_vertex().await; + if let Err(e) = monovertex::mono_vertex().await { + error!("Error running monovertex: {}", e); + } } else { error!("Invalid argument. 
Use --serve, --servesink, or --monovertex."); } diff --git a/test/monovertex-e2e/testdata/mono-vertex-with-transformer.yaml b/test/monovertex-e2e/testdata/mono-vertex-with-transformer.yaml index acdb0b29f6..e491448505 100644 --- a/test/monovertex-e2e/testdata/mono-vertex-with-transformer.yaml +++ b/test/monovertex-e2e/testdata/mono-vertex-with-transformer.yaml @@ -3,6 +3,8 @@ kind: MonoVertex metadata: name: transformer-mono-vertex spec: + scale: + min: 1 source: udsource: container: diff --git a/test/udsource-e2e/testdata/simple-source-go.yaml b/test/udsource-e2e/testdata/simple-source-go.yaml index 28e27af1f6..65c6472479 100644 --- a/test/udsource-e2e/testdata/simple-source-go.yaml +++ b/test/udsource-e2e/testdata/simple-source-go.yaml @@ -11,7 +11,6 @@ spec: # A simple user-defined source for e2e testing # See https://github.com/numaproj/numaflow-go/tree/main/pkg/sourcer/examples/simple_source image: quay.io/numaio/numaflow-go/source-simple-source:stable - imagePullPolicy: Always limits: readBatchSize: 500 scale: diff --git a/test/udsource-e2e/testdata/simple-source-java.yaml b/test/udsource-e2e/testdata/simple-source-java.yaml index 2d030bc06b..4c883b94b3 100644 --- a/test/udsource-e2e/testdata/simple-source-java.yaml +++ b/test/udsource-e2e/testdata/simple-source-java.yaml @@ -5,16 +5,19 @@ metadata: spec: vertices: - name: in + scale: + min: 1 source: udsource: container: # A simple user-defined source for e2e testing # See https://github.com/numaproj/numaflow-java/tree/main/examples/src/main/java/io/numaproj/numaflow/examples/source/simple image: quay.io/numaio/numaflow-java/source-simple-source:stable - imagePullPolicy: Always limits: readBatchSize: 500 - name: out + scale: + min: 1 sink: log: {} edges: diff --git a/test/udsource-e2e/testdata/simple-source-python.yaml b/test/udsource-e2e/testdata/simple-source-python.yaml index 47bc7a175b..9862b63bb6 100644 --- a/test/udsource-e2e/testdata/simple-source-python.yaml +++ 
b/test/udsource-e2e/testdata/simple-source-python.yaml @@ -5,13 +5,14 @@ metadata: spec: vertices: - name: in + scale: + min: 1 source: udsource: container: # A simple user-defined source for e2e testing # See https://github.com/numaproj/numaflow-python/tree/main/examples/source/simple_source image: quay.io/numaio/numaflow-python/simple-source:stable - imagePullPolicy: Always limits: readBatchSize: 500 - name: out diff --git a/test/udsource-e2e/testdata/simple-source-rust.yaml b/test/udsource-e2e/testdata/simple-source-rust.yaml new file mode 100644 index 0000000000..5a2d670710 --- /dev/null +++ b/test/udsource-e2e/testdata/simple-source-rust.yaml @@ -0,0 +1,26 @@ +apiVersion: numaflow.numaproj.io/v1alpha1 +kind: Pipeline +metadata: + name: simple-source-rust +spec: + vertices: + - name: in + source: + udsource: + container: + # A simple user-defined source for e2e testing + # https://github.com/numaproj/numaflow-rs/tree/main/examples/simple-source + image: quay.io/numaio/numaflow-rs/simple-source:stable + limits: + readBatchSize: 500 + scale: + min: 1 + - name: out + sink: + log: {} + scale: + min: 1 + max: 1 + edges: + - from: in + to: out diff --git a/test/udsource-e2e/udsource_test.go b/test/udsource-e2e/udsource_test.go index ef5a9ffacf..8d3de49c74 100644 --- a/test/udsource-e2e/udsource_test.go +++ b/test/udsource-e2e/udsource_test.go @@ -50,12 +50,16 @@ func (s *UserDefinedSourceSuite) testSimpleSourcePython() { s.testSimpleSource("python", false) } +func (s *UserDefinedSourceSuite) testSimpleSourceRust() { + s.testSimpleSource("rust", false) +} + func (s *UserDefinedSourceSuite) TestUDSource() { var wg sync.WaitGroup - wg.Add(3) + wg.Add(4) go func() { defer wg.Done() - s.testSimpleSourcePython() + // s.testSimpleSourcePython() // FIXME: python udsource }() go func() { defer wg.Done() @@ -65,6 +69,10 @@ func (s *UserDefinedSourceSuite) TestUDSource() { defer wg.Done() s.testSimpleSourceGo() }() + go func() { + defer wg.Done() + s.testSimpleSourceRust() + 
}() wg.Wait() } From b4f9278570f67cba3d85fffe7ca287c5b00da489 Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Sun, 22 Sep 2024 21:59:57 -0700 Subject: [PATCH 067/188] fix: rollback codegen script (#2079) --- hack/update-codegen.sh | 6 +- .../v1alpha1/zz_generated.deepcopy.go | 62 +++++++++++++++++++ 2 files changed, 66 insertions(+), 2 deletions(-) diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh index 1137a938cc..3ab861b73b 100755 --- a/hack/update-codegen.sh +++ b/hack/update-codegen.sh @@ -17,13 +17,15 @@ cd "${FAKE_REPOPATH}" CODEGEN_PKG=${CODEGEN_PKG:-$(cd "${FAKE_REPOPATH}"; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)} +chmod +x ${CODEGEN_PKG}/*.sh + subheader "running codegen" -bash -x ${CODEGEN_PKG}/kube_codegen.sh "deepcopy" \ +bash -x ${CODEGEN_PKG}/generate-groups.sh "deepcopy" \ github.com/numaproj/numaflow/pkg/client github.com/numaproj/numaflow/pkg/apis \ "numaflow:v1alpha1" \ --go-header-file hack/boilerplate/boilerplate.go.txt -bash -x ${CODEGEN_PKG}/kube_codegen.sh "client,informer,lister" \ +bash -x ${CODEGEN_PKG}/generate-groups.sh "client,informer,lister" \ github.com/numaproj/numaflow/pkg/client github.com/numaproj/numaflow/pkg/apis \ "numaflow:v1alpha1" \ --plural-exceptions="Vertex:Vertices,MonoVertex:MonoVertices" \ diff --git a/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go index b6325ce920..49b93292ff 100644 --- a/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go @@ -212,6 +212,7 @@ func (in *AbstractVertex) DeepCopyInto(out *AbstractVertex) { *out = new(ContainerTemplate) (*in).DeepCopyInto(*out) } + in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy) return } @@ -422,6 +423,16 @@ func (in *Container) DeepCopyInto(out *Container) { *out = new(v1.PullPolicy) **out = **in } + if in.ReadinessProbe != nil { + in, out := &in.ReadinessProbe, &out.ReadinessProbe + *out = new(Probe) 
+ (*in).DeepCopyInto(*out) + } + if in.LivenessProbe != nil { + in, out := &in.LivenessProbe, &out.LivenessProbe + *out = new(Probe) + (*in).DeepCopyInto(*out) + } return } @@ -458,6 +469,16 @@ func (in *ContainerTemplate) DeepCopyInto(out *ContainerTemplate) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.ReadinessProbe != nil { + in, out := &in.ReadinessProbe, &out.ReadinessProbe + *out = new(Probe) + (*in).DeepCopyInto(*out) + } + if in.LivenessProbe != nil { + in, out := &in.LivenessProbe, &out.LivenessProbe + *out = new(Probe) + (*in).DeepCopyInto(*out) + } return } @@ -1917,6 +1938,47 @@ func (in *PipelineStatus) DeepCopy() *PipelineStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Probe) DeepCopyInto(out *Probe) { + *out = *in + if in.InitialDelaySeconds != nil { + in, out := &in.InitialDelaySeconds, &out.InitialDelaySeconds + *out = new(int32) + **out = **in + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int32) + **out = **in + } + if in.PeriodSeconds != nil { + in, out := &in.PeriodSeconds, &out.PeriodSeconds + *out = new(int32) + **out = **in + } + if in.SuccessThreshold != nil { + in, out := &in.SuccessThreshold, &out.SuccessThreshold + *out = new(int32) + **out = **in + } + if in.FailureThreshold != nil { + in, out := &in.FailureThreshold, &out.FailureThreshold + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Probe. +func (in *Probe) DeepCopy() *Probe { + if in == nil { + return nil + } + out := new(Probe) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *RedisBufferService) DeepCopyInto(out *RedisBufferService) { *out = *in From b8c61debb784db0eaae31352138fd9b447f9d151 Mon Sep 17 00:00:00 2001 From: Vigith Maurice Date: Mon, 23 Sep 2024 09:09:40 -0700 Subject: [PATCH 068/188] chore: fix numaflow-rs (#2081) Signed-off-by: Vigith Maurice --- rust/Cargo.lock | 10 +++++----- rust/monovertex/Cargo.toml | 2 +- rust/servesink/Cargo.toml | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/rust/Cargo.lock b/rust/Cargo.lock index 624d5f14a8..81b0a5149b 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -1098,7 +1098,7 @@ dependencies = [ "tokio", "tokio-rustls 0.26.0", "tower-service", - "webpki-roots 0.26.5", + "webpki-roots 0.26.6", ] [[package]] @@ -1605,7 +1605,7 @@ dependencies = [ [[package]] name = "numaflow" version = "0.1.1" -source = "git+https://github.com/numaproj/numaflow-rs.git?branch=handshake#baecc88456f317b08bc869f82596e2b746cf798b" +source = "git+https://github.com/numaproj/numaflow-rs.git?branch=main#362f2b0a0705c34ce3693b8714885dfbae7843e8" dependencies = [ "chrono", "futures-util", @@ -2226,7 +2226,7 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "webpki-roots 0.26.5", + "webpki-roots 0.26.6", "windows-registry", ] @@ -3408,9 +3408,9 @@ checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" [[package]] name = "webpki-roots" -version = "0.26.5" +version = "0.26.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bd24728e5af82c6c4ec1b66ac4844bdf8156257fccda846ec58b42cd0cdbe6a" +checksum = "841c67bff177718f1d4dfefde8d8f0e78f9b6589319ba88312f567fc5841a958" dependencies = [ "rustls-pki-types", ] diff --git a/rust/monovertex/Cargo.toml b/rust/monovertex/Cargo.toml index da75a4c8c8..ce973f79ac 100644 --- a/rust/monovertex/Cargo.toml +++ b/rust/monovertex/Cargo.toml @@ -38,7 +38,7 @@ log = "0.4.22" [dev-dependencies] tempfile = "3.11.0" -numaflow = { git = "https://github.com/numaproj/numaflow-rs.git", 
branch = "handshake" } +numaflow = { git = "https://github.com/numaproj/numaflow-rs.git", branch = "main" } [build-dependencies] tonic-build = "0.12.1" diff --git a/rust/servesink/Cargo.toml b/rust/servesink/Cargo.toml index 90a7c44696..7b037c7208 100644 --- a/rust/servesink/Cargo.toml +++ b/rust/servesink/Cargo.toml @@ -6,7 +6,7 @@ edition = "2021" [dependencies] tonic = "0.12.0" tokio = { version = "1.0", features = ["macros", "rt-multi-thread"] } -numaflow = { git = "https://github.com/numaproj/numaflow-rs.git", branch = "handshake" } +numaflow = { git = "https://github.com/numaproj/numaflow-rs.git", branch = "main" } tracing = "0.1.40" tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } From 6d1ebd04f2089c81bd8e0c5e763cd7c363cb7623 Mon Sep 17 00:00:00 2001 From: Sidhant Kohli Date: Mon, 23 Sep 2024 19:47:26 -0700 Subject: [PATCH 069/188] feat: add pause for monovertex (#2077) Signed-off-by: Sidhant Kohli --- api/json-schema/schema.json | 13 + api/openapi-spec/swagger.json | 13 + .../numaflow.numaproj.io_monovertices.yaml | 15 + config/install.yaml | 15 + config/namespace-install.yaml | 15 + docs/APIs.md | 106 ++ pkg/apis/numaflow/v1alpha1/generated.pb.go | 1315 ++++++++++------- pkg/apis/numaflow/v1alpha1/generated.proto | 12 + .../numaflow/v1alpha1/mono_vertex_types.go | 39 +- .../v1alpha1/mono_vertex_types_test.go | 32 + .../numaflow/v1alpha1/openapi_generated.go | 29 +- pkg/apis/numaflow/v1alpha1/pipeline_types.go | 4 +- .../numaflow/v1alpha1/pipeline_types_test.go | 2 +- .../v1alpha1/zz_generated.deepcopy.go | 17 + pkg/reconciler/monovertex/controller.go | 14 +- pkg/reconciler/monovertex/controller_test.go | 3 +- pkg/reconciler/monovertex/scaling/scaling.go | 10 +- rust/numaflow-models/src/models/mod.rs | 2 + .../src/models/mono_vertex_lifecycle.rs | 32 + .../src/models/mono_vertex_spec.rs | 3 + 20 files changed, 1118 insertions(+), 573 deletions(-) create mode 100644 rust/numaflow-models/src/models/mono_vertex_lifecycle.rs diff --git 
a/api/json-schema/schema.json b/api/json-schema/schema.json index 24ca429580..27258328ee 100644 --- a/api/json-schema/schema.json +++ b/api/json-schema/schema.json @@ -19132,6 +19132,15 @@ ], "type": "object" }, + "io.numaproj.numaflow.v1alpha1.MonoVertexLifecycle": { + "properties": { + "desiredPhase": { + "description": "DesiredPhase used to bring the pipeline from current phase to desired phase", + "type": "string" + } + }, + "type": "object" + }, "io.numaproj.numaflow.v1alpha1.MonoVertexLimits": { "properties": { "readBatchSize": { @@ -19213,6 +19222,10 @@ }, "type": "array" }, + "lifecycle": { + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.MonoVertexLifecycle", + "description": "Lifecycle defines the Lifecycle properties of a MonoVertex" + }, "limits": { "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.MonoVertexLimits", "description": "Limits define the limitations such as read batch size for the mono vertex." diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index e2e29fbabf..4bf6fb8758 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -19127,6 +19127,15 @@ } } }, + "io.numaproj.numaflow.v1alpha1.MonoVertexLifecycle": { + "type": "object", + "properties": { + "desiredPhase": { + "description": "DesiredPhase used to bring the pipeline from current phase to desired phase", + "type": "string" + } + } + }, "io.numaproj.numaflow.v1alpha1.MonoVertexLimits": { "type": "object", "properties": { @@ -19209,6 +19218,10 @@ "$ref": "#/definitions/io.k8s.api.core.v1.Container" } }, + "lifecycle": { + "description": "Lifecycle defines the Lifecycle properties of a MonoVertex", + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.MonoVertexLifecycle" + }, "limits": { "description": "Limits define the limitations such as read batch size for the mono vertex.", "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.MonoVertexLimits" diff --git a/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml 
b/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml index 4d8a23ba14..e4b27b015c 100644 --- a/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml @@ -2299,6 +2299,21 @@ spec: - name type: object type: array + lifecycle: + default: + desiredPhase: Running + properties: + desiredPhase: + default: Running + enum: + - "" + - Running + - Failed + - Pausing + - Paused + - Deleting + type: string + type: object limits: properties: readBatchSize: diff --git a/config/install.yaml b/config/install.yaml index c6551b513a..367657d66f 100644 --- a/config/install.yaml +++ b/config/install.yaml @@ -5195,6 +5195,21 @@ spec: - name type: object type: array + lifecycle: + default: + desiredPhase: Running + properties: + desiredPhase: + default: Running + enum: + - "" + - Running + - Failed + - Pausing + - Paused + - Deleting + type: string + type: object limits: properties: readBatchSize: diff --git a/config/namespace-install.yaml b/config/namespace-install.yaml index 07fc13628d..48c6d0677c 100644 --- a/config/namespace-install.yaml +++ b/config/namespace-install.yaml @@ -5195,6 +5195,21 @@ spec: - name type: object type: array + lifecycle: + default: + desiredPhase: Running + properties: + desiredPhase: + default: Running + enum: + - "" + - Running + - Failed + - Pausing + - Paused + - Deleting + type: string + type: object limits: properties: readBatchSize: diff --git a/docs/APIs.md b/docs/APIs.md index 7b698883e7..eb9b923b4f 100644 --- a/docs/APIs.md +++ b/docs/APIs.md @@ -5893,6 +5893,27 @@ The strategy to use to replace existing pods with new ones. + + + + +lifecycle
+ +MonoVertexLifecycle + + + + +(Optional) +

+ +Lifecycle defines the Lifecycle properties of a MonoVertex +

+ + + + + @@ -5919,6 +5940,69 @@ MonoVertexStatus +

+ +MonoVertexLifecycle +

+ +

+ +(Appears on: +MonoVertexSpec) +

+ +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +desiredPhase
+ +MonoVertexPhase +
+ +(Optional) +

+ +DesiredPhase used to bring the pipeline from current phase to desired +phase +

+ +
+

MonoVertexLimits @@ -6010,6 +6094,7 @@ MonoVertexPhase (string alias)

(Appears on: +MonoVertexLifecycle, MonoVertexStatus)

@@ -6282,6 +6367,27 @@ The strategy to use to replace existing pods with new ones. + + + + +lifecycle
+ +MonoVertexLifecycle + + + + +(Optional) +

+ +Lifecycle defines the Lifecycle properties of a MonoVertex +

+ + + + + diff --git a/pkg/apis/numaflow/v1alpha1/generated.pb.go b/pkg/apis/numaflow/v1alpha1/generated.pb.go index 138962c4c2..8905ee00c2 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.pb.go +++ b/pkg/apis/numaflow/v1alpha1/generated.pb.go @@ -1282,10 +1282,38 @@ func (m *MonoVertex) XXX_DiscardUnknown() { var xxx_messageInfo_MonoVertex proto.InternalMessageInfo +func (m *MonoVertexLifecycle) Reset() { *m = MonoVertexLifecycle{} } +func (*MonoVertexLifecycle) ProtoMessage() {} +func (*MonoVertexLifecycle) Descriptor() ([]byte, []int) { + return fileDescriptor_9d0d1b17d3865563, []int{44} +} +func (m *MonoVertexLifecycle) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MonoVertexLifecycle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MonoVertexLifecycle) XXX_Merge(src proto.Message) { + xxx_messageInfo_MonoVertexLifecycle.Merge(m, src) +} +func (m *MonoVertexLifecycle) XXX_Size() int { + return m.Size() +} +func (m *MonoVertexLifecycle) XXX_DiscardUnknown() { + xxx_messageInfo_MonoVertexLifecycle.DiscardUnknown(m) +} + +var xxx_messageInfo_MonoVertexLifecycle proto.InternalMessageInfo + func (m *MonoVertexLimits) Reset() { *m = MonoVertexLimits{} } func (*MonoVertexLimits) ProtoMessage() {} func (*MonoVertexLimits) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{44} + return fileDescriptor_9d0d1b17d3865563, []int{45} } func (m *MonoVertexLimits) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1313,7 +1341,7 @@ var xxx_messageInfo_MonoVertexLimits proto.InternalMessageInfo func (m *MonoVertexList) Reset() { *m = MonoVertexList{} } func (*MonoVertexList) ProtoMessage() {} func (*MonoVertexList) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{45} + return fileDescriptor_9d0d1b17d3865563, []int{46} } func (m *MonoVertexList) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1341,7 +1369,7 @@ var xxx_messageInfo_MonoVertexList proto.InternalMessageInfo func (m *MonoVertexSpec) Reset() { *m = MonoVertexSpec{} } func (*MonoVertexSpec) ProtoMessage() {} func (*MonoVertexSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{46} + return fileDescriptor_9d0d1b17d3865563, []int{47} } func (m *MonoVertexSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1369,7 +1397,7 @@ var xxx_messageInfo_MonoVertexSpec proto.InternalMessageInfo func (m *MonoVertexStatus) Reset() { *m = MonoVertexStatus{} } func (*MonoVertexStatus) ProtoMessage() {} func (*MonoVertexStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{47} + return fileDescriptor_9d0d1b17d3865563, []int{48} } func (m *MonoVertexStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1397,7 +1425,7 @@ var xxx_messageInfo_MonoVertexStatus proto.InternalMessageInfo func (m *NativeRedis) Reset() { *m = NativeRedis{} } func (*NativeRedis) ProtoMessage() {} func (*NativeRedis) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{48} + return fileDescriptor_9d0d1b17d3865563, []int{49} } func (m *NativeRedis) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1425,7 +1453,7 @@ var xxx_messageInfo_NativeRedis proto.InternalMessageInfo func (m *NatsAuth) Reset() { *m = NatsAuth{} } func (*NatsAuth) ProtoMessage() {} func (*NatsAuth) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{49} + return fileDescriptor_9d0d1b17d3865563, []int{50} } func (m *NatsAuth) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1453,7 +1481,7 @@ var xxx_messageInfo_NatsAuth proto.InternalMessageInfo func (m *NatsSource) Reset() { *m = NatsSource{} } func (*NatsSource) ProtoMessage() {} func (*NatsSource) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{50} + return 
fileDescriptor_9d0d1b17d3865563, []int{51} } func (m *NatsSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1481,7 +1509,7 @@ var xxx_messageInfo_NatsSource proto.InternalMessageInfo func (m *NoStore) Reset() { *m = NoStore{} } func (*NoStore) ProtoMessage() {} func (*NoStore) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{51} + return fileDescriptor_9d0d1b17d3865563, []int{52} } func (m *NoStore) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1509,7 +1537,7 @@ var xxx_messageInfo_NoStore proto.InternalMessageInfo func (m *PBQStorage) Reset() { *m = PBQStorage{} } func (*PBQStorage) ProtoMessage() {} func (*PBQStorage) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{52} + return fileDescriptor_9d0d1b17d3865563, []int{53} } func (m *PBQStorage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1537,7 +1565,7 @@ var xxx_messageInfo_PBQStorage proto.InternalMessageInfo func (m *PersistenceStrategy) Reset() { *m = PersistenceStrategy{} } func (*PersistenceStrategy) ProtoMessage() {} func (*PersistenceStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{53} + return fileDescriptor_9d0d1b17d3865563, []int{54} } func (m *PersistenceStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1565,7 +1593,7 @@ var xxx_messageInfo_PersistenceStrategy proto.InternalMessageInfo func (m *Pipeline) Reset() { *m = Pipeline{} } func (*Pipeline) ProtoMessage() {} func (*Pipeline) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{54} + return fileDescriptor_9d0d1b17d3865563, []int{55} } func (m *Pipeline) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1593,7 +1621,7 @@ var xxx_messageInfo_Pipeline proto.InternalMessageInfo func (m *PipelineLimits) Reset() { *m = PipelineLimits{} } func (*PipelineLimits) ProtoMessage() {} func (*PipelineLimits) Descriptor() ([]byte, []int) { - return 
fileDescriptor_9d0d1b17d3865563, []int{55} + return fileDescriptor_9d0d1b17d3865563, []int{56} } func (m *PipelineLimits) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1621,7 +1649,7 @@ var xxx_messageInfo_PipelineLimits proto.InternalMessageInfo func (m *PipelineList) Reset() { *m = PipelineList{} } func (*PipelineList) ProtoMessage() {} func (*PipelineList) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{56} + return fileDescriptor_9d0d1b17d3865563, []int{57} } func (m *PipelineList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1649,7 +1677,7 @@ var xxx_messageInfo_PipelineList proto.InternalMessageInfo func (m *PipelineSpec) Reset() { *m = PipelineSpec{} } func (*PipelineSpec) ProtoMessage() {} func (*PipelineSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{57} + return fileDescriptor_9d0d1b17d3865563, []int{58} } func (m *PipelineSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1677,7 +1705,7 @@ var xxx_messageInfo_PipelineSpec proto.InternalMessageInfo func (m *PipelineStatus) Reset() { *m = PipelineStatus{} } func (*PipelineStatus) ProtoMessage() {} func (*PipelineStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{58} + return fileDescriptor_9d0d1b17d3865563, []int{59} } func (m *PipelineStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1705,7 +1733,7 @@ var xxx_messageInfo_PipelineStatus proto.InternalMessageInfo func (m *Probe) Reset() { *m = Probe{} } func (*Probe) ProtoMessage() {} func (*Probe) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{59} + return fileDescriptor_9d0d1b17d3865563, []int{60} } func (m *Probe) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1733,7 +1761,7 @@ var xxx_messageInfo_Probe proto.InternalMessageInfo func (m *RedisBufferService) Reset() { *m = RedisBufferService{} } func (*RedisBufferService) ProtoMessage() {} func 
(*RedisBufferService) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{60} + return fileDescriptor_9d0d1b17d3865563, []int{61} } func (m *RedisBufferService) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1761,7 +1789,7 @@ var xxx_messageInfo_RedisBufferService proto.InternalMessageInfo func (m *RedisConfig) Reset() { *m = RedisConfig{} } func (*RedisConfig) ProtoMessage() {} func (*RedisConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{61} + return fileDescriptor_9d0d1b17d3865563, []int{62} } func (m *RedisConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1789,7 +1817,7 @@ var xxx_messageInfo_RedisConfig proto.InternalMessageInfo func (m *RedisSettings) Reset() { *m = RedisSettings{} } func (*RedisSettings) ProtoMessage() {} func (*RedisSettings) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{62} + return fileDescriptor_9d0d1b17d3865563, []int{63} } func (m *RedisSettings) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1817,7 +1845,7 @@ var xxx_messageInfo_RedisSettings proto.InternalMessageInfo func (m *RetryStrategy) Reset() { *m = RetryStrategy{} } func (*RetryStrategy) ProtoMessage() {} func (*RetryStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{63} + return fileDescriptor_9d0d1b17d3865563, []int{64} } func (m *RetryStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1845,7 +1873,7 @@ var xxx_messageInfo_RetryStrategy proto.InternalMessageInfo func (m *RollingUpdateStrategy) Reset() { *m = RollingUpdateStrategy{} } func (*RollingUpdateStrategy) ProtoMessage() {} func (*RollingUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{64} + return fileDescriptor_9d0d1b17d3865563, []int{65} } func (m *RollingUpdateStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1873,7 +1901,7 @@ var 
xxx_messageInfo_RollingUpdateStrategy proto.InternalMessageInfo func (m *SASL) Reset() { *m = SASL{} } func (*SASL) ProtoMessage() {} func (*SASL) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{65} + return fileDescriptor_9d0d1b17d3865563, []int{66} } func (m *SASL) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1901,7 +1929,7 @@ var xxx_messageInfo_SASL proto.InternalMessageInfo func (m *SASLPlain) Reset() { *m = SASLPlain{} } func (*SASLPlain) ProtoMessage() {} func (*SASLPlain) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{66} + return fileDescriptor_9d0d1b17d3865563, []int{67} } func (m *SASLPlain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1929,7 +1957,7 @@ var xxx_messageInfo_SASLPlain proto.InternalMessageInfo func (m *Scale) Reset() { *m = Scale{} } func (*Scale) ProtoMessage() {} func (*Scale) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{67} + return fileDescriptor_9d0d1b17d3865563, []int{68} } func (m *Scale) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1957,7 +1985,7 @@ var xxx_messageInfo_Scale proto.InternalMessageInfo func (m *ServingSource) Reset() { *m = ServingSource{} } func (*ServingSource) ProtoMessage() {} func (*ServingSource) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{68} + return fileDescriptor_9d0d1b17d3865563, []int{69} } func (m *ServingSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1985,7 +2013,7 @@ var xxx_messageInfo_ServingSource proto.InternalMessageInfo func (m *ServingStore) Reset() { *m = ServingStore{} } func (*ServingStore) ProtoMessage() {} func (*ServingStore) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{69} + return fileDescriptor_9d0d1b17d3865563, []int{70} } func (m *ServingStore) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2013,7 +2041,7 @@ var xxx_messageInfo_ServingStore 
proto.InternalMessageInfo func (m *SessionWindow) Reset() { *m = SessionWindow{} } func (*SessionWindow) ProtoMessage() {} func (*SessionWindow) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{70} + return fileDescriptor_9d0d1b17d3865563, []int{71} } func (m *SessionWindow) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2041,7 +2069,7 @@ var xxx_messageInfo_SessionWindow proto.InternalMessageInfo func (m *SideInput) Reset() { *m = SideInput{} } func (*SideInput) ProtoMessage() {} func (*SideInput) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{71} + return fileDescriptor_9d0d1b17d3865563, []int{72} } func (m *SideInput) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2069,7 +2097,7 @@ var xxx_messageInfo_SideInput proto.InternalMessageInfo func (m *SideInputTrigger) Reset() { *m = SideInputTrigger{} } func (*SideInputTrigger) ProtoMessage() {} func (*SideInputTrigger) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{72} + return fileDescriptor_9d0d1b17d3865563, []int{73} } func (m *SideInputTrigger) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2097,7 +2125,7 @@ var xxx_messageInfo_SideInputTrigger proto.InternalMessageInfo func (m *SideInputsManagerTemplate) Reset() { *m = SideInputsManagerTemplate{} } func (*SideInputsManagerTemplate) ProtoMessage() {} func (*SideInputsManagerTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{73} + return fileDescriptor_9d0d1b17d3865563, []int{74} } func (m *SideInputsManagerTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2125,7 +2153,7 @@ var xxx_messageInfo_SideInputsManagerTemplate proto.InternalMessageInfo func (m *Sink) Reset() { *m = Sink{} } func (*Sink) ProtoMessage() {} func (*Sink) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{74} + return fileDescriptor_9d0d1b17d3865563, []int{75} } func (m *Sink) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2153,7 +2181,7 @@ var xxx_messageInfo_Sink proto.InternalMessageInfo func (m *SlidingWindow) Reset() { *m = SlidingWindow{} } func (*SlidingWindow) ProtoMessage() {} func (*SlidingWindow) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{75} + return fileDescriptor_9d0d1b17d3865563, []int{76} } func (m *SlidingWindow) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2181,7 +2209,7 @@ var xxx_messageInfo_SlidingWindow proto.InternalMessageInfo func (m *Source) Reset() { *m = Source{} } func (*Source) ProtoMessage() {} func (*Source) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{76} + return fileDescriptor_9d0d1b17d3865563, []int{77} } func (m *Source) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2209,7 +2237,7 @@ var xxx_messageInfo_Source proto.InternalMessageInfo func (m *Status) Reset() { *m = Status{} } func (*Status) ProtoMessage() {} func (*Status) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{77} + return fileDescriptor_9d0d1b17d3865563, []int{78} } func (m *Status) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2237,7 +2265,7 @@ var xxx_messageInfo_Status proto.InternalMessageInfo func (m *TLS) Reset() { *m = TLS{} } func (*TLS) ProtoMessage() {} func (*TLS) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{78} + return fileDescriptor_9d0d1b17d3865563, []int{79} } func (m *TLS) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2265,7 +2293,7 @@ var xxx_messageInfo_TLS proto.InternalMessageInfo func (m *TagConditions) Reset() { *m = TagConditions{} } func (*TagConditions) ProtoMessage() {} func (*TagConditions) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{79} + return fileDescriptor_9d0d1b17d3865563, []int{80} } func (m *TagConditions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2293,7 +2321,7 
@@ var xxx_messageInfo_TagConditions proto.InternalMessageInfo func (m *Templates) Reset() { *m = Templates{} } func (*Templates) ProtoMessage() {} func (*Templates) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{80} + return fileDescriptor_9d0d1b17d3865563, []int{81} } func (m *Templates) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2321,7 +2349,7 @@ var xxx_messageInfo_Templates proto.InternalMessageInfo func (m *Transformer) Reset() { *m = Transformer{} } func (*Transformer) ProtoMessage() {} func (*Transformer) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{81} + return fileDescriptor_9d0d1b17d3865563, []int{82} } func (m *Transformer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2349,7 +2377,7 @@ var xxx_messageInfo_Transformer proto.InternalMessageInfo func (m *UDF) Reset() { *m = UDF{} } func (*UDF) ProtoMessage() {} func (*UDF) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{82} + return fileDescriptor_9d0d1b17d3865563, []int{83} } func (m *UDF) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2377,7 +2405,7 @@ var xxx_messageInfo_UDF proto.InternalMessageInfo func (m *UDSink) Reset() { *m = UDSink{} } func (*UDSink) ProtoMessage() {} func (*UDSink) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{83} + return fileDescriptor_9d0d1b17d3865563, []int{84} } func (m *UDSink) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2405,7 +2433,7 @@ var xxx_messageInfo_UDSink proto.InternalMessageInfo func (m *UDSource) Reset() { *m = UDSource{} } func (*UDSource) ProtoMessage() {} func (*UDSource) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{84} + return fileDescriptor_9d0d1b17d3865563, []int{85} } func (m *UDSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2433,7 +2461,7 @@ var xxx_messageInfo_UDSource proto.InternalMessageInfo func (m *UDTransformer) 
Reset() { *m = UDTransformer{} } func (*UDTransformer) ProtoMessage() {} func (*UDTransformer) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{85} + return fileDescriptor_9d0d1b17d3865563, []int{86} } func (m *UDTransformer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2461,7 +2489,7 @@ var xxx_messageInfo_UDTransformer proto.InternalMessageInfo func (m *UpdateStrategy) Reset() { *m = UpdateStrategy{} } func (*UpdateStrategy) ProtoMessage() {} func (*UpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{86} + return fileDescriptor_9d0d1b17d3865563, []int{87} } func (m *UpdateStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2489,7 +2517,7 @@ var xxx_messageInfo_UpdateStrategy proto.InternalMessageInfo func (m *Vertex) Reset() { *m = Vertex{} } func (*Vertex) ProtoMessage() {} func (*Vertex) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{87} + return fileDescriptor_9d0d1b17d3865563, []int{88} } func (m *Vertex) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2517,7 +2545,7 @@ var xxx_messageInfo_Vertex proto.InternalMessageInfo func (m *VertexInstance) Reset() { *m = VertexInstance{} } func (*VertexInstance) ProtoMessage() {} func (*VertexInstance) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{88} + return fileDescriptor_9d0d1b17d3865563, []int{89} } func (m *VertexInstance) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2545,7 +2573,7 @@ var xxx_messageInfo_VertexInstance proto.InternalMessageInfo func (m *VertexLimits) Reset() { *m = VertexLimits{} } func (*VertexLimits) ProtoMessage() {} func (*VertexLimits) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{89} + return fileDescriptor_9d0d1b17d3865563, []int{90} } func (m *VertexLimits) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2573,7 +2601,7 @@ var xxx_messageInfo_VertexLimits 
proto.InternalMessageInfo func (m *VertexList) Reset() { *m = VertexList{} } func (*VertexList) ProtoMessage() {} func (*VertexList) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{90} + return fileDescriptor_9d0d1b17d3865563, []int{91} } func (m *VertexList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2601,7 +2629,7 @@ var xxx_messageInfo_VertexList proto.InternalMessageInfo func (m *VertexSpec) Reset() { *m = VertexSpec{} } func (*VertexSpec) ProtoMessage() {} func (*VertexSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{91} + return fileDescriptor_9d0d1b17d3865563, []int{92} } func (m *VertexSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2629,7 +2657,7 @@ var xxx_messageInfo_VertexSpec proto.InternalMessageInfo func (m *VertexStatus) Reset() { *m = VertexStatus{} } func (*VertexStatus) ProtoMessage() {} func (*VertexStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{92} + return fileDescriptor_9d0d1b17d3865563, []int{93} } func (m *VertexStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2657,7 +2685,7 @@ var xxx_messageInfo_VertexStatus proto.InternalMessageInfo func (m *VertexTemplate) Reset() { *m = VertexTemplate{} } func (*VertexTemplate) ProtoMessage() {} func (*VertexTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{93} + return fileDescriptor_9d0d1b17d3865563, []int{94} } func (m *VertexTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2685,7 +2713,7 @@ var xxx_messageInfo_VertexTemplate proto.InternalMessageInfo func (m *Watermark) Reset() { *m = Watermark{} } func (*Watermark) ProtoMessage() {} func (*Watermark) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{94} + return fileDescriptor_9d0d1b17d3865563, []int{95} } func (m *Watermark) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2713,7 +2741,7 @@ var 
xxx_messageInfo_Watermark proto.InternalMessageInfo func (m *Window) Reset() { *m = Window{} } func (*Window) ProtoMessage() {} func (*Window) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{95} + return fileDescriptor_9d0d1b17d3865563, []int{96} } func (m *Window) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2791,6 +2819,7 @@ func init() { proto.RegisterMapType((map[string]string)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.Metadata.AnnotationsEntry") proto.RegisterMapType((map[string]string)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.Metadata.LabelsEntry") proto.RegisterType((*MonoVertex)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.MonoVertex") + proto.RegisterType((*MonoVertexLifecycle)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.MonoVertexLifecycle") proto.RegisterType((*MonoVertexLimits)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.MonoVertexLimits") proto.RegisterType((*MonoVertexList)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.MonoVertexList") proto.RegisterType((*MonoVertexSpec)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.MonoVertexSpec") @@ -2851,510 +2880,513 @@ func init() { } var fileDescriptor_9d0d1b17d3865563 = []byte{ - // 8046 bytes of a gzipped FileDescriptorProto + // 8084 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x5d, 0x6c, 0x24, 0x57, 0x76, 0x9e, 0xfa, 0xbf, 0xfb, 0x34, 0xff, 0x74, 0x67, 0x34, 0xe2, 0xcc, 0x4a, 0xd3, 0xe3, 0x5a, - 0xef, 0x7a, 0x1c, 0xdb, 0x64, 0x44, 0xaf, 0xb4, 0x5a, 0xdb, 0xbb, 0x12, 0x9b, 0x1c, 0x72, 0x38, + 0xef, 0xee, 0x38, 0xb6, 0xc9, 0x88, 0x5e, 0x69, 0xb5, 0xb6, 0x77, 0x25, 0x36, 0x39, 0xe4, 0x50, 0x43, 0xce, 0x70, 0x4f, 0x93, 0x23, 0xad, 0x15, 0xaf, 0x52, 0xac, 0xba, 0x6c, 0x96, 0x58, 0x5d, - 0xd5, 0x5b, 0x55, 0xcd, 0x19, 0xca, 0x09, 0xd6, 0xb6, 0x12, 0x68, 0x83, 0x24, 0x48, 
0xe0, 0x27, - 0x03, 0x81, 0x13, 0x24, 0x08, 0xe0, 0x07, 0xc3, 0x79, 0x08, 0xb2, 0x79, 0x08, 0x90, 0x1f, 0x07, - 0x41, 0xb2, 0xf9, 0x5f, 0x04, 0x01, 0xb2, 0x79, 0x21, 0xb2, 0x0c, 0xf2, 0x90, 0x00, 0x0e, 0x8c, - 0x18, 0x89, 0x9d, 0x81, 0x11, 0x07, 0xf7, 0xaf, 0xfe, 0xba, 0x7a, 0x86, 0xec, 0x6a, 0x8e, 0x46, - 0x89, 0xde, 0xba, 0xef, 0x39, 0xf7, 0x3b, 0xb7, 0x6e, 0xdd, 0xba, 0xf7, 0xdc, 0x73, 0xce, 0x3d, - 0x17, 0xd6, 0xbb, 0x56, 0x70, 0x30, 0xd8, 0x5b, 0x30, 0xdc, 0xde, 0xa2, 0x33, 0xe8, 0xe9, 0x7d, - 0xcf, 0xfd, 0x80, 0xff, 0xd8, 0xb7, 0xdd, 0x87, 0x8b, 0xfd, 0xc3, 0xee, 0xa2, 0xde, 0xb7, 0xfc, - 0xa8, 0xe4, 0xe8, 0x35, 0xdd, 0xee, 0x1f, 0xe8, 0xaf, 0x2d, 0x76, 0xa9, 0x43, 0x3d, 0x3d, 0xa0, - 0xe6, 0x42, 0xdf, 0x73, 0x03, 0x97, 0x7c, 0x39, 0x02, 0x5a, 0x50, 0x40, 0x0b, 0xaa, 0xda, 0x42, - 0xff, 0xb0, 0xbb, 0xc0, 0x80, 0xa2, 0x12, 0x05, 0x74, 0xed, 0xa7, 0x62, 0x2d, 0xe8, 0xba, 0x5d, - 0x77, 0x91, 0xe3, 0xed, 0x0d, 0xf6, 0xf9, 0x3f, 0xfe, 0x87, 0xff, 0x12, 0x72, 0xae, 0x69, 0x87, - 0x6f, 0xfa, 0x0b, 0x96, 0xcb, 0x9a, 0xb5, 0x68, 0xb8, 0x1e, 0x5d, 0x3c, 0x1a, 0x6a, 0xcb, 0xb5, - 0x2f, 0x45, 0x3c, 0x3d, 0xdd, 0x38, 0xb0, 0x1c, 0xea, 0x1d, 0xab, 0x67, 0x59, 0xf4, 0xa8, 0xef, - 0x0e, 0x3c, 0x83, 0x9e, 0xab, 0x96, 0xbf, 0xd8, 0xa3, 0x81, 0x9e, 0x25, 0x6b, 0x71, 0x54, 0x2d, - 0x6f, 0xe0, 0x04, 0x56, 0x6f, 0x58, 0xcc, 0x1b, 0x4f, 0xab, 0xe0, 0x1b, 0x07, 0xb4, 0xa7, 0x0f, - 0xd5, 0xfb, 0xe9, 0x51, 0xf5, 0x06, 0x81, 0x65, 0x2f, 0x5a, 0x4e, 0xe0, 0x07, 0x5e, 0xba, 0x92, - 0xf6, 0xdb, 0x00, 0x97, 0x96, 0xf7, 0xfc, 0xc0, 0xd3, 0x8d, 0x60, 0xdb, 0x35, 0x77, 0x68, 0xaf, - 0x6f, 0xeb, 0x01, 0x25, 0x87, 0x50, 0x67, 0x0f, 0x64, 0xea, 0x81, 0x3e, 0x5f, 0xb8, 0x51, 0xb8, - 0xd9, 0x5c, 0x5a, 0x5e, 0x18, 0xf3, 0x05, 0x2e, 0x6c, 0x49, 0xa0, 0xf6, 0xd4, 0xe9, 0x49, 0xab, - 0xae, 0xfe, 0x61, 0x28, 0x80, 0xfc, 0x5a, 0x01, 0xa6, 0x1c, 0xd7, 0xa4, 0x1d, 0x6a, 0x53, 0x23, - 0x70, 0xbd, 0xf9, 0xe2, 0x8d, 0xd2, 0xcd, 0xe6, 0xd2, 0x37, 0xc7, 0x96, 0x98, 0xf1, 0x44, 0x0b, - 0xf7, 0x62, 0x02, 0x6e, 
0x39, 0x81, 0x77, 0xdc, 0xbe, 0xfc, 0xbd, 0x93, 0xd6, 0x0b, 0xa7, 0x27, - 0xad, 0xa9, 0x38, 0x09, 0x13, 0x2d, 0x21, 0xbb, 0xd0, 0x0c, 0x5c, 0x9b, 0x75, 0x99, 0xe5, 0x3a, - 0xfe, 0x7c, 0x89, 0x37, 0xec, 0xfa, 0x82, 0xe8, 0x6a, 0x26, 0x7e, 0x81, 0x8d, 0xb1, 0x85, 0xa3, - 0xd7, 0x16, 0x76, 0x42, 0xb6, 0xf6, 0x25, 0x09, 0xdc, 0x8c, 0xca, 0x7c, 0x8c, 0xe3, 0x10, 0x0a, - 0xb3, 0x3e, 0x35, 0x06, 0x9e, 0x15, 0x1c, 0xaf, 0xb8, 0x4e, 0x40, 0x1f, 0x05, 0xf3, 0x65, 0xde, - 0xcb, 0x5f, 0xcc, 0x82, 0xde, 0x76, 0xcd, 0x4e, 0x92, 0xbb, 0x7d, 0xe9, 0xf4, 0xa4, 0x35, 0x9b, - 0x2a, 0xc4, 0x34, 0x26, 0x71, 0x60, 0xce, 0xea, 0xe9, 0x5d, 0xba, 0x3d, 0xb0, 0xed, 0x0e, 0x35, - 0x3c, 0x1a, 0xf8, 0xf3, 0x15, 0xfe, 0x08, 0x37, 0xb3, 0xe4, 0x6c, 0xba, 0x86, 0x6e, 0xdf, 0xdf, - 0xfb, 0x80, 0x1a, 0x01, 0xd2, 0x7d, 0xea, 0x51, 0xc7, 0xa0, 0xed, 0x79, 0xf9, 0x30, 0x73, 0x1b, - 0x29, 0x24, 0x1c, 0xc2, 0x26, 0xeb, 0xf0, 0x62, 0xdf, 0xb3, 0x5c, 0xde, 0x04, 0x5b, 0xf7, 0xfd, - 0x7b, 0x7a, 0x8f, 0xce, 0x57, 0x6f, 0x14, 0x6e, 0x36, 0xda, 0x57, 0x25, 0xcc, 0x8b, 0xdb, 0x69, - 0x06, 0x1c, 0xae, 0x43, 0x6e, 0x42, 0x5d, 0x15, 0xce, 0xd7, 0x6e, 0x14, 0x6e, 0x56, 0xc4, 0xd8, - 0x51, 0x75, 0x31, 0xa4, 0x92, 0x35, 0xa8, 0xeb, 0xfb, 0xfb, 0x96, 0xc3, 0x38, 0xeb, 0xbc, 0x0b, - 0x5f, 0xc9, 0x7a, 0xb4, 0x65, 0xc9, 0x23, 0x70, 0xd4, 0x3f, 0x0c, 0xeb, 0x92, 0x3b, 0x40, 0x7c, - 0xea, 0x1d, 0x59, 0x06, 0x5d, 0x36, 0x0c, 0x77, 0xe0, 0x04, 0xbc, 0xed, 0x0d, 0xde, 0xf6, 0x6b, - 0xb2, 0xed, 0xa4, 0x33, 0xc4, 0x81, 0x19, 0xb5, 0xc8, 0xdb, 0x30, 0x27, 0xbf, 0xd5, 0xa8, 0x17, - 0x80, 0x23, 0x5d, 0x66, 0x1d, 0x89, 0x29, 0x1a, 0x0e, 0x71, 0x13, 0x13, 0x5e, 0xd1, 0x07, 0x81, - 0xdb, 0x63, 0x90, 0x49, 0xa1, 0x3b, 0xee, 0x21, 0x75, 0xe6, 0x9b, 0x37, 0x0a, 0x37, 0xeb, 0xed, - 0x1b, 0xa7, 0x27, 0xad, 0x57, 0x96, 0x9f, 0xc0, 0x87, 0x4f, 0x44, 0x21, 0xf7, 0xa1, 0x61, 0x3a, - 0xfe, 0xb6, 0x6b, 0x5b, 0xc6, 0xf1, 0xfc, 0x14, 0x6f, 0xe0, 0x6b, 0xf2, 0x51, 0x1b, 0xab, 0xf7, - 0x3a, 0x82, 0xf0, 0xf8, 0xa4, 0xf5, 0xca, 0xf0, 0x94, 0xba, 
0x10, 0xd2, 0x31, 0xc2, 0x20, 0x5b, - 0x1c, 0x70, 0xc5, 0x75, 0xf6, 0xad, 0xee, 0xfc, 0x34, 0x7f, 0x1b, 0x37, 0x46, 0x0c, 0xe8, 0xd5, - 0x7b, 0x1d, 0xc1, 0xd7, 0x9e, 0x96, 0xe2, 0xc4, 0x5f, 0x8c, 0x10, 0x88, 0x09, 0x33, 0x6a, 0x32, - 0x5e, 0xb1, 0x75, 0xab, 0xe7, 0xcf, 0xcf, 0xf0, 0xc1, 0xfb, 0xa3, 0x23, 0x30, 0x31, 0xce, 0xdc, - 0xbe, 0x22, 0x1f, 0x65, 0x26, 0x51, 0xec, 0x63, 0x0a, 0xf3, 0xda, 0x5b, 0xf0, 0xe2, 0xd0, 0xdc, - 0x40, 0xe6, 0xa0, 0x74, 0x48, 0x8f, 0xf9, 0xd4, 0xd7, 0x40, 0xf6, 0x93, 0x5c, 0x86, 0xca, 0x91, - 0x6e, 0x0f, 0xe8, 0x7c, 0x91, 0x97, 0x89, 0x3f, 0x3f, 0x53, 0x7c, 0xb3, 0xa0, 0xfd, 0x8d, 0x12, - 0x4c, 0xa9, 0x19, 0xa7, 0x63, 0x39, 0x87, 0xe4, 0x1d, 0x28, 0xd9, 0x6e, 0x57, 0xce, 0x9b, 0x3f, - 0x37, 0xf6, 0x2c, 0xb6, 0xe9, 0x76, 0xdb, 0xb5, 0xd3, 0x93, 0x56, 0x69, 0xd3, 0xed, 0x22, 0x43, - 0x24, 0x06, 0x54, 0x0e, 0xf5, 0xfd, 0x43, 0x9d, 0xb7, 0xa1, 0xb9, 0xd4, 0x1e, 0x1b, 0xfa, 0x2e, - 0x43, 0x61, 0x6d, 0x6d, 0x37, 0x4e, 0x4f, 0x5a, 0x15, 0xfe, 0x17, 0x05, 0x36, 0x71, 0xa1, 0xb1, - 0x67, 0xeb, 0xc6, 0xe1, 0x81, 0x6b, 0xd3, 0xf9, 0x52, 0x4e, 0x41, 0x6d, 0x85, 0x24, 0x5e, 0x73, - 0xf8, 0x17, 0x23, 0x19, 0xc4, 0x80, 0xea, 0xc0, 0xf4, 0x2d, 0xe7, 0x50, 0xce, 0x81, 0x6f, 0x8d, - 0x2d, 0x6d, 0x77, 0x95, 0x3f, 0x13, 0x9c, 0x9e, 0xb4, 0xaa, 0xe2, 0x37, 0x4a, 0x68, 0xed, 0x0f, - 0xa6, 0x60, 0x46, 0xbd, 0xa4, 0x07, 0xd4, 0x0b, 0xe8, 0x23, 0x72, 0x03, 0xca, 0x0e, 0xfb, 0x34, - 0xf9, 0x4b, 0x6e, 0x4f, 0xc9, 0xe1, 0x52, 0xe6, 0x9f, 0x24, 0xa7, 0xb0, 0x96, 0x89, 0xa1, 0x22, - 0x3b, 0x7c, 0xfc, 0x96, 0x75, 0x38, 0x8c, 0x68, 0x99, 0xf8, 0x8d, 0x12, 0x9a, 0xbc, 0x07, 0x65, - 0xfe, 0xf0, 0xa2, 0xab, 0xbf, 0x3a, 0xbe, 0x08, 0xf6, 0xe8, 0x75, 0xf6, 0x04, 0xfc, 0xc1, 0x39, - 0x28, 0x1b, 0x8a, 0x03, 0x73, 0x5f, 0x76, 0xec, 0xcf, 0xe5, 0xe8, 0xd8, 0x35, 0x31, 0x14, 0x77, - 0x57, 0xd7, 0x90, 0x21, 0x92, 0xbf, 0x54, 0x80, 0x17, 0x0d, 0xd7, 0x09, 0x74, 0xa6, 0x67, 0xa8, - 0x45, 0x76, 0xbe, 0xc2, 0xe5, 0xdc, 0x19, 0x5b, 0xce, 0x4a, 0x1a, 0xb1, 0xfd, 0x12, 0x5b, 0x33, - 
0x86, 0x8a, 0x71, 0x58, 0x36, 0xf9, 0x2b, 0x05, 0x78, 0x89, 0xcd, 0xe5, 0x43, 0xcc, 0x7c, 0x05, - 0x9a, 0x6c, 0xab, 0xae, 0x9e, 0x9e, 0xb4, 0x5e, 0xda, 0xc8, 0x12, 0x86, 0xd9, 0x6d, 0x60, 0xad, - 0xbb, 0xa4, 0x0f, 0xab, 0x25, 0x7c, 0x75, 0x6b, 0x2e, 0x6d, 0x4e, 0x52, 0xd5, 0x69, 0x7f, 0x4e, - 0x0e, 0xe5, 0x2c, 0xcd, 0x0e, 0xb3, 0x5a, 0x41, 0x6e, 0x41, 0xed, 0xc8, 0xb5, 0x07, 0x3d, 0xea, - 0xcf, 0xd7, 0xf9, 0x14, 0x7b, 0x2d, 0x6b, 0x8a, 0x7d, 0xc0, 0x59, 0xda, 0xb3, 0x12, 0xbe, 0x26, - 0xfe, 0xfb, 0xa8, 0xea, 0x12, 0x0b, 0xaa, 0xb6, 0xd5, 0xb3, 0x02, 0x9f, 0x2f, 0x9c, 0xcd, 0xa5, - 0x5b, 0x63, 0x3f, 0x96, 0xf8, 0x44, 0x37, 0x39, 0x98, 0xf8, 0x6a, 0xc4, 0x6f, 0x94, 0x02, 0xd8, - 0x54, 0xe8, 0x1b, 0xba, 0x2d, 0x16, 0xd6, 0xe6, 0xd2, 0xd7, 0xc6, 0xff, 0x6c, 0x18, 0x4a, 0x7b, - 0x5a, 0x3e, 0x53, 0x85, 0xff, 0x45, 0x81, 0x4d, 0x7e, 0x01, 0x66, 0x12, 0x6f, 0xd3, 0x9f, 0x6f, - 0xf2, 0xde, 0x79, 0x35, 0xab, 0x77, 0x42, 0xae, 0x68, 0xe5, 0x49, 0x8c, 0x10, 0x1f, 0x53, 0x60, - 0xe4, 0x2e, 0xd4, 0x7d, 0xcb, 0xa4, 0x86, 0xee, 0xf9, 0xf3, 0x53, 0x67, 0x01, 0x9e, 0x93, 0xc0, - 0xf5, 0x8e, 0xac, 0x86, 0x21, 0x00, 0x59, 0x00, 0xe8, 0xeb, 0x5e, 0x60, 0x09, 0x45, 0x75, 0x9a, - 0x2b, 0x4d, 0x33, 0xa7, 0x27, 0x2d, 0xd8, 0x0e, 0x4b, 0x31, 0xc6, 0xc1, 0xf8, 0x59, 0xdd, 0x0d, - 0xa7, 0x3f, 0x08, 0xc4, 0xc2, 0xda, 0x10, 0xfc, 0x9d, 0xb0, 0x14, 0x63, 0x1c, 0xe4, 0xb7, 0x0a, - 0xf0, 0xb9, 0xe8, 0xef, 0xf0, 0x47, 0x36, 0x3b, 0xf1, 0x8f, 0xac, 0x75, 0x7a, 0xd2, 0xfa, 0x5c, - 0x67, 0xb4, 0x48, 0x7c, 0x52, 0x7b, 0xc8, 0xc7, 0x05, 0x98, 0x19, 0xf4, 0x4d, 0x3d, 0xa0, 0x9d, - 0x80, 0xed, 0x78, 0xba, 0xc7, 0xf3, 0x73, 0xbc, 0x89, 0xeb, 0xe3, 0xcf, 0x82, 0x09, 0xb8, 0xe8, - 0x35, 0x27, 0xcb, 0x31, 0x25, 0x56, 0x7b, 0x07, 0xa6, 0x97, 0x07, 0xc1, 0x81, 0xeb, 0x59, 0x1f, - 0x72, 0xf5, 0x9f, 0xac, 0x41, 0x25, 0xe0, 0x6a, 0x9c, 0xd0, 0x10, 0xbe, 0x90, 0xf5, 0xd2, 0x85, - 0x4a, 0x7d, 0x97, 0x1e, 0x2b, 0xbd, 0x44, 0xac, 0xd4, 0x42, 0xad, 0x13, 0xd5, 0xb5, 0x3f, 0x53, - 0x80, 0x5a, 0x5b, 0x37, 0x0e, 0xdd, 
0xfd, 0x7d, 0xf2, 0x2e, 0xd4, 0x2d, 0x27, 0xa0, 0xde, 0x91, - 0x6e, 0x4b, 0xd8, 0x85, 0x18, 0x6c, 0xb8, 0x21, 0x8c, 0x1e, 0x8f, 0xed, 0xbe, 0x98, 0xa0, 0xd5, - 0x81, 0xdc, 0xb5, 0x70, 0xcd, 0x78, 0x43, 0x62, 0x60, 0x88, 0x46, 0x5a, 0x50, 0xf1, 0x03, 0xda, - 0xf7, 0xf9, 0x1a, 0x38, 0x2d, 0x9a, 0xd1, 0x61, 0x05, 0x28, 0xca, 0xb5, 0xbf, 0x5e, 0x80, 0x46, - 0x5b, 0xf7, 0x2d, 0x83, 0x3d, 0x25, 0x59, 0x81, 0xf2, 0xc0, 0xa7, 0xde, 0xf9, 0x9e, 0x8d, 0x2f, - 0x5b, 0xbb, 0x3e, 0xf5, 0x90, 0x57, 0x26, 0xf7, 0xa1, 0xde, 0xd7, 0x7d, 0xff, 0xa1, 0xeb, 0x99, - 0x72, 0xe9, 0x3d, 0x23, 0x90, 0xd8, 0x26, 0xc8, 0xaa, 0x18, 0x82, 0x68, 0x4d, 0x88, 0x74, 0x0f, - 0xed, 0xf7, 0x0a, 0x70, 0xa9, 0x3d, 0xd8, 0xdf, 0xa7, 0x9e, 0xd4, 0x8a, 0xa5, 0xbe, 0x49, 0xa1, - 0xe2, 0x51, 0xd3, 0xf2, 0x65, 0xdb, 0x57, 0xc7, 0x1e, 0x28, 0xc8, 0x50, 0xa4, 0x7a, 0xcb, 0xfb, - 0x8b, 0x17, 0xa0, 0x40, 0x27, 0x03, 0x68, 0x7c, 0x40, 0xd9, 0x6e, 0x9c, 0xea, 0x3d, 0xf9, 0x74, - 0xb7, 0xc7, 0x16, 0x75, 0x87, 0x06, 0x1d, 0x8e, 0x14, 0xd7, 0xa6, 0xc3, 0x42, 0x8c, 0x24, 0x69, - 0xbf, 0x5d, 0x81, 0xa9, 0x15, 0xb7, 0xb7, 0x67, 0x39, 0xd4, 0xbc, 0x65, 0x76, 0x29, 0x79, 0x1f, - 0xca, 0xd4, 0xec, 0x52, 0xf9, 0xb4, 0xe3, 0x2b, 0x1e, 0x0c, 0x2c, 0x52, 0x9f, 0xd8, 0x3f, 0xe4, - 0xc0, 0x64, 0x13, 0x66, 0xf6, 0x3d, 0xb7, 0x27, 0xe6, 0xf2, 0x9d, 0xe3, 0xbe, 0xd4, 0x9d, 0xdb, - 0x3f, 0xaa, 0x3e, 0x9c, 0xb5, 0x04, 0xf5, 0xf1, 0x49, 0x0b, 0xa2, 0x7f, 0x98, 0xaa, 0x4b, 0xde, - 0x85, 0xf9, 0xa8, 0x24, 0x9c, 0xd4, 0x56, 0xd8, 0x76, 0x86, 0xeb, 0x4e, 0x95, 0xf6, 0x2b, 0xa7, - 0x27, 0xad, 0xf9, 0xb5, 0x11, 0x3c, 0x38, 0xb2, 0x36, 0x9b, 0x2a, 0xe6, 0x22, 0xa2, 0x58, 0x68, - 0xa4, 0xca, 0x34, 0xa1, 0x15, 0x8c, 0xef, 0xfb, 0xd6, 0x52, 0x22, 0x70, 0x48, 0x28, 0x59, 0x83, - 0xa9, 0xc0, 0x8d, 0xf5, 0x57, 0x85, 0xf7, 0x97, 0xa6, 0x0c, 0x15, 0x3b, 0xee, 0xc8, 0xde, 0x4a, - 0xd4, 0x23, 0x08, 0x57, 0xd4, 0xff, 0x54, 0x4f, 0x55, 0x79, 0x4f, 0x5d, 0x3b, 0x3d, 0x69, 0x5d, - 0xd9, 0xc9, 0xe4, 0xc0, 0x11, 0x35, 0xc9, 0x2f, 0x17, 0x60, 0x46, 0x91, 
0x64, 0x1f, 0xd5, 0x26, - 0xd9, 0x47, 0x84, 0x8d, 0x88, 0x9d, 0x84, 0x00, 0x4c, 0x09, 0xd4, 0x7e, 0xa7, 0x0a, 0x8d, 0x70, - 0xaa, 0x27, 0x9f, 0x87, 0x0a, 0x37, 0x41, 0x48, 0x0d, 0x3e, 0x5c, 0xc3, 0xb9, 0xa5, 0x02, 0x05, - 0x8d, 0x7c, 0x01, 0x6a, 0x86, 0xdb, 0xeb, 0xe9, 0x8e, 0xc9, 0xcd, 0x4a, 0x8d, 0x76, 0x93, 0xa9, - 0x2e, 0x2b, 0xa2, 0x08, 0x15, 0x8d, 0xbc, 0x02, 0x65, 0xdd, 0xeb, 0x0a, 0x0b, 0x4f, 0x43, 0xcc, - 0x47, 0xcb, 0x5e, 0xd7, 0x47, 0x5e, 0x4a, 0xbe, 0x02, 0x25, 0xea, 0x1c, 0xcd, 0x97, 0x47, 0xeb, - 0x46, 0xb7, 0x9c, 0xa3, 0x07, 0xba, 0xd7, 0x6e, 0xca, 0x36, 0x94, 0x6e, 0x39, 0x47, 0xc8, 0xea, - 0x90, 0x4d, 0xa8, 0x51, 0xe7, 0x88, 0xbd, 0x7b, 0x69, 0x7a, 0xf9, 0x91, 0x11, 0xd5, 0x19, 0x8b, - 0xdc, 0x26, 0x84, 0x1a, 0x96, 0x2c, 0x46, 0x05, 0x41, 0xbe, 0x01, 0x53, 0x42, 0xd9, 0xda, 0x62, - 0xef, 0xc4, 0x9f, 0xaf, 0x72, 0xc8, 0xd6, 0x68, 0x6d, 0x8d, 0xf3, 0x45, 0xa6, 0xae, 0x58, 0xa1, - 0x8f, 0x09, 0x28, 0xf2, 0x0d, 0x68, 0xa8, 0x9d, 0xb1, 0x7a, 0xb3, 0x99, 0x56, 0x22, 0xb5, 0x9d, - 0x46, 0xfa, 0xad, 0x81, 0xe5, 0xd1, 0x1e, 0x75, 0x02, 0xbf, 0xfd, 0xa2, 0xb2, 0x1b, 0x28, 0xaa, - 0x8f, 0x11, 0x1a, 0xd9, 0x1b, 0x36, 0x77, 0x09, 0x5b, 0xcd, 0xe7, 0x47, 0xcc, 0xea, 0x63, 0xd8, - 0xba, 0xbe, 0x09, 0xb3, 0xa1, 0x3d, 0x4a, 0x9a, 0x34, 0x84, 0xf5, 0xe6, 0x4b, 0xac, 0xfa, 0x46, - 0x92, 0xf4, 0xf8, 0xa4, 0xf5, 0x6a, 0x86, 0x51, 0x23, 0x62, 0xc0, 0x34, 0x18, 0xf9, 0x10, 0x66, - 0x3c, 0xaa, 0x9b, 0x96, 0x43, 0x7d, 0x7f, 0xdb, 0x73, 0xf7, 0xf2, 0x6b, 0x9e, 0x1c, 0x45, 0x0c, - 0x7b, 0x4c, 0x20, 0x63, 0x4a, 0x12, 0x79, 0x08, 0xd3, 0xb6, 0x75, 0x44, 0x23, 0xd1, 0xcd, 0x89, - 0x88, 0x7e, 0xf1, 0xf4, 0xa4, 0x35, 0xbd, 0x19, 0x07, 0xc6, 0xa4, 0x1c, 0xed, 0xef, 0x54, 0x60, - 0x78, 0xf3, 0x95, 0x1c, 0x29, 0x85, 0x49, 0x8f, 0x94, 0xf4, 0x5b, 0x14, 0x6b, 0xc6, 0x9b, 0xb2, - 0xda, 0x04, 0xde, 0x64, 0xc6, 0x68, 0x2c, 0x4d, 0x7a, 0x34, 0x3e, 0x37, 0x13, 0xc6, 0xf0, 0xb0, - 0xad, 0x7e, 0x72, 0xc3, 0xb6, 0xf6, 0x8c, 0x86, 0xed, 0x77, 0xca, 0x30, 0xb3, 0xaa, 0xd3, 0x9e, - 0xeb, 0x3c, 
0x75, 0xff, 0x5d, 0x78, 0x2e, 0xf6, 0xdf, 0x37, 0xa1, 0xee, 0xd1, 0xbe, 0x6d, 0x19, - 0xba, 0x50, 0xb3, 0xa5, 0xbd, 0x1b, 0x65, 0x19, 0x86, 0xd4, 0x11, 0x76, 0x97, 0xd2, 0x73, 0x69, - 0x77, 0x29, 0x7f, 0xf2, 0x76, 0x17, 0xed, 0x97, 0x8b, 0xc0, 0x55, 0x52, 0x72, 0x03, 0xca, 0x4c, - 0xdd, 0x4a, 0x5b, 0xfb, 0xf8, 0xd7, 0xc2, 0x29, 0xe4, 0x1a, 0x14, 0x03, 0x57, 0x4e, 0x37, 0x20, - 0xe9, 0xc5, 0x1d, 0x17, 0x8b, 0x81, 0x4b, 0x3e, 0x04, 0x30, 0x5c, 0xc7, 0xb4, 0x94, 0x1b, 0x28, - 0xdf, 0x83, 0xad, 0xb9, 0xde, 0x43, 0xdd, 0x33, 0x57, 0x42, 0x44, 0xb1, 0xf3, 0x8e, 0xfe, 0x63, - 0x4c, 0x1a, 0x79, 0x0b, 0xaa, 0xae, 0xb3, 0x36, 0xb0, 0x6d, 0xde, 0xa1, 0x8d, 0xf6, 0x8f, 0x9d, - 0x9e, 0xb4, 0xaa, 0xf7, 0x79, 0xc9, 0xe3, 0x93, 0xd6, 0x55, 0xb1, 0x93, 0x61, 0xff, 0xde, 0xf1, - 0xac, 0xc0, 0x72, 0xba, 0xe1, 0x46, 0x54, 0x56, 0xd3, 0x7e, 0xb5, 0x00, 0xcd, 0x35, 0xeb, 0x11, - 0x35, 0xdf, 0xb1, 0x1c, 0xd3, 0x7d, 0x48, 0x10, 0xaa, 0x36, 0x75, 0xba, 0xc1, 0xc1, 0x98, 0x3b, - 0x45, 0x61, 0x8f, 0xe1, 0x08, 0x28, 0x91, 0xc8, 0x22, 0x34, 0xc4, 0x3e, 0xc3, 0x72, 0xba, 0xbc, - 0x0f, 0xeb, 0xd1, 0x4c, 0xdf, 0x51, 0x04, 0x8c, 0x78, 0xb4, 0x63, 0x78, 0x71, 0xa8, 0x1b, 0x88, - 0x09, 0xe5, 0x40, 0xef, 0xaa, 0x45, 0x65, 0x6d, 0xec, 0x0e, 0xde, 0xd1, 0xbb, 0xb1, 0xce, 0xe5, - 0xda, 0xdc, 0x8e, 0xce, 0xb4, 0x39, 0x86, 0xae, 0xfd, 0x61, 0x01, 0xea, 0x6b, 0x03, 0xc7, 0xe0, - 0x9b, 0xf1, 0xa7, 0x5b, 0x81, 0x95, 0x6a, 0x58, 0xcc, 0x54, 0x0d, 0x07, 0x50, 0x3d, 0x7c, 0x18, - 0xaa, 0x8e, 0xcd, 0xa5, 0xad, 0xf1, 0x47, 0x85, 0x6c, 0xd2, 0xc2, 0x5d, 0x8e, 0x27, 0x9c, 0x94, - 0x33, 0xb2, 0x41, 0xd5, 0xbb, 0xef, 0x70, 0xa1, 0x52, 0xd8, 0xb5, 0xaf, 0x40, 0x33, 0xc6, 0x76, - 0x2e, 0x7f, 0xc5, 0xdf, 0x2d, 0x43, 0x75, 0xbd, 0xd3, 0x59, 0xde, 0xde, 0x20, 0xaf, 0x43, 0x53, - 0xfa, 0xaf, 0xee, 0x45, 0x7d, 0x10, 0xba, 0x2f, 0x3b, 0x11, 0x09, 0xe3, 0x7c, 0x4c, 0xf1, 0xf6, - 0xa8, 0x6e, 0xf7, 0xe4, 0xc7, 0x12, 0x2a, 0xde, 0xc8, 0x0a, 0x51, 0xd0, 0x88, 0x0e, 0x33, 0x6c, - 0x2f, 0xcf, 0xba, 0x50, 0xec, 0xd3, 0xe5, 0x67, 
0x73, 0xc6, 0x9d, 0x3c, 0x5f, 0x60, 0x76, 0x13, - 0x00, 0x98, 0x02, 0x24, 0x6f, 0x42, 0x5d, 0x1f, 0x04, 0x07, 0x7c, 0xab, 0x24, 0xbe, 0x8d, 0x57, - 0xb8, 0x7b, 0x4f, 0x96, 0x3d, 0x3e, 0x69, 0x4d, 0xdd, 0xc5, 0xf6, 0xeb, 0xea, 0x3f, 0x86, 0xdc, - 0xac, 0x71, 0xca, 0x36, 0x20, 0x1b, 0x57, 0x39, 0x77, 0xe3, 0xb6, 0x13, 0x00, 0x98, 0x02, 0x24, - 0xef, 0xc1, 0xd4, 0x21, 0x3d, 0x0e, 0xf4, 0x3d, 0x29, 0xa0, 0x7a, 0x1e, 0x01, 0x73, 0x4c, 0x59, - 0xbf, 0x1b, 0xab, 0x8e, 0x09, 0x30, 0xe2, 0xc3, 0xe5, 0x43, 0xea, 0xed, 0x51, 0xcf, 0x95, 0x76, - 0x06, 0x29, 0xa4, 0x76, 0x1e, 0x21, 0xf3, 0xa7, 0x27, 0xad, 0xcb, 0x77, 0x33, 0x60, 0x30, 0x13, - 0x5c, 0xfb, 0xdf, 0x45, 0x98, 0x5d, 0x17, 0x01, 0x04, 0xae, 0x27, 0x34, 0x0f, 0x72, 0x15, 0x4a, - 0x5e, 0x7f, 0xc0, 0x47, 0x4e, 0x49, 0xb8, 0x08, 0x70, 0x7b, 0x17, 0x59, 0x19, 0x79, 0x17, 0xea, - 0xa6, 0x9c, 0x32, 0xa4, 0x99, 0x63, 0x2c, 0x93, 0x94, 0xfa, 0x87, 0x21, 0x1a, 0xdb, 0xd3, 0xf5, - 0xfc, 0x6e, 0xc7, 0xfa, 0x90, 0xca, 0x9d, 0x3f, 0xdf, 0xd3, 0x6d, 0x89, 0x22, 0x54, 0x34, 0xb6, - 0xaa, 0x1e, 0xd2, 0x63, 0xb1, 0xef, 0x2d, 0x47, 0xab, 0xea, 0x5d, 0x59, 0x86, 0x21, 0x95, 0xb4, - 0xd4, 0xc7, 0xc2, 0x46, 0x41, 0x59, 0xd8, 0x6c, 0x1e, 0xb0, 0x02, 0xf9, 0xdd, 0xb0, 0x29, 0xf3, - 0x03, 0x2b, 0x08, 0xa8, 0x27, 0x5f, 0xe3, 0x58, 0x53, 0xe6, 0x1d, 0x8e, 0x80, 0x12, 0x89, 0xfc, - 0x04, 0x34, 0x38, 0x78, 0xdb, 0x76, 0xf7, 0xf8, 0x8b, 0x6b, 0x08, 0xeb, 0xcd, 0x03, 0x55, 0x88, - 0x11, 0x5d, 0xfb, 0xa3, 0x22, 0x5c, 0x59, 0xa7, 0x81, 0xd0, 0x6a, 0x56, 0x69, 0xdf, 0x76, 0x8f, - 0x99, 0x3e, 0x8d, 0xf4, 0x5b, 0xe4, 0x6d, 0x00, 0xcb, 0xdf, 0xeb, 0x1c, 0x19, 0xfc, 0x3b, 0x10, - 0xdf, 0xf0, 0x0d, 0xf9, 0x49, 0xc2, 0x46, 0xa7, 0x2d, 0x29, 0x8f, 0x13, 0xff, 0x30, 0x56, 0x27, - 0xda, 0x48, 0x17, 0x9f, 0xb0, 0x91, 0xee, 0x00, 0xf4, 0x23, 0xad, 0xbc, 0xc4, 0x39, 0x7f, 0x5a, - 0x89, 0x39, 0x8f, 0x42, 0x1e, 0x83, 0xc9, 0xa3, 0x27, 0x3b, 0x30, 0x67, 0xd2, 0x7d, 0x7d, 0x60, - 0x07, 0xe1, 0x4e, 0x42, 0x7e, 0xc4, 0x67, 0xdf, 0x8c, 0x84, 0xc1, 0x0d, 0xab, 0x29, 
0x24, 0x1c, - 0xc2, 0xd6, 0xfe, 0x5e, 0x09, 0xae, 0xad, 0xd3, 0x20, 0xb4, 0xad, 0xc9, 0xd9, 0xb1, 0xd3, 0xa7, - 0x06, 0x7b, 0x0b, 0x1f, 0x17, 0xa0, 0x6a, 0xeb, 0x7b, 0xd4, 0x66, 0xab, 0x17, 0x7b, 0x9a, 0xf7, - 0xc7, 0x5e, 0x08, 0x46, 0x4b, 0x59, 0xd8, 0xe4, 0x12, 0x52, 0x4b, 0x83, 0x28, 0x44, 0x29, 0x9e, - 0x4d, 0xea, 0x86, 0x3d, 0xf0, 0x03, 0xea, 0x6d, 0xbb, 0x5e, 0x20, 0xf5, 0xc9, 0x70, 0x52, 0x5f, - 0x89, 0x48, 0x18, 0xe7, 0x23, 0x4b, 0x00, 0x86, 0x6d, 0x51, 0x27, 0xe0, 0xb5, 0xc4, 0x77, 0x45, - 0xd4, 0xfb, 0x5d, 0x09, 0x29, 0x18, 0xe3, 0x62, 0xa2, 0x7a, 0xae, 0x63, 0x05, 0xae, 0x10, 0x55, - 0x4e, 0x8a, 0xda, 0x8a, 0x48, 0x18, 0xe7, 0xe3, 0xd5, 0x68, 0xe0, 0x59, 0x86, 0xcf, 0xab, 0x55, - 0x52, 0xd5, 0x22, 0x12, 0xc6, 0xf9, 0xd8, 0x9a, 0x17, 0x7b, 0xfe, 0x73, 0xad, 0x79, 0xbf, 0xd9, - 0x80, 0xeb, 0x89, 0x6e, 0x0d, 0xf4, 0x80, 0xee, 0x0f, 0xec, 0x0e, 0x0d, 0xd4, 0x0b, 0x1c, 0x73, - 0x2d, 0xfc, 0xf3, 0xd1, 0x7b, 0x17, 0x61, 0x4b, 0xc6, 0x64, 0xde, 0xfb, 0x50, 0x03, 0xcf, 0xf4, - 0xee, 0x17, 0xa1, 0xe1, 0xe8, 0x81, 0xcf, 0x3f, 0x5c, 0xf9, 0x8d, 0x86, 0x6a, 0xd8, 0x3d, 0x45, - 0xc0, 0x88, 0x87, 0x6c, 0xc3, 0x65, 0xd9, 0xc5, 0xb7, 0x1e, 0xf5, 0x5d, 0x2f, 0xa0, 0x9e, 0xa8, - 0x2b, 0x97, 0x53, 0x59, 0xf7, 0xf2, 0x56, 0x06, 0x0f, 0x66, 0xd6, 0x24, 0x5b, 0x70, 0xc9, 0x10, - 0xa1, 0x1c, 0xd4, 0x76, 0x75, 0x53, 0x01, 0x0a, 0x53, 0x66, 0xb8, 0x35, 0x5a, 0x19, 0x66, 0xc1, - 0xac, 0x7a, 0xe9, 0xd1, 0x5c, 0x1d, 0x6b, 0x34, 0xd7, 0xc6, 0x19, 0xcd, 0xf5, 0xf1, 0x46, 0x73, - 0xe3, 0x6c, 0xa3, 0x99, 0xf5, 0x3c, 0x1b, 0x47, 0xd4, 0x63, 0xea, 0x89, 0x58, 0x61, 0x63, 0x91, - 0x42, 0x61, 0xcf, 0x77, 0x32, 0x78, 0x30, 0xb3, 0x26, 0xd9, 0x83, 0x6b, 0xa2, 0xfc, 0x96, 0x63, - 0x78, 0xc7, 0x7d, 0xb6, 0xf0, 0xc4, 0x70, 0x9b, 0x09, 0x5b, 0xf2, 0xb5, 0xce, 0x48, 0x4e, 0x7c, - 0x02, 0x0a, 0xf9, 0x59, 0x98, 0x16, 0x6f, 0x69, 0x4b, 0xef, 0x73, 0x58, 0x11, 0x37, 0xf4, 0x92, - 0x84, 0x9d, 0x5e, 0x89, 0x13, 0x31, 0xc9, 0x4b, 0x96, 0x61, 0xb6, 0x7f, 0x64, 0xb0, 0x9f, 0x1b, - 0xfb, 0xf7, 0x28, 0x35, 
0xa9, 0xc9, 0x1d, 0x95, 0x8d, 0xf6, 0xcb, 0xca, 0xba, 0xb3, 0x9d, 0x24, - 0x63, 0x9a, 0x9f, 0xbc, 0x09, 0x53, 0x7e, 0xa0, 0x7b, 0x81, 0x34, 0xe0, 0xce, 0xcf, 0x88, 0xb8, - 0x2a, 0x65, 0xdf, 0xec, 0xc4, 0x68, 0x98, 0xe0, 0xcc, 0x5c, 0x2f, 0x66, 0x2f, 0x6e, 0xbd, 0xc8, - 0x33, 0x5b, 0xfd, 0xb3, 0x22, 0xdc, 0x58, 0xa7, 0xc1, 0x96, 0xeb, 0x48, 0xf3, 0x77, 0xd6, 0xb2, - 0x7f, 0x26, 0xeb, 0x77, 0x72, 0xd1, 0x2e, 0x4e, 0x74, 0xd1, 0x2e, 0x4d, 0x68, 0xd1, 0x2e, 0x5f, - 0xe0, 0xa2, 0xfd, 0x0f, 0x8a, 0xf0, 0x72, 0xa2, 0x27, 0xb7, 0x5d, 0x53, 0x4d, 0xf8, 0x9f, 0x75, - 0xe0, 0x19, 0x3a, 0xf0, 0xb1, 0xd0, 0x3b, 0xb9, 0x03, 0x33, 0xa5, 0xf1, 0x7c, 0x94, 0xd6, 0x78, - 0xde, 0xcb, 0xb3, 0xf2, 0x65, 0x48, 0x38, 0xd3, 0x8a, 0x77, 0x07, 0x88, 0x27, 0xdd, 0xad, 0xc2, - 0xf4, 0x13, 0x53, 0x7a, 0xc2, 0xc0, 0x4d, 0x1c, 0xe2, 0xc0, 0x8c, 0x5a, 0xa4, 0x03, 0x2f, 0xf9, - 0xd4, 0x09, 0x2c, 0x87, 0xda, 0x49, 0x38, 0xa1, 0x0d, 0xbd, 0x2a, 0xe1, 0x5e, 0xea, 0x64, 0x31, - 0x61, 0x76, 0xdd, 0x3c, 0xf3, 0xc0, 0xbf, 0x02, 0xae, 0x72, 0x8a, 0xae, 0x99, 0x98, 0xc6, 0xf2, - 0x71, 0x5a, 0x63, 0x79, 0x3f, 0xff, 0x7b, 0x1b, 0x4f, 0x5b, 0x59, 0x02, 0xe0, 0x6f, 0x21, 0xae, - 0xae, 0x84, 0x8b, 0x34, 0x86, 0x14, 0x8c, 0x71, 0xb1, 0x05, 0x48, 0xf5, 0x73, 0x5c, 0x53, 0x09, - 0x17, 0xa0, 0x4e, 0x9c, 0x88, 0x49, 0xde, 0x91, 0xda, 0x4e, 0x65, 0x6c, 0x6d, 0xe7, 0x0e, 0x90, - 0x84, 0xe1, 0x51, 0xe0, 0x55, 0x93, 0x71, 0xc3, 0x1b, 0x43, 0x1c, 0x98, 0x51, 0x6b, 0xc4, 0x50, - 0xae, 0x4d, 0x76, 0x28, 0xd7, 0xc7, 0x1f, 0xca, 0xe4, 0x7d, 0xb8, 0xca, 0x45, 0xc9, 0xfe, 0x49, - 0x02, 0x0b, 0xbd, 0xe7, 0x47, 0x24, 0xf0, 0x55, 0x1c, 0xc5, 0x88, 0xa3, 0x31, 0xd8, 0xfb, 0x31, - 0x3c, 0x6a, 0x32, 0xe1, 0xba, 0x3d, 0x5a, 0x27, 0x5a, 0xc9, 0xe0, 0xc1, 0xcc, 0x9a, 0x6c, 0x88, - 0x05, 0x6c, 0x18, 0xea, 0x7b, 0x36, 0x35, 0x65, 0xdc, 0x74, 0x38, 0xc4, 0x76, 0x36, 0x3b, 0x92, - 0x82, 0x31, 0xae, 0x2c, 0x35, 0x65, 0xea, 0x9c, 0x6a, 0xca, 0x3a, 0xb7, 0xd2, 0xef, 0x27, 0xb4, - 0x21, 0xa9, 0xeb, 0x84, 0x91, 0xf0, 0x2b, 0x69, 0x06, 0x1c, 
0xae, 0xc3, 0xb5, 0x44, 0xc3, 0xb3, - 0xfa, 0x81, 0x9f, 0xc4, 0x9a, 0x49, 0x69, 0x89, 0x19, 0x3c, 0x98, 0x59, 0x93, 0xe9, 0xe7, 0x07, - 0x54, 0xb7, 0x83, 0x83, 0x24, 0xe0, 0x6c, 0x52, 0x3f, 0xbf, 0x3d, 0xcc, 0x82, 0x59, 0xf5, 0x32, - 0x17, 0xa4, 0xb9, 0xe7, 0x53, 0xad, 0xfa, 0x95, 0x12, 0x5c, 0x5d, 0xa7, 0x41, 0x18, 0x52, 0xf6, - 0x99, 0x19, 0xe5, 0x13, 0x30, 0xa3, 0xfc, 0x46, 0x05, 0x2e, 0xad, 0xd3, 0x60, 0x48, 0x1b, 0xfb, - 0xff, 0xb4, 0xfb, 0xb7, 0xe0, 0x52, 0x14, 0xc5, 0xd8, 0x09, 0x5c, 0x4f, 0xac, 0xe5, 0xa9, 0xdd, - 0x72, 0x67, 0x98, 0x05, 0xb3, 0xea, 0x91, 0x6f, 0xc0, 0xcb, 0x7c, 0xa9, 0x77, 0xba, 0xc2, 0x3e, - 0x2b, 0x8c, 0x09, 0xb1, 0x73, 0x38, 0x2d, 0x09, 0xf9, 0x72, 0x27, 0x9b, 0x0d, 0x47, 0xd5, 0x27, - 0xdf, 0x86, 0xa9, 0xbe, 0xd5, 0xa7, 0xb6, 0xe5, 0x70, 0xfd, 0x2c, 0x77, 0xf0, 0xcf, 0x76, 0x0c, - 0x2c, 0xda, 0xc0, 0xc5, 0x4b, 0x31, 0x21, 0x30, 0x73, 0xa4, 0xd6, 0x2f, 0x70, 0xa4, 0xfe, 0x8f, - 0x22, 0xd4, 0xd6, 0x3d, 0x77, 0xd0, 0x6f, 0x1f, 0x93, 0x2e, 0x54, 0x1f, 0x72, 0xe7, 0x99, 0x74, - 0x4d, 0x8d, 0x7f, 0x12, 0x40, 0xf8, 0xe0, 0x22, 0x95, 0x48, 0xfc, 0x47, 0x09, 0xcf, 0x06, 0xf1, - 0x21, 0x3d, 0xa6, 0xa6, 0xf4, 0xa1, 0x85, 0x83, 0xf8, 0x2e, 0x2b, 0x44, 0x41, 0x23, 0x3d, 0x98, - 0xd5, 0x6d, 0xdb, 0x7d, 0x48, 0xcd, 0x4d, 0x3d, 0xe0, 0x7e, 0x6f, 0xe9, 0x5b, 0x39, 0xaf, 0x59, - 0x9a, 0x07, 0x33, 0x2c, 0x27, 0xa1, 0x30, 0x8d, 0x4d, 0x3e, 0x80, 0x9a, 0x1f, 0xb8, 0x9e, 0x52, - 0xb6, 0x9a, 0x4b, 0x2b, 0xe3, 0xbf, 0xf4, 0xf6, 0xd7, 0x3b, 0x02, 0x4a, 0xd8, 0xec, 0xe5, 0x1f, - 0x54, 0x02, 0xb4, 0x5f, 0x2f, 0x00, 0xdc, 0xde, 0xd9, 0xd9, 0x96, 0xee, 0x05, 0x13, 0xca, 0xfa, - 0x20, 0x74, 0x54, 0x8e, 0xef, 0x10, 0x4c, 0x04, 0xe0, 0x4a, 0x1f, 0xde, 0x20, 0x38, 0x40, 0x8e, - 0x4e, 0x7e, 0x1c, 0x6a, 0x52, 0x41, 0x96, 0xdd, 0x1e, 0xc6, 0x53, 0x48, 0x25, 0x1a, 0x15, 0x5d, - 0xfb, 0xdb, 0x45, 0x80, 0x0d, 0xd3, 0xa6, 0x1d, 0x75, 0x78, 0xa3, 0x11, 0x1c, 0x78, 0xd4, 0x3f, - 0x70, 0x6d, 0x73, 0x4c, 0x6f, 0x2a, 0xb7, 0xf9, 0xef, 0x28, 0x10, 0x8c, 0xf0, 0x88, 0x09, 0x53, - 
0x7e, 0x40, 0xfb, 0x2a, 0x26, 0x77, 0x4c, 0x27, 0xca, 0x9c, 0xb0, 0x8b, 0x44, 0x38, 0x98, 0x40, - 0x25, 0x3a, 0x34, 0x2d, 0xc7, 0x10, 0x1f, 0x48, 0xfb, 0x78, 0xcc, 0x81, 0x34, 0xcb, 0x76, 0x1c, - 0x1b, 0x11, 0x0c, 0xc6, 0x31, 0xb5, 0xdf, 0x2d, 0xc2, 0x15, 0x2e, 0x8f, 0x35, 0x23, 0x11, 0x79, - 0x4b, 0xfe, 0xe4, 0xd0, 0x41, 0xd3, 0x3f, 0x7e, 0x36, 0xd1, 0xe2, 0x9c, 0xe2, 0x16, 0x0d, 0xf4, - 0x48, 0x9f, 0x8b, 0xca, 0x62, 0xa7, 0x4b, 0x07, 0x50, 0xf6, 0xd9, 0x7c, 0x25, 0x7a, 0xaf, 0x33, - 0xf6, 0x10, 0xca, 0x7e, 0x00, 0x3e, 0x7b, 0x85, 0x5e, 0x63, 0x3e, 0x6b, 0x71, 0x71, 0xe4, 0x4f, - 0x43, 0xd5, 0x0f, 0xf4, 0x60, 0xa0, 0x3e, 0xcd, 0xdd, 0x49, 0x0b, 0xe6, 0xe0, 0xd1, 0x3c, 0x22, - 0xfe, 0xa3, 0x14, 0xaa, 0xfd, 0x6e, 0x01, 0xae, 0x65, 0x57, 0xdc, 0xb4, 0xfc, 0x80, 0xfc, 0x89, - 0xa1, 0x6e, 0x3f, 0xe3, 0x1b, 0x67, 0xb5, 0x79, 0xa7, 0x87, 0x67, 0x11, 0x54, 0x49, 0xac, 0xcb, - 0x03, 0xa8, 0x58, 0x01, 0xed, 0xa9, 0xfd, 0xe5, 0xfd, 0x09, 0x3f, 0x7a, 0x6c, 0x69, 0x67, 0x52, - 0x50, 0x08, 0xd3, 0xbe, 0x53, 0x1c, 0xf5, 0xc8, 0x7c, 0xf9, 0xb0, 0x93, 0xd1, 0xdd, 0x77, 0xf3, - 0x45, 0x77, 0x27, 0x1b, 0x34, 0x1c, 0xe4, 0xfd, 0xa7, 0x86, 0x83, 0xbc, 0xef, 0xe7, 0x0f, 0xf2, - 0x4e, 0x75, 0xc3, 0xc8, 0x58, 0xef, 0x1f, 0x94, 0xe0, 0x95, 0x27, 0x0d, 0x1b, 0xb6, 0x9e, 0xc9, - 0xd1, 0x99, 0x77, 0x3d, 0x7b, 0xf2, 0x38, 0x24, 0x4b, 0x50, 0xe9, 0x1f, 0xe8, 0xbe, 0x52, 0xca, - 0xd4, 0x86, 0xa5, 0xb2, 0xcd, 0x0a, 0x1f, 0xb3, 0x49, 0x83, 0x2b, 0x73, 0xfc, 0x2f, 0x0a, 0x56, - 0x36, 0x1d, 0xf7, 0xa8, 0xef, 0x47, 0x36, 0x81, 0x70, 0x3a, 0xde, 0x12, 0xc5, 0xa8, 0xe8, 0x24, - 0x80, 0xaa, 0x30, 0x31, 0xcb, 0x95, 0x69, 0xfc, 0x40, 0xae, 0x8c, 0x03, 0x01, 0xd1, 0x43, 0x49, - 0x6f, 0x85, 0x94, 0x45, 0x16, 0xa0, 0x1c, 0x44, 0xe1, 0xd9, 0x6a, 0x6b, 0x5e, 0xce, 0xd0, 0x4f, - 0x39, 0x1f, 0xdb, 0xd8, 0xbb, 0x7b, 0xdc, 0xa8, 0x6e, 0x4a, 0xff, 0xb9, 0xe5, 0x3a, 0x5c, 0x21, - 0x2b, 0x45, 0x1b, 0xfb, 0xfb, 0x43, 0x1c, 0x98, 0x51, 0x4b, 0xfb, 0xb7, 0x75, 0xb8, 0x92, 0x3d, - 0x1e, 0x58, 0xbf, 0x1d, 0x51, 0xcf, 
0x67, 0xd8, 0x85, 0x64, 0xbf, 0x3d, 0x10, 0xc5, 0xa8, 0xe8, - 0x9f, 0xea, 0x80, 0xb3, 0xdf, 0x28, 0xc0, 0x55, 0x4f, 0xfa, 0x88, 0x9e, 0x45, 0xd0, 0xd9, 0xab, - 0xc2, 0x9c, 0x31, 0x42, 0x20, 0x8e, 0x6e, 0x0b, 0xf9, 0x9b, 0x05, 0x98, 0xef, 0xa5, 0xec, 0x1c, - 0x17, 0x78, 0x56, 0x92, 0x9f, 0x7f, 0xd8, 0x1a, 0x21, 0x0f, 0x47, 0xb6, 0x84, 0x7c, 0x1b, 0x9a, - 0x7d, 0x36, 0x2e, 0xfc, 0x80, 0x3a, 0x86, 0x0a, 0x10, 0x1d, 0xff, 0x4b, 0xda, 0x8e, 0xb0, 0xc2, - 0xb3, 0x52, 0x5c, 0x3f, 0x88, 0x11, 0x30, 0x2e, 0xf1, 0x39, 0x3f, 0x1c, 0x79, 0x13, 0xea, 0x3e, - 0x0d, 0x02, 0xcb, 0xe9, 0x8a, 0xfd, 0x46, 0x43, 0x7c, 0x2b, 0x1d, 0x59, 0x86, 0x21, 0x95, 0xfc, - 0x04, 0x34, 0xb8, 0xcb, 0x69, 0xd9, 0xeb, 0xfa, 0xf3, 0x0d, 0x1e, 0x2e, 0x36, 0x2d, 0x02, 0xe0, - 0x64, 0x21, 0x46, 0x74, 0xf2, 0x25, 0x98, 0xda, 0xe3, 0x9f, 0xaf, 0x3c, 0x2f, 0x2f, 0x6c, 0x5c, - 0x5c, 0x5b, 0x6b, 0xc7, 0xca, 0x31, 0xc1, 0x45, 0x96, 0x00, 0x68, 0xe8, 0x97, 0x4b, 0xdb, 0xb3, - 0x22, 0x8f, 0x1d, 0xc6, 0xb8, 0xc8, 0xab, 0x50, 0x0a, 0x6c, 0x9f, 0xdb, 0xb0, 0xea, 0xd1, 0x16, - 0x74, 0x67, 0xb3, 0x83, 0xac, 0x5c, 0xfb, 0xa3, 0x02, 0xcc, 0xa6, 0x8e, 0x11, 0xb1, 0x2a, 0x03, - 0xcf, 0x96, 0xd3, 0x48, 0x58, 0x65, 0x17, 0x37, 0x91, 0x95, 0x93, 0xf7, 0xa5, 0x5a, 0x5e, 0xcc, - 0x99, 0x1a, 0xe4, 0x9e, 0x1e, 0xf8, 0x4c, 0x0f, 0x1f, 0xd2, 0xc8, 0xb9, 0x9b, 0x2f, 0x6a, 0x8f, - 0x5c, 0x07, 0x62, 0x6e, 0xbe, 0x88, 0x86, 0x09, 0xce, 0x94, 0xc1, 0xaf, 0x7c, 0x16, 0x83, 0x9f, - 0xf6, 0xab, 0xc5, 0x58, 0x0f, 0x48, 0xcd, 0xfe, 0x29, 0x3d, 0xf0, 0x45, 0xb6, 0x80, 0x86, 0x8b, - 0x7b, 0x23, 0xbe, 0xfe, 0xf1, 0xc5, 0x58, 0x52, 0xc9, 0x3b, 0xa2, 0xef, 0x4b, 0x39, 0x0f, 0x60, - 0xef, 0x6c, 0x76, 0x44, 0x74, 0x95, 0x7a, 0x6b, 0xe1, 0x2b, 0x28, 0x5f, 0xd0, 0x2b, 0xd0, 0xfe, - 0x45, 0x09, 0x9a, 0x77, 0xdc, 0xbd, 0x4f, 0x49, 0x04, 0x75, 0xf6, 0x32, 0x55, 0xfc, 0x04, 0x97, - 0xa9, 0x5d, 0x78, 0x39, 0x08, 0xec, 0x0e, 0x35, 0x5c, 0xc7, 0xf4, 0x97, 0xf7, 0x03, 0xea, 0xad, - 0x59, 0x8e, 0xe5, 0x1f, 0x50, 0x53, 0xba, 0x93, 0x3e, 0x77, 0x7a, 0xd2, 
0x7a, 0x79, 0x67, 0x67, - 0x33, 0x8b, 0x05, 0x47, 0xd5, 0xe5, 0xd3, 0x86, 0x38, 0xf3, 0xc9, 0xcf, 0x44, 0xc9, 0x98, 0x1b, - 0x31, 0x6d, 0xc4, 0xca, 0x31, 0xc1, 0xa5, 0x7d, 0xb7, 0x08, 0x8d, 0x30, 0xe9, 0x03, 0xf9, 0x02, - 0xd4, 0xf6, 0x3c, 0xf7, 0x90, 0x7a, 0xc2, 0x73, 0x27, 0xcf, 0x44, 0xb5, 0x45, 0x11, 0x2a, 0x1a, - 0xf9, 0x3c, 0x54, 0x02, 0xb7, 0x6f, 0x19, 0x69, 0x83, 0xda, 0x0e, 0x2b, 0x44, 0x41, 0xbb, 0xb8, - 0x01, 0xfe, 0xc5, 0x84, 0x6a, 0xd7, 0x18, 0xa9, 0x8c, 0xbd, 0x07, 0x65, 0x5f, 0xf7, 0x6d, 0xb9, - 0x9e, 0xe6, 0xc8, 0x9f, 0xb0, 0xdc, 0xd9, 0x94, 0xf9, 0x13, 0x96, 0x3b, 0x9b, 0xc8, 0x41, 0xb5, - 0x3f, 0x28, 0x42, 0x53, 0xf4, 0x9b, 0x98, 0x15, 0x26, 0xd9, 0x73, 0x6f, 0xf1, 0x50, 0x0a, 0x7f, - 0xd0, 0xa3, 0x1e, 0x37, 0x33, 0xc9, 0x49, 0x2e, 0xee, 0x1f, 0x88, 0x88, 0x61, 0x38, 0x45, 0x54, - 0xa4, 0xba, 0xbe, 0x7c, 0x81, 0x5d, 0x5f, 0x39, 0x53, 0xd7, 0x57, 0x2f, 0xa2, 0xeb, 0x3f, 0x2e, - 0x42, 0x63, 0xd3, 0xda, 0xa7, 0xc6, 0xb1, 0x61, 0xf3, 0xd3, 0x9f, 0x26, 0xb5, 0x69, 0x40, 0xd7, - 0x3d, 0xdd, 0xa0, 0xdb, 0xd4, 0xb3, 0x78, 0x52, 0x24, 0xf6, 0x7d, 0xf0, 0x19, 0x48, 0x9e, 0xfe, - 0x5c, 0x1d, 0xc1, 0x83, 0x23, 0x6b, 0x93, 0x0d, 0x98, 0x32, 0xa9, 0x6f, 0x79, 0xd4, 0xdc, 0x8e, - 0x6d, 0x54, 0xbe, 0xa0, 0x96, 0x9a, 0xd5, 0x18, 0xed, 0xf1, 0x49, 0x6b, 0x5a, 0x19, 0x28, 0xc5, - 0x8e, 0x25, 0x51, 0x95, 0x7d, 0xf2, 0x7d, 0x7d, 0xe0, 0x67, 0xb5, 0x31, 0xf6, 0xc9, 0x6f, 0x67, - 0xb3, 0xe0, 0xa8, 0xba, 0x5a, 0x05, 0x4a, 0x9b, 0x6e, 0x57, 0xfb, 0x4e, 0x09, 0xc2, 0xec, 0x59, - 0xe4, 0xcf, 0x15, 0xa0, 0xa9, 0x3b, 0x8e, 0x1b, 0xc8, 0xcc, 0x54, 0xc2, 0x03, 0x8f, 0xb9, 0x93, - 0x74, 0x2d, 0x2c, 0x47, 0xa0, 0xc2, 0x79, 0x1b, 0x3a, 0x94, 0x63, 0x14, 0x8c, 0xcb, 0x26, 0x83, - 0x94, 0x3f, 0x79, 0x2b, 0x7f, 0x2b, 0xce, 0xe0, 0x3d, 0xbe, 0xf6, 0x35, 0x98, 0x4b, 0x37, 0xf6, - 0x3c, 0xee, 0xa0, 0x5c, 0x8e, 0xf9, 0x22, 0x40, 0x14, 0x53, 0xf2, 0x0c, 0x8c, 0x58, 0x56, 0xc2, - 0x88, 0x35, 0x7e, 0x0a, 0x83, 0xa8, 0xd1, 0x23, 0x0d, 0x57, 0xdf, 0x4a, 0x19, 0xae, 0x36, 0x26, - 0x21, 0xec, 
0xc9, 0xc6, 0xaa, 0xbf, 0x55, 0x80, 0xb9, 0x88, 0x59, 0x9e, 0x85, 0xfe, 0x32, 0x4c, - 0x7b, 0x54, 0x37, 0xdb, 0x7a, 0x60, 0x1c, 0xf0, 0x50, 0xef, 0x02, 0x8f, 0xcd, 0xe6, 0xa7, 0xbf, - 0x30, 0x4e, 0xc0, 0x24, 0x1f, 0xd1, 0xa1, 0xc9, 0x0a, 0x76, 0xac, 0x1e, 0x75, 0x07, 0xc1, 0x98, - 0x56, 0x53, 0xbe, 0x61, 0xc1, 0x08, 0x06, 0xe3, 0x98, 0xda, 0x0f, 0x0a, 0x30, 0x13, 0x6f, 0xf0, - 0x85, 0x5b, 0xd4, 0x0e, 0x92, 0x16, 0xb5, 0x95, 0x09, 0xbc, 0x93, 0x11, 0x56, 0xb4, 0x8f, 0x20, - 0xfe, 0x68, 0xdc, 0x72, 0x16, 0x37, 0x16, 0x14, 0x9e, 0x68, 0x2c, 0xf8, 0xf4, 0x27, 0x4c, 0x1a, - 0xa5, 0xe5, 0x96, 0x9f, 0x63, 0x2d, 0xf7, 0x93, 0xcc, 0xba, 0x14, 0xcb, 0x1c, 0x54, 0xcd, 0x91, - 0x39, 0xa8, 0x17, 0x66, 0x0e, 0xaa, 0x4d, 0x6c, 0xd2, 0x39, 0x4b, 0xf6, 0xa0, 0xfa, 0x33, 0xcd, - 0x1e, 0xd4, 0xb8, 0xa8, 0xec, 0x41, 0x90, 0x37, 0x7b, 0xd0, 0x47, 0x05, 0x98, 0x31, 0x13, 0x27, - 0x66, 0xe5, 0x19, 0xf3, 0xf1, 0x97, 0x9a, 0xe4, 0x01, 0x5c, 0x71, 0x64, 0x2a, 0x59, 0x86, 0x29, - 0x91, 0x59, 0x39, 0x7b, 0xa6, 0x3e, 0x99, 0x9c, 0x3d, 0xbf, 0x5f, 0x8b, 0xaf, 0x48, 0xcf, 0xda, - 0x68, 0xfe, 0x46, 0xd2, 0x68, 0x7e, 0x23, 0x6d, 0x34, 0x9f, 0x8d, 0xc5, 0xb3, 0xc6, 0x0d, 0xe7, - 0x3f, 0x19, 0x9b, 0xa8, 0x4b, 0x3c, 0x5b, 0x4f, 0xf8, 0xce, 0x33, 0x26, 0xeb, 0x65, 0x98, 0x95, - 0xda, 0xab, 0x22, 0xf2, 0x59, 0x6e, 0x3a, 0x0a, 0x73, 0x5a, 0x4d, 0x92, 0x31, 0xcd, 0xcf, 0x04, - 0xfa, 0x2a, 0x69, 0xab, 0xd8, 0x2a, 0x44, 0x83, 0x4c, 0x25, 0x54, 0x0d, 0x39, 0xd8, 0xb6, 0xc2, - 0xa3, 0xba, 0x2f, 0x4d, 0xdf, 0xb1, 0x6d, 0x05, 0xf2, 0x52, 0x94, 0xd4, 0xb8, 0xfd, 0xbf, 0xf6, - 0x14, 0xfb, 0xbf, 0x0e, 0x4d, 0x5b, 0xf7, 0x03, 0xf1, 0x36, 0x4d, 0xf9, 0x39, 0xff, 0xb1, 0xb3, - 0x2d, 0xbc, 0x6c, 0x31, 0x8f, 0xb4, 0xdb, 0xcd, 0x08, 0x06, 0xe3, 0x98, 0xc4, 0x84, 0x29, 0xf6, - 0x97, 0x7f, 0xda, 0xe6, 0x72, 0x20, 0x53, 0x9b, 0x9d, 0x47, 0x46, 0x68, 0xb6, 0xda, 0x8c, 0xe1, - 0x60, 0x02, 0x75, 0x84, 0x8b, 0x00, 0xc6, 0x71, 0x11, 0x90, 0x9f, 0x15, 0x9a, 0xd3, 0x71, 0xf8, - 0x5a, 0x9b, 0xfc, 0xb5, 0x86, 0x21, 0x92, 0x18, 
0x27, 0x62, 0x92, 0x97, 0x8d, 0x8a, 0x81, 0xec, - 0x06, 0x55, 0x7d, 0x2a, 0x39, 0x2a, 0x76, 0x93, 0x64, 0x4c, 0xf3, 0x93, 0x6d, 0xb8, 0x1c, 0x16, - 0xc5, 0x9b, 0x31, 0xcd, 0x71, 0xc2, 0x98, 0xb5, 0xdd, 0x0c, 0x1e, 0xcc, 0xac, 0xc9, 0x0f, 0x81, - 0x0c, 0x3c, 0x8f, 0x3a, 0xc1, 0x6d, 0xdd, 0x3f, 0x90, 0xc1, 0x6f, 0xd1, 0x21, 0x90, 0x88, 0x84, - 0x71, 0x3e, 0xb2, 0x04, 0x20, 0xe0, 0x78, 0xad, 0xd9, 0x64, 0x7c, 0xe9, 0x6e, 0x48, 0xc1, 0x18, - 0x97, 0xf6, 0x51, 0x03, 0x9a, 0xf7, 0xf4, 0xc0, 0x3a, 0xa2, 0xdc, 0x9f, 0x77, 0x31, 0x4e, 0x95, - 0xbf, 0x5a, 0x80, 0x2b, 0xc9, 0xa0, 0xcd, 0x0b, 0xf4, 0xac, 0xf0, 0x64, 0x3f, 0x98, 0x29, 0x0d, - 0x47, 0xb4, 0x82, 0xfb, 0x58, 0x86, 0x62, 0x40, 0x2f, 0xda, 0xc7, 0xd2, 0x19, 0x25, 0x10, 0x47, - 0xb7, 0xe5, 0xd3, 0xe2, 0x63, 0x79, 0xbe, 0xb3, 0x53, 0xa6, 0x3c, 0x40, 0xb5, 0xe7, 0xc6, 0x03, - 0x54, 0x7f, 0x2e, 0xd4, 0xee, 0x7e, 0xcc, 0x03, 0xd4, 0xc8, 0x19, 0x89, 0x24, 0xcf, 0x39, 0x08, - 0xb4, 0x51, 0x9e, 0x24, 0x9e, 0xa2, 0x40, 0x59, 0xe6, 0x99, 0xb6, 0xba, 0xa7, 0xfb, 0x96, 0x21, - 0xd5, 0x8e, 0x1c, 0xd9, 0x78, 0x55, 0x96, 0x3e, 0x11, 0xb0, 0xc0, 0xff, 0xa2, 0xc0, 0x8e, 0x92, - 0x12, 0x16, 0x73, 0x25, 0x25, 0x24, 0x2b, 0x50, 0x76, 0x0e, 0xe9, 0xf1, 0xf9, 0x0e, 0xfb, 0xf3, - 0x5d, 0xd8, 0xbd, 0xbb, 0xf4, 0x18, 0x79, 0x65, 0xed, 0xbb, 0x45, 0x00, 0xf6, 0xf8, 0x67, 0xf3, - 0xc5, 0xfc, 0x38, 0xd4, 0xfc, 0x01, 0xb7, 0x9a, 0x48, 0x85, 0x29, 0x0a, 0xdf, 0x12, 0xc5, 0xa8, - 0xe8, 0xe4, 0xf3, 0x50, 0xf9, 0xd6, 0x80, 0x0e, 0x54, 0x60, 0x41, 0xa8, 0xb8, 0x7f, 0x9d, 0x15, - 0xa2, 0xa0, 0x5d, 0x9c, 0x5d, 0x55, 0xf9, 0x6c, 0x2a, 0x17, 0xe5, 0xb3, 0x69, 0x40, 0xed, 0x9e, - 0xcb, 0xa3, 0x41, 0xb5, 0xff, 0x56, 0x04, 0x88, 0xa2, 0xed, 0xc8, 0xaf, 0x17, 0xe0, 0xa5, 0xf0, - 0x83, 0x0b, 0xc4, 0xfe, 0x8b, 0x27, 0xc0, 0xce, 0xed, 0xbf, 0xc9, 0xfa, 0xd8, 0xf9, 0x0c, 0xb4, - 0x9d, 0x25, 0x0e, 0xb3, 0x5b, 0x41, 0x10, 0xea, 0xb4, 0xd7, 0x0f, 0x8e, 0x57, 0x2d, 0x4f, 0x8e, - 0xc0, 0xcc, 0xa0, 0xce, 0x5b, 0x92, 0x47, 0x54, 0x95, 0x46, 0x02, 0xfe, 0x11, 0x29, 
0x0a, 0x86, - 0x38, 0xe4, 0x00, 0xea, 0x8e, 0xfb, 0xbe, 0xcf, 0xba, 0x43, 0x0e, 0xc7, 0xb7, 0xc7, 0xef, 0x72, - 0xd1, 0xad, 0xc2, 0xde, 0x2f, 0xff, 0x60, 0xcd, 0x91, 0x9d, 0xfd, 0x6b, 0x45, 0xb8, 0x94, 0xd1, - 0x0f, 0xe4, 0x6d, 0x98, 0x93, 0x81, 0x8d, 0x51, 0x26, 0xf8, 0x42, 0x94, 0x09, 0xbe, 0x93, 0xa2, - 0xe1, 0x10, 0x37, 0x79, 0x1f, 0x40, 0x37, 0x0c, 0xea, 0xfb, 0x5b, 0xae, 0xa9, 0xf6, 0x03, 0x6f, - 0x31, 0xf5, 0x65, 0x39, 0x2c, 0x7d, 0x7c, 0xd2, 0xfa, 0xa9, 0xac, 0x58, 0xe5, 0x54, 0x3f, 0x47, - 0x15, 0x30, 0x06, 0x49, 0xbe, 0x09, 0x20, 0x36, 0xe1, 0x61, 0x3a, 0x85, 0xa7, 0x58, 0xae, 0x16, - 0x54, 0xb6, 0xae, 0x85, 0xaf, 0x0f, 0x74, 0x27, 0xb0, 0x82, 0x63, 0x91, 0xbd, 0xe6, 0x41, 0x88, - 0x82, 0x31, 0x44, 0xed, 0x9f, 0x16, 0xa1, 0xae, 0x6c, 0xe6, 0xcf, 0xc0, 0x50, 0xda, 0x4d, 0x18, - 0x4a, 0x27, 0x14, 0x9d, 0x9c, 0x65, 0x26, 0x75, 0x53, 0x66, 0xd2, 0xf5, 0xfc, 0xa2, 0x9e, 0x6c, - 0x24, 0xfd, 0xad, 0x22, 0xcc, 0x28, 0xd6, 0xbc, 0x26, 0xd2, 0xaf, 0xc2, 0xac, 0x88, 0x2a, 0xd8, - 0xd2, 0x1f, 0x89, 0x44, 0x3e, 0xbc, 0xc3, 0xca, 0x22, 0x20, 0xb8, 0x9d, 0x24, 0x61, 0x9a, 0x97, - 0x0d, 0x6b, 0x51, 0xb4, 0xcb, 0x36, 0x61, 0xc2, 0x0f, 0x29, 0xf6, 0x9b, 0x7c, 0x58, 0xb7, 0x53, - 0x34, 0x1c, 0xe2, 0x4e, 0xdb, 0x68, 0xcb, 0x17, 0x60, 0xa3, 0xfd, 0xf7, 0x05, 0x98, 0x8a, 0xfa, - 0xeb, 0xc2, 0x2d, 0xb4, 0xfb, 0x49, 0x0b, 0xed, 0x72, 0xee, 0xe1, 0x30, 0xc2, 0x3e, 0xfb, 0x17, - 0x6b, 0x90, 0x08, 0x92, 0x27, 0x7b, 0x70, 0xcd, 0xca, 0x0c, 0xf5, 0x8b, 0xcd, 0x36, 0xe1, 0xa9, - 0xef, 0x8d, 0x91, 0x9c, 0xf8, 0x04, 0x14, 0x32, 0x80, 0xfa, 0x11, 0xf5, 0x02, 0xcb, 0xa0, 0xea, - 0xf9, 0xd6, 0x73, 0xab, 0x64, 0xd2, 0x0a, 0x1d, 0xf6, 0xe9, 0x03, 0x29, 0x00, 0x43, 0x51, 0x64, - 0x0f, 0x2a, 0xd4, 0xec, 0x52, 0x95, 0x5a, 0x29, 0x67, 0x8a, 0xda, 0xb0, 0x3f, 0xd9, 0x3f, 0x1f, - 0x05, 0x34, 0xf1, 0xa1, 0x61, 0x2b, 0x2f, 0xa3, 0x1c, 0x87, 0xe3, 0x2b, 0x58, 0xa1, 0xbf, 0x32, - 0xca, 0xba, 0x10, 0x16, 0x61, 0x24, 0x87, 0x1c, 0x86, 0xe6, 0xce, 0xca, 0x84, 0x26, 0x8f, 0x27, - 0x18, 0x3b, 0x7d, 0x68, 
0x3c, 0xd4, 0x03, 0xea, 0xf5, 0x74, 0xef, 0x50, 0xee, 0x36, 0xc6, 0x7f, - 0xc2, 0x77, 0x14, 0x52, 0xf4, 0x84, 0x61, 0x11, 0x46, 0x72, 0x88, 0x0b, 0x8d, 0x40, 0xaa, 0xcf, - 0xca, 0xa6, 0x3b, 0xbe, 0x50, 0xa5, 0x88, 0xfb, 0x32, 0x58, 0x5e, 0xfd, 0xc5, 0x48, 0x06, 0x39, - 0x4a, 0xe4, 0x33, 0x17, 0x59, 0xec, 0xdb, 0x39, 0x7c, 0x03, 0x12, 0x2a, 0x5a, 0x6e, 0xb2, 0xf3, - 0xa2, 0x6b, 0xff, 0xb3, 0x12, 0x4d, 0xcb, 0xcf, 0xda, 0x4e, 0xf8, 0xa5, 0xa4, 0x9d, 0xf0, 0x7a, - 0xda, 0x4e, 0x98, 0x72, 0x56, 0x9f, 0x3f, 0xbc, 0x36, 0x65, 0x5e, 0x2b, 0x5f, 0x80, 0x79, 0xed, - 0x35, 0x68, 0x1e, 0xf1, 0x99, 0x40, 0xe4, 0x69, 0xaa, 0xf0, 0x65, 0x84, 0xcf, 0xec, 0x0f, 0xa2, - 0x62, 0x8c, 0xf3, 0xb0, 0x2a, 0xf2, 0x06, 0x97, 0x30, 0xa5, 0xb1, 0xac, 0xd2, 0x89, 0x8a, 0x31, - 0xce, 0xc3, 0x23, 0xf3, 0x2c, 0xe7, 0x50, 0x54, 0xa8, 0xf1, 0x0a, 0x22, 0x32, 0x4f, 0x15, 0x62, - 0x44, 0x27, 0x37, 0xa1, 0x3e, 0x30, 0xf7, 0x05, 0x6f, 0x9d, 0xf3, 0x72, 0x0d, 0x73, 0x77, 0x75, - 0x4d, 0xe6, 0x8d, 0x52, 0x54, 0xd6, 0x92, 0x9e, 0xde, 0x57, 0x04, 0xbe, 0x37, 0x94, 0x2d, 0xd9, - 0x8a, 0x8a, 0x31, 0xce, 0x43, 0x7e, 0x06, 0x66, 0x3c, 0x6a, 0x0e, 0x0c, 0x1a, 0xd6, 0x02, 0x5e, - 0x4b, 0x26, 0xd4, 0x8c, 0x53, 0x30, 0xc5, 0x39, 0xc2, 0x48, 0xd8, 0x1c, 0xcb, 0x48, 0xf8, 0x35, - 0x98, 0x31, 0x3d, 0xdd, 0x72, 0xa8, 0x79, 0xdf, 0xe1, 0x11, 0x09, 0x32, 0x3e, 0x30, 0xb4, 0x90, - 0xaf, 0x26, 0xa8, 0x98, 0xe2, 0xd6, 0xfe, 0x65, 0x11, 0x2a, 0x22, 0xcd, 0xe7, 0x06, 0x5c, 0xb2, - 0x1c, 0x2b, 0xb0, 0x74, 0x7b, 0x95, 0xda, 0xfa, 0x71, 0x32, 0x2a, 0xe3, 0x65, 0xb6, 0xd1, 0xde, - 0x18, 0x26, 0x63, 0x56, 0x1d, 0xd6, 0x39, 0x81, 0x58, 0xbe, 0x15, 0x8a, 0xb0, 0xa3, 0x89, 0xdc, - 0xd0, 0x09, 0x0a, 0xa6, 0x38, 0x99, 0x32, 0xd4, 0xcf, 0x08, 0xb9, 0xe0, 0xca, 0x50, 0x32, 0xd0, - 0x22, 0xc9, 0xc7, 0x95, 0xf4, 0x01, 0x57, 0x88, 0xc3, 0x53, 0x38, 0x32, 0xaa, 0x4a, 0x28, 0xe9, - 0x29, 0x1a, 0x0e, 0x71, 0x33, 0x84, 0x7d, 0xdd, 0xb2, 0x07, 0x1e, 0x8d, 0x10, 0x2a, 0x11, 0xc2, - 0x5a, 0x8a, 0x86, 0x43, 0xdc, 0xda, 0x7f, 0x2f, 0x00, 0x19, 
0x3e, 0x57, 0x40, 0x0e, 0xa0, 0xea, - 0x70, 0x5b, 0x64, 0xee, 0x94, 0xf4, 0x31, 0x93, 0xa6, 0x58, 0x24, 0x64, 0x81, 0xc4, 0x27, 0x0e, - 0xd4, 0xe9, 0xa3, 0x80, 0x7a, 0x4e, 0x78, 0xce, 0x68, 0x32, 0xe9, 0xef, 0xc5, 0xde, 0x4c, 0x22, - 0x63, 0x28, 0x43, 0xfb, 0xbd, 0x22, 0x34, 0x63, 0x7c, 0x4f, 0xdb, 0xe2, 0xf3, 0x54, 0x07, 0xc2, - 0x04, 0xb8, 0xeb, 0xd9, 0x72, 0xbe, 0x8b, 0xa5, 0x3a, 0x90, 0x24, 0xdc, 0xc4, 0x38, 0x1f, 0x59, - 0x02, 0xe8, 0xe9, 0x7e, 0x40, 0x3d, 0xae, 0x0b, 0xa5, 0x12, 0x0c, 0x6c, 0x85, 0x14, 0x8c, 0x71, - 0x91, 0x1b, 0xf2, 0x02, 0x83, 0x72, 0x32, 0x21, 0xe4, 0x88, 0xdb, 0x09, 0x2a, 0x13, 0xb8, 0x9d, - 0x80, 0x74, 0x61, 0x4e, 0xb5, 0x5a, 0x51, 0xcf, 0x97, 0x2e, 0x50, 0x0c, 0xd4, 0x14, 0x04, 0x0e, - 0x81, 0x6a, 0xdf, 0x2d, 0xc0, 0x74, 0xc2, 0x00, 0x25, 0x52, 0x39, 0xaa, 0x53, 0x31, 0x89, 0x54, - 0x8e, 0xb1, 0xc3, 0x2c, 0x5f, 0x84, 0xaa, 0xe8, 0xa0, 0x74, 0xb0, 0xab, 0xe8, 0x42, 0x94, 0x54, - 0xb6, 0xb2, 0x48, 0x13, 0x77, 0x7a, 0x65, 0x91, 0x36, 0x70, 0x54, 0x74, 0xe1, 0x39, 0x12, 0xad, - 0x93, 0x3d, 0x1d, 0xf3, 0x1c, 0x89, 0x72, 0x0c, 0x39, 0xb4, 0x7f, 0xc8, 0xdb, 0x1d, 0x78, 0xc7, - 0xe1, 0xce, 0xba, 0x0b, 0x35, 0x19, 0xe0, 0x28, 0x3f, 0x8d, 0xb7, 0x73, 0x58, 0xc5, 0x38, 0x8e, - 0x0c, 0xe5, 0xd3, 0x8d, 0xc3, 0xfb, 0xfb, 0xfb, 0xa8, 0xd0, 0xc9, 0x2d, 0x68, 0xb8, 0x8e, 0xfc, - 0x82, 0xe5, 0xe3, 0xff, 0x18, 0x5b, 0x39, 0xee, 0xab, 0xc2, 0xc7, 0x27, 0xad, 0x2b, 0xe1, 0x9f, - 0x44, 0x23, 0x31, 0xaa, 0xa9, 0xfd, 0xd9, 0x02, 0xbc, 0x84, 0xae, 0x6d, 0x5b, 0x4e, 0x37, 0xe9, - 0x7a, 0x24, 0x36, 0xcc, 0xf4, 0xf4, 0x47, 0xbb, 0x8e, 0x7e, 0xa4, 0x5b, 0xb6, 0xbe, 0x67, 0xd3, - 0xa7, 0xee, 0x8c, 0x07, 0x81, 0x65, 0x2f, 0x88, 0x0b, 0x1d, 0x17, 0x36, 0x9c, 0xe0, 0xbe, 0xd7, - 0x09, 0x3c, 0xcb, 0xe9, 0x8a, 0x59, 0x72, 0x2b, 0x81, 0x85, 0x29, 0x6c, 0xed, 0xf7, 0x4b, 0xc0, - 0x83, 0xec, 0xc8, 0x97, 0xa1, 0xd1, 0xa3, 0xc6, 0x81, 0xee, 0x58, 0xbe, 0x4a, 0x8a, 0x7b, 0x95, - 0x3d, 0xd7, 0x96, 0x2a, 0x7c, 0xcc, 0x5e, 0xc5, 0x72, 0x67, 0x93, 0x9f, 0x63, 0x89, 0x78, 0x89, - 
0x01, 0xd5, 0xae, 0xef, 0xeb, 0x7d, 0x2b, 0x77, 0x8c, 0x87, 0x48, 0x42, 0x2a, 0xa6, 0x23, 0xf1, - 0x1b, 0x25, 0x34, 0x31, 0xa0, 0xd2, 0xb7, 0x75, 0xcb, 0xc9, 0x7d, 0x01, 0x19, 0x7b, 0x82, 0x6d, - 0x86, 0x24, 0x4c, 0x95, 0xfc, 0x27, 0x0a, 0x6c, 0x32, 0x80, 0xa6, 0x6f, 0x78, 0x7a, 0xcf, 0x3f, - 0xd0, 0x97, 0x5e, 0x7f, 0x23, 0xb7, 0xf2, 0x1f, 0x89, 0x12, 0xba, 0xc8, 0x0a, 0x2e, 0x6f, 0x75, - 0x6e, 0x2f, 0x2f, 0xbd, 0xfe, 0x06, 0xc6, 0xe5, 0xc4, 0xc5, 0xbe, 0xfe, 0xda, 0x92, 0x9c, 0x41, - 0x26, 0x2e, 0xf6, 0xf5, 0xd7, 0x96, 0x30, 0x2e, 0x47, 0xfb, 0x5f, 0x05, 0x68, 0x84, 0xbc, 0x64, - 0x17, 0x80, 0xcd, 0x65, 0x32, 0x6d, 0xe8, 0xb9, 0x2e, 0x6b, 0xe1, 0xd6, 0x9e, 0xdd, 0xb0, 0x32, - 0xc6, 0x80, 0x32, 0xf2, 0xaa, 0x16, 0x27, 0x9d, 0x57, 0x75, 0x11, 0x1a, 0x07, 0xba, 0x63, 0xfa, - 0x07, 0xfa, 0xa1, 0x98, 0xd2, 0x63, 0x99, 0x86, 0x6f, 0x2b, 0x02, 0x46, 0x3c, 0xda, 0x3f, 0xae, - 0x82, 0x08, 0xcc, 0x60, 0x93, 0x8e, 0x69, 0xf9, 0xe2, 0x64, 0x40, 0x81, 0xd7, 0x0c, 0x27, 0x9d, - 0x55, 0x59, 0x8e, 0x21, 0x07, 0xb9, 0x0a, 0xa5, 0x9e, 0xe5, 0x48, 0x0d, 0x84, 0x1b, 0x72, 0xb7, - 0x2c, 0x07, 0x59, 0x19, 0x27, 0xe9, 0x8f, 0xa4, 0x86, 0x21, 0x48, 0xfa, 0x23, 0x64, 0x65, 0xe4, - 0xab, 0x30, 0x6b, 0xbb, 0xee, 0x21, 0x9b, 0x3e, 0x94, 0x22, 0x22, 0xbc, 0xea, 0xdc, 0xb4, 0xb2, - 0x99, 0x24, 0x61, 0x9a, 0x97, 0xec, 0xc2, 0xcb, 0x1f, 0x52, 0xcf, 0x95, 0xf3, 0x65, 0xc7, 0xa6, - 0xb4, 0xaf, 0x60, 0x84, 0x6a, 0xcc, 0x43, 0x48, 0x7f, 0x3e, 0x9b, 0x05, 0x47, 0xd5, 0xe5, 0xc1, - 0xe8, 0xba, 0xd7, 0xa5, 0xc1, 0xb6, 0xe7, 0x32, 0xdd, 0xc5, 0x72, 0xba, 0x0a, 0xb6, 0x1a, 0xc1, - 0xee, 0x64, 0xb3, 0xe0, 0xa8, 0xba, 0xe4, 0x5d, 0x98, 0x17, 0x24, 0xa1, 0xb6, 0x2c, 0x8b, 0x69, - 0xc6, 0xb2, 0xd5, 0xbd, 0x9d, 0xd3, 0xc2, 0x5f, 0xb6, 0x33, 0x82, 0x07, 0x47, 0xd6, 0x26, 0x77, - 0x60, 0x4e, 0x79, 0x4b, 0xb7, 0xa9, 0xd7, 0x09, 0x83, 0x75, 0xa6, 0xdb, 0xd7, 0x4f, 0x4f, 0x5a, - 0xd7, 0x56, 0x69, 0xdf, 0xa3, 0x46, 0xdc, 0xeb, 0xac, 0xb8, 0x70, 0xa8, 0x1e, 0x41, 0xb8, 0xc2, - 0x23, 0x72, 0x76, 0xfb, 0x2b, 0xae, 
0x6b, 0x9b, 0xee, 0x43, 0x47, 0x3d, 0xbb, 0x50, 0xd8, 0xb9, - 0x83, 0xb4, 0x93, 0xc9, 0x81, 0x23, 0x6a, 0xb2, 0x27, 0xe7, 0x94, 0x55, 0xf7, 0xa1, 0x93, 0x46, - 0x85, 0xe8, 0xc9, 0x3b, 0x23, 0x78, 0x70, 0x64, 0x6d, 0xb2, 0x06, 0x24, 0xfd, 0x04, 0xbb, 0x7d, - 0xe9, 0xc2, 0xbf, 0x22, 0x32, 0x00, 0xa5, 0xa9, 0x98, 0x51, 0x83, 0x6c, 0xc2, 0xe5, 0x74, 0x29, - 0x13, 0x27, 0xbd, 0xf9, 0x3c, 0xf7, 0x2f, 0x66, 0xd0, 0x31, 0xb3, 0x96, 0xf6, 0x4f, 0x8a, 0x30, - 0x9d, 0x48, 0x19, 0xf1, 0xdc, 0x1d, 0xcd, 0x67, 0x9b, 0x87, 0x9e, 0xdf, 0xdd, 0x58, 0xbd, 0x4d, - 0x75, 0x93, 0x7a, 0x77, 0xa9, 0x4a, 0xef, 0x21, 0x96, 0xc5, 0x04, 0x05, 0x53, 0x9c, 0x64, 0x1f, - 0x2a, 0xc2, 0x4f, 0x90, 0xf7, 0xda, 0x1f, 0xd5, 0x47, 0xdc, 0x59, 0x20, 0xef, 0xca, 0x72, 0x3d, - 0x8a, 0x02, 0x5e, 0x0b, 0x60, 0x2a, 0xce, 0xc1, 0x26, 0x92, 0x48, 0xed, 0xad, 0x25, 0x54, 0xde, - 0x0d, 0x28, 0x05, 0xc1, 0xb8, 0x87, 0xfe, 0x85, 0xdf, 0x69, 0x67, 0x13, 0x19, 0x86, 0xb6, 0xcf, - 0xde, 0x9d, 0xef, 0x5b, 0xae, 0x23, 0x33, 0xc0, 0xef, 0x42, 0x4d, 0xee, 0x9e, 0xc6, 0x4c, 0x5a, - 0xc0, 0x75, 0x25, 0x65, 0x76, 0x55, 0x58, 0xda, 0x7f, 0x28, 0x42, 0x23, 0x34, 0x93, 0x9c, 0x21, - 0xb3, 0xba, 0x0b, 0x8d, 0x30, 0xa2, 0x30, 0xf7, 0x9d, 0xa6, 0x51, 0xa0, 0x1b, 0xdf, 0xd9, 0x87, - 0x7f, 0x31, 0x92, 0x11, 0x8f, 0x56, 0x2c, 0xe5, 0x88, 0x56, 0xec, 0x43, 0x2d, 0xf0, 0xac, 0x6e, - 0x57, 0xee, 0x12, 0xf2, 0x84, 0x2b, 0x86, 0xdd, 0xb5, 0x23, 0x00, 0x65, 0xcf, 0x8a, 0x3f, 0xa8, - 0xc4, 0x68, 0x1f, 0xc0, 0x5c, 0x9a, 0x93, 0xab, 0xd0, 0xc6, 0x01, 0x35, 0x07, 0xb6, 0xea, 0xe3, - 0x48, 0x85, 0x96, 0xe5, 0x18, 0x72, 0x90, 0x9b, 0x50, 0x67, 0xaf, 0xe9, 0x43, 0xd7, 0x51, 0x6a, - 0x2c, 0xdf, 0x8d, 0xec, 0xc8, 0x32, 0x0c, 0xa9, 0xda, 0x7f, 0x2d, 0xc1, 0xd5, 0xc8, 0xd8, 0xb5, - 0xa5, 0x3b, 0x7a, 0xf7, 0x0c, 0x17, 0x59, 0x7e, 0x76, 0x0c, 0xec, 0xbc, 0xd7, 0x63, 0x94, 0x9e, - 0x83, 0xeb, 0x31, 0xfe, 0x4f, 0x11, 0x78, 0xf4, 0x33, 0xf9, 0x36, 0x4c, 0xe9, 0xb1, 0x3b, 0x8c, - 0xe5, 0xeb, 0xbc, 0x95, 0xfb, 0x75, 0xf2, 0x20, 0xeb, 0x30, 0x00, 0x2e, 
0x5e, 0x8a, 0x09, 0x81, - 0xc4, 0x85, 0xfa, 0xbe, 0x6e, 0xdb, 0x4c, 0x17, 0xca, 0xed, 0xbc, 0x4b, 0x08, 0xe7, 0xc3, 0x7c, - 0x4d, 0x42, 0x63, 0x28, 0x84, 0x7c, 0x54, 0x80, 0x69, 0x2f, 0xbe, 0x5d, 0x93, 0x2f, 0x24, 0x4f, - 0x68, 0x47, 0x0c, 0x2d, 0x1e, 0x6e, 0x17, 0xdf, 0x13, 0x26, 0x65, 0x6a, 0xff, 0xa5, 0x00, 0xd3, - 0x1d, 0xdb, 0x32, 0x2d, 0xa7, 0x7b, 0x81, 0xb7, 0x73, 0xdc, 0x87, 0x8a, 0x6f, 0x5b, 0x26, 0x1d, - 0x73, 0x35, 0x11, 0xeb, 0x18, 0x03, 0x40, 0x81, 0x93, 0xbc, 0xee, 0xa3, 0x74, 0x86, 0xeb, 0x3e, - 0xfe, 0xb0, 0x0a, 0x32, 0x8e, 0x9f, 0x0c, 0xa0, 0xd1, 0x55, 0xb7, 0x08, 0xc8, 0x67, 0xbc, 0x9d, - 0x23, 0x03, 0x65, 0xe2, 0x3e, 0x02, 0x31, 0xf7, 0x87, 0x85, 0x18, 0x49, 0x22, 0x34, 0x79, 0x79, - 0xf6, 0x6a, 0xce, 0xcb, 0xb3, 0x85, 0xb8, 0xe1, 0xeb, 0xb3, 0x75, 0x28, 0x1f, 0x04, 0x41, 0x5f, - 0x0e, 0xa6, 0xf1, 0x0f, 0x6a, 0x44, 0x49, 0x90, 0x84, 0x4e, 0xc4, 0xfe, 0x23, 0x87, 0x66, 0x22, - 0x1c, 0x3d, 0xbc, 0xa2, 0x70, 0x25, 0x57, 0x18, 0x49, 0x5c, 0x04, 0xfb, 0x8f, 0x1c, 0x9a, 0xfc, - 0x22, 0x34, 0x03, 0x4f, 0x77, 0xfc, 0x7d, 0xd7, 0xeb, 0x51, 0x4f, 0xee, 0x51, 0xd7, 0x72, 0xdc, - 0x1f, 0xbd, 0x13, 0xa1, 0x09, 0x93, 0x6c, 0xa2, 0x08, 0xe3, 0xd2, 0xc8, 0x21, 0xd4, 0x07, 0xa6, - 0x68, 0x98, 0x34, 0x83, 0x2d, 0xe7, 0xb9, 0x12, 0x3c, 0x16, 0x24, 0xa2, 0xfe, 0x61, 0x28, 0x20, - 0x79, 0x1b, 0x67, 0x6d, 0x52, 0xb7, 0x71, 0xc6, 0x47, 0x63, 0x56, 0x86, 0x16, 0xd2, 0x93, 0x7a, - 0xad, 0xd3, 0x95, 0x31, 0x6e, 0x6b, 0xb9, 0x55, 0x4e, 0x21, 0xb2, 0x19, 0xea, 0xc6, 0x4e, 0x17, - 0x95, 0x0c, 0xad, 0x07, 0xd2, 0x77, 0x44, 0x8c, 0xc4, 0x4d, 0x46, 0xe2, 0xd8, 0xe0, 0xe2, 0xd9, - 0xe6, 0x83, 0xf0, 0x4a, 0x9d, 0x58, 0x26, 0xf5, 0xcc, 0x2b, 0x8b, 0xb4, 0xff, 0x58, 0x84, 0xd2, - 0xce, 0x66, 0x47, 0x64, 0x47, 0xe5, 0x77, 0xa3, 0xd1, 0xce, 0xa1, 0xd5, 0x7f, 0x40, 0x3d, 0x6b, - 0xff, 0x58, 0x6e, 0xbd, 0x63, 0xd9, 0x51, 0xd3, 0x1c, 0x98, 0x51, 0x8b, 0xbc, 0x07, 0x53, 0x86, - 0xbe, 0x42, 0xbd, 0x60, 0x1c, 0xc3, 0x02, 0x3f, 0x1f, 0xbd, 0xb2, 0x1c, 0x55, 0xc7, 0x04, 0x18, - 0xd9, 0x05, 
0x30, 0x22, 0xe8, 0xd2, 0xb9, 0xcd, 0x21, 0x31, 0xe0, 0x18, 0x10, 0x41, 0x68, 0x1c, - 0x32, 0x56, 0x8e, 0x5a, 0x3e, 0x0f, 0x2a, 0x1f, 0x39, 0x77, 0x55, 0x5d, 0x8c, 0x60, 0x34, 0x07, - 0xa6, 0x13, 0xd7, 0x1b, 0x91, 0xaf, 0x40, 0xdd, 0xed, 0xc7, 0xa6, 0xd3, 0x06, 0x8f, 0xa6, 0xad, - 0xdf, 0x97, 0x65, 0x8f, 0x4f, 0x5a, 0xd3, 0x9b, 0x6e, 0xd7, 0x32, 0x54, 0x01, 0x86, 0xec, 0x44, - 0x83, 0x2a, 0x3f, 0xd4, 0xa8, 0x2e, 0x37, 0xe2, 0x6b, 0x07, 0xbf, 0x7f, 0xc4, 0x47, 0x49, 0xd1, - 0x7e, 0xa9, 0x0c, 0x91, 0xc7, 0x95, 0xf8, 0x50, 0x15, 0x87, 0x36, 0xe4, 0xcc, 0x7d, 0xa1, 0xe7, - 0x43, 0xa4, 0x28, 0xd2, 0x85, 0xd2, 0x07, 0xee, 0x5e, 0xee, 0x89, 0x3b, 0x96, 0xcd, 0x40, 0xd8, - 0xca, 0x62, 0x05, 0xc8, 0x24, 0x90, 0xbf, 0x56, 0x80, 0x17, 0xfd, 0xb4, 0xea, 0x2b, 0x87, 0x03, - 0xe6, 0xd7, 0xf1, 0xd3, 0xca, 0xb4, 0x0c, 0x7b, 0x1e, 0x45, 0xc6, 0xe1, 0xb6, 0xb0, 0xfe, 0x17, - 0xae, 0x50, 0x39, 0x9c, 0xd6, 0x73, 0x5e, 0xbe, 0x9a, 0xec, 0xff, 0x64, 0x19, 0x4a, 0x51, 0xda, - 0xaf, 0x14, 0xa1, 0x19, 0x9b, 0xad, 0x73, 0xdf, 0x99, 0xf5, 0x28, 0x75, 0x67, 0xd6, 0xf6, 0xf8, - 0x91, 0x01, 0x51, 0xab, 0x2e, 0xfa, 0xda, 0xac, 0x7f, 0x5e, 0x84, 0xd2, 0xee, 0xea, 0x5a, 0x72, - 0xd3, 0x5a, 0x78, 0x06, 0x9b, 0xd6, 0x03, 0xa8, 0xed, 0x0d, 0x2c, 0x3b, 0xb0, 0x9c, 0xdc, 0xf9, - 0x56, 0xd4, 0x15, 0x63, 0xd2, 0xd7, 0x21, 0x50, 0x51, 0xc1, 0x93, 0x2e, 0xd4, 0xba, 0x22, 0xe1, - 0x65, 0xee, 0x78, 0x49, 0x99, 0x38, 0x53, 0x08, 0x92, 0x7f, 0x50, 0xa1, 0x6b, 0xc7, 0x50, 0xdd, - 0x5d, 0x95, 0x6a, 0xff, 0xb3, 0xed, 0x4d, 0xed, 0x17, 0x21, 0xd4, 0x02, 0x9e, 0xbd, 0xf0, 0xdf, - 0x29, 0x40, 0x52, 0xf1, 0x79, 0xf6, 0xa3, 0xe9, 0x30, 0x3d, 0x9a, 0x56, 0x27, 0xf1, 0xf1, 0x65, - 0x0f, 0x28, 0xed, 0xdf, 0x15, 0x20, 0x75, 0xd2, 0x8e, 0xbc, 0x21, 0x73, 0xa7, 0x25, 0x03, 0xd3, - 0x54, 0xee, 0x34, 0x92, 0xe4, 0x8e, 0xe5, 0x50, 0xfb, 0x98, 0x6d, 0xd7, 0xe2, 0x0e, 0x34, 0xd9, - 0xfc, 0x7b, 0xe3, 0x6f, 0xd7, 0xb2, 0xdc, 0x71, 0x32, 0x78, 0x32, 0x4e, 0xc2, 0xa4, 0x5c, 0xed, - 0x1f, 0x15, 0xa1, 0xfa, 0xcc, 0x0e, 0xfe, 0xd3, 
0x44, 0x3c, 0xeb, 0x4a, 0xce, 0xd9, 0x7e, 0x64, - 0x34, 0x6b, 0x2f, 0x15, 0xcd, 0x9a, 0xf7, 0x4e, 0xef, 0xa7, 0xc4, 0xb2, 0xfe, 0x9b, 0x02, 0xc8, - 0xb5, 0x66, 0xc3, 0xf1, 0x03, 0xdd, 0x31, 0x28, 0x31, 0xc2, 0x85, 0x2d, 0x6f, 0xd0, 0x94, 0x0c, - 0x2c, 0x14, 0xba, 0x0c, 0xff, 0xad, 0x16, 0x32, 0xf2, 0x93, 0x50, 0x3f, 0x70, 0xfd, 0x80, 0x2f, - 0x5e, 0xc5, 0xa4, 0xc9, 0xec, 0xb6, 0x2c, 0xc7, 0x90, 0x23, 0xed, 0xce, 0xae, 0x8c, 0x76, 0x67, - 0x6b, 0xbf, 0x59, 0x84, 0xa9, 0x4f, 0x4b, 0xf6, 0x82, 0xac, 0xe8, 0xdf, 0x52, 0xce, 0xe8, 0xdf, - 0xf2, 0x79, 0xa2, 0x7f, 0xb5, 0xef, 0x17, 0x00, 0x9e, 0x59, 0xea, 0x04, 0x33, 0x19, 0x98, 0x9b, - 0x7b, 0x5c, 0x65, 0x87, 0xe5, 0xfe, 0xfd, 0x8a, 0x7a, 0x24, 0x1e, 0x94, 0xfb, 0x71, 0x01, 0x66, - 0xf4, 0x44, 0xa0, 0x6b, 0x6e, 0x7d, 0x39, 0x15, 0x37, 0x1b, 0xc6, 0x69, 0x25, 0xcb, 0x31, 0x25, - 0x96, 0xbc, 0x19, 0xa5, 0xed, 0xbe, 0x17, 0x0d, 0xfb, 0xa1, 0x7c, 0xdb, 0x5c, 0x77, 0x4b, 0x70, - 0x3e, 0x25, 0xb0, 0xb8, 0x34, 0x91, 0xc0, 0xe2, 0xf8, 0x91, 0xc9, 0xf2, 0x13, 0x8f, 0x4c, 0x1e, - 0x41, 0x63, 0xdf, 0x73, 0x7b, 0x3c, 0x76, 0x57, 0x5e, 0x8c, 0x7d, 0x2b, 0xc7, 0x42, 0xd9, 0xdb, - 0xb3, 0x1c, 0x6a, 0xf2, 0xb8, 0xe0, 0xd0, 0x70, 0xb5, 0xa6, 0xf0, 0x31, 0x12, 0xc5, 0x6d, 0xfd, - 0xae, 0x90, 0x5a, 0x9d, 0xa4, 0xd4, 0x70, 0x2e, 0xd9, 0x11, 0xe8, 0xa8, 0xc4, 0x24, 0xe3, 0x75, - 0x6b, 0xcf, 0x26, 0x5e, 0x57, 0xfb, 0x0b, 0x35, 0x35, 0x81, 0x3d, 0x77, 0x19, 0x62, 0x3f, 0x3b, - 0xe8, 0xde, 0xa5, 0x43, 0xa7, 0xd0, 0xeb, 0xcf, 0xf0, 0x14, 0x7a, 0x63, 0x32, 0xa7, 0xd0, 0x21, - 0xdf, 0x29, 0xf4, 0xe6, 0x84, 0x4e, 0xa1, 0x4f, 0x4d, 0xea, 0x14, 0xfa, 0xf4, 0x58, 0xa7, 0xd0, - 0x67, 0xce, 0x74, 0x0a, 0xfd, 0xa4, 0x04, 0xa9, 0xcd, 0xf8, 0x67, 0x8e, 0xb7, 0xff, 0xa7, 0x1c, - 0x6f, 0xdf, 0x29, 0x42, 0x34, 0x11, 0x9f, 0x33, 0x30, 0xe9, 0x5d, 0xa8, 0xf7, 0xf4, 0x47, 0x3c, - 0x70, 0x3a, 0xcf, 0xc5, 0xca, 0x5b, 0x12, 0x03, 0x43, 0x34, 0xe2, 0x03, 0x58, 0xe1, 0xe5, 0x06, - 0xb9, 0x5d, 0x18, 0xd1, 0x3d, 0x09, 0xc2, 0x48, 0x1a, 0xfd, 0xc7, 0x98, 0x18, 0xed, 
0x5f, 0x17, - 0x41, 0xde, 0x82, 0x41, 0x28, 0x54, 0xf6, 0xad, 0x47, 0xd4, 0xcc, 0x1d, 0xee, 0x1c, 0xbb, 0xee, - 0x5e, 0xf8, 0x68, 0x78, 0x01, 0x0a, 0x74, 0x6e, 0x7c, 0x17, 0x3e, 0x37, 0xd9, 0x7f, 0x39, 0x8c, - 0xef, 0x71, 0xdf, 0x9d, 0x34, 0xbe, 0x8b, 0x22, 0x54, 0x32, 0x84, 0xad, 0x9f, 0x87, 0x5f, 0xe4, - 0x76, 0x31, 0x26, 0xc2, 0x38, 0x94, 0xad, 0xdf, 0x17, 0x69, 0x28, 0xa4, 0x8c, 0xf6, 0x2f, 0x7c, - 0xef, 0x87, 0xd7, 0x5f, 0xf8, 0xfe, 0x0f, 0xaf, 0xbf, 0xf0, 0x83, 0x1f, 0x5e, 0x7f, 0xe1, 0x97, - 0x4e, 0xaf, 0x17, 0xbe, 0x77, 0x7a, 0xbd, 0xf0, 0xfd, 0xd3, 0xeb, 0x85, 0x1f, 0x9c, 0x5e, 0x2f, - 0xfc, 0xa7, 0xd3, 0xeb, 0x85, 0xbf, 0xfc, 0x9f, 0xaf, 0xbf, 0xf0, 0xf3, 0x5f, 0x8e, 0x9a, 0xb0, - 0xa8, 0x9a, 0xb0, 0xa8, 0x04, 0x2e, 0xf6, 0x0f, 0xbb, 0x8b, 0xac, 0x09, 0x51, 0x89, 0x6a, 0xc2, - 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xea, 0x57, 0x1d, 0x48, 0x12, 0x9e, 0x00, 0x00, + 0xd5, 0x5b, 0x55, 0xcd, 0x19, 0xca, 0x31, 0xd6, 0xde, 0x4d, 0xa0, 0x0d, 0x92, 0x20, 0x81, 0x9f, + 0x0c, 0x04, 0x4e, 0x90, 0x20, 0x80, 0x1f, 0x0c, 0xe7, 0x21, 0xc8, 0xe6, 0x21, 0x40, 0x7e, 0x1c, + 0x04, 0xc9, 0xe6, 0x7f, 0x11, 0x04, 0x88, 0xf2, 0x42, 0x64, 0x19, 0xe4, 0x21, 0x01, 0x1c, 0x18, + 0x31, 0x12, 0x3b, 0x03, 0x23, 0x1b, 0xdc, 0xbf, 0xfa, 0xeb, 0xea, 0x19, 0xb2, 0xab, 0x39, 0x1a, + 0xc5, 0x7a, 0xeb, 0xbe, 0xe7, 0xdc, 0xef, 0xdc, 0xba, 0x75, 0xeb, 0xde, 0x73, 0xcf, 0x39, 0xf7, + 0x5c, 0x58, 0xef, 0x5a, 0xc1, 0xc1, 0x60, 0x6f, 0xc1, 0x70, 0x7b, 0x8b, 0xce, 0xa0, 0xa7, 0xf7, + 0x3d, 0xf7, 0x7d, 0xfe, 0x63, 0xdf, 0x76, 0x1f, 0x2c, 0xf6, 0x0f, 0xbb, 0x8b, 0x7a, 0xdf, 0xf2, + 0xa3, 0x92, 0xa3, 0x57, 0x74, 0xbb, 0x7f, 0xa0, 0xbf, 0xb2, 0xd8, 0xa5, 0x0e, 0xf5, 0xf4, 0x80, + 0x9a, 0x0b, 0x7d, 0xcf, 0x0d, 0x5c, 0xf2, 0xa5, 0x08, 0x68, 0x41, 0x01, 0x2d, 0xa8, 0x6a, 0x0b, + 0xfd, 0xc3, 0xee, 0x02, 0x03, 0x8a, 0x4a, 0x14, 0xd0, 0xb5, 0x9f, 0x8e, 0xb5, 0xa0, 0xeb, 0x76, + 0xdd, 0x45, 0x8e, 0xb7, 0x37, 0xd8, 0xe7, 0xff, 0xf8, 0x1f, 0xfe, 0x4b, 0xc8, 0xb9, 0xa6, 0x1d, + 0xbe, 0xee, 0x2f, 0x58, 0x2e, 0x6b, 
0xd6, 0xa2, 0xe1, 0x7a, 0x74, 0xf1, 0x68, 0xa8, 0x2d, 0xd7, + 0xbe, 0x18, 0xf1, 0xf4, 0x74, 0xe3, 0xc0, 0x72, 0xa8, 0x77, 0xac, 0x9e, 0x65, 0xd1, 0xa3, 0xbe, + 0x3b, 0xf0, 0x0c, 0x7a, 0xae, 0x5a, 0xfe, 0x62, 0x8f, 0x06, 0x7a, 0x96, 0xac, 0xc5, 0x51, 0xb5, + 0xbc, 0x81, 0x13, 0x58, 0xbd, 0x61, 0x31, 0xaf, 0x3d, 0xa9, 0x82, 0x6f, 0x1c, 0xd0, 0x9e, 0x3e, + 0x54, 0xef, 0x67, 0x46, 0xd5, 0x1b, 0x04, 0x96, 0xbd, 0x68, 0x39, 0x81, 0x1f, 0x78, 0xe9, 0x4a, + 0xda, 0xef, 0x00, 0x5c, 0x5a, 0xde, 0xf3, 0x03, 0x4f, 0x37, 0x82, 0x6d, 0xd7, 0xdc, 0xa1, 0xbd, + 0xbe, 0xad, 0x07, 0x94, 0x1c, 0x42, 0x9d, 0x3d, 0x90, 0xa9, 0x07, 0xfa, 0x7c, 0xe1, 0x46, 0xe1, + 0x66, 0x73, 0x69, 0x79, 0x61, 0xcc, 0x17, 0xb8, 0xb0, 0x25, 0x81, 0xda, 0x53, 0xa7, 0x27, 0xad, + 0xba, 0xfa, 0x87, 0xa1, 0x00, 0xf2, 0xeb, 0x05, 0x98, 0x72, 0x5c, 0x93, 0x76, 0xa8, 0x4d, 0x8d, + 0xc0, 0xf5, 0xe6, 0x8b, 0x37, 0x4a, 0x37, 0x9b, 0x4b, 0xdf, 0x18, 0x5b, 0x62, 0xc6, 0x13, 0x2d, + 0xdc, 0x8d, 0x09, 0xb8, 0xe5, 0x04, 0xde, 0x71, 0xfb, 0xf2, 0xf7, 0x4f, 0x5a, 0xcf, 0x9d, 0x9e, + 0xb4, 0xa6, 0xe2, 0x24, 0x4c, 0xb4, 0x84, 0xec, 0x42, 0x33, 0x70, 0x6d, 0xd6, 0x65, 0x96, 0xeb, + 0xf8, 0xf3, 0x25, 0xde, 0xb0, 0xeb, 0x0b, 0xa2, 0xab, 0x99, 0xf8, 0x05, 0x36, 0xc6, 0x16, 0x8e, + 0x5e, 0x59, 0xd8, 0x09, 0xd9, 0xda, 0x97, 0x24, 0x70, 0x33, 0x2a, 0xf3, 0x31, 0x8e, 0x43, 0x28, + 0xcc, 0xfa, 0xd4, 0x18, 0x78, 0x56, 0x70, 0xbc, 0xe2, 0x3a, 0x01, 0x7d, 0x18, 0xcc, 0x97, 0x79, + 0x2f, 0x7f, 0x3e, 0x0b, 0x7a, 0xdb, 0x35, 0x3b, 0x49, 0xee, 0xf6, 0xa5, 0xd3, 0x93, 0xd6, 0x6c, + 0xaa, 0x10, 0xd3, 0x98, 0xc4, 0x81, 0x39, 0xab, 0xa7, 0x77, 0xe9, 0xf6, 0xc0, 0xb6, 0x3b, 0xd4, + 0xf0, 0x68, 0xe0, 0xcf, 0x57, 0xf8, 0x23, 0xdc, 0xcc, 0x92, 0xb3, 0xe9, 0x1a, 0xba, 0x7d, 0x6f, + 0xef, 0x7d, 0x6a, 0x04, 0x48, 0xf7, 0xa9, 0x47, 0x1d, 0x83, 0xb6, 0xe7, 0xe5, 0xc3, 0xcc, 0x6d, + 0xa4, 0x90, 0x70, 0x08, 0x9b, 0xac, 0xc3, 0xf3, 0x7d, 0xcf, 0x72, 0x79, 0x13, 0x6c, 0xdd, 0xf7, + 0xef, 0xea, 0x3d, 0x3a, 0x5f, 0xbd, 0x51, 0xb8, 0xd9, 0x68, 0x5f, 0x95, 
0x30, 0xcf, 0x6f, 0xa7, + 0x19, 0x70, 0xb8, 0x0e, 0xb9, 0x09, 0x75, 0x55, 0x38, 0x5f, 0xbb, 0x51, 0xb8, 0x59, 0x11, 0x63, + 0x47, 0xd5, 0xc5, 0x90, 0x4a, 0xd6, 0xa0, 0xae, 0xef, 0xef, 0x5b, 0x0e, 0xe3, 0xac, 0xf3, 0x2e, + 0x7c, 0x29, 0xeb, 0xd1, 0x96, 0x25, 0x8f, 0xc0, 0x51, 0xff, 0x30, 0xac, 0x4b, 0xde, 0x02, 0xe2, + 0x53, 0xef, 0xc8, 0x32, 0xe8, 0xb2, 0x61, 0xb8, 0x03, 0x27, 0xe0, 0x6d, 0x6f, 0xf0, 0xb6, 0x5f, + 0x93, 0x6d, 0x27, 0x9d, 0x21, 0x0e, 0xcc, 0xa8, 0x45, 0xde, 0x84, 0x39, 0xf9, 0xad, 0x46, 0xbd, + 0x00, 0x1c, 0xe9, 0x32, 0xeb, 0x48, 0x4c, 0xd1, 0x70, 0x88, 0x9b, 0x98, 0xf0, 0x92, 0x3e, 0x08, + 0xdc, 0x1e, 0x83, 0x4c, 0x0a, 0xdd, 0x71, 0x0f, 0xa9, 0x33, 0xdf, 0xbc, 0x51, 0xb8, 0x59, 0x6f, + 0xdf, 0x38, 0x3d, 0x69, 0xbd, 0xb4, 0xfc, 0x18, 0x3e, 0x7c, 0x2c, 0x0a, 0xb9, 0x07, 0x0d, 0xd3, + 0xf1, 0xb7, 0x5d, 0xdb, 0x32, 0x8e, 0xe7, 0xa7, 0x78, 0x03, 0x5f, 0x91, 0x8f, 0xda, 0x58, 0xbd, + 0xdb, 0x11, 0x84, 0x47, 0x27, 0xad, 0x97, 0x86, 0xa7, 0xd4, 0x85, 0x90, 0x8e, 0x11, 0x06, 0xd9, + 0xe2, 0x80, 0x2b, 0xae, 0xb3, 0x6f, 0x75, 0xe7, 0xa7, 0xf9, 0xdb, 0xb8, 0x31, 0x62, 0x40, 0xaf, + 0xde, 0xed, 0x08, 0xbe, 0xf6, 0xb4, 0x14, 0x27, 0xfe, 0x62, 0x84, 0x40, 0x4c, 0x98, 0x51, 0x93, + 0xf1, 0x8a, 0xad, 0x5b, 0x3d, 0x7f, 0x7e, 0x86, 0x0f, 0xde, 0x1f, 0x1f, 0x81, 0x89, 0x71, 0xe6, + 0xf6, 0x15, 0xf9, 0x28, 0x33, 0x89, 0x62, 0x1f, 0x53, 0x98, 0xd7, 0xde, 0x80, 0xe7, 0x87, 0xe6, + 0x06, 0x32, 0x07, 0xa5, 0x43, 0x7a, 0xcc, 0xa7, 0xbe, 0x06, 0xb2, 0x9f, 0xe4, 0x32, 0x54, 0x8e, + 0x74, 0x7b, 0x40, 0xe7, 0x8b, 0xbc, 0x4c, 0xfc, 0xf9, 0xd9, 0xe2, 0xeb, 0x05, 0xed, 0x6f, 0x96, + 0x60, 0x4a, 0xcd, 0x38, 0x1d, 0xcb, 0x39, 0x24, 0x6f, 0x43, 0xc9, 0x76, 0xbb, 0x72, 0xde, 0xfc, + 0xf9, 0xb1, 0x67, 0xb1, 0x4d, 0xb7, 0xdb, 0xae, 0x9d, 0x9e, 0xb4, 0x4a, 0x9b, 0x6e, 0x17, 0x19, + 0x22, 0x31, 0xa0, 0x72, 0xa8, 0xef, 0x1f, 0xea, 0xbc, 0x0d, 0xcd, 0xa5, 0xf6, 0xd8, 0xd0, 0x77, + 0x18, 0x0a, 0x6b, 0x6b, 0xbb, 0x71, 0x7a, 0xd2, 0xaa, 0xf0, 0xbf, 0x28, 0xb0, 0x89, 0x0b, 0x8d, + 0x3d, 0x5b, 
0x37, 0x0e, 0x0f, 0x5c, 0x9b, 0xce, 0x97, 0x72, 0x0a, 0x6a, 0x2b, 0x24, 0xf1, 0x9a, + 0xc3, 0xbf, 0x18, 0xc9, 0x20, 0x06, 0x54, 0x07, 0xa6, 0x6f, 0x39, 0x87, 0x72, 0x0e, 0x7c, 0x63, + 0x6c, 0x69, 0xbb, 0xab, 0xfc, 0x99, 0xe0, 0xf4, 0xa4, 0x55, 0x15, 0xbf, 0x51, 0x42, 0x6b, 0x7f, + 0x38, 0x05, 0x33, 0xea, 0x25, 0xdd, 0xa7, 0x5e, 0x40, 0x1f, 0x92, 0x1b, 0x50, 0x76, 0xd8, 0xa7, + 0xc9, 0x5f, 0x72, 0x7b, 0x4a, 0x0e, 0x97, 0x32, 0xff, 0x24, 0x39, 0x85, 0xb5, 0x4c, 0x0c, 0x15, + 0xd9, 0xe1, 0xe3, 0xb7, 0xac, 0xc3, 0x61, 0x44, 0xcb, 0xc4, 0x6f, 0x94, 0xd0, 0xe4, 0x5d, 0x28, + 0xf3, 0x87, 0x17, 0x5d, 0xfd, 0x95, 0xf1, 0x45, 0xb0, 0x47, 0xaf, 0xb3, 0x27, 0xe0, 0x0f, 0xce, + 0x41, 0xd9, 0x50, 0x1c, 0x98, 0xfb, 0xb2, 0x63, 0x7f, 0x3e, 0x47, 0xc7, 0xae, 0x89, 0xa1, 0xb8, + 0xbb, 0xba, 0x86, 0x0c, 0x91, 0xfc, 0xe5, 0x02, 0x3c, 0x6f, 0xb8, 0x4e, 0xa0, 0x33, 0x3d, 0x43, + 0x2d, 0xb2, 0xf3, 0x15, 0x2e, 0xe7, 0xad, 0xb1, 0xe5, 0xac, 0xa4, 0x11, 0xdb, 0x2f, 0xb0, 0x35, + 0x63, 0xa8, 0x18, 0x87, 0x65, 0x93, 0xbf, 0x5a, 0x80, 0x17, 0xd8, 0x5c, 0x3e, 0xc4, 0xcc, 0x57, + 0xa0, 0xc9, 0xb6, 0xea, 0xea, 0xe9, 0x49, 0xeb, 0x85, 0x8d, 0x2c, 0x61, 0x98, 0xdd, 0x06, 0xd6, + 0xba, 0x4b, 0xfa, 0xb0, 0x5a, 0xc2, 0x57, 0xb7, 0xe6, 0xd2, 0xe6, 0x24, 0x55, 0x9d, 0xf6, 0x67, + 0xe4, 0x50, 0xce, 0xd2, 0xec, 0x30, 0xab, 0x15, 0xe4, 0x16, 0xd4, 0x8e, 0x5c, 0x7b, 0xd0, 0xa3, + 0xfe, 0x7c, 0x9d, 0x4f, 0xb1, 0xd7, 0xb2, 0xa6, 0xd8, 0xfb, 0x9c, 0xa5, 0x3d, 0x2b, 0xe1, 0x6b, + 0xe2, 0xbf, 0x8f, 0xaa, 0x2e, 0xb1, 0xa0, 0x6a, 0x5b, 0x3d, 0x2b, 0xf0, 0xf9, 0xc2, 0xd9, 0x5c, + 0xba, 0x35, 0xf6, 0x63, 0x89, 0x4f, 0x74, 0x93, 0x83, 0x89, 0xaf, 0x46, 0xfc, 0x46, 0x29, 0x80, + 0x4d, 0x85, 0xbe, 0xa1, 0xdb, 0x62, 0x61, 0x6d, 0x2e, 0x7d, 0x75, 0xfc, 0xcf, 0x86, 0xa1, 0xb4, + 0xa7, 0xe5, 0x33, 0x55, 0xf8, 0x5f, 0x14, 0xd8, 0xe4, 0x17, 0x61, 0x26, 0xf1, 0x36, 0xfd, 0xf9, + 0x26, 0xef, 0x9d, 0x97, 0xb3, 0x7a, 0x27, 0xe4, 0x8a, 0x56, 0x9e, 0xc4, 0x08, 0xf1, 0x31, 0x05, + 0x46, 0xee, 0x40, 0xdd, 0xb7, 0x4c, 0x6a, 0xe8, 
0x9e, 0x3f, 0x3f, 0x75, 0x16, 0xe0, 0x39, 0x09, + 0x5c, 0xef, 0xc8, 0x6a, 0x18, 0x02, 0x90, 0x05, 0x80, 0xbe, 0xee, 0x05, 0x96, 0x50, 0x54, 0xa7, + 0xb9, 0xd2, 0x34, 0x73, 0x7a, 0xd2, 0x82, 0xed, 0xb0, 0x14, 0x63, 0x1c, 0x8c, 0x9f, 0xd5, 0xdd, + 0x70, 0xfa, 0x83, 0x40, 0x2c, 0xac, 0x0d, 0xc1, 0xdf, 0x09, 0x4b, 0x31, 0xc6, 0x41, 0x7e, 0xbb, + 0x00, 0x9f, 0x89, 0xfe, 0x0e, 0x7f, 0x64, 0xb3, 0x13, 0xff, 0xc8, 0x5a, 0xa7, 0x27, 0xad, 0xcf, + 0x74, 0x46, 0x8b, 0xc4, 0xc7, 0xb5, 0x87, 0x7c, 0x58, 0x80, 0x99, 0x41, 0xdf, 0xd4, 0x03, 0xda, + 0x09, 0xd8, 0x8e, 0xa7, 0x7b, 0x3c, 0x3f, 0xc7, 0x9b, 0xb8, 0x3e, 0xfe, 0x2c, 0x98, 0x80, 0x8b, + 0x5e, 0x73, 0xb2, 0x1c, 0x53, 0x62, 0xb5, 0xb7, 0x61, 0x7a, 0x79, 0x10, 0x1c, 0xb8, 0x9e, 0xf5, + 0x01, 0x57, 0xff, 0xc9, 0x1a, 0x54, 0x02, 0xae, 0xc6, 0x09, 0x0d, 0xe1, 0x73, 0x59, 0x2f, 0x5d, + 0xa8, 0xd4, 0x77, 0xe8, 0xb1, 0xd2, 0x4b, 0xc4, 0x4a, 0x2d, 0xd4, 0x3a, 0x51, 0x5d, 0xfb, 0xb3, + 0x05, 0xa8, 0xb5, 0x75, 0xe3, 0xd0, 0xdd, 0xdf, 0x27, 0xef, 0x40, 0xdd, 0x72, 0x02, 0xea, 0x1d, + 0xe9, 0xb6, 0x84, 0x5d, 0x88, 0xc1, 0x86, 0x1b, 0xc2, 0xe8, 0xf1, 0xd8, 0xee, 0x8b, 0x09, 0x5a, + 0x1d, 0xc8, 0x5d, 0x0b, 0xd7, 0x8c, 0x37, 0x24, 0x06, 0x86, 0x68, 0xa4, 0x05, 0x15, 0x3f, 0xa0, + 0x7d, 0x9f, 0xaf, 0x81, 0xd3, 0xa2, 0x19, 0x1d, 0x56, 0x80, 0xa2, 0x5c, 0xfb, 0x1b, 0x05, 0x68, + 0xb4, 0x75, 0xdf, 0x32, 0xd8, 0x53, 0x92, 0x15, 0x28, 0x0f, 0x7c, 0xea, 0x9d, 0xef, 0xd9, 0xf8, + 0xb2, 0xb5, 0xeb, 0x53, 0x0f, 0x79, 0x65, 0x72, 0x0f, 0xea, 0x7d, 0xdd, 0xf7, 0x1f, 0xb8, 0x9e, + 0x29, 0x97, 0xde, 0x33, 0x02, 0x89, 0x6d, 0x82, 0xac, 0x8a, 0x21, 0x88, 0xd6, 0x84, 0x48, 0xf7, + 0xd0, 0x7e, 0xbf, 0x00, 0x97, 0xda, 0x83, 0xfd, 0x7d, 0xea, 0x49, 0xad, 0x58, 0xea, 0x9b, 0x14, + 0x2a, 0x1e, 0x35, 0x2d, 0x5f, 0xb6, 0x7d, 0x75, 0xec, 0x81, 0x82, 0x0c, 0x45, 0xaa, 0xb7, 0xbc, + 0xbf, 0x78, 0x01, 0x0a, 0x74, 0x32, 0x80, 0xc6, 0xfb, 0x94, 0xed, 0xc6, 0xa9, 0xde, 0x93, 0x4f, + 0x77, 0x7b, 0x6c, 0x51, 0x6f, 0xd1, 0xa0, 0xc3, 0x91, 0xe2, 0xda, 0x74, 0x58, 0x88, 
0x91, 0x24, + 0xed, 0x77, 0x2a, 0x30, 0xb5, 0xe2, 0xf6, 0xf6, 0x2c, 0x87, 0x9a, 0xb7, 0xcc, 0x2e, 0x25, 0xef, + 0x41, 0x99, 0x9a, 0x5d, 0x2a, 0x9f, 0x76, 0x7c, 0xc5, 0x83, 0x81, 0x45, 0xea, 0x13, 0xfb, 0x87, + 0x1c, 0x98, 0x6c, 0xc2, 0xcc, 0xbe, 0xe7, 0xf6, 0xc4, 0x5c, 0xbe, 0x73, 0xdc, 0x97, 0xba, 0x73, + 0xfb, 0xc7, 0xd5, 0x87, 0xb3, 0x96, 0xa0, 0x3e, 0x3a, 0x69, 0x41, 0xf4, 0x0f, 0x53, 0x75, 0xc9, + 0x3b, 0x30, 0x1f, 0x95, 0x84, 0x93, 0xda, 0x0a, 0xdb, 0xce, 0x70, 0xdd, 0xa9, 0xd2, 0x7e, 0xe9, + 0xf4, 0xa4, 0x35, 0xbf, 0x36, 0x82, 0x07, 0x47, 0xd6, 0x66, 0x53, 0xc5, 0x5c, 0x44, 0x14, 0x0b, + 0x8d, 0x54, 0x99, 0x26, 0xb4, 0x82, 0xf1, 0x7d, 0xdf, 0x5a, 0x4a, 0x04, 0x0e, 0x09, 0x25, 0x6b, + 0x30, 0x15, 0xb8, 0xb1, 0xfe, 0xaa, 0xf0, 0xfe, 0xd2, 0x94, 0xa1, 0x62, 0xc7, 0x1d, 0xd9, 0x5b, + 0x89, 0x7a, 0x04, 0xe1, 0x8a, 0xfa, 0x9f, 0xea, 0xa9, 0x2a, 0xef, 0xa9, 0x6b, 0xa7, 0x27, 0xad, + 0x2b, 0x3b, 0x99, 0x1c, 0x38, 0xa2, 0x26, 0xf9, 0xd5, 0x02, 0xcc, 0x28, 0x92, 0xec, 0xa3, 0xda, + 0x24, 0xfb, 0x88, 0xb0, 0x11, 0xb1, 0x93, 0x10, 0x80, 0x29, 0x81, 0xda, 0xef, 0x56, 0xa1, 0x11, + 0x4e, 0xf5, 0xe4, 0xb3, 0x50, 0xe1, 0x26, 0x08, 0xa9, 0xc1, 0x87, 0x6b, 0x38, 0xb7, 0x54, 0xa0, + 0xa0, 0x91, 0xcf, 0x41, 0xcd, 0x70, 0x7b, 0x3d, 0xdd, 0x31, 0xb9, 0x59, 0xa9, 0xd1, 0x6e, 0x32, + 0xd5, 0x65, 0x45, 0x14, 0xa1, 0xa2, 0x91, 0x97, 0xa0, 0xac, 0x7b, 0x5d, 0x61, 0xe1, 0x69, 0x88, + 0xf9, 0x68, 0xd9, 0xeb, 0xfa, 0xc8, 0x4b, 0xc9, 0x97, 0xa1, 0x44, 0x9d, 0xa3, 0xf9, 0xf2, 0x68, + 0xdd, 0xe8, 0x96, 0x73, 0x74, 0x5f, 0xf7, 0xda, 0x4d, 0xd9, 0x86, 0xd2, 0x2d, 0xe7, 0x08, 0x59, + 0x1d, 0xb2, 0x09, 0x35, 0xea, 0x1c, 0xb1, 0x77, 0x2f, 0x4d, 0x2f, 0x3f, 0x36, 0xa2, 0x3a, 0x63, + 0x91, 0xdb, 0x84, 0x50, 0xc3, 0x92, 0xc5, 0xa8, 0x20, 0xc8, 0xd7, 0x61, 0x4a, 0x28, 0x5b, 0x5b, + 0xec, 0x9d, 0xf8, 0xf3, 0x55, 0x0e, 0xd9, 0x1a, 0xad, 0xad, 0x71, 0xbe, 0xc8, 0xd4, 0x15, 0x2b, + 0xf4, 0x31, 0x01, 0x45, 0xbe, 0x0e, 0x0d, 0xb5, 0x33, 0x56, 0x6f, 0x36, 0xd3, 0x4a, 0xa4, 0xb6, + 0xd3, 0x48, 0xbf, 0x39, 
0xb0, 0x3c, 0xda, 0xa3, 0x4e, 0xe0, 0xb7, 0x9f, 0x57, 0x76, 0x03, 0x45, + 0xf5, 0x31, 0x42, 0x23, 0x7b, 0xc3, 0xe6, 0x2e, 0x61, 0xab, 0xf9, 0xec, 0x88, 0x59, 0x7d, 0x0c, + 0x5b, 0xd7, 0x37, 0x60, 0x36, 0xb4, 0x47, 0x49, 0x93, 0x86, 0xb0, 0xde, 0x7c, 0x91, 0x55, 0xdf, + 0x48, 0x92, 0x1e, 0x9d, 0xb4, 0x5e, 0xce, 0x30, 0x6a, 0x44, 0x0c, 0x98, 0x06, 0x23, 0x1f, 0xc0, + 0x8c, 0x47, 0x75, 0xd3, 0x72, 0xa8, 0xef, 0x6f, 0x7b, 0xee, 0x5e, 0x7e, 0xcd, 0x93, 0xa3, 0x88, + 0x61, 0x8f, 0x09, 0x64, 0x4c, 0x49, 0x22, 0x0f, 0x60, 0xda, 0xb6, 0x8e, 0x68, 0x24, 0xba, 0x39, + 0x11, 0xd1, 0xcf, 0x9f, 0x9e, 0xb4, 0xa6, 0x37, 0xe3, 0xc0, 0x98, 0x94, 0xa3, 0xfd, 0xdd, 0x0a, + 0x0c, 0x6f, 0xbe, 0x92, 0x23, 0xa5, 0x30, 0xe9, 0x91, 0x92, 0x7e, 0x8b, 0x62, 0xcd, 0x78, 0x5d, + 0x56, 0x9b, 0xc0, 0x9b, 0xcc, 0x18, 0x8d, 0xa5, 0x49, 0x8f, 0xc6, 0x67, 0x66, 0xc2, 0x18, 0x1e, + 0xb6, 0xd5, 0x8f, 0x6f, 0xd8, 0xd6, 0x9e, 0xd2, 0xb0, 0xfd, 0x6e, 0x19, 0x66, 0x56, 0x75, 0xda, + 0x73, 0x9d, 0x27, 0xee, 0xbf, 0x0b, 0xcf, 0xc4, 0xfe, 0xfb, 0x26, 0xd4, 0x3d, 0xda, 0xb7, 0x2d, + 0x43, 0x17, 0x6a, 0xb6, 0xb4, 0x77, 0xa3, 0x2c, 0xc3, 0x90, 0x3a, 0xc2, 0xee, 0x52, 0x7a, 0x26, + 0xed, 0x2e, 0xe5, 0x8f, 0xdf, 0xee, 0xa2, 0xfd, 0x6a, 0x11, 0xb8, 0x4a, 0x4a, 0x6e, 0x40, 0x99, + 0xa9, 0x5b, 0x69, 0x6b, 0x1f, 0xff, 0x5a, 0x38, 0x85, 0x5c, 0x83, 0x62, 0xe0, 0xca, 0xe9, 0x06, + 0x24, 0xbd, 0xb8, 0xe3, 0x62, 0x31, 0x70, 0xc9, 0x07, 0x00, 0x86, 0xeb, 0x98, 0x96, 0x72, 0x03, + 0xe5, 0x7b, 0xb0, 0x35, 0xd7, 0x7b, 0xa0, 0x7b, 0xe6, 0x4a, 0x88, 0x28, 0x76, 0xde, 0xd1, 0x7f, + 0x8c, 0x49, 0x23, 0x6f, 0x40, 0xd5, 0x75, 0xd6, 0x06, 0xb6, 0xcd, 0x3b, 0xb4, 0xd1, 0xfe, 0xc2, + 0xe9, 0x49, 0xab, 0x7a, 0x8f, 0x97, 0x3c, 0x3a, 0x69, 0x5d, 0x15, 0x3b, 0x19, 0xf6, 0xef, 0x6d, + 0xcf, 0x0a, 0x2c, 0xa7, 0x1b, 0x6e, 0x44, 0x65, 0x35, 0xed, 0xd7, 0x0a, 0xd0, 0x5c, 0xb3, 0x1e, + 0x52, 0xf3, 0x6d, 0xcb, 0x31, 0xdd, 0x07, 0x04, 0xa1, 0x6a, 0x53, 0xa7, 0x1b, 0x1c, 0x8c, 0xb9, + 0x53, 0x14, 0xf6, 0x18, 0x8e, 0x80, 0x12, 0x89, 0x2c, 0x42, 
0x43, 0xec, 0x33, 0x2c, 0xa7, 0xcb, + 0xfb, 0xb0, 0x1e, 0xcd, 0xf4, 0x1d, 0x45, 0xc0, 0x88, 0x47, 0x3b, 0x86, 0xe7, 0x87, 0xba, 0x81, + 0x98, 0x50, 0x0e, 0xf4, 0xae, 0x5a, 0x54, 0xd6, 0xc6, 0xee, 0xe0, 0x1d, 0xbd, 0x1b, 0xeb, 0x5c, + 0xae, 0xcd, 0xed, 0xe8, 0x4c, 0x9b, 0x63, 0xe8, 0xda, 0x1f, 0x15, 0xa0, 0xbe, 0x36, 0x70, 0x0c, + 0xbe, 0x19, 0x7f, 0xb2, 0x15, 0x58, 0xa9, 0x86, 0xc5, 0x4c, 0xd5, 0x70, 0x00, 0xd5, 0xc3, 0x07, + 0xa1, 0xea, 0xd8, 0x5c, 0xda, 0x1a, 0x7f, 0x54, 0xc8, 0x26, 0x2d, 0xdc, 0xe1, 0x78, 0xc2, 0x49, + 0x39, 0x23, 0x1b, 0x54, 0xbd, 0xf3, 0x36, 0x17, 0x2a, 0x85, 0x5d, 0xfb, 0x32, 0x34, 0x63, 0x6c, + 0xe7, 0xf2, 0x57, 0xfc, 0xbd, 0x32, 0x54, 0xd7, 0x3b, 0x9d, 0xe5, 0xed, 0x0d, 0xf2, 0x2a, 0x34, + 0xa5, 0xff, 0xea, 0x6e, 0xd4, 0x07, 0xa1, 0xfb, 0xb2, 0x13, 0x91, 0x30, 0xce, 0xc7, 0x14, 0x6f, + 0x8f, 0xea, 0x76, 0x4f, 0x7e, 0x2c, 0xa1, 0xe2, 0x8d, 0xac, 0x10, 0x05, 0x8d, 0xe8, 0x30, 0xc3, + 0xf6, 0xf2, 0xac, 0x0b, 0xc5, 0x3e, 0x5d, 0x7e, 0x36, 0x67, 0xdc, 0xc9, 0xf3, 0x05, 0x66, 0x37, + 0x01, 0x80, 0x29, 0x40, 0xf2, 0x3a, 0xd4, 0xf5, 0x41, 0x70, 0xc0, 0xb7, 0x4a, 0xe2, 0xdb, 0x78, + 0x89, 0xbb, 0xf7, 0x64, 0xd9, 0xa3, 0x93, 0xd6, 0xd4, 0x1d, 0x6c, 0xbf, 0xaa, 0xfe, 0x63, 0xc8, + 0xcd, 0x1a, 0xa7, 0x6c, 0x03, 0xb2, 0x71, 0x95, 0x73, 0x37, 0x6e, 0x3b, 0x01, 0x80, 0x29, 0x40, + 0xf2, 0x2e, 0x4c, 0x1d, 0xd2, 0xe3, 0x40, 0xdf, 0x93, 0x02, 0xaa, 0xe7, 0x11, 0x30, 0xc7, 0x94, + 0xf5, 0x3b, 0xb1, 0xea, 0x98, 0x00, 0x23, 0x3e, 0x5c, 0x3e, 0xa4, 0xde, 0x1e, 0xf5, 0x5c, 0x69, + 0x67, 0x90, 0x42, 0x6a, 0xe7, 0x11, 0x32, 0x7f, 0x7a, 0xd2, 0xba, 0x7c, 0x27, 0x03, 0x06, 0x33, + 0xc1, 0xb5, 0xff, 0x53, 0x84, 0xd9, 0x75, 0x11, 0x40, 0xe0, 0x7a, 0x42, 0xf3, 0x20, 0x57, 0xa1, + 0xe4, 0xf5, 0x07, 0x7c, 0xe4, 0x94, 0x84, 0x8b, 0x00, 0xb7, 0x77, 0x91, 0x95, 0x91, 0x77, 0xa0, + 0x6e, 0xca, 0x29, 0x43, 0x9a, 0x39, 0xc6, 0x32, 0x49, 0xa9, 0x7f, 0x18, 0xa2, 0xb1, 0x3d, 0x5d, + 0xcf, 0xef, 0x76, 0xac, 0x0f, 0xa8, 0xdc, 0xf9, 0xf3, 0x3d, 0xdd, 0x96, 0x28, 0x42, 0x45, 0x63, + 
0xab, 0xea, 0x21, 0x3d, 0x16, 0xfb, 0xde, 0x72, 0xb4, 0xaa, 0xde, 0x91, 0x65, 0x18, 0x52, 0x49, + 0x4b, 0x7d, 0x2c, 0x6c, 0x14, 0x94, 0x85, 0xcd, 0xe6, 0x3e, 0x2b, 0x90, 0xdf, 0x0d, 0x9b, 0x32, + 0xdf, 0xb7, 0x82, 0x80, 0x7a, 0xf2, 0x35, 0x8e, 0x35, 0x65, 0xbe, 0xc5, 0x11, 0x50, 0x22, 0x91, + 0x9f, 0x84, 0x06, 0x07, 0x6f, 0xdb, 0xee, 0x1e, 0x7f, 0x71, 0x0d, 0x61, 0xbd, 0xb9, 0xaf, 0x0a, + 0x31, 0xa2, 0x6b, 0x3f, 0x2a, 0xc2, 0x95, 0x75, 0x1a, 0x08, 0xad, 0x66, 0x95, 0xf6, 0x6d, 0xf7, + 0x98, 0xe9, 0xd3, 0x48, 0xbf, 0x49, 0xde, 0x04, 0xb0, 0xfc, 0xbd, 0xce, 0x91, 0xc1, 0xbf, 0x03, + 0xf1, 0x0d, 0xdf, 0x90, 0x9f, 0x24, 0x6c, 0x74, 0xda, 0x92, 0xf2, 0x28, 0xf1, 0x0f, 0x63, 0x75, + 0xa2, 0x8d, 0x74, 0xf1, 0x31, 0x1b, 0xe9, 0x0e, 0x40, 0x3f, 0xd2, 0xca, 0x4b, 0x9c, 0xf3, 0x67, + 0x94, 0x98, 0xf3, 0x28, 0xe4, 0x31, 0x98, 0x3c, 0x7a, 0xb2, 0x03, 0x73, 0x26, 0xdd, 0xd7, 0x07, + 0x76, 0x10, 0xee, 0x24, 0xe4, 0x47, 0x7c, 0xf6, 0xcd, 0x48, 0x18, 0xdc, 0xb0, 0x9a, 0x42, 0xc2, + 0x21, 0x6c, 0xed, 0xef, 0x97, 0xe0, 0xda, 0x3a, 0x0d, 0x42, 0xdb, 0x9a, 0x9c, 0x1d, 0x3b, 0x7d, + 0x6a, 0xb0, 0xb7, 0xf0, 0x61, 0x01, 0xaa, 0xb6, 0xbe, 0x47, 0x6d, 0xb6, 0x7a, 0xb1, 0xa7, 0x79, + 0x6f, 0xec, 0x85, 0x60, 0xb4, 0x94, 0x85, 0x4d, 0x2e, 0x21, 0xb5, 0x34, 0x88, 0x42, 0x94, 0xe2, + 0xd9, 0xa4, 0x6e, 0xd8, 0x03, 0x3f, 0xa0, 0xde, 0xb6, 0xeb, 0x05, 0x52, 0x9f, 0x0c, 0x27, 0xf5, + 0x95, 0x88, 0x84, 0x71, 0x3e, 0xb2, 0x04, 0x60, 0xd8, 0x16, 0x75, 0x02, 0x5e, 0x4b, 0x7c, 0x57, + 0x44, 0xbd, 0xdf, 0x95, 0x90, 0x82, 0x31, 0x2e, 0x26, 0xaa, 0xe7, 0x3a, 0x56, 0xe0, 0x0a, 0x51, + 0xe5, 0xa4, 0xa8, 0xad, 0x88, 0x84, 0x71, 0x3e, 0x5e, 0x8d, 0x06, 0x9e, 0x65, 0xf8, 0xbc, 0x5a, + 0x25, 0x55, 0x2d, 0x22, 0x61, 0x9c, 0x8f, 0xad, 0x79, 0xb1, 0xe7, 0x3f, 0xd7, 0x9a, 0xf7, 0x5b, + 0x0d, 0xb8, 0x9e, 0xe8, 0xd6, 0x40, 0x0f, 0xe8, 0xfe, 0xc0, 0xee, 0xd0, 0x40, 0xbd, 0xc0, 0x31, + 0xd7, 0xc2, 0xbf, 0x10, 0xbd, 0x77, 0x11, 0xb6, 0x64, 0x4c, 0xe6, 0xbd, 0x0f, 0x35, 0xf0, 0x4c, + 0xef, 0x7e, 0x11, 0x1a, 0x8e, 0x1e, 
0xf8, 0xfc, 0xc3, 0x95, 0xdf, 0x68, 0xa8, 0x86, 0xdd, 0x55, + 0x04, 0x8c, 0x78, 0xc8, 0x36, 0x5c, 0x96, 0x5d, 0x7c, 0xeb, 0x61, 0xdf, 0xf5, 0x02, 0xea, 0x89, + 0xba, 0x72, 0x39, 0x95, 0x75, 0x2f, 0x6f, 0x65, 0xf0, 0x60, 0x66, 0x4d, 0xb2, 0x05, 0x97, 0x0c, + 0x11, 0xca, 0x41, 0x6d, 0x57, 0x37, 0x15, 0xa0, 0x30, 0x65, 0x86, 0x5b, 0xa3, 0x95, 0x61, 0x16, + 0xcc, 0xaa, 0x97, 0x1e, 0xcd, 0xd5, 0xb1, 0x46, 0x73, 0x6d, 0x9c, 0xd1, 0x5c, 0x1f, 0x6f, 0x34, + 0x37, 0xce, 0x36, 0x9a, 0x59, 0xcf, 0xb3, 0x71, 0x44, 0x3d, 0xa6, 0x9e, 0x88, 0x15, 0x36, 0x16, + 0x29, 0x14, 0xf6, 0x7c, 0x27, 0x83, 0x07, 0x33, 0x6b, 0x92, 0x3d, 0xb8, 0x26, 0xca, 0x6f, 0x39, + 0x86, 0x77, 0xdc, 0x67, 0x0b, 0x4f, 0x0c, 0xb7, 0x99, 0xb0, 0x25, 0x5f, 0xeb, 0x8c, 0xe4, 0xc4, + 0xc7, 0xa0, 0x90, 0x9f, 0x83, 0x69, 0xf1, 0x96, 0xb6, 0xf4, 0x3e, 0x87, 0x15, 0x71, 0x43, 0x2f, + 0x48, 0xd8, 0xe9, 0x95, 0x38, 0x11, 0x93, 0xbc, 0x64, 0x19, 0x66, 0xfb, 0x47, 0x06, 0xfb, 0xb9, + 0xb1, 0x7f, 0x97, 0x52, 0x93, 0x9a, 0xdc, 0x51, 0xd9, 0x68, 0xbf, 0xa8, 0xac, 0x3b, 0xdb, 0x49, + 0x32, 0xa6, 0xf9, 0xc9, 0xeb, 0x30, 0xe5, 0x07, 0xba, 0x17, 0x48, 0x03, 0xee, 0xfc, 0x8c, 0x88, + 0xab, 0x52, 0xf6, 0xcd, 0x4e, 0x8c, 0x86, 0x09, 0xce, 0xcc, 0xf5, 0x62, 0xf6, 0xe2, 0xd6, 0x8b, + 0x3c, 0xb3, 0xd5, 0x3f, 0x2f, 0xc2, 0x8d, 0x75, 0x1a, 0x6c, 0xb9, 0x8e, 0x34, 0x7f, 0x67, 0x2d, + 0xfb, 0x67, 0xb2, 0x7e, 0x27, 0x17, 0xed, 0xe2, 0x44, 0x17, 0xed, 0xd2, 0x84, 0x16, 0xed, 0xf2, + 0x05, 0x2e, 0xda, 0xff, 0xb0, 0x08, 0x2f, 0x26, 0x7a, 0x72, 0xdb, 0x35, 0xd5, 0x84, 0xff, 0x69, + 0x07, 0x9e, 0xa1, 0x03, 0x1f, 0x09, 0xbd, 0x93, 0x3b, 0x30, 0x53, 0x1a, 0xcf, 0x77, 0xd2, 0x1a, + 0xcf, 0xbb, 0x79, 0x56, 0xbe, 0x0c, 0x09, 0x67, 0x5a, 0xf1, 0xde, 0x02, 0xe2, 0x49, 0x77, 0xab, + 0x30, 0xfd, 0xc4, 0x94, 0x9e, 0x30, 0x70, 0x13, 0x87, 0x38, 0x30, 0xa3, 0x16, 0xe9, 0xc0, 0x0b, + 0x3e, 0x75, 0x02, 0xcb, 0xa1, 0x76, 0x12, 0x4e, 0x68, 0x43, 0x2f, 0x4b, 0xb8, 0x17, 0x3a, 0x59, + 0x4c, 0x98, 0x5d, 0x37, 0xcf, 0x3c, 0xf0, 0xaf, 0x81, 0xab, 0x9c, 0xa2, 
0x6b, 0x26, 0xa6, 0xb1, + 0x7c, 0x98, 0xd6, 0x58, 0xde, 0xcb, 0xff, 0xde, 0xc6, 0xd3, 0x56, 0x96, 0x00, 0xf8, 0x5b, 0x88, + 0xab, 0x2b, 0xe1, 0x22, 0x8d, 0x21, 0x05, 0x63, 0x5c, 0x6c, 0x01, 0x52, 0xfd, 0x1c, 0xd7, 0x54, + 0xc2, 0x05, 0xa8, 0x13, 0x27, 0x62, 0x92, 0x77, 0xa4, 0xb6, 0x53, 0x19, 0x5b, 0xdb, 0x79, 0x0b, + 0x48, 0xc2, 0xf0, 0x28, 0xf0, 0xaa, 0xc9, 0xb8, 0xe1, 0x8d, 0x21, 0x0e, 0xcc, 0xa8, 0x35, 0x62, + 0x28, 0xd7, 0x26, 0x3b, 0x94, 0xeb, 0xe3, 0x0f, 0x65, 0xf2, 0x1e, 0x5c, 0xe5, 0xa2, 0x64, 0xff, + 0x24, 0x81, 0x85, 0xde, 0xf3, 0x63, 0x12, 0xf8, 0x2a, 0x8e, 0x62, 0xc4, 0xd1, 0x18, 0xec, 0xfd, + 0x18, 0x1e, 0x35, 0x99, 0x70, 0xdd, 0x1e, 0xad, 0x13, 0xad, 0x64, 0xf0, 0x60, 0x66, 0x4d, 0x36, + 0xc4, 0x02, 0x36, 0x0c, 0xf5, 0x3d, 0x9b, 0x9a, 0x32, 0x6e, 0x3a, 0x1c, 0x62, 0x3b, 0x9b, 0x1d, + 0x49, 0xc1, 0x18, 0x57, 0x96, 0x9a, 0x32, 0x75, 0x4e, 0x35, 0x65, 0x9d, 0x5b, 0xe9, 0xf7, 0x13, + 0xda, 0x90, 0xd4, 0x75, 0xc2, 0x48, 0xf8, 0x95, 0x34, 0x03, 0x0e, 0xd7, 0xe1, 0x5a, 0xa2, 0xe1, + 0x59, 0xfd, 0xc0, 0x4f, 0x62, 0xcd, 0xa4, 0xb4, 0xc4, 0x0c, 0x1e, 0xcc, 0xac, 0xc9, 0xf4, 0xf3, + 0x03, 0xaa, 0xdb, 0xc1, 0x41, 0x12, 0x70, 0x36, 0xa9, 0x9f, 0xdf, 0x1e, 0x66, 0xc1, 0xac, 0x7a, + 0x99, 0x0b, 0xd2, 0xdc, 0xb3, 0xa9, 0x56, 0x7d, 0xbb, 0x04, 0x57, 0xd7, 0x69, 0x10, 0x86, 0x94, + 0x7d, 0x6a, 0x46, 0xf9, 0x18, 0xcc, 0x28, 0xbf, 0x59, 0x81, 0x4b, 0xeb, 0x34, 0x18, 0xd2, 0xc6, + 0xfe, 0x98, 0x76, 0xff, 0x16, 0x5c, 0x8a, 0xa2, 0x18, 0x3b, 0x81, 0xeb, 0x89, 0xb5, 0x3c, 0xb5, + 0x5b, 0xee, 0x0c, 0xb3, 0x60, 0x56, 0x3d, 0xf2, 0x75, 0x78, 0x91, 0x2f, 0xf5, 0x4e, 0x57, 0xd8, + 0x67, 0x85, 0x31, 0x21, 0x76, 0x0e, 0xa7, 0x25, 0x21, 0x5f, 0xec, 0x64, 0xb3, 0xe1, 0xa8, 0xfa, + 0xe4, 0x5b, 0x30, 0xd5, 0xb7, 0xfa, 0xd4, 0xb6, 0x1c, 0xae, 0x9f, 0xe5, 0x0e, 0xfe, 0xd9, 0x8e, + 0x81, 0x45, 0x1b, 0xb8, 0x78, 0x29, 0x26, 0x04, 0x66, 0x8e, 0xd4, 0xfa, 0x05, 0x8e, 0xd4, 0xff, + 0x59, 0x84, 0xda, 0xba, 0xe7, 0x0e, 0xfa, 0xed, 0x63, 0xd2, 0x85, 0xea, 0x03, 0xee, 0x3c, 0x93, + 0xae, 0xa9, 
0xf1, 0x4f, 0x02, 0x08, 0x1f, 0x5c, 0xa4, 0x12, 0x89, 0xff, 0x28, 0xe1, 0xd9, 0x20, + 0x3e, 0xa4, 0xc7, 0xd4, 0x94, 0x3e, 0xb4, 0x70, 0x10, 0xdf, 0x61, 0x85, 0x28, 0x68, 0xa4, 0x07, + 0xb3, 0xba, 0x6d, 0xbb, 0x0f, 0xa8, 0xb9, 0xa9, 0x07, 0xdc, 0xef, 0x2d, 0x7d, 0x2b, 0xe7, 0x35, + 0x4b, 0xf3, 0x60, 0x86, 0xe5, 0x24, 0x14, 0xa6, 0xb1, 0xc9, 0xfb, 0x50, 0xf3, 0x03, 0xd7, 0x53, + 0xca, 0x56, 0x73, 0x69, 0x65, 0xfc, 0x97, 0xde, 0xfe, 0x5a, 0x47, 0x40, 0x09, 0x9b, 0xbd, 0xfc, + 0x83, 0x4a, 0x80, 0xf6, 0x1b, 0x05, 0x80, 0xdb, 0x3b, 0x3b, 0xdb, 0xd2, 0xbd, 0x60, 0x42, 0x59, + 0x1f, 0x84, 0x8e, 0xca, 0xf1, 0x1d, 0x82, 0x89, 0x00, 0x5c, 0xe9, 0xc3, 0x1b, 0x04, 0x07, 0xc8, + 0xd1, 0xc9, 0x4f, 0x40, 0x4d, 0x2a, 0xc8, 0xb2, 0xdb, 0xc3, 0x78, 0x0a, 0xa9, 0x44, 0xa3, 0xa2, + 0x6b, 0x7f, 0xa7, 0x08, 0xb0, 0x61, 0xda, 0xb4, 0xa3, 0x0e, 0x6f, 0x34, 0x82, 0x03, 0x8f, 0xfa, + 0x07, 0xae, 0x6d, 0x8e, 0xe9, 0x4d, 0xe5, 0x36, 0xff, 0x1d, 0x05, 0x82, 0x11, 0x1e, 0x31, 0x61, + 0xca, 0x0f, 0x68, 0x5f, 0xc5, 0xe4, 0x8e, 0xe9, 0x44, 0x99, 0x13, 0x76, 0x91, 0x08, 0x07, 0x13, + 0xa8, 0x44, 0x87, 0xa6, 0xe5, 0x18, 0xe2, 0x03, 0x69, 0x1f, 0x8f, 0x39, 0x90, 0x66, 0xd9, 0x8e, + 0x63, 0x23, 0x82, 0xc1, 0x38, 0xa6, 0xf6, 0x7b, 0x45, 0xb8, 0xc2, 0xe5, 0xb1, 0x66, 0x24, 0x22, + 0x6f, 0xc9, 0x9f, 0x1e, 0x3a, 0x68, 0xfa, 0x27, 0xcf, 0x26, 0x5a, 0x9c, 0x53, 0xdc, 0xa2, 0x81, + 0x1e, 0xe9, 0x73, 0x51, 0x59, 0xec, 0x74, 0xe9, 0x00, 0xca, 0x3e, 0x9b, 0xaf, 0x44, 0xef, 0x75, + 0xc6, 0x1e, 0x42, 0xd9, 0x0f, 0xc0, 0x67, 0xaf, 0xd0, 0x6b, 0xcc, 0x67, 0x2d, 0x2e, 0x8e, 0xfc, + 0x32, 0x54, 0xfd, 0x40, 0x0f, 0x06, 0xea, 0xd3, 0xdc, 0x9d, 0xb4, 0x60, 0x0e, 0x1e, 0xcd, 0x23, + 0xe2, 0x3f, 0x4a, 0xa1, 0xda, 0xef, 0x15, 0xe0, 0x5a, 0x76, 0xc5, 0x4d, 0xcb, 0x0f, 0xc8, 0x9f, + 0x1a, 0xea, 0xf6, 0x33, 0xbe, 0x71, 0x56, 0x9b, 0x77, 0x7a, 0x78, 0x16, 0x41, 0x95, 0xc4, 0xba, + 0x3c, 0x80, 0x8a, 0x15, 0xd0, 0x9e, 0xda, 0x5f, 0xde, 0x9b, 0xf0, 0xa3, 0xc7, 0x96, 0x76, 0x26, + 0x05, 0x85, 0x30, 0xed, 0xbb, 0xc5, 0x51, 0x8f, 
0xcc, 0x97, 0x0f, 0x3b, 0x19, 0xdd, 0x7d, 0x27, + 0x5f, 0x74, 0x77, 0xb2, 0x41, 0xc3, 0x41, 0xde, 0x7f, 0x66, 0x38, 0xc8, 0xfb, 0x5e, 0xfe, 0x20, + 0xef, 0x54, 0x37, 0x8c, 0x8c, 0xf5, 0xfe, 0xa8, 0x04, 0x2f, 0x3d, 0x6e, 0xd8, 0xb0, 0xf5, 0x4c, + 0x8e, 0xce, 0xbc, 0xeb, 0xd9, 0xe3, 0xc7, 0x21, 0x59, 0x82, 0x4a, 0xff, 0x40, 0xf7, 0x95, 0x52, + 0xa6, 0x36, 0x2c, 0x95, 0x6d, 0x56, 0xf8, 0x88, 0x4d, 0x1a, 0x5c, 0x99, 0xe3, 0x7f, 0x51, 0xb0, + 0xb2, 0xe9, 0xb8, 0x47, 0x7d, 0x3f, 0xb2, 0x09, 0x84, 0xd3, 0xf1, 0x96, 0x28, 0x46, 0x45, 0x27, + 0x01, 0x54, 0x85, 0x89, 0x59, 0xae, 0x4c, 0xe3, 0x07, 0x72, 0x65, 0x1c, 0x08, 0x88, 0x1e, 0x4a, + 0x7a, 0x2b, 0xa4, 0x2c, 0xb2, 0x00, 0xe5, 0x20, 0x0a, 0xcf, 0x56, 0x5b, 0xf3, 0x72, 0x86, 0x7e, + 0xca, 0xf9, 0xd8, 0xc6, 0xde, 0xdd, 0xe3, 0x46, 0x75, 0x53, 0xfa, 0xcf, 0x2d, 0xd7, 0xe1, 0x0a, + 0x59, 0x29, 0xda, 0xd8, 0xdf, 0x1b, 0xe2, 0xc0, 0x8c, 0x5a, 0xda, 0xbf, 0xab, 0xc3, 0x95, 0xec, + 0xf1, 0xc0, 0xfa, 0xed, 0x88, 0x7a, 0x3e, 0xc3, 0x2e, 0x24, 0xfb, 0xed, 0xbe, 0x28, 0x46, 0x45, + 0xff, 0x44, 0x07, 0x9c, 0xfd, 0x66, 0x01, 0xae, 0x7a, 0xd2, 0x47, 0xf4, 0x34, 0x82, 0xce, 0x5e, + 0x16, 0xe6, 0x8c, 0x11, 0x02, 0x71, 0x74, 0x5b, 0xc8, 0xdf, 0x2a, 0xc0, 0x7c, 0x2f, 0x65, 0xe7, + 0xb8, 0xc0, 0xb3, 0x92, 0xfc, 0xfc, 0xc3, 0xd6, 0x08, 0x79, 0x38, 0xb2, 0x25, 0xe4, 0x5b, 0xd0, + 0xec, 0xb3, 0x71, 0xe1, 0x07, 0xd4, 0x31, 0x54, 0x80, 0xe8, 0xf8, 0x5f, 0xd2, 0x76, 0x84, 0x15, + 0x9e, 0x95, 0xe2, 0xfa, 0x41, 0x8c, 0x80, 0x71, 0x89, 0xcf, 0xf8, 0xe1, 0xc8, 0x9b, 0x50, 0xf7, + 0x69, 0x10, 0x58, 0x4e, 0x57, 0xec, 0x37, 0x1a, 0xe2, 0x5b, 0xe9, 0xc8, 0x32, 0x0c, 0xa9, 0xe4, + 0x27, 0xa1, 0xc1, 0x5d, 0x4e, 0xcb, 0x5e, 0xd7, 0x9f, 0x6f, 0xf0, 0x70, 0xb1, 0x69, 0x11, 0x00, + 0x27, 0x0b, 0x31, 0xa2, 0x93, 0x2f, 0xc2, 0xd4, 0x1e, 0xff, 0x7c, 0xe5, 0x79, 0x79, 0x61, 0xe3, + 0xe2, 0xda, 0x5a, 0x3b, 0x56, 0x8e, 0x09, 0x2e, 0xb2, 0x04, 0x40, 0x43, 0xbf, 0x5c, 0xda, 0x9e, + 0x15, 0x79, 0xec, 0x30, 0xc6, 0x45, 0x5e, 0x86, 0x52, 0x60, 0xfb, 0xdc, 0x86, 0x55, 
0x8f, 0xb6, + 0xa0, 0x3b, 0x9b, 0x1d, 0x64, 0xe5, 0xda, 0x8f, 0x0a, 0x30, 0x9b, 0x3a, 0x46, 0xc4, 0xaa, 0x0c, + 0x3c, 0x5b, 0x4e, 0x23, 0x61, 0x95, 0x5d, 0xdc, 0x44, 0x56, 0x4e, 0xde, 0x93, 0x6a, 0x79, 0x31, + 0x67, 0x6a, 0x90, 0xbb, 0x7a, 0xe0, 0x33, 0x3d, 0x7c, 0x48, 0x23, 0xe7, 0x6e, 0xbe, 0xa8, 0x3d, + 0x72, 0x1d, 0x88, 0xb9, 0xf9, 0x22, 0x1a, 0x26, 0x38, 0x53, 0x06, 0xbf, 0xf2, 0x59, 0x0c, 0x7e, + 0xda, 0xaf, 0x15, 0x63, 0x3d, 0x20, 0x35, 0xfb, 0x27, 0xf4, 0xc0, 0xe7, 0xd9, 0x02, 0x1a, 0x2e, + 0xee, 0x8d, 0xf8, 0xfa, 0xc7, 0x17, 0x63, 0x49, 0x25, 0x6f, 0x8b, 0xbe, 0x2f, 0xe5, 0x3c, 0x80, + 0xbd, 0xb3, 0xd9, 0x11, 0xd1, 0x55, 0xea, 0xad, 0x85, 0xaf, 0xa0, 0x7c, 0x41, 0xaf, 0x40, 0xfb, + 0x97, 0x25, 0x68, 0xbe, 0xe5, 0xee, 0x7d, 0x42, 0x22, 0xa8, 0xb3, 0x97, 0xa9, 0xe2, 0xc7, 0xb8, + 0x4c, 0xed, 0xc2, 0x8b, 0x41, 0x60, 0x77, 0xa8, 0xe1, 0x3a, 0xa6, 0xbf, 0xbc, 0x1f, 0x50, 0x6f, + 0xcd, 0x72, 0x2c, 0xff, 0x80, 0x9a, 0xd2, 0x9d, 0xf4, 0x99, 0xd3, 0x93, 0xd6, 0x8b, 0x3b, 0x3b, + 0x9b, 0x59, 0x2c, 0x38, 0xaa, 0x2e, 0x9f, 0x36, 0xc4, 0x99, 0x4f, 0x7e, 0x26, 0x4a, 0xc6, 0xdc, + 0x88, 0x69, 0x23, 0x56, 0x8e, 0x09, 0x2e, 0xed, 0x7b, 0x45, 0x68, 0x84, 0x49, 0x1f, 0xc8, 0xe7, + 0xa0, 0xb6, 0xe7, 0xb9, 0x87, 0xd4, 0x13, 0x9e, 0x3b, 0x79, 0x26, 0xaa, 0x2d, 0x8a, 0x50, 0xd1, + 0xc8, 0x67, 0xa1, 0x12, 0xb8, 0x7d, 0xcb, 0x48, 0x1b, 0xd4, 0x76, 0x58, 0x21, 0x0a, 0xda, 0xc5, + 0x0d, 0xf0, 0xcf, 0x27, 0x54, 0xbb, 0xc6, 0x48, 0x65, 0xec, 0x5d, 0x28, 0xfb, 0xba, 0x6f, 0xcb, + 0xf5, 0x34, 0x47, 0xfe, 0x84, 0xe5, 0xce, 0xa6, 0xcc, 0x9f, 0xb0, 0xdc, 0xd9, 0x44, 0x0e, 0xaa, + 0xfd, 0x61, 0x11, 0x9a, 0xa2, 0xdf, 0xc4, 0xac, 0x30, 0xc9, 0x9e, 0x7b, 0x83, 0x87, 0x52, 0xf8, + 0x83, 0x1e, 0xf5, 0xb8, 0x99, 0x49, 0x4e, 0x72, 0x71, 0xff, 0x40, 0x44, 0x0c, 0xc3, 0x29, 0xa2, + 0x22, 0xd5, 0xf5, 0xe5, 0x0b, 0xec, 0xfa, 0xca, 0x99, 0xba, 0xbe, 0x7a, 0x11, 0x5d, 0xff, 0x61, + 0x11, 0x1a, 0x9b, 0xd6, 0x3e, 0x35, 0x8e, 0x0d, 0x9b, 0x9f, 0xfe, 0x34, 0xa9, 0x4d, 0x03, 0xba, + 0xee, 0xe9, 0x06, 0xdd, 
0xa6, 0x9e, 0xc5, 0x93, 0x22, 0xb1, 0xef, 0x83, 0xcf, 0x40, 0xf2, 0xf4, + 0xe7, 0xea, 0x08, 0x1e, 0x1c, 0x59, 0x9b, 0x6c, 0xc0, 0x94, 0x49, 0x7d, 0xcb, 0xa3, 0xe6, 0x76, + 0x6c, 0xa3, 0xf2, 0x39, 0xb5, 0xd4, 0xac, 0xc6, 0x68, 0x8f, 0x4e, 0x5a, 0xd3, 0xca, 0x40, 0x29, + 0x76, 0x2c, 0x89, 0xaa, 0xec, 0x93, 0xef, 0xeb, 0x03, 0x3f, 0xab, 0x8d, 0xb1, 0x4f, 0x7e, 0x3b, + 0x9b, 0x05, 0x47, 0xd5, 0xd5, 0x2a, 0x50, 0xda, 0x74, 0xbb, 0xda, 0x77, 0x4b, 0x10, 0x66, 0xcf, + 0x22, 0x7f, 0xbe, 0x00, 0x4d, 0xdd, 0x71, 0xdc, 0x40, 0x66, 0xa6, 0x12, 0x1e, 0x78, 0xcc, 0x9d, + 0xa4, 0x6b, 0x61, 0x39, 0x02, 0x15, 0xce, 0xdb, 0xd0, 0xa1, 0x1c, 0xa3, 0x60, 0x5c, 0x36, 0x19, + 0xa4, 0xfc, 0xc9, 0x5b, 0xf9, 0x5b, 0x71, 0x06, 0xef, 0xf1, 0xb5, 0xaf, 0xc2, 0x5c, 0xba, 0xb1, + 0xe7, 0x71, 0x07, 0xe5, 0x72, 0xcc, 0x17, 0x01, 0xa2, 0x98, 0x92, 0xa7, 0x60, 0xc4, 0xb2, 0x12, + 0x46, 0xac, 0xf1, 0x53, 0x18, 0x44, 0x8d, 0x1e, 0x69, 0xb8, 0xfa, 0x66, 0xca, 0x70, 0xb5, 0x31, + 0x09, 0x61, 0x8f, 0x37, 0x56, 0xed, 0xc1, 0xa5, 0x88, 0x37, 0xfa, 0xe6, 0xef, 0xa4, 0xbe, 0x4c, + 0xa1, 0x8b, 0x7d, 0x61, 0xc4, 0x97, 0x39, 0x1b, 0x0b, 0xf2, 0x19, 0xfe, 0x36, 0xb5, 0xbf, 0x5d, + 0x80, 0xb9, 0xb8, 0x10, 0x7e, 0xde, 0xfa, 0x4b, 0x30, 0xed, 0x51, 0xdd, 0x6c, 0xeb, 0x81, 0x71, + 0xc0, 0xc3, 0xc9, 0x0b, 0x3c, 0xfe, 0x9b, 0x9f, 0x30, 0xc3, 0x38, 0x01, 0x93, 0x7c, 0x44, 0x87, + 0x26, 0x2b, 0xd8, 0xb1, 0x7a, 0xd4, 0x1d, 0x04, 0x63, 0x5a, 0x66, 0xf9, 0xa6, 0x08, 0x23, 0x18, + 0x8c, 0x63, 0x6a, 0x1f, 0x15, 0x60, 0x26, 0xde, 0xe0, 0x0b, 0xb7, 0xda, 0x1d, 0x24, 0xad, 0x76, + 0x2b, 0x13, 0x78, 0xef, 0x23, 0x2c, 0x75, 0xdf, 0x6e, 0xc6, 0x1f, 0x8d, 0x5b, 0xe7, 0xe2, 0x06, + 0x89, 0xc2, 0x63, 0x0d, 0x12, 0x9f, 0xfc, 0xa4, 0x4c, 0xa3, 0x34, 0xe9, 0xf2, 0x33, 0xac, 0x49, + 0x7f, 0x9c, 0x99, 0x9d, 0x62, 0xd9, 0x89, 0xaa, 0x39, 0xb2, 0x13, 0xf5, 0xc2, 0xec, 0x44, 0xb5, + 0x89, 0x4d, 0x6c, 0x67, 0xc9, 0x50, 0x54, 0x7f, 0xaa, 0x19, 0x8a, 0x1a, 0x17, 0x95, 0xa1, 0x08, + 0xf2, 0x66, 0x28, 0xfa, 0x4e, 0x01, 0x66, 0xcc, 0xc4, 0xa9, 
0x5c, 0x79, 0x8e, 0x7d, 0xfc, 0xe5, + 0x2c, 0x79, 0xc8, 0x57, 0x1c, 0xcb, 0x4a, 0x96, 0x61, 0x4a, 0x64, 0x56, 0x5e, 0xa0, 0xa9, 0x8f, + 0x25, 0x2f, 0x10, 0xf9, 0x65, 0x68, 0xd8, 0x6a, 0xad, 0x93, 0xd9, 0x12, 0x37, 0x27, 0x32, 0x24, + 0x25, 0x66, 0x14, 0xf9, 0x1f, 0x16, 0x61, 0x24, 0x51, 0xfb, 0x83, 0x5a, 0x7c, 0x41, 0x7c, 0xda, + 0x7e, 0x81, 0xd7, 0x92, 0x7e, 0x81, 0x1b, 0x69, 0xbf, 0xc0, 0xd0, 0x6a, 0x2e, 0x7d, 0x03, 0x3f, + 0x15, 0x5b, 0x27, 0x4a, 0x3c, 0x21, 0x51, 0x38, 0xe4, 0x32, 0xd6, 0x8a, 0x65, 0x98, 0x95, 0x4a, + 0x80, 0x22, 0xf2, 0x49, 0x76, 0x3a, 0x8a, 0xe4, 0x5a, 0x4d, 0x92, 0x31, 0xcd, 0xcf, 0x04, 0xfa, + 0x2a, 0x2f, 0xad, 0xd8, 0x0d, 0x45, 0x63, 0x5c, 0xe5, 0x8c, 0x0d, 0x39, 0xd8, 0xce, 0xc9, 0xa3, + 0xba, 0x2f, 0xad, 0xfb, 0xb1, 0x9d, 0x13, 0xf2, 0x52, 0x94, 0xd4, 0xb8, 0x8b, 0xa3, 0xf6, 0x04, + 0x17, 0x87, 0x0e, 0x4d, 0x5b, 0xf7, 0x03, 0x31, 0x98, 0x4c, 0x39, 0x9b, 0xfc, 0x89, 0xb3, 0xad, + 0xfb, 0x4c, 0x97, 0x88, 0x14, 0xf8, 0xcd, 0x08, 0x06, 0xe3, 0x98, 0xc4, 0x84, 0x29, 0xf6, 0x97, + 0xcf, 0x2c, 0xe6, 0x72, 0x20, 0xb3, 0xb7, 0x9d, 0x47, 0x46, 0x68, 0x99, 0xdb, 0x8c, 0xe1, 0x60, + 0x02, 0x75, 0x84, 0x17, 0x04, 0xc6, 0xf1, 0x82, 0x90, 0x9f, 0x13, 0x8a, 0xdb, 0x71, 0xf8, 0x5a, + 0x9b, 0xfc, 0xb5, 0x86, 0x51, 0xa0, 0x18, 0x27, 0x62, 0x92, 0x97, 0x8d, 0x8a, 0x81, 0xec, 0x06, + 0x55, 0x7d, 0x2a, 0x39, 0x2a, 0x76, 0x93, 0x64, 0x4c, 0xf3, 0x93, 0x6d, 0xb8, 0x1c, 0x16, 0xc5, + 0x9b, 0x31, 0xcd, 0x71, 0xc2, 0xb0, 0xbc, 0xdd, 0x0c, 0x1e, 0xcc, 0xac, 0xc9, 0xcf, 0xb9, 0x0c, + 0x3c, 0x8f, 0x3a, 0xc1, 0x6d, 0xdd, 0x3f, 0x90, 0xf1, 0x7d, 0xd1, 0x39, 0x97, 0x88, 0x84, 0x71, + 0x3e, 0xb2, 0x04, 0x20, 0xe0, 0x78, 0xad, 0xd9, 0x64, 0x08, 0xed, 0x6e, 0x48, 0xc1, 0x18, 0x97, + 0xf6, 0x9d, 0x06, 0x34, 0xef, 0xea, 0x81, 0x75, 0x44, 0xb9, 0xcb, 0xf2, 0x62, 0xfc, 0x46, 0x7f, + 0xad, 0x00, 0x57, 0x92, 0x71, 0xa9, 0x17, 0xe8, 0x3c, 0xe2, 0xf9, 0x8c, 0x30, 0x53, 0x1a, 0x8e, + 0x68, 0x05, 0x77, 0x23, 0x0d, 0x85, 0xb9, 0x5e, 0xb4, 0x1b, 0xa9, 0x33, 0x4a, 0x20, 0x8e, 0x6e, + 
0xcb, 0x27, 0xc5, 0x8d, 0xf4, 0x6c, 0x27, 0xe0, 0x4c, 0x39, 0xb9, 0x6a, 0xcf, 0x8c, 0x93, 0xab, + 0xfe, 0x4c, 0x68, 0xfd, 0xfd, 0x98, 0x93, 0xab, 0x91, 0x33, 0xd8, 0x4a, 0x1e, 0xe5, 0x10, 0x68, + 0xa3, 0x9c, 0x65, 0x3c, 0x0b, 0x83, 0x72, 0x3e, 0x30, 0x65, 0x79, 0x4f, 0xf7, 0x2d, 0x43, 0xaa, + 0x1d, 0x39, 0x12, 0x0e, 0xab, 0x44, 0x84, 0x22, 0x26, 0x83, 0xff, 0x45, 0x81, 0x1d, 0xe5, 0x5d, + 0x2c, 0xe6, 0xca, 0xbb, 0x48, 0x56, 0xa0, 0xec, 0x1c, 0xd2, 0xe3, 0xf3, 0xe5, 0x33, 0xe0, 0x9b, + 0xc0, 0xbb, 0x77, 0xe8, 0x31, 0xf2, 0xca, 0xda, 0xf7, 0x8a, 0x00, 0xec, 0xf1, 0xcf, 0xe6, 0x6e, + 0xfa, 0x09, 0xa8, 0xf9, 0x03, 0x6e, 0x18, 0x92, 0x0a, 0x53, 0x14, 0xa1, 0x26, 0x8a, 0x51, 0xd1, + 0xc9, 0x67, 0xa1, 0xf2, 0xcd, 0x01, 0x1d, 0xa8, 0xd8, 0x89, 0x70, 0xdf, 0xf0, 0x35, 0x56, 0x88, + 0x82, 0x76, 0x71, 0xa6, 0x63, 0xe5, 0x96, 0xaa, 0x5c, 0x94, 0x5b, 0xaa, 0x01, 0xb5, 0xbb, 0x2e, + 0x0f, 0x78, 0xd5, 0xfe, 0x7b, 0x11, 0x20, 0x0a, 0x28, 0x24, 0xbf, 0x51, 0x80, 0x17, 0xc2, 0x0f, + 0x2e, 0x10, 0xdb, 0x3f, 0x9e, 0xe3, 0x3b, 0xb7, 0x8b, 0x2a, 0xeb, 0x63, 0xe7, 0x33, 0xd0, 0x76, + 0x96, 0x38, 0xcc, 0x6e, 0x05, 0x41, 0xa8, 0xd3, 0x5e, 0x3f, 0x38, 0x5e, 0xb5, 0x3c, 0x39, 0x02, + 0x33, 0xe3, 0x56, 0x6f, 0x49, 0x1e, 0x51, 0x55, 0xda, 0x28, 0xf8, 0x47, 0xa4, 0x28, 0x18, 0xe2, + 0x90, 0x03, 0xa8, 0x3b, 0xee, 0x7b, 0x3e, 0xeb, 0x0e, 0x39, 0x1c, 0xdf, 0x1c, 0xbf, 0xcb, 0x45, + 0xb7, 0x0a, 0x97, 0x86, 0xfc, 0x83, 0x35, 0x47, 0x76, 0xf6, 0xaf, 0x17, 0xe1, 0x52, 0x46, 0x3f, + 0x90, 0x37, 0x61, 0x4e, 0xc6, 0x6e, 0x46, 0xc9, 0xee, 0x0b, 0x51, 0xb2, 0xfb, 0x4e, 0x8a, 0x86, + 0x43, 0xdc, 0xe4, 0x3d, 0x00, 0xdd, 0x30, 0xa8, 0xef, 0x6f, 0xb9, 0xa6, 0xda, 0x0f, 0xbc, 0xc1, + 0xd4, 0x97, 0xe5, 0xb0, 0xf4, 0xd1, 0x49, 0xeb, 0xa7, 0xb3, 0xc2, 0xb1, 0x53, 0xfd, 0x1c, 0x55, + 0xc0, 0x18, 0x24, 0xf9, 0x06, 0x80, 0xb0, 0x01, 0x84, 0x19, 0x23, 0x9e, 0x60, 0x38, 0x5b, 0x50, + 0x09, 0xc9, 0x16, 0xbe, 0x36, 0xd0, 0x9d, 0xc0, 0x0a, 0x8e, 0x45, 0x82, 0x9e, 0xfb, 0x21, 0x0a, + 0xc6, 0x10, 0xb5, 0x7f, 0x56, 0x84, 
0xba, 0x72, 0x0b, 0x3c, 0x05, 0x5b, 0x70, 0x37, 0x61, 0x0b, + 0x9e, 0x50, 0x00, 0x76, 0x96, 0x25, 0xd8, 0x4d, 0x59, 0x82, 0xd7, 0xf3, 0x8b, 0x7a, 0xbc, 0x1d, + 0xf8, 0xb7, 0x8b, 0x30, 0xa3, 0x58, 0xf3, 0x5a, 0x68, 0xbf, 0x02, 0xb3, 0x22, 0x70, 0x62, 0x4b, + 0x7f, 0x28, 0x72, 0x15, 0xf1, 0x0e, 0x2b, 0x8b, 0x98, 0xe7, 0x76, 0x92, 0x84, 0x69, 0x5e, 0x36, + 0xac, 0x45, 0xd1, 0x2e, 0xdb, 0x84, 0x09, 0x57, 0xab, 0xd8, 0x6f, 0xf2, 0x61, 0xdd, 0x4e, 0xd1, + 0x70, 0x88, 0x3b, 0x6d, 0x22, 0x2e, 0x5f, 0x80, 0x89, 0xf8, 0x3f, 0x14, 0x60, 0x2a, 0xea, 0xaf, + 0x0b, 0x37, 0x10, 0xef, 0x27, 0x0d, 0xc4, 0xcb, 0xb9, 0x87, 0xc3, 0x08, 0xf3, 0xf0, 0x5f, 0xaa, + 0x41, 0xe2, 0x1c, 0x00, 0xd9, 0x83, 0x6b, 0x56, 0x66, 0x34, 0x63, 0x6c, 0xb6, 0x09, 0x0f, 0xb6, + 0x6f, 0x8c, 0xe4, 0xc4, 0xc7, 0xa0, 0x90, 0x01, 0xd4, 0x8f, 0xa8, 0x17, 0x58, 0x06, 0x55, 0xcf, + 0xb7, 0x9e, 0x5b, 0x25, 0x93, 0x46, 0xf0, 0xb0, 0x4f, 0xef, 0x4b, 0x01, 0x18, 0x8a, 0x22, 0x7b, + 0x50, 0xa1, 0x66, 0x97, 0xaa, 0xec, 0x51, 0x39, 0xb3, 0xf0, 0x86, 0xfd, 0xc9, 0xfe, 0xf9, 0x28, + 0xa0, 0x89, 0x1f, 0x37, 0x34, 0x95, 0x73, 0x2a, 0x58, 0x67, 0x34, 0x2f, 0x91, 0xc3, 0xd0, 0xda, + 0x5a, 0x99, 0xd0, 0xe4, 0xf1, 0x18, 0x5b, 0xab, 0x0f, 0x8d, 0x07, 0x7a, 0x40, 0xbd, 0x9e, 0xee, + 0x1d, 0xca, 0xdd, 0xc6, 0xf8, 0x4f, 0xf8, 0xb6, 0x42, 0x8a, 0x9e, 0x30, 0x2c, 0xc2, 0x48, 0x0e, + 0x71, 0xa1, 0x11, 0x48, 0xf5, 0x59, 0x99, 0x94, 0xc7, 0x17, 0xaa, 0x14, 0x71, 0x5f, 0x9e, 0x07, + 0x50, 0x7f, 0x31, 0x92, 0x41, 0x8e, 0x12, 0x29, 0xdb, 0x45, 0xa2, 0xfe, 0x76, 0x0e, 0xd7, 0x84, + 0x84, 0x8a, 0x96, 0x9b, 0xec, 0xd4, 0xef, 0xda, 0xff, 0xaa, 0x44, 0xd3, 0xf2, 0xd3, 0xb6, 0x13, + 0x7e, 0x31, 0x69, 0x27, 0xbc, 0x9e, 0xb6, 0x13, 0xa6, 0xfc, 0xf1, 0xe7, 0x8f, 0x20, 0x4e, 0x99, + 0xd7, 0xca, 0x17, 0x60, 0x5e, 0x7b, 0x05, 0x9a, 0x47, 0x7c, 0x26, 0x10, 0xa9, 0xa8, 0x2a, 0x7c, + 0x19, 0xe1, 0x33, 0xfb, 0xfd, 0xa8, 0x18, 0xe3, 0x3c, 0xac, 0x8a, 0xbc, 0xa4, 0x26, 0xcc, 0xda, + 0x2c, 0xab, 0x74, 0xa2, 0x62, 0x8c, 0xf3, 0xf0, 0xe0, 0x43, 0xcb, 0x39, 
0x14, 0x15, 0x6a, 0xbc, + 0x82, 0x08, 0x3e, 0x54, 0x85, 0x18, 0xd1, 0xc9, 0x4d, 0xa8, 0x0f, 0xcc, 0x7d, 0xc1, 0x5b, 0xe7, + 0xbc, 0x5c, 0xc3, 0xdc, 0x5d, 0x5d, 0x93, 0xa9, 0xb1, 0x14, 0x95, 0xb5, 0xa4, 0xa7, 0xf7, 0x15, + 0x81, 0xef, 0x0d, 0x65, 0x4b, 0xb6, 0xa2, 0x62, 0x8c, 0xf3, 0x90, 0x9f, 0x85, 0x19, 0x8f, 0x9a, + 0x03, 0x83, 0x86, 0xb5, 0x80, 0xd7, 0x92, 0x39, 0x43, 0xe3, 0x14, 0x4c, 0x71, 0x8e, 0x30, 0x12, + 0x36, 0xc7, 0x32, 0x12, 0x7e, 0x15, 0x66, 0x4c, 0x4f, 0xb7, 0x1c, 0x6a, 0xde, 0x73, 0x78, 0xd0, + 0x85, 0x0c, 0x81, 0x0c, 0x0d, 0xf4, 0xab, 0x09, 0x2a, 0xa6, 0xb8, 0xb5, 0x7f, 0x55, 0x84, 0x8a, + 0xc8, 0x64, 0xba, 0x01, 0x97, 0x2c, 0xc7, 0x0a, 0x2c, 0xdd, 0x5e, 0xa5, 0xb6, 0x7e, 0x9c, 0x0c, + 0x3c, 0x79, 0x91, 0x6d, 0xb4, 0x37, 0x86, 0xc9, 0x98, 0x55, 0x87, 0x75, 0x4e, 0x20, 0x96, 0x6f, + 0x85, 0x22, 0xec, 0x68, 0x22, 0xfd, 0x75, 0x82, 0x82, 0x29, 0x4e, 0xa6, 0x0c, 0xf5, 0x33, 0xa2, + 0x4a, 0xb8, 0x32, 0x94, 0x8c, 0x25, 0x49, 0xf2, 0x71, 0x25, 0x7d, 0xc0, 0x15, 0xe2, 0xf0, 0xa0, + 0x91, 0x0c, 0x1c, 0x13, 0x4a, 0x7a, 0x8a, 0x86, 0x43, 0xdc, 0x0c, 0x61, 0x5f, 0xb7, 0xec, 0x81, + 0x47, 0x23, 0x84, 0x4a, 0x84, 0xb0, 0x96, 0xa2, 0xe1, 0x10, 0xb7, 0xf6, 0x3f, 0x0a, 0x40, 0x86, + 0x8f, 0x4e, 0x90, 0x03, 0xa8, 0x3a, 0xdc, 0x16, 0x99, 0x3b, 0xeb, 0x7e, 0xcc, 0xa4, 0x29, 0x16, + 0x09, 0x59, 0x20, 0xf1, 0x89, 0x03, 0x75, 0xfa, 0x30, 0xa0, 0x9e, 0x13, 0x1e, 0xa5, 0x9a, 0x4c, + 0x86, 0x7f, 0xb1, 0x37, 0x93, 0xc8, 0x18, 0xca, 0xd0, 0x7e, 0xbf, 0x08, 0xcd, 0x18, 0xdf, 0x93, + 0xb6, 0xf8, 0x3c, 0x9b, 0x83, 0x30, 0x01, 0xee, 0x7a, 0xb6, 0x9c, 0xef, 0x62, 0xd9, 0x1c, 0x24, + 0x09, 0x37, 0x31, 0xce, 0x47, 0x96, 0x00, 0x7a, 0xba, 0x1f, 0x50, 0x8f, 0xeb, 0x42, 0xa9, 0x1c, + 0x0a, 0x5b, 0x21, 0x05, 0x63, 0x5c, 0xe4, 0x86, 0xbc, 0xa3, 0xa1, 0x9c, 0xcc, 0x79, 0x39, 0xe2, + 0x02, 0x86, 0xca, 0x04, 0x2e, 0x60, 0x20, 0x5d, 0x98, 0x53, 0xad, 0x56, 0xd4, 0xf3, 0x65, 0x44, + 0x14, 0x03, 0x35, 0x05, 0x81, 0x43, 0xa0, 0xda, 0xf7, 0x0a, 0x30, 0x9d, 0x30, 0x40, 0x89, 0x6c, + 0x95, 0xea, 
0xe0, 0x4f, 0x22, 0x5b, 0x65, 0xec, 0xbc, 0xce, 0xe7, 0xa1, 0x2a, 0x3a, 0x28, 0x1d, + 0xcf, 0x2b, 0xba, 0x10, 0x25, 0x95, 0xad, 0x2c, 0xd2, 0xc4, 0x9d, 0x5e, 0x59, 0xa4, 0x0d, 0x1c, + 0x15, 0x5d, 0x78, 0x8e, 0x44, 0xeb, 0x64, 0x4f, 0xc7, 0x3c, 0x47, 0xa2, 0x1c, 0x43, 0x0e, 0xed, + 0x1f, 0xf1, 0x76, 0x07, 0xde, 0x71, 0xb8, 0xb3, 0xee, 0x42, 0x4d, 0xc6, 0x70, 0xca, 0x4f, 0xe3, + 0xcd, 0x1c, 0x56, 0x31, 0x8e, 0x23, 0xa3, 0x15, 0x75, 0xe3, 0xf0, 0xde, 0xfe, 0x3e, 0x2a, 0x74, + 0x72, 0x0b, 0x1a, 0xae, 0x23, 0xbf, 0x60, 0xf9, 0xf8, 0x5f, 0x60, 0x2b, 0xc7, 0x3d, 0x55, 0xf8, + 0xe8, 0xa4, 0x75, 0x25, 0xfc, 0x93, 0x68, 0x24, 0x46, 0x35, 0xb5, 0x3f, 0x57, 0x80, 0x17, 0xd0, + 0xb5, 0x6d, 0xcb, 0xe9, 0x26, 0x3d, 0x9f, 0xc4, 0x86, 0x99, 0x9e, 0xfe, 0x70, 0xd7, 0xd1, 0x8f, + 0x74, 0xcb, 0xd6, 0xf7, 0x6c, 0xfa, 0xc4, 0x9d, 0xf1, 0x20, 0xb0, 0xec, 0x05, 0x71, 0x67, 0xe5, + 0xc2, 0x86, 0x13, 0xdc, 0xf3, 0x3a, 0x81, 0x67, 0x39, 0x5d, 0x31, 0x4b, 0x6e, 0x25, 0xb0, 0x30, + 0x85, 0xad, 0xfd, 0x41, 0x09, 0x78, 0x1c, 0x21, 0xf9, 0x12, 0x34, 0x7a, 0xd4, 0x38, 0xd0, 0x1d, + 0xcb, 0x57, 0x79, 0x7f, 0xaf, 0xb2, 0xe7, 0xda, 0x52, 0x85, 0x8f, 0xd8, 0xab, 0x58, 0xee, 0x6c, + 0xf2, 0xa3, 0x3a, 0x11, 0x2f, 0x31, 0xa0, 0xda, 0xf5, 0x7d, 0xbd, 0x6f, 0xe5, 0x0e, 0x31, 0x11, + 0x79, 0x56, 0xc5, 0x74, 0x24, 0x7e, 0xa3, 0x84, 0x26, 0x06, 0x54, 0xfa, 0xb6, 0x6e, 0x39, 0xb9, + 0xef, 0x58, 0x63, 0x4f, 0xb0, 0xcd, 0x90, 0x84, 0xa9, 0x92, 0xff, 0x44, 0x81, 0x4d, 0x06, 0xd0, + 0xf4, 0x0d, 0x4f, 0xef, 0xf9, 0x07, 0xfa, 0xd2, 0xab, 0xaf, 0xe5, 0x56, 0xfe, 0x23, 0x51, 0x42, + 0x17, 0x59, 0xc1, 0xe5, 0xad, 0xce, 0xed, 0xe5, 0xa5, 0x57, 0x5f, 0xc3, 0xb8, 0x9c, 0xb8, 0xd8, + 0x57, 0x5f, 0x59, 0x92, 0x33, 0xc8, 0xc4, 0xc5, 0xbe, 0xfa, 0xca, 0x12, 0xc6, 0xe5, 0x68, 0xff, + 0xbb, 0x00, 0x8d, 0x90, 0x97, 0xec, 0x02, 0xb0, 0xb9, 0x4c, 0x66, 0x46, 0x3d, 0xd7, 0x7d, 0x34, + 0xdc, 0xda, 0xb3, 0x1b, 0x56, 0xc6, 0x18, 0x50, 0x46, 0xea, 0xd8, 0xe2, 0xa4, 0x53, 0xc7, 0x2e, + 0x42, 0xe3, 0x40, 0x77, 0x4c, 0xff, 0x40, 0x3f, 
0x14, 0x53, 0x7a, 0x2c, 0x99, 0xf2, 0x6d, 0x45, + 0xc0, 0x88, 0x47, 0xfb, 0x27, 0x55, 0x10, 0x71, 0x21, 0x6c, 0xd2, 0x31, 0x2d, 0x5f, 0x1c, 0x7e, + 0x28, 0xf0, 0x9a, 0xe1, 0xa4, 0xb3, 0x2a, 0xcb, 0x31, 0xe4, 0x20, 0x57, 0xa1, 0xd4, 0xb3, 0x1c, + 0xa9, 0x81, 0x70, 0x43, 0xee, 0x96, 0xe5, 0x20, 0x2b, 0xe3, 0x24, 0xfd, 0xa1, 0xd4, 0x30, 0x04, + 0x49, 0x7f, 0x88, 0xac, 0x8c, 0x7c, 0x05, 0x66, 0x6d, 0xd7, 0x3d, 0x64, 0xd3, 0x87, 0x52, 0x44, + 0x84, 0x57, 0x9d, 0x9b, 0x56, 0x36, 0x93, 0x24, 0x4c, 0xf3, 0x92, 0x5d, 0x78, 0xf1, 0x03, 0xea, + 0xb9, 0x72, 0xbe, 0xec, 0xd8, 0x94, 0xf6, 0x15, 0x8c, 0x50, 0x8d, 0x79, 0x94, 0xec, 0x2f, 0x64, + 0xb3, 0xe0, 0xa8, 0xba, 0x3c, 0xde, 0x5e, 0xf7, 0xba, 0x34, 0xd8, 0xf6, 0x5c, 0xa6, 0xbb, 0x58, + 0x4e, 0x57, 0xc1, 0x56, 0x23, 0xd8, 0x9d, 0x6c, 0x16, 0x1c, 0x55, 0x97, 0xbc, 0x03, 0xf3, 0x82, + 0x24, 0xd4, 0x96, 0x65, 0x31, 0xcd, 0x58, 0xb6, 0xba, 0x9a, 0x74, 0x5a, 0xf8, 0xcb, 0x76, 0x46, + 0xf0, 0xe0, 0xc8, 0xda, 0xe4, 0x2d, 0x98, 0x53, 0xde, 0xd2, 0x6d, 0xea, 0x75, 0xc2, 0x58, 0xa1, + 0xe9, 0xf6, 0xf5, 0xd3, 0x93, 0xd6, 0xb5, 0x55, 0xda, 0xf7, 0xa8, 0x11, 0xf7, 0x3a, 0x2b, 0x2e, + 0x1c, 0xaa, 0x47, 0x10, 0xae, 0xf0, 0x80, 0xa0, 0xdd, 0xfe, 0x8a, 0xeb, 0xda, 0xa6, 0xfb, 0xc0, + 0x51, 0xcf, 0x2e, 0x14, 0x76, 0xee, 0x20, 0xed, 0x64, 0x72, 0xe0, 0x88, 0x9a, 0xec, 0xc9, 0x39, + 0x65, 0xd5, 0x7d, 0xe0, 0xa4, 0x51, 0x21, 0x7a, 0xf2, 0xce, 0x08, 0x1e, 0x1c, 0x59, 0x9b, 0xac, + 0x01, 0x49, 0x3f, 0xc1, 0x6e, 0x5f, 0xba, 0xf0, 0xaf, 0x88, 0x24, 0x47, 0x69, 0x2a, 0x66, 0xd4, + 0x20, 0x9b, 0x70, 0x39, 0x5d, 0xca, 0xc4, 0x49, 0x6f, 0x3e, 0x4f, 0x6f, 0x8c, 0x19, 0x74, 0xcc, + 0xac, 0xa5, 0xfd, 0xd3, 0x22, 0x4c, 0x27, 0xb2, 0x62, 0x3c, 0x73, 0xd9, 0x07, 0xd8, 0xe6, 0xa1, + 0xe7, 0x77, 0x37, 0x56, 0x6f, 0x53, 0xdd, 0xa4, 0xde, 0x1d, 0xaa, 0x32, 0x98, 0x88, 0x65, 0x31, + 0x41, 0xc1, 0x14, 0x27, 0xd9, 0x87, 0x8a, 0xf0, 0x13, 0xe4, 0xbd, 0xd9, 0x48, 0xf5, 0x11, 0x77, + 0x16, 0xc8, 0xeb, 0xc0, 0x5c, 0x8f, 0xa2, 0x80, 0xd7, 0x02, 0x98, 0x8a, 0x73, 0xb0, 
0x89, 0x24, + 0x52, 0x7b, 0x6b, 0x09, 0x95, 0x77, 0x03, 0x4a, 0x41, 0x30, 0x6e, 0x5e, 0x03, 0xe1, 0x77, 0xda, + 0xd9, 0x44, 0x86, 0xa1, 0xed, 0xb3, 0x77, 0xe7, 0xfb, 0x96, 0xeb, 0xc8, 0x24, 0xf7, 0xbb, 0x50, + 0x93, 0xbb, 0xa7, 0x31, 0xf3, 0x32, 0x70, 0x5d, 0x49, 0x99, 0x5d, 0x15, 0x96, 0xf6, 0x1f, 0x8b, + 0xd0, 0x08, 0xcd, 0x24, 0x67, 0x48, 0x1e, 0xef, 0x42, 0x23, 0x0c, 0x68, 0xcc, 0x7d, 0x6d, 0x6b, + 0x14, 0x67, 0xc7, 0x77, 0xf6, 0xe1, 0x5f, 0x8c, 0x64, 0xc4, 0x83, 0x25, 0x4b, 0x39, 0x82, 0x25, + 0xfb, 0x50, 0x0b, 0x3c, 0xab, 0xdb, 0x95, 0xbb, 0x84, 0x3c, 0xd1, 0x92, 0x61, 0x77, 0xed, 0x08, + 0x40, 0xd9, 0xb3, 0xe2, 0x0f, 0x2a, 0x31, 0xda, 0xfb, 0x30, 0x97, 0xe6, 0xe4, 0x2a, 0xb4, 0x71, + 0x40, 0xcd, 0x81, 0xad, 0xfa, 0x38, 0x52, 0xa1, 0x65, 0x39, 0x86, 0x1c, 0xe4, 0x26, 0xd4, 0xd9, + 0x6b, 0xfa, 0xc0, 0x75, 0x94, 0x1a, 0xcb, 0x77, 0x23, 0x3b, 0xb2, 0x0c, 0x43, 0xaa, 0xf6, 0xdf, + 0x4a, 0x70, 0x35, 0x32, 0x76, 0x6d, 0xe9, 0x8e, 0xde, 0x3d, 0xc3, 0x5d, 0x9d, 0x9f, 0x9e, 0x74, + 0x3b, 0xef, 0x0d, 0x20, 0xa5, 0x67, 0xe0, 0x06, 0x90, 0xff, 0x5b, 0x04, 0x1e, 0x7c, 0x4d, 0xbe, + 0x05, 0x53, 0x7a, 0xec, 0x9a, 0x66, 0xf9, 0x3a, 0x6f, 0xe5, 0x7e, 0x9d, 0x3c, 0xc6, 0x3b, 0x0c, + 0x80, 0x8b, 0x97, 0x62, 0x42, 0x20, 0x71, 0xa1, 0xbe, 0xaf, 0xdb, 0x36, 0xd3, 0x85, 0x72, 0x3b, + 0xef, 0x12, 0xc2, 0xf9, 0x30, 0x5f, 0x93, 0xd0, 0x18, 0x0a, 0x21, 0xdf, 0x29, 0xc0, 0xb4, 0x17, + 0xdf, 0xae, 0xc9, 0x17, 0x92, 0x27, 0xb4, 0x23, 0x86, 0x16, 0x0f, 0xb7, 0x8b, 0xef, 0x09, 0x93, + 0x32, 0xb5, 0xff, 0x5a, 0x80, 0xe9, 0x8e, 0x6d, 0x99, 0x96, 0xd3, 0xbd, 0xc0, 0x0b, 0x48, 0xee, + 0x41, 0xc5, 0xb7, 0x2d, 0x93, 0x8e, 0xb9, 0x9a, 0x88, 0x75, 0x8c, 0x01, 0xa0, 0xc0, 0x49, 0xde, + 0x68, 0x52, 0x3a, 0xc3, 0x8d, 0x26, 0x7f, 0x54, 0x05, 0x79, 0x8c, 0x80, 0x0c, 0xa0, 0xd1, 0x55, + 0x17, 0x25, 0xc8, 0x67, 0xbc, 0x9d, 0x23, 0xc9, 0x66, 0xe2, 0xca, 0x05, 0x31, 0xf7, 0x87, 0x85, + 0x18, 0x49, 0x22, 0x34, 0x79, 0x3f, 0xf8, 0x6a, 0xce, 0xfb, 0xc1, 0x85, 0xb8, 0xe1, 0x1b, 0xc2, + 0x75, 0x28, 0x1f, 0x04, 
0x41, 0x5f, 0x0e, 0xa6, 0xf1, 0xcf, 0x89, 0x44, 0x79, 0x9e, 0x84, 0x4e, + 0xc4, 0xfe, 0x23, 0x87, 0x66, 0x22, 0x1c, 0x3d, 0xbc, 0x85, 0x71, 0x25, 0x57, 0x18, 0x49, 0x5c, + 0x04, 0xfb, 0x8f, 0x1c, 0x9a, 0xfc, 0x12, 0x34, 0x03, 0x4f, 0x77, 0xfc, 0x7d, 0xd7, 0xeb, 0x51, + 0x4f, 0xee, 0x51, 0xd7, 0x72, 0x5c, 0x91, 0xbd, 0x13, 0xa1, 0x09, 0x93, 0x6c, 0xa2, 0x08, 0xe3, + 0xd2, 0xc8, 0x21, 0xd4, 0x07, 0xa6, 0x68, 0x98, 0x34, 0x83, 0x2d, 0xe7, 0xb9, 0xf5, 0x3c, 0x16, + 0x24, 0xa2, 0xfe, 0x61, 0x28, 0x20, 0x79, 0xe1, 0x68, 0x6d, 0x52, 0x17, 0x8e, 0xc6, 0x47, 0x63, + 0x56, 0x12, 0x1a, 0xd2, 0x93, 0x7a, 0xad, 0xd3, 0x95, 0x31, 0x6e, 0x6b, 0xb9, 0x55, 0x4e, 0x21, + 0xb2, 0x19, 0xea, 0xc6, 0x4e, 0x17, 0x95, 0x0c, 0xad, 0x07, 0xd2, 0x77, 0x44, 0x8c, 0xc4, 0x65, + 0x4d, 0xe2, 0x64, 0xe4, 0xe2, 0xd9, 0xe6, 0x83, 0xf0, 0xd6, 0xa0, 0x58, 0xb2, 0xf8, 0xcc, 0x5b, + 0x99, 0xb4, 0xff, 0x54, 0x84, 0xd2, 0xce, 0x66, 0x47, 0x24, 0x80, 0xe5, 0xd7, 0xbf, 0xd1, 0xce, + 0xa1, 0xd5, 0xbf, 0x4f, 0x3d, 0x6b, 0xff, 0x58, 0x6e, 0xbd, 0x63, 0x09, 0x60, 0xd3, 0x1c, 0x98, + 0x51, 0x8b, 0xbc, 0x0b, 0x53, 0x86, 0xbe, 0x42, 0xbd, 0x60, 0x1c, 0xc3, 0x02, 0x3f, 0x02, 0xbe, + 0xb2, 0x1c, 0x55, 0xc7, 0x04, 0x18, 0xd9, 0x05, 0x30, 0x22, 0xe8, 0xd2, 0xb9, 0xcd, 0x21, 0x31, + 0xe0, 0x18, 0x10, 0x41, 0x68, 0x1c, 0x32, 0x56, 0x8e, 0x5a, 0x3e, 0x0f, 0x2a, 0x1f, 0x39, 0x77, + 0x54, 0x5d, 0x8c, 0x60, 0x34, 0x07, 0xa6, 0x13, 0x37, 0x38, 0x91, 0x2f, 0x43, 0xdd, 0xed, 0xc7, + 0xa6, 0xd3, 0x06, 0x8f, 0xa6, 0xad, 0xdf, 0x93, 0x65, 0x8f, 0x4e, 0x5a, 0xd3, 0x9b, 0x6e, 0xd7, + 0x32, 0x54, 0x01, 0x86, 0xec, 0x44, 0x83, 0x2a, 0x3f, 0xb7, 0xa9, 0xee, 0x6f, 0xe2, 0x6b, 0x07, + 0xbf, 0x62, 0xc5, 0x47, 0x49, 0xd1, 0x7e, 0xa5, 0x0c, 0x91, 0xc7, 0x95, 0xf8, 0x50, 0x15, 0x67, + 0x46, 0xe4, 0xcc, 0x7d, 0xa1, 0xc7, 0x53, 0xa4, 0x28, 0xd2, 0x85, 0xd2, 0xfb, 0xee, 0x5e, 0xee, + 0x89, 0x3b, 0x96, 0xb0, 0x41, 0xd8, 0xca, 0x62, 0x05, 0xc8, 0x24, 0x90, 0xbf, 0x5e, 0x80, 0xe7, + 0xfd, 0xb4, 0xea, 0x2b, 0x87, 0x03, 0xe6, 0xd7, 0xf1, 0xd3, 
0xca, 0xb4, 0x0c, 0x7b, 0x1e, 0x45, + 0xc6, 0xe1, 0xb6, 0xb0, 0xfe, 0x17, 0xae, 0x50, 0x39, 0x9c, 0xd6, 0x73, 0xde, 0x2f, 0x9b, 0xec, + 0xff, 0x64, 0x19, 0x4a, 0x51, 0xda, 0xb7, 0x8b, 0xd0, 0x8c, 0xcd, 0xd6, 0xb9, 0xaf, 0x05, 0x7b, + 0x98, 0xba, 0x16, 0x6c, 0x7b, 0xfc, 0xc8, 0x80, 0xa8, 0x55, 0x17, 0x7d, 0x33, 0xd8, 0xbf, 0x28, + 0x42, 0x69, 0x77, 0x75, 0x2d, 0xb9, 0x69, 0x2d, 0x3c, 0x85, 0x4d, 0xeb, 0x01, 0xd4, 0xf6, 0x06, + 0x96, 0x1d, 0x58, 0x4e, 0xee, 0x94, 0x32, 0xea, 0x16, 0x35, 0xe9, 0xeb, 0x10, 0xa8, 0xa8, 0xe0, + 0x49, 0x17, 0x6a, 0x5d, 0x91, 0xd3, 0x33, 0x77, 0xbc, 0xa4, 0xcc, 0x0d, 0x2a, 0x04, 0xc9, 0x3f, + 0xa8, 0xd0, 0xb5, 0x63, 0xa8, 0xee, 0xae, 0x4a, 0xb5, 0xff, 0xe9, 0xf6, 0xa6, 0xf6, 0x4b, 0x10, + 0x6a, 0x01, 0x4f, 0x5f, 0xf8, 0xef, 0x16, 0x20, 0xa9, 0xf8, 0x3c, 0xfd, 0xd1, 0x74, 0x98, 0x1e, + 0x4d, 0xab, 0x93, 0xf8, 0xf8, 0xb2, 0x07, 0x94, 0xf6, 0xef, 0x0b, 0x90, 0x3a, 0xe8, 0x47, 0x5e, + 0x93, 0xe9, 0xe1, 0x92, 0x81, 0x69, 0x2a, 0x3d, 0x1c, 0x49, 0x72, 0xc7, 0xd2, 0xc4, 0x7d, 0xc8, + 0xb6, 0x6b, 0x71, 0x07, 0x9a, 0x6c, 0xfe, 0xdd, 0xf1, 0xb7, 0x6b, 0x59, 0xee, 0x38, 0x19, 0x3c, + 0x19, 0x27, 0x61, 0x52, 0xae, 0xf6, 0x8f, 0x8b, 0x50, 0x7d, 0x6a, 0xb9, 0x0d, 0x68, 0x22, 0x9e, + 0x75, 0x25, 0xe7, 0x6c, 0x3f, 0x32, 0x9a, 0xb5, 0x97, 0x8a, 0x66, 0xcd, 0x7b, 0x6d, 0xf9, 0x13, + 0x62, 0x59, 0xff, 0x6d, 0x01, 0xe4, 0x5a, 0xb3, 0xe1, 0xf8, 0x81, 0xee, 0x18, 0x94, 0x18, 0xe1, + 0xc2, 0x96, 0x37, 0x68, 0x4a, 0x06, 0x16, 0x0a, 0x5d, 0x86, 0xff, 0x56, 0x0b, 0x19, 0xf9, 0x29, + 0xa8, 0x1f, 0xb8, 0x7e, 0xc0, 0x17, 0xaf, 0x62, 0xd2, 0x64, 0x76, 0x5b, 0x96, 0x63, 0xc8, 0x91, + 0x76, 0x67, 0x57, 0x46, 0xbb, 0xb3, 0xb5, 0xdf, 0x2a, 0xc2, 0xd4, 0x27, 0x25, 0x79, 0x42, 0x56, + 0xf4, 0x6f, 0x29, 0x67, 0xf4, 0x6f, 0xf9, 0x3c, 0xd1, 0xbf, 0xda, 0x0f, 0x0a, 0x00, 0x4f, 0x2d, + 0x73, 0x83, 0x99, 0x0c, 0xcc, 0xcd, 0x3d, 0xae, 0xb2, 0xc3, 0x72, 0xff, 0x41, 0x45, 0x3d, 0x12, + 0x0f, 0xca, 0xfd, 0xb0, 0x00, 0x33, 0x7a, 0x22, 0xd0, 0x35, 0xb7, 0xbe, 0x9c, 0x8a, 0x9b, 0x0d, + 
0xe3, 0xb4, 0x92, 0xe5, 0x98, 0x12, 0x4b, 0x5e, 0x8f, 0x32, 0x93, 0xdf, 0x8d, 0x86, 0xfd, 0x50, + 0x4a, 0x71, 0xae, 0xbb, 0x25, 0x38, 0x9f, 0x10, 0x58, 0x5c, 0x9a, 0x48, 0x60, 0x71, 0xfc, 0xc8, + 0x64, 0xf9, 0xb1, 0x47, 0x26, 0x8f, 0xa0, 0xb1, 0xef, 0xb9, 0x3d, 0x1e, 0xbb, 0x2b, 0xef, 0xfe, + 0xbe, 0x95, 0x63, 0xa1, 0xec, 0xed, 0x59, 0x0e, 0x35, 0x79, 0x5c, 0x70, 0x68, 0xb8, 0x5a, 0x53, + 0xf8, 0x18, 0x89, 0xe2, 0xb6, 0x7e, 0x57, 0x48, 0xad, 0x4e, 0x52, 0x6a, 0x38, 0x97, 0xec, 0x08, + 0x74, 0x54, 0x62, 0x92, 0xf1, 0xba, 0xb5, 0xa7, 0x13, 0xaf, 0xab, 0xfd, 0xc5, 0x9a, 0x9a, 0xc0, + 0x9e, 0xb9, 0x24, 0xb8, 0x9f, 0x1e, 0x74, 0xef, 0xd2, 0xa1, 0x53, 0xe8, 0xf5, 0xa7, 0x78, 0x0a, + 0xbd, 0x31, 0x99, 0x53, 0xe8, 0x90, 0xef, 0x14, 0x7a, 0x73, 0x42, 0xa7, 0xd0, 0xa7, 0x26, 0x75, + 0x0a, 0x7d, 0x7a, 0xac, 0x53, 0xe8, 0x33, 0x67, 0x3a, 0x85, 0x7e, 0x52, 0x82, 0xd4, 0x66, 0xfc, + 0x53, 0xc7, 0xdb, 0xff, 0x57, 0x8e, 0xb7, 0xef, 0x16, 0x21, 0x9a, 0x88, 0xcf, 0x19, 0x98, 0xf4, + 0x0e, 0xd4, 0x7b, 0xfa, 0x43, 0x1e, 0x38, 0x9d, 0xe7, 0xee, 0xe8, 0x2d, 0x89, 0x81, 0x21, 0x1a, + 0xf1, 0x01, 0xac, 0xf0, 0xfe, 0x86, 0xdc, 0x2e, 0x8c, 0xe8, 0x2a, 0x08, 0x61, 0x24, 0x8d, 0xfe, + 0x63, 0x4c, 0x8c, 0xf6, 0x6f, 0x8a, 0x20, 0x2f, 0xfa, 0x20, 0x14, 0x2a, 0xfb, 0xd6, 0x43, 0x6a, + 0xe6, 0x0e, 0x77, 0x8e, 0xdd, 0xe8, 0x2f, 0x7c, 0x34, 0xbc, 0x00, 0x05, 0x3a, 0x37, 0xbe, 0x0b, + 0x9f, 0x9b, 0xec, 0xbf, 0x1c, 0xc6, 0xf7, 0xb8, 0xef, 0x4e, 0x1a, 0xdf, 0x45, 0x11, 0x2a, 0x19, + 0xc2, 0xd6, 0xcf, 0xc3, 0x2f, 0x72, 0xbb, 0x18, 0x13, 0x61, 0x1c, 0xca, 0xd6, 0xef, 0x8b, 0x34, + 0x14, 0x52, 0x46, 0xfb, 0x17, 0xbf, 0xff, 0xc3, 0xeb, 0xcf, 0xfd, 0xe0, 0x87, 0xd7, 0x9f, 0xfb, + 0xe8, 0x87, 0xd7, 0x9f, 0xfb, 0x95, 0xd3, 0xeb, 0x85, 0xef, 0x9f, 0x5e, 0x2f, 0xfc, 0xe0, 0xf4, + 0x7a, 0xe1, 0xa3, 0xd3, 0xeb, 0x85, 0xff, 0x7c, 0x7a, 0xbd, 0xf0, 0x57, 0xfe, 0xcb, 0xf5, 0xe7, + 0x7e, 0xe1, 0x4b, 0x51, 0x13, 0x16, 0x55, 0x13, 0x16, 0x95, 0xc0, 0xc5, 0xfe, 0x61, 0x77, 0x91, + 0x35, 0x21, 0x2a, 0x51, 0x4d, 0xf8, 
0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xad, 0x0f, 0x6c, 0xf9, + 0xf5, 0x9e, 0x00, 0x00, } func (m *AbstractPodTemplate) Marshal() (dAtA []byte, err error) { @@ -6393,6 +6425,34 @@ func (m *MonoVertex) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *MonoVertexLifecycle) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MonoVertexLifecycle) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MonoVertexLifecycle) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.DesiredPhase) + copy(dAtA[i:], m.DesiredPhase) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DesiredPhase))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *MonoVertexLimits) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -6500,6 +6560,16 @@ func (m *MonoVertexSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + { + size, err := m.Lifecycle.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a { size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -10653,6 +10723,17 @@ func (m *MonoVertex) Size() (n int) { return n } +func (m *MonoVertexLifecycle) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DesiredPhase) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *MonoVertexLimits) Size() (n int) { if m == nil { return 0 @@ -10739,6 +10820,8 @@ func (m *MonoVertexSpec) Size() (n int) { } l = m.UpdateStrategy.Size() n += 1 + l + sovGenerated(uint64(l)) + l = m.Lifecycle.Size() + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -12599,6 +12682,16 @@ 
func (this *MonoVertex) String() string { }, "") return s } +func (this *MonoVertexLifecycle) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MonoVertexLifecycle{`, + `DesiredPhase:` + fmt.Sprintf("%v", this.DesiredPhase) + `,`, + `}`, + }, "") + return s +} func (this *MonoVertexLimits) String() string { if this == nil { return "nil" @@ -12658,6 +12751,7 @@ func (this *MonoVertexSpec) String() string { `Sidecars:` + repeatedStringForSidecars + `,`, `DaemonTemplate:` + strings.Replace(this.DaemonTemplate.String(), "DaemonTemplate", "DaemonTemplate", 1) + `,`, `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "UpdateStrategy", "UpdateStrategy", 1), `&`, ``, 1) + `,`, + `Lifecycle:` + strings.Replace(strings.Replace(this.Lifecycle.String(), "MonoVertexLifecycle", "MonoVertexLifecycle", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -23347,6 +23441,88 @@ func (m *MonoVertex) Unmarshal(dAtA []byte) error { } return nil } +func (m *MonoVertexLifecycle) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MonoVertexLifecycle: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MonoVertexLifecycle: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredPhase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DesiredPhase = MonoVertexPhase(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *MonoVertexLimits) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -24000,6 +24176,39 @@ func (m *MonoVertexSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Lifecycle", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Lifecycle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/pkg/apis/numaflow/v1alpha1/generated.proto b/pkg/apis/numaflow/v1alpha1/generated.proto index 035f4cf46b..f96a526599 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.proto +++ b/pkg/apis/numaflow/v1alpha1/generated.proto @@ -899,6 +899,13 @@ message MonoVertex { optional 
MonoVertexStatus status = 3; } +message MonoVertexLifecycle { + // DesiredPhase used to bring the pipeline from current phase to desired phase + // +kubebuilder:default=Running + // +optional + optional string desiredPhase = 1; +} + message MonoVertexLimits { // Read batch size from the source. // +kubebuilder:default=500 @@ -965,6 +972,11 @@ message MonoVertexSpec { // +kubebuilder:default={"type": "RollingUpdate", "rollingUpdate": {"maxUnavailable": "25%"}} // +optional optional UpdateStrategy updateStrategy = 12; + + // Lifecycle defines the Lifecycle properties of a MonoVertex + // +kubebuilder:default={"desiredPhase": Running} + // +optional + optional MonoVertexLifecycle lifecycle = 13; } message MonoVertexStatus { diff --git a/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go index e3b8e28f64..d271c34144 100644 --- a/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go +++ b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go @@ -38,6 +38,7 @@ const ( MonoVertexPhaseUnknown MonoVertexPhase = "" MonoVertexPhaseRunning MonoVertexPhase = "Running" MonoVertexPhaseFailed MonoVertexPhase = "Failed" + MonoVertexPhasePaused MonoVertexPhase = "Paused" // MonoVertexConditionDeployed has the status True when the MonoVertex // has its sub resources created and deployed. 
@@ -79,6 +80,10 @@ func (mv MonoVertex) getReplicas() int { } func (mv MonoVertex) CalculateReplicas() int { + // If we are pausing the MonoVertex then we should have the desired replicas as 0 + if mv.Spec.Lifecycle.GetDesiredPhase() == MonoVertexPhasePaused { + return 0 + } desiredReplicas := mv.getReplicas() // Don't allow replicas to be out of the range of min and max when auto scaling is enabled if s := mv.Spec.Scale; !s.Disabled { @@ -307,8 +312,7 @@ func (mv MonoVertex) simpleCopy() MonoVertex { m.Spec.Limits.ReadTimeout = &metav1.Duration{Duration: DefaultReadTimeout} } m.Spec.UpdateStrategy = UpdateStrategy{} - // TODO: lifecycle - // mvVtxCopy.Spec.Lifecycle = Lifecycle{} + m.Spec.Lifecycle = MonoVertexLifecycle{} return m } @@ -442,6 +446,10 @@ type MonoVertexSpec struct { // +kubebuilder:default={"type": "RollingUpdate", "rollingUpdate": {"maxUnavailable": "25%"}} // +optional UpdateStrategy UpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,12,opt,name=updateStrategy"` + // Lifecycle defines the Lifecycle properties of a MonoVertex + // +kubebuilder:default={"desiredPhase": Running} + // +optional + Lifecycle MonoVertexLifecycle `json:"lifecycle,omitempty" protobuf:"bytes,13,opt,name=lifecycle"` } func (mvspec MonoVertexSpec) DeepCopyWithoutReplicas() MonoVertexSpec { @@ -595,19 +603,23 @@ func (mvs *MonoVertexStatus) MarkPhaseRunning() { mvs.MarkPhase(MonoVertexPhaseRunning, "", "") } +// MarkPhasePaused set the Pipeline has been paused. +func (mvs *MonoVertexStatus) MarkPhasePaused() { + mvs.MarkPhase(MonoVertexPhasePaused, "", "MonoVertex paused") +} + // IsHealthy indicates whether the MonoVertex is in healthy status // It returns false if any issues exists // True indicates that the MonoVertex is healthy -// TODO: Add support for paused whenever added in MonoVtx? 
func (mvs *MonoVertexStatus) IsHealthy() bool { // check for the phase field first switch mvs.Phase { // Directly return an error if the phase is failed case MonoVertexPhaseFailed: return false - // Check if the MonoVertex is ready if the phase is running, + // Check if the MonoVertex is ready if the phase is running or Paused, // We check if all the required conditions are true for it to be healthy - case MonoVertexPhaseRunning: + case MonoVertexPhaseRunning, MonoVertexPhasePaused: return mvs.IsReady() default: return false @@ -621,3 +633,20 @@ type MonoVertexList struct { metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` Items []MonoVertex `json:"items" protobuf:"bytes,2,rep,name=items"` } + +type MonoVertexLifecycle struct { + // DesiredPhase used to bring the pipeline from current phase to desired phase + // +kubebuilder:default=Running + // +optional + DesiredPhase MonoVertexPhase `json:"desiredPhase,omitempty" protobuf:"bytes,1,opt,name=desiredPhase"` +} + +// GetDesiredPhase is used to fetch the desired lifecycle phase for a MonoVertex +func (lc MonoVertexLifecycle) GetDesiredPhase() MonoVertexPhase { + switch lc.DesiredPhase { + case MonoVertexPhasePaused: + return MonoVertexPhasePaused + default: + return MonoVertexPhaseRunning + } +} diff --git a/pkg/apis/numaflow/v1alpha1/mono_vertex_types_test.go b/pkg/apis/numaflow/v1alpha1/mono_vertex_types_test.go index ae6a62c09d..98bb801003 100644 --- a/pkg/apis/numaflow/v1alpha1/mono_vertex_types_test.go +++ b/pkg/apis/numaflow/v1alpha1/mono_vertex_types_test.go @@ -103,6 +103,22 @@ func TestMonoVertex_MarkPhaseRunning(t *testing.T) { } } +func TestMonoVertex_MarkPhasePaused(t *testing.T) { + mvs := MonoVertexStatus{} + mvs.MarkPhasePaused() + + if mvs.Phase != MonoVertexPhasePaused { + t.Errorf("MarkPhaseRunning did not set the Phase to Paused, got %v", mvs.Phase) + } +} + +func TestMonoVertex_GetDesiredPhase(t *testing.T) { + lc := MonoVertexLifecycle{} + assert.Equal(t, 
MonoVertexPhaseRunning, lc.GetDesiredPhase()) + lc.DesiredPhase = MonoVertexPhasePaused + assert.Equal(t, MonoVertexPhasePaused, lc.GetDesiredPhase()) +} + func TestMonoVertex_MarkDaemonUnHealthy(t *testing.T) { mvs := MonoVertexStatus{} mvs.MarkDaemonUnHealthy("reason", "message") @@ -369,6 +385,22 @@ func TestMonoVertex_CalculateReplicas(t *testing.T) { } assert.Equal(t, 5, mv.CalculateReplicas()) }) + + t.Run("phase paused", func(t *testing.T) { + replicas := int32(10) + mv := MonoVertex{ + Spec: MonoVertexSpec{ + Lifecycle: MonoVertexLifecycle{DesiredPhase: MonoVertexPhasePaused}, + Replicas: &replicas, + Scale: Scale{ + Disabled: false, + Min: ptr.To[int32](2), + Max: ptr.To[int32](5), + }, + }, + } + assert.Equal(t, 0, mv.CalculateReplicas()) + }) } func TestMonoVertex_GetServiceObj(t *testing.T) { diff --git a/pkg/apis/numaflow/v1alpha1/openapi_generated.go b/pkg/apis/numaflow/v1alpha1/openapi_generated.go index bd59769897..83186d25b7 100644 --- a/pkg/apis/numaflow/v1alpha1/openapi_generated.go +++ b/pkg/apis/numaflow/v1alpha1/openapi_generated.go @@ -74,6 +74,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Log": schema_pkg_apis_numaflow_v1alpha1_Log(ref), "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Metadata": schema_pkg_apis_numaflow_v1alpha1_Metadata(ref), "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.MonoVertex": schema_pkg_apis_numaflow_v1alpha1_MonoVertex(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.MonoVertexLifecycle": schema_pkg_apis_numaflow_v1alpha1_MonoVertexLifecycle(ref), "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.MonoVertexLimits": schema_pkg_apis_numaflow_v1alpha1_MonoVertexLimits(ref), "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.MonoVertexList": schema_pkg_apis_numaflow_v1alpha1_MonoVertexList(ref), "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.MonoVertexSpec": 
schema_pkg_apis_numaflow_v1alpha1_MonoVertexSpec(ref), @@ -3057,6 +3058,25 @@ func schema_pkg_apis_numaflow_v1alpha1_MonoVertex(ref common.ReferenceCallback) } } +func schema_pkg_apis_numaflow_v1alpha1_MonoVertexLifecycle(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "desiredPhase": { + SchemaProps: spec.SchemaProps{ + Description: "DesiredPhase used to bring the pipeline from current phase to desired phase", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + func schema_pkg_apis_numaflow_v1alpha1_MonoVertexLimits(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -3369,11 +3389,18 @@ func schema_pkg_apis_numaflow_v1alpha1_MonoVertexSpec(ref common.ReferenceCallba Ref: ref("github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.UpdateStrategy"), }, }, + "lifecycle": { + SchemaProps: spec.SchemaProps{ + Description: "Lifecycle defines the Lifecycle properties of a MonoVertex", + Default: map[string]interface{}{}, + Ref: ref("github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.MonoVertexLifecycle"), + }, + }, }, }, }, Dependencies: []string{ - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ContainerTemplate", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.DaemonTemplate", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Metadata", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.MonoVertexLimits", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Scale", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Sink", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Source", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.UpdateStrategy", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.LocalObjectReference", 
"k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodResourceClaim", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.Volume"}, + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ContainerTemplate", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.DaemonTemplate", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Metadata", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.MonoVertexLifecycle", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.MonoVertexLimits", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Scale", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Sink", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Source", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.UpdateStrategy", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodResourceClaim", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.Volume"}, } } diff --git a/pkg/apis/numaflow/v1alpha1/pipeline_types.go b/pkg/apis/numaflow/v1alpha1/pipeline_types.go index ff8dfaf5e5..3307adf743 100644 --- a/pkg/apis/numaflow/v1alpha1/pipeline_types.go +++ b/pkg/apis/numaflow/v1alpha1/pipeline_types.go @@ -783,13 +783,11 @@ func (pls *PipelineStatus) IsHealthy() bool { switch pls.Phase { case PipelinePhaseFailed: return false - case PipelinePhaseRunning: + case PipelinePhaseRunning, PipelinePhasePaused: return pls.IsReady() case PipelinePhaseDeleting, PipelinePhasePausing: // Transient phases, return true return true - case PipelinePhasePaused: - return true default: return false } diff --git a/pkg/apis/numaflow/v1alpha1/pipeline_types_test.go b/pkg/apis/numaflow/v1alpha1/pipeline_types_test.go index d7e5c334b9..34835e7b07 100644 --- a/pkg/apis/numaflow/v1alpha1/pipeline_types_test.go +++ 
b/pkg/apis/numaflow/v1alpha1/pipeline_types_test.go @@ -572,7 +572,7 @@ func TestPipelineStatus_IsHealthy(t *testing.T) { name: "Paused phase", phase: PipelinePhasePaused, ready: false, - want: true, + want: false, }, { name: "Unknown phase", diff --git a/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go index 49b93292ff..c131980d96 100644 --- a/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go @@ -1409,6 +1409,22 @@ func (in *MonoVertex) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonoVertexLifecycle) DeepCopyInto(out *MonoVertexLifecycle) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonoVertexLifecycle. +func (in *MonoVertexLifecycle) DeepCopy() *MonoVertexLifecycle { + if in == nil { + return nil + } + out := new(MonoVertexLifecycle) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *MonoVertexLimits) DeepCopyInto(out *MonoVertexLimits) { *out = *in @@ -1525,6 +1541,7 @@ func (in *MonoVertexSpec) DeepCopyInto(out *MonoVertexSpec) { (*in).DeepCopyInto(*out) } in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy) + out.Lifecycle = in.Lifecycle return } diff --git a/pkg/reconciler/monovertex/controller.go b/pkg/reconciler/monovertex/controller.go index 3fbfb3c1ab..5e31ed6f28 100644 --- a/pkg/reconciler/monovertex/controller.go +++ b/pkg/reconciler/monovertex/controller.go @@ -122,8 +122,6 @@ func (mr *monoVertexReconciler) reconcile(ctx context.Context, monoVtx *dfv1.Mon return ctrl.Result{}, err } - // TODO: handle lifecycle changes - if err := mr.orchestratePods(ctx, monoVtx); err != nil { monoVtx.Status.MarkDeployFailed("OrchestratePodsFailed", err.Error()) mr.recorder.Eventf(monoVtx, corev1.EventTypeWarning, "OrchestratePodsFailed", "OrchestratePodsFailed: %s", err.Error()) @@ -132,13 +130,21 @@ func (mr *monoVertexReconciler) reconcile(ctx context.Context, monoVtx *dfv1.Mon monoVtx.Status.MarkDeployed() - // Mark it running before checking the status of the pods - monoVtx.Status.MarkPhaseRunning() + // Update the phase based on the DesiredPhase from the lifecycle, this should encompass + // the Paused and running states. 
+ originalPhase := monoVtx.Status.Phase + monoVtx.Status.MarkPhase(monoVtx.Spec.Lifecycle.GetDesiredPhase(), "", "") + // If the phase has changed, log the event + if monoVtx.Status.Phase != originalPhase { + log.Infow("Updated MonoVertex phase", zap.String("originalPhase", string(originalPhase)), zap.String("newPhase", string(monoVtx.Status.Phase))) + mr.recorder.Eventf(monoVtx, corev1.EventTypeNormal, "UpdateMonoVertexPhase", "Updated MonoVertex phase from %s to %s", string(originalPhase), string(monoVtx.Status.Phase)) + } // Check children resource status if err := mr.checkChildrenResourceStatus(ctx, monoVtx); err != nil { return ctrl.Result{}, fmt.Errorf("failed to check mono vertex children resource status, %w", err) } + return ctrl.Result{}, nil } diff --git a/pkg/reconciler/monovertex/controller_test.go b/pkg/reconciler/monovertex/controller_test.go index 8e1f179db4..c9c5c003c2 100644 --- a/pkg/reconciler/monovertex/controller_test.go +++ b/pkg/reconciler/monovertex/controller_test.go @@ -35,11 +35,12 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + "github.com/stretchr/testify/assert" + dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" "github.com/numaproj/numaflow/pkg/reconciler" "github.com/numaproj/numaflow/pkg/reconciler/monovertex/scaling" sharedutil "github.com/numaproj/numaflow/pkg/shared/util" - "github.com/stretchr/testify/assert" ) const ( diff --git a/pkg/reconciler/monovertex/scaling/scaling.go b/pkg/reconciler/monovertex/scaling/scaling.go index 0b35265190..7f9ee9ea84 100644 --- a/pkg/reconciler/monovertex/scaling/scaling.go +++ b/pkg/reconciler/monovertex/scaling/scaling.go @@ -171,11 +171,11 @@ func (s *Scaler) scaleOneMonoVertex(ctx context.Context, key string, worker int) log.Infof("MonoVertex not in Running phase, skip scaling.") return nil } - // TODO: lifecycle - // if monoVtx.Spec.Lifecycle.GetDesiredPhase() != dfv1.MonoVertexPhaseRunning { - //
log.Info("MonoVertex is pausing, skip scaling.") - // return nil - // } + + if monoVtx.Spec.Lifecycle.GetDesiredPhase() != dfv1.MonoVertexPhaseRunning { + log.Info("MonoVertex desiredPhase is not running, skip scaling.") + return nil + } if int(monoVtx.Status.Replicas) != monoVtx.CalculateReplicas() { log.Infof("MonoVertex %s might be under processing, replicas mismatch, skip scaling.", monoVtx.Name) return nil diff --git a/rust/numaflow-models/src/models/mod.rs b/rust/numaflow-models/src/models/mod.rs index 423e21fd9c..bfbcd121d0 100644 --- a/rust/numaflow-models/src/models/mod.rs +++ b/rust/numaflow-models/src/models/mod.rs @@ -90,6 +90,8 @@ pub mod metadata; pub use self::metadata::Metadata; pub mod mono_vertex; pub use self::mono_vertex::MonoVertex; +pub mod mono_vertex_lifecycle; +pub use self::mono_vertex_lifecycle::MonoVertexLifecycle; pub mod mono_vertex_limits; pub use self::mono_vertex_limits::MonoVertexLimits; pub mod mono_vertex_list; diff --git a/rust/numaflow-models/src/models/mono_vertex_lifecycle.rs b/rust/numaflow-models/src/models/mono_vertex_lifecycle.rs new file mode 100644 index 0000000000..90d5b97422 --- /dev/null +++ b/rust/numaflow-models/src/models/mono_vertex_lifecycle.rs @@ -0,0 +1,32 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. 
+ +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct MonoVertexLifecycle { + /// DesiredPhase used to bring the pipeline from current phase to desired phase + #[serde(rename = "desiredPhase", skip_serializing_if = "Option::is_none")] + pub desired_phase: Option, +} + +impl MonoVertexLifecycle { + pub fn new() -> MonoVertexLifecycle { + MonoVertexLifecycle { + desired_phase: None, + } + } +} diff --git a/rust/numaflow-models/src/models/mono_vertex_spec.rs b/rust/numaflow-models/src/models/mono_vertex_spec.rs index 6d4068bee7..7cad8cc898 100644 --- a/rust/numaflow-models/src/models/mono_vertex_spec.rs +++ b/rust/numaflow-models/src/models/mono_vertex_spec.rs @@ -41,6 +41,8 @@ pub struct MonoVertexSpec { /// List of customized init containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ #[serde(rename = "initContainers", skip_serializing_if = "Option::is_none")] pub init_containers: Option>, + #[serde(rename = "lifecycle", skip_serializing_if = "Option::is_none")] + pub lifecycle: Option>, #[serde(rename = "limits", skip_serializing_if = "Option::is_none")] pub limits: Option>, #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] @@ -96,6 +98,7 @@ impl MonoVertexSpec { dns_policy: None, image_pull_secrets: None, init_containers: None, + lifecycle: None, limits: None, metadata: None, node_selector: None, From 895a7780410b7bb5a43a4ab6f4dd55c1c145561f Mon Sep 17 00:00:00 2001 From: Keran Yang Date: Wed, 25 Sep 2024 17:47:12 -0400 Subject: [PATCH 070/188] feat: container-type level version compatibility check (#2087) Signed-off-by: Keran Yang --- pkg/sdkclient/const.go | 2 - pkg/sdkclient/serverinfo/serverinfo.go | 26 +++++++- pkg/sdkclient/serverinfo/serverinfo_test.go | 38 ++++++++--- pkg/sdkclient/serverinfo/types.go | 74 ++++++++++++++++++--- 4 files changed, 115 insertions(+), 25 deletions(-) diff --git a/pkg/sdkclient/const.go b/pkg/sdkclient/const.go index 
b7ace077cf..54f88b66cb 100644 --- a/pkg/sdkclient/const.go +++ b/pkg/sdkclient/const.go @@ -38,7 +38,6 @@ const ( // Server information file configs MapServerInfoFile = "/var/run/numaflow/mapper-server-info" - MapStreamServerInfoFile = "/var/run/numaflow/mapstreamer-server-info" ReduceServerInfoFile = "/var/run/numaflow/reducer-server-info" ReduceStreamServerInfoFile = "/var/run/numaflow/reducestreamer-server-info" SessionReduceServerInfoFile = "/var/run/numaflow/sessionreducer-server-info" @@ -47,5 +46,4 @@ const ( FbSinkServerInfoFile = "/var/run/numaflow/fb-sinker-server-info" SourceServerInfoFile = "/var/run/numaflow/sourcer-server-info" SourceTransformerServerInfoFile = "/var/run/numaflow/sourcetransformer-server-info" - BatchMapServerInfoFile = "/var/run/numaflow/batchmapper-server-info" ) diff --git a/pkg/sdkclient/serverinfo/serverinfo.go b/pkg/sdkclient/serverinfo/serverinfo.go index d01acd1cda..f94d83e072 100644 --- a/pkg/sdkclient/serverinfo/serverinfo.go +++ b/pkg/sdkclient/serverinfo/serverinfo.go @@ -67,6 +67,10 @@ func waitForServerInfo(timeout time.Duration, filePath string) (*ServerInfo, err minNumaflowVersion := serverInfo.MinimumNumaflowVersion sdkLanguage := serverInfo.Language numaflowVersion := numaflow.GetVersion().Version + containerType, err := getContainerType(filePath) + if err != nil { + return nil, fmt.Errorf("failed to get container type: %w", err) + } // If MinimumNumaflowVersion is empty, skip the numaflow compatibility check as there was an // error writing server info file on the SDK side @@ -87,7 +91,7 @@ func waitForServerInfo(timeout time.Duration, filePath string) (*ServerInfo, err if sdkVersion == "" || sdkLanguage == "" { log.Printf("warning: failed to get the SDK version/language, skipping SDK version compatibility check") } else { - if err := checkSDKCompatibility(sdkVersion, sdkLanguage, minimumSupportedSDKVersions); err != nil { + if err := checkSDKCompatibility(sdkVersion, sdkLanguage, containerType, 
minimumSupportedSDKVersions); err != nil { return nil, fmt.Errorf("SDK version %s does not satisfy the minimum required by numaflow version %s: %w", sdkVersion, numaflowVersion, err) } @@ -176,8 +180,11 @@ func checkNumaflowCompatibility(numaflowVersion string, minNumaflowVersion strin } // checkSDKCompatibility checks if the current SDK version is compatible with the numaflow version -func checkSDKCompatibility(sdkVersion string, sdkLanguage Language, minSupportedSDKVersions sdkConstraints) error { - if sdkRequiredVersion, ok := minSupportedSDKVersions[sdkLanguage]; ok { +func checkSDKCompatibility(sdkVersion string, sdkLanguage Language, containerType ContainerType, minSupportedSDKVersions sdkConstraints) error { + if _, ok := minSupportedSDKVersions[sdkLanguage]; !ok { + return fmt.Errorf("SDK language %s is not supported", sdkLanguage) + } + if sdkRequiredVersion, ok := minSupportedSDKVersions[sdkLanguage][containerType]; ok { sdkConstraint := fmt.Sprintf(">= %s", sdkRequiredVersion) if sdkLanguage == Python { // Python pre-releases/releases follow PEP440 specification which requires a different library for parsing @@ -206,6 +213,19 @@ func checkSDKCompatibility(sdkVersion string, sdkLanguage Language, minSupported sdkVersionSemVer.String(), humanReadable(sdkRequiredVersion), err) } } + } else { + return fmt.Errorf("SDK container type %s is not supported", containerType) } return nil } + +// getContainerType returns the container type from the server info file path +// serverInfoFilePath is in the format of "/var/run/numaflow/{ContainerType}-server-info" +func getContainerType(serverInfoFilePath string) (ContainerType, error) { + splits := strings.Split(serverInfoFilePath, "/") + if containerType := strings.TrimSuffix(splits[len(splits)-1], "-server-info"); containerType == "" { + return "", fmt.Errorf("failed to get container type from server info file path: %s", serverInfoFilePath) + } else { + return ContainerType(containerType), nil + } +} diff --git 
a/pkg/sdkclient/serverinfo/serverinfo_test.go b/pkg/sdkclient/serverinfo/serverinfo_test.go index ad7f06e690..105775de17 100644 --- a/pkg/sdkclient/serverinfo/serverinfo_test.go +++ b/pkg/sdkclient/serverinfo/serverinfo_test.go @@ -29,7 +29,7 @@ import ( ) func Test_SDKServerInfo(t *testing.T) { - filepath := os.TempDir() + "/server-info" + filepath := os.TempDir() + "/sourcer-server-info" defer os.Remove(filepath) info := &ServerInfo{ Protocol: TCP, @@ -185,10 +185,18 @@ func Test_CheckNumaflowCompatibility(t *testing.T) { // this test suite is to test SDK compatibility check when all the minimum-supported versions are stable releases func Test_CheckSDKCompatibility_MinimumBeingStableReleases(t *testing.T) { var testMinimumSupportedSDKVersions = sdkConstraints{ - Python: "0.6.0rc100", - Go: "0.6.0-z", - Java: "0.6.0-z", - Rust: "0.1.0-z", + Python: map[ContainerType]string{ + sourcer: "0.6.0rc100", + }, + Go: map[ContainerType]string{ + sourcer: "0.6.0-z", + }, + Java: map[ContainerType]string{ + sourcer: "0.6.0-z", + }, + Rust: map[ContainerType]string{ + sourcer: "0.1.0-z", + }, } tests := []struct { name string @@ -275,7 +283,7 @@ func Test_CheckSDKCompatibility_MinimumBeingStableReleases(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := checkSDKCompatibility(tt.sdkVersion, tt.sdkLanguage, tt.minimumSupportedSDKVersions) + err := checkSDKCompatibility(tt.sdkVersion, tt.sdkLanguage, sourcer, tt.minimumSupportedSDKVersions) if tt.shouldErr { assert.Error(t, err, "Expected error") assert.Contains(t, err.Error(), tt.errMessage) @@ -289,10 +297,18 @@ func Test_CheckSDKCompatibility_MinimumBeingStableReleases(t *testing.T) { // this test suite is to test SDK compatibility check when all the minimum-supported versions are pre-releases func Test_CheckSDKCompatibility_MinimumBeingPreReleases(t *testing.T) { var testMinimumSupportedSDKVersions = sdkConstraints{ - Python: "0.6.0b1", - Go: "0.6.0-rc2", - Java: "0.6.0-rc2", - Rust: 
"0.1.0-rc3", + Python: map[ContainerType]string{ + sourcer: "0.6.0b1", + }, + Go: map[ContainerType]string{ + sourcer: "0.6.0-rc2", + }, + Java: map[ContainerType]string{ + sourcer: "0.6.0-rc2", + }, + Rust: map[ContainerType]string{ + sourcer: "0.1.0-rc3", + }, } tests := []struct { name string @@ -379,7 +395,7 @@ func Test_CheckSDKCompatibility_MinimumBeingPreReleases(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := checkSDKCompatibility(tt.sdkVersion, tt.sdkLanguage, tt.minimumSupportedSDKVersions) + err := checkSDKCompatibility(tt.sdkVersion, tt.sdkLanguage, sourcer, tt.minimumSupportedSDKVersions) if tt.shouldErr { assert.Error(t, err, "Expected error") assert.Contains(t, err.Error(), tt.errMessage) diff --git a/pkg/sdkclient/serverinfo/types.go b/pkg/sdkclient/serverinfo/types.go index 9e4a152d03..23f6e2394d 100644 --- a/pkg/sdkclient/serverinfo/types.go +++ b/pkg/sdkclient/serverinfo/types.go @@ -27,7 +27,23 @@ const ( Rust Language = "rust" ) -type sdkConstraints map[Language]string +type ContainerType string + +// the string content matches the corresponding server info file name. +// DO NOT change it unless the server info file name is changed. +const ( + sourcer ContainerType = "sourcer" + sourcetransformer ContainerType = "sourcetransformer" + sinker ContainerType = "sinker" + mapper ContainerType = "mapper" + reducer ContainerType = "reducer" + reducestreamer ContainerType = "reducestreamer" + sessionreducer ContainerType = "sessionreducer" + sideinput ContainerType = "sideinput" + fbsinker ContainerType = "fb-sinker" +) + +type sdkConstraints map[Language]map[ContainerType]string /* minimumSupportedSDKVersions is the minimum supported version of each SDK for the current numaflow version. @@ -70,14 +86,54 @@ A constraint ">=0.8.0-z" will match any pre-release version of 0.8.0, including More details about version comparison can be found in the PEP 440 and semver documentation. 
*/ var minimumSupportedSDKVersions = sdkConstraints{ - // meaning the minimum supported python SDK version is 0.8.0 - Python: "0.8.0rc100", - // meaning the minimum supported go SDK version is 0.8.0 - Go: "0.8.0-z", - // meaning the minimum supported java SDK version is 0.8.0 - Java: "0.8.0-z", - // meaning the minimum supported rust SDK version is 0.1.0 - Rust: "0.1.0-z", + Python: map[ContainerType]string{ + // meaning the minimum supported python SDK version is 0.8.0 + sourcer: "0.8.0rc100", + sourcetransformer: "0.8.0rc100", + sinker: "0.8.0rc100", + mapper: "0.8.0rc100", + reducer: "0.8.0rc100", + reducestreamer: "0.8.0rc100", + sessionreducer: "0.8.0rc100", + sideinput: "0.8.0rc100", + fbsinker: "0.8.0rc100", + }, + Go: map[ContainerType]string{ + // meaning the minimum supported go SDK version is 0.8.0 + sourcer: "0.8.0-z", + sourcetransformer: "0.8.0-z", + sinker: "0.8.0-z", + mapper: "0.8.0-z", + reducer: "0.8.0-z", + reducestreamer: "0.8.0-z", + sessionreducer: "0.8.0-z", + sideinput: "0.8.0-z", + fbsinker: "0.8.0-z", + }, + Java: map[ContainerType]string{ + // meaning the minimum supported java SDK version is 0.8.0 + sourcer: "0.8.0-z", + sourcetransformer: "0.8.0-z", + sinker: "0.8.0-z", + mapper: "0.8.0-z", + reducer: "0.8.0-z", + reducestreamer: "0.8.0-z", + sessionreducer: "0.8.0-z", + sideinput: "0.8.0-z", + fbsinker: "0.8.0-z", + }, + Rust: map[ContainerType]string{ + // meaning the minimum supported rust SDK version is 0.1.0 + sourcer: "0.1.0-z", + sourcetransformer: "0.1.0-z", + sinker: "0.1.0-z", + mapper: "0.1.0-z", + reducer: "0.1.0-z", + reducestreamer: "0.1.0-z", + sessionreducer: "0.1.0-z", + sideinput: "0.1.0-z", + fbsinker: "0.1.0-z", + }, } // humanReadable returns the human-readable minimum supported version.
From 6cdec2d6d1325866e99a204389bc9dc460146cbf Mon Sep 17 00:00:00 2001 From: Yashash H L Date: Fri, 27 Sep 2024 01:18:49 +0530 Subject: [PATCH 071/188] feat: Bidirectional Streaming for UDSink (#2080) Signed-off-by: Yashash H L Signed-off-by: Vigith Maurice Co-authored-by: Vigith Maurice --- go.mod | 2 +- go.sum | 4 +- pkg/apis/proto/sink/v1/sink.proto | 39 ++++-- pkg/sdkclient/sinker/client.go | 115 +++++++++++++++--- pkg/sdkclient/sinker/client_test.go | 34 ++++-- pkg/sdkclient/sinker/interface.go | 2 +- pkg/sinks/sink.go | 8 +- pkg/sinks/udsink/sink.go | 16 +-- pkg/sinks/udsink/udsink_grpc.go | 18 +-- pkg/sinks/udsink/udsink_grpc_test.go | 67 ++++++----- rust/Cargo.lock | 138 ++++++++++++---------- rust/monovertex/Cargo.toml | 2 +- rust/monovertex/proto/sink.proto | 55 ++++++--- rust/monovertex/src/forwarder.rs | 26 ++-- rust/monovertex/src/message.rs | 19 +-- rust/monovertex/src/sink.rs | 125 ++++++++++++++------ rust/monovertex/src/source.rs | 1 - rust/servesink/Cargo.toml | 2 +- test/map-e2e/map_test.go | 23 ++-- test/map-e2e/testdata/flatmap-batch.yaml | 55 ++++----- test/map-e2e/testdata/flatmap-stream.yaml | 61 +++++----- test/map-e2e/testdata/flatmap.yaml | 56 ++++----- 22 files changed, 546 insertions(+), 322 deletions(-) diff --git a/go.mod b/go.mod index c251e8216b..a015dcac65 100644 --- a/go.mod +++ b/go.mod @@ -32,7 +32,7 @@ require ( github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe github.com/nats-io/nats-server/v2 v2.10.20 github.com/nats-io/nats.go v1.37.0 - github.com/numaproj/numaflow-go v0.8.2-0.20240918054944-0fd13d430793 + github.com/numaproj/numaflow-go v0.8.2-0.20240923064822-e16694a878d0 github.com/prometheus/client_golang v1.18.0 github.com/prometheus/client_model v0.5.0 github.com/prometheus/common v0.45.0 diff --git a/go.sum b/go.sum index 722bd72ee3..b17e994439 100644 --- a/go.sum +++ b/go.sum @@ -485,8 +485,8 @@ github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDm github.com/nats-io/nuid 
v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/numaproj/numaflow-go v0.8.2-0.20240918054944-0fd13d430793 h1:kUQw1LsUvmTjqFfcia6DZOxy8qCQwvdY0TpOnR8w3Xg= -github.com/numaproj/numaflow-go v0.8.2-0.20240918054944-0fd13d430793/go.mod h1:g4JZOyUPhjfhv+kR0sX5d8taw/dasgKPXLvQBi39mJ4= +github.com/numaproj/numaflow-go v0.8.2-0.20240923064822-e16694a878d0 h1:qPqZfJdPdsz4qymyzMSNICQe/xBnx9P/G3hRbC1DR7k= +github.com/numaproj/numaflow-go v0.8.2-0.20240923064822-e16694a878d0/go.mod h1:g4JZOyUPhjfhv+kR0sX5d8taw/dasgKPXLvQBi39mJ4= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= diff --git a/pkg/apis/proto/sink/v1/sink.proto b/pkg/apis/proto/sink/v1/sink.proto index caadaf66b6..255b9b6969 100644 --- a/pkg/apis/proto/sink/v1/sink.proto +++ b/pkg/apis/proto/sink/v1/sink.proto @@ -27,7 +27,7 @@ package sink.v1; service Sink { // SinkFn writes the request to a user defined sink. - rpc SinkFn(stream SinkRequest) returns (SinkResponse); + rpc SinkFn(stream SinkRequest) returns (stream SinkResponse); // IsReady is the heartbeat endpoint for gRPC. rpc IsReady(google.protobuf.Empty) returns (ReadyResponse); @@ -37,12 +37,32 @@ service Sink { * SinkRequest represents a request element. 
*/ message SinkRequest { - repeated string keys = 1; - bytes value = 2; - google.protobuf.Timestamp event_time = 3; - google.protobuf.Timestamp watermark = 4; - string id = 5; - map headers = 6; + message Request { + repeated string keys = 1; + bytes value = 2; + google.protobuf.Timestamp event_time = 3; + google.protobuf.Timestamp watermark = 4; + string id = 5; + map headers = 6; + } + message Status { + bool eot = 1; + } + // Required field indicating the request. + Request request = 1; + // Required field indicating the status of the request. + // If eot is set to true, it indicates the end of transmission. + Status status = 2; + // optional field indicating the handshake message. + optional Handshake handshake = 3; +} + +/* + * Handshake message between client and server to indicate the start of transmission. + */ +message Handshake { + // Required field indicating the start of transmission. + bool sot = 1; } /** @@ -73,5 +93,6 @@ message SinkResponse { // err_msg is the error message, set it if success is set to false. string err_msg = 3; } - repeated Result results = 1; -} + Result result = 1; + optional Handshake handshake = 2; +} \ No newline at end of file diff --git a/pkg/sdkclient/sinker/client.go b/pkg/sdkclient/sinker/client.go index 67fe08557c..51c1273568 100644 --- a/pkg/sdkclient/sinker/client.go +++ b/pkg/sdkclient/sinker/client.go @@ -19,26 +19,31 @@ package sinker import ( "context" "fmt" + "time" sinkpb "github.com/numaproj/numaflow-go/pkg/apis/proto/sink/v1" + "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/protobuf/types/known/emptypb" "github.com/numaproj/numaflow/pkg/sdkclient" grpcutil "github.com/numaproj/numaflow/pkg/sdkclient/grpc" "github.com/numaproj/numaflow/pkg/sdkclient/serverinfo" + "github.com/numaproj/numaflow/pkg/shared/logging" ) // client contains the grpc connection and the grpc client. 
type client struct { - conn *grpc.ClientConn - grpcClt sinkpb.SinkClient + conn *grpc.ClientConn + grpcClt sinkpb.SinkClient + sinkStream sinkpb.Sink_SinkFnClient } var _ Client = (*client)(nil) -func New(serverInfo *serverinfo.ServerInfo, inputOptions ...sdkclient.Option) (Client, error) { +func New(ctx context.Context, serverInfo *serverinfo.ServerInfo, inputOptions ...sdkclient.Option) (Client, error) { var opts = sdkclient.DefaultOptions(sdkclient.SinkAddr) + var logger = logging.FromContext(ctx) for _, inputOption := range inputOptions { inputOption(opts) @@ -53,12 +58,70 @@ func New(serverInfo *serverinfo.ServerInfo, inputOptions ...sdkclient.Option) (C c.conn = conn c.grpcClt = sinkpb.NewSinkClient(conn) + + // Wait until the server is ready +waitUntilReady: + for { + select { + case <-ctx.Done(): + return nil, fmt.Errorf("failed to connect to the server: %v", ctx.Err()) + default: + ready, _ := c.IsReady(ctx, &emptypb.Empty{}) + if ready { + break waitUntilReady + } else { + logger.Warnw("waiting for the server to be ready", zap.String("server", opts.UdsSockAddr())) + time.Sleep(100 * time.Millisecond) + } + } + } + + // Create the sink stream + c.sinkStream, err = c.grpcClt.SinkFn(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create sink stream: %v", err) + } + + // Perform handshake + handshakeRequest := &sinkpb.SinkRequest{ + Handshake: &sinkpb.Handshake{ + Sot: true, + }, + } + if err := c.sinkStream.Send(handshakeRequest); err != nil { + return nil, fmt.Errorf("failed to send handshake request: %v", err) + } + + handshakeResponse, err := c.sinkStream.Recv() + if err != nil { + return nil, fmt.Errorf("failed to receive handshake response: %v", err) + } + if handshakeResponse.GetHandshake() == nil || !handshakeResponse.GetHandshake().GetSot() { + return nil, fmt.Errorf("invalid handshake response") + } + return c, nil } // NewFromClient creates a new client object from a grpc client, which is useful for testing. 
-func NewFromClient(c sinkpb.SinkClient) (Client, error) { - return &client{grpcClt: c}, nil +func NewFromClient(ctx context.Context, sinkClient sinkpb.SinkClient, inputOptions ...sdkclient.Option) (Client, error) { + var opts = sdkclient.DefaultOptions(sdkclient.SinkAddr) + var err error + + for _, inputOption := range inputOptions { + inputOption(opts) + } + + c := new(client) + c.grpcClt = sinkClient + + // Create the sink stream + c.sinkStream, err = c.grpcClt.SinkFn(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create sink stream: %v", err) + } + + return c, nil } // CloseConn closes the grpc client connection. @@ -66,6 +129,10 @@ func (c *client) CloseConn(ctx context.Context) error { if c.conn == nil { return nil } + err := c.sinkStream.CloseSend() + if err != nil { + return err + } return c.conn.Close() } @@ -79,20 +146,34 @@ func (c *client) IsReady(ctx context.Context, in *emptypb.Empty) (bool, error) { } // SinkFn applies a function to a list of requests. -func (c *client) SinkFn(ctx context.Context, requests []*sinkpb.SinkRequest) (*sinkpb.SinkResponse, error) { - stream, err := c.grpcClt.SinkFn(ctx) - if err != nil { - return nil, fmt.Errorf("failed to execute c.grpcClt.SinkFn(): %w", err) - } - for _, datum := range requests { - if err := stream.Send(datum); err != nil { - return nil, fmt.Errorf("failed to execute stream.Send(%v): %w", datum, err) +func (c *client) SinkFn(ctx context.Context, requests []*sinkpb.SinkRequest) ([]*sinkpb.SinkResponse, error) { + // Stream the array of sink requests + for _, req := range requests { + if err := c.sinkStream.Send(req); err != nil { + return nil, fmt.Errorf("failed to send sink request: %v", err) } } - responseList, err := stream.CloseAndRecv() - if err != nil { - return nil, fmt.Errorf("failed to execute stream.CloseAndRecv(): %w", err) + + // send eot request + eotRequest := &sinkpb.SinkRequest{ + Status: &sinkpb.SinkRequest_Status{ + Eot: true, + }, + } + + if err := 
c.sinkStream.Send(eotRequest); err != nil { + return nil, fmt.Errorf("failed to send eot request: %v", err) + } + + // Wait for the corresponding responses + var responses []*sinkpb.SinkResponse + for i := 0; i < len(requests); i++ { + resp, err := c.sinkStream.Recv() + if err != nil { + return nil, fmt.Errorf("failed to receive sink response: %v", err) + } + responses = append(responses, resp) } - return responseList, nil + return responses, nil } diff --git a/pkg/sdkclient/sinker/client_test.go b/pkg/sdkclient/sinker/client_test.go index fdc115a882..762a989fed 100644 --- a/pkg/sdkclient/sinker/client_test.go +++ b/pkg/sdkclient/sinker/client_test.go @@ -36,10 +36,12 @@ func TestClient_IsReady(t *testing.T) { defer ctrl.Finish() mockClient := sinkmock.NewMockSinkClient(ctrl) + mockStream := sinkmock.NewMockSink_SinkFnClient(ctrl) + mockClient.EXPECT().SinkFn(gomock.Any(), gomock.Any()).Return(mockStream, nil) mockClient.EXPECT().IsReady(gomock.Any(), gomock.Any()).Return(&sinkpb.ReadyResponse{Ready: true}, nil) mockClient.EXPECT().IsReady(gomock.Any(), gomock.Any()).Return(&sinkpb.ReadyResponse{Ready: false}, fmt.Errorf("mock connection refused")) - testClient, err := NewFromClient(mockClient) + testClient, err := NewFromClient(ctx, mockClient) assert.NoError(t, err) reflect.DeepEqual(testClient, &client{ grpcClt: mockClient, @@ -62,31 +64,37 @@ func TestClient_SinkFn(t *testing.T) { mockSinkClient := sinkmock.NewMockSink_SinkFnClient(ctrl) mockSinkClient.EXPECT().Send(gomock.Any()).Return(nil).AnyTimes() - mockSinkClient.EXPECT().CloseAndRecv().Return(&sinkpb.SinkResponse{ - Results: []*sinkpb.SinkResponse_Result{ - { - Id: "temp-id", - Status: sinkpb.Status_SUCCESS, - }, + mockSinkClient.EXPECT().Recv().Return(&sinkpb.SinkResponse{ + Result: &sinkpb.SinkResponse_Result{ + Id: "temp-id", + Status: sinkpb.Status_SUCCESS, }, }, nil) mockClient := sinkmock.NewMockSinkClient(ctrl) mockClient.EXPECT().SinkFn(gomock.Any(), gomock.Any()).Return(mockSinkClient, nil) - 
testClient, err := NewFromClient(mockClient) + testClient, err := NewFromClient(ctx, mockClient) assert.NoError(t, err) reflect.DeepEqual(testClient, &client{ grpcClt: mockClient, }) - response, err := testClient.SinkFn(ctx, []*sinkpb.SinkRequest{}) - assert.Equal(t, &sinkpb.SinkResponse{Results: []*sinkpb.SinkResponse_Result{ + response, err := testClient.SinkFn(ctx, []*sinkpb.SinkRequest{ { - Id: "temp-id", - Status: sinkpb.Status_SUCCESS, + Request: &sinkpb.SinkRequest_Request{ + Id: "temp-id", + }, + }, + }) + assert.Equal(t, []*sinkpb.SinkResponse{ + { + Result: &sinkpb.SinkResponse_Result{ + Id: "temp-id", + Status: sinkpb.Status_SUCCESS, + }, }, - }}, response) + }, response) assert.NoError(t, err) } diff --git a/pkg/sdkclient/sinker/interface.go b/pkg/sdkclient/sinker/interface.go index 9f4aa0eb2f..489394fded 100644 --- a/pkg/sdkclient/sinker/interface.go +++ b/pkg/sdkclient/sinker/interface.go @@ -27,5 +27,5 @@ import ( type Client interface { CloseConn(ctx context.Context) error IsReady(ctx context.Context, in *emptypb.Empty) (bool, error) - SinkFn(ctx context.Context, datumList []*sinkpb.SinkRequest) (*sinkpb.SinkResponse, error) + SinkFn(ctx context.Context, datumList []*sinkpb.SinkRequest) ([]*sinkpb.SinkResponse, error) } diff --git a/pkg/sinks/sink.go b/pkg/sinks/sink.go index 413d232799..e15c02daa0 100644 --- a/pkg/sinks/sink.go +++ b/pkg/sinks/sink.go @@ -84,7 +84,7 @@ func (u *SinkProcessor) Start(ctx context.Context) error { switch u.ISBSvcType { case dfv1.ISBSvcTypeRedis: redisClient := redisclient.NewInClusterRedisClient() - readOptions := []redisclient.Option{} + var readOptions []redisclient.Option if x := u.VertexInstance.Vertex.Spec.Limits; x != nil && x.ReadTimeout != nil { readOptions = append(readOptions, redisclient.WithReadTimeOut(x.ReadTimeout.Duration)) } @@ -155,7 +155,7 @@ func (u *SinkProcessor) Start(ctx context.Context) error { return err } - sdkClient, err := sinkclient.New(serverInfo, 
sdkclient.WithMaxMessageSize(maxMessageSize)) + sdkClient, err := sinkclient.New(ctx, serverInfo, sdkclient.WithMaxMessageSize(maxMessageSize)) if err != nil { return fmt.Errorf("failed to create sdk client, %w", err) } @@ -184,7 +184,7 @@ func (u *SinkProcessor) Start(ctx context.Context) error { return err } - sdkClient, err := sinkclient.New(serverInfo, sdkclient.WithMaxMessageSize(maxMessageSize), sdkclient.WithUdsSockAddr(sdkclient.FbSinkAddr)) + sdkClient, err := sinkclient.New(ctx, serverInfo, sdkclient.WithMaxMessageSize(maxMessageSize), sdkclient.WithUdsSockAddr(sdkclient.FbSinkAddr)) if err != nil { return fmt.Errorf("failed to create sdk client, %w", err) } @@ -292,7 +292,7 @@ func (u *SinkProcessor) Start(ctx context.Context) error { // wait for all the forwarders to exit finalWg.Wait() - // close the from vertex wm stores + // close the fromVertex wm stores // since we created the stores, we can close them for _, wmStore := range fromVertexWmStores { _ = wmStore.Close() diff --git a/pkg/sinks/udsink/sink.go b/pkg/sinks/udsink/sink.go index 36a7dc1219..fad85a925c 100644 --- a/pkg/sinks/udsink/sink.go +++ b/pkg/sinks/udsink/sink.go @@ -65,13 +65,15 @@ func (s *UserDefinedSink) Write(ctx context.Context, messages []isb.Message) ([] msgs := make([]*sinkpb.SinkRequest, len(messages)) for i, m := range messages { msgs[i] = &sinkpb.SinkRequest{ - Id: m.ID.String(), - Value: m.Payload, - Keys: m.Keys, - EventTime: timestamppb.New(m.EventTime), - // Watermark is only available in readmessage.... - Watermark: timestamppb.New(time.Time{}), // TODO: insert the correct watermark - Headers: m.Headers, + Request: &sinkpb.SinkRequest_Request{ + Id: m.ID.String(), + Value: m.Payload, + Keys: m.Keys, + EventTime: timestamppb.New(m.EventTime), + // Watermark is only available in readmessage.... 
+ Watermark: timestamppb.New(time.Time{}), // TODO: insert the correct watermark + Headers: m.Headers, + }, } } return nil, s.udsink.ApplySink(ctx, msgs) diff --git a/pkg/sinks/udsink/udsink_grpc.go b/pkg/sinks/udsink/udsink_grpc.go index 1b9c31131c..80f47d2675 100644 --- a/pkg/sinks/udsink/udsink_grpc.go +++ b/pkg/sinks/udsink/udsink_grpc.go @@ -91,7 +91,7 @@ func (u *UDSgRPCBasedUDSink) WaitUntilReady(ctx context.Context) error { func (u *UDSgRPCBasedUDSink) ApplySink(ctx context.Context, requests []*sinkpb.SinkRequest) []error { errs := make([]error, len(requests)) - response, err := u.client.SinkFn(ctx, requests) + responses, err := u.client.SinkFn(ctx, requests) if err != nil { for i := range requests { errs[i] = &ApplyUDSinkErr{ @@ -106,24 +106,24 @@ func (u *UDSgRPCBasedUDSink) ApplySink(ctx context.Context, requests []*sinkpb.S return errs } // Use ID to map the response messages, so that there's no strict requirement for the user-defined sink to return the response in order. - resMap := make(map[string]*sinkpb.SinkResponse_Result) - for _, res := range response.GetResults() { - resMap[res.GetId()] = res + resMap := make(map[string]*sinkpb.SinkResponse) + for _, res := range responses { + resMap[res.Result.GetId()] = res } for i, m := range requests { - if r, existing := resMap[m.GetId()]; !existing { + if r, existing := resMap[m.Request.GetId()]; !existing { errs[i] = &NotFoundErr } else { - if r.GetStatus() == sinkpb.Status_FAILURE { - if r.GetErrMsg() != "" { + if r.Result.GetStatus() == sinkpb.Status_FAILURE { + if r.Result.GetErrMsg() != "" { errs[i] = &ApplyUDSinkErr{ UserUDSinkErr: true, - Message: r.GetErrMsg(), + Message: r.Result.GetErrMsg(), } } else { errs[i] = &UnknownUDSinkErr } - } else if r.GetStatus() == sinkpb.Status_FALLBACK { + } else if r.Result.GetStatus() == sinkpb.Status_FALLBACK { errs[i] = &WriteToFallbackErr } else { errs[i] = nil diff --git a/pkg/sinks/udsink/udsink_grpc_test.go b/pkg/sinks/udsink/udsink_grpc_test.go index 
b367416cc5..96968fd9eb 100644 --- a/pkg/sinks/udsink/udsink_grpc_test.go +++ b/pkg/sinks/udsink/udsink_grpc_test.go @@ -19,7 +19,6 @@ package udsink import ( "context" "errors" - "fmt" "testing" "time" @@ -32,8 +31,8 @@ import ( sinkclient "github.com/numaproj/numaflow/pkg/sdkclient/sinker" ) -func NewMockUDSgRPCBasedUDSink(mockClient *sinkmock.MockSinkClient) *UDSgRPCBasedUDSink { - c, _ := sinkclient.NewFromClient(mockClient) +func NewMockUDSgRPCBasedUDSink(ctx context.Context, mockClient *sinkmock.MockSinkClient) *UDSgRPCBasedUDSink { + c, _ := sinkclient.NewFromClient(ctx, mockClient) return &UDSgRPCBasedUDSink{c} } @@ -42,6 +41,8 @@ func Test_gRPCBasedUDSink_WaitUntilReadyWithMockClient(t *testing.T) { defer ctrl.Finish() mockClient := sinkmock.NewMockSinkClient(ctrl) + mockStream := sinkmock.NewMockSink_SinkFnClient(ctrl) + mockClient.EXPECT().SinkFn(gomock.Any(), gomock.Any()).Return(mockStream, nil) mockClient.EXPECT().IsReady(gomock.Any(), gomock.Any()).Return(&sinkpb.ReadyResponse{Ready: true}, nil) ctx, cancel := context.WithTimeout(context.Background(), time.Second) @@ -53,7 +54,7 @@ func Test_gRPCBasedUDSink_WaitUntilReadyWithMockClient(t *testing.T) { } }() - u := NewMockUDSgRPCBasedUDSink(mockClient) + u := NewMockUDSgRPCBasedUDSink(ctx, mockClient) err := u.WaitUntilReady(ctx) assert.NoError(t, err) } @@ -66,16 +67,20 @@ func Test_gRPCBasedUDSink_ApplyWithMockClient(t *testing.T) { testDatumList := []*sinkpb.SinkRequest{ { - Id: "test_id_0", - Value: []byte(`sink_message_success`), - EventTime: timestamppb.New(time.Unix(1661169660, 0)), - Watermark: timestamppb.New(time.Time{}), + Request: &sinkpb.SinkRequest_Request{ + Id: "test_id_0", + Value: []byte(`sink_message_success`), + EventTime: timestamppb.New(time.Unix(1661169660, 0)), + Watermark: timestamppb.New(time.Time{}), + }, }, { - Id: "test_id_1", - Value: []byte(`sink_message_err`), - EventTime: timestamppb.New(time.Unix(1661169660, 0)), - Watermark: timestamppb.New(time.Time{}), + Request: 
&sinkpb.SinkRequest_Request{ + Id: "test_id_1", + Value: []byte(`sink_message_err`), + EventTime: timestamppb.New(time.Unix(1661169660, 0)), + Watermark: timestamppb.New(time.Time{}), + }, }, } testResponseList := []*sinkpb.SinkResponse_Result{ @@ -93,8 +98,11 @@ func Test_gRPCBasedUDSink_ApplyWithMockClient(t *testing.T) { mockSinkClient := sinkmock.NewMockSink_SinkFnClient(ctrl) mockSinkClient.EXPECT().Send(gomock.Any()).Return(nil).AnyTimes() - mockSinkClient.EXPECT().CloseAndRecv().Return(&sinkpb.SinkResponse{ - Results: testResponseList, + mockSinkClient.EXPECT().Recv().Return(&sinkpb.SinkResponse{ + Result: testResponseList[0], + }, nil) + mockSinkClient.EXPECT().Recv().Return(&sinkpb.SinkResponse{ + Result: testResponseList[1], }, nil) mockClient := sinkmock.NewMockSinkClient(ctrl) @@ -109,7 +117,7 @@ func Test_gRPCBasedUDSink_ApplyWithMockClient(t *testing.T) { } }() - u := NewMockUDSgRPCBasedUDSink(mockClient) + u := NewMockUDSgRPCBasedUDSink(ctx, mockClient) gotErrList := u.ApplySink(ctx, testDatumList) assert.Equal(t, 2, len(gotErrList)) assert.Equal(t, nil, gotErrList[0]) @@ -127,16 +135,20 @@ func Test_gRPCBasedUDSink_ApplyWithMockClient(t *testing.T) { testDatumList := []*sinkpb.SinkRequest{ { - Id: "test_id_0", - Value: []byte(`sink_message_grpc_err`), - EventTime: timestamppb.New(time.Unix(1661169660, 0)), - Watermark: timestamppb.New(time.Time{}), + Request: &sinkpb.SinkRequest_Request{ + Id: "test_id_0", + Value: []byte(`sink_message_grpc_err`), + EventTime: timestamppb.New(time.Unix(1661169660, 0)), + Watermark: timestamppb.New(time.Time{}), + }, }, { - Id: "test_id_1", - Value: []byte(`sink_message_grpc_err`), - EventTime: timestamppb.New(time.Unix(1661169660, 0)), - Watermark: timestamppb.New(time.Time{}), + Request: &sinkpb.SinkRequest_Request{ + Id: "test_id_1", + Value: []byte(`sink_message_grpc_err`), + EventTime: timestamppb.New(time.Unix(1661169660, 0)), + Watermark: timestamppb.New(time.Time{}), + }, }, } @@ -144,7 +156,8 @@ func 
Test_gRPCBasedUDSink_ApplyWithMockClient(t *testing.T) { mockSinkErrClient.EXPECT().Send(gomock.Any()).Return(nil).AnyTimes() mockClient := sinkmock.NewMockSinkClient(ctrl) - mockClient.EXPECT().SinkFn(gomock.Any(), gomock.Any()).Return(mockSinkErrClient, fmt.Errorf("mock SinkFn error")) + mockClient.EXPECT().SinkFn(gomock.Any(), gomock.Any()).Return(mockSinkErrClient, nil) + mockSinkErrClient.EXPECT().Recv().Return(nil, errors.New("mock SinkFn error")).AnyTimes() ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() @@ -155,12 +168,12 @@ func Test_gRPCBasedUDSink_ApplyWithMockClient(t *testing.T) { } }() - u := NewMockUDSgRPCBasedUDSink(mockClient) + u := NewMockUDSgRPCBasedUDSink(ctx, mockClient) gotErrList := u.ApplySink(ctx, testDatumList) expectedErrList := []error{ &ApplyUDSinkErr{ UserUDSinkErr: false, - Message: "gRPC client.SinkFn failed, failed to execute c.grpcClt.SinkFn(): mock SinkFn error", + Message: "gRPC client.SinkFn failed, failed to receive sink response: mock SinkFn error", InternalErr: InternalErr{ Flag: true, MainCarDown: false, @@ -168,7 +181,7 @@ func Test_gRPCBasedUDSink_ApplyWithMockClient(t *testing.T) { }, &ApplyUDSinkErr{ UserUDSinkErr: false, - Message: "gRPC client.SinkFn failed, failed to execute c.grpcClt.SinkFn(): mock SinkFn error", + Message: "gRPC client.SinkFn failed, failed to receive sink response: mock SinkFn error", InternalErr: InternalErr{ Flag: true, MainCarDown: false, diff --git a/rust/Cargo.lock b/rust/Cargo.lock index 81b0a5149b..d6839447ba 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -110,9 +110,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.82" +version = "0.1.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a27b8a3a6e1a44fa4c8baf1f653e4172e81486d4941f2237e20dc2d0cf4ddff1" +checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", @@ -145,9 +145,9 @@ 
dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.21.1" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "234314bd569802ec87011d653d6815c6d7b9ffb969e9fee5b8b20ef860e8dce9" +checksum = "b3ddc4a5b231dd6958b140ff3151b6412b3f4321fab354f399eec8f14b06df62" dependencies = [ "bindgen", "cc", @@ -160,9 +160,9 @@ dependencies = [ [[package]] name = "axum" -version = "0.7.5" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf" +checksum = "8f43644eed690f5374f1af436ecd6aea01cd201f6fbdf0178adaf6907afb2cec" dependencies = [ "async-trait", "axum-core", @@ -186,7 +186,7 @@ dependencies = [ "serde_urlencoded", "sync_wrapper 1.0.1", "tokio", - "tower", + "tower 0.5.1", "tower-layer", "tower-service", "tracing", @@ -194,9 +194,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a15c63fd72d41492dc4f497196f5da1fb04fb7529e631d73630d1b491e47a2e3" +checksum = "5e6b8ba012a258d63c9adfa28b9ddcf66149da6f986c5b5452e629d5ee64bf00" dependencies = [ "async-trait", "bytes", @@ -207,7 +207,7 @@ dependencies = [ "mime", "pin-project-lite", "rustversion", - "sync_wrapper 0.1.2", + "sync_wrapper 1.0.1", "tower-layer", "tower-service", "tracing", @@ -215,11 +215,10 @@ dependencies = [ [[package]] name = "axum-macros" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00c055ee2d014ae5981ce1016374e8213682aa14d9bf40e48ab48b5f3ef20eaa" +checksum = "57d123550fa8d071b7255cb0cc04dc302baa6c8c4a79f55701552684d8399bce" dependencies = [ - "heck 0.4.1", "proc-macro2", "quote", "syn", @@ -245,7 +244,7 @@ dependencies = [ "rustls-pki-types", "tokio", "tokio-rustls 0.26.0", - "tower", + "tower 0.4.13", "tower-service", ] @@ -907,12 +906,6 @@ dependencies = [ "http 
1.1.0", ] -[[package]] -name = "heck" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" - [[package]] name = "heck" version = "0.5.0" @@ -1116,9 +1109,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da62f120a8a37763efb0cf8fdf264b884c7b8b9ac8660b900c8661030c00e6ba" +checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b" dependencies = [ "bytes", "futures-channel", @@ -1129,7 +1122,6 @@ dependencies = [ "pin-project-lite", "socket2", "tokio", - "tower", "tower-service", "tracing", ] @@ -1318,7 +1310,7 @@ dependencies = [ "thiserror", "tokio", "tokio-util", - "tower", + "tower 0.4.13", "tower-http", "tracing", ] @@ -1353,9 +1345,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.158" +version = "0.2.159" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" +checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5" [[package]] name = "libloading" @@ -1498,7 +1490,7 @@ dependencies = [ "tokio-util", "tonic", "tonic-build", - "tower", + "tower 0.4.13", "tracing", "tracing-subscriber", "trait-variant", @@ -1513,9 +1505,9 @@ checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" [[package]] name = "nkeys" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2de02c883c178998da8d0c9816a88ef7ef5c58314dd1585c97a4a5679f3ab337" +checksum = "9f49e787f4c61cbd0f9320b31cc26e58719f6aa5068e34697dd3aea361412fe3" dependencies = [ "data-encoding", "ed25519", @@ -1605,7 +1597,7 @@ dependencies = [ [[package]] name = "numaflow" version = "0.1.1" -source = 
"git+https://github.com/numaproj/numaflow-rs.git?branch=main#362f2b0a0705c34ce3693b8714885dfbae7843e8" +source = "git+https://github.com/numaproj/numaflow-rs.git?rev=0c1682864a4b906fab52e149cfd7cacc679ce688#0c1682864a4b906fab52e149cfd7cacc679ce688" dependencies = [ "chrono", "futures-util", @@ -1757,9 +1749,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.12" +version = "2.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c73c26c01b8c87956cea613c907c9d6ecffd8d18a2a5908e5de0adfaa185cea" +checksum = "fdbef9d1d47087a895abd220ed25eb4ad973a5e26f6a4367b038c25e28dfc2d9" dependencies = [ "memchr", "thiserror", @@ -1768,9 +1760,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.7.12" +version = "2.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "664d22978e2815783adbdd2c588b455b1bd625299ce36b2a99881ac9627e6d8d" +checksum = "4d3a6e3394ec80feb3b6393c725571754c6188490265c61aaf260810d6b95aa0" dependencies = [ "pest", "pest_generator", @@ -1778,9 +1770,9 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.12" +version = "2.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2d5487022d5d33f4c30d91c22afa240ce2a644e87fe08caad974d4eab6badbe" +checksum = "94429506bde1ca69d1b5601962c73f4172ab4726571a59ea95931218cb0e930e" dependencies = [ "pest", "pest_meta", @@ -1791,9 +1783,9 @@ dependencies = [ [[package]] name = "pest_meta" -version = "2.7.12" +version = "2.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0091754bbd0ea592c4deb3a122ce8ecbb0753b738aa82bc055fcc2eccc8d8174" +checksum = "ac8a071862e93690b6e34e9a5fb8e33ff3734473ac0245b27232222c4906a33f" dependencies = [ "once_cell", "pest", @@ -1854,9 +1846,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.7.0" +version = "1.8.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "da544ee218f0d287a911e9c99a39a8c9bc8fcad3cb8db5959940044ecfc67265" +checksum = "d30538d42559de6b034bc76fd6dd4c38961b1ee5c6c56e3808c50128fdbc22ce" [[package]] name = "powerfmt" @@ -1917,9 +1909,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.13.2" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b2ecbe40f08db5c006b5764a2645f7f3f141ce756412ac9e1dd6087e6d32995" +checksum = "7b0487d90e047de87f984913713b85c601c05609aad5b0df4b4573fbf69aa13f" dependencies = [ "bytes", "prost-derive", @@ -1927,12 +1919,12 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.13.2" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8650aabb6c35b860610e9cff5dc1af886c9e25073b7b1712a68972af4281302" +checksum = "0c1318b19085f08681016926435853bbf7858f9c082d0999b80550ff5d9abe15" dependencies = [ "bytes", - "heck 0.5.0", + "heck", "itertools 0.13.0", "log", "multimap", @@ -1948,9 +1940,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.13.2" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acf0c195eebb4af52c752bec4f52f645da98b6e92077a04110c7f349477ae5ac" +checksum = "e9552f850d5f0964a4e4d0bf306459ac29323ddfbae05e35a7c0d35cb0803cc5" dependencies = [ "anyhow", "itertools 0.13.0", @@ -1961,9 +1953,9 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.13.2" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60caa6738c7369b940c3d49246a8d1749323674c65cb13010134f5c9bad5b519" +checksum = "4759aa0d3a6232fb8dbdb97b61de2c20047c68aca932c7ed76da9d788508d670" dependencies = [ "prost", ] @@ -2095,9 +2087,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0884ad60e090bf1345b93da0a5de8923c93884cd03f40dfcfddd3b4bee661853" +checksum = "62871f2d65009c0256aed1b9cfeeb8ac272833c404e13d53d400cd0dad7a2ac0" dependencies = [ "bitflags 2.6.0", ] @@ -2470,9 +2462,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.11.1" +version = "2.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75da29fe9b9b08fe9d6b22b5b4bcbc75d8db3aa31e639aa56bb62e9d46bfceaf" +checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" dependencies = [ "core-foundation-sys", "libc", @@ -2625,7 +2617,7 @@ dependencies = [ "tempfile", "thiserror", "tokio", - "tower", + "tower 0.4.13", "tower-http", "tracing", "tracing-subscriber", @@ -2816,18 +2808,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" +checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" +checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", @@ -3007,9 +2999,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.21" +version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b072cee73c449a636ffd6f32bd8de3a9f7119139aff882f44943ce2986dc5cf" +checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ "indexmap 2.5.0", "serde", @@ -3042,7 +3034,7 @@ dependencies = [ "socket2", "tokio", "tokio-stream", - "tower", + "tower 0.4.13", "tower-layer", "tower-service", "tracing", @@ -3081,6 +3073,22 @@ dependencies 
= [ "tracing", ] +[[package]] +name = "tower" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper 0.1.2", + "tokio", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "tower-http" version = "0.5.2" @@ -3253,9 +3261,9 @@ checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" [[package]] name = "unicode-width" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0336d538f7abc86d282a4189614dfaa90810dfc2c6f6427eaf88e16311dd225d" +checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" [[package]] name = "unsafe-libyaml" @@ -3638,9 +3646,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.6.18" +version = "0.6.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f" +checksum = "c52ac009d615e79296318c1bcce2d422aaca15ad08515e344feeda07df67a587" dependencies = [ "memchr", ] diff --git a/rust/monovertex/Cargo.toml b/rust/monovertex/Cargo.toml index ce973f79ac..f3644dc140 100644 --- a/rust/monovertex/Cargo.toml +++ b/rust/monovertex/Cargo.toml @@ -38,7 +38,7 @@ log = "0.4.22" [dev-dependencies] tempfile = "3.11.0" -numaflow = { git = "https://github.com/numaproj/numaflow-rs.git", branch = "main" } +numaflow = { git = "https://github.com/numaproj/numaflow-rs.git", rev = "0c1682864a4b906fab52e149cfd7cacc679ce688" } [build-dependencies] tonic-build = "0.12.1" diff --git a/rust/monovertex/proto/sink.proto b/rust/monovertex/proto/sink.proto index c413ea863b..300e570314 100644 --- a/rust/monovertex/proto/sink.proto +++ b/rust/monovertex/proto/sink.proto @@ -7,7 +7,7 @@ package sink.v1; 
service Sink { // SinkFn writes the request to a user defined sink. - rpc SinkFn(stream SinkRequest) returns (SinkResponse); + rpc SinkFn(stream SinkRequest) returns (stream SinkResponse); // IsReady is the heartbeat endpoint for gRPC. rpc IsReady(google.protobuf.Empty) returns (ReadyResponse); @@ -17,12 +17,32 @@ service Sink { * SinkRequest represents a request element. */ message SinkRequest { - repeated string keys = 1; - bytes value = 2; - google.protobuf.Timestamp event_time = 3; - google.protobuf.Timestamp watermark = 4; - string id = 5; - map headers = 6; + message Request { + repeated string keys = 1; + bytes value = 2; + google.protobuf.Timestamp event_time = 3; + google.protobuf.Timestamp watermark = 4; + string id = 5; + map headers = 6; + } + message Status { + bool eot = 1; + } + // Required field indicating the request. + Request request = 1; + // Required field indicating the status of the request. + // If eot is set to true, it indicates the end of transmission. + Status status = 2; + // optional field indicating the handshake message. + optional Handshake handshake = 3; +} + +/* + * Handshake message between client and server to indicate the start of transmission. + */ +message Handshake { + // Required field indicating the start of transmission. + bool sot = 1; } /** @@ -32,6 +52,15 @@ message ReadyResponse { bool ready = 1; } +/* + * Status is the status of the response. + */ +enum Status { + SUCCESS = 0; + FAILURE = 1; + FALLBACK = 2; +} + /** * SinkResponse is the individual response of each message written to the sink. */ @@ -44,14 +73,6 @@ message SinkResponse { // err_msg is the error message, set it if success is set to false. string err_msg = 3; } - repeated Result results = 1; -} - -/* - * Status is the status of the response. 
- */ -enum Status { - SUCCESS = 0; - FAILURE = 1; - FALLBACK = 2; + Result result = 1; + optional Handshake handshake = 2; } \ No newline at end of file diff --git a/rust/monovertex/src/forwarder.rs b/rust/monovertex/src/forwarder.rs index d60644b338..ac69c09903 100644 --- a/rust/monovertex/src/forwarder.rs +++ b/rust/monovertex/src/forwarder.rs @@ -381,11 +381,15 @@ impl Forwarder { // create a map of id to result, since there is no strict requirement // for the udsink to return the results in the same order as the requests - let result_map: HashMap<_, _> = response - .results - .iter() - .map(|result| (result.id.clone(), result)) - .collect(); + let result_map = response + .into_iter() + .map(|resp| match resp.result { + Some(result) => Ok((result.id.clone(), result)), + None => Err(Error::SinkError( + "Response does not contain a result".to_string(), + )), + }) + .collect::>>()?; error_map.clear(); // drain all the messages that were successfully written @@ -459,11 +463,15 @@ impl Forwarder { // create a map of id to result, since there is no strict requirement // for the udsink to return the results in the same order as the requests - let result_map: HashMap<_, _> = fb_response - .results + let result_map = fb_response .iter() - .map(|result| (result.id.clone(), result)) - .collect(); + .map(|resp| match &resp.result { + Some(result) => Ok((result.id.clone(), result)), + None => Err(Error::SinkError( + "Response does not contain a result".to_string(), + )), + }) + .collect::>>()?; let mut contains_fallback_status = false; diff --git a/rust/monovertex/src/message.rs b/rust/monovertex/src/message.rs index 403c377ec4..d5cde5d2bc 100644 --- a/rust/monovertex/src/message.rs +++ b/rust/monovertex/src/message.rs @@ -6,9 +6,10 @@ use chrono::{DateTime, Utc}; use crate::error::Error; use crate::shared::{prost_timestamp_from_utc, utc_from_timestamp}; +use crate::sink_pb::sink_request::Request; use crate::sink_pb::SinkRequest; use crate::source_pb; -use 
crate::source_pb::{AckRequest, read_response}; +use crate::source_pb::{read_response, AckRequest}; use crate::sourcetransform_pb::SourceTransformRequest; /// A message that is sent from the source to the sink. @@ -94,12 +95,16 @@ impl TryFrom for Message { impl From for SinkRequest { fn from(message: Message) -> Self { Self { - keys: message.keys, - value: message.value, - event_time: prost_timestamp_from_utc(message.event_time), - watermark: None, - id: message.id, - headers: message.headers, + request: Some(Request { + keys: message.keys, + value: message.value, + event_time: prost_timestamp_from_utc(message.event_time), + watermark: None, + id: message.id, + headers: message.headers, + }), + status: None, + handshake: None, } } } diff --git a/rust/monovertex/src/sink.rs b/rust/monovertex/src/sink.rs index a2088a8c2f..873404d4c5 100644 --- a/rust/monovertex/src/sink.rs +++ b/rust/monovertex/src/sink.rs @@ -1,46 +1,98 @@ -use crate::error::Result; +use crate::error::{Error, Result}; use crate::message::Message; use crate::sink_pb::sink_client::SinkClient; -use crate::sink_pb::{SinkRequest, SinkResponse}; +use crate::sink_pb::sink_request::Status; +use crate::sink_pb::{Handshake, SinkRequest, SinkResponse}; +use tokio::sync::mpsc; +use tokio_stream::wrappers::ReceiverStream; use tonic::transport::Channel; +use tonic::{Request, Streaming}; + +const DEFAULT_CHANNEL_SIZE: usize = 1000; /// SinkWriter writes messages to a sink. 
-#[derive(Clone)] pub struct SinkWriter { - client: SinkClient, + sink_tx: mpsc::Sender, + resp_stream: Streaming, } impl SinkWriter { - pub(crate) async fn new(client: SinkClient) -> Result { - Ok(Self { client }) - } + pub(crate) async fn new(mut client: SinkClient) -> Result { + let (sink_tx, sink_rx) = mpsc::channel(DEFAULT_CHANNEL_SIZE); + let sink_stream = ReceiverStream::new(sink_rx); + + // Perform handshake with the server before sending any requests + let handshake_request = SinkRequest { + request: None, + status: None, + handshake: Some(Handshake { sot: true }), + }; + sink_tx + .send(handshake_request) + .await + .map_err(|e| Error::SinkError(format!("failed to send handshake request: {}", e)))?; + + let mut resp_stream = client + .sink_fn(Request::new(sink_stream)) + .await? + .into_inner(); - pub(crate) async fn sink_fn(&mut self, messages: Vec) -> Result { - // create a channel with at least size - let (tx, rx) = tokio::sync::mpsc::channel(if messages.is_empty() { - 1 - } else { - messages.len() - }); + // First response from the server will be the handshake response. We need to check if the + // server has accepted the handshake. + let handshake_response = resp_stream.message().await?.ok_or(Error::SinkError( + "failed to receive handshake response".to_string(), + ))?; + // Handshake cannot be None during the initial phase and it has to set `sot` to true. + if handshake_response.handshake.map_or(true, |h| !h.sot) { + return Err(Error::SinkError("invalid handshake response".to_string())); + } + + Ok(Self { + sink_tx, + resp_stream, + }) + } + + /// writes a set of messages to the sink. 
+ pub(crate) async fn sink_fn(&mut self, messages: Vec) -> Result> { let requests: Vec = messages.into_iter().map(|message| message.into()).collect(); + let num_requests = requests.len(); - tokio::spawn(async move { - for request in requests { - if tx.send(request).await.is_err() { - break; - } - } - }); - - let response = self - .client - .sink_fn(tokio_stream::wrappers::ReceiverStream::new(rx)) - .await? - .into_inner(); + // write requests to the server + for request in requests { + self.sink_tx + .send(request) + .await + .map_err(|e| Error::SinkError(format!("failed to send request: {}", e)))?; + } - Ok(response) + // send eot request to indicate the end of the stream + let eot_request = SinkRequest { + request: None, + status: Some(Status { eot: true }), + handshake: None, + }; + self.sink_tx + .send(eot_request) + .await + .map_err(|e| Error::SinkError(format!("failed to send eot request: {}", e)))?; + + // now that we have sent, we wait for responses! + // NOTE: this works now because the results are not streamed, as of today it will give the + // response only once it has read all the requests. + let mut responses = Vec::new(); + for _ in 0..num_requests { + let response = self + .resp_stream + .message() + .await? 
+ .ok_or(Error::SinkError("failed to receive response".to_string()))?; + responses.push(response); + }; + + Ok(responses) } } @@ -57,10 +109,7 @@ mod tests { struct Logger; #[tonic::async_trait] impl sink::Sinker for Logger { - async fn sink( - &self, - mut input: tokio::sync::mpsc::Receiver, - ) -> Vec { + async fn sink(&self, mut input: mpsc::Receiver) -> Vec { let mut responses: Vec = Vec::new(); while let Some(datum) = input.recv().await { let response = match std::str::from_utf8(&datum.value) { @@ -87,13 +136,14 @@ mod tests { let server_info = server_info_file.clone(); let server_socket = sock_file.clone(); + let server_handle = tokio::spawn(async move { sink::Server::new(Logger) .with_socket_file(server_socket) .with_server_info_file(server_info) .start_with_shutdown(shutdown_rx) .await - .unwrap(); + .expect("failed to start sink server"); }); // wait for the server to start @@ -129,12 +179,17 @@ mod tests { }, ]; - let response = sink_client.sink_fn(messages).await?; - assert_eq!(response.results.len(), 2); + let response = sink_client.sink_fn(messages.clone()).await?; + assert_eq!(response.len(), 2); + let response = sink_client.sink_fn(messages.clone()).await?; + assert_eq!(response.len(), 2); + + drop(sink_client); shutdown_tx .send(()) .expect("failed to send shutdown signal"); + server_handle.await.expect("failed to join server task"); Ok(()) } diff --git a/rust/monovertex/src/source.rs b/rust/monovertex/src/source.rs index fdfde1b6d4..6b3729d3d7 100644 --- a/rust/monovertex/src/source.rs +++ b/rust/monovertex/src/source.rs @@ -7,7 +7,6 @@ use crate::source_pb::source_client::SourceClient; use crate::source_pb::{ ack_response, read_request, AckRequest, AckResponse, ReadRequest, ReadResponse, }; -use log::info; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; use tonic::transport::Channel; diff --git a/rust/servesink/Cargo.toml b/rust/servesink/Cargo.toml index 7b037c7208..a9a768ac6c 100644 --- a/rust/servesink/Cargo.toml +++ 
b/rust/servesink/Cargo.toml @@ -6,7 +6,7 @@ edition = "2021" [dependencies] tonic = "0.12.0" tokio = { version = "1.0", features = ["macros", "rt-multi-thread"] } -numaflow = { git = "https://github.com/numaproj/numaflow-rs.git", branch = "main" } +numaflow = { git = "https://github.com/numaproj/numaflow-rs.git", rev = "0c1682864a4b906fab52e149cfd7cacc679ce688" } tracing = "0.1.40" tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } diff --git a/test/map-e2e/map_test.go b/test/map-e2e/map_test.go index a1d171a253..60821a869b 100644 --- a/test/map-e2e/map_test.go +++ b/test/map-e2e/map_test.go @@ -30,6 +30,7 @@ type MapSuite struct { E2ESuite } +// FIXME(sink-streaming) python sdk func (s *MapSuite) TestBatchMapUDFunctionAndSink() { w := s.Given().Pipeline("@testdata/flatmap-batch.yaml"). When(). @@ -42,8 +43,8 @@ func (s *MapSuite) TestBatchMapUDFunctionAndSink() { VertexPodLogContains("in", LogSourceVertexStarted). VertexPodLogContains("go-split", LogUDFVertexStarted, PodLogCheckOptionWithContainer("numa")). VertexPodLogContains("go-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")). - VertexPodLogContains("python-split", LogUDFVertexStarted, PodLogCheckOptionWithContainer("numa")). - VertexPodLogContains("python-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")). + //VertexPodLogContains("python-split", LogUDFVertexStarted, PodLogCheckOptionWithContainer("numa")). + //VertexPodLogContains("python-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")). VertexPodLogContains("rust-split", LogUDFVertexStarted, PodLogCheckOptionWithContainer("numa")). VertexPodLogContains("rust-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")). VertexPodLogContains("java-split", LogUDFVertexStarted, PodLogCheckOptionWithContainer("numa")). @@ -54,7 +55,7 @@ func (s *MapSuite) TestBatchMapUDFunctionAndSink() { w.Expect(). 
VertexPodLogContains("go-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(3)). - VertexPodLogContains("python-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(3)). + //VertexPodLogContains("python-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(3)). VertexPodLogContains("rust-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(3)). VertexPodLogContains("java-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(3)) } @@ -71,8 +72,8 @@ func (s *MapSuite) TestUDFunctionAndSink() { VertexPodLogContains("in", LogSourceVertexStarted). VertexPodLogContains("go-split", LogUDFVertexStarted, PodLogCheckOptionWithContainer("numa")). VertexPodLogContains("go-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")). - VertexPodLogContains("python-split", LogUDFVertexStarted, PodLogCheckOptionWithContainer("numa")). - VertexPodLogContains("python-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")). + //VertexPodLogContains("python-split", LogUDFVertexStarted, PodLogCheckOptionWithContainer("numa")). + //VertexPodLogContains("python-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")). VertexPodLogContains("java-split", LogUDFVertexStarted, PodLogCheckOptionWithContainer("numa")). VertexPodLogContains("java-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")) @@ -81,8 +82,8 @@ func (s *MapSuite) TestUDFunctionAndSink() { w.Expect(). VertexPodLogContains("go-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(3)). - VertexPodLogContains("java-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(3)). 
- VertexPodLogContains("python-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(3)) + VertexPodLogContains("java-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(3)) + //VertexPodLogContains("python-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(3)) } func (s *MapSuite) TestMapStreamUDFunctionAndSink() { @@ -98,8 +99,8 @@ func (s *MapSuite) TestMapStreamUDFunctionAndSink() { VertexPodLogContains("in", LogSourceVertexStarted). VertexPodLogContains("go-split", LogUDFVertexStarted, PodLogCheckOptionWithContainer("numa")). VertexPodLogContains("go-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")). - VertexPodLogContains("python-split", LogUDFVertexStarted, PodLogCheckOptionWithContainer("numa")). - VertexPodLogContains("python-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")). + //VertexPodLogContains("python-split", LogUDFVertexStarted, PodLogCheckOptionWithContainer("numa")). + //VertexPodLogContains("python-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")). VertexPodLogContains("java-split", LogUDFVertexStarted, PodLogCheckOptionWithContainer("numa")). VertexPodLogContains("java-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")) @@ -110,8 +111,8 @@ func (s *MapSuite) TestMapStreamUDFunctionAndSink() { VertexPodLogContains("go-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(4)) w.Expect(). VertexPodLogContains("go-udsink-2", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(4)) - w.Expect(). - VertexPodLogContains("python-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(4)) + //w.Expect(). + // VertexPodLogContains("python-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(4)) w.Expect(). 
VertexPodLogContains("java-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(4)) } diff --git a/test/map-e2e/testdata/flatmap-batch.yaml b/test/map-e2e/testdata/flatmap-batch.yaml index f4a36213a6..780e1a59da 100644 --- a/test/map-e2e/testdata/flatmap-batch.yaml +++ b/test/map-e2e/testdata/flatmap-batch.yaml @@ -25,29 +25,30 @@ spec: # https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/log image: quay.io/numaio/numaflow-go/sink-log:stable imagePullPolicy: Always - - name: python-split - scale: - min: 1 - udf: - container: - args: - - python - - example.py - # Split input message into an array with comma, https://github.com/numaproj/numaflow-python/tree/main/examples/batchmap/flatmap - image: quay.io/numaio/numaflow-python/batch-map-flatmap:stable - imagePullPolicy: Always - - name: python-udsink - scale: - min: 1 - sink: - udsink: - container: - args: - - python - - example.py - # https://github.com/numaproj/numaflow-python/tree/main/examples/sink/log - image: quay.io/numaio/numaflow-python/sink-log:stable - imagePullPolicy: Always +# FIXME(sink-streaming) python sdk +# - name: python-split +# scale: +# min: 1 +# udf: +# container: +# args: +# - python +# - example.py +# # Split input message into an array with comma, https://github.com/numaproj/numaflow-python/tree/main/examples/batchmap/flatmap +# image: quay.io/numaio/numaflow-python/batch-map-flatmap:stable +# imagePullPolicy: Always +# - name: python-udsink +# scale: +# min: 1 +# sink: +# udsink: +# container: +# args: +# - python +# - example.py +# # https://github.com/numaproj/numaflow-python/tree/main/examples/sink/log +# image: quay.io/numaio/numaflow-python/sink-log:stable +# imagePullPolicy: Always - name: rust-split scale: min: 1 @@ -87,10 +88,10 @@ spec: to: go-split - from: go-split to: go-udsink - - from: in - to: python-split - - from: python-split - to: python-udsink +# - from: in +# to: python-split +# - from: python-split +# to: 
python-udsink - from: in to: rust-split - from: rust-split diff --git a/test/map-e2e/testdata/flatmap-stream.yaml b/test/map-e2e/testdata/flatmap-stream.yaml index 503ffcaf3a..0c749d82f7 100644 --- a/test/map-e2e/testdata/flatmap-stream.yaml +++ b/test/map-e2e/testdata/flatmap-stream.yaml @@ -36,32 +36,33 @@ spec: # https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/log image: quay.io/numaio/numaflow-go/sink-log:stable imagePullPolicy: Always - - name: python-split - partitions: 3 - limits: - readBatchSize: 1 - scale: - min: 1 - udf: - container: - args: - - python - - example.py - # Split input message into an array with comma, https://github.com/numaproj/numaflow-python/tree/main/examples/mapstream/flatmap_stream - image: quay.io/numaio/numaflow-python/map-flatmap-stream:stable - imagePullPolicy: Always - - name: python-udsink - scale: - min: 1 - sink: - udsink: - container: - args: - - python - - example.py - # https://github.com/numaproj/numaflow-python/tree/main/examples/sink/log - image: quay.io/numaio/numaflow-python/sink-log:stable - imagePullPolicy: Always +# FIXME(sink-streaming) python sdk +# - name: python-split +# partitions: 3 +# limits: +# readBatchSize: 1 +# scale: +# min: 1 +# udf: +# container: +# args: +# - python +# - example.py +# # Split input message into an array with comma, https://github.com/numaproj/numaflow-python/tree/main/examples/mapstream/flatmap_stream +# image: quay.io/numaio/numaflow-python/map-flatmap-stream:stable +# imagePullPolicy: Always +# - name: python-udsink +# scale: +# min: 1 +# sink: +# udsink: +# container: +# args: +# - python +# - example.py +# # https://github.com/numaproj/numaflow-python/tree/main/examples/sink/log +# image: quay.io/numaio/numaflow-python/sink-log:stable +# imagePullPolicy: Always - name: java-split partitions: 3 limits: @@ -89,10 +90,10 @@ spec: to: go-udsink - from: go-split to: go-udsink-2 - - from: in - to: python-split - - from: python-split - to: python-udsink +# - from: 
in +# to: python-split +# - from: python-split +# to: python-udsink - from: in to: java-split - from: java-split diff --git a/test/map-e2e/testdata/flatmap.yaml b/test/map-e2e/testdata/flatmap.yaml index da0799f874..5082d54b00 100644 --- a/test/map-e2e/testdata/flatmap.yaml +++ b/test/map-e2e/testdata/flatmap.yaml @@ -25,30 +25,30 @@ spec: image: quay.io/numaio/numaflow-go/sink-log:stable imagePullPolicy: Always -# FIXME(fb-sink): python - - name: python-split - scale: - min: 1 - udf: - container: - args: - - python - - example.py - # Split input message into an array with comma, https://github.com/numaproj/numaflow-python/tree/main/examples/map/flatmap - image: quay.io/numaio/numaflow-python/map-flatmap:stable - imagePullPolicy: Always - - name: python-udsink - scale: - min: 1 - sink: - udsink: - container: - args: - - python - - example.py - # https://github.com/numaproj/numaflow-python/tree/main/examples/sink/log - image: quay.io/numaio/numaflow-python/sink-log:stable - imagePullPolicy: Always +# FIXME(sink-streaming) python sdk +# - name: python-split +# scale: +# min: 1 +# udf: +# container: +# args: +# - python +# - example.py +# # Split input message into an array with comma, https://github.com/numaproj/numaflow-python/tree/main/examples/map/flatmap +# image: quay.io/numaio/numaflow-python/map-flatmap:stable +# imagePullPolicy: Always +# - name: python-udsink +# scale: +# min: 1 +# sink: +# udsink: +# container: +# args: +# - python +# - example.py +# # https://github.com/numaproj/numaflow-python/tree/main/examples/sink/log +# image: quay.io/numaio/numaflow-python/sink-log:stable +# imagePullPolicy: Always - name: java-split scale: min: 1 @@ -71,10 +71,10 @@ spec: to: go-split - from: go-split to: go-udsink - - from: in - to: python-split - - from: python-split - to: python-udsink +# - from: in +# to: python-split +# - from: python-split +# to: python-udsink - from: in to: java-split - from: java-split From 79e4e2c1b43973312b673cc6a27dc368214fa892 Mon Sep 17 
00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 26 Sep 2024 16:45:08 -0700 Subject: [PATCH 072/188] docs: updated CHANGELOG.md (#2094) Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- CHANGELOG.md | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index bbc4a1a7b7..21035aecaa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,33 @@ # Changelog +## v1.3.2 (2024-09-26) + + * [cb7d17d4](https://github.com/numaproj/numaflow/commit/cb7d17d4f3e2ecfcf6a1aa413031f714c135983d) Update manifests to v1.3.2 + * [816a8e74](https://github.com/numaproj/numaflow/commit/816a8e749c3f071b0c5e4c2ce97025c8138c6cbb) feat: container-type level version compatibility check (#2087) + * [9fae141e](https://github.com/numaproj/numaflow/commit/9fae141e686c99e95824b8bdcbec4d4e1bf04241) feat: add pause for monovertex (#2077) + * [fc59a3e0](https://github.com/numaproj/numaflow/commit/fc59a3e06f3e86947d9f905e1a728aa155f68bf4) fix: rollback codegen script (#2079) + * [82beda64](https://github.com/numaproj/numaflow/commit/82beda6462b4828f914ea70a07d8bed5f1302675) Fix: Use Merge patch rather than json patch for `pause-timestamp` annotation apply (#2078) + * [9c2e8f81](https://github.com/numaproj/numaflow/commit/9c2e8f812148d6cc45781762b6328a509225e747) fix: support version compatibility check for pre-release versions (#2069) + * [b0e60014](https://github.com/numaproj/numaflow/commit/b0e60014b3e98019fcc20edd668de90e542534a3) feat: allow customization on readyz and livez config (#2068) + * [88d2a7a3](https://github.com/numaproj/numaflow/commit/88d2a7a30ae9e1fd63799878f1a0e8b0650615e8) doc: example for PVC (#2067) + * [7726cf42](https://github.com/numaproj/numaflow/commit/7726cf4272a2f534202e4ec6ecd81c52e731dbf7) fix: skip updating phase for resource check (#2065) + * 
[782872f5](https://github.com/numaproj/numaflow/commit/782872f55c7fcfb0b1f4747ad71c71f0fc26280c) chore(deps): bump express from 4.19.2 to 4.21.0 in /ui (#2061) + * [234e19fc](https://github.com/numaproj/numaflow/commit/234e19fc84d8c4f3c29217ccc4deafb35e9182f1) fix: builtin transformer should keep the keys (#2047) + * [f7716aa2](https://github.com/numaproj/numaflow/commit/f7716aa2a6afd9cc3596d307c68125f77cf55f92) feat: rolling update for Pipeline Vertex (#2040) + * [db9337a2](https://github.com/numaproj/numaflow/commit/db9337a2cd00e0b19fda1f79793bb9f35eae9436) feat: rolling update for MonoVertex (#2029) + * [6f376414](https://github.com/numaproj/numaflow/commit/6f3764140cd86356a197c8d15cd6f7b7afc0a4a0) fix: pause lifecyle changes and add drained status (#2028) + * [754bc5e3](https://github.com/numaproj/numaflow/commit/754bc5e3646f52ed0784bdfc9810f6ad77c5ae2d) fix: Fix numaflow-rs binary location in image (#2050) + +### Contributors + + * Derek Wang + * Julie Vogelman + * Keran Yang + * Sidhant Kohli + * Sreekanth + * Vigith Maurice + * dependabot[bot] + ## v1.3.1 (2024-09-02) * [a42d0063](https://github.com/numaproj/numaflow/commit/a42d0063caf53d6f4c01c2fb2f6f6f6f74a8f987) Update manifests to v1.3.1 From ef96cfff79536eac8339580b11de41719119c8e4 Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Thu, 26 Sep 2024 17:56:54 -0700 Subject: [PATCH 073/188] chore: fix 2 issues found by code scanning (#2095) Signed-off-by: Derek Wang --- go.mod | 2 +- pkg/sinks/sink.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index a015dcac65..c2d7d6edd5 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/numaproj/numaflow -go 1.22 +go 1.22.7 require ( github.com/IBM/sarama v1.43.2 diff --git a/pkg/sinks/sink.go b/pkg/sinks/sink.go index e15c02daa0..73075dc1cc 100644 --- a/pkg/sinks/sink.go +++ b/pkg/sinks/sink.go @@ -230,7 +230,7 @@ func (u *SinkProcessor) Start(ctx context.Context) error { if err != nil { return fmt.Errorf("failed to 
find a sink, error: %w", err) } - log.Infow("Fallback sink writer created", zap.String("vertex", u.VertexInstance.Vertex.Spec.Sink.Fallback.String())) + log.Info("Fallback sink writer created") forwardOpts = append(forwardOpts, sinkforward.WithFbSinkWriter(fbSinkWriter)) } From 5b8b8dddac727e53bcfbf4c8071221b284e606e9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 27 Sep 2024 01:20:14 +0000 Subject: [PATCH 074/188] chore(deps): bump rollup from 2.79.1 to 2.79.2 in /ui (#2096) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- ui/yarn.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ui/yarn.lock b/ui/yarn.lock index d71fb7d1e6..f44d029a2d 100644 --- a/ui/yarn.lock +++ b/ui/yarn.lock @@ -9966,9 +9966,9 @@ rollup-plugin-terser@^7.0.0: terser "^5.0.0" rollup@^2.43.1: - version "2.79.1" - resolved "https://registry.yarnpkg.com/rollup/-/rollup-2.79.1.tgz#bedee8faef7c9f93a2647ac0108748f497f081c7" - integrity sha512-uKxbd0IhMZOhjAiD5oAFp7BqvkA4Dv47qpOCtaNvng4HBwdbWtdOh8f5nZNuk2rp51PMGk3bzfWu5oayNEuYnw== + version "2.79.2" + resolved "https://registry.yarnpkg.com/rollup/-/rollup-2.79.2.tgz#f150e4a5db4b121a21a747d762f701e5e9f49090" + integrity sha512-fS6iqSPZDs3dr/y7Od6y5nha8dW1YnbgtsyotCVvoFGKbERG++CVRFv1meyGDE1SNItQA8BrnCw7ScdAhRJ3XQ== optionalDependencies: fsevents "~2.3.2" From 74025964ecdc3efd4866b555e71b02f336d4bb45 Mon Sep 17 00:00:00 2001 From: Vigith Maurice Date: Fri, 27 Sep 2024 13:18:03 -0700 Subject: [PATCH 075/188] chore: organize rust code (#2099) Signed-off-by: Vigith Maurice Signed-off-by: Yashash H L Co-authored-by: Yashash H L --- rust/Cargo.lock | 82 +++++++++---------- rust/Cargo.toml | 4 +- rust/Dockerfile | 8 +- rust/monovertex/src/shared.rs | 54 ------------ rust/{monovertex => numaflow-core}/Cargo.toml | 2 +- rust/{monovertex => numaflow-core}/build.rs | 0 .../proto/sink.proto | 0 
.../proto/source.proto | 0 .../proto/sourcetransform.proto | 0 .../src/config.rs | 0 .../src/error.rs | 0 rust/numaflow-core/src/lib.rs | 36 ++++++++ .../src/message.rs | 12 +-- .../src/monovertex.rs} | 64 ++++++--------- .../src/monovertex}/forwarder.rs | 56 ++++++------- .../src/monovertex}/metrics.rs | 10 +-- rust/numaflow-core/src/shared.rs | 2 + .../src/shared}/server_info.rs | 2 +- .../src/shared/utils.rs} | 72 ++++++++++++++-- rust/numaflow-core/src/sink.rs | 4 + .../src/sink/user_defined.rs} | 29 ++++--- rust/numaflow-core/src/source.rs | 4 + .../src/source/user_defined.rs} | 22 ++--- rust/numaflow-core/src/transformer.rs | 4 + .../src/transformer/user_defined.rs} | 23 +++--- rust/src/bin/main.rs | 2 +- 26 files changed, 269 insertions(+), 223 deletions(-) delete mode 100644 rust/monovertex/src/shared.rs rename rust/{monovertex => numaflow-core}/Cargo.toml (98%) rename rust/{monovertex => numaflow-core}/build.rs (100%) rename rust/{monovertex => numaflow-core}/proto/sink.proto (100%) rename rust/{monovertex => numaflow-core}/proto/source.proto (100%) rename rust/{monovertex => numaflow-core}/proto/sourcetransform.proto (100%) rename rust/{monovertex => numaflow-core}/src/config.rs (100%) rename rust/{monovertex => numaflow-core}/src/error.rs (100%) create mode 100644 rust/numaflow-core/src/lib.rs rename rust/{monovertex => numaflow-core}/src/message.rs (90%) rename rust/{monovertex/src/lib.rs => numaflow-core/src/monovertex.rs} (89%) rename rust/{monovertex/src => numaflow-core/src/monovertex}/forwarder.rs (96%) rename rust/{monovertex/src => numaflow-core/src/monovertex}/metrics.rs (98%) create mode 100644 rust/numaflow-core/src/shared.rs rename rust/{monovertex/src => numaflow-core/src/shared}/server_info.rs (99%) rename rust/{monovertex/src/startup.rs => numaflow-core/src/shared/utils.rs} (84%) create mode 100644 rust/numaflow-core/src/sink.rs rename rust/{monovertex/src/sink.rs => numaflow-core/src/sink/user_defined.rs} (90%) create mode 100644 
rust/numaflow-core/src/source.rs rename rust/{monovertex/src/source.rs => numaflow-core/src/source/user_defined.rs} (95%) create mode 100644 rust/numaflow-core/src/transformer.rs rename rust/{monovertex/src/transformer.rs => numaflow-core/src/transformer/user_defined.rs} (90%) diff --git a/rust/Cargo.lock b/rust/Cargo.lock index d6839447ba..655f30bc4d 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -1457,46 +1457,6 @@ version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9be0862c1b3f26a88803c4a49de6889c10e608b3ee9344e6ef5b45fb37ad3d1" -[[package]] -name = "monovertex" -version = "0.1.0" -dependencies = [ - "axum", - "axum-server", - "backoff", - "base64 0.22.1", - "bytes", - "chrono", - "hyper-util", - "kube", - "log", - "numaflow 0.1.1", - "numaflow-models", - "once_cell", - "parking_lot", - "pep440_rs", - "prometheus-client", - "prost", - "prost-types", - "rcgen", - "rustls 0.23.13", - "semver", - "serde", - "serde_json", - "tempfile", - "thiserror", - "tokio", - "tokio-stream", - "tokio-util", - "tonic", - "tonic-build", - "tower 0.4.13", - "tracing", - "tracing-subscriber", - "trait-variant", - "uuid", -] - [[package]] name = "multimap" version = "0.10.0" @@ -1586,7 +1546,7 @@ name = "numaflow" version = "0.1.0" dependencies = [ "backoff", - "monovertex", + "numaflow-core", "servesink", "serving", "tokio", @@ -1616,6 +1576,46 @@ dependencies = [ "uuid", ] +[[package]] +name = "numaflow-core" +version = "0.1.0" +dependencies = [ + "axum", + "axum-server", + "backoff", + "base64 0.22.1", + "bytes", + "chrono", + "hyper-util", + "kube", + "log", + "numaflow 0.1.1", + "numaflow-models", + "once_cell", + "parking_lot", + "pep440_rs", + "prometheus-client", + "prost", + "prost-types", + "rcgen", + "rustls 0.23.13", + "semver", + "serde", + "serde_json", + "tempfile", + "thiserror", + "tokio", + "tokio-stream", + "tokio-util", + "tonic", + "tonic-build", + "tower 0.4.13", + "tracing", + "tracing-subscriber", + 
"trait-variant", + "uuid", +] + [[package]] name = "numaflow-models" version = "0.0.0-pre" diff --git a/rust/Cargo.toml b/rust/Cargo.toml index d4c5152f12..518f905105 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -1,4 +1,4 @@ -workspace = { members = ["backoff", "numaflow-models", "servesink", "serving", "monovertex"] } +workspace = { members = ["backoff", "numaflow-models", "servesink", "serving", "numaflow-core"] } [[bin]] name = "numaflow" @@ -15,6 +15,6 @@ tokio = "1.39.2" backoff = { path = "backoff" } servesink = { path = "servesink" } serving = { path = "serving" } -monovertex = { path = "monovertex" } +numaflow-core = { path = "numaflow-core" } tracing = "0.1.40" tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } diff --git a/rust/Dockerfile b/rust/Dockerfile index e680b85eac..62208944f6 100644 --- a/rust/Dockerfile +++ b/rust/Dockerfile @@ -21,7 +21,7 @@ RUN cargo new numaflow-models COPY ./numaflow-models/Cargo.toml ./numaflow-models/Cargo.toml RUN cargo new monovertex -COPY monovertex/Cargo.toml ./monovertex/Cargo.toml +COPY numaflow-core/Cargo.toml ./monovertex/Cargo.toml RUN cargo new serving COPY ./serving/Cargo.toml ./serving/Cargo.toml @@ -39,9 +39,9 @@ COPY ./servesink/src ./servesink/src COPY ./backoff/src ./backoff/src COPY ./numaflow-models/src ./numaflow-models/src COPY ./serving/src ./serving/src -COPY monovertex/src ./monovertex/src -COPY monovertex/build.rs ./monovertex/build.rs -COPY monovertex/proto ./monovertex/proto +COPY numaflow-core/src ./monovertex/src +COPY numaflow-core/build.rs ./monovertex/build.rs +COPY numaflow-core/proto ./monovertex/proto # Build the real binaries RUN touch src/bin/main.rs && \ diff --git a/rust/monovertex/src/shared.rs b/rust/monovertex/src/shared.rs deleted file mode 100644 index 2ce22ba803..0000000000 --- a/rust/monovertex/src/shared.rs +++ /dev/null @@ -1,54 +0,0 @@ -use std::path::PathBuf; - -use crate::error::Error; -use backoff::retry::Retry; -use backoff::strategy::fixed; 
-use chrono::{DateTime, TimeZone, Timelike, Utc}; -use prost_types::Timestamp; -use tokio::net::UnixStream; -use tonic::transport::{Channel, Endpoint, Uri}; -use tower::service_fn; - -pub(crate) fn utc_from_timestamp(t: Option) -> DateTime { - t.map_or(Utc.timestamp_nanos(-1), |t| { - DateTime::from_timestamp(t.seconds, t.nanos as u32).unwrap_or(Utc.timestamp_nanos(-1)) - }) -} - -pub(crate) fn prost_timestamp_from_utc(t: DateTime) -> Option { - Some(Timestamp { - seconds: t.timestamp(), - nanos: t.nanosecond() as i32, - }) -} - -pub(crate) async fn create_rpc_channel(socket_path: PathBuf) -> crate::error::Result { - const RECONNECT_INTERVAL: u64 = 1000; - const MAX_RECONNECT_ATTEMPTS: usize = 5; - - let interval = fixed::Interval::from_millis(RECONNECT_INTERVAL).take(MAX_RECONNECT_ATTEMPTS); - - let channel = Retry::retry( - interval, - || async { connect_with_uds(socket_path.clone()).await }, - |_: &Error| true, - ) - .await?; - Ok(channel) -} - -pub(crate) async fn connect_with_uds(uds_path: PathBuf) -> Result { - let channel = Endpoint::try_from("http://[::]:50051") - .map_err(|e| Error::ConnectionError(format!("Failed to create endpoint: {:?}", e)))? 
- .connect_with_connector(service_fn(move |_: Uri| { - let uds_socket = uds_path.clone(); - async move { - Ok::<_, std::io::Error>(hyper_util::rt::TokioIo::new( - UnixStream::connect(uds_socket).await?, - )) - } - })) - .await - .map_err(|e| Error::ConnectionError(format!("Failed to connect: {:?}", e)))?; - Ok(channel) -} diff --git a/rust/monovertex/Cargo.toml b/rust/numaflow-core/Cargo.toml similarity index 98% rename from rust/monovertex/Cargo.toml rename to rust/numaflow-core/Cargo.toml index f3644dc140..85a3bc39b1 100644 --- a/rust/monovertex/Cargo.toml +++ b/rust/numaflow-core/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "monovertex" +name = "numaflow-core" version = "0.1.0" edition = "2021" diff --git a/rust/monovertex/build.rs b/rust/numaflow-core/build.rs similarity index 100% rename from rust/monovertex/build.rs rename to rust/numaflow-core/build.rs diff --git a/rust/monovertex/proto/sink.proto b/rust/numaflow-core/proto/sink.proto similarity index 100% rename from rust/monovertex/proto/sink.proto rename to rust/numaflow-core/proto/sink.proto diff --git a/rust/monovertex/proto/source.proto b/rust/numaflow-core/proto/source.proto similarity index 100% rename from rust/monovertex/proto/source.proto rename to rust/numaflow-core/proto/source.proto diff --git a/rust/monovertex/proto/sourcetransform.proto b/rust/numaflow-core/proto/sourcetransform.proto similarity index 100% rename from rust/monovertex/proto/sourcetransform.proto rename to rust/numaflow-core/proto/sourcetransform.proto diff --git a/rust/monovertex/src/config.rs b/rust/numaflow-core/src/config.rs similarity index 100% rename from rust/monovertex/src/config.rs rename to rust/numaflow-core/src/config.rs diff --git a/rust/monovertex/src/error.rs b/rust/numaflow-core/src/error.rs similarity index 100% rename from rust/monovertex/src/error.rs rename to rust/numaflow-core/src/error.rs diff --git a/rust/numaflow-core/src/lib.rs b/rust/numaflow-core/src/lib.rs new file mode 100644 index 
0000000000..dbe4a06137 --- /dev/null +++ b/rust/numaflow-core/src/lib.rs @@ -0,0 +1,36 @@ +use tracing::error; + +/// Custom Error handling. +mod error; +pub(crate) use crate::error::Result; +pub(crate) use crate::error::Error; + +/// MonoVertex is a simplified version of the [Pipeline] spec which is ideal for high TPS, low latency +/// use-cases which do not require [ISB]. +/// +/// [Pipeline]: https://numaflow.numaproj.io/core-concepts/pipeline/ +/// [ISB]: https://numaflow.numaproj.io/core-concepts/inter-step-buffer/ +pub mod monovertex; +pub use crate::monovertex::mono_vertex; + +/// Parse configs, including Numaflow specifications. +mod config; + +/// Internal message structure that is passed around. +mod message; +/// [Sink] serves as the endpoint for processed data that has been outputted from the platform, +/// which is then sent to an external system or application. +/// +/// [Sink]: https://numaflow.numaproj.io/user-guide/sinks/overview/ +mod sink; +/// [Source] is responsible for reliable reading data from an unbounded source into Numaflow. +/// +/// [Source]: https://numaflow.numaproj.io/user-guide/sources/overview/ +mod source; +/// Transformer is a feature that allows users to execute custom code to transform their data at +/// [source]. +/// +/// [Transformer]: https://numaflow.numaproj.io/user-guide/sources/transformer/overview/ +mod transformer; +/// Shared entities that can be used orthogonal to different modules. 
+mod shared; \ No newline at end of file diff --git a/rust/monovertex/src/message.rs b/rust/numaflow-core/src/message.rs similarity index 90% rename from rust/monovertex/src/message.rs rename to rust/numaflow-core/src/message.rs index d5cde5d2bc..b99a61b31d 100644 --- a/rust/monovertex/src/message.rs +++ b/rust/numaflow-core/src/message.rs @@ -5,12 +5,12 @@ use base64::Engine; use chrono::{DateTime, Utc}; use crate::error::Error; -use crate::shared::{prost_timestamp_from_utc, utc_from_timestamp}; -use crate::sink_pb::sink_request::Request; -use crate::sink_pb::SinkRequest; -use crate::source_pb; -use crate::source_pb::{read_response, AckRequest}; -use crate::sourcetransform_pb::SourceTransformRequest; +use crate::monovertex::sink_pb::sink_request::Request; +use crate::monovertex::sink_pb::SinkRequest; +use crate::monovertex::source_pb; +use crate::monovertex::source_pb::{read_response, AckRequest}; +use crate::monovertex::sourcetransform_pb::SourceTransformRequest; +use crate::shared::utils::{prost_timestamp_from_utc, utc_from_timestamp}; /// A message that is sent from the source to the sink. 
#[derive(Debug, Clone)] diff --git a/rust/monovertex/src/lib.rs b/rust/numaflow-core/src/monovertex.rs similarity index 89% rename from rust/monovertex/src/lib.rs rename to rust/numaflow-core/src/monovertex.rs index 01b07498a2..3601cfe3c4 100644 --- a/rust/monovertex/src/lib.rs +++ b/rust/numaflow-core/src/monovertex.rs @@ -1,43 +1,28 @@ -extern crate core; - +use crate::config::{config, SDKConfig}; +use crate::error; +use crate::shared::utils; +use crate::shared::utils::create_rpc_channel; +use crate::sink::user_defined::SinkWriter; +use crate::source::user_defined::{SourceAcker, SourceReader}; +use crate::transformer::user_defined::SourceTransformer; +use forwarder::ForwarderBuilder; +use metrics::MetricsState; +use sink_pb::sink_client::SinkClient; +use source_pb::source_client::SourceClient; +use sourcetransform_pb::source_transform_client::SourceTransformClient; use tokio::signal; use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; -use tracing::{error, info}; - -use crate::config::{config, SDKConfig}; +use tracing::info; -use crate::forwarder::ForwarderBuilder; -use crate::metrics::MetricsState; -use crate::shared::create_rpc_channel; -use crate::sink::SinkWriter; -use crate::sink_pb::sink_client::SinkClient; -use crate::source::{SourceAcker, SourceReader}; -use crate::source_pb::source_client::SourceClient; -use crate::sourcetransform_pb::source_transform_client::SourceTransformClient; -use crate::transformer::SourceTransformer; - -pub(crate) use self::error::Result; - -/// SourcerSinker orchestrates data movement from the Source to the Sink via the optional SourceTransformer. +/// [forwarder] orchestrates data movement from the Source to the Sink via the optional SourceTransformer. 
/// The forward-a-chunk executes the following in an infinite loop till a shutdown signal is received: /// - Read X messages from the source /// - Invokes the SourceTransformer concurrently /// - Calls the Sinker to write the batch to the Sink /// - Send Acknowledgement back to the Source -mod error; -pub(crate) use crate::error::Error; - -mod config; mod forwarder; -mod message; -mod metrics; -mod server_info; -mod shared; -mod sink; -mod source; -mod startup; -mod transformer; +pub(crate) mod metrics; pub(crate) mod source_pb { tonic::include_proto!("source.v1"); @@ -51,12 +36,12 @@ pub(crate) mod sourcetransform_pb { tonic::include_proto!("sourcetransformer.v1"); } -pub async fn mono_vertex() -> Result<()> { +pub async fn mono_vertex() -> error::Result<()> { let cln_token = CancellationToken::new(); let shutdown_cln_token = cln_token.clone(); // wait for SIG{INT,TERM} and invoke cancellation token. - let shutdown_handle: JoinHandle> = tokio::spawn(async move { + let shutdown_handle: JoinHandle> = tokio::spawn(async move { shutdown_signal().await; shutdown_cln_token.cancel(); Ok(()) @@ -98,9 +83,9 @@ async fn shutdown_signal() { } } -async fn start_forwarder(cln_token: CancellationToken, sdk_config: SDKConfig) -> Result<()> { +async fn start_forwarder(cln_token: CancellationToken, sdk_config: SDKConfig) -> error::Result<()> { // make sure that we have compatibility with the server - startup::check_compatibility( + utils::check_compatibility( &cln_token, sdk_config.source_server_info_path.into(), sdk_config.sink_server_info_path.into(), @@ -151,7 +136,7 @@ async fn start_forwarder(cln_token: CancellationToken, sdk_config: SDKConfig) -> }; // readiness check for all the ud containers - startup::wait_until_ready( + utils::wait_until_ready( cln_token.clone(), &mut source_grpc_client, &mut sink_grpc_client, @@ -172,10 +157,10 @@ async fn start_forwarder(cln_token: CancellationToken, sdk_config: SDKConfig) -> // start the metrics server // FIXME: what to do with the 
handle - startup::start_metrics_server(metrics_state).await; + utils::start_metrics_server(metrics_state).await; // start the lag reader to publish lag metrics - let mut lag_reader = startup::create_lag_reader(source_grpc_client.clone()).await; + let mut lag_reader = utils::create_lag_reader(source_grpc_client.clone()).await; lag_reader.start().await; // build the forwarder @@ -210,8 +195,9 @@ async fn start_forwarder(cln_token: CancellationToken, sdk_config: SDKConfig) -> #[cfg(test)] mod tests { use crate::config::SDKConfig; - use crate::server_info::ServerInfo; - use crate::{error, start_forwarder}; + use crate::error; + use crate::monovertex::start_forwarder; + use crate::shared::server_info::ServerInfo; use numaflow::source::{Message, Offset, SourceReadRequest}; use numaflow::{sink, source}; use std::fs::File; diff --git a/rust/monovertex/src/forwarder.rs b/rust/numaflow-core/src/monovertex/forwarder.rs similarity index 96% rename from rust/monovertex/src/forwarder.rs rename to rust/numaflow-core/src/monovertex/forwarder.rs index ac69c09903..561e52d6e1 100644 --- a/rust/monovertex/src/forwarder.rs +++ b/rust/numaflow-core/src/monovertex/forwarder.rs @@ -1,19 +1,19 @@ -use std::collections::HashMap; - use crate::config::{config, OnFailureStrategy}; -use crate::error::{Error, Result}; +use crate::error; +use crate::error::Error; use crate::message::{Message, Offset}; -use crate::metrics; -use crate::metrics::forward_metrics; -use crate::sink::SinkWriter; -use crate::sink_pb::Status::{Failure, Fallback, Success}; -use crate::source::{SourceAcker, SourceReader}; -use crate::transformer::SourceTransformer; +use crate::monovertex::metrics; +use crate::monovertex::metrics::forward_metrics; +use crate::monovertex::sink_pb::Status::{Failure, Fallback, Success}; +use crate::sink::user_defined::SinkWriter; +use crate::source::user_defined::{SourceAcker, SourceReader}; +use crate::transformer::user_defined::SourceTransformer; use chrono::Utc; +use log::warn; +use 
std::collections::HashMap; use tokio::task::JoinSet; use tokio::time::sleep; use tokio_util::sync::CancellationToken; -use tracing::log::warn; use tracing::{debug, info}; /// Forwarder is responsible for reading messages from the source, applying transformation if @@ -89,7 +89,7 @@ impl Forwarder { /// start starts the forward-a-chunk loop and exits only after a chunk has been forwarded and ack'ed. /// this means that, in the happy path scenario a block is always completely processed. /// this function will return on any error and will cause end up in a non-0 exit code. - pub(crate) async fn start(&mut self) -> Result<()> { + pub(crate) async fn start(&mut self) -> error::Result<()> { let mut processed_msgs_count: usize = 0; let mut last_forwarded_at = std::time::Instant::now(); info!("Forwarder has started"); @@ -124,7 +124,7 @@ impl Forwarder { /// Read messages from the source, apply transformation if transformer is present, /// write the messages to the sink, if fallback messages are present write them to the fallback sink, /// and then acknowledge the messages back to the source. - async fn read_and_process_messages(&mut self) -> Result { + async fn read_and_process_messages(&mut self) -> error::Result { let start_time = tokio::time::Instant::now(); let messages = self .source_reader @@ -198,7 +198,7 @@ impl Forwarder { // Applies transformation to the messages if transformer is present // we concurrently apply transformation to all the messages. 
- async fn apply_transformer(&self, messages: Vec) -> Result> { + async fn apply_transformer(&self, messages: Vec) -> error::Result> { let Some(transformer_client) = &self.source_transformer else { // return early if there is no transformer return Ok(messages); @@ -232,7 +232,7 @@ impl Forwarder { } // Writes the messages to the sink and handles fallback messages if present - async fn write_to_sink(&mut self, messages: Vec) -> Result<()> { + async fn write_to_sink(&mut self, messages: Vec) -> error::Result<()> { let msg_count = messages.len() as u64; if messages.is_empty() { @@ -322,7 +322,7 @@ impl Forwarder { error_map: &mut HashMap, fallback_msgs: &mut Vec, messages_to_send: &mut Vec, - ) -> Result { + ) -> error::Result { // if we are done with the messages, break the loop if messages_to_send.is_empty() { return Ok(false); @@ -373,7 +373,7 @@ impl Forwarder { error_map: &mut HashMap, fallback_msgs: &mut Vec, messages_to_send: &mut Vec, - ) -> Result { + ) -> error::Result { let start_time = tokio::time::Instant::now(); match self.sink_writer.sink_fn(messages_to_send.clone()).await { Ok(response) => { @@ -389,7 +389,7 @@ impl Forwarder { "Response does not contain a result".to_string(), )), }) - .collect::>>()?; + .collect::>>()?; error_map.clear(); // drain all the messages that were successfully written @@ -428,7 +428,7 @@ impl Forwarder { } // Writes the fallback messages to the fallback sink - async fn handle_fallback_messages(&mut self, fallback_msgs: Vec) -> Result<()> { + async fn handle_fallback_messages(&mut self, fallback_msgs: Vec) -> error::Result<()> { if self.fb_sink_writer.is_none() { return Err(Error::SinkError( "Response contains fallback messages but no fallback sink is configured" @@ -471,7 +471,7 @@ impl Forwarder { "Response does not contain a result".to_string(), )), }) - .collect::>>()?; + .collect::>>()?; let mut contains_fallback_status = false; @@ -534,7 +534,7 @@ impl Forwarder { } // Acknowledge the messages back to the source - async 
fn acknowledge_messages(&mut self, offsets: Vec) -> Result<()> { + async fn acknowledge_messages(&mut self, offsets: Vec) -> error::Result<()> { let n = offsets.len(); let start_time = tokio::time::Instant::now(); @@ -566,14 +566,14 @@ mod tests { use tokio::sync::mpsc::Sender; use tokio_util::sync::CancellationToken; - use crate::forwarder::ForwarderBuilder; - use crate::shared::create_rpc_channel; - use crate::sink::SinkWriter; - use crate::sink_pb::sink_client::SinkClient; - use crate::source::{SourceAcker, SourceReader}; - use crate::source_pb::source_client::SourceClient; - use crate::sourcetransform_pb::source_transform_client::SourceTransformClient; - use crate::transformer::SourceTransformer; + use crate::monovertex::forwarder::ForwarderBuilder; + use crate::monovertex::sink_pb::sink_client::SinkClient; + use crate::monovertex::source_pb::source_client::SourceClient; + use crate::monovertex::sourcetransform_pb::source_transform_client::SourceTransformClient; + use crate::shared::utils::create_rpc_channel; + use crate::sink::user_defined::SinkWriter; + use crate::source::user_defined::{SourceAcker, SourceReader}; + use crate::transformer::user_defined::SourceTransformer; struct SimpleSource { yet_to_be_acked: std::sync::RwLock>, diff --git a/rust/monovertex/src/metrics.rs b/rust/numaflow-core/src/monovertex/metrics.rs similarity index 98% rename from rust/monovertex/src/metrics.rs rename to rust/numaflow-core/src/monovertex/metrics.rs index fc6ab7a0b0..496f14330f 100644 --- a/rust/monovertex/src/metrics.rs +++ b/rust/numaflow-core/src/monovertex/metrics.rs @@ -18,9 +18,9 @@ use tracing::{debug, error, info}; use crate::config::config; use crate::error::Error; -use crate::sink_pb::sink_client::SinkClient; -use crate::source_pb::source_client::SourceClient; -use crate::sourcetransform_pb::source_transform_client::SourceTransformClient; +use crate::monovertex::sink_pb::sink_client::SinkClient; +use crate::monovertex::source_pb::source_client::SourceClient; +use 
crate::monovertex::sourcetransform_pb::source_transform_client::SourceTransformClient; use prometheus_client::encoding::text::encode; use prometheus_client::metrics::counter::Counter; use prometheus_client::metrics::family::Family; @@ -561,8 +561,8 @@ async fn calculate_pending( #[cfg(test)] mod tests { use super::*; - use crate::metrics::MetricsState; - use crate::shared::create_rpc_channel; + use crate::monovertex::metrics::MetricsState; + use crate::shared::utils::create_rpc_channel; use numaflow::source::{Message, Offset, SourceReadRequest}; use numaflow::{sink, source, sourcetransform}; use std::net::SocketAddr; diff --git a/rust/numaflow-core/src/shared.rs b/rust/numaflow-core/src/shared.rs new file mode 100644 index 0000000000..63753fe858 --- /dev/null +++ b/rust/numaflow-core/src/shared.rs @@ -0,0 +1,2 @@ +pub mod server_info; +pub mod utils; diff --git a/rust/monovertex/src/server_info.rs b/rust/numaflow-core/src/shared/server_info.rs similarity index 99% rename from rust/monovertex/src/server_info.rs rename to rust/numaflow-core/src/shared/server_info.rs index 35495097b8..1c78429cb6 100644 --- a/rust/monovertex/src/server_info.rs +++ b/rust/numaflow-core/src/shared/server_info.rs @@ -12,7 +12,7 @@ use tokio_util::sync::CancellationToken; use tracing::{info, warn}; use crate::error::{self, Error}; -use crate::server_info::version::SdkConstraints; +use crate::shared::server_info::version::SdkConstraints; // Constant to represent the end of the server info. // Equivalent to U+005C__END__. 
diff --git a/rust/monovertex/src/startup.rs b/rust/numaflow-core/src/shared/utils.rs similarity index 84% rename from rust/monovertex/src/startup.rs rename to rust/numaflow-core/src/shared/utils.rs index 2614d045b7..b8a5e07686 100644 --- a/rust/monovertex/src/startup.rs +++ b/rust/numaflow-core/src/shared/utils.rs @@ -3,18 +3,28 @@ use std::path::PathBuf; use std::time::Duration; use crate::config::config; +use crate::error; use crate::error::Error; -use crate::metrics::{start_metrics_https_server, LagReader, LagReaderBuilder, MetricsState}; -use crate::sink_pb::sink_client::SinkClient; -use crate::source_pb::source_client::SourceClient; -use crate::sourcetransform_pb::source_transform_client::SourceTransformClient; -use crate::{error, server_info}; - +use crate::monovertex::metrics::{ + start_metrics_https_server, LagReader, LagReaderBuilder, MetricsState, +}; +use crate::monovertex::sink_pb::sink_client::SinkClient; +use crate::monovertex::source_pb::source_client::SourceClient; +use crate::monovertex::sourcetransform_pb::source_transform_client::SourceTransformClient; +use crate::shared::server_info; + +use axum::http::Uri; +use backoff::retry::Retry; +use backoff::strategy::fixed; +use chrono::{DateTime, TimeZone, Timelike, Utc}; +use prost_types::Timestamp; +use tokio::net::UnixStream; use tokio::task::JoinHandle; use tokio::time::sleep; use tokio_util::sync::CancellationToken; -use tonic::transport::Channel; +use tonic::transport::{Channel, Endpoint}; use tonic::Request; +use tower::service_fn; use tracing::{info, warn}; pub(crate) async fn check_compatibility( @@ -135,11 +145,55 @@ pub(crate) async fn wait_until_ready( Ok(()) } +pub(crate) fn utc_from_timestamp(t: Option) -> DateTime { + t.map_or(Utc.timestamp_nanos(-1), |t| { + DateTime::from_timestamp(t.seconds, t.nanos as u32).unwrap_or(Utc.timestamp_nanos(-1)) + }) +} + +pub(crate) fn prost_timestamp_from_utc(t: DateTime) -> Option { + Some(Timestamp { + seconds: t.timestamp(), + nanos: t.nanosecond() as 
i32, + }) +} + +pub(crate) async fn create_rpc_channel(socket_path: PathBuf) -> crate::error::Result { + const RECONNECT_INTERVAL: u64 = 1000; + const MAX_RECONNECT_ATTEMPTS: usize = 5; + + let interval = fixed::Interval::from_millis(RECONNECT_INTERVAL).take(MAX_RECONNECT_ATTEMPTS); + + let channel = Retry::retry( + interval, + || async { connect_with_uds(socket_path.clone()).await }, + |_: &Error| true, + ) + .await?; + Ok(channel) +} + +pub(crate) async fn connect_with_uds(uds_path: PathBuf) -> Result { + let channel = Endpoint::try_from("http://[::]:50051") + .map_err(|e| Error::ConnectionError(format!("Failed to create endpoint: {:?}", e)))? + .connect_with_connector(service_fn(move |_: Uri| { + let uds_socket = uds_path.clone(); + async move { + Ok::<_, std::io::Error>(hyper_util::rt::TokioIo::new( + UnixStream::connect(uds_socket).await?, + )) + } + })) + .await + .map_err(|e| Error::ConnectionError(format!("Failed to connect: {:?}", e)))?; + Ok(channel) +} + #[cfg(test)] mod tests { use super::*; - use crate::server_info::ServerInfo; - use crate::shared::create_rpc_channel; + use crate::shared::server_info::ServerInfo; + use crate::shared::utils::create_rpc_channel; use numaflow::source::{Message, Offset, SourceReadRequest}; use numaflow::{sink, source, sourcetransform}; use std::fs::File; diff --git a/rust/numaflow-core/src/sink.rs b/rust/numaflow-core/src/sink.rs new file mode 100644 index 0000000000..ccd6fb8fbe --- /dev/null +++ b/rust/numaflow-core/src/sink.rs @@ -0,0 +1,4 @@ +/// [User-Defined Sink] extends Numaflow to add custom sources supported outside the builtins. 
+/// +/// [User-Defined Sink]: https://numaflow.numaproj.io/user-guide/sinks/user-defined-sinks/ +pub(crate) mod user_defined; diff --git a/rust/monovertex/src/sink.rs b/rust/numaflow-core/src/sink/user_defined.rs similarity index 90% rename from rust/monovertex/src/sink.rs rename to rust/numaflow-core/src/sink/user_defined.rs index 873404d4c5..0489bacd46 100644 --- a/rust/monovertex/src/sink.rs +++ b/rust/numaflow-core/src/sink/user_defined.rs @@ -1,8 +1,9 @@ -use crate::error::{Error, Result}; +use crate::error; +use crate::error::Error; use crate::message::Message; -use crate::sink_pb::sink_client::SinkClient; -use crate::sink_pb::sink_request::Status; -use crate::sink_pb::{Handshake, SinkRequest, SinkResponse}; +use crate::monovertex::sink_pb::sink_client::SinkClient; +use crate::monovertex::sink_pb::sink_request::Status; +use crate::monovertex::sink_pb::{Handshake, SinkRequest, SinkResponse}; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; use tonic::transport::Channel; @@ -17,7 +18,7 @@ pub struct SinkWriter { } impl SinkWriter { - pub(crate) async fn new(mut client: SinkClient) -> Result { + pub(crate) async fn new(mut client: SinkClient) -> error::Result { let (sink_tx, sink_rx) = mpsc::channel(DEFAULT_CHANNEL_SIZE); let sink_stream = ReceiverStream::new(sink_rx); @@ -55,7 +56,10 @@ impl SinkWriter { } /// writes a set of messages to the sink. - pub(crate) async fn sink_fn(&mut self, messages: Vec) -> Result> { + pub(crate) async fn sink_fn( + &mut self, + messages: Vec, + ) -> error::Result> { let requests: Vec = messages.into_iter().map(|message| message.into()).collect(); let num_requests = requests.len(); @@ -90,8 +94,8 @@ impl SinkWriter { .await? 
.ok_or(Error::SinkError("failed to receive response".to_string()))?; responses.push(response); - }; - + } + Ok(responses) } } @@ -100,11 +104,14 @@ impl SinkWriter { mod tests { use chrono::offset::Utc; use numaflow::sink; + use tokio::sync::mpsc; use tracing::info; - use super::*; - use crate::message::Offset; - use crate::shared::create_rpc_channel; + use crate::error::Result; + use crate::message::{Message, Offset}; + use crate::monovertex::sink_pb::sink_client::SinkClient; + use crate::shared::utils::create_rpc_channel; + use crate::sink::user_defined::SinkWriter; struct Logger; #[tonic::async_trait] diff --git a/rust/numaflow-core/src/source.rs b/rust/numaflow-core/src/source.rs new file mode 100644 index 0000000000..4ba2755ce7 --- /dev/null +++ b/rust/numaflow-core/src/source.rs @@ -0,0 +1,4 @@ +/// [User-Defined Source] extends Numaflow to add custom sources supported outside the builtins. +/// +/// [User-Defined Source]: https://numaflow.numaproj.io/user-guide/sources/user-defined-sources/ +pub(crate) mod user_defined; diff --git a/rust/monovertex/src/source.rs b/rust/numaflow-core/src/source/user_defined.rs similarity index 95% rename from rust/monovertex/src/source.rs rename to rust/numaflow-core/src/source/user_defined.rs index 6b3729d3d7..7805dd6610 100644 --- a/rust/monovertex/src/source.rs +++ b/rust/numaflow-core/src/source/user_defined.rs @@ -1,10 +1,10 @@ use crate::config::config; +use crate::error; use crate::error::Error::SourceError; -use crate::error::Result; use crate::message::{Message, Offset}; -use crate::source_pb; -use crate::source_pb::source_client::SourceClient; -use crate::source_pb::{ +use crate::monovertex::source_pb; +use crate::monovertex::source_pb::source_client::SourceClient; +use crate::monovertex::source_pb::{ ack_response, read_request, AckRequest, AckResponse, ReadRequest, ReadResponse, }; use tokio::sync::mpsc; @@ -20,7 +20,7 @@ pub(crate) struct SourceReader { } impl SourceReader { - pub(crate) async fn new(mut client: 
SourceClient) -> Result { + pub(crate) async fn new(mut client: SourceClient) -> error::Result { let (read_tx, read_rx) = mpsc::channel(config().batch_size as usize); let read_stream = ReceiverStream::new(read_rx); @@ -59,7 +59,7 @@ impl SourceReader { &mut self, num_records: u64, timeout_in_ms: u32, - ) -> Result> { + ) -> error::Result> { let request = ReadRequest { request: Some(read_request::Request { num_records, @@ -98,7 +98,7 @@ pub(crate) struct SourceAcker { } impl SourceAcker { - pub(crate) async fn new(mut client: SourceClient) -> Result { + pub(crate) async fn new(mut client: SourceClient) -> error::Result { let (ack_tx, ack_rx) = mpsc::channel(config().batch_size as usize); let ack_stream = ReceiverStream::new(ack_rx); @@ -130,7 +130,7 @@ impl SourceAcker { }) } - pub(crate) async fn ack(&mut self, offsets: Vec) -> Result { + pub(crate) async fn ack(&mut self, offsets: Vec) -> error::Result { let n = offsets.len(); // send n ack requests @@ -162,9 +162,9 @@ impl SourceAcker { mod tests { use std::collections::HashSet; - use crate::shared::create_rpc_channel; - use crate::source::{SourceAcker, SourceReader}; - use crate::source_pb::source_client::SourceClient; + use crate::monovertex::source_pb::source_client::SourceClient; + use crate::shared::utils::create_rpc_channel; + use crate::source::user_defined::{SourceAcker, SourceReader}; use chrono::Utc; use numaflow::source; use numaflow::source::{Message, Offset, SourceReadRequest}; diff --git a/rust/numaflow-core/src/transformer.rs b/rust/numaflow-core/src/transformer.rs new file mode 100644 index 0000000000..af407e159e --- /dev/null +++ b/rust/numaflow-core/src/transformer.rs @@ -0,0 +1,4 @@ +/// User-Defined Transformer extends Numaflow to add custom sources supported outside the builtins. 
+/// +/// [User-Defined Transformer]: https://numaflow.numaproj.io/user-guide/sources/transformer/overview/#build-your-own-transformer +pub(crate) mod user_defined; diff --git a/rust/monovertex/src/transformer.rs b/rust/numaflow-core/src/transformer/user_defined.rs similarity index 90% rename from rust/monovertex/src/transformer.rs rename to rust/numaflow-core/src/transformer/user_defined.rs index f7797b5d7d..de7b765b79 100644 --- a/rust/monovertex/src/transformer.rs +++ b/rust/numaflow-core/src/transformer/user_defined.rs @@ -1,8 +1,8 @@ -use crate::error::Result; +use crate::error; use crate::message::Message; -use crate::shared::utc_from_timestamp; -use crate::sourcetransform_pb::source_transform_client::SourceTransformClient; -use crate::sourcetransform_pb::SourceTransformRequest; +use crate::monovertex::sourcetransform_pb::source_transform_client::SourceTransformClient; +use crate::monovertex::sourcetransform_pb::SourceTransformRequest; +use crate::shared::utils::utc_from_timestamp; use tonic::transport::Channel; const DROP: &str = "U+005C__DROP__"; @@ -14,11 +14,14 @@ pub struct SourceTransformer { } impl SourceTransformer { - pub(crate) async fn new(client: SourceTransformClient) -> Result { + pub(crate) async fn new(client: SourceTransformClient) -> error::Result { Ok(Self { client }) } - pub(crate) async fn transform_fn(&mut self, message: Message) -> Result>> { + pub(crate) async fn transform_fn( + &mut self, + message: Message, + ) -> error::Result>> { // fields which will not be changed let offset = message.offset.clone(); let id = message.id.clone(); @@ -57,9 +60,9 @@ impl SourceTransformer { mod tests { use std::error::Error; - use crate::shared::create_rpc_channel; - use crate::sourcetransform_pb::source_transform_client::SourceTransformClient; - use crate::transformer::SourceTransformer; + use crate::monovertex::sourcetransform_pb::source_transform_client::SourceTransformClient; + use crate::shared::utils::create_rpc_channel; + use 
crate::transformer::user_defined::SourceTransformer; use numaflow::sourcetransform; use tempfile::TempDir; @@ -137,7 +140,7 @@ mod tests { ) -> Vec { let message = sourcetransform::Message::new(input.value, chrono::offset::Utc::now()) .keys(input.keys) - .tags(vec![crate::transformer::DROP.to_string()]); + .tags(vec![crate::transformer::user_defined::DROP.to_string()]); vec![message] } } diff --git a/rust/src/bin/main.rs b/rust/src/bin/main.rs index 0b000dc032..46a811e814 100644 --- a/rust/src/bin/main.rs +++ b/rust/src/bin/main.rs @@ -30,7 +30,7 @@ async fn main() { info!("Error running servesink: {}", e); } } else if args.contains(&"--monovertex".to_string()) { - if let Err(e) = monovertex::mono_vertex().await { + if let Err(e) = numaflow_core::monovertex::mono_vertex().await { error!("Error running monovertex: {}", e); } } else { From 66c86133df91a0c6076261f14b6e72f37910473c Mon Sep 17 00:00:00 2001 From: Vigith Maurice Date: Sat, 28 Sep 2024 09:38:30 -0700 Subject: [PATCH 076/188] chore: merge source_{reader,acker} impl into one (#2102) Signed-off-by: Vigith Maurice --- rust/numaflow-core/src/lib.rs | 6 +- rust/numaflow-core/src/monovertex.rs | 8 +- .../numaflow-core/src/monovertex/forwarder.rs | 65 +++------ rust/numaflow-core/src/source/user_defined.rs | 128 +++++++++--------- 4 files changed, 87 insertions(+), 120 deletions(-) diff --git a/rust/numaflow-core/src/lib.rs b/rust/numaflow-core/src/lib.rs index dbe4a06137..a941bd6cb1 100644 --- a/rust/numaflow-core/src/lib.rs +++ b/rust/numaflow-core/src/lib.rs @@ -2,8 +2,8 @@ use tracing::error; /// Custom Error handling. mod error; -pub(crate) use crate::error::Result; pub(crate) use crate::error::Error; +pub(crate) use crate::error::Result; /// MonoVertex is a simplified version of the [Pipeline] spec which is ideal for high TPS, low latency /// use-cases which do not require [ISB]. @@ -18,6 +18,8 @@ mod config; /// Internal message structure that is passed around. 
mod message; +/// Shared entities that can be used orthogonal to different modules. +mod shared; /// [Sink] serves as the endpoint for processed data that has been outputted from the platform, /// which is then sent to an external system or application. /// @@ -32,5 +34,3 @@ mod source; /// /// [Transformer]: https://numaflow.numaproj.io/user-guide/sources/transformer/overview/ mod transformer; -/// Shared entities that can be used orthogonal to different modules. -mod shared; \ No newline at end of file diff --git a/rust/numaflow-core/src/monovertex.rs b/rust/numaflow-core/src/monovertex.rs index 3601cfe3c4..afd8d0dc09 100644 --- a/rust/numaflow-core/src/monovertex.rs +++ b/rust/numaflow-core/src/monovertex.rs @@ -3,7 +3,7 @@ use crate::error; use crate::shared::utils; use crate::shared::utils::create_rpc_channel; use crate::sink::user_defined::SinkWriter; -use crate::source::user_defined::{SourceAcker, SourceReader}; +use crate::source::user_defined::Source; use crate::transformer::user_defined::SourceTransformer; use forwarder::ForwarderBuilder; use metrics::MetricsState; @@ -164,12 +164,10 @@ async fn start_forwarder(cln_token: CancellationToken, sdk_config: SDKConfig) -> lag_reader.start().await; // build the forwarder - let source_reader = SourceReader::new(source_grpc_client.clone()).await?; - let source_acker = SourceAcker::new(source_grpc_client.clone()).await?; + let source_reader = Source::new(source_grpc_client.clone()).await?; let sink_writer = SinkWriter::new(sink_grpc_client.clone()).await?; - let mut forwarder_builder = - ForwarderBuilder::new(source_reader, source_acker, sink_writer, cln_token); + let mut forwarder_builder = ForwarderBuilder::new(source_reader, sink_writer, cln_token); // add transformer if exists if let Some(transformer_grpc_client) = transformer_grpc_client { diff --git a/rust/numaflow-core/src/monovertex/forwarder.rs b/rust/numaflow-core/src/monovertex/forwarder.rs index 561e52d6e1..a32aff093b 100644 --- 
a/rust/numaflow-core/src/monovertex/forwarder.rs +++ b/rust/numaflow-core/src/monovertex/forwarder.rs @@ -6,7 +6,7 @@ use crate::monovertex::metrics; use crate::monovertex::metrics::forward_metrics; use crate::monovertex::sink_pb::Status::{Failure, Fallback, Success}; use crate::sink::user_defined::SinkWriter; -use crate::source::user_defined::{SourceAcker, SourceReader}; +use crate::source::user_defined::Source; use crate::transformer::user_defined::SourceTransformer; use chrono::Utc; use log::warn; @@ -20,8 +20,7 @@ use tracing::{debug, info}; /// transformer is present, writing the messages to the sink, and then acknowledging the messages /// back to the source. pub(crate) struct Forwarder { - source_reader: SourceReader, - source_acker: SourceAcker, + source: Source, sink_writer: SinkWriter, source_transformer: Option, fb_sink_writer: Option, @@ -31,8 +30,7 @@ pub(crate) struct Forwarder { /// ForwarderBuilder is used to build a Forwarder instance with optional fields. pub(crate) struct ForwarderBuilder { - source_reader: SourceReader, - source_acker: SourceAcker, + source: Source, sink_writer: SinkWriter, cln_token: CancellationToken, source_transformer: Option, @@ -42,14 +40,12 @@ pub(crate) struct ForwarderBuilder { impl ForwarderBuilder { /// Create a new builder with mandatory fields pub(crate) fn new( - source_reader: SourceReader, - source_acker: SourceAcker, + source: Source, sink_writer: SinkWriter, cln_token: CancellationToken, ) -> Self { Self { - source_reader, - source_acker, + source, sink_writer, cln_token, source_transformer: None, @@ -74,8 +70,7 @@ impl ForwarderBuilder { pub(crate) fn build(self) -> Forwarder { let common_labels = metrics::forward_metrics_labels().clone(); Forwarder { - source_reader: self.source_reader, - source_acker: self.source_acker, + source: self.source, sink_writer: self.sink_writer, source_transformer: self.source_transformer, fb_sink_writer: self.fb_sink_writer, @@ -127,7 +122,7 @@ impl Forwarder { async fn 
read_and_process_messages(&mut self) -> error::Result { let start_time = tokio::time::Instant::now(); let messages = self - .source_reader + .source .read(config().batch_size, config().timeout_in_ms) .await .map_err(|e| { @@ -538,7 +533,7 @@ impl Forwarder { let n = offsets.len(); let start_time = tokio::time::Instant::now(); - self.source_acker.ack(offsets).await?; + self.source.ack(offsets).await?; debug!("Ack latency - {}ms", start_time.elapsed().as_millis()); @@ -572,7 +567,7 @@ mod tests { use crate::monovertex::sourcetransform_pb::source_transform_client::SourceTransformClient; use crate::shared::utils::create_rpc_channel; use crate::sink::user_defined::SinkWriter; - use crate::source::user_defined::{SourceAcker, SourceReader}; + use crate::source::user_defined::Source; use crate::transformer::user_defined::SourceTransformer; struct SimpleSource { @@ -752,18 +747,12 @@ mod tests { let cln_token = CancellationToken::new(); - let source_reader = SourceReader::new(SourceClient::new( + let source = Source::new(SourceClient::new( create_rpc_channel(source_sock_file.clone()).await.unwrap(), )) .await .expect("failed to connect to source server"); - let source_acker = SourceAcker::new(SourceClient::new( - create_rpc_channel(source_sock_file).await.unwrap(), - )) - .await - .expect("failed to connect to source server"); - let sink_writer = SinkWriter::new(SinkClient::new( create_rpc_channel(sink_sock_file).await.unwrap(), )) @@ -776,10 +765,9 @@ mod tests { .await .expect("failed to connect to transformer server"); - let mut forwarder = - ForwarderBuilder::new(source_reader, source_acker, sink_writer, cln_token.clone()) - .source_transformer(transformer_client) - .build(); + let mut forwarder = ForwarderBuilder::new(source, sink_writer, cln_token.clone()) + .source_transformer(transformer_client) + .build(); // Assert the received message in a different task let assert_handle = tokio::spawn(async move { @@ -881,27 +869,19 @@ mod tests { let cln_token = 
CancellationToken::new(); - let source_reader = SourceReader::new(SourceClient::new( + let source = Source::new(SourceClient::new( create_rpc_channel(source_sock_file.clone()).await.unwrap(), )) .await .expect("failed to connect to source server"); - let source_acker = SourceAcker::new(SourceClient::new( - create_rpc_channel(source_sock_file).await.unwrap(), - )) - .await - .expect("failed to connect to source server"); - let sink_writer = SinkWriter::new(SinkClient::new( create_rpc_channel(sink_sock_file).await.unwrap(), )) .await .expect("failed to connect to sink server"); - let mut forwarder = - ForwarderBuilder::new(source_reader, source_acker, sink_writer, cln_token.clone()) - .build(); + let mut forwarder = ForwarderBuilder::new(source, sink_writer, cln_token.clone()).build(); let cancel_handle = tokio::spawn(async move { tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; @@ -1003,18 +983,12 @@ mod tests { let cln_token = CancellationToken::new(); - let source_reader = SourceReader::new(SourceClient::new( + let source = Source::new(SourceClient::new( create_rpc_channel(source_sock_file.clone()).await.unwrap(), )) .await .expect("failed to connect to source server"); - let source_acker = SourceAcker::new(SourceClient::new( - create_rpc_channel(source_sock_file).await.unwrap(), - )) - .await - .expect("failed to connect to source server"); - let sink_writer = SinkWriter::new(SinkClient::new( create_rpc_channel(sink_sock_file).await.unwrap(), )) @@ -1027,10 +1001,9 @@ mod tests { .await .expect("failed to connect to fb sink server"); - let mut forwarder = - ForwarderBuilder::new(source_reader, source_acker, sink_writer, cln_token.clone()) - .fallback_sink_writer(fb_sink_writer) - .build(); + let mut forwarder = ForwarderBuilder::new(source, sink_writer, cln_token.clone()) + .fallback_sink_writer(fb_sink_writer) + .build(); let assert_handle = tokio::spawn(async move { let received_message = sink_rx.recv().await.unwrap(); diff --git 
a/rust/numaflow-core/src/source/user_defined.rs b/rust/numaflow-core/src/source/user_defined.rs index 7805dd6610..b51324a68e 100644 --- a/rust/numaflow-core/src/source/user_defined.rs +++ b/rust/numaflow-core/src/source/user_defined.rs @@ -14,13 +14,29 @@ use tonic::{Request, Streaming}; /// SourceReader reads messages from a source. #[derive(Debug)] -pub(crate) struct SourceReader { +pub(crate) struct Source { read_tx: mpsc::Sender, resp_stream: Streaming, + ack_tx: mpsc::Sender, + ack_resp_stream: Streaming, } -impl SourceReader { +impl Source { pub(crate) async fn new(mut client: SourceClient) -> error::Result { + let (read_tx, resp_stream) = Self::create_reader(&mut client).await?; + let (ack_tx, ack_resp_stream) = Self::create_acker(&mut client).await?; + + Ok(Self { + read_tx, + resp_stream, + ack_tx, + ack_resp_stream, + }) + } + + pub(crate) async fn create_reader( + client: &mut SourceClient, + ) -> error::Result<(mpsc::Sender, Streaming)> { let (read_tx, read_rx) = mpsc::channel(config().batch_size as usize); let read_stream = ReceiverStream::new(read_rx); @@ -49,10 +65,38 @@ impl SourceReader { return Err(SourceError("invalid handshake response".to_string())); } - Ok(Self { - read_tx, - resp_stream, - }) + Ok((read_tx, resp_stream)) + } + + pub(crate) async fn create_acker( + client: &mut SourceClient, + ) -> error::Result<(mpsc::Sender, Streaming)> { + let (ack_tx, ack_rx) = mpsc::channel(config().batch_size as usize); + let ack_stream = ReceiverStream::new(ack_rx); + + // do a handshake for ack with the server before we start sending ack requests + let ack_handshake_request = AckRequest { + request: None, + handshake: Some(source_pb::Handshake { sot: true }), + }; + ack_tx + .send(ack_handshake_request) + .await + .map_err(|e| SourceError(format!("failed to send ack handshake request: {}", e)))?; + + let mut ack_resp_stream = client.ack_fn(Request::new(ack_stream)).await?.into_inner(); + + // first response from the server will be the handshake 
response. We need to check if the + // server has accepted the handshake. + let ack_handshake_response = ack_resp_stream.message().await?.ok_or(SourceError( + "failed to receive ack handshake response".to_string(), + ))?; + // handshake cannot to None during the initial phase and it has to set `sot` to true. + if ack_handshake_response.handshake.map_or(true, |h| !h.sot) { + return Err(SourceError("invalid ack handshake response".to_string())); + } + + Ok((ack_tx, ack_resp_stream)) } pub(crate) async fn read( @@ -88,47 +132,6 @@ impl SourceReader { } Ok(messages) } -} - -/// SourceAcker acks the messages from a source. -#[derive(Debug)] -pub(crate) struct SourceAcker { - ack_tx: mpsc::Sender, - ack_resp_stream: Streaming, -} - -impl SourceAcker { - pub(crate) async fn new(mut client: SourceClient) -> error::Result { - let (ack_tx, ack_rx) = mpsc::channel(config().batch_size as usize); - let ack_stream = ReceiverStream::new(ack_rx); - - // do a handshake for ack with the server before we start sending ack requests - let ack_handshake_request = AckRequest { - request: None, - handshake: Some(source_pb::Handshake { sot: true }), - }; - ack_tx - .send(ack_handshake_request) - .await - .map_err(|e| SourceError(format!("failed to send ack handshake request: {}", e)))?; - - let mut ack_resp_stream = client.ack_fn(Request::new(ack_stream)).await?.into_inner(); - - // first response from the server will be the handshake response. We need to check if the - // server has accepted the handshake. - let ack_handshake_response = ack_resp_stream.message().await?.ok_or(SourceError( - "failed to receive ack handshake response".to_string(), - ))?; - // handshake cannot to None during the initial phase and it has to set `sot` to true. 
- if ack_handshake_response.handshake.map_or(true, |h| !h.sot) { - return Err(SourceError("invalid ack handshake response".to_string())); - } - - Ok(Self { - ack_tx, - ack_resp_stream, - }) - } pub(crate) async fn ack(&mut self, offsets: Vec) -> error::Result { let n = offsets.len(); @@ -164,7 +167,7 @@ mod tests { use crate::monovertex::source_pb::source_client::SourceClient; use crate::shared::utils::create_rpc_channel; - use crate::source::user_defined::{SourceAcker, SourceReader}; + use crate::source::user_defined::Source; use chrono::Utc; use numaflow::source; use numaflow::source::{Message, Offset, SourceReadRequest}; @@ -248,24 +251,17 @@ mod tests { // TODO: flaky tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; - let mut source_reader = SourceReader::new(SourceClient::new( - create_rpc_channel(sock_file.clone()).await.unwrap(), - )) - .await - .map_err(|e| panic!("failed to create source reader: {:?}", e)) - .unwrap(); - - let mut source_acker = SourceAcker::new(SourceClient::new( - create_rpc_channel(sock_file).await.unwrap(), - )) - .await - .map_err(|e| panic!("failed to create source acker: {:?}", e)) - .unwrap(); - - let messages = source_reader.read(5, 1000).await.unwrap(); + let client = SourceClient::new(create_rpc_channel(sock_file).await.unwrap()); + + let mut source = Source::new(client) + .await + .map_err(|e| panic!("failed to create source reader: {:?}", e)) + .unwrap(); + + let messages = source.read(5, 1000).await.unwrap(); assert_eq!(messages.len(), 5); - let response = source_acker + let response = source .ack(messages.iter().map(|m| m.offset.clone()).collect()) .await .unwrap(); @@ -273,8 +269,8 @@ mod tests { // we need to drop the client, because if there are any in-flight requests // server fails to shut down. 
https://github.com/numaproj/numaflow-rs/issues/85 - drop(source_reader); - drop(source_acker); + drop(source); + shutdown_tx .send(()) .expect("failed to send shutdown signal"); From ac7b33b803c2530df6c7f24252fb4e714c9bd07f Mon Sep 17 00:00:00 2001 From: Vigith Maurice Date: Sun, 29 Sep 2024 20:16:03 -0700 Subject: [PATCH 077/188] chore: rename monovertex in Dockerfile (#2101) Signed-off-by: Vigith Maurice --- rust/Dockerfile | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/rust/Dockerfile b/rust/Dockerfile index 62208944f6..3fcd606faa 100644 --- a/rust/Dockerfile +++ b/rust/Dockerfile @@ -1,6 +1,6 @@ # Use multi-stage builds to keep the final image small # Use an official Rust image for the build stage -FROM rust:1.79-bookworm as builder +FROM rust:1.80-bookworm as builder RUN curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash @@ -20,8 +20,8 @@ COPY ./backoff/Cargo.toml ./backoff/Cargo.toml RUN cargo new numaflow-models COPY ./numaflow-models/Cargo.toml ./numaflow-models/Cargo.toml -RUN cargo new monovertex -COPY numaflow-core/Cargo.toml ./monovertex/Cargo.toml +RUN cargo new numaflow-core +COPY numaflow-core/Cargo.toml ./numaflow-core/Cargo.toml RUN cargo new serving COPY ./serving/Cargo.toml ./serving/Cargo.toml @@ -39,9 +39,9 @@ COPY ./servesink/src ./servesink/src COPY ./backoff/src ./backoff/src COPY ./numaflow-models/src ./numaflow-models/src COPY ./serving/src ./serving/src -COPY numaflow-core/src ./monovertex/src -COPY numaflow-core/build.rs ./monovertex/build.rs -COPY numaflow-core/proto ./monovertex/proto +COPY numaflow-core/src ./numaflow-core/src +COPY numaflow-core/build.rs ./numaflow-core/build.rs +COPY numaflow-core/proto ./numaflow-core/proto # Build the real binaries RUN touch src/bin/main.rs && \ From e69551ba07d14dee5dccd90b28cf8b497943f415 Mon Sep 17 00:00:00 2001 From: Sreekanth Date: Wed, 2 Oct 2024 09:25:04 +0530 
Subject: [PATCH 078/188] feat: Use gRPC bidirectional streaming for source transformer (#2071) --- Makefile | 2 +- go.mod | 4 +- go.sum | 8 +- hack/generate-proto.sh | 7 +- pkg/apis/proto/daemon/daemon_grpc.pb.go | 25 +- .../proto/mvtxdaemon/mvtxdaemon_grpc.pb.go | 16 +- .../sourcetransform/v1/sourcetransform.proto | 30 +- pkg/isb/tracker/message_tracker.go | 56 ++ .../tracker/message_tracker_test.go} | 30 +- pkg/sdkclient/grpc/grpc_utils.go | 2 - pkg/sdkclient/sourcetransformer/client.go | 120 +++- .../sourcetransformer/client_test.go | 168 +++-- pkg/sdkclient/sourcetransformer/interface.go | 2 +- .../forward/applier/sourcetransformer.go | 8 +- pkg/sources/forward/data_forward.go | 69 +-- pkg/sources/forward/data_forward_test.go | 91 ++- pkg/sources/forward/shutdown_test.go | 12 +- pkg/sources/generator/tickgen.go | 1 - pkg/sources/source.go | 2 +- pkg/sources/transformer/grpc_transformer.go | 160 +++-- .../transformer/grpc_transformer_test.go | 575 +++++------------- pkg/udf/forward/forward.go | 2 +- pkg/udf/rpc/grpc_batch_map.go | 33 +- pkg/udf/rpc/tracker.go | 75 --- pkg/webhook/validator/validator.go | 5 +- rust/Cargo.lock | 2 +- rust/numaflow-core/Cargo.toml | 2 +- .../numaflow-core/proto/sourcetransform.proto | 33 +- rust/numaflow-core/src/config.rs | 18 +- rust/numaflow-core/src/message.rs | 16 +- .../numaflow-core/src/monovertex/forwarder.rs | 32 +- .../src/transformer/user_defined.rs | 224 +++++-- rust/servesink/Cargo.toml | 2 +- .../extract-event-time-from-payload.yaml | 2 +- test/transformer-e2e/transformer_test.go | 30 +- 35 files changed, 946 insertions(+), 918 deletions(-) create mode 100644 pkg/isb/tracker/message_tracker.go rename pkg/{udf/rpc/tracker_test.go => isb/tracker/message_tracker_test.go} (53%) delete mode 100644 pkg/udf/rpc/tracker.go diff --git a/Makefile b/Makefile index a4bc2012bc..11d91c5890 100644 --- a/Makefile +++ b/Makefile @@ -244,7 +244,7 @@ manifests: crds kubectl kustomize config/extensions/webhook > 
config/validating-webhook-install.yaml $(GOPATH)/bin/golangci-lint: - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b `go env GOPATH`/bin v1.54.1 + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b `go env GOPATH`/bin v1.61.0 .PHONY: lint lint: $(GOPATH)/bin/golangci-lint diff --git a/go.mod b/go.mod index c2d7d6edd5..ba62a6f28d 100644 --- a/go.mod +++ b/go.mod @@ -32,7 +32,7 @@ require ( github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe github.com/nats-io/nats-server/v2 v2.10.20 github.com/nats-io/nats.go v1.37.0 - github.com/numaproj/numaflow-go v0.8.2-0.20240923064822-e16694a878d0 + github.com/numaproj/numaflow-go v0.8.2-0.20241001031210-60188185d9c0 github.com/prometheus/client_golang v1.18.0 github.com/prometheus/client_model v0.5.0 github.com/prometheus/common v0.45.0 @@ -55,7 +55,7 @@ require ( golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 google.golang.org/grpc v1.66.0 - google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 + google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.4.0 google.golang.org/protobuf v1.34.2 k8s.io/api v0.29.2 k8s.io/apimachinery v0.29.2 diff --git a/go.sum b/go.sum index b17e994439..9670ccac4b 100644 --- a/go.sum +++ b/go.sum @@ -485,8 +485,8 @@ github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDm github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/numaproj/numaflow-go v0.8.2-0.20240923064822-e16694a878d0 h1:qPqZfJdPdsz4qymyzMSNICQe/xBnx9P/G3hRbC1DR7k= -github.com/numaproj/numaflow-go v0.8.2-0.20240923064822-e16694a878d0/go.mod 
h1:g4JZOyUPhjfhv+kR0sX5d8taw/dasgKPXLvQBi39mJ4= +github.com/numaproj/numaflow-go v0.8.2-0.20241001031210-60188185d9c0 h1:MN4Q36mPrXqPrv2dNoK3gyV7c1CGwUF3wNJxTZSw1lk= +github.com/numaproj/numaflow-go v0.8.2-0.20241001031210-60188185d9c0/go.mod h1:FaCMeV0V9SiLcVf2fwT+GeTJHNaK2gdQsTAIqQ4x7oc= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= @@ -1049,8 +1049,8 @@ google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c= google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 h1:rNBFJjBCOgVr9pWD7rs/knKL4FRTKgpZmsRfV214zcA= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0/go.mod h1:Dk1tviKTvMCz5tvh7t+fh94dhmQVHuCt2OzJB3CTW9Y= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.4.0 h1:9SxA29VM43MF5Z9dQu694wmY5t8E/Gxr7s+RSxiIDmc= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.4.0/go.mod h1:yZOK5zhQMiALmuweVdIVoQPa6eIJyXn2B9g5dJDhqX4= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/hack/generate-proto.sh b/hack/generate-proto.sh index bf970ce318..7d9f19cb67 100755 --- a/hack/generate-proto.sh +++ b/hack/generate-proto.sh @@ -22,11 +22,14 @@ install-protobuf() { ARCH=$(uname_arch) echo "OS: $OS ARCH: $ARCH" + if [[ "$ARCH" = "amd64" ]]; then + ARCH="x86_64" + elif [[ "$ARCH" = "arm64" ]]; then + 
ARCH="aarch_64" + fi BINARY_URL=$PB_REL/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-${OS}-${ARCH}.zip if [[ "$OS" = "darwin" ]]; then BINARY_URL=$PB_REL/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-osx-universal_binary.zip - elif [[ "$OS" = "linux" ]]; then - BINARY_URL=$PB_REL/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip fi echo "Downloading $BINARY_URL" diff --git a/pkg/apis/proto/daemon/daemon_grpc.pb.go b/pkg/apis/proto/daemon/daemon_grpc.pb.go index 61e15a2a62..6b348d8fdf 100644 --- a/pkg/apis/proto/daemon/daemon_grpc.pb.go +++ b/pkg/apis/proto/daemon/daemon_grpc.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 +// - protoc-gen-go-grpc v1.4.0 // - protoc v5.27.2 // source: pkg/apis/proto/daemon/daemon.proto @@ -30,8 +30,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.62.0 or later. +const _ = grpc.SupportPackageIsVersion8 const ( DaemonService_ListBuffers_FullMethodName = "/daemon.DaemonService/ListBuffers" @@ -44,6 +44,8 @@ const ( // DaemonServiceClient is the client API for DaemonService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// DaemonService is a grpc service that is used to provide APIs for giving any pipeline information. 
type DaemonServiceClient interface { ListBuffers(ctx context.Context, in *ListBuffersRequest, opts ...grpc.CallOption) (*ListBuffersResponse, error) GetBuffer(ctx context.Context, in *GetBufferRequest, opts ...grpc.CallOption) (*GetBufferResponse, error) @@ -62,8 +64,9 @@ func NewDaemonServiceClient(cc grpc.ClientConnInterface) DaemonServiceClient { } func (c *daemonServiceClient) ListBuffers(ctx context.Context, in *ListBuffersRequest, opts ...grpc.CallOption) (*ListBuffersResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(ListBuffersResponse) - err := c.cc.Invoke(ctx, DaemonService_ListBuffers_FullMethodName, in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_ListBuffers_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -71,8 +74,9 @@ func (c *daemonServiceClient) ListBuffers(ctx context.Context, in *ListBuffersRe } func (c *daemonServiceClient) GetBuffer(ctx context.Context, in *GetBufferRequest, opts ...grpc.CallOption) (*GetBufferResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetBufferResponse) - err := c.cc.Invoke(ctx, DaemonService_GetBuffer_FullMethodName, in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_GetBuffer_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -80,8 +84,9 @@ func (c *daemonServiceClient) GetBuffer(ctx context.Context, in *GetBufferReques } func (c *daemonServiceClient) GetVertexMetrics(ctx context.Context, in *GetVertexMetricsRequest, opts ...grpc.CallOption) (*GetVertexMetricsResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetVertexMetricsResponse) - err := c.cc.Invoke(ctx, DaemonService_GetVertexMetrics_FullMethodName, in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_GetVertexMetrics_FullMethodName, in, out, cOpts...) 
if err != nil { return nil, err } @@ -89,8 +94,9 @@ func (c *daemonServiceClient) GetVertexMetrics(ctx context.Context, in *GetVerte } func (c *daemonServiceClient) GetPipelineWatermarks(ctx context.Context, in *GetPipelineWatermarksRequest, opts ...grpc.CallOption) (*GetPipelineWatermarksResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetPipelineWatermarksResponse) - err := c.cc.Invoke(ctx, DaemonService_GetPipelineWatermarks_FullMethodName, in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_GetPipelineWatermarks_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -98,8 +104,9 @@ func (c *daemonServiceClient) GetPipelineWatermarks(ctx context.Context, in *Get } func (c *daemonServiceClient) GetPipelineStatus(ctx context.Context, in *GetPipelineStatusRequest, opts ...grpc.CallOption) (*GetPipelineStatusResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetPipelineStatusResponse) - err := c.cc.Invoke(ctx, DaemonService_GetPipelineStatus_FullMethodName, in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_GetPipelineStatus_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -109,6 +116,8 @@ func (c *daemonServiceClient) GetPipelineStatus(ctx context.Context, in *GetPipe // DaemonServiceServer is the server API for DaemonService service. // All implementations must embed UnimplementedDaemonServiceServer // for forward compatibility +// +// DaemonService is a grpc service that is used to provide APIs for giving any pipeline information. 
type DaemonServiceServer interface { ListBuffers(context.Context, *ListBuffersRequest) (*ListBuffersResponse, error) GetBuffer(context.Context, *GetBufferRequest) (*GetBufferResponse, error) diff --git a/pkg/apis/proto/mvtxdaemon/mvtxdaemon_grpc.pb.go b/pkg/apis/proto/mvtxdaemon/mvtxdaemon_grpc.pb.go index 33f0b26d6b..76477c3de0 100644 --- a/pkg/apis/proto/mvtxdaemon/mvtxdaemon_grpc.pb.go +++ b/pkg/apis/proto/mvtxdaemon/mvtxdaemon_grpc.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 +// - protoc-gen-go-grpc v1.4.0 // - protoc v5.27.2 // source: pkg/apis/proto/mvtxdaemon/mvtxdaemon.proto @@ -31,8 +31,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.62.0 or later. +const _ = grpc.SupportPackageIsVersion8 const ( MonoVertexDaemonService_GetMonoVertexMetrics_FullMethodName = "/mvtxdaemon.MonoVertexDaemonService/GetMonoVertexMetrics" @@ -42,6 +42,8 @@ const ( // MonoVertexDaemonServiceClient is the client API for MonoVertexDaemonService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// MonoVertexDaemonService is a grpc service that is used to provide APIs for giving any MonoVertex information. 
type MonoVertexDaemonServiceClient interface { GetMonoVertexMetrics(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*GetMonoVertexMetricsResponse, error) GetMonoVertexStatus(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*GetMonoVertexStatusResponse, error) @@ -56,8 +58,9 @@ func NewMonoVertexDaemonServiceClient(cc grpc.ClientConnInterface) MonoVertexDae } func (c *monoVertexDaemonServiceClient) GetMonoVertexMetrics(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*GetMonoVertexMetricsResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetMonoVertexMetricsResponse) - err := c.cc.Invoke(ctx, MonoVertexDaemonService_GetMonoVertexMetrics_FullMethodName, in, out, opts...) + err := c.cc.Invoke(ctx, MonoVertexDaemonService_GetMonoVertexMetrics_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -65,8 +68,9 @@ func (c *monoVertexDaemonServiceClient) GetMonoVertexMetrics(ctx context.Context } func (c *monoVertexDaemonServiceClient) GetMonoVertexStatus(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*GetMonoVertexStatusResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetMonoVertexStatusResponse) - err := c.cc.Invoke(ctx, MonoVertexDaemonService_GetMonoVertexStatus_FullMethodName, in, out, opts...) + err := c.cc.Invoke(ctx, MonoVertexDaemonService_GetMonoVertexStatus_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -76,6 +80,8 @@ func (c *monoVertexDaemonServiceClient) GetMonoVertexStatus(ctx context.Context, // MonoVertexDaemonServiceServer is the server API for MonoVertexDaemonService service. // All implementations must embed UnimplementedMonoVertexDaemonServiceServer // for forward compatibility +// +// MonoVertexDaemonService is a grpc service that is used to provide APIs for giving any MonoVertex information. 
type MonoVertexDaemonServiceServer interface { GetMonoVertexMetrics(context.Context, *emptypb.Empty) (*GetMonoVertexMetricsResponse, error) GetMonoVertexStatus(context.Context, *emptypb.Empty) (*GetMonoVertexStatusResponse, error) diff --git a/pkg/apis/proto/sourcetransform/v1/sourcetransform.proto b/pkg/apis/proto/sourcetransform/v1/sourcetransform.proto index b93d82b9a8..740ae1c671 100644 --- a/pkg/apis/proto/sourcetransform/v1/sourcetransform.proto +++ b/pkg/apis/proto/sourcetransform/v1/sourcetransform.proto @@ -28,21 +28,35 @@ service SourceTransform { // SourceTransformFn applies a function to each request element. // In addition to map function, SourceTransformFn also supports assigning a new event time to response. // SourceTransformFn can be used only at source vertex by source data transformer. - rpc SourceTransformFn(SourceTransformRequest) returns (SourceTransformResponse); + rpc SourceTransformFn(stream SourceTransformRequest) returns (stream SourceTransformResponse); // IsReady is the heartbeat endpoint for gRPC. rpc IsReady(google.protobuf.Empty) returns (ReadyResponse); } +/* + * Handshake message between client and server to indicate the start of transmission. + */ + message Handshake { + // Required field indicating the start of transmission. + bool sot = 1; +} + /** * SourceTransformerRequest represents a request element. 
*/ message SourceTransformRequest { - repeated string keys = 1; - bytes value = 2; - google.protobuf.Timestamp event_time = 3; - google.protobuf.Timestamp watermark = 4; - map headers = 5; + message Request { + repeated string keys = 1; + bytes value = 2; + google.protobuf.Timestamp event_time = 3; + google.protobuf.Timestamp watermark = 4; + map headers = 5; + // This ID is used to uniquely identify a transform request + string id = 6; + } + Request request = 1; + optional Handshake handshake = 2; } /** @@ -56,6 +70,10 @@ message SourceTransformResponse { repeated string tags = 4; } repeated Result results = 1; + // This ID is used to refer the responses to the request it corresponds to. + string id = 2; + // Handshake message between client and server to indicate the start of transmission. + optional Handshake handshake = 3; } /** diff --git a/pkg/isb/tracker/message_tracker.go b/pkg/isb/tracker/message_tracker.go new file mode 100644 index 0000000000..dfd608e5bf --- /dev/null +++ b/pkg/isb/tracker/message_tracker.go @@ -0,0 +1,56 @@ +package tracker + +import ( + "sync" + + "github.com/numaproj/numaflow/pkg/isb" +) + +// MessageTracker is used to store a key value pair for string and *ReadMessage +// as it can be accessed by concurrent goroutines, we keep all operations +// under a mutex +type MessageTracker struct { + lock sync.RWMutex + m map[string]*isb.ReadMessage +} + +// NewMessageTracker initializes a new instance of a Tracker +func NewMessageTracker(messages []*isb.ReadMessage) *MessageTracker { + m := make(map[string]*isb.ReadMessage, len(messages)) + for _, msg := range messages { + id := msg.ReadOffset.String() + m[id] = msg + } + return &MessageTracker{ + m: m, + lock: sync.RWMutex{}, + } +} + +// Remove will remove the entry for a given id and return the stored value corresponding to this id. +// A `nil` return value indicates that the id doesn't exist in the tracker. 
+func (t *MessageTracker) Remove(id string) *isb.ReadMessage { + t.lock.Lock() + defer t.lock.Unlock() + item, ok := t.m[id] + if !ok { + return nil + } + delete(t.m, id) + return item +} + +// IsEmpty is a helper function which checks if the Tracker map is empty +// return true if empty +func (t *MessageTracker) IsEmpty() bool { + t.lock.RLock() + defer t.lock.RUnlock() + return len(t.m) == 0 +} + +// Len returns the number of messages currently stored in the tracker +func (t *MessageTracker) Len() int { + t.lock.RLock() + defer t.lock.RUnlock() + return len(t.m) +} diff --git a/pkg/udf/rpc/tracker_test.go b/pkg/isb/tracker/message_tracker_test.go similarity index 53% rename from pkg/udf/rpc/tracker_test.go rename to pkg/isb/tracker/message_tracker_test.go index 21704f4425..3c2ae767d0 100644 --- a/pkg/udf/rpc/tracker_test.go +++ b/pkg/isb/tracker/message_tracker_test.go @@ -1,4 +1,4 @@ -package rpc +package tracker import ( "testing" @@ -6,32 +6,34 @@ import ( "github.com/stretchr/testify/assert" + "github.com/numaproj/numaflow/pkg/isb" "github.com/numaproj/numaflow/pkg/isb/testutils" ) func TestTracker_AddRequest(t *testing.T) { - tr := NewTracker() readMessages := testutils.BuildTestReadMessages(3, time.Unix(1661169600, 0), nil) - for _, msg := range readMessages { - tr.addRequest(&msg) + messages := make([]*isb.ReadMessage, len(readMessages)) + for i, msg := range readMessages { + messages[i] = &msg } + tr := NewMessageTracker(messages) id := readMessages[0].ReadOffset.String() - m, ok := tr.getRequest(id) - assert.True(t, ok) + m := tr.Remove(id) + assert.NotNil(t, m) assert.Equal(t, readMessages[0], *m) } func TestTracker_RemoveRequest(t *testing.T) { - tr := NewTracker() readMessages := testutils.BuildTestReadMessages(3, time.Unix(1661169600, 0), nil) - for _, msg := range readMessages { - tr.addRequest(&msg) + messages := make([]*isb.ReadMessage, len(readMessages)) + for i, msg := range readMessages { + messages[i] = &msg } + tr := 
NewMessageTracker(messages) id := readMessages[0].ReadOffset.String() - m, ok := tr.getRequest(id) - assert.True(t, ok) + m := tr.Remove(id) + assert.NotNil(t, m) assert.Equal(t, readMessages[0], *m) - tr.removeRequest(id) - _, ok = tr.getRequest(id) - assert.False(t, ok) + m = tr.Remove(id) + assert.Nil(t, m) } diff --git a/pkg/sdkclient/grpc/grpc_utils.go b/pkg/sdkclient/grpc/grpc_utils.go index 293ba8e8d7..71ae252738 100644 --- a/pkg/sdkclient/grpc/grpc_utils.go +++ b/pkg/sdkclient/grpc/grpc_utils.go @@ -18,7 +18,6 @@ package grpc import ( "fmt" - "log" "strconv" "google.golang.org/grpc" @@ -56,7 +55,6 @@ func ConnectToServer(udsSockAddr string, serverInfo *serverinfo.ServerInfo, maxM ) } else { sockAddr = getUdsSockAddr(udsSockAddr) - log.Println("UDS Client:", sockAddr) conn, err = grpc.NewClient(sockAddr, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMessageSize), grpc.MaxCallSendMsgSize(maxMessageSize))) diff --git a/pkg/sdkclient/sourcetransformer/client.go b/pkg/sdkclient/sourcetransformer/client.go index d9d47302c0..92372ff7a4 100644 --- a/pkg/sdkclient/sourcetransformer/client.go +++ b/pkg/sdkclient/sourcetransformer/client.go @@ -18,7 +18,10 @@ package sourcetransformer import ( "context" + "fmt" + "time" + "golang.org/x/sync/errgroup" "google.golang.org/grpc" "google.golang.org/protobuf/types/known/emptypb" @@ -28,16 +31,18 @@ import ( sdkerr "github.com/numaproj/numaflow/pkg/sdkclient/error" grpcutil "github.com/numaproj/numaflow/pkg/sdkclient/grpc" "github.com/numaproj/numaflow/pkg/sdkclient/serverinfo" + "github.com/numaproj/numaflow/pkg/shared/logging" ) // client contains the grpc connection and the grpc client. type client struct { conn *grpc.ClientConn grpcClt transformpb.SourceTransformClient + stream transformpb.SourceTransform_SourceTransformFnClient } // New creates a new client object. 
-func New(serverInfo *serverinfo.ServerInfo, inputOptions ...sdkclient.Option) (Client, error) { +func New(ctx context.Context, serverInfo *serverinfo.ServerInfo, inputOptions ...sdkclient.Option) (Client, error) { var opts = sdkclient.DefaultOptions(sdkclient.SourceTransformerAddr) for _, inputOption := range inputOptions { @@ -53,18 +58,81 @@ func New(serverInfo *serverinfo.ServerInfo, inputOptions ...sdkclient.Option) (C c := new(client) c.conn = conn c.grpcClt = transformpb.NewSourceTransformClient(conn) + + var logger = logging.FromContext(ctx) + +waitUntilReady: + for { + select { + case <-ctx.Done(): + return nil, fmt.Errorf("waiting for transformer gRPC server to be ready: %w", ctx.Err()) + default: + _, err := c.IsReady(ctx, &emptypb.Empty{}) + if err != nil { + logger.Warnf("Transformer server is not ready: %v", err) + time.Sleep(100 * time.Millisecond) + continue waitUntilReady + } + break waitUntilReady + } + } + + c.stream, err = c.grpcClt.SourceTransformFn(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create a gRPC stream for source transform: %w", err) + } + + if err := doHandshake(c.stream); err != nil { + return nil, err + } + return c, nil } +func doHandshake(stream transformpb.SourceTransform_SourceTransformFnClient) error { + // Send handshake request + handshakeReq := &transformpb.SourceTransformRequest{ + Handshake: &transformpb.Handshake{ + Sot: true, + }, + } + if err := stream.Send(handshakeReq); err != nil { + return fmt.Errorf("failed to send handshake request for source tansform: %w", err) + } + + handshakeResp, err := stream.Recv() + if err != nil { + return fmt.Errorf("failed to receive handshake response from source transform stream: %w", err) + } + if resp := handshakeResp.GetHandshake(); resp == nil || !resp.GetSot() { + return fmt.Errorf("invalid handshake response for source transform. Received='%+v'", resp) + } + return nil +} + // NewFromClient creates a new client object from a grpc client. 
This is used for testing. -func NewFromClient(c transformpb.SourceTransformClient) (Client, error) { +func NewFromClient(ctx context.Context, c transformpb.SourceTransformClient) (Client, error) { + stream, err := c.SourceTransformFn(ctx) + if err != nil { + return nil, err + } + + if err := doHandshake(stream); err != nil { + return nil, err + } + return &client{ grpcClt: c, + stream: stream, }, nil } // CloseConn closes the grpc client connection. -func (c *client) CloseConn(ctx context.Context) error { +func (c *client) CloseConn(_ context.Context) error { + err := c.stream.CloseSend() + if err != nil { + return err + } if c.conn == nil { return nil } @@ -81,11 +149,47 @@ func (c *client) IsReady(ctx context.Context, in *emptypb.Empty) (bool, error) { } // SourceTransformFn SourceTransformerFn applies a function to each request element. -func (c *client) SourceTransformFn(ctx context.Context, request *transformpb.SourceTransformRequest) (*transformpb.SourceTransformResponse, error) { - transformResponse, err := c.grpcClt.SourceTransformFn(ctx, request) - err = sdkerr.ToUDFErr("c.grpcClt.SourceTransformFn", err) - if err != nil { +// Response channel will not be closed. Caller can select on response and error channel to exit on first error. 
+func (c *client) SourceTransformFn(ctx context.Context, requests []*transformpb.SourceTransformRequest) ([]*transformpb.SourceTransformResponse, error) { + var eg errgroup.Group + // send n requests + eg.Go(func() error { + for _, req := range requests { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + if err := c.stream.Send(req); err != nil { + return sdkerr.ToUDFErr("c.grpcClt.SourceTransformFn stream.Send", err) + } + } + return nil + }) + + // receive n responses + responses := make([]*transformpb.SourceTransformResponse, len(requests)) + eg.Go(func() error { + for i := 0; i < len(requests); i++ { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + resp, err := c.stream.Recv() + if err != nil { + return sdkerr.ToUDFErr("c.grpcClt.SourceTransformFn stream.Recv", err) + } + responses[i] = resp + } + return nil + }) + + // wait for the send and receive goroutines to finish + // if any of the goroutines return an error, the error will be caught here + if err := eg.Wait(); err != nil { return nil, err } - return transformResponse, nil + + return responses, nil } diff --git a/pkg/sdkclient/sourcetransformer/client_test.go b/pkg/sdkclient/sourcetransformer/client_test.go index 27526312fd..c66abbd6ea 100644 --- a/pkg/sdkclient/sourcetransformer/client_test.go +++ b/pkg/sdkclient/sourcetransformer/client_test.go @@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -18,80 +18,132 @@ package sourcetransformer import ( "context" + "errors" "fmt" - "reflect" + "net" "testing" + "time" - "github.com/golang/mock/gomock" transformpb "github.com/numaproj/numaflow-go/pkg/apis/proto/sourcetransform/v1" - transformermock "github.com/numaproj/numaflow-go/pkg/apis/proto/sourcetransform/v1/transformmock" - "github.com/stretchr/testify/assert" + "github.com/numaproj/numaflow-go/pkg/sourcetransformer" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/test/bufconn" "google.golang.org/protobuf/types/known/emptypb" + "google.golang.org/protobuf/types/known/timestamppb" ) func TestClient_IsReady(t *testing.T) { var ctx = context.Background() + svc := &sourcetransformer.Service{ + Transformer: sourcetransformer.SourceTransformFunc(func(ctx context.Context, keys []string, datum sourcetransformer.Datum) sourcetransformer.Messages { + return sourcetransformer.MessagesBuilder() + }), + } + + // Start the gRPC server + conn := newServer(t, func(server *grpc.Server) { + transformpb.RegisterSourceTransformServer(server, svc) + }) + defer conn.Close() + + // Create a client connection to the server + client := transformpb.NewSourceTransformClient(conn) - ctrl := gomock.NewController(t) - defer ctrl.Finish() + testClient, err := NewFromClient(ctx, client) + require.NoError(t, err) - mockClient := transformermock.NewMockSourceTransformClient(ctrl) - mockClient.EXPECT().IsReady(gomock.Any(), gomock.Any()).Return(&transformpb.ReadyResponse{Ready: true}, nil) - mockClient.EXPECT().IsReady(gomock.Any(), gomock.Any()).Return(&transformpb.ReadyResponse{Ready: false}, fmt.Errorf("mock connection refused")) + 
ready, err := testClient.IsReady(ctx, &emptypb.Empty{}) + require.True(t, ready) + require.NoError(t, err) +} - testClient, err := NewFromClient(mockClient) - assert.NoError(t, err) - reflect.DeepEqual(testClient, &client{ - grpcClt: mockClient, +func newServer(t *testing.T, register func(server *grpc.Server)) *grpc.ClientConn { + lis := bufconn.Listen(100) + t.Cleanup(func() { + _ = lis.Close() }) - ready, err := testClient.IsReady(ctx, &emptypb.Empty{}) - assert.True(t, ready) - assert.NoError(t, err) + server := grpc.NewServer() + t.Cleanup(func() { + server.Stop() + }) - ready, err = testClient.IsReady(ctx, &emptypb.Empty{}) - assert.False(t, ready) - assert.EqualError(t, err, "mock connection refused") -} + register(server) -func TestClient_SourceTransformFn(t *testing.T) { - var ctx = context.Background() + errChan := make(chan error, 1) + go func() { + // t.Fatal should only be called from the goroutine running the test + if err := server.Serve(lis); err != nil { + errChan <- err + } + }() - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockClient := transformermock.NewMockSourceTransformClient(ctrl) - mockClient.EXPECT().SourceTransformFn(gomock.Any(), gomock.Any()).Return(&transformpb.SourceTransformResponse{Results: []*transformpb.SourceTransformResponse_Result{ - { - Keys: []string{"temp-key"}, - Value: []byte("mock result"), - Tags: nil, - }, - }}, nil) - mockClient.EXPECT().SourceTransformFn(gomock.Any(), gomock.Any()).Return(&transformpb.SourceTransformResponse{Results: []*transformpb.SourceTransformResponse_Result{ - { - Keys: []string{"temp-key"}, - Value: []byte("mock result"), - Tags: nil, - }, - }}, fmt.Errorf("mock connection refused")) - - testClient, err := NewFromClient(mockClient) - assert.NoError(t, err) - reflect.DeepEqual(testClient, &client{ - grpcClt: mockClient, + dialer := func(context.Context, string) (net.Conn, error) { + return lis.Dial() + } + + conn, err := grpc.NewClient("passthrough://", 
grpc.WithContextDialer(dialer), grpc.WithTransportCredentials(insecure.NewCredentials())) + t.Cleanup(func() { + _ = conn.Close() }) + if err != nil { + t.Fatalf("Creating new gRPC client connection: %v", err) + } + + var grpcServerErr error + select { + case grpcServerErr = <-errChan: + case <-time.After(500 * time.Millisecond): + grpcServerErr = errors.New("gRPC server didn't start in 500ms") + } + if err != nil { + t.Fatalf("Failed to start gRPC server: %v", grpcServerErr) + } + + return conn +} - result, err := testClient.SourceTransformFn(ctx, &transformpb.SourceTransformRequest{}) - assert.Equal(t, &transformpb.SourceTransformResponse{Results: []*transformpb.SourceTransformResponse_Result{ - { - Keys: []string{"temp-key"}, - Value: []byte("mock result"), - Tags: nil, - }, - }}, result) - assert.NoError(t, err) - - _, err = testClient.SourceTransformFn(ctx, &transformpb.SourceTransformRequest{}) - assert.EqualError(t, err, "NonRetryable: mock connection refused") +func TestClient_SourceTransformFn(t *testing.T) { + var testTime = time.Date(2021, 8, 15, 14, 30, 45, 100, time.Local) + svc := &sourcetransformer.Service{ + Transformer: sourcetransformer.SourceTransformFunc(func(ctx context.Context, keys []string, datum sourcetransformer.Datum) sourcetransformer.Messages { + msg := datum.Value() + return sourcetransformer.MessagesBuilder().Append(sourcetransformer.NewMessage(msg, testTime).WithKeys([]string{keys[0] + "_test"})) + }), + } + conn := newServer(t, func(server *grpc.Server) { + transformpb.RegisterSourceTransformServer(server, svc) + }) + transformClient := transformpb.NewSourceTransformClient(conn) + var ctx = context.Background() + client, _ := NewFromClient(ctx, transformClient) + + requests := make([]*transformpb.SourceTransformRequest, 5) + go func() { + for i := 0; i < 5; i++ { + requests[i] = &transformpb.SourceTransformRequest{ + Request: &transformpb.SourceTransformRequest_Request{ + Keys: []string{fmt.Sprintf("client_key_%d", i)}, + Value: 
[]byte("test"), + }, + } + } + }() + + responses, err := client.SourceTransformFn(ctx, requests) + require.NoError(t, err) + var results [][]*transformpb.SourceTransformResponse_Result + for _, resp := range responses { + results = append(results, resp.GetResults()) + } + expected := [][]*transformpb.SourceTransformResponse_Result{ + {{Keys: []string{"client_key_0_test"}, Value: []byte("test"), EventTime: timestamppb.New(testTime)}}, + {{Keys: []string{"client_key_1_test"}, Value: []byte("test"), EventTime: timestamppb.New(testTime)}}, + {{Keys: []string{"client_key_2_test"}, Value: []byte("test"), EventTime: timestamppb.New(testTime)}}, + {{Keys: []string{"client_key_3_test"}, Value: []byte("test"), EventTime: timestamppb.New(testTime)}}, + {{Keys: []string{"client_key_4_test"}, Value: []byte("test"), EventTime: timestamppb.New(testTime)}}, + } + require.ElementsMatch(t, expected, results) } diff --git a/pkg/sdkclient/sourcetransformer/interface.go b/pkg/sdkclient/sourcetransformer/interface.go index 4d8e3d8f71..883353f3a6 100644 --- a/pkg/sdkclient/sourcetransformer/interface.go +++ b/pkg/sdkclient/sourcetransformer/interface.go @@ -27,5 +27,5 @@ import ( type Client interface { CloseConn(ctx context.Context) error IsReady(ctx context.Context, in *emptypb.Empty) (bool, error) - SourceTransformFn(ctx context.Context, request *transformpb.SourceTransformRequest) (*transformpb.SourceTransformResponse, error) + SourceTransformFn(ctx context.Context, requests []*transformpb.SourceTransformRequest) ([]*transformpb.SourceTransformResponse, error) } diff --git a/pkg/sources/forward/applier/sourcetransformer.go b/pkg/sources/forward/applier/sourcetransformer.go index 795cd4c5a2..a935d511ea 100644 --- a/pkg/sources/forward/applier/sourcetransformer.go +++ b/pkg/sources/forward/applier/sourcetransformer.go @@ -25,13 +25,13 @@ import ( // SourceTransformApplier applies the source transform on the read message and gives back a new message. 
Any UserError will be retried here, while // InternalErr can be returned and could be retried by the callee. type SourceTransformApplier interface { - ApplyTransform(ctx context.Context, message *isb.ReadMessage) ([]*isb.WriteMessage, error) + ApplyTransform(ctx context.Context, messages []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) } // ApplySourceTransformFunc is a function type that implements SourceTransformApplier interface. -type ApplySourceTransformFunc func(ctx context.Context, message *isb.ReadMessage) ([]*isb.WriteMessage, error) +type ApplySourceTransformFunc func(ctx context.Context, messages []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) // ApplyTransform implements SourceTransformApplier interface. -func (f ApplySourceTransformFunc) ApplyTransform(ctx context.Context, message *isb.ReadMessage) ([]*isb.WriteMessage, error) { - return f(ctx, message) +func (f ApplySourceTransformFunc) ApplyTransform(ctx context.Context, messages []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { + return f(ctx, messages) } diff --git a/pkg/sources/forward/data_forward.go b/pkg/sources/forward/data_forward.go index 48ff97da3a..913be67939 100644 --- a/pkg/sources/forward/data_forward.go +++ b/pkg/sources/forward/data_forward.go @@ -305,34 +305,14 @@ func (df *DataForward) forwardAChunk(ctx context.Context) { // If a user-defined transformer exists, apply it if df.opts.transformer != nil { - // user-defined transformer concurrent processing request channel - transformerCh := make(chan *isb.ReadWriteMessagePair) - - // create a pool of Transformer Processors - var wg sync.WaitGroup - for i := 0; i < df.opts.transformerConcurrency; i++ { - wg.Add(1) - go func() { - defer wg.Done() - df.concurrentApplyTransformer(ctx, transformerCh) - }() + for _, m := range readMessages { + // assign watermark to the message + m.Watermark = time.Time(processorWM) } concurrentTransformerProcessingStart := time.Now() - for idx, m := range readMessages { + 
readWriteMessagePairs = df.applyTransformer(ctx, readMessages) - // assign watermark to the message - m.Watermark = time.Time(processorWM) - readWriteMessagePairs[idx].ReadMessage = m - // send transformer processing work to the channel. Thus, the results of the transformer - // application on a read message will be stored as the corresponding writeMessage in readWriteMessagePairs - transformerCh <- &readWriteMessagePairs[idx] - } - // let the go routines know that there is no more work - close(transformerCh) - // wait till the processing is done. this will not be an infinite wait because the transformer processing will exit if - // context.Done() is closed. - wg.Wait() df.opts.logger.Debugw("concurrent applyTransformer completed", zap.Int("concurrency", df.opts.transformerConcurrency), zap.Duration("took", time.Since(concurrentTransformerProcessingStart)), @@ -536,6 +516,7 @@ func (df *DataForward) writeToBuffers( for toVertexName, toVertexMessages := range messageToStep { writeOffsets[toVertexName] = make([][]isb.Offset, len(toVertexMessages)) } + for toVertexName, toVertexBuffer := range df.toBuffers { for index, partition := range toVertexBuffer { writeOffsets[toVertexName][index], err = df.writeToBuffer(ctx, partition, messageToStep[toVertexName][index]) @@ -591,6 +572,7 @@ func (df *DataForward) writeToBuffer(ctx context.Context, toBufferPartition isb. zap.String("reason", err.Error()), zap.String("partition", toBufferPartition.GetName()), zap.String("vertex", df.vertexName), zap.String("pipeline", df.pipelineName), + zap.String("msg_id", msg.ID.String()), ) } else { needRetry = true @@ -661,42 +643,12 @@ func (df *DataForward) writeToBuffer(ctx context.Context, toBufferPartition isb. 
return writeOffsets, nil } -// concurrentApplyTransformer applies the transformer based on the request from the channel -func (df *DataForward) concurrentApplyTransformer(ctx context.Context, readMessagePair <-chan *isb.ReadWriteMessagePair) { - for message := range readMessagePair { - start := time.Now() - metrics.SourceTransformerReadMessagesCount.With(map[string]string{ - metrics.LabelVertex: df.vertexName, - metrics.LabelPipeline: df.pipelineName, - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), - metrics.LabelPartitionName: df.reader.GetName(), - }).Inc() - - writeMessages, err := df.applyTransformer(ctx, message.ReadMessage) - metrics.SourceTransformerWriteMessagesCount.With(map[string]string{ - metrics.LabelVertex: df.vertexName, - metrics.LabelPipeline: df.pipelineName, - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), - metrics.LabelPartitionName: df.reader.GetName(), - }).Add(float64(len(writeMessages))) - - message.WriteMessages = append(message.WriteMessages, writeMessages...) - message.Err = err - metrics.SourceTransformerProcessingTime.With(map[string]string{ - metrics.LabelVertex: df.vertexName, - metrics.LabelPipeline: df.pipelineName, - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), - metrics.LabelPartitionName: df.reader.GetName(), - }).Observe(float64(time.Since(start).Microseconds())) - } -} - // applyTransformer applies the transformer and will block if there is any InternalErr. On the other hand, if this is a UserError // the skip flag is set. The ShutDown flag will only if there is an InternalErr and ForceStop has been invoked. // The UserError retry will be done on the applyTransformer. 
-func (df *DataForward) applyTransformer(ctx context.Context, readMessage *isb.ReadMessage) ([]*isb.WriteMessage, error) { +func (df *DataForward) applyTransformer(ctx context.Context, messages []*isb.ReadMessage) []isb.ReadWriteMessagePair { for { - writeMessages, err := df.opts.transformer.ApplyTransform(ctx, readMessage) + transformResults, err := df.opts.transformer.ApplyTransform(ctx, messages) if err != nil { df.opts.logger.Errorw("Transformer.Apply error", zap.Error(err)) // TODO: implement retry with backoff etc. @@ -712,12 +664,11 @@ func (df *DataForward) applyTransformer(ctx context.Context, readMessage *isb.Re metrics.LabelVertexType: string(dfv1.VertexTypeSource), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), }).Inc() - - return nil, err + return []isb.ReadWriteMessagePair{{Err: err}} } continue } - return writeMessages, nil + return transformResults } } diff --git a/pkg/sources/forward/data_forward_test.go b/pkg/sources/forward/data_forward_test.go index 25e41a9fa6..96cb6760e6 100644 --- a/pkg/sources/forward/data_forward_test.go +++ b/pkg/sources/forward/data_forward_test.go @@ -121,8 +121,16 @@ func (f myForwardTest) WhereTo(_ []string, _ []string, s string) ([]forwarder.Ve }}, nil } -func (f myForwardTest) ApplyTransform(ctx context.Context, message *isb.ReadMessage) ([]*isb.WriteMessage, error) { - return testutils.CopyUDFTestApply(ctx, "test-vertex", message) +func (f myForwardTest) ApplyTransform(ctx context.Context, messages []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { + out := make([]isb.ReadWriteMessagePair, len(messages)) + for i, msg := range messages { + writeMsg, _ := testutils.CopyUDFTestApply(ctx, "test-vertex", msg) + out[i] = isb.ReadWriteMessagePair{ + ReadMessage: msg, + WriteMessages: writeMsg, + } + } + return out, nil } func TestNewDataForward(t *testing.T) { @@ -856,36 +864,31 @@ func (f *mySourceForwardTestRoundRobin) WhereTo(_ []string, _ []string, s string // such that we can verify 
message IsLate attribute gets set to true. var testSourceNewEventTime = testSourceWatermark.Add(time.Duration(-1) * time.Minute) -func (f mySourceForwardTest) ApplyTransform(ctx context.Context, message *isb.ReadMessage) ([]*isb.WriteMessage, error) { - return func(ctx context.Context, readMessage *isb.ReadMessage) ([]*isb.WriteMessage, error) { - _ = ctx - offset := readMessage.ReadOffset - payload := readMessage.Body.Payload - parentPaneInfo := readMessage.MessageInfo - - // apply source data transformer - _ = payload - // copy the payload - result := payload - // assign new event time - parentPaneInfo.EventTime = testSourceNewEventTime - var key []string - - writeMessage := isb.Message{ +func (f mySourceForwardTest) ApplyTransform(ctx context.Context, messages []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { + results := make([]isb.ReadWriteMessagePair, len(messages)) + for i, message := range messages { + message.MessageInfo.EventTime = testSourceNewEventTime + writeMsg := isb.Message{ Header: isb.Header{ - MessageInfo: parentPaneInfo, + MessageInfo: message.MessageInfo, ID: isb.MessageID{ VertexName: "test-vertex", - Offset: offset.String(), + Offset: message.ReadOffset.String(), }, - Keys: key, + Keys: []string{}, }, Body: isb.Body{ - Payload: result, + Payload: message.Body.Payload, }, } - return []*isb.WriteMessage{{Message: writeMessage}}, nil - }(ctx, message) + results[i] = isb.ReadWriteMessagePair{ + ReadMessage: message, + WriteMessages: []*isb.WriteMessage{{ + Message: writeMsg, + }}, + } + } + return results, nil } // TestSourceWatermarkPublisher is a dummy implementation of isb.SourceWatermarkPublisher interface @@ -1153,8 +1156,16 @@ func (f myForwardDropTest) WhereTo(_ []string, _ []string, s string) ([]forwarde return []forwarder.VertexBuffer{}, nil } -func (f myForwardDropTest) ApplyTransform(ctx context.Context, message *isb.ReadMessage) ([]*isb.WriteMessage, error) { - return testutils.CopyUDFTestApply(ctx, "test-vertex", message) 
+func (f myForwardDropTest) ApplyTransform(ctx context.Context, messages []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { + results := make([]isb.ReadWriteMessagePair, len(messages)) + for i, message := range messages { + writeMsg, _ := testutils.CopyUDFTestApply(ctx, "test-vertex", message) + results[i] = isb.ReadWriteMessagePair{ + ReadMessage: message, + WriteMessages: writeMsg, + } + } + return results, nil } type myForwardToAllTest struct { @@ -1174,8 +1185,16 @@ func (f *myForwardToAllTest) WhereTo(_ []string, _ []string, s string) ([]forwar return output, nil } -func (f *myForwardToAllTest) ApplyTransform(ctx context.Context, message *isb.ReadMessage) ([]*isb.WriteMessage, error) { - return testutils.CopyUDFTestApply(ctx, "test-vertex", message) +func (f *myForwardToAllTest) ApplyTransform(ctx context.Context, messages []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { + results := make([]isb.ReadWriteMessagePair, len(messages)) + for i, message := range messages { + writeMsg, _ := testutils.CopyUDFTestApply(ctx, "test-vertex", message) + results[i] = isb.ReadWriteMessagePair{ + ReadMessage: message, + WriteMessages: writeMsg, + } + } + return results, nil } type myForwardInternalErrTest struct { @@ -1188,7 +1207,7 @@ func (f myForwardInternalErrTest) WhereTo(_ []string, _ []string, s string) ([]f }}, nil } -func (f myForwardInternalErrTest) ApplyTransform(_ context.Context, _ *isb.ReadMessage) ([]*isb.WriteMessage, error) { +func (f myForwardInternalErrTest) ApplyTransform(ctx context.Context, _ []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { return nil, &udfapplier.ApplyUDFErr{ UserUDFErr: false, InternalErr: struct { @@ -1209,8 +1228,16 @@ func (f myForwardApplyWhereToErrTest) WhereTo(_ []string, _ []string, s string) }}, fmt.Errorf("whereToStep failed") } -func (f myForwardApplyWhereToErrTest) ApplyTransform(ctx context.Context, message *isb.ReadMessage) ([]*isb.WriteMessage, error) { - return testutils.CopyUDFTestApply(ctx, 
"test-vertex", message) +func (f myForwardApplyWhereToErrTest) ApplyTransform(ctx context.Context, messages []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { + results := make([]isb.ReadWriteMessagePair, len(messages)) + for i, message := range messages { + writeMsg, _ := testutils.CopyUDFTestApply(ctx, "test-vertex", message) + results[i] = isb.ReadWriteMessagePair{ + ReadMessage: message, + WriteMessages: writeMsg, + } + } + return results, nil } type myForwardApplyTransformerErrTest struct { @@ -1223,7 +1250,7 @@ func (f myForwardApplyTransformerErrTest) WhereTo(_ []string, _ []string, s stri }}, nil } -func (f myForwardApplyTransformerErrTest) ApplyTransform(_ context.Context, _ *isb.ReadMessage) ([]*isb.WriteMessage, error) { +func (f myForwardApplyTransformerErrTest) ApplyTransform(_ context.Context, _ []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { return nil, fmt.Errorf("transformer error") } diff --git a/pkg/sources/forward/shutdown_test.go b/pkg/sources/forward/shutdown_test.go index a4ffc5e2e2..34003e729f 100644 --- a/pkg/sources/forward/shutdown_test.go +++ b/pkg/sources/forward/shutdown_test.go @@ -43,8 +43,16 @@ func (s myShutdownTest) WhereTo([]string, []string, string) ([]forwarder.VertexB return []forwarder.VertexBuffer{}, nil } -func (s myShutdownTest) ApplyTransform(ctx context.Context, message *isb.ReadMessage) ([]*isb.WriteMessage, error) { - return testutils.CopyUDFTestApply(ctx, "", message) +func (f myShutdownTest) ApplyTransform(ctx context.Context, messages []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { + results := make([]isb.ReadWriteMessagePair, len(messages)) + for i, message := range messages { + writeMsg, _ := testutils.CopyUDFTestApply(ctx, "", message) + results[i] = isb.ReadWriteMessagePair{ + ReadMessage: message, + WriteMessages: writeMsg, + } + } + return results, nil } func (s myShutdownTest) ApplyMapStream(ctx context.Context, message *isb.ReadMessage, writeMessageCh chan<- isb.WriteMessage) error 
{ diff --git a/pkg/sources/generator/tickgen.go b/pkg/sources/generator/tickgen.go index ff00ba8cba..c0cdb9dcf1 100644 --- a/pkg/sources/generator/tickgen.go +++ b/pkg/sources/generator/tickgen.go @@ -202,7 +202,6 @@ loop: tickgenSourceReadCount.With(map[string]string{metrics.LabelVertex: mg.vertexName, metrics.LabelPipeline: mg.pipelineName}).Inc() msgs = append(msgs, mg.newReadMessage(r.key, r.data, r.offset, r.ts)) case <-timeout: - mg.logger.Infow("Timed out waiting for messages to read.", zap.Duration("waited", mg.readTimeout)) break loop } } diff --git a/pkg/sources/source.go b/pkg/sources/source.go index 0b3e23a94b..69bc0c0099 100644 --- a/pkg/sources/source.go +++ b/pkg/sources/source.go @@ -240,7 +240,7 @@ func (sp *SourceProcessor) Start(ctx context.Context) error { return err } - srcTransformerClient, err := sourcetransformer.New(serverInfo, sdkclient.WithMaxMessageSize(maxMessageSize)) + srcTransformerClient, err := sourcetransformer.New(ctx, serverInfo, sdkclient.WithMaxMessageSize(maxMessageSize)) if err != nil { return fmt.Errorf("failed to create transformer gRPC client, %w", err) } diff --git a/pkg/sources/transformer/grpc_transformer.go b/pkg/sources/transformer/grpc_transformer.go index 14b414a348..459e99f21b 100644 --- a/pkg/sources/transformer/grpc_transformer.go +++ b/pkg/sources/transformer/grpc_transformer.go @@ -24,10 +24,8 @@ import ( v1 "github.com/numaproj/numaflow-go/pkg/apis/proto/sourcetransform/v1" "google.golang.org/protobuf/types/known/emptypb" "google.golang.org/protobuf/types/known/timestamppb" - "k8s.io/apimachinery/pkg/util/wait" "github.com/numaproj/numaflow/pkg/isb" - sdkerr "github.com/numaproj/numaflow/pkg/sdkclient/error" "github.com/numaproj/numaflow/pkg/sdkclient/sourcetransformer" "github.com/numaproj/numaflow/pkg/shared/logging" "github.com/numaproj/numaflow/pkg/udf/rpc" @@ -54,7 +52,7 @@ func (u *GRPCBasedTransformer) IsHealthy(ctx context.Context) error { // WaitUntilReady waits until the client is connected. 
func (u *GRPCBasedTransformer) WaitUntilReady(ctx context.Context) error { - log := logging.FromContext(ctx) + logger := logging.FromContext(ctx) for { select { case <-ctx.Done(): @@ -63,7 +61,7 @@ func (u *GRPCBasedTransformer) WaitUntilReady(ctx context.Context) error { if _, err := u.client.IsReady(ctx, &emptypb.Empty{}); err == nil { return nil } else { - log.Infof("waiting for transformer to be ready: %v", err) + logger.Infof("waiting for transformer to be ready: %v", err) time.Sleep(1 * time.Second) } } @@ -75,103 +73,81 @@ func (u *GRPCBasedTransformer) CloseConn(ctx context.Context) error { return u.client.CloseConn(ctx) } -func (u *GRPCBasedTransformer) ApplyTransform(ctx context.Context, readMessage *isb.ReadMessage) ([]*isb.WriteMessage, error) { - keys := readMessage.Keys - payload := readMessage.Body.Payload - offset := readMessage.ReadOffset - parentMessageInfo := readMessage.MessageInfo - var req = &v1.SourceTransformRequest{ - Keys: keys, - Value: payload, - EventTime: timestamppb.New(parentMessageInfo.EventTime), - Watermark: timestamppb.New(readMessage.Watermark), - Headers: readMessage.Headers, +func (u *GRPCBasedTransformer) ApplyTransform(ctx context.Context, messages []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { + transformResults := make([]isb.ReadWriteMessagePair, len(messages)) + requests := make([]*v1.SourceTransformRequest, len(messages)) + idToMsgMapping := make(map[string]*isb.ReadMessage) + + for i, msg := range messages { + // we track the id to the message mapping to be able to match the response with the original message. + // we use the original message's event time if the user doesn't change it. Also we use the original message's + // read offset + index as the id for the response. 
+ id := msg.ReadOffset.String() + idToMsgMapping[id] = msg + req := &v1.SourceTransformRequest{ + Request: &v1.SourceTransformRequest_Request{ + Keys: msg.Keys, + Value: msg.Body.Payload, + EventTime: timestamppb.New(msg.MessageInfo.EventTime), + Watermark: timestamppb.New(msg.Watermark), + Headers: msg.Headers, + Id: id, + }, + } + requests[i] = req } - response, err := u.client.SourceTransformFn(ctx, req) + responses, err := u.client.SourceTransformFn(ctx, requests) + if err != nil { - udfErr, _ := sdkerr.FromError(err) - switch udfErr.ErrorKind() { - case sdkerr.Retryable: - var success bool - _ = wait.ExponentialBackoffWithContext(ctx, wait.Backoff{ - // retry every "duration * factor + [0, jitter]" interval for 5 times - Duration: 1 * time.Second, - Factor: 1, - Jitter: 0.1, - Steps: 5, - }, func(_ context.Context) (done bool, err error) { - response, err = u.client.SourceTransformFn(ctx, req) - if err != nil { - udfErr, _ = sdkerr.FromError(err) - switch udfErr.ErrorKind() { - case sdkerr.Retryable: - return false, nil - case sdkerr.NonRetryable: - return true, nil - default: - return true, nil - } - } - success = true - return true, nil - }) - if !success { - return nil, &rpc.ApplyUDFErr{ - UserUDFErr: false, - Message: fmt.Sprintf("gRPC client.SourceTransformFn failed, %s", err), - InternalErr: rpc.InternalErr{ - Flag: true, - MainCarDown: false, - }, - } - } - case sdkerr.NonRetryable: - return nil, &rpc.ApplyUDFErr{ - UserUDFErr: false, - Message: fmt.Sprintf("gRPC client.SourceTransformFn failed, %s", err), - InternalErr: rpc.InternalErr{ - Flag: true, - MainCarDown: false, - }, - } - default: - return nil, &rpc.ApplyUDFErr{ - UserUDFErr: false, - Message: fmt.Sprintf("gRPC client.SourceTransformFn failed, %s", err), - InternalErr: rpc.InternalErr{ - Flag: true, - MainCarDown: false, - }, - } + err = &rpc.ApplyUDFErr{ + UserUDFErr: false, + Message: fmt.Sprintf("gRPC client.SourceTransformFn failed, %s", err), + InternalErr: rpc.InternalErr{ + Flag: 
true, + MainCarDown: false, + }, } + return nil, err } - taggedMessages := make([]*isb.WriteMessage, 0) - for i, result := range response.GetResults() { - keys := result.Keys - if result.EventTime != nil { - // Transformer supports changing event time. - parentMessageInfo.EventTime = result.EventTime.AsTime() + for i, resp := range responses { + parentMessage, ok := idToMsgMapping[resp.GetId()] + if !ok { + panic("tracker doesn't contain the message ID received from the response") } - taggedMessage := &isb.WriteMessage{ - Message: isb.Message{ - Header: isb.Header{ - MessageInfo: parentMessageInfo, - ID: isb.MessageID{ - VertexName: u.vertexName, - Offset: offset.String(), - Index: int32(i), + taggedMessages := make([]*isb.WriteMessage, len(resp.GetResults())) + for i, result := range resp.GetResults() { + keys := result.Keys + if result.EventTime != nil { + // Transformer supports changing event time. + parentMessage.MessageInfo.EventTime = result.EventTime.AsTime() + } + taggedMessage := &isb.WriteMessage{ + Message: isb.Message{ + Header: isb.Header{ + MessageInfo: parentMessage.MessageInfo, + ID: isb.MessageID{ + VertexName: u.vertexName, + Offset: parentMessage.ReadOffset.String(), + Index: int32(i), + }, + Keys: keys, + }, + Body: isb.Body{ + Payload: result.Value, }, - Keys: keys, - }, - Body: isb.Body{ - Payload: result.Value, }, - }, - Tags: result.Tags, + Tags: result.Tags, + } + taggedMessages[i] = taggedMessage + } + responsePair := isb.ReadWriteMessagePair{ + ReadMessage: parentMessage, + WriteMessages: taggedMessages, + Err: nil, } - taggedMessages = append(taggedMessages, taggedMessage) + transformResults[i] = responsePair } - return taggedMessages, nil + return transformResults, nil } diff --git a/pkg/sources/transformer/grpc_transformer_test.go b/pkg/sources/transformer/grpc_transformer_test.go index 959a40bf51..cd8ccbe852 100644 --- a/pkg/sources/transformer/grpc_transformer_test.go +++ b/pkg/sources/transformer/grpc_transformer_test.go @@ -19,101 
+19,60 @@ package transformer import ( "context" "encoding/json" - "fmt" + "errors" + "net" "testing" "time" - "github.com/golang/mock/gomock" - v1 "github.com/numaproj/numaflow-go/pkg/apis/proto/sourcetransform/v1" - transformermock "github.com/numaproj/numaflow-go/pkg/apis/proto/sourcetransform/v1/transformmock" - "github.com/stretchr/testify/assert" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/timestamppb" + "github.com/numaproj/numaflow-go/pkg/sourcetransformer" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/test/bufconn" + transformpb "github.com/numaproj/numaflow-go/pkg/apis/proto/sourcetransform/v1" "github.com/numaproj/numaflow/pkg/isb" "github.com/numaproj/numaflow/pkg/isb/testutils" - "github.com/numaproj/numaflow/pkg/sdkclient/sourcetransformer" + sourcetransformerSdk "github.com/numaproj/numaflow/pkg/sdkclient/sourcetransformer" "github.com/numaproj/numaflow/pkg/udf/rpc" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" ) -func NewMockGRPCBasedTransformer(mockClient *transformermock.MockSourceTransformClient) *GRPCBasedTransformer { - c, _ := sourcetransformer.NewFromClient(mockClient) - return &GRPCBasedTransformer{"test-vertex", c} -} - -func TestGRPCBasedTransformer_WaitUntilReadyWithMockClient(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockClient := transformermock.NewMockSourceTransformClient(ctrl) - mockClient.EXPECT().IsReady(gomock.Any(), gomock.Any()).Return(&v1.ReadyResponse{Ready: true}, nil) - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - go func() { - <-ctx.Done() - if ctx.Err() == context.DeadlineExceeded { - t.Log(t.Name(), "test timeout") - } - }() - - u := NewMockGRPCBasedTransformer(mockClient) - err := u.WaitUntilReady(ctx) - assert.NoError(t, err) -} 
- -type rpcMsg struct { - msg proto.Message -} - -func (r *rpcMsg) Matches(msg interface{}) bool { - m, ok := msg.(proto.Message) - if !ok { - return false +func TestGRPCBasedTransformer_WaitUntilReadyWithServer(t *testing.T) { + svc := &sourcetransformer.Service{ + Transformer: sourcetransformer.SourceTransformFunc(func(ctx context.Context, keys []string, datum sourcetransformer.Datum) sourcetransformer.Messages { + return sourcetransformer.Messages{} + }), } - return proto.Equal(m, r.msg) -} -func (r *rpcMsg) String() string { - return fmt.Sprintf("is %s", r.msg) + conn := newServer(t, func(server *grpc.Server) { + transformpb.RegisterSourceTransformServer(server, svc) + }) + transformClient := transformpb.NewSourceTransformClient(conn) + client, _ := sourcetransformerSdk.NewFromClient(context.Background(), transformClient) + u := NewGRPCBasedTransformer("testVertex", client) + err := u.WaitUntilReady(context.Background()) + assert.NoError(t, err) } -func TestGRPCBasedTransformer_BasicApplyWithMockClient(t *testing.T) { +func TestGRPCBasedTransformer_BasicApplyWithServer(t *testing.T) { t.Run("test success", func(t *testing.T) { - - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockClient := transformermock.NewMockSourceTransformClient(ctrl) - req := &v1.SourceTransformRequest{ - Keys: []string{"test_success_key"}, - Value: []byte(`forward_message`), - EventTime: timestamppb.New(time.Unix(1661169600, 0)), - Watermark: timestamppb.New(time.Time{}), + svc := &sourcetransformer.Service{ + Transformer: sourcetransformer.SourceTransformFunc(func(ctx context.Context, keys []string, datum sourcetransformer.Datum) sourcetransformer.Messages { + return sourcetransformer.MessagesBuilder().Append(sourcetransformer.NewMessage(datum.Value(), datum.EventTime()).WithKeys(keys)) + }), } - mockClient.EXPECT().SourceTransformFn(gomock.Any(), &rpcMsg{msg: req}).Return(&v1.SourceTransformResponse{ - Results: []*v1.SourceTransformResponse_Result{ - { - Keys: 
[]string{"test_success_key"}, - Value: []byte(`forward_message`), - }, - }, - }, nil) - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - go func() { - <-ctx.Done() - if ctx.Err() == context.DeadlineExceeded { - t.Log(t.Name(), "test timeout") - } - }() - u := NewMockGRPCBasedTransformer(mockClient) - got, err := u.ApplyTransform(ctx, &isb.ReadMessage{ + conn := newServer(t, func(server *grpc.Server) { + transformpb.RegisterSourceTransformServer(server, svc) + }) + transformClient := transformpb.NewSourceTransformClient(conn) + ctx := context.Background() + client, err := sourcetransformerSdk.NewFromClient(ctx, transformClient) + require.NoError(t, err, "creating source transformer client") + u := NewGRPCBasedTransformer("testVertex", client) + + got, err := u.ApplyTransform(ctx, []*isb.ReadMessage{{ Message: isb.Message{ Header: isb.Header{ MessageInfo: isb.MessageInfo{ @@ -130,94 +89,33 @@ func TestGRPCBasedTransformer_BasicApplyWithMockClient(t *testing.T) { }, }, ReadOffset: isb.SimpleStringOffset(func() string { return "0" }), - }, + }}, ) assert.NoError(t, err) - assert.Equal(t, req.Keys, got[0].Keys) - assert.Equal(t, req.Value, got[0].Payload) + assert.Equal(t, []string{"test_success_key"}, got[0].WriteMessages[0].Keys) + assert.Equal(t, []byte(`forward_message`), got[0].WriteMessages[0].Payload) }) t.Run("test error", func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockClient := transformermock.NewMockSourceTransformClient(ctrl) - req := &v1.SourceTransformRequest{ - Keys: []string{"test_error_key"}, - Value: []byte(`forward_message`), - EventTime: timestamppb.New(time.Unix(1661169660, 0)), - Watermark: timestamppb.New(time.Time{}), + svc := &sourcetransformer.Service{ + Transformer: sourcetransformer.SourceTransformFunc(func(ctx context.Context, keys []string, datum sourcetransformer.Datum) sourcetransformer.Messages { + return sourcetransformer.Messages{} + }), } - 
mockClient.EXPECT().SourceTransformFn(gomock.Any(), &rpcMsg{msg: req}).Return(nil, fmt.Errorf("mock error")) - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - go func() { - <-ctx.Done() - if ctx.Err() == context.DeadlineExceeded { - t.Log(t.Name(), "test timeout") - } - }() - u := NewMockGRPCBasedTransformer(mockClient) - _, err := u.ApplyTransform(ctx, &isb.ReadMessage{ - Message: isb.Message{ - Header: isb.Header{ - MessageInfo: isb.MessageInfo{ - EventTime: time.Unix(1661169660, 0), - }, - ID: isb.MessageID{ - VertexName: "test-vertex", - Offset: "0-0", - }, - Keys: []string{"test_error_key"}, - }, - Body: isb.Body{ - Payload: []byte(`forward_message`), - }, - }, - ReadOffset: isb.SimpleStringOffset(func() string { return "0" }), - }, - ) - assert.ErrorIs(t, err, &rpc.ApplyUDFErr{ - UserUDFErr: false, - Message: fmt.Sprintf("%s", err), - InternalErr: rpc.InternalErr{ - Flag: true, - MainCarDown: false, - }, + conn := newServer(t, func(server *grpc.Server) { + transformpb.RegisterSourceTransformServer(server, svc) }) - }) + transformClient := transformpb.NewSourceTransformClient(conn) + ctx, cancel := context.WithCancel(context.Background()) + client, err := sourcetransformerSdk.NewFromClient(ctx, transformClient) + require.NoError(t, err, "creating source transformer client") + u := NewGRPCBasedTransformer("testVertex", client) - t.Run("test error retryable: failed after 5 retries", func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() + // This cancelled context is passed to the ApplyTransform function to simulate failure + cancel() - mockClient := transformermock.NewMockSourceTransformClient(ctrl) - req := &v1.SourceTransformRequest{ - Keys: []string{"test_error_key"}, - Value: []byte(`forward_message`), - EventTime: timestamppb.New(time.Unix(1661169660, 0)), - Watermark: timestamppb.New(time.Time{}), - } - mockClient.EXPECT().SourceTransformFn(gomock.Any(), &rpcMsg{msg: req}).Return(nil, 
status.New(codes.DeadlineExceeded, "mock test err").Err()) - mockClient.EXPECT().SourceTransformFn(gomock.Any(), &rpcMsg{msg: req}).Return(nil, status.New(codes.DeadlineExceeded, "mock test err").Err()) - mockClient.EXPECT().SourceTransformFn(gomock.Any(), &rpcMsg{msg: req}).Return(nil, status.New(codes.DeadlineExceeded, "mock test err").Err()) - mockClient.EXPECT().SourceTransformFn(gomock.Any(), &rpcMsg{msg: req}).Return(nil, status.New(codes.DeadlineExceeded, "mock test err").Err()) - mockClient.EXPECT().SourceTransformFn(gomock.Any(), &rpcMsg{msg: req}).Return(nil, status.New(codes.DeadlineExceeded, "mock test err").Err()) - mockClient.EXPECT().SourceTransformFn(gomock.Any(), &rpcMsg{msg: req}).Return(nil, status.New(codes.DeadlineExceeded, "mock test err").Err()) - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - go func() { - <-ctx.Done() - if ctx.Err() == context.DeadlineExceeded { - t.Log(t.Name(), "test timeout") - } - }() - - u := NewMockGRPCBasedTransformer(mockClient) - _, err := u.ApplyTransform(ctx, &isb.ReadMessage{ + _, err = u.ApplyTransform(ctx, []*isb.ReadMessage{{ Message: isb.Message{ Header: isb.Header{ MessageInfo: isb.MessageInfo{ @@ -234,292 +132,155 @@ func TestGRPCBasedTransformer_BasicApplyWithMockClient(t *testing.T) { }, }, ReadOffset: isb.SimpleStringOffset(func() string { return "0" }), - }, + }}, ) - assert.ErrorIs(t, err, &rpc.ApplyUDFErr{ - UserUDFErr: false, - Message: fmt.Sprintf("%s", err), - InternalErr: rpc.InternalErr{ - Flag: true, - MainCarDown: false, - }, - }) - }) - - t.Run("test error retryable: failed after 1 retry", func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - mockClient := transformermock.NewMockSourceTransformClient(ctrl) - req := &v1.SourceTransformRequest{ - Keys: []string{"test_error_key"}, - Value: []byte(`forward_message`), - EventTime: timestamppb.New(time.Unix(1661169660, 0)), - Watermark: timestamppb.New(time.Time{}), - } - 
mockClient.EXPECT().SourceTransformFn(gomock.Any(), &rpcMsg{msg: req}).Return(nil, status.New(codes.DeadlineExceeded, "mock test err").Err()) - mockClient.EXPECT().SourceTransformFn(gomock.Any(), &rpcMsg{msg: req}).Return(nil, status.New(codes.DeadlineExceeded, "mock test err").Err()) - mockClient.EXPECT().SourceTransformFn(gomock.Any(), &rpcMsg{msg: req}).Return(nil, status.New(codes.InvalidArgument, "mock test err: non retryable").Err()) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - go func() { - <-ctx.Done() - if ctx.Err() == context.DeadlineExceeded { - t.Log(t.Name(), "test timeout") - } - }() - - u := NewMockGRPCBasedTransformer(mockClient) - _, err := u.ApplyTransform(ctx, &isb.ReadMessage{ - Message: isb.Message{ - Header: isb.Header{ - MessageInfo: isb.MessageInfo{ - EventTime: time.Unix(1661169660, 0), - }, - ID: isb.MessageID{ - VertexName: "test-vertex", - Offset: "0-0", - }, - Keys: []string{"test_error_key"}, - }, - Body: isb.Body{ - Payload: []byte(`forward_message`), - }, - }, - ReadOffset: isb.SimpleStringOffset(func() string { return "0" }), - }, - ) - assert.ErrorIs(t, err, &rpc.ApplyUDFErr{ + expectedUDFErr := &rpc.ApplyUDFErr{ UserUDFErr: false, - Message: fmt.Sprintf("%s", err), + Message: "gRPC client.SourceTransformFn failed, context canceled", InternalErr: rpc.InternalErr{ Flag: true, MainCarDown: false, }, - }) - }) - - t.Run("test error retryable: success after 1 retry", func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockClient := transformermock.NewMockSourceTransformClient(ctrl) - req := &v1.SourceTransformRequest{ - Keys: []string{"test_success_key"}, - Value: []byte(`forward_message`), - EventTime: timestamppb.New(time.Unix(1661169720, 0)), - Watermark: timestamppb.New(time.Time{}), - } - mockClient.EXPECT().SourceTransformFn(gomock.Any(), &rpcMsg{msg: req}).Return(nil, status.New(codes.DeadlineExceeded, "mock test err").Err()) - 
mockClient.EXPECT().SourceTransformFn(gomock.Any(), &rpcMsg{msg: req}).Return(&v1.SourceTransformResponse{ - Results: []*v1.SourceTransformResponse_Result{ - { - Keys: []string{"test_success_key"}, - Value: []byte(`forward_message`), - }, - }, - }, nil) - - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - go func() { - <-ctx.Done() - if ctx.Err() == context.DeadlineExceeded { - t.Log(t.Name(), "test timeout") - } - }() - - u := NewMockGRPCBasedTransformer(mockClient) - got, err := u.ApplyTransform(ctx, &isb.ReadMessage{ - Message: isb.Message{ - Header: isb.Header{ - MessageInfo: isb.MessageInfo{ - EventTime: time.Unix(1661169720, 0), - }, - ID: isb.MessageID{ - VertexName: "test-vertex", - Offset: "0-0", - }, - Keys: []string{"test_success_key"}, - }, - Body: isb.Body{ - Payload: []byte(`forward_message`), - }, - }, - ReadOffset: isb.SimpleStringOffset(func() string { return "0" }), - }, - ) - assert.NoError(t, err) - assert.Equal(t, req.Keys, got[0].Keys) - assert.Equal(t, req.Value, got[0].Payload) - }) - - t.Run("test error non retryable", func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockClient := transformermock.NewMockSourceTransformClient(ctrl) - req := &v1.SourceTransformRequest{ - Keys: []string{"test_error_key"}, - Value: []byte(`forward_message`), - EventTime: timestamppb.New(time.Unix(1661169660, 0)), - Watermark: timestamppb.New(time.Time{}), } - mockClient.EXPECT().SourceTransformFn(gomock.Any(), &rpcMsg{msg: req}).Return(nil, status.New(codes.InvalidArgument, "mock test err: non retryable").Err()) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - go func() { - <-ctx.Done() - if ctx.Err() == context.DeadlineExceeded { - t.Log(t.Name(), "test timeout") - } - }() - - u := NewMockGRPCBasedTransformer(mockClient) - _, err := u.ApplyTransform(ctx, &isb.ReadMessage{ - Message: isb.Message{ - Header: isb.Header{ - MessageInfo: 
isb.MessageInfo{ - EventTime: time.Unix(1661169660, 0), - }, - ID: isb.MessageID{ - VertexName: "test-vertex", - Offset: "0-0", - }, - Keys: []string{"test_error_key"}, - }, - Body: isb.Body{ - Payload: []byte(`forward_message`), - }, - }, - ReadOffset: isb.SimpleStringOffset(func() string { return "0" }), - }, - ) - assert.ErrorIs(t, err, &rpc.ApplyUDFErr{ - UserUDFErr: false, - Message: fmt.Sprintf("%s", err), - InternalErr: rpc.InternalErr{ - Flag: true, - MainCarDown: false, - }, - }) + var receivedErr *rpc.ApplyUDFErr + assert.ErrorAs(t, err, &receivedErr) + assert.Equal(t, expectedUDFErr, receivedErr) }) } -func TestGRPCBasedTransformer_ApplyWithMockClient_ChangePayload(t *testing.T) { - multiplyBy2 := func(body []byte) interface{} { - var result testutils.PayloadForTest - _ = json.Unmarshal(body, &result) - result.Value = result.Value * 2 - return result - } - - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockClient := transformermock.NewMockSourceTransformClient(ctrl) - mockClient.EXPECT().SourceTransformFn(gomock.Any(), gomock.Any()).DoAndReturn( - func(_ context.Context, datum *v1.SourceTransformRequest, opts ...grpc.CallOption) (*v1.SourceTransformResponse, error) { +func TestGRPCBasedTransformer_ApplyWithServer_ChangePayload(t *testing.T) { + svc := &sourcetransformer.Service{ + Transformer: sourcetransformer.SourceTransformFunc(func(ctx context.Context, keys []string, datum sourcetransformer.Datum) sourcetransformer.Messages { var originalValue testutils.PayloadForTest - _ = json.Unmarshal(datum.GetValue(), &originalValue) - doubledValue, _ := json.Marshal(multiplyBy2(datum.GetValue()).(testutils.PayloadForTest)) - var Results []*v1.SourceTransformResponse_Result + _ = json.Unmarshal(datum.Value(), &originalValue) + doubledValue := testutils.PayloadForTest{ + Value: originalValue.Value * 2, + Key: originalValue.Key, + } + doubledValueBytes, _ := json.Marshal(&doubledValue) + + var resultKeys []string if originalValue.Value%2 == 0 { - 
Results = append(Results, &v1.SourceTransformResponse_Result{ - Keys: []string{"even"}, - Value: doubledValue, - }) + resultKeys = []string{"even"} } else { - Results = append(Results, &v1.SourceTransformResponse_Result{ - Keys: []string{"odd"}, - Value: doubledValue, - }) - } - datumList := &v1.SourceTransformResponse{ - Results: Results, + resultKeys = []string{"odd"} } - return datumList, nil - }, - ).AnyTimes() - - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - go func() { - <-ctx.Done() - if ctx.Err() == context.DeadlineExceeded { - t.Log(t.Name(), "test timeout") - } - }() + return sourcetransformer.MessagesBuilder().Append(sourcetransformer.NewMessage(doubledValueBytes, datum.EventTime()).WithKeys(resultKeys)) + }), + } - u := NewMockGRPCBasedTransformer(mockClient) + conn := newServer(t, func(server *grpc.Server) { + transformpb.RegisterSourceTransformServer(server, svc) + }) + transformClient := transformpb.NewSourceTransformClient(conn) + ctx := context.Background() + client, _ := sourcetransformerSdk.NewFromClient(ctx, transformClient) + u := NewGRPCBasedTransformer("testVertex", client) var count = int64(10) readMessages := testutils.BuildTestReadMessages(count, time.Unix(1661169600, 0), nil) - - var results = make([][]byte, len(readMessages)) - var resultKeys = make([][]string, len(readMessages)) + messages := make([]*isb.ReadMessage, len(readMessages)) for idx, readMessage := range readMessages { - apply, err := u.ApplyTransform(ctx, &readMessage) - assert.NoError(t, err) - results[idx] = apply[0].Payload - resultKeys[idx] = apply[0].Header.Keys + messages[idx] = &readMessage } + apply, err := u.ApplyTransform(context.TODO(), messages) + assert.NoError(t, err) - var expectedResults = make([][]byte, count) - var expectedKeys = make([][]string, count) - for idx, readMessage := range readMessages { + for _, pair := range apply { + resultPayload := pair.WriteMessages[0].Payload + resultKeys := 
pair.WriteMessages[0].Header.Keys var readMessagePayload testutils.PayloadForTest - _ = json.Unmarshal(readMessage.Payload, &readMessagePayload) + _ = json.Unmarshal(pair.ReadMessage.Payload, &readMessagePayload) + var expectedKeys []string if readMessagePayload.Value%2 == 0 { - expectedKeys[idx] = []string{"even"} + expectedKeys = []string{"even"} } else { - expectedKeys[idx] = []string{"odd"} + expectedKeys = []string{"odd"} } - marshal, _ := json.Marshal(multiplyBy2(readMessage.Payload)) - expectedResults[idx] = marshal - } + assert.Equal(t, expectedKeys, resultKeys) - assert.Equal(t, expectedResults, results) - assert.Equal(t, expectedKeys, resultKeys) + doubledValue := testutils.PayloadForTest{ + Key: readMessagePayload.Key, + Value: readMessagePayload.Value * 2, + } + marshal, _ := json.Marshal(doubledValue) + assert.Equal(t, marshal, resultPayload) + } } -func TestGRPCBasedTransformer_ApplyWithMockClient_ChangeEventTime(t *testing.T) { - testEventTime := time.Date(1992, 2, 8, 0, 0, 0, 100, time.UTC) - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockClient := transformermock.NewMockSourceTransformClient(ctrl) - mockClient.EXPECT().SourceTransformFn(gomock.Any(), gomock.Any()).DoAndReturn( - func(_ context.Context, datum *v1.SourceTransformRequest, opts ...grpc.CallOption) (*v1.SourceTransformResponse, error) { - var Results []*v1.SourceTransformResponse_Result - Results = append(Results, &v1.SourceTransformResponse_Result{ - Keys: []string{"even"}, - Value: datum.Value, - EventTime: timestamppb.New(testEventTime), - }) - datumList := &v1.SourceTransformResponse{ - Results: Results, - } - return datumList, nil - }, - ).AnyTimes() +func newServer(t *testing.T, register func(server *grpc.Server)) *grpc.ClientConn { + lis := bufconn.Listen(100) + t.Cleanup(func() { + _ = lis.Close() + }) + + server := grpc.NewServer() + t.Cleanup(func() { + server.Stop() + }) + + register(server) - ctx, cancel := context.WithTimeout(context.Background(), 
time.Minute) - defer cancel() + errChan := make(chan error, 1) go func() { - <-ctx.Done() - if ctx.Err() == context.DeadlineExceeded { - t.Log(t.Name(), "test timeout") + // t.Fatal should only be called from the goroutine running the test + if err := server.Serve(lis); err != nil { + errChan <- err } }() - u := NewMockGRPCBasedTransformer(mockClient) + dialer := func(context.Context, string) (net.Conn, error) { + return lis.Dial() + } + + conn, err := grpc.NewClient("passthrough://", grpc.WithContextDialer(dialer), grpc.WithTransportCredentials(insecure.NewCredentials())) + t.Cleanup(func() { + _ = conn.Close() + }) + if err != nil { + t.Fatalf("Creating new gRPC client connection: %v", err) + } + + var grpcServerErr error + select { + case grpcServerErr = <-errChan: + case <-time.After(500 * time.Millisecond): + grpcServerErr = errors.New("gRPC server didn't start in 500ms") + } + if err != nil { + t.Fatalf("Failed to start gRPC server: %v", grpcServerErr) + } + + return conn +} + +func TestGRPCBasedTransformer_Apply_ChangeEventTime(t *testing.T) { + testEventTime := time.Date(1992, 2, 8, 0, 0, 0, 100, time.UTC) + svc := &sourcetransformer.Service{ + Transformer: sourcetransformer.SourceTransformFunc(func(ctx context.Context, keys []string, datum sourcetransformer.Datum) sourcetransformer.Messages { + msg := datum.Value() + return sourcetransformer.MessagesBuilder().Append(sourcetransformer.NewMessage(msg, testEventTime).WithKeys([]string{"even"})) + }), + } + conn := newServer(t, func(server *grpc.Server) { + transformpb.RegisterSourceTransformServer(server, svc) + }) + transformClient := transformpb.NewSourceTransformClient(conn) + ctx := context.Background() + client, _ := sourcetransformerSdk.NewFromClient(ctx, transformClient) + u := NewGRPCBasedTransformer("testVertex", client) var count = int64(2) readMessages := testutils.BuildTestReadMessages(count, time.Unix(1661169600, 0), nil) - for _, readMessage := range readMessages { - apply, err := 
u.ApplyTransform(ctx, &readMessage) - assert.NoError(t, err) - assert.Equal(t, testEventTime, apply[0].EventTime) + messages := make([]*isb.ReadMessage, len(readMessages)) + for idx, readMessage := range readMessages { + messages[idx] = &readMessage + } + apply, err := u.ApplyTransform(context.TODO(), messages) + assert.NoError(t, err) + for _, pair := range apply { + assert.NoError(t, pair.Err) + assert.Equal(t, testEventTime, pair.WriteMessages[0].EventTime) } } diff --git a/pkg/udf/forward/forward.go b/pkg/udf/forward/forward.go index 53efc945da..e768808cc3 100644 --- a/pkg/udf/forward/forward.go +++ b/pkg/udf/forward/forward.go @@ -481,7 +481,7 @@ func (isdf *InterStepDataForward) streamMessage(ctx context.Context, dataMessage if len(dataMessages) > 1 { errMsg := "data message size is not 1 with map UDF streaming" isdf.opts.logger.Errorw(errMsg) - return nil, fmt.Errorf(errMsg) + return nil, errors.New(errMsg) } else if len(dataMessages) == 1 { // send to map UDF only the data messages diff --git a/pkg/udf/rpc/grpc_batch_map.go b/pkg/udf/rpc/grpc_batch_map.go index 6d6c397642..ce65d201fb 100644 --- a/pkg/udf/rpc/grpc_batch_map.go +++ b/pkg/udf/rpc/grpc_batch_map.go @@ -26,26 +26,21 @@ import ( "google.golang.org/protobuf/types/known/timestamppb" "github.com/numaproj/numaflow/pkg/isb" + "github.com/numaproj/numaflow/pkg/isb/tracker" "github.com/numaproj/numaflow/pkg/sdkclient/batchmapper" "github.com/numaproj/numaflow/pkg/shared/logging" ) // GRPCBasedBatchMap is a map applier that uses gRPC client to invoke the map UDF. It implements the applier.MapApplier interface. 
type GRPCBasedBatchMap struct { - vertexName string - client batchmapper.Client - requestTracker *tracker + vertexName string + client batchmapper.Client } func NewUDSgRPCBasedBatchMap(vertexName string, client batchmapper.Client) *GRPCBasedBatchMap { return &GRPCBasedBatchMap{ vertexName: vertexName, client: client, - // requestTracker is used to store the read messages in a key, value manner where - // key is the read offset and the reference to read message as the value. - // Once the results are received from the UDF, we map the responses to the corresponding request - // using a lookup on this tracker. - requestTracker: NewTracker(), } } @@ -93,18 +88,17 @@ func (u *GRPCBasedBatchMap) ApplyBatchMap(ctx context.Context, messages []*isb.R // trackerReq is used to store the read messages in a key, value manner where // key is the read offset and the reference to read message as the value. // Once the results are received from the UDF, we map the responses to the corresponding request - // using a lookup on this tracker. - trackerReq := NewTracker() + // using a lookup on this Tracker. + trackerReq := tracker.NewMessageTracker(messages) // Read routine: this goroutine iterates over the input messages and sends each // of the read messages to the grpc client after transforming it to a BatchMapRequest. // Once all messages are sent, it closes the input channel to indicate that all requests have been read. - // On creating a new request, we add it to a tracker map so that the responses on the stream + // On creating a new request, we add it to a Tracker map so that the responses on the stream // can be mapped backed to the given parent request go func() { defer close(inputChan) for _, msg := range messages { - trackerReq.addRequest(msg) inputChan <- u.parseInputRequest(msg) } }() @@ -139,14 +133,14 @@ loop: } // Get the unique request ID for which these responses are meant for. 
msgId := grpcResp.GetId() - // Fetch the request value for the given ID from the tracker - parentMessage, ok := trackerReq.getRequest(msgId) - if !ok { - // this case is when the given request ID was not present in the tracker. + // Fetch the request value for the given ID from the Tracker + parentMessage := trackerReq.Remove(msgId) + if parentMessage == nil { + // this case is when the given request ID was not present in the Tracker. // This means that either the UDF added an incorrect ID // This cannot be processed further and should result in an error // Can there be another case for this? - logger.Error("Request missing from tracker, ", msgId) + logger.Error("Request missing from message tracker, ", msgId) return nil, fmt.Errorf("incorrect ID found during batch map processing") } // parse the responses received @@ -159,12 +153,11 @@ loop: Err: nil, } udfResults = append(udfResults, responsePair) - trackerReq.removeRequest(msgId) } } - // check if there are elements left in the tracker. This cannot be an acceptable case as we want the + // check if there are elements left in the Tracker. This cannot be an acceptable case as we want the // UDF to send responses for all elements. 
- if !trackerReq.isEmpty() { + if !trackerReq.IsEmpty() { logger.Error("BatchMap response for all requests not received from UDF") return nil, fmt.Errorf("batchMap response for all requests not received from UDF") } diff --git a/pkg/udf/rpc/tracker.go b/pkg/udf/rpc/tracker.go deleted file mode 100644 index 60b57a7af9..0000000000 --- a/pkg/udf/rpc/tracker.go +++ /dev/null @@ -1,75 +0,0 @@ -package rpc - -import ( - "sync" - - "github.com/numaproj/numaflow/pkg/isb" -) - -// tracker is used to store a key value pair for string and *isb.ReadMessage -// as it can be accessed by concurrent goroutines, we keep all operations -// under a mutex -type tracker struct { - lock sync.RWMutex - m map[string]*isb.ReadMessage -} - -// NewTracker initializes a new instance of a tracker -func NewTracker() *tracker { - return &tracker{ - m: make(map[string]*isb.ReadMessage), - lock: sync.RWMutex{}, - } -} - -// addRequest add a new entry for a given message to the tracker. -// the key is chosen as the read offset of the message -func (t *tracker) addRequest(msg *isb.ReadMessage) { - id := msg.ReadOffset.String() - t.set(id, msg) -} - -// getRequest returns the message corresponding to a given id, along with a bool -// to indicate if it does not exist -func (t *tracker) getRequest(id string) (*isb.ReadMessage, bool) { - return t.get(id) -} - -// removeRequest will remove the entry for a given id -func (t *tracker) removeRequest(id string) { - t.delete(id) -} - -// get is a helper function which fetches the message corresponding to a given id -// it acquires a lock before accessing the map -func (t *tracker) get(key string) (*isb.ReadMessage, bool) { - t.lock.RLock() - defer t.lock.RUnlock() - item, ok := t.m[key] - return item, ok -} - -// set is a helper function which add a key, value pair to the tracker map -// it acquires a lock before accessing the map -func (t *tracker) set(key string, msg *isb.ReadMessage) { - t.lock.Lock() - defer t.lock.Unlock() - t.m[key] = msg -} - -// 
delete is a helper function which will remove the entry for a given id -// it acquires a lock before accessing the map -func (t *tracker) delete(key string) { - t.lock.Lock() - defer t.lock.Unlock() - delete(t.m, key) -} - -// isEmpty is a helper function which checks if the tracker map is empty -// return true if empty -func (t *tracker) isEmpty() bool { - t.lock.RLock() - defer t.lock.RUnlock() - items := len(t.m) - return items == 0 -} diff --git a/pkg/webhook/validator/validator.go b/pkg/webhook/validator/validator.go index d5f2e86664..6d4e3e46a1 100644 --- a/pkg/webhook/validator/validator.go +++ b/pkg/webhook/validator/validator.go @@ -83,7 +83,10 @@ func GetValidator(ctx context.Context, NumaClient v1alpha1.NumaflowV1alpha1Inter // DeniedResponse constructs a denied AdmissionResponse func DeniedResponse(reason string, args ...interface{}) *admissionv1.AdmissionResponse { - result := apierrors.NewBadRequest(fmt.Sprintf(reason, args...)).Status() + if len(args) > 0 { + reason = fmt.Sprintf(reason, args) + } + result := apierrors.NewBadRequest(reason).Status() return &admissionv1.AdmissionResponse{ Result: &result, Allowed: false, diff --git a/rust/Cargo.lock b/rust/Cargo.lock index 655f30bc4d..e2b3045712 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -1557,7 +1557,7 @@ dependencies = [ [[package]] name = "numaflow" version = "0.1.1" -source = "git+https://github.com/numaproj/numaflow-rs.git?rev=0c1682864a4b906fab52e149cfd7cacc679ce688#0c1682864a4b906fab52e149cfd7cacc679ce688" +source = "git+https://github.com/numaproj/numaflow-rs.git?rev=30d8ce1972fd3f0c0b8059fee209516afeef0088#30d8ce1972fd3f0c0b8059fee209516afeef0088" dependencies = [ "chrono", "futures-util", diff --git a/rust/numaflow-core/Cargo.toml b/rust/numaflow-core/Cargo.toml index 85a3bc39b1..a10a46b9ab 100644 --- a/rust/numaflow-core/Cargo.toml +++ b/rust/numaflow-core/Cargo.toml @@ -38,7 +38,7 @@ log = "0.4.22" [dev-dependencies] tempfile = "3.11.0" -numaflow = { git = 
"https://github.com/numaproj/numaflow-rs.git", rev = "0c1682864a4b906fab52e149cfd7cacc679ce688" } +numaflow = { git = "https://github.com/numaproj/numaflow-rs.git", rev = "30d8ce1972fd3f0c0b8059fee209516afeef0088" } [build-dependencies] tonic-build = "0.12.1" diff --git a/rust/numaflow-core/proto/sourcetransform.proto b/rust/numaflow-core/proto/sourcetransform.proto index 18e045c323..9d0a63a9dc 100644 --- a/rust/numaflow-core/proto/sourcetransform.proto +++ b/rust/numaflow-core/proto/sourcetransform.proto @@ -9,21 +9,36 @@ service SourceTransform { // SourceTransformFn applies a function to each request element. // In addition to map function, SourceTransformFn also supports assigning a new event time to response. // SourceTransformFn can be used only at source vertex by source data transformer. - rpc SourceTransformFn(SourceTransformRequest) returns (SourceTransformResponse); + rpc SourceTransformFn(stream SourceTransformRequest) returns (stream SourceTransformResponse); // IsReady is the heartbeat endpoint for gRPC. rpc IsReady(google.protobuf.Empty) returns (ReadyResponse); } +/* + * Handshake message between client and server to indicate the start of transmission. + */ + message Handshake { + // Required field indicating the start of transmission. + bool sot = 1; +} + + /** * SourceTransformerRequest represents a request element. 
*/ message SourceTransformRequest { - repeated string keys = 1; - bytes value = 2; - google.protobuf.Timestamp event_time = 3; - google.protobuf.Timestamp watermark = 4; - map headers = 5; + message Request { + repeated string keys = 1; + bytes value = 2; + google.protobuf.Timestamp event_time = 3; + google.protobuf.Timestamp watermark = 4; + map headers = 5; + // This ID is used to uniquely identify a transform request + string id = 6; + } + Request request = 1; + optional Handshake handshake = 2; } /** @@ -37,6 +52,10 @@ message SourceTransformResponse { repeated string tags = 4; } repeated Result results = 1; + // This ID is used to refer the responses to the request it corresponds to. + string id = 2; + // Handshake message between client and server to indicate the start of transmission. + optional Handshake handshake = 3; } /** @@ -44,4 +63,4 @@ message SourceTransformResponse { */ message ReadyResponse { bool ready = 1; -} \ No newline at end of file +} diff --git a/rust/numaflow-core/src/config.rs b/rust/numaflow-core/src/config.rs index 5d245ed397..c3263e999c 100644 --- a/rust/numaflow-core/src/config.rs +++ b/rust/numaflow-core/src/config.rs @@ -3,6 +3,7 @@ use base64::prelude::BASE64_STANDARD; use base64::Engine; use numaflow_models::models::{Backoff, MonoVertex, RetryStrategy}; use std::env; +use std::fmt::Display; use std::sync::OnceLock; const DEFAULT_SOURCE_SOCKET: &str = "/var/run/numaflow/source.sock"; @@ -53,17 +54,14 @@ impl OnFailureStrategy { _ => Some(DEFAULT_SINK_RETRY_ON_FAIL_STRATEGY), } } +} - /// Converts the `OnFailureStrategy` enum variant to a String. - /// This facilitates situations where the enum needs to be displayed or logged as a string. - /// - /// # Returns - /// A string representing the `OnFailureStrategy` enum variant. 
- fn to_string(&self) -> String { +impl Display for OnFailureStrategy { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match *self { - OnFailureStrategy::Retry => "retry".to_string(), - OnFailureStrategy::Fallback => "fallback".to_string(), - OnFailureStrategy::Drop => "drop".to_string(), + OnFailureStrategy::Retry => write!(f, "retry"), + OnFailureStrategy::Fallback => write!(f, "fallback"), + OnFailureStrategy::Drop => write!(f, "drop"), } } } @@ -647,4 +645,4 @@ mod tests { let drop = OnFailureStrategy::Drop; assert_eq!(drop.to_string(), "drop"); } -} +} \ No newline at end of file diff --git a/rust/numaflow-core/src/message.rs b/rust/numaflow-core/src/message.rs index b99a61b31d..d230e994fb 100644 --- a/rust/numaflow-core/src/message.rs +++ b/rust/numaflow-core/src/message.rs @@ -7,7 +7,7 @@ use chrono::{DateTime, Utc}; use crate::error::Error; use crate::monovertex::sink_pb::sink_request::Request; use crate::monovertex::sink_pb::SinkRequest; -use crate::monovertex::source_pb; +use crate::monovertex::{source_pb, sourcetransform_pb}; use crate::monovertex::source_pb::{read_response, AckRequest}; use crate::monovertex::sourcetransform_pb::SourceTransformRequest; use crate::shared::utils::{prost_timestamp_from_utc, utc_from_timestamp}; @@ -58,11 +58,15 @@ impl From for AckRequest { impl From for SourceTransformRequest { fn from(message: Message) -> Self { Self { - keys: message.keys, - value: message.value, - event_time: prost_timestamp_from_utc(message.event_time), - watermark: None, - headers: message.headers, + request: Some(sourcetransform_pb::source_transform_request::Request { + id: message.id, + keys: message.keys, + value: message.value, + event_time: prost_timestamp_from_utc(message.event_time), + watermark: None, + headers: message.headers, + }), + handshake: None, } } } diff --git a/rust/numaflow-core/src/monovertex/forwarder.rs b/rust/numaflow-core/src/monovertex/forwarder.rs index a32aff093b..ab58cfad03 100644 --- 
a/rust/numaflow-core/src/monovertex/forwarder.rs +++ b/rust/numaflow-core/src/monovertex/forwarder.rs @@ -1,3 +1,10 @@ +use chrono::Utc; +use log::warn; +use std::collections::HashMap; +use tokio::time::sleep; +use tokio_util::sync::CancellationToken; +use tracing::{debug, info}; + use crate::config::{config, OnFailureStrategy}; use crate::error; use crate::error::Error; @@ -8,13 +15,6 @@ use crate::monovertex::sink_pb::Status::{Failure, Fallback, Success}; use crate::sink::user_defined::SinkWriter; use crate::source::user_defined::Source; use crate::transformer::user_defined::SourceTransformer; -use chrono::Utc; -use log::warn; -use std::collections::HashMap; -use tokio::task::JoinSet; -use tokio::time::sleep; -use tokio_util::sync::CancellationToken; -use tracing::{debug, info}; /// Forwarder is responsible for reading messages from the source, applying transformation if /// transformer is present, writing the messages to the sink, and then acknowledging the messages @@ -193,26 +193,14 @@ impl Forwarder { // Applies transformation to the messages if transformer is present // we concurrently apply transformation to all the messages. - async fn apply_transformer(&self, messages: Vec) -> error::Result> { - let Some(transformer_client) = &self.source_transformer else { + async fn apply_transformer(&mut self, messages: Vec) -> error::Result> { + let Some(transformer_client) = &mut self.source_transformer else { // return early if there is no transformer return Ok(messages); }; let start_time = tokio::time::Instant::now(); - let mut jh = JoinSet::new(); - for message in messages { - let mut transformer_client = transformer_client.clone(); - jh.spawn(async move { transformer_client.transform_fn(message).await }); - } - - let mut results = Vec::new(); - while let Some(task) = jh.join_next().await { - let result = task.map_err(|e| Error::TransformerError(format!("{:?}", e)))?; - if let Some(result) = result? 
{ - results.extend(result); - } - } + let results = transformer_client.transform_fn(messages).await?; debug!( "Transformer latency - {}ms", diff --git a/rust/numaflow-core/src/transformer/user_defined.rs b/rust/numaflow-core/src/transformer/user_defined.rs index de7b765b79..71a9d24cd6 100644 --- a/rust/numaflow-core/src/transformer/user_defined.rs +++ b/rust/numaflow-core/src/transformer/user_defined.rs @@ -1,67 +1,178 @@ -use crate::error; -use crate::message::Message; -use crate::monovertex::sourcetransform_pb::source_transform_client::SourceTransformClient; -use crate::monovertex::sourcetransform_pb::SourceTransformRequest; -use crate::shared::utils::utc_from_timestamp; +use std::collections::HashMap; + use tonic::transport::Channel; +use tonic::{Request, Streaming}; +use tokio::sync::mpsc; +use tokio::task::JoinHandle; +use tokio_stream::wrappers::ReceiverStream; +use tokio_util::sync::CancellationToken; +use tracing::warn; +use crate::error::{Result, Error}; +use crate::message::{Message, Offset}; +use crate::monovertex::sourcetransform_pb::{self, SourceTransformRequest, SourceTransformResponse, source_transform_client::SourceTransformClient}; +use crate::shared::utils::utc_from_timestamp; +use crate::config::config; const DROP: &str = "U+005C__DROP__"; /// TransformerClient is a client to interact with the transformer server. 
-#[derive(Clone)] pub struct SourceTransformer { - client: SourceTransformClient, + read_tx: mpsc::Sender, + resp_stream: Streaming, } impl SourceTransformer { - pub(crate) async fn new(client: SourceTransformClient) -> error::Result { - Ok(Self { client }) - } + pub(crate) async fn new(mut client: SourceTransformClient) -> Result { + let (read_tx, read_rx) = mpsc::channel(config().batch_size as usize); + let read_stream = ReceiverStream::new(read_rx); - pub(crate) async fn transform_fn( - &mut self, - message: Message, - ) -> error::Result>> { - // fields which will not be changed - let offset = message.offset.clone(); - let id = message.id.clone(); - let headers = message.headers.clone(); - - // TODO: is this complex? the reason to do this is, tomorrow when we have the normal - // Pipeline CRD, we can require the Into trait. - let response = self - .client - .source_transform_fn(>::into(message)) + // do a handshake for read with the server before we start sending read requests + let handshake_request = SourceTransformRequest { + request: None, + handshake: Some(sourcetransform_pb::Handshake { sot: true }), + }; + read_tx.send(handshake_request).await.map_err(|e| { + Error::TransformerError(format!("failed to send handshake request: {}", e)) + })?; + + let mut resp_stream = client + .source_transform_fn(Request::new(read_stream)) .await? .into_inner(); - let mut messages = Vec::new(); - for result in response.results { - // if the message is tagged with DROP, we will not forward it. - if result.tags.contains(&DROP.to_string()) { - return Ok(None); + // first response from the server will be the handshake response. We need to check if the + // server has accepted the handshake. + let handshake_response = resp_stream.message().await?.ok_or(Error::TransformerError( + "failed to receive handshake response".to_string(), + ))?; + // handshake cannot to None during the initial phase and it has to set `sot` to true. 
+ if handshake_response.handshake.map_or(true, |h| !h.sot) { + return Err(Error::TransformerError( + "invalid handshake response".to_string(), + )); + } + + Ok(Self { + read_tx, + resp_stream, + }) + } + + pub(crate) async fn transform_fn(&mut self, messages: Vec) -> Result> { + // fields which will not be changed + struct MessageInfo { + offset: Offset, + headers: HashMap, + } + + let mut tracker: HashMap = HashMap::with_capacity(messages.len()); + for message in &messages { + tracker.insert( + message.id.clone(), + MessageInfo { + offset: message.offset.clone(), + headers: message.headers.clone(), + }, + ); + } + + // Cancellation token is used to cancel either sending task (if an error occurs while receiving) or receiving messages (if an error occurs on sending task) + let token = CancellationToken::new(); + + // Send transform requests to the source transformer server + let sender_task: JoinHandle> = tokio::spawn({ + let read_tx = self.read_tx.clone(); + let token = token.clone(); + async move { + for msg in messages { + let result = tokio::select! { + result = read_tx.send(msg.into()) => result, + _ = token.cancelled() => { + warn!("Cancellation token was cancelled while sending source transform requests"); + return Ok(()); + }, + }; + + match result { + Ok(()) => continue, + Err(e) => { + token.cancel(); + return Err(Error::TransformerError(e.to_string())); + } + }; + } + Ok(()) } - let message = Message { - keys: result.keys, - value: result.value, - offset: offset.clone(), - id: id.clone(), - event_time: utc_from_timestamp(result.event_time), - headers: headers.clone(), + }); + + // Receive transformer results + let mut messages = Vec::new(); + while !tracker.is_empty() { + let resp = tokio::select! 
{ + _ = token.cancelled() => { + break; + }, + resp = self.resp_stream.message() => {resp} + }; + + let resp = match resp { + Ok(Some(val)) => val, + Ok(None) => { + // Logging at warning level since we don't expect this to happen + warn!("Source transformer server closed its sending end of the stream. No more messages to receive"); + token.cancel(); + break; + } + Err(e) => { + token.cancel(); + return Err(Error::TransformerError(format!( + "gRPC error while receiving messages from source transformer server: {e:?}" + ))); + } + }; + + let Some((msg_id, msg_info)) = tracker.remove_entry(&resp.id) else { + token.cancel(); + return Err(Error::TransformerError(format!( + "Received message with unknown ID {}", + resp.id + ))); }; - messages.push(message); + + for (i, result) in resp.results.into_iter().enumerate() { + // TODO: Expose metrics + if result.tags.iter().any(|x| x == DROP) { + continue; + } + let message = Message { + id: format!("{}-{}", msg_id, i), + keys: result.keys, + value: result.value, + offset: msg_info.offset.clone(), + event_time: utc_from_timestamp(result.event_time), + headers: msg_info.headers.clone(), + }; + messages.push(message); + } } - Ok(Some(messages)) + sender_task.await.unwrap().map_err(|e| { + Error::TransformerError(format!( + "Sending messages to gRPC transformer failed: {e:?}", + )) + })?; + + Ok(messages) } } #[cfg(test)] mod tests { use std::error::Error; + use std::time::Duration; - use crate::monovertex::sourcetransform_pb::source_transform_client::SourceTransformClient; use crate::shared::utils::create_rpc_channel; + use crate::transformer::user_defined::sourcetransform_pb::source_transform_client::SourceTransformClient; use crate::transformer::user_defined::SourceTransformer; use numaflow::sourcetransform; use tempfile::TempDir; @@ -105,7 +216,7 @@ mod tests { let mut client = SourceTransformer::new(SourceTransformClient::new( create_rpc_channel(sock_file).await?, )) - .await?; + .await?; let message = crate::message::Message 
{ keys: vec!["first".into()], @@ -115,18 +226,29 @@ mod tests { offset: "0".into(), }, event_time: chrono::Utc::now(), - id: "".to_string(), + id: "1".to_string(), headers: Default::default(), }; - let resp = client.transform_fn(message).await?; - assert!(resp.is_some()); - assert_eq!(resp.unwrap().len(), 1); + let resp = tokio::time::timeout( + tokio::time::Duration::from_secs(2), + client.transform_fn(vec![message]), + ) + .await??; + assert_eq!(resp.len(), 1); + + // we need to drop the client, because if there are any in-flight requests + // server fails to shut down. https://github.com/numaproj/numaflow-rs/issues/85 + drop(client); shutdown_tx .send(()) .expect("failed to send shutdown signal"); - handle.await.expect("failed to join server task"); + tokio::time::sleep(Duration::from_millis(50)).await; + assert!( + handle.is_finished(), + "Expected gRPC server to have shut down" + ); Ok(()) } @@ -169,7 +291,7 @@ mod tests { let mut client = SourceTransformer::new(SourceTransformClient::new( create_rpc_channel(sock_file).await?, )) - .await?; + .await?; let message = crate::message::Message { keys: vec!["second".into()], @@ -183,8 +305,12 @@ mod tests { headers: Default::default(), }; - let resp = client.transform_fn(message).await?; - assert!(resp.is_none()); + let resp = client.transform_fn(vec![message]).await?; + assert!(resp.is_empty()); + + // we need to drop the client, because if there are any in-flight requests + // server fails to shut down. 
https://github.com/numaproj/numaflow-rs/issues/85 + drop(client); shutdown_tx .send(()) @@ -192,4 +318,4 @@ mod tests { handle.await.expect("failed to join server task"); Ok(()) } -} +} \ No newline at end of file diff --git a/rust/servesink/Cargo.toml b/rust/servesink/Cargo.toml index a9a768ac6c..80430c169b 100644 --- a/rust/servesink/Cargo.toml +++ b/rust/servesink/Cargo.toml @@ -6,7 +6,7 @@ edition = "2021" [dependencies] tonic = "0.12.0" tokio = { version = "1.0", features = ["macros", "rt-multi-thread"] } -numaflow = { git = "https://github.com/numaproj/numaflow-rs.git", rev = "0c1682864a4b906fab52e149cfd7cacc679ce688" } +numaflow = { git = "https://github.com/numaproj/numaflow-rs.git", rev = "30d8ce1972fd3f0c0b8059fee209516afeef0088" } tracing = "0.1.40" tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } diff --git a/test/transformer-e2e/testdata/extract-event-time-from-payload.yaml b/test/transformer-e2e/testdata/extract-event-time-from-payload.yaml index 7bee8ef95f..8066caf9ec 100644 --- a/test/transformer-e2e/testdata/extract-event-time-from-payload.yaml +++ b/test/transformer-e2e/testdata/extract-event-time-from-payload.yaml @@ -6,7 +6,7 @@ spec: vertices: - name: in source: - http: {} + http: { } transformer: builtin: name: eventTimeExtractor diff --git a/test/transformer-e2e/transformer_test.go b/test/transformer-e2e/transformer_test.go index 55b88f3683..e6b727fcb9 100644 --- a/test/transformer-e2e/transformer_test.go +++ b/test/transformer-e2e/transformer_test.go @@ -21,6 +21,7 @@ package e2e import ( "context" "encoding/json" + "errors" "fmt" "os" "strconv" @@ -142,7 +143,7 @@ wmLoop: for { select { case <-ctx.Done(): - if ctx.Err() == context.DeadlineExceeded { + if errors.Is(ctx.Err(), context.DeadlineExceeded) { s.T().Log("test timed out") assert.Fail(s.T(), "timed out") break wmLoop @@ -173,23 +174,24 @@ func (s *TransformerSuite) TestSourceTransformer() { } var wg sync.WaitGroup - wg.Add(4) - go func() { - defer wg.Done() - 
s.testSourceTransformer("python") - }() - go func() { - defer wg.Done() - s.testSourceTransformer("java") - }() + wg.Add(1) + // FIXME: Enable these tests after corresponding SDKs are changed to support bidirectional streaming + //go func() { + // defer wg.Done() + // s.testSourceTransformer("python") + //}() + //go func() { + // defer wg.Done() + // s.testSourceTransformer("java") + //}() go func() { defer wg.Done() s.testSourceTransformer("go") }() - go func() { - defer wg.Done() - s.testSourceTransformer("rust") - }() + //go func() { + // defer wg.Done() + // s.testSourceTransformer("rust") + //}() wg.Wait() } From 173afb5e4794ab17b4ae28be74f187c2f79caf3e Mon Sep 17 00:00:00 2001 From: Sidhant Kohli Date: Tue, 1 Oct 2024 21:43:49 -0700 Subject: [PATCH 079/188] chore: check for pause timeout after errors (#2084) Signed-off-by: Sidhant Kohli --- pkg/reconciler/pipeline/controller.go | 36 ++++++++++++++++----------- 1 file changed, 22 insertions(+), 14 deletions(-) diff --git a/pkg/reconciler/pipeline/controller.go b/pkg/reconciler/pipeline/controller.go index 29dab84526..145c5544ad 100644 --- a/pkg/reconciler/pipeline/controller.go +++ b/pkg/reconciler/pipeline/controller.go @@ -845,18 +845,23 @@ func (r *pipelineReconciler) pausePipeline(ctx context.Context, pl *dfv1.Pipelin return updated, err } - daemonClient, err := daemonclient.NewGRPCDaemonServiceClient(pl.GetDaemonServiceURL()) - if err != nil { - return true, err - } - defer func() { - _ = daemonClient.Close() - }() - drainCompleted, err := daemonClient.IsDrained(ctx, pl.Name) - if err != nil { - return true, err + var daemonError error + var drainCompleted = false + + // Check for the daemon to obtain the buffer draining information, in case we see an error trying to + // retrieve this we do not exit prematurely to allow honoring the pause timeout for a consistent error + // - In case the timeout has not occurred we would trigger a requeue + // - If the timeout has occurred even after getting the drained 
error, we will try to pause the pipeline + daemonClient, daemonError := daemonclient.NewGRPCDaemonServiceClient(pl.GetDaemonServiceURL()) + if daemonClient != nil { + defer func() { + _ = daemonClient.Close() + }() + drainCompleted, err = daemonClient.IsDrained(ctx, pl.Name) + if err != nil { + daemonError = err + } } - pauseTimestamp, err := time.Parse(time.RFC3339, pl.GetAnnotations()[dfv1.KeyPauseTimestamp]) if err != nil { return false, err @@ -864,18 +869,21 @@ func (r *pipelineReconciler) pausePipeline(ctx context.Context, pl *dfv1.Pipelin // if drain is completed, or we have exceeded the pause deadline, mark pl as paused and scale down if time.Now().After(pauseTimestamp.Add(time.Duration(pl.Spec.Lifecycle.GetPauseGracePeriodSeconds())*time.Second)) || drainCompleted { - _, err := r.scaleDownAllVertices(ctx, pl) + _, err = r.scaleDownAllVertices(ctx, pl) if err != nil { return true, err } - // if the drain completed succesfully, then set the DrainedOnPause field to true + if daemonError != nil { + r.logger.Errorw("Error in fetching Drained status, Pausing due to timeout", zap.Error(daemonError)) + } + // if the drain completed successfully, then set the DrainedOnPause field to true if drainCompleted { pl.Status.MarkDrainedOnPauseTrue() } pl.Status.MarkPhasePaused() return false, nil } - return true, nil + return true, daemonError } func (r *pipelineReconciler) scaleDownSourceVertices(ctx context.Context, pl *dfv1.Pipeline) (bool, error) { From 6aacb6ea8bf656c1d65888deba4c21b0aea5de73 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 23:05:41 -0700 Subject: [PATCH 080/188] chore(deps): bump tonic from 0.12.2 to 0.12.3 in /rust (#2111) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- rust/Cargo.lock | 4 ++-- rust/numaflow-core/Cargo.toml | 2 +- rust/servesink/Cargo.toml | 2 +- 3 files changed, 4 insertions(+), 4 
deletions(-) diff --git a/rust/Cargo.lock b/rust/Cargo.lock index e2b3045712..761512fdbf 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -3012,9 +3012,9 @@ dependencies = [ [[package]] name = "tonic" -version = "0.12.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6f6ba989e4b2c58ae83d862d3a3e27690b6e3ae630d0deb59f3697f32aa88ad" +checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" dependencies = [ "async-stream", "async-trait", diff --git a/rust/numaflow-core/Cargo.toml b/rust/numaflow-core/Cargo.toml index a10a46b9ab..d056f9b347 100644 --- a/rust/numaflow-core/Cargo.toml +++ b/rust/numaflow-core/Cargo.toml @@ -6,7 +6,7 @@ edition = "2021" [dependencies] axum = "0.7.5" axum-server = { version = "0.7.1", features = ["tls-rustls"] } -tonic = "0.12.2" +tonic = "0.12.3" bytes = "1.7.1" thiserror = "1.0.63" tokio = { version = "1.39.3", features = ["full"] } diff --git a/rust/servesink/Cargo.toml b/rust/servesink/Cargo.toml index 80430c169b..8f6b6234a2 100644 --- a/rust/servesink/Cargo.toml +++ b/rust/servesink/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.0" edition = "2021" [dependencies] -tonic = "0.12.0" +tonic = "0.12.3" tokio = { version = "1.0", features = ["macros", "rt-multi-thread"] } numaflow = { git = "https://github.com/numaproj/numaflow-rs.git", rev = "30d8ce1972fd3f0c0b8059fee209516afeef0088" } tracing = "0.1.40" From 3dbed43ea652ed5e4913e2346e60816d52b258ed Mon Sep 17 00:00:00 2001 From: Keran Yang Date: Wed, 2 Oct 2024 02:06:46 -0400 Subject: [PATCH 081/188] feat: container-level version compatibility check for monovertex (#2108) Signed-off-by: Keran Yang --- rust/numaflow-core/Cargo.toml | 1 - rust/numaflow-core/src/monovertex.rs | 4 +- rust/numaflow-core/src/shared/server_info.rs | 181 +++++++++++++------ rust/numaflow-core/src/shared/utils.rs | 8 +- 4 files changed, 134 insertions(+), 60 deletions(-) diff --git a/rust/numaflow-core/Cargo.toml 
b/rust/numaflow-core/Cargo.toml index d056f9b347..c94af812a9 100644 --- a/rust/numaflow-core/Cargo.toml +++ b/rust/numaflow-core/Cargo.toml @@ -21,7 +21,6 @@ tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } hyper-util = "0.1.6" tower = "0.4.13" uuid = { version = "1.10.0", features = ["v4"] } -once_cell = "1.19.0" serde_json = "1.0.122" numaflow-models = { path = "../numaflow-models" } trait-variant = "0.1.2" diff --git a/rust/numaflow-core/src/monovertex.rs b/rust/numaflow-core/src/monovertex.rs index afd8d0dc09..be87ad361e 100644 --- a/rust/numaflow-core/src/monovertex.rs +++ b/rust/numaflow-core/src/monovertex.rs @@ -244,7 +244,7 @@ mod tests { let (src_shutdown_tx, src_shutdown_rx) = tokio::sync::oneshot::channel(); let tmp_dir = tempfile::TempDir::new().unwrap(); let src_sock_file = tmp_dir.path().join("source.sock"); - let src_info_file = tmp_dir.path().join("source-server-info"); + let src_info_file = tmp_dir.path().join("sourcer-server-info"); let server_info_obj = ServerInfo { protocol: "uds".to_string(), language: "rust".to_string(), @@ -271,7 +271,7 @@ mod tests { let (sink_shutdown_tx, sink_shutdown_rx) = tokio::sync::oneshot::channel(); let tmp_dir = tempfile::TempDir::new().unwrap(); let sink_sock_file = tmp_dir.path().join("sink.sock"); - let sink_server_info = tmp_dir.path().join("sink-server-info"); + let sink_server_info = tmp_dir.path().join("sinker-server-info"); write_server_info(sink_server_info.to_str().unwrap(), &server_info_obj) .await diff --git a/rust/numaflow-core/src/shared/server_info.rs b/rust/numaflow-core/src/shared/server_info.rs index 1c78429cb6..0df809363f 100644 --- a/rust/numaflow-core/src/shared/server_info.rs +++ b/rust/numaflow-core/src/shared/server_info.rs @@ -40,7 +40,7 @@ pub(crate) async fn check_for_server_compatibility( cln_token: CancellationToken, ) -> error::Result<()> { // Read the server info file - let server_info = read_server_info(file_path, cln_token).await?; + let server_info = 
read_server_info(&file_path, cln_token).await?; // Log the server info info!("Server info file: {:?}", server_info); @@ -49,6 +49,7 @@ pub(crate) async fn check_for_server_compatibility( let sdk_version = &server_info.version; let min_numaflow_version = &server_info.minimum_numaflow_version; let sdk_language = &server_info.language; + let container_type = get_container_type(&file_path).unwrap_or(""); // Get version information let version_info = version::get_version_info(); let numaflow_version = &version_info.version; @@ -72,7 +73,7 @@ pub(crate) async fn check_for_server_compatibility( } else { // Get minimum supported SDK versions and check compatibility let min_supported_sdk_versions = version::get_minimum_supported_sdk_versions(); - check_sdk_compatibility(sdk_version, sdk_language, min_supported_sdk_versions)?; + check_sdk_compatibility(sdk_version, sdk_language, container_type, min_supported_sdk_versions)?; } Ok(()) @@ -109,10 +110,20 @@ fn check_numaflow_compatibility( fn check_sdk_compatibility( sdk_version: &str, sdk_language: &str, + container_type : &str, min_supported_sdk_versions: &SdkConstraints, ) -> error::Result<()> { // Check if the SDK language is present in the minimum supported SDK versions - if let Some(sdk_required_version) = min_supported_sdk_versions.get(sdk_language) { + if !min_supported_sdk_versions.contains_key(sdk_language) { + return Err(Error::ServerInfoError(format!( + "SDK version constraint not found for language: {}, container type: {}", + sdk_language, + container_type + ))); + } + let empty_map = HashMap::new(); + let lang_constraints = min_supported_sdk_versions.get(sdk_language).unwrap_or(&empty_map); + if let Some(sdk_required_version) = lang_constraints.get(container_type) { let sdk_constraint = format!(">={}", sdk_required_version); // For Python, use Pep440 versioning @@ -149,14 +160,16 @@ fn check_sdk_compatibility( } else { // Language not found in the supported SDK versions warn!( - "SDK version constraint not found 
for language: {}", - sdk_language + "SDK version constraint not found for language: {}, container type: {}", + sdk_language, + container_type ); // Return error indicating the language return Err(Error::ServerInfoError(format!( - "SDK version constraint not found for language: {}", - sdk_language + "SDK version constraint not found for language: {}, container type: {}", + sdk_language, + container_type ))); } Ok(()) @@ -246,11 +259,25 @@ fn trim_after_dash(input: &str) -> &str { } } +/// Extracts the container type from the server info file. +/// The file name is in the format of -server-info. +fn get_container_type(server_info_file: &PathBuf) -> Option<&str> { + let file_name = server_info_file.file_name()?; + let container_type = file_name + .to_str()? + .trim_end_matches("-server-info"); + if container_type.is_empty() { + None + } else { + Some(container_type) + } +} + /// Reads the server info file and returns the parsed ServerInfo struct. /// The cancellation token is used to stop ready-check of server_info file in case it is missing. /// This cancellation token is closed via the global shutdown handler. async fn read_server_info( - file_path: PathBuf, + file_path: &PathBuf, cln_token: CancellationToken, ) -> error::Result { // Infinite loop to keep checking until the file is ready @@ -318,21 +345,43 @@ async fn read_server_info( mod version { use std::collections::HashMap; use std::env; + use std::sync::LazyLock; - use once_cell::sync::Lazy; + pub(crate) type SdkConstraints = HashMap>; - pub(crate) type SdkConstraints = HashMap; - - // MINIMUM_SUPPORTED_SDK_VERSIONS is a HashMap with SDK language as key and minimum supported version as value - static MINIMUM_SUPPORTED_SDK_VERSIONS: Lazy = Lazy::new(|| { + // MINIMUM_SUPPORTED_SDK_VERSIONS is the minimum supported version of each SDK for the current numaflow version. 
+ static MINIMUM_SUPPORTED_SDK_VERSIONS: LazyLock = LazyLock::new(|| { // TODO: populate this from a static file and make it part of the release process // the value of the map matches `minimumSupportedSDKVersions` in pkg/sdkclient/serverinfo/types.go // please follow the instruction there to update the value + // NOTE: the string content of the keys matches the corresponding server info file name. + // DO NOT change it unless the server info file name is changed. + let mut go_version_map = HashMap::new(); + go_version_map.insert("sourcer".to_string(), "0.8.0-z".to_string()); + go_version_map.insert("sourcetransformer".to_string(), "0.8.0-z".to_string()); + go_version_map.insert("sinker".to_string(), "0.8.0-z".to_string()); + go_version_map.insert("fb-sinker".to_string(), "0.8.0-z".to_string()); + let mut python_version_map = HashMap::new(); + python_version_map.insert("sourcer".to_string(), "0.8.0rc100".to_string()); + python_version_map.insert("sourcetransformer".to_string(), "0.8.0rc100".to_string()); + python_version_map.insert("sinker".to_string(), "0.8.0rc100".to_string()); + python_version_map.insert("fb-sinker".to_string(), "0.8.0rc100".to_string()); + let mut java_version_map = HashMap::new(); + java_version_map.insert("sourcer".to_string(), "0.8.0-z".to_string()); + java_version_map.insert("sourcetransformer".to_string(), "0.8.0-z".to_string()); + java_version_map.insert("sinker".to_string(), "0.8.0-z".to_string()); + java_version_map.insert("fb-sinker".to_string(), "0.8.0-z".to_string()); + let mut rust_version_map = HashMap::new(); + rust_version_map.insert("sourcer".to_string(), "0.1.0-z".to_string()); + rust_version_map.insert("sourcetransformer".to_string(), "0.1.0-z".to_string()); + rust_version_map.insert("sinker".to_string(), "0.1.0-z".to_string()); + rust_version_map.insert("fb-sinker".to_string(), "0.1.0-z".to_string()); + let mut m = HashMap::new(); - m.insert("go".to_string(), "0.8.0-z".to_string()); - m.insert("python".to_string(), 
"0.8.0rc100".to_string()); - m.insert("java".to_string(), "0.8.0-z".to_string()); - m.insert("rust".to_string(), "0.1.0-z".to_string()); + m.insert("go".to_string(), go_version_map); + m.insert("python".to_string(), python_version_map); + m.insert("java".to_string(), java_version_map); + m.insert("rust".to_string(), rust_version_map); m }); @@ -397,8 +446,8 @@ mod version { } } - /// Use once_cell::sync::Lazy for thread-safe, one-time initialization - static VERSION_INFO: Lazy = Lazy::new(VersionInfo::init); + /// Use std::sync::LazyLock for thread-safe, one-time initialization + static VERSION_INFO: LazyLock = LazyLock::new(VersionInfo::init); /// Getter function for VersionInfo pub fn get_version_info() -> &'static VersionInfo { @@ -420,6 +469,7 @@ mod tests { const TCP: &str = "tcp"; const PYTHON: &str = "python"; const GOLANG: &str = "go"; + const TEST_CONTAINER_TYPE: &str = "sourcer"; async fn write_server_info( svr_info: &ServerInfo, @@ -470,22 +520,40 @@ mod tests { // Helper function to create a SdkConstraints struct with minimum supported SDK versions all being stable releases fn create_sdk_constraints_stable_versions() -> SdkConstraints { - let mut constraints = HashMap::new(); - constraints.insert("python".to_string(), "1.2.0rc100".to_string()); - constraints.insert("java".to_string(), "2.0.0-z".to_string()); - constraints.insert("go".to_string(), "0.10.0-z".to_string()); - constraints.insert("rust".to_string(), "0.1.0-z".to_string()); - constraints + let mut go_version_map = HashMap::new(); + go_version_map.insert(TEST_CONTAINER_TYPE.to_string(), "0.10.0-z".to_string()); + let mut python_version_map = HashMap::new(); + python_version_map.insert(TEST_CONTAINER_TYPE.to_string(), "1.2.0rc100".to_string()); + let mut java_version_map = HashMap::new(); + java_version_map.insert(TEST_CONTAINER_TYPE.to_string(), "2.0.0-z".to_string()); + let mut rust_version_map = HashMap::new(); + rust_version_map.insert(TEST_CONTAINER_TYPE.to_string(), 
"0.1.0-z".to_string()); + + let mut m = HashMap::new(); + m.insert("go".to_string(), go_version_map); + m.insert("python".to_string(), python_version_map); + m.insert("java".to_string(), java_version_map); + m.insert("rust".to_string(), rust_version_map); + m } // Helper function to create a SdkConstraints struct with minimum supported SDK versions all being pre-releases fn create_sdk_constraints_pre_release_versions() -> SdkConstraints { - let mut constraints = HashMap::new(); - constraints.insert("python".to_string(), "1.2.0b2".to_string()); - constraints.insert("java".to_string(), "2.0.0-rc2".to_string()); - constraints.insert("go".to_string(), "0.10.0-rc2".to_string()); - constraints.insert("rust".to_string(), "0.1.0-rc3".to_string()); - constraints + let mut go_version_map = HashMap::new(); + go_version_map.insert(TEST_CONTAINER_TYPE.to_string(), "0.10.0-rc2".to_string()); + let mut python_version_map = HashMap::new(); + python_version_map.insert(TEST_CONTAINER_TYPE.to_string(), "1.2.0b2".to_string()); + let mut java_version_map = HashMap::new(); + java_version_map.insert(TEST_CONTAINER_TYPE.to_string(), "2.0.0-rc2".to_string()); + let mut rust_version_map = HashMap::new(); + rust_version_map.insert(TEST_CONTAINER_TYPE.to_string(), "0.1.0-rc3".to_string()); + + let mut m = HashMap::new(); + m.insert("go".to_string(), go_version_map); + m.insert("python".to_string(), python_version_map); + m.insert("java".to_string(), java_version_map); + m.insert("rust".to_string(), rust_version_map); + m } #[tokio::test] @@ -495,7 +563,7 @@ mod tests { let min_supported_sdk_versions = create_sdk_constraints_stable_versions(); let result = - check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, &min_supported_sdk_versions); assert!(result.is_ok()); } @@ -507,7 +575,7 @@ mod tests { let min_supported_sdk_versions = create_sdk_constraints_stable_versions(); let result = - 
check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + check_sdk_compatibility(sdk_version, sdk_language,TEST_CONTAINER_TYPE, &min_supported_sdk_versions); assert!(result.is_err()); assert!( @@ -522,7 +590,7 @@ mod tests { let min_supported_sdk_versions = create_sdk_constraints_stable_versions(); let result = - check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, &min_supported_sdk_versions); assert!(result.is_ok()); } @@ -534,7 +602,7 @@ mod tests { let min_supported_sdk_versions = create_sdk_constraints_stable_versions(); let result = - check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, &min_supported_sdk_versions); assert!(result.is_err()); assert!( @@ -549,7 +617,7 @@ mod tests { let min_supported_sdk_versions = create_sdk_constraints_stable_versions(); let result = - check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, &min_supported_sdk_versions); assert!(result.is_ok()); } @@ -561,7 +629,7 @@ mod tests { let min_supported_sdk_versions = create_sdk_constraints_stable_versions(); let result = - check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, &min_supported_sdk_versions); assert!(result.is_err()); assert!( @@ -576,7 +644,7 @@ mod tests { let min_supported_sdk_versions = create_sdk_constraints_stable_versions(); let result = - check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, &min_supported_sdk_versions); assert!(result.is_ok()); } @@ -588,7 +656,7 @@ mod tests { let min_supported_sdk_versions = 
create_sdk_constraints_stable_versions(); let result = - check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, &min_supported_sdk_versions); assert!(result.is_err()); assert!( @@ -603,7 +671,7 @@ mod tests { let min_supported_sdk_versions = create_sdk_constraints_stable_versions(); let result = - check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, &min_supported_sdk_versions); assert!(result.is_ok()); } @@ -615,7 +683,7 @@ mod tests { let min_supported_sdk_versions = create_sdk_constraints_stable_versions(); let result = - check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, &min_supported_sdk_versions); assert!(result.is_err()); assert!( @@ -630,7 +698,7 @@ mod tests { let min_supported_sdk_versions = create_sdk_constraints_pre_release_versions(); let result = - check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, &min_supported_sdk_versions); assert!(result.is_ok()); } @@ -642,7 +710,7 @@ mod tests { let min_supported_sdk_versions = create_sdk_constraints_pre_release_versions(); let result = - check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, &min_supported_sdk_versions); assert!(result.is_err()); assert!( @@ -657,7 +725,7 @@ mod tests { let min_supported_sdk_versions = create_sdk_constraints_pre_release_versions(); let result = - check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, &min_supported_sdk_versions); assert!(result.is_ok()); } @@ -669,7 +737,7 @@ 
mod tests { let min_supported_sdk_versions = create_sdk_constraints_pre_release_versions(); let result = - check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, &min_supported_sdk_versions); assert!(result.is_err()); assert!( @@ -684,7 +752,7 @@ mod tests { let min_supported_sdk_versions = create_sdk_constraints_pre_release_versions(); let result = - check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, &min_supported_sdk_versions); assert!(result.is_ok()); } @@ -696,7 +764,7 @@ mod tests { let min_supported_sdk_versions = create_sdk_constraints_pre_release_versions(); let result = - check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, &min_supported_sdk_versions); assert!(result.is_err()); assert!( @@ -711,7 +779,7 @@ mod tests { let min_supported_sdk_versions = create_sdk_constraints_pre_release_versions(); let result = - check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, &min_supported_sdk_versions); assert!(result.is_ok()); } @@ -723,7 +791,7 @@ mod tests { let min_supported_sdk_versions = create_sdk_constraints_pre_release_versions(); let result = - check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, &min_supported_sdk_versions); assert!(result.is_err()); assert!( @@ -738,7 +806,7 @@ mod tests { let min_supported_sdk_versions = create_sdk_constraints_pre_release_versions(); let result = - check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, 
&min_supported_sdk_versions); assert!(result.is_ok()); } @@ -750,7 +818,7 @@ mod tests { let min_supported_sdk_versions = create_sdk_constraints_pre_release_versions(); let result = - check_sdk_compatibility(sdk_version, sdk_language, &min_supported_sdk_versions); + check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, &min_supported_sdk_versions); assert!(result.is_err()); assert!( @@ -858,6 +926,13 @@ mod tests { "numaflow version 1.1.6-rc1 must be upgraded to at least 1.1.6-rc2, in order to work with current SDK version")); } + #[tokio::test] + async fn test_get_container_type_from_file_valid() { + let file_path = PathBuf::from("/var/run/numaflow/sourcer-server-info"); + let container_type = get_container_type(&file_path); + assert_eq!("sourcer", container_type.unwrap()); + } + #[tokio::test] async fn test_write_server_info_success() { // Create a temporary directory @@ -949,7 +1024,7 @@ mod tests { let _ = write_server_info(&server_info, file_path.to_str().unwrap()).await; // Call the read_server_info function - let result = read_server_info(file_path, cln_token).await; + let result = read_server_info(&file_path, cln_token).await; assert!(result.is_ok(), "Expected Ok, got {:?}", result); let server_info = result.unwrap(); @@ -978,7 +1053,7 @@ mod tests { let _drop_guard = cln_token.clone().drop_guard(); // Call the read_server_info function - let result = read_server_info(file_path, cln_token).await; + let result = read_server_info(&file_path, cln_token).await; assert!(result.is_err(), "Expected Err, got {:?}", result); let error = result.unwrap_err(); diff --git a/rust/numaflow-core/src/shared/utils.rs b/rust/numaflow-core/src/shared/utils.rs index b8a5e07686..99e9cef995 100644 --- a/rust/numaflow-core/src/shared/utils.rs +++ b/rust/numaflow-core/src/shared/utils.rs @@ -214,10 +214,10 @@ mod tests { #[tokio::test] async fn test_check_compatibility_success() { let dir = tempdir().unwrap(); - let source_file_path = 
dir.path().join("source_server_info.json"); - let sink_file_path = dir.path().join("sink_server_info.json"); - let transformer_file_path = dir.path().join("transformer_server_info.json"); - let fb_sink_file_path = dir.path().join("fb_sink_server_info.json"); + let source_file_path = dir.path().join("sourcer-server-info"); + let sink_file_path = dir.path().join("sinker-server-info"); + let transformer_file_path = dir.path().join("sourcetransformer-server-info"); + let fb_sink_file_path = dir.path().join("fb-sink-server-info"); let server_info = ServerInfo { protocol: "uds".to_string(), From dc25c4dc11c7fd5125c53bfff2b39fa49b9c8368 Mon Sep 17 00:00:00 2001 From: Vigith Maurice Date: Wed, 2 Oct 2024 23:44:55 -0700 Subject: [PATCH 082/188] feat: implement Source trait and use it for user-defined source (#2114) Signed-off-by: Vigith Maurice Signed-off-by: Yashash H L Co-authored-by: Yashash H L Co-authored-by: Sreekanth --- rust/Cargo.lock | 1 - rust/numaflow-core/src/config.rs | 2 +- rust/numaflow-core/src/lib.rs | 3 + rust/numaflow-core/src/message.rs | 2 +- rust/numaflow-core/src/monovertex.rs | 17 +- .../numaflow-core/src/monovertex/forwarder.rs | 76 +++---- rust/numaflow-core/src/monovertex/metrics.rs | 53 +++-- rust/numaflow-core/src/reader.rs | 9 + rust/numaflow-core/src/shared/server_info.rs | 186 +++++++++++++----- rust/numaflow-core/src/shared/utils.rs | 10 +- rust/numaflow-core/src/source.rs | 18 ++ rust/numaflow-core/src/source/user_defined.rs | 100 +++++++--- .../src/transformer/user_defined.rs | 25 +-- 13 files changed, 335 insertions(+), 167 deletions(-) create mode 100644 rust/numaflow-core/src/reader.rs diff --git a/rust/Cargo.lock b/rust/Cargo.lock index 761512fdbf..9837be100c 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -1591,7 +1591,6 @@ dependencies = [ "log", "numaflow 0.1.1", "numaflow-models", - "once_cell", "parking_lot", "pep440_rs", "prometheus-client", diff --git a/rust/numaflow-core/src/config.rs b/rust/numaflow-core/src/config.rs 
index c3263e999c..6310295f46 100644 --- a/rust/numaflow-core/src/config.rs +++ b/rust/numaflow-core/src/config.rs @@ -645,4 +645,4 @@ mod tests { let drop = OnFailureStrategy::Drop; assert_eq!(drop.to_string(), "drop"); } -} \ No newline at end of file +} diff --git a/rust/numaflow-core/src/lib.rs b/rust/numaflow-core/src/lib.rs index a941bd6cb1..4e410c9f90 100644 --- a/rust/numaflow-core/src/lib.rs +++ b/rust/numaflow-core/src/lib.rs @@ -34,3 +34,6 @@ mod source; /// /// [Transformer]: https://numaflow.numaproj.io/user-guide/sources/transformer/overview/ mod transformer; + +/// Reads from a stream. +mod reader; diff --git a/rust/numaflow-core/src/message.rs b/rust/numaflow-core/src/message.rs index d230e994fb..64f4079764 100644 --- a/rust/numaflow-core/src/message.rs +++ b/rust/numaflow-core/src/message.rs @@ -7,9 +7,9 @@ use chrono::{DateTime, Utc}; use crate::error::Error; use crate::monovertex::sink_pb::sink_request::Request; use crate::monovertex::sink_pb::SinkRequest; -use crate::monovertex::{source_pb, sourcetransform_pb}; use crate::monovertex::source_pb::{read_response, AckRequest}; use crate::monovertex::sourcetransform_pb::SourceTransformRequest; +use crate::monovertex::{source_pb, sourcetransform_pb}; use crate::shared::utils::{prost_timestamp_from_utc, utc_from_timestamp}; /// A message that is sent from the source to the sink. 
diff --git a/rust/numaflow-core/src/monovertex.rs b/rust/numaflow-core/src/monovertex.rs index be87ad361e..3aecbe066a 100644 --- a/rust/numaflow-core/src/monovertex.rs +++ b/rust/numaflow-core/src/monovertex.rs @@ -3,7 +3,7 @@ use crate::error; use crate::shared::utils; use crate::shared::utils::create_rpc_channel; use crate::sink::user_defined::SinkWriter; -use crate::source::user_defined::Source; +use crate::source::user_defined::new_source; use crate::transformer::user_defined::SourceTransformer; use forwarder::ForwarderBuilder; use metrics::MetricsState; @@ -145,6 +145,13 @@ async fn start_forwarder(cln_token: CancellationToken, sdk_config: SDKConfig) -> ) .await?; + let (source_reader, lag_reader) = new_source( + source_grpc_client.clone(), + config().batch_size as usize, + config().timeout_in_ms as u16, + ) + .await?; + // Start the metrics server in a separate background async spawn, // This should be running throughout the lifetime of the application, hence the handle is not // joined. 
@@ -159,12 +166,12 @@ async fn start_forwarder(cln_token: CancellationToken, sdk_config: SDKConfig) -> // FIXME: what to do with the handle utils::start_metrics_server(metrics_state).await; - // start the lag reader to publish lag metrics - let mut lag_reader = utils::create_lag_reader(source_grpc_client.clone()).await; - lag_reader.start().await; + // start the pending reader to publish pending metrics + let mut pending_reader = utils::create_pending_reader(lag_reader).await; + pending_reader.start().await; // build the forwarder - let source_reader = Source::new(source_grpc_client.clone()).await?; + let sink_writer = SinkWriter::new(sink_grpc_client.clone()).await?; let mut forwarder_builder = ForwarderBuilder::new(source_reader, sink_writer, cln_token); diff --git a/rust/numaflow-core/src/monovertex/forwarder.rs b/rust/numaflow-core/src/monovertex/forwarder.rs index ab58cfad03..130305d01d 100644 --- a/rust/numaflow-core/src/monovertex/forwarder.rs +++ b/rust/numaflow-core/src/monovertex/forwarder.rs @@ -6,21 +6,20 @@ use tokio_util::sync::CancellationToken; use tracing::{debug, info}; use crate::config::{config, OnFailureStrategy}; -use crate::error; use crate::error::Error; use crate::message::{Message, Offset}; use crate::monovertex::metrics; use crate::monovertex::metrics::forward_metrics; use crate::monovertex::sink_pb::Status::{Failure, Fallback, Success}; use crate::sink::user_defined::SinkWriter; -use crate::source::user_defined::Source; use crate::transformer::user_defined::SourceTransformer; +use crate::{error, source}; /// Forwarder is responsible for reading messages from the source, applying transformation if /// transformer is present, writing the messages to the sink, and then acknowledging the messages /// back to the source. 
-pub(crate) struct Forwarder { - source: Source, +pub(crate) struct Forwarder { + source: T, sink_writer: SinkWriter, source_transformer: Option, fb_sink_writer: Option, @@ -29,21 +28,17 @@ pub(crate) struct Forwarder { } /// ForwarderBuilder is used to build a Forwarder instance with optional fields. -pub(crate) struct ForwarderBuilder { - source: Source, +pub(crate) struct ForwarderBuilder { + source: T, sink_writer: SinkWriter, cln_token: CancellationToken, source_transformer: Option, fb_sink_writer: Option, } -impl ForwarderBuilder { +impl ForwarderBuilder { /// Create a new builder with mandatory fields - pub(crate) fn new( - source: Source, - sink_writer: SinkWriter, - cln_token: CancellationToken, - ) -> Self { + pub(crate) fn new(source: T, sink_writer: SinkWriter, cln_token: CancellationToken) -> Self { Self { source, sink_writer, @@ -67,7 +62,7 @@ impl ForwarderBuilder { /// Build the Forwarder instance #[must_use] - pub(crate) fn build(self) -> Forwarder { + pub(crate) fn build(self) -> Forwarder { let common_labels = metrics::forward_metrics_labels().clone(); Forwarder { source: self.source, @@ -80,7 +75,10 @@ impl ForwarderBuilder { } } -impl Forwarder { +impl Forwarder +where + T: source::Source, +{ /// start starts the forward-a-chunk loop and exits only after a chunk has been forwarded and ack'ed. /// this means that, in the happy path scenario a block is always completely processed. /// this function will return on any error and will cause end up in a non-0 exit code. @@ -121,13 +119,9 @@ impl Forwarder { /// and then acknowledge the messages back to the source. 
async fn read_and_process_messages(&mut self) -> error::Result { let start_time = tokio::time::Instant::now(); - let messages = self - .source - .read(config().batch_size, config().timeout_in_ms) - .await - .map_err(|e| { - Error::ForwarderError(format!("Failed to read messages from source {:?}", e)) - })?; + let messages = self.source.read().await.map_err(|e| { + Error::ForwarderError(format!("Failed to read messages from source {:?}", e)) + })?; debug!( "Read batch size: {} and latency - {}ms", @@ -542,21 +536,21 @@ impl Forwarder { mod tests { use std::collections::HashSet; - use chrono::Utc; - use numaflow::source::{Message, Offset, SourceReadRequest}; - use numaflow::{sink, source, sourcetransform}; - use tokio::sync::mpsc; - use tokio::sync::mpsc::Sender; - use tokio_util::sync::CancellationToken; - + use crate::config::config; use crate::monovertex::forwarder::ForwarderBuilder; use crate::monovertex::sink_pb::sink_client::SinkClient; use crate::monovertex::source_pb::source_client::SourceClient; use crate::monovertex::sourcetransform_pb::source_transform_client::SourceTransformClient; use crate::shared::utils::create_rpc_channel; use crate::sink::user_defined::SinkWriter; - use crate::source::user_defined::Source; + use crate::source::user_defined::UserDefinedSource; use crate::transformer::user_defined::SourceTransformer; + use chrono::Utc; + use numaflow::source::{Message, Offset, SourceReadRequest}; + use numaflow::{sink, source, sourcetransform}; + use tokio::sync::mpsc; + use tokio::sync::mpsc::Sender; + use tokio_util::sync::CancellationToken; struct SimpleSource { yet_to_be_acked: std::sync::RwLock>, @@ -735,9 +729,11 @@ mod tests { let cln_token = CancellationToken::new(); - let source = Source::new(SourceClient::new( - create_rpc_channel(source_sock_file.clone()).await.unwrap(), - )) + let source = UserDefinedSource::new( + SourceClient::new(create_rpc_channel(source_sock_file.clone()).await.unwrap()), + config().batch_size as usize, + 
config().timeout_in_ms as u16, + ) .await .expect("failed to connect to source server"); @@ -857,9 +853,11 @@ mod tests { let cln_token = CancellationToken::new(); - let source = Source::new(SourceClient::new( - create_rpc_channel(source_sock_file.clone()).await.unwrap(), - )) + let source = UserDefinedSource::new( + SourceClient::new(create_rpc_channel(source_sock_file.clone()).await.unwrap()), + 500, + 100, + ) .await .expect("failed to connect to source server"); @@ -971,9 +969,11 @@ mod tests { let cln_token = CancellationToken::new(); - let source = Source::new(SourceClient::new( - create_rpc_channel(source_sock_file.clone()).await.unwrap(), - )) + let source = UserDefinedSource::new( + SourceClient::new(create_rpc_channel(source_sock_file.clone()).await.unwrap()), + 500, + 100, + ) .await .expect("failed to connect to source server"); diff --git a/rust/numaflow-core/src/monovertex/metrics.rs b/rust/numaflow-core/src/monovertex/metrics.rs index 496f14330f..adbbdde3b2 100644 --- a/rust/numaflow-core/src/monovertex/metrics.rs +++ b/rust/numaflow-core/src/monovertex/metrics.rs @@ -21,6 +21,7 @@ use crate::error::Error; use crate::monovertex::sink_pb::sink_client::SinkClient; use crate::monovertex::source_pb::source_client::SourceClient; use crate::monovertex::sourcetransform_pb::source_transform_client::SourceTransformClient; +use crate::reader; use prometheus_client::encoding::text::encode; use prometheus_client::metrics::counter::Counter; use prometheus_client::metrics::family::Family; @@ -362,11 +363,11 @@ struct TimestampedPending { timestamp: std::time::Instant, } -/// `LagReader` is responsible for periodically checking the lag of the source client +/// PendingReader is responsible for periodically checking the lag of the reader /// and exposing the metrics. It maintains a list of pending stats and ensures that /// only the most recent entries are kept. 
-pub(crate) struct LagReader { - source_client: SourceClient, +pub(crate) struct PendingReader { + lag_reader: T, lag_checking_interval: Duration, refresh_interval: Duration, buildup_handle: Option>, @@ -374,17 +375,17 @@ pub(crate) struct LagReader { pending_stats: Arc>>, } -/// LagReaderBuilder is used to build a `LagReader` instance. -pub(crate) struct LagReaderBuilder { - source_client: SourceClient, +/// PendingReaderBuilder is used to build a [PendingReader] instance. +pub(crate) struct PendingReaderBuilder { + lag_reader: T, lag_checking_interval: Option, refresh_interval: Option, } -impl LagReaderBuilder { - pub(crate) fn new(source_client: SourceClient) -> Self { +impl PendingReaderBuilder { + pub(crate) fn new(lag_reader: T) -> Self { Self { - source_client, + lag_reader, lag_checking_interval: None, refresh_interval: None, } @@ -400,9 +401,9 @@ impl LagReaderBuilder { self } - pub(crate) fn build(self) -> LagReader { - LagReader { - source_client: self.source_client, + pub(crate) fn build(self) -> PendingReader { + PendingReader { + lag_reader: self.lag_reader, lag_checking_interval: self .lag_checking_interval .unwrap_or_else(|| Duration::from_secs(3)), @@ -416,20 +417,20 @@ impl LagReaderBuilder { } } -impl LagReader { +impl PendingReader { /// Starts the lag reader by spawning tasks to build up pending info and expose pending metrics. /// /// This method spawns two asynchronous tasks: /// - One to periodically check the lag and update the pending stats. /// - Another to periodically expose the pending metrics.
pub async fn start(&mut self) { - let source_client = self.source_client.clone(); + let pending_reader = self.lag_reader.clone(); let lag_checking_interval = self.lag_checking_interval; let refresh_interval = self.refresh_interval; let pending_stats = self.pending_stats.clone(); self.buildup_handle = Some(tokio::spawn(async move { - build_pending_info(source_client, lag_checking_interval, pending_stats).await; + build_pending_info(pending_reader, lag_checking_interval, pending_stats).await; })); let pending_stats = self.pending_stats.clone(); @@ -439,8 +440,8 @@ impl LagReader { } } -/// When lag-reader is dropped, we need to clean up the pending exposer and the pending builder tasks. -impl Drop for LagReader { +/// When the PendingReader is dropped, we need to clean up the pending exposer and the pending builder tasks. +impl Drop for PendingReader { fn drop(&mut self) { if let Some(handle) = self.expose_handle.take() { handle.abort(); @@ -454,15 +455,15 @@ impl Drop for LagReader { } /// Periodically checks the pending messages from the source client and build the pending stats. -async fn build_pending_info( - mut source_client: SourceClient, +async fn build_pending_info( + mut lag_reader: T, lag_checking_interval: Duration, pending_stats: Arc>>, ) { let mut ticker = time::interval(lag_checking_interval); loop { ticker.tick().await; - match fetch_pending(&mut source_client).await { + match fetch_pending(&mut lag_reader).await { Ok(pending) => { if pending != -1 { let mut stats = pending_stats.lock().await; @@ -484,14 +485,8 @@ async fn build_pending_info( } } -async fn fetch_pending(source_client: &mut SourceClient) -> crate::error::Result { - let request = Request::new(()); - let response = source_client - .pending_fn(request) - .await? 
- .into_inner() - .result - .map_or(-1, |r| r.count); // default to -1(unavailable) +async fn fetch_pending(lag_reader: &mut T) -> crate::error::Result { + let response: i64 = lag_reader.pending().await?.map_or(-1, |p| p as i64); // default to -1(unavailable) Ok(response) } @@ -556,8 +551,6 @@ async fn calculate_pending( result } -// TODO add tests - #[cfg(test)] mod tests { use super::*; diff --git a/rust/numaflow-core/src/reader.rs b/rust/numaflow-core/src/reader.rs new file mode 100644 index 0000000000..43dc7e8fc9 --- /dev/null +++ b/rust/numaflow-core/src/reader.rs @@ -0,0 +1,9 @@ +/// Lag reader reports the pending information at Reader (source, ISBs), this information is used by +/// the auto-scaler. +#[trait_variant::make(LagReader: Send)] +#[allow(dead_code)] +pub(crate) trait LocalLagReader { + /// Pending elements yet to be read from the stream. The stream could be the [crate::source], or ISBs + /// It may or may not include unacknowledged messages. + async fn pending(&mut self) -> crate::error::Result>; +} diff --git a/rust/numaflow-core/src/shared/server_info.rs b/rust/numaflow-core/src/shared/server_info.rs index 0df809363f..f058b7a313 100644 --- a/rust/numaflow-core/src/shared/server_info.rs +++ b/rust/numaflow-core/src/shared/server_info.rs @@ -73,7 +73,12 @@ pub(crate) async fn check_for_server_compatibility( } else { // Get minimum supported SDK versions and check compatibility let min_supported_sdk_versions = version::get_minimum_supported_sdk_versions(); - check_sdk_compatibility(sdk_version, sdk_language, container_type, min_supported_sdk_versions)?; + check_sdk_compatibility( + sdk_version, + sdk_language, + container_type, + min_supported_sdk_versions, + )?; } Ok(()) @@ -110,19 +115,20 @@ fn check_numaflow_compatibility( fn check_sdk_compatibility( sdk_version: &str, sdk_language: &str, - container_type : &str, + container_type: &str, min_supported_sdk_versions: &SdkConstraints, ) -> error::Result<()> { // Check if the SDK language is present 
in the minimum supported SDK versions if !min_supported_sdk_versions.contains_key(sdk_language) { return Err(Error::ServerInfoError(format!( "SDK version constraint not found for language: {}, container type: {}", - sdk_language, - container_type + sdk_language, container_type ))); } let empty_map = HashMap::new(); - let lang_constraints = min_supported_sdk_versions.get(sdk_language).unwrap_or(&empty_map); + let lang_constraints = min_supported_sdk_versions + .get(sdk_language) + .unwrap_or(&empty_map); if let Some(sdk_required_version) = lang_constraints.get(container_type) { let sdk_constraint = format!(">={}", sdk_required_version); @@ -161,15 +167,13 @@ fn check_sdk_compatibility( // Language not found in the supported SDK versions warn!( "SDK version constraint not found for language: {}, container type: {}", - sdk_language, - container_type + sdk_language, container_type ); // Return error indicating the language return Err(Error::ServerInfoError(format!( "SDK version constraint not found for language: {}, container type: {}", - sdk_language, - container_type + sdk_language, container_type ))); } Ok(()) @@ -263,9 +267,7 @@ fn trim_after_dash(input: &str) -> &str { /// The file name is in the format of -server-info. fn get_container_type(server_info_file: &PathBuf) -> Option<&str> { let file_name = server_info_file.file_name()?; - let container_type = file_name - .to_str()? 
- .trim_end_matches("-server-info"); + let container_type = file_name.to_str()?.trim_end_matches("-server-info"); if container_type.is_empty() { None } else { @@ -562,8 +564,12 @@ mod tests { let sdk_language = "python"; let min_supported_sdk_versions = create_sdk_constraints_stable_versions(); - let result = - check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, &min_supported_sdk_versions); + let result = check_sdk_compatibility( + sdk_version, + sdk_language, + TEST_CONTAINER_TYPE, + &min_supported_sdk_versions, + ); assert!(result.is_ok()); } @@ -574,8 +580,12 @@ mod tests { let sdk_language = "python"; let min_supported_sdk_versions = create_sdk_constraints_stable_versions(); - let result = - check_sdk_compatibility(sdk_version, sdk_language,TEST_CONTAINER_TYPE, &min_supported_sdk_versions); + let result = check_sdk_compatibility( + sdk_version, + sdk_language, + TEST_CONTAINER_TYPE, + &min_supported_sdk_versions, + ); assert!(result.is_err()); assert!( @@ -589,8 +599,12 @@ mod tests { let sdk_language = "python"; let min_supported_sdk_versions = create_sdk_constraints_stable_versions(); - let result = - check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, &min_supported_sdk_versions); + let result = check_sdk_compatibility( + sdk_version, + sdk_language, + TEST_CONTAINER_TYPE, + &min_supported_sdk_versions, + ); assert!(result.is_ok()); } @@ -601,8 +615,12 @@ mod tests { let sdk_language = "python"; let min_supported_sdk_versions = create_sdk_constraints_stable_versions(); - let result = - check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, &min_supported_sdk_versions); + let result = check_sdk_compatibility( + sdk_version, + sdk_language, + TEST_CONTAINER_TYPE, + &min_supported_sdk_versions, + ); assert!(result.is_err()); assert!( @@ -616,8 +634,12 @@ mod tests { let sdk_language = "java"; let min_supported_sdk_versions = create_sdk_constraints_stable_versions(); - let result = - 
check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, &min_supported_sdk_versions); + let result = check_sdk_compatibility( + sdk_version, + sdk_language, + TEST_CONTAINER_TYPE, + &min_supported_sdk_versions, + ); assert!(result.is_ok()); } @@ -628,8 +650,12 @@ mod tests { let sdk_language = "java"; let min_supported_sdk_versions = create_sdk_constraints_stable_versions(); - let result = - check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, &min_supported_sdk_versions); + let result = check_sdk_compatibility( + sdk_version, + sdk_language, + TEST_CONTAINER_TYPE, + &min_supported_sdk_versions, + ); assert!(result.is_err()); assert!( @@ -643,8 +669,12 @@ mod tests { let sdk_language = "go"; let min_supported_sdk_versions = create_sdk_constraints_stable_versions(); - let result = - check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, &min_supported_sdk_versions); + let result = check_sdk_compatibility( + sdk_version, + sdk_language, + TEST_CONTAINER_TYPE, + &min_supported_sdk_versions, + ); assert!(result.is_ok()); } @@ -655,8 +685,12 @@ mod tests { let sdk_language = "go"; let min_supported_sdk_versions = create_sdk_constraints_stable_versions(); - let result = - check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, &min_supported_sdk_versions); + let result = check_sdk_compatibility( + sdk_version, + sdk_language, + TEST_CONTAINER_TYPE, + &min_supported_sdk_versions, + ); assert!(result.is_err()); assert!( @@ -670,8 +704,12 @@ mod tests { let sdk_language = "rust"; let min_supported_sdk_versions = create_sdk_constraints_stable_versions(); - let result = - check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, &min_supported_sdk_versions); + let result = check_sdk_compatibility( + sdk_version, + sdk_language, + TEST_CONTAINER_TYPE, + &min_supported_sdk_versions, + ); assert!(result.is_ok()); } @@ -682,8 +720,12 @@ mod tests { let sdk_language = "rust"; let 
min_supported_sdk_versions = create_sdk_constraints_stable_versions(); - let result = - check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, &min_supported_sdk_versions); + let result = check_sdk_compatibility( + sdk_version, + sdk_language, + TEST_CONTAINER_TYPE, + &min_supported_sdk_versions, + ); assert!(result.is_err()); assert!( @@ -697,8 +739,12 @@ mod tests { let sdk_language = "python"; let min_supported_sdk_versions = create_sdk_constraints_pre_release_versions(); - let result = - check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, &min_supported_sdk_versions); + let result = check_sdk_compatibility( + sdk_version, + sdk_language, + TEST_CONTAINER_TYPE, + &min_supported_sdk_versions, + ); assert!(result.is_ok()); } @@ -709,8 +755,12 @@ mod tests { let sdk_language = "python"; let min_supported_sdk_versions = create_sdk_constraints_pre_release_versions(); - let result = - check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, &min_supported_sdk_versions); + let result = check_sdk_compatibility( + sdk_version, + sdk_language, + TEST_CONTAINER_TYPE, + &min_supported_sdk_versions, + ); assert!(result.is_err()); assert!( @@ -724,8 +774,12 @@ mod tests { let sdk_language = "python"; let min_supported_sdk_versions = create_sdk_constraints_pre_release_versions(); - let result = - check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, &min_supported_sdk_versions); + let result = check_sdk_compatibility( + sdk_version, + sdk_language, + TEST_CONTAINER_TYPE, + &min_supported_sdk_versions, + ); assert!(result.is_ok()); } @@ -736,8 +790,12 @@ mod tests { let sdk_language = "python"; let min_supported_sdk_versions = create_sdk_constraints_pre_release_versions(); - let result = - check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, &min_supported_sdk_versions); + let result = check_sdk_compatibility( + sdk_version, + sdk_language, + TEST_CONTAINER_TYPE, + 
&min_supported_sdk_versions, + ); assert!(result.is_err()); assert!( @@ -751,8 +809,12 @@ mod tests { let sdk_language = "java"; let min_supported_sdk_versions = create_sdk_constraints_pre_release_versions(); - let result = - check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, &min_supported_sdk_versions); + let result = check_sdk_compatibility( + sdk_version, + sdk_language, + TEST_CONTAINER_TYPE, + &min_supported_sdk_versions, + ); assert!(result.is_ok()); } @@ -763,8 +825,12 @@ mod tests { let sdk_language = "java"; let min_supported_sdk_versions = create_sdk_constraints_pre_release_versions(); - let result = - check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, &min_supported_sdk_versions); + let result = check_sdk_compatibility( + sdk_version, + sdk_language, + TEST_CONTAINER_TYPE, + &min_supported_sdk_versions, + ); assert!(result.is_err()); assert!( @@ -778,8 +844,12 @@ mod tests { let sdk_language = "go"; let min_supported_sdk_versions = create_sdk_constraints_pre_release_versions(); - let result = - check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, &min_supported_sdk_versions); + let result = check_sdk_compatibility( + sdk_version, + sdk_language, + TEST_CONTAINER_TYPE, + &min_supported_sdk_versions, + ); assert!(result.is_ok()); } @@ -790,8 +860,12 @@ mod tests { let sdk_language = "go"; let min_supported_sdk_versions = create_sdk_constraints_pre_release_versions(); - let result = - check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, &min_supported_sdk_versions); + let result = check_sdk_compatibility( + sdk_version, + sdk_language, + TEST_CONTAINER_TYPE, + &min_supported_sdk_versions, + ); assert!(result.is_err()); assert!( @@ -805,8 +879,12 @@ mod tests { let sdk_language = "rust"; let min_supported_sdk_versions = create_sdk_constraints_pre_release_versions(); - let result = - check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, 
&min_supported_sdk_versions); + let result = check_sdk_compatibility( + sdk_version, + sdk_language, + TEST_CONTAINER_TYPE, + &min_supported_sdk_versions, + ); assert!(result.is_ok()); } @@ -817,8 +895,12 @@ mod tests { let sdk_language = "rust"; let min_supported_sdk_versions = create_sdk_constraints_pre_release_versions(); - let result = - check_sdk_compatibility(sdk_version, sdk_language, TEST_CONTAINER_TYPE, &min_supported_sdk_versions); + let result = check_sdk_compatibility( + sdk_version, + sdk_language, + TEST_CONTAINER_TYPE, + &min_supported_sdk_versions, + ); assert!(result.is_err()); assert!( diff --git a/rust/numaflow-core/src/shared/utils.rs b/rust/numaflow-core/src/shared/utils.rs index 99e9cef995..4068aaf9b6 100644 --- a/rust/numaflow-core/src/shared/utils.rs +++ b/rust/numaflow-core/src/shared/utils.rs @@ -3,15 +3,15 @@ use std::path::PathBuf; use std::time::Duration; use crate::config::config; -use crate::error; use crate::error::Error; use crate::monovertex::metrics::{ - start_metrics_https_server, LagReader, LagReaderBuilder, MetricsState, + start_metrics_https_server, MetricsState, PendingReader, PendingReaderBuilder, }; use crate::monovertex::sink_pb::sink_client::SinkClient; use crate::monovertex::source_pb::source_client::SourceClient; use crate::monovertex::sourcetransform_pb::source_transform_client::SourceTransformClient; use crate::shared::server_info; +use crate::{error, reader}; use axum::http::Uri; use backoff::retry::Retry; @@ -81,8 +81,10 @@ pub(crate) async fn start_metrics_server(metrics_state: MetricsState) -> JoinHan }) } -pub(crate) async fn create_lag_reader(lag_reader_grpc_client: SourceClient) -> LagReader { - LagReaderBuilder::new(lag_reader_grpc_client) +pub(crate) async fn create_pending_reader( + lag_reader_grpc_client: T, +) -> PendingReader { + PendingReaderBuilder::new(lag_reader_grpc_client) .lag_checking_interval(Duration::from_secs( config().lag_check_interval_in_secs.into(), )) diff --git 
a/rust/numaflow-core/src/source.rs b/rust/numaflow-core/src/source.rs index 4ba2755ce7..34108bec1a 100644 --- a/rust/numaflow-core/src/source.rs +++ b/rust/numaflow-core/src/source.rs @@ -1,4 +1,22 @@ +use crate::message::{Message, Offset}; + /// [User-Defined Source] extends Numaflow to add custom sources supported outside the builtins. /// /// [User-Defined Source]: https://numaflow.numaproj.io/user-guide/sources/user-defined-sources/ pub(crate) mod user_defined; + +/// Set of items that has to be implemented to become a Source. +pub(crate) trait Source { + #[allow(dead_code)] + /// Name of the source. + fn name(&self) -> &'static str; + + async fn read(&mut self) -> crate::Result>; + + /// acknowledge an offset. The implementor might choose to do it in an asynchronous way. + async fn ack(&mut self, _: Vec) -> crate::Result<()>; + + #[allow(dead_code)] + /// number of partitions processed by this source. + fn partitions(&self) -> Vec; +} diff --git a/rust/numaflow-core/src/source/user_defined.rs b/rust/numaflow-core/src/source/user_defined.rs index b51324a68e..53ab9d9dcd 100644 --- a/rust/numaflow-core/src/source/user_defined.rs +++ b/rust/numaflow-core/src/source/user_defined.rs @@ -5,24 +5,44 @@ use crate::message::{Message, Offset}; use crate::monovertex::source_pb; use crate::monovertex::source_pb::source_client::SourceClient; use crate::monovertex::source_pb::{ - ack_response, read_request, AckRequest, AckResponse, ReadRequest, ReadResponse, + read_request, AckRequest, AckResponse, ReadRequest, ReadResponse, }; +use crate::reader::LagReader; +use crate::source::Source; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; use tonic::transport::Channel; use tonic::{Request, Streaming}; -/// SourceReader reads messages from a source. +/// User-Defined Source to operate on custom sources.
#[derive(Debug)] -pub(crate) struct Source { +pub(crate) struct UserDefinedSource { read_tx: mpsc::Sender, resp_stream: Streaming, ack_tx: mpsc::Sender, ack_resp_stream: Streaming, + num_records: usize, + timeout_in_ms: u16, } -impl Source { - pub(crate) async fn new(mut client: SourceClient) -> error::Result { +/// Creates a new User-Defined Source and its corresponding Lag Reader. +pub(crate) async fn new_source( + client: SourceClient, + num_records: usize, + timeout_in_ms: u16, +) -> error::Result<(UserDefinedSource, UserDefinedSourceLagReader)> { + let ud_src = UserDefinedSource::new(client.clone(), num_records, timeout_in_ms).await?; + let lag_reader = UserDefinedSourceLagReader::new(client); + + Ok((ud_src, lag_reader)) +} + +impl UserDefinedSource { + pub(crate) async fn new( + mut client: SourceClient, + num_records: usize, + timeout_in_ms: u16, + ) -> error::Result { let (read_tx, resp_stream) = Self::create_reader(&mut client).await?; let (ack_tx, ack_resp_stream) = Self::create_acker(&mut client).await?; @@ -31,6 +51,8 @@ impl Source { resp_stream, ack_tx, ack_resp_stream, + num_records, + timeout_in_ms, }) } @@ -98,16 +120,18 @@ impl Source { Ok((ack_tx, ack_resp_stream)) } +} + +impl Source for UserDefinedSource { + fn name(&self) -> &'static str { + "user-defined-source" + } - pub(crate) async fn read( - &mut self, - num_records: u64, - timeout_in_ms: u32, - ) -> error::Result> { + async fn read(&mut self) -> error::Result> { let request = ReadRequest { request: Some(read_request::Request { - num_records, - timeout_in_ms, + num_records: self.num_records as u64, + timeout_in_ms: self.timeout_in_ms as u32, }), handshake: None, }; @@ -117,7 +141,7 @@ impl Source { .await .map_err(|e| SourceError(e.to_string()))?; - let mut messages = Vec::with_capacity(num_records as usize); + let mut messages = Vec::with_capacity(self.num_records); while let Some(response) = self.resp_stream.message().await? 
{ if response.status.map_or(false, |status| status.eot) { @@ -133,7 +157,7 @@ impl Source { Ok(messages) } - pub(crate) async fn ack(&mut self, offsets: Vec) -> error::Result { + async fn ack(&mut self, offsets: Vec) -> error::Result<()> { let n = offsets.len(); // send n ack requests @@ -154,20 +178,46 @@ impl Source { .ok_or(SourceError("failed to receive ack response".to_string()))?; } - Ok(AckResponse { - result: Some(ack_response::Result { success: Some(()) }), - handshake: None, - }) + Ok(()) + } + + fn partitions(&self) -> Vec { + todo!() + } +} + +#[derive(Clone)] +pub(crate) struct UserDefinedSourceLagReader { + source_client: SourceClient, +} + +impl UserDefinedSourceLagReader { + fn new(source_client: SourceClient) -> Self { + Self { source_client } + } +} + +impl LagReader for UserDefinedSourceLagReader { + async fn pending(&mut self) -> error::Result> { + Ok(self + .source_client + .pending_fn(Request::new(())) + .await? + .into_inner() + .result + .map(|r| r.count as usize)) } } #[cfg(test)] mod tests { + use super::*; + use std::collections::HashSet; use crate::monovertex::source_pb::source_client::SourceClient; use crate::shared::utils::create_rpc_channel; - use crate::source::user_defined::Source; + use chrono::Utc; use numaflow::source; use numaflow::source::{Message, Offset, SourceReadRequest}; @@ -253,19 +303,21 @@ mod tests { let client = SourceClient::new(create_rpc_channel(sock_file).await.unwrap()); - let mut source = Source::new(client) + let (mut source, mut lag_reader) = new_source(client, 5, 1000) .await .map_err(|e| panic!("failed to create source reader: {:?}", e)) .unwrap(); - let messages = source.read(5, 1000).await.unwrap(); + let messages = source.read().await.unwrap(); assert_eq!(messages.len(), 5); let response = source .ack(messages.iter().map(|m| m.offset.clone()).collect()) - .await - .unwrap(); - assert!(response.result.unwrap().success.is_some()); + .await; + assert!(response.is_ok()); + + let pending = 
lag_reader.pending().await.unwrap(); + assert_eq!(pending, Some(0)); // we need to drop the client, because if there are any in-flight requests // server fails to shut down. https://github.com/numaproj/numaflow-rs/issues/85 diff --git a/rust/numaflow-core/src/transformer/user_defined.rs b/rust/numaflow-core/src/transformer/user_defined.rs index 71a9d24cd6..b2564b0e72 100644 --- a/rust/numaflow-core/src/transformer/user_defined.rs +++ b/rust/numaflow-core/src/transformer/user_defined.rs @@ -1,17 +1,20 @@ use std::collections::HashMap; -use tonic::transport::Channel; -use tonic::{Request, Streaming}; +use crate::config::config; +use crate::error::{Error, Result}; +use crate::message::{Message, Offset}; +use crate::monovertex::sourcetransform_pb::{ + self, source_transform_client::SourceTransformClient, SourceTransformRequest, + SourceTransformResponse, +}; +use crate::shared::utils::utc_from_timestamp; use tokio::sync::mpsc; use tokio::task::JoinHandle; use tokio_stream::wrappers::ReceiverStream; use tokio_util::sync::CancellationToken; +use tonic::transport::Channel; +use tonic::{Request, Streaming}; use tracing::warn; -use crate::error::{Result, Error}; -use crate::message::{Message, Offset}; -use crate::monovertex::sourcetransform_pb::{self, SourceTransformRequest, SourceTransformResponse, source_transform_client::SourceTransformClient}; -use crate::shared::utils::utc_from_timestamp; -use crate::config::config; const DROP: &str = "U+005C__DROP__"; @@ -216,7 +219,7 @@ mod tests { let mut client = SourceTransformer::new(SourceTransformClient::new( create_rpc_channel(sock_file).await?, )) - .await?; + .await?; let message = crate::message::Message { keys: vec!["first".into()], @@ -234,7 +237,7 @@ mod tests { tokio::time::Duration::from_secs(2), client.transform_fn(vec![message]), ) - .await??; + .await??; assert_eq!(resp.len(), 1); // we need to drop the client, because if there are any in-flight requests @@ -291,7 +294,7 @@ mod tests { let mut client = 
SourceTransformer::new(SourceTransformClient::new( create_rpc_channel(sock_file).await?, )) - .await?; + .await?; let message = crate::message::Message { keys: vec!["second".into()], @@ -318,4 +321,4 @@ mod tests { handle.await.expect("failed to join server task"); Ok(()) } -} \ No newline at end of file +} From 772907a3f7ebd36b3693faa57ce9b3b81cf856e8 Mon Sep 17 00:00:00 2001 From: Keran Yang Date: Thu, 3 Oct 2024 09:56:34 -0400 Subject: [PATCH 083/188] chore: add rust formatting check to CI (#2117) --- .github/workflows/ci.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index e9861ca408..698f2f9ee0 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -122,6 +122,11 @@ jobs: CARGO_INCREMENTAL=0 RUSTFLAGS='-Cinstrument-coverage' LLVM_PROFILE_FILE='./target/debug/coverage/cargo-test-%p-%m.profraw' cargo test --all-features --workspace --all grcov . -s ./target/debug/coverage/ --binary-path ./target/debug/ -t lcov --branch --ignore-not-existing -o ./target/debug/coverage/lcov.info + - name: Check Rust formatting + working-directory: ./rust + run: | + cargo fmt -- --check + - name: Upload coverage reports to Codecov uses: codecov/codecov-action@v4 with: From 7586ffb056f3155fd9f13ba8dee33be38851ce94 Mon Sep 17 00:00:00 2001 From: Sreekanth Date: Thu, 3 Oct 2024 20:26:14 +0530 Subject: [PATCH 084/188] Debugging unit test timeout in CI (#2118) Signed-off-by: Sreekanth --- .github/workflows/ci.yaml | 37 ++++++++++++++++++++++++++----------- Makefile | 2 +- 2 files changed, 27 insertions(+), 12 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 698f2f9ee0..afcea04945 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -43,7 +43,7 @@ jobs: - name: Setup Golang uses: actions/setup-go@v5 with: - go-version: '1.22' + go-version: "1.22" - name: Add bins to PATH run: | echo /home/runner/go/bin >> $GITHUB_PATH @@ -72,7 +72,7 @@ jobs: 
--health-timeout 5s --health-retries 5 nats: - image: 'bitnami/nats:latest' + image: "bitnami/nats:latest" ports: - 4222:4222 env: @@ -81,7 +81,7 @@ jobs: - name: Set up Go 1.x uses: actions/setup-go@v5 with: - go-version: '1.22' + go-version: "1.22" id: go - name: Check out code @@ -94,11 +94,11 @@ jobs: key: ${{ runner.os }}-go-build-v1-${{ github.run_id }} - name: Get dependencies - run: go mod download + run: go mod download -x - name: Test Go run: make test-coverage-with-isb - + - name: Install Rust uses: actions-rust-lang/setup-rust-toolchain@v1 with: @@ -119,7 +119,7 @@ jobs: - name: Test Rust working-directory: ./rust run: | - CARGO_INCREMENTAL=0 RUSTFLAGS='-Cinstrument-coverage' LLVM_PROFILE_FILE='./target/debug/coverage/cargo-test-%p-%m.profraw' cargo test --all-features --workspace --all + CARGO_INCREMENTAL=0 RUSTFLAGS='-Cinstrument-coverage' LLVM_PROFILE_FILE='./target/debug/coverage/cargo-test-%p-%m.profraw' cargo test --all-features --workspace --all grcov . -s ./target/debug/coverage/ --binary-path ./target/debug/ -t lcov --branch --ignore-not-existing -o ./target/debug/coverage/lcov.info - name: Check Rust formatting @@ -145,7 +145,7 @@ jobs: - name: Setup Golang uses: actions/setup-go@v5 with: - go-version: '1.22' + go-version: "1.22" - name: Restore Go build cache uses: actions/cache@v4 with: @@ -165,7 +165,7 @@ jobs: uses: actions-rust-lang/setup-rust-toolchain@v1.9.0 with: cache-workspaces: rust -> target - rustflags: '' + rustflags: "" - name: Configure sccache run: | echo "RUSTC_WRAPPER=sccache" >> $GITHUB_ENV @@ -192,13 +192,28 @@ jobs: e2e-tests: name: E2E Tests runs-on: ubuntu-latest - needs: [ build-rust-amd64 ] + needs: [build-rust-amd64] timeout-minutes: 20 strategy: fail-fast: false matrix: driver: [jetstream] - case: [e2e, diamond-e2e, transformer-e2e, kafka-e2e, map-e2e, reduce-one-e2e, reduce-two-e2e, udsource-e2e, api-e2e, sideinputs-e2e, idle-source-e2e, monovertex-e2e, builtin-source-e2e] + case: + [ + e2e, + diamond-e2e, + 
transformer-e2e, + kafka-e2e, + map-e2e, + reduce-one-e2e, + reduce-two-e2e, + udsource-e2e, + api-e2e, + sideinputs-e2e, + idle-source-e2e, + monovertex-e2e, + builtin-source-e2e, + ] include: - driver: redis case: e2e @@ -218,7 +233,7 @@ jobs: - name: Setup Golang uses: actions/setup-go@v5 with: - go-version: '1.22' + go-version: "1.22" - name: Add bins to PATH run: | echo /home/runner/go/bin >> $GITHUB_PATH diff --git a/Makefile b/Makefile index 11d91c5890..6ff001e100 100644 --- a/Makefile +++ b/Makefile @@ -105,7 +105,7 @@ test-coverage: .PHONY: test-coverage-with-isb test-coverage-with-isb: - go test -covermode=atomic -coverprofile=test/profile.cov -tags=isb_redis $(shell go list ./... | grep -v /vendor/ | grep -v /numaflow/test/ | grep -v /pkg/client/ | grep -v /pkg/proto/ | grep -v /hack/) + go test -v -timeout 7m -covermode=atomic -coverprofile=test/profile.cov -tags=isb_redis $(shell go list ./... | grep -v /vendor/ | grep -v /numaflow/test/ | grep -v /pkg/client/ | grep -v /pkg/proto/ | grep -v /hack/) go tool cover -func=test/profile.cov .PHONY: test-code From 6b4687578b3f4ae4cb5ce03812bcd2a216dc3af3 Mon Sep 17 00:00:00 2001 From: Vigith Maurice Date: Thu, 3 Oct 2024 08:04:06 -0700 Subject: [PATCH 085/188] chore: separate ack (#2119) Signed-off-by: Vigith Maurice --- rust/numaflow-core/src/monovertex.rs | 5 +- .../numaflow-core/src/monovertex/forwarder.rs | 61 +++++---- rust/numaflow-core/src/source.rs | 13 +- rust/numaflow-core/src/source/user_defined.rs | 127 ++++++++++-------- 4 files changed, 122 insertions(+), 84 deletions(-) diff --git a/rust/numaflow-core/src/monovertex.rs b/rust/numaflow-core/src/monovertex.rs index 3aecbe066a..601957a94b 100644 --- a/rust/numaflow-core/src/monovertex.rs +++ b/rust/numaflow-core/src/monovertex.rs @@ -145,7 +145,7 @@ async fn start_forwarder(cln_token: CancellationToken, sdk_config: SDKConfig) -> ) .await?; - let (source_reader, lag_reader) = new_source( + let (source_read, source_ack, lag_reader) = new_source( 
source_grpc_client.clone(), config().batch_size as usize, config().timeout_in_ms as u16, @@ -174,7 +174,8 @@ async fn start_forwarder(cln_token: CancellationToken, sdk_config: SDKConfig) -> let sink_writer = SinkWriter::new(sink_grpc_client.clone()).await?; - let mut forwarder_builder = ForwarderBuilder::new(source_reader, sink_writer, cln_token); + let mut forwarder_builder = + ForwarderBuilder::new(source_read, source_ack, sink_writer, cln_token); // add transformer if exists if let Some(transformer_grpc_client) = transformer_grpc_client { diff --git a/rust/numaflow-core/src/monovertex/forwarder.rs b/rust/numaflow-core/src/monovertex/forwarder.rs index 130305d01d..f84488907d 100644 --- a/rust/numaflow-core/src/monovertex/forwarder.rs +++ b/rust/numaflow-core/src/monovertex/forwarder.rs @@ -18,8 +18,9 @@ use crate::{error, source}; /// Forwarder is responsible for reading messages from the source, applying transformation if /// transformer is present, writing the messages to the sink, and then acknowledging the messages /// back to the source. -pub(crate) struct Forwarder { - source: T, +pub(crate) struct Forwarder { + source_read: R, + source_ack: A, sink_writer: SinkWriter, source_transformer: Option, fb_sink_writer: Option, @@ -28,19 +29,26 @@ pub(crate) struct Forwarder { } /// ForwarderBuilder is used to build a Forwarder instance with optional fields. 
-pub(crate) struct ForwarderBuilder { - source: T, +pub(crate) struct ForwarderBuilder { + source_read: R, + source_ack: A, sink_writer: SinkWriter, cln_token: CancellationToken, source_transformer: Option, fb_sink_writer: Option, } -impl ForwarderBuilder { +impl ForwarderBuilder { /// Create a new builder with mandatory fields - pub(crate) fn new(source: T, sink_writer: SinkWriter, cln_token: CancellationToken) -> Self { + pub(crate) fn new( + source_read: R, + source_ack: A, + sink_writer: SinkWriter, + cln_token: CancellationToken, + ) -> Self { Self { - source, + source_read, + source_ack, sink_writer, cln_token, source_transformer: None, @@ -62,10 +70,11 @@ impl ForwarderBuilder { /// Build the Forwarder instance #[must_use] - pub(crate) fn build(self) -> Forwarder { + pub(crate) fn build(self) -> Forwarder { let common_labels = metrics::forward_metrics_labels().clone(); Forwarder { - source: self.source, + source_read: self.source_read, + source_ack: self.source_ack, sink_writer: self.sink_writer, source_transformer: self.source_transformer, fb_sink_writer: self.fb_sink_writer, @@ -75,9 +84,10 @@ impl ForwarderBuilder { } } -impl Forwarder +impl Forwarder where - T: source::Source, + A: source::SourceAcker, + R: source::SourceReader, { /// start starts the forward-a-chunk loop and exits only after a chunk has been forwarded and ack'ed. /// this means that, in the happy path scenario a block is always completely processed. @@ -119,7 +129,7 @@ where /// and then acknowledge the messages back to the source. 
async fn read_and_process_messages(&mut self) -> error::Result { let start_time = tokio::time::Instant::now(); - let messages = self.source.read().await.map_err(|e| { + let messages = self.source_read.read().await.map_err(|e| { Error::ForwarderError(format!("Failed to read messages from source {:?}", e)) })?; @@ -515,7 +525,7 @@ where let n = offsets.len(); let start_time = tokio::time::Instant::now(); - self.source.ack(offsets).await?; + self.source_ack.ack(offsets).await?; debug!("Ack latency - {}ms", start_time.elapsed().as_millis()); @@ -543,7 +553,7 @@ mod tests { use crate::monovertex::sourcetransform_pb::source_transform_client::SourceTransformClient; use crate::shared::utils::create_rpc_channel; use crate::sink::user_defined::SinkWriter; - use crate::source::user_defined::UserDefinedSource; + use crate::source::user_defined::new_source; use crate::transformer::user_defined::SourceTransformer; use chrono::Utc; use numaflow::source::{Message, Offset, SourceReadRequest}; @@ -729,7 +739,7 @@ mod tests { let cln_token = CancellationToken::new(); - let source = UserDefinedSource::new( + let (source_read, source_ack, _) = new_source( SourceClient::new(create_rpc_channel(source_sock_file.clone()).await.unwrap()), config().batch_size as usize, config().timeout_in_ms as u16, @@ -749,9 +759,10 @@ mod tests { .await .expect("failed to connect to transformer server"); - let mut forwarder = ForwarderBuilder::new(source, sink_writer, cln_token.clone()) - .source_transformer(transformer_client) - .build(); + let mut forwarder = + ForwarderBuilder::new(source_read, source_ack, sink_writer, cln_token.clone()) + .source_transformer(transformer_client) + .build(); // Assert the received message in a different task let assert_handle = tokio::spawn(async move { @@ -853,7 +864,7 @@ mod tests { let cln_token = CancellationToken::new(); - let source = UserDefinedSource::new( + let (source_read, source_ack, _) = new_source( 
SourceClient::new(create_rpc_channel(source_sock_file.clone()).await.unwrap()), 500, 100, @@ -867,7 +878,8 @@ mod tests { .await .expect("failed to connect to sink server"); - let mut forwarder = ForwarderBuilder::new(source, sink_writer, cln_token.clone()).build(); + let mut forwarder = + ForwarderBuilder::new(source_read, source_ack, sink_writer, cln_token.clone()).build(); let cancel_handle = tokio::spawn(async move { tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; @@ -969,7 +981,7 @@ mod tests { let cln_token = CancellationToken::new(); - let source = UserDefinedSource::new( + let (source_read, source_ack, _) = new_source( SourceClient::new(create_rpc_channel(source_sock_file.clone()).await.unwrap()), 500, 100, @@ -989,9 +1001,10 @@ mod tests { .await .expect("failed to connect to fb sink server"); - let mut forwarder = ForwarderBuilder::new(source, sink_writer, cln_token.clone()) - .fallback_sink_writer(fb_sink_writer) - .build(); + let mut forwarder = + ForwarderBuilder::new(source_read, source_ack, sink_writer, cln_token.clone()) + .fallback_sink_writer(fb_sink_writer) + .build(); let assert_handle = tokio::spawn(async move { let received_message = sink_rx.recv().await.unwrap(); diff --git a/rust/numaflow-core/src/source.rs b/rust/numaflow-core/src/source.rs index 34108bec1a..787f122dd7 100644 --- a/rust/numaflow-core/src/source.rs +++ b/rust/numaflow-core/src/source.rs @@ -5,18 +5,21 @@ use crate::message::{Message, Offset}; /// [User-Defined Source]: https://numaflow.numaproj.io/user-guide/sources/user-defined-sources/ pub(crate) mod user_defined; -/// Set of items that has to be implemented to become a Source. -pub(crate) trait Source { +/// Set of Read related items that has to be implemented to become a Source. +pub(crate) trait SourceReader { #[allow(dead_code)] /// Name of the source. fn name(&self) -> &'static str; async fn read(&mut self) -> crate::Result>; - /// acknowledge an offset. 
The implementor might choose to do it in an asynchronous way. - async fn ack(&mut self, _: Vec) -> crate::Result<()>; - #[allow(dead_code)] /// number of partitions processed by this source. fn partitions(&self) -> Vec; } + +/// Set of Ack related items that has to be implemented to become a Source. +pub(crate) trait SourceAcker { + /// acknowledge an offset. The implementor might choose to do it in an asynchronous way. + async fn ack(&mut self, _: Vec) -> crate::Result<()>; +} diff --git a/rust/numaflow-core/src/source/user_defined.rs b/rust/numaflow-core/src/source/user_defined.rs index 53ab9d9dcd..67b77aa309 100644 --- a/rust/numaflow-core/src/source/user_defined.rs +++ b/rust/numaflow-core/src/source/user_defined.rs @@ -8,7 +8,7 @@ use crate::monovertex::source_pb::{ read_request, AckRequest, AckResponse, ReadRequest, ReadResponse, }; use crate::reader::LagReader; -use crate::source::Source; +use crate::source::{SourceAcker, SourceReader}; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; use tonic::transport::Channel; @@ -16,47 +16,54 @@ use tonic::{Request, Streaming}; /// User-Defined Source to operative on custom sources. #[derive(Debug)] -pub(crate) struct UserDefinedSource { +pub(crate) struct UserDefinedSourceRead { read_tx: mpsc::Sender, resp_stream: Streaming, - ack_tx: mpsc::Sender, - ack_resp_stream: Streaming, num_records: usize, timeout_in_ms: u16, } +/// User-Defined Source to operative on custom sources. +#[derive(Debug)] +pub(crate) struct UserDefinedSourceAck { + ack_tx: mpsc::Sender, + ack_resp_stream: Streaming, +} + /// Creates a new User-Defined Source and its corresponding Lag Reader. 
pub(crate) async fn new_source( client: SourceClient, num_records: usize, timeout_in_ms: u16, -) -> error::Result<(UserDefinedSource, UserDefinedSourceLagReader)> { - let ud_src = UserDefinedSource::new(client.clone(), num_records, timeout_in_ms).await?; +) -> error::Result<( + UserDefinedSourceRead, + UserDefinedSourceAck, + UserDefinedSourceLagReader, +)> { + let src_read = UserDefinedSourceRead::new(client.clone(), num_records, timeout_in_ms).await?; + let src_ack = UserDefinedSourceAck::new(client.clone()).await?; let lag_reader = UserDefinedSourceLagReader::new(client); - Ok((ud_src, lag_reader)) + Ok((src_read, src_ack, lag_reader)) } -impl UserDefinedSource { - pub(crate) async fn new( +impl UserDefinedSourceRead { + async fn new( mut client: SourceClient, num_records: usize, timeout_in_ms: u16, ) -> error::Result { let (read_tx, resp_stream) = Self::create_reader(&mut client).await?; - let (ack_tx, ack_resp_stream) = Self::create_acker(&mut client).await?; Ok(Self { read_tx, resp_stream, - ack_tx, - ack_resp_stream, num_records, timeout_in_ms, }) } - pub(crate) async fn create_reader( + async fn create_reader( client: &mut SourceClient, ) -> error::Result<(mpsc::Sender, Streaming)> { let (read_tx, read_rx) = mpsc::channel(config().batch_size as usize); @@ -89,40 +96,9 @@ impl UserDefinedSource { Ok((read_tx, resp_stream)) } - - pub(crate) async fn create_acker( - client: &mut SourceClient, - ) -> error::Result<(mpsc::Sender, Streaming)> { - let (ack_tx, ack_rx) = mpsc::channel(config().batch_size as usize); - let ack_stream = ReceiverStream::new(ack_rx); - - // do a handshake for ack with the server before we start sending ack requests - let ack_handshake_request = AckRequest { - request: None, - handshake: Some(source_pb::Handshake { sot: true }), - }; - ack_tx - .send(ack_handshake_request) - .await - .map_err(|e| SourceError(format!("failed to send ack handshake request: {}", e)))?; - - let mut ack_resp_stream = 
client.ack_fn(Request::new(ack_stream)).await?.into_inner(); - - // first response from the server will be the handshake response. We need to check if the - // server has accepted the handshake. - let ack_handshake_response = ack_resp_stream.message().await?.ok_or(SourceError( - "failed to receive ack handshake response".to_string(), - ))?; - // handshake cannot to None during the initial phase and it has to set `sot` to true. - if ack_handshake_response.handshake.map_or(true, |h| !h.sot) { - return Err(SourceError("invalid ack handshake response".to_string())); - } - - Ok((ack_tx, ack_resp_stream)) - } } -impl Source for UserDefinedSource { +impl SourceReader for UserDefinedSourceRead { fn name(&self) -> &'static str { "user-defined-source" } @@ -157,6 +133,54 @@ impl Source for UserDefinedSource { Ok(messages) } + fn partitions(&self) -> Vec { + todo!() + } +} + +impl UserDefinedSourceAck { + async fn new(mut client: SourceClient) -> error::Result { + let (ack_tx, ack_resp_stream) = Self::create_acker(&mut client).await?; + + Ok(Self { + ack_tx, + ack_resp_stream, + }) + } + + async fn create_acker( + client: &mut SourceClient, + ) -> error::Result<(mpsc::Sender, Streaming)> { + let (ack_tx, ack_rx) = mpsc::channel(config().batch_size as usize); + let ack_stream = ReceiverStream::new(ack_rx); + + // do a handshake for ack with the server before we start sending ack requests + let ack_handshake_request = AckRequest { + request: None, + handshake: Some(source_pb::Handshake { sot: true }), + }; + ack_tx + .send(ack_handshake_request) + .await + .map_err(|e| SourceError(format!("failed to send ack handshake request: {}", e)))?; + + let mut ack_resp_stream = client.ack_fn(Request::new(ack_stream)).await?.into_inner(); + + // first response from the server will be the handshake response. We need to check if the + // server has accepted the handshake. 
+ let ack_handshake_response = ack_resp_stream.message().await?.ok_or(SourceError( + "failed to receive ack handshake response".to_string(), + ))?; + // handshake cannot to None during the initial phase and it has to set `sot` to true. + if ack_handshake_response.handshake.map_or(true, |h| !h.sot) { + return Err(SourceError("invalid ack handshake response".to_string())); + } + + Ok((ack_tx, ack_resp_stream)) + } +} + +impl SourceAcker for UserDefinedSourceAck { async fn ack(&mut self, offsets: Vec) -> error::Result<()> { let n = offsets.len(); @@ -180,10 +204,6 @@ impl Source for UserDefinedSource { Ok(()) } - - fn partitions(&self) -> Vec { - todo!() - } } #[derive(Clone)] @@ -303,15 +323,15 @@ mod tests { let client = SourceClient::new(create_rpc_channel(sock_file).await.unwrap()); - let (mut source, mut lag_reader) = new_source(client, 5, 1000) + let (mut src_read, mut src_ack, mut lag_reader) = new_source(client, 5, 1000) .await .map_err(|e| panic!("failed to create source reader: {:?}", e)) .unwrap(); - let messages = source.read().await.unwrap(); + let messages = src_read.read().await.unwrap(); assert_eq!(messages.len(), 5); - let response = source + let response = src_ack .ack(messages.iter().map(|m| m.offset.clone()).collect()) .await; assert!(response.is_ok()); @@ -321,7 +341,8 @@ mod tests { // we need to drop the client, because if there are any in-flight requests // server fails to shut down. 
https://github.com/numaproj/numaflow-rs/issues/85 - drop(source); + drop(src_read); + drop(src_ack); shutdown_tx .send(()) From 393ea5489b57b5c097c9b840b7eed1345e26a63f Mon Sep 17 00:00:00 2001 From: Sreekanth Date: Fri, 4 Oct 2024 22:03:13 +0530 Subject: [PATCH 086/188] chore: Unit tests for metrics.rs (#2122) Signed-off-by: Sreekanth --- rust/Cargo.toml | 8 +- rust/numaflow-core/src/monovertex/metrics.rs | 115 ++++++++++++------- rust/numaflow-core/src/shared/server_info.rs | 12 +- 3 files changed, 87 insertions(+), 48 deletions(-) diff --git a/rust/Cargo.toml b/rust/Cargo.toml index 518f905105..54f4a91173 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -1,4 +1,10 @@ -workspace = { members = ["backoff", "numaflow-models", "servesink", "serving", "numaflow-core"] } +workspace = { members = [ + "backoff", + "numaflow-models", + "servesink", + "serving", + "numaflow-core", +] } [[bin]] name = "numaflow" diff --git a/rust/numaflow-core/src/monovertex/metrics.rs b/rust/numaflow-core/src/monovertex/metrics.rs index adbbdde3b2..bf6ee4d30b 100644 --- a/rust/numaflow-core/src/monovertex/metrics.rs +++ b/rust/numaflow-core/src/monovertex/metrics.rs @@ -9,11 +9,18 @@ use axum::http::{Response, StatusCode}; use axum::response::IntoResponse; use axum::{routing::get, Router}; use axum_server::tls_rustls::RustlsConfig; +use prometheus_client::encoding::text::encode; +use prometheus_client::metrics::counter::Counter; +use prometheus_client::metrics::family::Family; +use prometheus_client::metrics::gauge::Gauge; +use prometheus_client::metrics::histogram::{exponential_buckets, Histogram}; +use prometheus_client::registry::Registry; use rcgen::{generate_simple_self_signed, CertifiedKey}; -use tokio::net::{TcpListener, ToSocketAddrs}; use tokio::sync::Mutex; use tokio::task::JoinHandle; use tokio::time; +use tonic::transport::Channel; +use tonic::Request; use tracing::{debug, error, info}; use crate::config::config; @@ -22,14 +29,6 @@ use 
crate::monovertex::sink_pb::sink_client::SinkClient; use crate::monovertex::source_pb::source_client::SourceClient; use crate::monovertex::sourcetransform_pb::source_transform_client::SourceTransformClient; use crate::reader; -use prometheus_client::encoding::text::encode; -use prometheus_client::metrics::counter::Counter; -use prometheus_client::metrics::family::Family; -use prometheus_client::metrics::gauge::Gauge; -use prometheus_client::metrics::histogram::{exponential_buckets, Histogram}; -use prometheus_client::registry::Registry; -use tonic::transport::Channel; -use tonic::Request; // Define the labels for the metrics // Note: Please keep consistent with the definitions in MonoVertex daemon @@ -263,31 +262,6 @@ pub async fn metrics_handler() -> impl IntoResponse { .unwrap() } -/// Collect and emit prometheus metrics. -/// Metrics router and server over HTTP endpoint. -// This is not used currently -#[allow(dead_code)] -pub(crate) async fn start_metrics_http_server( - addr: A, - metrics_state: MetricsState, -) -> crate::Result<()> -where - A: ToSocketAddrs + std::fmt::Debug, -{ - let metrics_app = metrics_router(metrics_state); - - let listener = TcpListener::bind(&addr) - .await - .map_err(|e| Error::MetricsError(format!("Creating listener on {:?}: {}", addr, e)))?; - - debug!("metrics server started at addr: {:?}", addr); - - axum::serve(listener, metrics_app) - .await - .map_err(|e| Error::MetricsError(format!("Starting web server for metrics: {}", e)))?; - Ok(()) -} - pub(crate) async fn start_metrics_https_server( addr: SocketAddr, metrics_state: MetricsState, @@ -490,13 +464,15 @@ async fn fetch_pending(lag_reader: &mut T) -> crate::error Ok(response) } +const LOOKBACK_SECONDS_MAP: [(&str, i64); 4] = + [("1m", 60), ("default", 120), ("5m", 300), ("15m", 900)]; + // Periodically exposes the pending metrics by calculating the average pending messages over different intervals. 
async fn expose_pending_metrics( refresh_interval: Duration, pending_stats: Arc>>, ) { let mut ticker = time::interval(refresh_interval); - let lookback_seconds_map = vec![("1m", 60), ("default", 120), ("5m", 300), ("15m", 900)]; // store the pending info in a sorted way for deterministic display // string concat is more efficient? @@ -504,8 +480,8 @@ async fn expose_pending_metrics( loop { ticker.tick().await; - for (label, seconds) in &lookback_seconds_map { - let pending = calculate_pending(*seconds, &pending_stats).await; + for (label, seconds) in LOOKBACK_SECONDS_MAP { + let pending = calculate_pending(seconds, &pending_stats).await; if pending != -1 { let mut metric_labels = forward_metrics_labels().clone(); metric_labels.push((PENDING_PERIOD_LABEL.to_string(), label.to_string())); @@ -553,14 +529,17 @@ async fn calculate_pending( #[cfg(test)] mod tests { - use super::*; - use crate::monovertex::metrics::MetricsState; - use crate::shared::utils::create_rpc_channel; + use std::net::SocketAddr; + use std::time::Instant; + use numaflow::source::{Message, Offset, SourceReadRequest}; use numaflow::{sink, source, sourcetransform}; - use std::net::SocketAddr; use tokio::sync::mpsc::Sender; + use super::*; + use crate::monovertex::metrics::MetricsState; + use crate::shared::utils::create_rpc_channel; + struct SimpleSource; #[tonic::async_trait] impl source::Sourcer for SimpleSource { @@ -709,4 +688,58 @@ mod tests { fb_sink_server_handle.await.unwrap(); transformer_handle.await.unwrap(); } + + #[tokio::test] + async fn test_expose_pending_metrics() { + let pending_stats = Arc::new(Mutex::new(Vec::with_capacity(MAX_PENDING_STATS))); + let refresh_interval = Duration::from_secs(1); + + // Populate pending_stats with some values. + // The array will be sorted by the timestamp with the most recent last. 
+ { + let mut pending_stats = pending_stats.lock().await; + pending_stats.push(TimestampedPending { + pending: 15, + timestamp: Instant::now() - Duration::from_secs(150), + }); + pending_stats.push(TimestampedPending { + pending: 30, + timestamp: Instant::now() - Duration::from_secs(70), + }); + pending_stats.push(TimestampedPending { + pending: 20, + timestamp: Instant::now() - Duration::from_secs(30), + }); + pending_stats.push(TimestampedPending { + pending: 10, + timestamp: Instant::now(), + }); + } + + tokio::spawn({ + let pending_stats = pending_stats.clone(); + async move { + expose_pending_metrics(refresh_interval, pending_stats).await; + } + }); + // We use tokio::time::interval() as the ticker in the expose_pending_metrics() function. + // The first tick happens immediately, so we don't need to wait for the refresh_interval for the first iteration to complete. + tokio::time::sleep(Duration::from_millis(50)).await; + + // Get the stored values for all time intevals + // We will store the values corresponding to the labels (from LOOKBACK_SECONDS_MAP) "1m", "default", "5m", "15" in the same order in this array + let mut stored_values: [i64; 4] = [0; 4]; + { + for (i, (label, _)) in LOOKBACK_SECONDS_MAP.iter().enumerate() { + let mut metric_labels = forward_metrics_labels().clone(); + metric_labels.push((PENDING_PERIOD_LABEL.to_string(), label.to_string())); + let guage = forward_metrics() + .source_pending + .get_or_create(&metric_labels) + .get(); + stored_values[i] = guage; + } + } + assert_eq!(stored_values, [15, 20, 18, 18]); + } } diff --git a/rust/numaflow-core/src/shared/server_info.rs b/rust/numaflow-core/src/shared/server_info.rs index f058b7a313..b3f8b51847 100644 --- a/rust/numaflow-core/src/shared/server_info.rs +++ b/rust/numaflow-core/src/shared/server_info.rs @@ -1,6 +1,6 @@ use std::collections::HashMap; use std::fs; -use std::path::PathBuf; +use std::path::{Path, PathBuf}; use std::str::FromStr; use std::time::Duration; @@ -190,12 +190,12 @@ 
fn human_readable(ver: &str) -> String { return String::new(); } // semver - if ver.ends_with("-z") { - return ver[..ver.len() - 2].to_string(); + if let Some(version) = ver.strip_suffix("-z") { + return version.to_string(); } // PEP 440 - if ver.ends_with("rc100") { - return ver[..ver.len() - 5].to_string(); + if let Some(version) = ver.strip_suffix("rc100") { + return version.to_string(); } ver.to_string() } @@ -265,7 +265,7 @@ fn trim_after_dash(input: &str) -> &str { /// Extracts the container type from the server info file. /// The file name is in the format of -server-info. -fn get_container_type(server_info_file: &PathBuf) -> Option<&str> { +fn get_container_type(server_info_file: &Path) -> Option<&str> { let file_name = server_info_file.file_name()?; let container_type = file_name.to_str()?.trim_end_matches("-server-info"); if container_type.is_empty() { From 06515a2cbfc3a183131cab54394bb5d2c546e046 Mon Sep 17 00:00:00 2001 From: Julie Vogelman Date: Sat, 5 Oct 2024 14:37:13 -0700 Subject: [PATCH 087/188] fix: create buffers and buckets before updating Vertices (#2112) Signed-off-by: Julie Vogelman --- pkg/reconciler/pipeline/controller.go | 81 ++++++++++++++------------- 1 file changed, 41 insertions(+), 40 deletions(-) diff --git a/pkg/reconciler/pipeline/controller.go b/pkg/reconciler/pipeline/controller.go index 145c5544ad..c2f9080529 100644 --- a/pkg/reconciler/pipeline/controller.go +++ b/pkg/reconciler/pipeline/controller.go @@ -280,46 +280,6 @@ func (r *pipelineReconciler) reconcileFixedResources(ctx context.Context, pl *df newBuckets[b] = b } } - newObjs := buildVertices(pl) - for vertexName, newObj := range newObjs { - if oldObj, existing := existingObjs[vertexName]; !existing { - if err := r.client.Create(ctx, &newObj); err != nil { - if apierrors.IsAlreadyExists(err) { // probably somebody else already created it - continue - } else { - r.recorder.Eventf(pl, corev1.EventTypeWarning, "CreateVertexFailed", "Failed to create vertex: %w", 
err.Error()) - return fmt.Errorf("failed to create vertex, err: %w", err) - } - } - log.Infow("Created vertex successfully", zap.String("vertex", vertexName)) - r.recorder.Eventf(pl, corev1.EventTypeNormal, "CreateVertexSuccess", "Created vertex %s successfully", vertexName) - } else { - if oldObj.GetAnnotations()[dfv1.KeyHash] != newObj.GetAnnotations()[dfv1.KeyHash] { // need to update - originalReplicas := oldObj.Spec.Replicas - oldObj.Spec = newObj.Spec - oldObj.Spec.Replicas = originalReplicas - oldObj.Annotations[dfv1.KeyHash] = newObj.GetAnnotations()[dfv1.KeyHash] - if err := r.client.Update(ctx, &oldObj); err != nil { - r.recorder.Eventf(pl, corev1.EventTypeWarning, "UpdateVertexFailed", "Failed to update vertex: %w", err.Error()) - return fmt.Errorf("failed to update vertex, err: %w", err) - } - log.Infow("Updated vertex successfully", zap.String("vertex", vertexName)) - r.recorder.Eventf(pl, corev1.EventTypeNormal, "UpdateVertexSuccess", "Updated vertex %s successfully", vertexName) - } - delete(existingObjs, vertexName) - } - } - for _, v := range existingObjs { - if err := r.client.Delete(ctx, &v); err != nil { - r.recorder.Eventf(pl, corev1.EventTypeWarning, "DeleteStaleVertexFailed", "Failed to delete vertex: %w", err.Error()) - return fmt.Errorf("failed to delete vertex, err: %w", err) - } - log.Infow("Deleted stale vertex successfully", zap.String("vertex", v.Name)) - r.recorder.Eventf(pl, corev1.EventTypeNormal, "DeleteStaleVertexSuccess", "Deleted stale vertex %s successfully", v.Name) - // Clean up vertex replica metrics - reconciler.VertexDesiredReplicas.DeleteLabelValues(pl.Namespace, pl.Name, v.Spec.Name) - reconciler.VertexCurrentReplicas.DeleteLabelValues(pl.Namespace, pl.Name, v.Spec.Name) - } // create batch job if len(newBuffers) > 0 || len(newBuckets) > 0 { @@ -362,6 +322,47 @@ func (r *pipelineReconciler) reconcileFixedResources(ctx context.Context, pl *df r.recorder.Eventf(pl, corev1.EventTypeNormal, 
"CreateJobForISBDeletionSuccessful", "Create ISB deletion job successfully") } + newObjs := buildVertices(pl) + for vertexName, newObj := range newObjs { + if oldObj, existing := existingObjs[vertexName]; !existing { + if err := r.client.Create(ctx, &newObj); err != nil { + if apierrors.IsAlreadyExists(err) { // probably somebody else already created it + continue + } else { + r.recorder.Eventf(pl, corev1.EventTypeWarning, "CreateVertexFailed", "Failed to create vertex: %w", err.Error()) + return fmt.Errorf("failed to create vertex, err: %w", err) + } + } + log.Infow("Created vertex successfully", zap.String("vertex", vertexName)) + r.recorder.Eventf(pl, corev1.EventTypeNormal, "CreateVertexSuccess", "Created vertex %s successfully", vertexName) + } else { + if oldObj.GetAnnotations()[dfv1.KeyHash] != newObj.GetAnnotations()[dfv1.KeyHash] { // need to update + originalReplicas := oldObj.Spec.Replicas + oldObj.Spec = newObj.Spec + oldObj.Spec.Replicas = originalReplicas + oldObj.Annotations[dfv1.KeyHash] = newObj.GetAnnotations()[dfv1.KeyHash] + if err := r.client.Update(ctx, &oldObj); err != nil { + r.recorder.Eventf(pl, corev1.EventTypeWarning, "UpdateVertexFailed", "Failed to update vertex: %w", err.Error()) + return fmt.Errorf("failed to update vertex, err: %w", err) + } + log.Infow("Updated vertex successfully", zap.String("vertex", vertexName)) + r.recorder.Eventf(pl, corev1.EventTypeNormal, "UpdateVertexSuccess", "Updated vertex %s successfully", vertexName) + } + delete(existingObjs, vertexName) + } + } + for _, v := range existingObjs { + if err := r.client.Delete(ctx, &v); err != nil { + r.recorder.Eventf(pl, corev1.EventTypeWarning, "DeleteStaleVertexFailed", "Failed to delete vertex: %w", err.Error()) + return fmt.Errorf("failed to delete vertex, err: %w", err) + } + log.Infow("Deleted stale vertex successfully", zap.String("vertex", v.Name)) + r.recorder.Eventf(pl, corev1.EventTypeNormal, "DeleteStaleVertexSuccess", "Deleted stale vertex %s 
successfully", v.Name) + // Clean up vertex replica metrics + reconciler.VertexDesiredReplicas.DeleteLabelValues(pl.Namespace, pl.Name, v.Spec.Name) + reconciler.VertexCurrentReplicas.DeleteLabelValues(pl.Namespace, pl.Name, v.Spec.Name) + } + // Daemon service if err := r.createOrUpdateDaemonService(ctx, pl); err != nil { return err From 4fad59c06a8ba44c87ea6c26d4658ba4b524d016 Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Sun, 6 Oct 2024 12:12:02 -0700 Subject: [PATCH 088/188] chore: k8s lib version upgrade to 1.31 (#2127) Signed-off-by: Derek Wang --- .codecov.yml | 1 + .github/workflows/ci.yaml | 8 +- .github/workflows/gh-pages.yaml | 2 +- .github/workflows/nightly-build.yml | 2 +- .github/workflows/release.yml | 6 +- Makefile | 4 +- api/json-schema/schema.json | 3254 +++++++++++++---- api/openapi-spec/swagger.json | 3250 ++++++++++++---- ...w.numaproj.io_interstepbufferservices.yaml | 334 +- .../numaflow.numaproj.io_monovertices.yaml | 597 ++- .../full/numaflow.numaproj.io_pipelines.yaml | 1091 +++++- .../full/numaflow.numaproj.io_vertices.yaml | 548 ++- config/install.yaml | 2570 ++++++++++++- config/namespace-install.yaml | 2570 ++++++++++++- go.mod | 58 +- go.sum | 128 +- hack/generate-proto.sh | 9 +- hack/openapi-gen.sh | 18 +- hack/swagger-gen.sh | 2 +- hack/tools.go | 12 - hack/update-codegen.sh | 48 +- pkg/apis/numaflow/v1alpha1/generated.proto | 164 +- ...i_generated.go => zz_generated.openapi.go} | 29 +- pkg/apis/proto/daemon/daemon.pb.gw.go | 6 +- pkg/apis/proto/mvtxdaemon/mvtxdaemon.pb.gw.go | 6 +- .../versioned/fake/clientset_generated.go | 6 +- .../fake/fake_interstepbufferservice.go | 36 +- .../numaflow/v1alpha1/fake/fake_monovertex.go | 36 +- .../numaflow/v1alpha1/fake/fake_pipeline.go | 36 +- .../numaflow/v1alpha1/fake/fake_vertex.go | 36 +- .../v1alpha1/interstepbufferservice.go | 146 +- .../typed/numaflow/v1alpha1/monovertex.go | 146 +- .../typed/numaflow/v1alpha1/pipeline.go | 146 +- .../typed/numaflow/v1alpha1/vertex.go | 146 +- 
.../informers/externalversions/factory.go | 1 + .../v1alpha1/interstepbufferservice.go | 39 +- .../listers/numaflow/v1alpha1/monovertex.go | 39 +- .../listers/numaflow/v1alpha1/pipeline.go | 39 +- .../listers/numaflow/v1alpha1/vertex.go | 39 +- pkg/reconciler/cmd/start.go | 116 +- rust/Cargo.lock | 238 +- rust/numaflow-core/Cargo.toml | 2 +- rust/numaflow-models/Cargo.toml | 4 +- rust/numaflow-models/templates/Cargo.mustache | 4 +- 44 files changed, 13102 insertions(+), 2870 deletions(-) rename pkg/apis/numaflow/v1alpha1/{openapi_generated.go => zz_generated.openapi.go} (99%) diff --git a/.codecov.yml b/.codecov.yml index dd3d8fd073..f4c0d21aa5 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -3,6 +3,7 @@ ignore: - "**/*.pb.gw.go" - "**/*generated.go" - "**/*generated.deepcopy.go" +- "**/*generated.openapi.go" - "**/*_test.go" - "pkg/client/.*" - "vendor/.*" diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index afcea04945..a5a35fba4c 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -43,7 +43,7 @@ jobs: - name: Setup Golang uses: actions/setup-go@v5 with: - go-version: "1.22" + go-version: "1.23" - name: Add bins to PATH run: | echo /home/runner/go/bin >> $GITHUB_PATH @@ -81,7 +81,7 @@ jobs: - name: Set up Go 1.x uses: actions/setup-go@v5 with: - go-version: "1.22" + go-version: "1.23" id: go - name: Check out code @@ -145,7 +145,7 @@ jobs: - name: Setup Golang uses: actions/setup-go@v5 with: - go-version: "1.22" + go-version: "1.23" - name: Restore Go build cache uses: actions/cache@v4 with: @@ -233,7 +233,7 @@ jobs: - name: Setup Golang uses: actions/setup-go@v5 with: - go-version: "1.22" + go-version: "1.23" - name: Add bins to PATH run: | echo /home/runner/go/bin >> $GITHUB_PATH diff --git a/.github/workflows/gh-pages.yaml b/.github/workflows/gh-pages.yaml index ff97c18614..8d89476cb9 100644 --- a/.github/workflows/gh-pages.yaml +++ b/.github/workflows/gh-pages.yaml @@ -21,7 +21,7 @@ jobs: - name: Setup Golang uses: 
actions/setup-go@v5 with: - go-version: '1.22' + go-version: '1.23' - name: build run: make docs - name: deploy diff --git a/.github/workflows/nightly-build.yml b/.github/workflows/nightly-build.yml index ed6a898b83..769d7641a7 100644 --- a/.github/workflows/nightly-build.yml +++ b/.github/workflows/nightly-build.yml @@ -26,7 +26,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: '1.22' + go-version: '1.23' - name: Build binaries run: | diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 84cd8f533d..af459314e6 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -24,7 +24,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: '1.22' + go-version: '1.23' - name: Build binaries run: | @@ -162,7 +162,7 @@ jobs: fi - uses: actions/setup-go@v5 with: - go-version: '1.22' + go-version: '1.23' - uses: actions/checkout@v4 - run: go install sigs.k8s.io/bom/cmd/bom@v0.2.0 - run: go install github.com/spdx/spdx-sbom-generator/cmd/generator@v0.0.13 @@ -240,4 +240,4 @@ jobs: /tmp/sbom.tar.gz /tmp/sbom.tar.gz.sig env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/Makefile b/Makefile index 6ff001e100..27aa203aec 100644 --- a/Makefile +++ b/Makefile @@ -192,10 +192,10 @@ endif .PHONY: build-rust-in-docker build-rust-in-docker: mkdir -p dist - -$(DOCKER) container ls --all --filter=ancestor='$(IMAGE_NAMESPACE)/$(BINARY_NAME)-rust-builder:$(VERSION)' --format "{{.ID}}" | xargs docker rm + -$(DOCKER) container ls --all --filter=ancestor='$(IMAGE_NAMESPACE)/$(BINARY_NAME)-rust-builder:$(VERSION)' --format "{{.ID}}" | xargs $(DOCKER) rm -$(DOCKER) image rm $(IMAGE_NAMESPACE)/$(BINARY_NAME)-rust-builder:$(VERSION) DOCKER_BUILDKIT=1 $(DOCKER) build --build-arg "BASE_IMAGE=$(DEV_BASE_IMAGE)" $(DOCKER_BUILD_ARGS) -t $(IMAGE_NAMESPACE)/$(BINARY_NAME)-rust-builder:$(VERSION) --target rust-builder -f $(DOCKERFILE) 
. - export CTR=$$(docker create $(IMAGE_NAMESPACE)/$(BINARY_NAME)-rust-builder:$(VERSION)) && $(DOCKER) cp $$CTR:/root/numaflow dist/numaflow-rs-linux-$(HOST_ARCH) && $(DOCKER) rm $$CTR && $(DOCKER) image rm $(IMAGE_NAMESPACE)/$(BINARY_NAME)-rust-builder:$(VERSION) + export CTR=$$($(DOCKER) create $(IMAGE_NAMESPACE)/$(BINARY_NAME)-rust-builder:$(VERSION)) && $(DOCKER) cp $$CTR:/root/numaflow dist/numaflow-rs-linux-$(HOST_ARCH) && $(DOCKER) rm $$CTR && $(DOCKER) image rm $(IMAGE_NAMESPACE)/$(BINARY_NAME)-rust-builder:$(VERSION) image-multi: ui-build set-qemu dist/$(BINARY_NAME)-linux-arm64.gz dist/$(BINARY_NAME)-linux-amd64.gz $(DOCKER) buildx build --sbom=false --provenance=false --build-arg "BASE_IMAGE=$(RELEASE_BASE_IMAGE)" $(DOCKER_BUILD_ARGS) -t $(IMAGE_NAMESPACE)/$(BINARY_NAME):$(VERSION) --target $(BINARY_NAME) --platform linux/amd64,linux/arm64 --file $(DOCKERFILE) ${PUSH_OPTION} . diff --git a/api/json-schema/schema.json b/api/json-schema/schema.json index 27258328ee..2366c1b53a 100644 --- a/api/json-schema/schema.json +++ b/api/json-schema/schema.json @@ -2,6 +2,42 @@ "$id": "http://io.numaproj.numaflow/numaflow.json", "$schema": "http://json-schema.org/schema#", "definitions": { + "io.k8s.api.admissionregistration.v1.AuditAnnotation": { + "description": "AuditAnnotation describes how to produce an audit annotation for an API request.", + "properties": { + "key": { + "description": "key specifies the audit annotation key. The audit annotation keys of a ValidatingAdmissionPolicy must be unique. The key must be a qualified name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.\n\nThe key is combined with the resource name of the ValidatingAdmissionPolicy to construct an audit annotation key: \"{ValidatingAdmissionPolicy name}/{key}\".\n\nIf an admission webhook uses the same resource name as this ValidatingAdmissionPolicy and the same audit annotation key, the annotation key will be identical. 
In this case, the first annotation written with the key will be included in the audit event and all subsequent annotations with the same key will be discarded.\n\nRequired.", + "type": "string" + }, + "valueExpression": { + "description": "valueExpression represents the expression which is evaluated by CEL to produce an audit annotation value. The expression must evaluate to either a string or null value. If the expression evaluates to a string, the audit annotation is included with the string value. If the expression evaluates to null or empty string the audit annotation will be omitted. The valueExpression may be no longer than 5kb in length. If the result of the valueExpression is more than 10kb in length, it will be truncated to 10kb.\n\nIf multiple ValidatingAdmissionPolicyBinding resources match an API request, then the valueExpression will be evaluated for each binding. All unique values produced by the valueExpressions will be joined together in a comma-separated list.\n\nRequired.", + "type": "string" + } + }, + "required": [ + "key", + "valueExpression" + ], + "type": "object" + }, + "io.k8s.api.admissionregistration.v1.ExpressionWarning": { + "description": "ExpressionWarning is a warning information that targets a specific expression.", + "properties": { + "fieldRef": { + "description": "The path to the field that refers the expression. For example, the reference to the expression of the first item of validations is \"spec.validations[0].expression\"", + "type": "string" + }, + "warning": { + "description": "The content of type checking information in a human-readable form. 
Each line of the warning contains the type that the expression is checked against, followed by the type check error from the compiler.", + "type": "string" + } + }, + "required": [ + "fieldRef", + "warning" + ], + "type": "object" + }, "io.k8s.api.admissionregistration.v1.MatchCondition": { "description": "MatchCondition represents a condition which must by fulfilled for a request to be sent to a webhook.", "properties": { @@ -20,6 +56,41 @@ ], "type": "object" }, + "io.k8s.api.admissionregistration.v1.MatchResources": { + "description": "MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)", + "properties": { + "excludeResourceRules": { + "description": "ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.NamedRuleWithOperations" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "matchPolicy": { + "description": "matchPolicy defines how the \"MatchResources\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. 
For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.\n\nDefaults to \"Equivalent\"", + "type": "string" + }, + "namespaceSelector": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", + "description": "NamespaceSelector decides whether to run the admission control policy on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the policy.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the policy on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything." + }, + "objectSelector": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", + "description": "ObjectSelector decides whether to run the validation based on if the object has matching labels. 
objectSelector is evaluated against both the oldObject and newObject that would be sent to the cel validation, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything." + }, + "resourceRules": { + "description": "ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule.", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.NamedRuleWithOperations" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "type": "object", + "x-kubernetes-map-type": "atomic" + }, "io.k8s.api.admissionregistration.v1.MutatingWebhook": { "description": "MutatingWebhook describes an admission webhook and the resources and operations it applies to.", "properties": { @@ -28,7 +99,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "clientConfig": { "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.WebhookClientConfig", @@ -39,7 +111,7 @@ "type": "string" }, "matchConditions": { - "description": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. 
If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.", + "description": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped", "items": { "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.MatchCondition" }, @@ -76,7 +148,8 @@ "items": { "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.RuleWithOperations" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "sideEffects": { "description": "SideEffects states whether this webhook has side effects. Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. 
Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some.", @@ -117,6 +190,10 @@ "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.MutatingWebhook" }, "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge" } @@ -165,8 +242,8 @@ } ] }, - "io.k8s.api.admissionregistration.v1.RuleWithOperations": { - "description": "RuleWithOperations is a tuple of Operations and Resources. It is recommended to make sure that all the tuple expansions are valid.", + "io.k8s.api.admissionregistration.v1.NamedRuleWithOperations": { + "description": "NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.", "properties": { "apiGroups": { "description": "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.", @@ -192,6 +269,14 @@ "type": "array", "x-kubernetes-list-type": "atomic" }, + "resourceNames": { + "description": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, "resources": { "description": "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. 
Required.", "items": { @@ -205,109 +290,132 @@ "type": "string" } }, - "type": "object" + "type": "object", + "x-kubernetes-map-type": "atomic" }, - "io.k8s.api.admissionregistration.v1.ServiceReference": { - "description": "ServiceReference holds a reference to Service.legacy.k8s.io", + "io.k8s.api.admissionregistration.v1.ParamKind": { + "description": "ParamKind is a tuple of Group Kind and Version.", + "properties": { + "apiVersion": { + "description": "APIVersion is the API group version the resources belong to. In format of \"group/version\". Required.", + "type": "string" + }, + "kind": { + "description": "Kind is the API kind the resources belong to. Required.", + "type": "string" + } + }, + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.admissionregistration.v1.ParamRef": { + "description": "ParamRef describes how to locate the params to be used as input to expressions of rules applied by a policy binding.", "properties": { "name": { - "description": "`name` is the name of the service. Required", + "description": "name is the name of the resource being referenced.\n\nOne of `name` or `selector` must be set, but `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset.\n\nA single parameter used for all admission requests can be configured by setting the `name` field, leaving `selector` blank, and setting namespace if `paramKind` is namespace-scoped.", "type": "string" }, "namespace": { - "description": "`namespace` is the namespace of the service. Required", + "description": "namespace is the namespace of the referenced resource. Allows limiting the search for params to a specific namespace. Applies to both `name` and `selector` fields.\n\nA per-namespace parameter may be used by specifying a namespace-scoped `paramKind` in the policy and leaving this field empty.\n\n- If `paramKind` is cluster-scoped, this field MUST be unset. 
Setting this field results in a configuration error.\n\n- If `paramKind` is namespace-scoped, the namespace of the object being evaluated for admission will be used when this field is left unset. Take care that if this is left empty the binding must not match any cluster-scoped resources, which will result in an error.", "type": "string" }, - "path": { - "description": "`path` is an optional URL path which will be sent in any request to this service.", + "parameterNotFoundAction": { + "description": "`parameterNotFoundAction` controls the behavior of the binding when the resource exists, and name or selector is valid, but there are no parameters matched by the binding. If the value is set to `Allow`, then no matched parameters will be treated as successful validation by the binding. If set to `Deny`, then no matched parameters will be subject to the `failurePolicy` of the policy.\n\nAllowed values are `Allow` or `Deny`\n\nRequired", "type": "string" }, - "port": { - "description": "If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive).", - "format": "int32", - "type": "integer" + "selector": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", + "description": "selector can be used to match multiple param objects based on their labels. Supply selector: {} to match all resources of the ParamKind.\n\nIf multiple params are found, they are all evaluated with the policy expressions and the results are ANDed together.\n\nOne of `name` or `selector` must be set, but `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset." 
} }, - "required": [ - "namespace", - "name" - ], - "type": "object" + "type": "object", + "x-kubernetes-map-type": "atomic" }, - "io.k8s.api.admissionregistration.v1.ValidatingWebhook": { - "description": "ValidatingWebhook describes an admission webhook and the resources and operations it applies to.", + "io.k8s.api.admissionregistration.v1.RuleWithOperations": { + "description": "RuleWithOperations is a tuple of Operations and Resources. It is recommended to make sure that all the tuple expansions are valid.", "properties": { - "admissionReviewVersions": { - "description": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy.", + "apiGroups": { + "description": "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.", "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, - "clientConfig": { - "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.WebhookClientConfig", - "description": "ClientConfig defines how to communicate with the hook. Required" + "apiVersions": { + "description": "APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" }, - "failurePolicy": { - "description": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. 
Defaults to Fail.", - "type": "string" + "operations": { + "description": "Operations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or * for all of those operations and any future admission operations that are added. If '*' is present, the length of the slice must be one. Required.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" }, - "matchConditions": { - "description": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.", + "resources": { + "description": "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. 
Required.", "items": { - "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.MatchCondition" + "type": "string" }, "type": "array", - "x-kubernetes-list-map-keys": [ - "name" - ], - "x-kubernetes-list-type": "map", - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge" + "x-kubernetes-list-type": "atomic" }, - "matchPolicy": { - "description": "matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\n\nDefaults to \"Equivalent\"", + "scope": { + "description": "scope specifies the scope of this rule. Valid values are \"Cluster\", \"Namespaced\", and \"*\" \"Cluster\" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. \"Namespaced\" means that only namespaced resources will match this rule. \"*\" means that there are no scope restrictions. Subresources match the scope of their parent resource. 
Default is \"*\".", "type": "string" - }, + } + }, + "type": "object" + }, + "io.k8s.api.admissionregistration.v1.ServiceReference": { + "description": "ServiceReference holds a reference to Service.legacy.k8s.io", + "properties": { "name": { - "description": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", + "description": "`name` is the name of the service. Required", "type": "string" }, - "namespaceSelector": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", - "description": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything." 
- }, - "objectSelector": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", - "description": "ObjectSelector decides whether to run the webhook based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the webhook, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything." - }, - "rules": { - "description": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.", - "items": { - "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.RuleWithOperations" - }, - "type": "array" + "namespace": { + "description": "`namespace` is the namespace of the service. Required", + "type": "string" }, - "sideEffects": { - "description": "SideEffects states whether this webhook has side effects. Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. 
Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some.", + "path": { + "description": "`path` is an optional URL path which will be sent in any request to this service.", "type": "string" }, - "timeoutSeconds": { - "description": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 10 seconds.", + "port": { + "description": "If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive).", "format": "int32", "type": "integer" } }, "required": [ - "name", - "clientConfig", - "sideEffects", - "admissionReviewVersions" + "namespace", + "name" ], "type": "object" }, - "io.k8s.api.admissionregistration.v1.ValidatingWebhookConfiguration": { - "description": "ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it.", + "io.k8s.api.admissionregistration.v1.TypeChecking": { + "description": "TypeChecking contains results of type checking the expressions in the ValidatingAdmissionPolicy", + "properties": { + "expressionWarnings": { + "description": "The type checking warnings for each expression.", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.ExpressionWarning" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "type": "object" + }, + "io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicy": { + "description": "ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it.", "properties": { "apiVersion": { "description": "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", @@ -321,38 +429,360 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", "description": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata." }, - "webhooks": { - "description": "Webhooks is a list of webhooks and the affected resources and operations.", - "items": { - "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.ValidatingWebhook" - }, - "type": "array", - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge" + "spec": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicySpec", + "description": "Specification of the desired behavior of the ValidatingAdmissionPolicy." + }, + "status": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicyStatus", + "description": "The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy behaves in the expected way. Populated by the system. Read-only." } }, "type": "object", "x-kubernetes-group-version-kind": [ { "group": "admissionregistration.k8s.io", - "kind": "ValidatingWebhookConfiguration", + "kind": "ValidatingAdmissionPolicy", "version": "v1" } ] }, - "io.k8s.api.admissionregistration.v1.ValidatingWebhookConfigurationList": { - "description": "ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration.", + "io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicyBinding": { + "description": "ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with paramerized resources. 
ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters.\n\nFor a given admission request, each binding will cause its policy to be evaluated N times, where N is 1 for policies/bindings that don't use params, otherwise N is the number of parameters selected by the binding.\n\nThe CEL expressions of a policy must have a computed CEL cost below the maximum CEL budget. Each evaluation of the policy is given an independent CEL cost budget. Adding/removing policies, bindings, or params can not affect whether a given (policy, binding, param) combination is within its own CEL budget.", "properties": { "apiVersion": { "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", "type": "string" }, - "items": { - "description": "List of ValidatingWebhookConfiguration.", - "items": { - "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.ValidatingWebhookConfiguration" - }, - "type": "array" + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata." 
+ }, + "spec": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicyBindingSpec", + "description": "Specification of the desired behavior of the ValidatingAdmissionPolicyBinding." + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "admissionregistration.k8s.io", + "kind": "ValidatingAdmissionPolicyBinding", + "version": "v1" + } + ] + }, + "io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicyBindingList": { + "description": "ValidatingAdmissionPolicyBindingList is a list of ValidatingAdmissionPolicyBinding.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "List of PolicyBinding.", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicyBinding" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "admissionregistration.k8s.io", + "kind": "ValidatingAdmissionPolicyBindingList", + "version": "v1" + } + ] + }, + "io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicyBindingSpec": { + "description": "ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding.", + "properties": { + "matchResources": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.MatchResources", + "description": "MatchResources declares what resources match this binding and will be validated by it. Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this. If this is unset, all resources matched by the policy are validated by this binding When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated. Note that this is differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required." + }, + "paramRef": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.ParamRef", + "description": "paramRef specifies the parameter resource used to configure the admission control policy. It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy. If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy applied. If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param." 
+ }, + "policyName": { + "description": "PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required.", + "type": "string" + }, + "validationActions": { + "description": "validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions.\n\nFailures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.\n\nvalidationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action.\n\nThe supported actions values are:\n\n\"Deny\" specifies that a validation failure results in a denied request.\n\n\"Warn\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses.\n\n\"Audit\" specifies that a validation failure is included in the published audit event for the request. 
The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\"validation.policy.admission.k8s.io/validation_failure\": \"[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]\"`\n\nClients should expect to handle additional values by ignoring any values not recognized.\n\n\"Deny\" and \"Warn\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers.\n\nRequired.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "set" + } + }, + "type": "object" + }, + "io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicyList": { + "description": "ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "List of ValidatingAdmissionPolicy.", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicy" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "admissionregistration.k8s.io", + "kind": "ValidatingAdmissionPolicyList", + "version": "v1" + } + ] + }, + "io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicySpec": { + "description": "ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy.", + "properties": { + "auditAnnotations": { + "description": "auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request. validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is required.", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.AuditAnnotation" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "failurePolicy": { + "description": "failurePolicy defines how to handle failures for the admission policy. 
Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings.\n\nA policy is invalid if spec.paramKind refers to a non-existent Kind. A binding is invalid if spec.paramRef.name refers to a non-existent resource.\n\nfailurePolicy does not define how validations that evaluate to false are handled.\n\nWhen failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions define how failures are enforced.\n\nAllowed values are Ignore or Fail. Defaults to Fail.", + "type": "string" + }, + "matchConditions": { + "description": "MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the policy is skipped", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.MatchCondition" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "matchConstraints": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.MatchResources", + "description": "MatchConstraints specifies what resources this policy is designed to validate. The AdmissionPolicy cares about a request if it matches _all_ Constraints. 
However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API ValidatingAdmissionPolicy cannot match ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding. Required." + }, + "paramKind": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.ParamKind", + "description": "ParamKind specifies the kind of resources used to parameterize this policy. If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. If ParamKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. If paramKind is specified but paramRef is unset in ValidatingAdmissionPolicyBinding, the params variable will be null." + }, + "validations": { + "description": "Validations contain CEL expressions which is used to apply the validation. Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is required.", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.Validation" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "variables": { + "description": "Variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except MatchConditions because MatchConditions are evaluated before the rest of the policy.\n\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. 
Thus, Variables must be sorted by the order of first appearance and acyclic.", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.Variable" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + } + }, + "type": "object" + }, + "io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicyStatus": { + "description": "ValidatingAdmissionPolicyStatus represents the status of an admission validation policy.", + "properties": { + "conditions": { + "description": "The conditions represent the latest available observations of a policy's current state.", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Condition" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map" + }, + "observedGeneration": { + "description": "The generation observed by the controller.", + "format": "int64", + "type": "integer" + }, + "typeChecking": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.TypeChecking", + "description": "The results of type checking for each expression. Presence of this field indicates the completion of the type checking." + } + }, + "type": "object" + }, + "io.k8s.api.admissionregistration.v1.ValidatingWebhook": { + "description": "ValidatingWebhook describes an admission webhook and the resources and operations it applies to.", + "properties": { + "admissionReviewVersions": { + "description": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. 
If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "clientConfig": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.WebhookClientConfig", + "description": "ClientConfig defines how to communicate with the hook. Required" + }, + "failurePolicy": { + "description": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Fail.", + "type": "string" + }, + "matchConditions": { + "description": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.MatchCondition" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "matchPolicy": { + "description": "matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. 
For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\n\nDefaults to \"Equivalent\"", + "type": "string" + }, + "name": { + "description": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", + "type": "string" + }, + "namespaceSelector": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", + "description": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. 
If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything." + }, + "objectSelector": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", + "description": "ObjectSelector decides whether to run the webhook based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the webhook, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything." + }, + "rules": { + "description": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. 
However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.RuleWithOperations" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "sideEffects": { + "description": "SideEffects states whether this webhook has side effects. Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some.", + "type": "string" + }, + "timeoutSeconds": { + "description": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 10 seconds.", + "format": "int32", + "type": "integer" + } + }, + "required": [ + "name", + "clientConfig", + "sideEffects", + "admissionReviewVersions" + ], + "type": "object" + }, + "io.k8s.api.admissionregistration.v1.ValidatingWebhookConfiguration": { + "description": "ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata." + }, + "webhooks": { + "description": "Webhooks is a list of webhooks and the affected resources and operations.", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.ValidatingWebhook" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "admissionregistration.k8s.io", + "kind": "ValidatingWebhookConfiguration", + "version": "v1" + } + ] + }, + "io.k8s.api.admissionregistration.v1.ValidatingWebhookConfigurationList": { + "description": "ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "List of ValidatingWebhookConfiguration.", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.ValidatingWebhookConfiguration" + }, + "type": "array" }, "kind": { "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", @@ -375,6 +805,50 @@ } ] }, + "io.k8s.api.admissionregistration.v1.Validation": { + "description": "Validation specifies the CEL expression which is used to apply the validation.", + "properties": { + "expression": { + "description": "Expression represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\n For example, a variable named 'foo' can be accessed as 'variables.foo'.\n- 'authorizer' - A CEL Authorizer. 
May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\n\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible.\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:\n\t \"true\", \"false\", \"null\", \"in\", \"as\", \"break\", \"const\", \"continue\", \"else\", \"for\", \"function\", \"if\",\n\t \"import\", \"let\", \"loop\", \"package\", \"namespace\", \"return\".\nExamples:\n - Expression accessing a property named \"namespace\": {\"Expression\": \"object.__namespace__ \u003e 0\"}\n - Expression accessing a property named \"x-prop\": {\"Expression\": \"object.x__dash__prop \u003e 0\"}\n - Expression accessing a property named \"redact__d\": {\"Expression\": \"object.redact__underscores__d \u003e 0\"}\n\nEquality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:\n - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and\n non-intersecting elements in `Y` are appended, retaining their partial order.\n - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values\n are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. 
Elements in `Y` with\n non-intersecting keys are appended, retaining their partial order.\nRequired.", + "type": "string" + }, + "message": { + "description": "Message represents the message displayed when validation fails. The message is required if the Expression contains line breaks. The message must not contain line breaks. If unset, the message is \"failed rule: {Rule}\". e.g. \"must be a URL with the host matching spec.host\" If the Expression contains line breaks. Message is required. The message must not contain line breaks. If unset, the message is \"failed Expression: {Expression}\".", + "type": "string" + }, + "messageExpression": { + "description": "messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. Example: \"object.x must be less than max (\"+string(params.max)+\")\"", + "type": "string" + }, + "reason": { + "description": "Reason represents a machine-readable description of why this validation failed. 
If this is the first validation in the list to fail, this reason, as well as the corresponding HTTP response code, are used in the HTTP response to the client. The currently supported reasons are: \"Unauthorized\", \"Forbidden\", \"Invalid\", \"RequestEntityTooLarge\". If not set, StatusReasonInvalid is used in the response to the client.", + "type": "string" + } + }, + "required": [ + "expression" + ], + "type": "object" + }, + "io.k8s.api.admissionregistration.v1.Variable": { + "description": "Variable is the definition of a variable that is used for composition. A variable is defined as a named expression.", + "properties": { + "expression": { + "description": "Expression is the expression that will be evaluated as the value of the variable. The CEL expression has access to the same identifiers as the CEL expressions in Validation.", + "type": "string" + }, + "name": { + "description": "Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. 
The variable can be accessed in other expressions through `variables` For example, if name is \"foo\", the variable will be available as `variables.foo`", + "type": "string" + } + }, + "required": [ + "name", + "expression" + ], + "type": "object", + "x-kubernetes-map-type": "atomic" + }, "io.k8s.api.admissionregistration.v1.WebhookClientConfig": { "description": "WebhookClientConfig contains the information to make a TLS connection with the webhook", "properties": { @@ -1451,7 +1925,8 @@ "required": [ "type", "status", - "reason" + "reason", + "message" ], "type": "object" }, @@ -1742,6 +2217,10 @@ "$ref": "#/definitions/io.k8s.api.apps.v1.DaemonSetCondition" }, "type": "array", + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "type", "x-kubernetes-patch-strategy": "merge" }, @@ -1976,6 +2455,10 @@ "$ref": "#/definitions/io.k8s.api.apps.v1.DeploymentCondition" }, "type": "array", + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "type", "x-kubernetes-patch-strategy": "merge" }, @@ -2160,6 +2643,10 @@ "$ref": "#/definitions/io.k8s.api.apps.v1.ReplicaSetCondition" }, "type": "array", + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "type", "x-kubernetes-patch-strategy": "merge" }, @@ -2365,11 +2852,11 @@ }, "ordinals": { "$ref": "#/definitions/io.k8s.api.apps.v1.StatefulSetOrdinals", - "description": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is beta." + "description": "ordinals controls the numbering of replica indices in a StatefulSet. 
The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested." }, "persistentVolumeClaimRetentionPolicy": { "$ref": "#/definitions/io.k8s.api.apps.v1.StatefulSetPersistentVolumeClaimRetentionPolicy", - "description": "persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent volume claims created from volumeClaimTemplates. By default, all persistent volume claims are created as needed and retained until manually deleted. This policy allows the lifecycle to be altered, for example by deleting persistent volume claims when their stateful set is deleted, or when their pod is scaled down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha. +optional" + "description": "persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent volume claims created from volumeClaimTemplates. By default, all persistent volume claims are created as needed and retained until manually deleted. This policy allows the lifecycle to be altered, for example by deleting persistent volume claims when their stateful set is deleted, or when their pod is scaled down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is beta." }, "podManagementPolicy": { "description": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. 
The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", @@ -2406,7 +2893,8 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeClaim" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "required": [ @@ -2435,6 +2923,10 @@ "$ref": "#/definitions/io.k8s.api.apps.v1.StatefulSetCondition" }, "type": "array", + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "type", "x-kubernetes-patch-strategy": "merge" }, @@ -2596,7 +3088,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "boundObjectRef": { "$ref": "#/definitions/io.k8s.api.authentication.v1.BoundObjectReference", @@ -2675,7 +3168,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "token": { "description": "Token is the opaque bearer token.", @@ -2692,7 +3186,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "authenticated": { "description": "Authenticated indicates that the token was associated with a known user.", @@ -2727,7 +3222,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "uid": { "description": "A unique value that identifies this user across time. If this user is deleted and another user by the same name is added, they will have different UIDs.", @@ -2818,6 +3314,42 @@ }, "type": "object" }, + "io.k8s.api.authorization.v1.FieldSelectorAttributes": { + "description": "FieldSelectorAttributes indicates a field limited access. Webhook authors are encouraged to * ensure rawSelector and requirements are not both set * consider the requirements field if set * not try to parse or consider the rawSelector field if set. 
This is to avoid another CVE-2022-2880 (i.e. getting different systems to agree on how exactly to parse a query is not something we want), see https://www.oxeye.io/resources/golang-parameter-smuggling-attack for more details. For the *SubjectAccessReview endpoints of the kube-apiserver: * If rawSelector is empty and requirements are empty, the request is not limited. * If rawSelector is present and requirements are empty, the rawSelector will be parsed and limited if the parsing succeeds. * If rawSelector is empty and requirements are present, the requirements should be honored * If rawSelector is present and requirements are present, the request is invalid.", + "properties": { + "rawSelector": { + "description": "rawSelector is the serialization of a field selector that would be included in a query parameter. Webhook implementations are encouraged to ignore rawSelector. The kube-apiserver's *SubjectAccessReview will parse the rawSelector as long as the requirements are not present.", + "type": "string" + }, + "requirements": { + "description": "requirements is the parsed interpretation of a field selector. All requirements must be met for a resource instance to match the selector. Webhook implementations should handle requirements, but how to handle them is up to the webhook. Since requirements can only limit the request, it is safe to authorize as unlimited request if the requirements are not understood.", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.FieldSelectorRequirement" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "type": "object" + }, + "io.k8s.api.authorization.v1.LabelSelectorAttributes": { + "description": "LabelSelectorAttributes indicates a label limited access. Webhook authors are encouraged to * ensure rawSelector and requirements are not both set * consider the requirements field if set * not try to parse or consider the rawSelector field if set. This is to avoid another CVE-2022-2880 (i.e. 
getting different systems to agree on how exactly to parse a query is not something we want), see https://www.oxeye.io/resources/golang-parameter-smuggling-attack for more details. For the *SubjectAccessReview endpoints of the kube-apiserver: * If rawSelector is empty and requirements are empty, the request is not limited. * If rawSelector is present and requirements are empty, the rawSelector will be parsed and limited if the parsing succeeds. * If rawSelector is empty and requirements are present, the requirements should be honored * If rawSelector is present and requirements are present, the request is invalid.", + "properties": { + "rawSelector": { + "description": "rawSelector is the serialization of a field selector that would be included in a query parameter. Webhook implementations are encouraged to ignore rawSelector. The kube-apiserver's *SubjectAccessReview will parse the rawSelector as long as the requirements are not present.", + "type": "string" + }, + "requirements": { + "description": "requirements is the parsed interpretation of a label selector. All requirements must be met for a resource instance to match the selector. Webhook implementations should handle requirements, but how to handle them is up to the webhook. Since requirements can only limit the request, it is safe to authorize as unlimited request if the requirements are not understood.", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "type": "object" + }, "io.k8s.api.authorization.v1.LocalSubjectAccessReview": { "description": "LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. 
Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking.", "properties": { @@ -2876,14 +3408,16 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "verbs": { "description": "Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. \"*\" means all.", "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "required": [ @@ -2894,10 +3428,18 @@ "io.k8s.api.authorization.v1.ResourceAttributes": { "description": "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface", "properties": { + "fieldSelector": { + "$ref": "#/definitions/io.k8s.api.authorization.v1.FieldSelectorAttributes", + "description": "fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it.\n\nThis field is alpha-level. To use this field, you must enable the `AuthorizeWithSelectors` feature gate (disabled by default)." + }, "group": { "description": "Group is the API Group of the Resource. \"*\" means all.", "type": "string" }, + "labelSelector": { + "$ref": "#/definitions/io.k8s.api.authorization.v1.LabelSelectorAttributes", + "description": "labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it.\n\nThis field is alpha-level. To use this field, you must enable the `AuthorizeWithSelectors` feature gate (disabled by default)." + }, "name": { "description": "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". 
\"\" (empty) means all.", "type": "string" @@ -2933,28 +3475,32 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "resourceNames": { "description": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. \"*\" means all.", "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "resources": { "description": "Resources is a list of resources this rule applies to. \"*\" means all in the specified apiGroups.\n \"*/foo\" represents the subresource 'foo' for all resources in the specified apiGroups.", "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "verbs": { "description": "Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "required": [ @@ -3112,7 +3658,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "nonResourceAttributes": { "$ref": "#/definitions/io.k8s.api.authorization.v1.NonResourceAttributes", @@ -3174,14 +3721,16 @@ "items": { "$ref": "#/definitions/io.k8s.api.authorization.v1.NonResourceRule" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "resourceRules": { "description": "ResourceRules is the list of actions the subject is allowed to perform on resources. 
The list ordering isn't significant, may contain duplicates, and possibly be incomplete.", "items": { "$ref": "#/definitions/io.k8s.api.authorization.v1.ResourceRule" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "required": [ @@ -4266,6 +4815,10 @@ "format": "int32", "type": "integer" }, + "managedBy": { + "description": "ManagedBy field indicates the controller that manages a Job. The k8s Job controller reconciles jobs which don't have this field at all or the field value is the reserved string `kubernetes.io/job-controller`, but skips reconciling Jobs with a custom value for this field. The value must be a valid domain-prefixed path (e.g. acme.io/foo) - all characters before the first \"/\" must be a valid subdomain as defined by RFC 1123. All characters trailing the first \"/\" must be valid HTTP Path characters as defined by RFC 3986. The value cannot exceed 63 characters. This field is immutable.\n\nThis field is alpha-level. The job controller accepts setting the field when the feature gate JobManagedBy is enabled (disabled by default).", + "type": "string" + }, "manualSelector": { "description": "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector", "type": "boolean" @@ -4282,7 +4835,7 @@ }, "podFailurePolicy": { "$ref": "#/definitions/io.k8s.api.batch.v1.PodFailurePolicy", - "description": "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.\n\nThis field is beta-level. It can be used when the `JobPodFailurePolicy` feature gate is enabled (enabled by default)." + "description": "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure." }, "podReplacementPolicy": { "description": "podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\n when they are terminating (has a metadata.deletionTimestamp) or failed.\n- Failed means to wait until a previously created Pod is fully terminated (has phase\n Failed or Succeeded) before creating a replacement Pod.\n\nWhen using podFailurePolicy, Failed is the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is a beta field. To use this, enable the JobPodReplacementPolicy feature toggle. 
This is on by default.", @@ -4292,6 +4845,10 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", "description": "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors" }, + "successPolicy": { + "$ref": "#/definitions/io.k8s.api.batch.v1.SuccessPolicy", + "description": "successPolicy specifies the policy when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated.\n\nThis field is beta-level. To use this field, you must enable the `JobSuccessPolicy` feature gate (enabled by default)." + }, "suspend": { "description": "suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.", "type": "boolean" @@ -4315,7 +4872,7 @@ "description": "JobStatus represents the current state of a Job.", "properties": { "active": { - "description": "The number of pending and running pods.", + "description": "The number of pending and running pods which are not terminating (without a deletionTimestamp). 
The value is zero for finished jobs.", "format": "int32", "type": "integer" }, @@ -4325,10 +4882,10 @@ }, "completionTime": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", - "description": "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC. The completion time is only set when the job finishes successfully." + "description": "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC. The completion time is set when the job finishes successfully, and only then. The value cannot be updated or removed. The value indicates the same or later point in time as the startTime field." }, "conditions": { - "description": "The latest available observations of an object's current state. When a Job fails, one of the conditions will have type \"Failed\" and status true. When a Job is suspended, one of the conditions will have type \"Suspended\" and status true; when the Job is resumed, the status of this condition will become false. When a Job is completed, one of the conditions will have type \"Complete\" and status true. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "description": "The latest available observations of an object's current state. When a Job fails, one of the conditions will have type \"Failed\" and status true. When a Job is suspended, one of the conditions will have type \"Suspended\" and status true; when the Job is resumed, the status of this condition will become false. When a Job is completed, one of the conditions will have type \"Complete\" and status true.\n\nA job is considered finished when it is in a terminal condition, either \"Complete\" or \"Failed\". A Job cannot have both the \"Complete\" and \"Failed\" conditions. 
Additionally, it cannot be in the \"Complete\" and \"FailureTarget\" conditions. The \"Complete\", \"Failed\" and \"FailureTarget\" conditions cannot be disabled.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", "items": { "$ref": "#/definitions/io.k8s.api.batch.v1.JobCondition" }, @@ -4338,25 +4895,25 @@ "x-kubernetes-patch-strategy": "merge" }, "failed": { - "description": "The number of pods which reached phase Failed.", + "description": "The number of pods which reached phase Failed. The value increases monotonically.", "format": "int32", "type": "integer" }, "failedIndexes": { - "description": "FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", + "description": "FailedIndexes holds the failed indexes when spec.backoffLimitPerIndex is set. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". The set of failed indexes cannot overlap with the set of completed indexes.\n\nThis field is beta-level. 
It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", "type": "string" }, "ready": { - "description": "The number of pods which have a Ready condition.", + "description": "The number of active pods which have a Ready condition and are not terminating (without a deletionTimestamp).", "format": "int32", "type": "integer" }, "startTime": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", - "description": "Represents time when the job controller started processing a job. When a Job is created in the suspended state, this field is not set until the first time it is resumed. This field is reset every time a Job is resumed from suspension. It is represented in RFC3339 form and is in UTC." + "description": "Represents time when the job controller started processing a job. When a Job is created in the suspended state, this field is not set until the first time it is resumed. This field is reset every time a Job is resumed from suspension. It is represented in RFC3339 form and is in UTC.\n\nOnce set, the field can only be removed when the job is suspended. The field cannot be modified while the job is unsuspended or finished." }, "succeeded": { - "description": "The number of pods which reached phase Succeeded.", + "description": "The number of pods which reached phase Succeeded. The value increases monotonically for a given spec. However, it may decrease in reaction to scale down of elastic indexed jobs.", "format": "int32", "type": "integer" }, @@ -4367,7 +4924,7 @@ }, "uncountedTerminatedPods": { "$ref": "#/definitions/io.k8s.api.batch.v1.UncountedTerminatedPods", - "description": "uncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status:\n\n1. 
Add the pod UID to the arrays in this field. 2. Remove the pod finalizer. 3. Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null." + "description": "uncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status:\n\n1. Add the pod UID to the arrays in this field. 2. Remove the pod finalizer. 3. Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null. The structure is empty for finished jobs." } }, "type": "object" @@ -4473,6 +5030,38 @@ ], "type": "object" }, + "io.k8s.api.batch.v1.SuccessPolicy": { + "description": "SuccessPolicy describes when a Job can be declared as succeeded based on the success of some indexes.", + "properties": { + "rules": { + "description": "rules represents the list of alternative rules for the declaring the Jobs as successful before `.status.succeeded \u003e= .spec.completions`. Once any of the rules are met, the \"SucceededCriteriaMet\" condition is added, and the lingering pods are removed. The terminal state for such a Job has the \"Complete\" condition. Additionally, these rules are evaluated in order; Once the Job meets one of the rules, other rules are ignored. At most 20 elements are allowed.", + "items": { + "$ref": "#/definitions/io.k8s.api.batch.v1.SuccessPolicyRule" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "required": [ + "rules" + ], + "type": "object" + }, + "io.k8s.api.batch.v1.SuccessPolicyRule": { + "description": "SuccessPolicyRule describes rule for declaring a Job as succeeded. 
Each rule must have at least one of the \"succeededIndexes\" or \"succeededCount\" specified.", + "properties": { + "succeededCount": { + "description": "succeededCount specifies the minimal required size of the actual set of the succeeded indexes for the Job. When succeededCount is used along with succeededIndexes, the check is constrained only to the set of indexes specified by succeededIndexes. For example, given that succeededIndexes is \"1-4\", succeededCount is \"3\", and completed indexes are \"1\", \"3\", and \"5\", the Job isn't declared as succeeded because only \"1\" and \"3\" indexes are considered in that rules. When this field is null, this doesn't default to any value and is never evaluated at any time. When specified it needs to be a positive integer.", + "format": "int32", + "type": "integer" + }, + "succeededIndexes": { + "description": "succeededIndexes specifies the set of indexes which need to be contained in the actual set of the succeeded indexes for the Job. The list of indexes must be within 0 to \".spec.completions-1\" and must not contain duplicates. At least one element is required. The indexes are represented as intervals separated by commas. The intervals can be a decimal integer or a pair of decimal integers separated by a hyphen. The number are listed in represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". 
When this field is null, this field doesn't default to any value and is never evaluated at any time.", + "type": "string" + } + }, + "type": "object" + }, "io.k8s.api.batch.v1.UncountedTerminatedPods": { "description": "UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't been accounted in Job status counters.", "properties": { @@ -4835,25 +5424,135 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime", "description": "acquireTime is a time when the current lease was acquired." }, - "holderIdentity": { - "description": "holderIdentity contains the identity of the holder of a current lease.", + "holderIdentity": { + "description": "holderIdentity contains the identity of the holder of a current lease. If Coordinated Leader Election is used, the holder identity must be equal to the elected LeaseCandidate.metadata.name field.", + "type": "string" + }, + "leaseDurationSeconds": { + "description": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measured against the time of last observed renewTime.", + "format": "int32", + "type": "integer" + }, + "leaseTransitions": { + "description": "leaseTransitions is the number of transitions of a lease between holders.", + "format": "int32", + "type": "integer" + }, + "preferredHolder": { + "description": "PreferredHolder signals to a lease holder that the lease has a more optimal holder and should be given up. This field can only be set if Strategy is also set.", + "type": "string" + }, + "renewTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime", + "description": "renewTime is a time when the current holder of a lease has last updated the lease." + }, + "strategy": { + "description": "Strategy indicates the strategy for picking the leader for coordinated leader election. If the field is not specified, there is no active coordination for this lease. 
(Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.coordination.v1alpha1.LeaseCandidate": { + "description": "LeaseCandidate defines a candidate for a Lease object. Candidates are created such that coordinated leader election will pick the best leader from the list of candidates.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.coordination.v1alpha1.LeaseCandidateSpec", + "description": "spec contains the specification of the Lease. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "coordination.k8s.io", + "kind": "LeaseCandidate", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.coordination.v1alpha1.LeaseCandidateList": { + "description": "LeaseCandidateList is a list of Lease objects.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is a list of schema objects.", + "items": { + "$ref": "#/definitions/io.k8s.api.coordination.v1alpha1.LeaseCandidate" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "coordination.k8s.io", + "kind": "LeaseCandidateList", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.coordination.v1alpha1.LeaseCandidateSpec": { + "description": "LeaseCandidateSpec is a specification of a Lease.", + "properties": { + "binaryVersion": { + "description": "BinaryVersion is the binary version. 
It must be in a semver format without leading `v`. This field is required when strategy is \"OldestEmulationVersion\"", + "type": "string" + }, + "emulationVersion": { + "description": "EmulationVersion is the emulation version. It must be in a semver format without leading `v`. EmulationVersion must be less than or equal to BinaryVersion. This field is required when strategy is \"OldestEmulationVersion\"", + "type": "string" + }, + "leaseName": { + "description": "LeaseName is the name of the lease for which this candidate is contending. This field is immutable.", "type": "string" }, - "leaseDurationSeconds": { - "description": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed renewTime.", - "format": "int32", - "type": "integer" + "pingTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime", + "description": "PingTime is the last time that the server has requested the LeaseCandidate to renew. It is only done during leader election to check if any LeaseCandidates have become ineligible. When PingTime is updated, the LeaseCandidate will respond by updating RenewTime." }, - "leaseTransitions": { - "description": "leaseTransitions is the number of transitions of a lease between holders.", - "format": "int32", - "type": "integer" + "preferredStrategies": { + "description": "PreferredStrategies indicates the list of strategies for picking the leader for coordinated leader election. The list is ordered, and the first strategy supersedes all other strategies. The list is used by coordinated leader election to make a decision about the final election strategy. This follows as - If all clients have strategy X as the first element in this list, strategy X will be used. 
- If a candidate has strategy [X] and another candidate has strategy [Y, X], Y supersedes X and strategy Y\n will be used.\n- If a candidate has strategy [X, Y] and another candidate has strategy [Y, X], this is a user error and leader\n election will not operate the Lease until resolved.\n(Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" }, "renewTime": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime", - "description": "renewTime is a time when the current holder of a lease has last updated the lease." + "description": "RenewTime is the time that the LeaseCandidate was last updated. Any time a Lease needs to do leader election, the PingTime field is updated to signal to the LeaseCandidate that they should update the RenewTime. Old LeaseCandidate objects are also garbage collected if it has been hours since the last renew. The PingTime field is updated regularly to prevent garbage collection for still active LeaseCandidates." } }, + "required": [ + "leaseName", + "preferredStrategies" + ], "type": "object" }, "io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource": { @@ -4900,6 +5599,31 @@ }, "type": "object" }, + "io.k8s.api.core.v1.AppArmorProfile": { + "description": "AppArmorProfile defines a pod or container's AppArmor settings.", + "properties": { + "localhostProfile": { + "description": "localhostProfile indicates a profile loaded on the node that should be used. The profile must be preconfigured on the node to work. Must match the loaded name of the profile. Must be set if and only if type is \"Localhost\".", + "type": "string" + }, + "type": { + "description": "type indicates which kind of AppArmor profile will be applied. 
Valid options are:\n Localhost - a profile pre-loaded on the node.\n RuntimeDefault - the container runtime's default profile.\n Unconfined - no AppArmor enforcement.", + "type": "string" + } + }, + "required": [ + "type" + ], + "type": "object", + "x-kubernetes-unions": [ + { + "discriminator": "type", + "fields-to-discriminateBy": { + "localhostProfile": "LocalhostProfile" + } + } + ] + }, "io.k8s.api.core.v1.AttachedVolume": { "description": "AttachedVolume describes a volume attached to a node", "properties": { @@ -5125,14 +5849,16 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "drop": { "description": "Removed capabilities", "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "type": "object" @@ -5145,7 +5871,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "path": { "description": "path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /", @@ -5181,7 +5908,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "path": { "description": "path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /", @@ -5259,20 +5987,6 @@ ], "type": "object" }, - "io.k8s.api.core.v1.ClaimSource": { - "description": "ClaimSource describes a reference to a ResourceClaim.\n\nExactly one of these fields should be set. 
Consumers of this type must treat an empty object as if it has an unknown value.", - "properties": { - "resourceClaimName": { - "description": "ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod.", - "type": "string" - }, - "resourceClaimTemplateName": { - "description": "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod.\n\nThe template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.\n\nThis field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim.", - "type": "string" - } - }, - "type": "object" - }, "io.k8s.api.core.v1.ClientIPConfig": { "description": "ClientIPConfig represents the configurations of Client IP based session affinity.", "properties": { @@ -5352,6 +6066,10 @@ "$ref": "#/definitions/io.k8s.api.core.v1.ComponentCondition" }, "type": "array", + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "type", "x-kubernetes-patch-strategy": "merge" }, @@ -5456,7 +6174,7 @@ "description": "ConfigMapEnvSource selects a ConfigMap to populate the environment variables with.\n\nThe contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables.", "properties": { "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "description": "Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", "type": "string" }, "optional": { @@ -5474,7 +6192,7 @@ "type": "string" }, "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "description": "Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", "type": "string" }, "optional": { @@ -5562,10 +6280,11 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.KeyToPath" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "description": "Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", "type": "string" }, "optional": { @@ -5588,10 +6307,11 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.KeyToPath" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "description": "Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", "type": "string" }, "optional": { @@ -5609,14 +6329,16 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "command": { "description": "Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "env": { "description": "List of environment variables to set in the container. Cannot be updated.", @@ -5624,6 +6346,10 @@ "$ref": "#/definitions/io.k8s.api.core.v1.EnvVar" }, "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge" }, @@ -5632,7 +6358,8 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.EnvFromSource" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "image": { "description": "Container image name. 
More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", @@ -5722,6 +6449,10 @@ "$ref": "#/definitions/io.k8s.api.core.v1.VolumeDevice" }, "type": "array", + "x-kubernetes-list-map-keys": [ + "devicePath" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "devicePath", "x-kubernetes-patch-strategy": "merge" }, @@ -5731,6 +6462,10 @@ "$ref": "#/definitions/io.k8s.api.core.v1.VolumeMount" }, "type": "array", + "x-kubernetes-list-map-keys": [ + "mountPath" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "mountPath", "x-kubernetes-patch-strategy": "merge" }, @@ -5752,7 +6487,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "sizeBytes": { "description": "The size of the image in bytes.", @@ -5902,6 +6638,19 @@ "description": "AllocatedResources represents the compute resources allocated for this container by the node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission and after successfully admitting desired pod resize.", "type": "object" }, + "allocatedResourcesStatus": { + "description": "AllocatedResourcesStatus represents the status of various resources allocated for this Pod.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ResourceStatus" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, "containerID": { "description": "ContainerID is the ID of the container in the format '\u003ctype\u003e://\u003ccontainer_id\u003e'. 
Where type is a container runtime identifier, returned from Version call of CRI API (for example \"containerd\").", "type": "string" @@ -5942,6 +6691,23 @@ "state": { "$ref": "#/definitions/io.k8s.api.core.v1.ContainerState", "description": "State holds details about the container's current condition." + }, + "user": { + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerUser", + "description": "User represents user identity information initially attached to the first process of the container" + }, + "volumeMounts": { + "description": "Status of volume mounts.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.VolumeMountStatus" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "mountPath" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "mountPath", + "x-kubernetes-patch-strategy": "merge" } }, "required": [ @@ -5953,6 +6719,16 @@ ], "type": "object" }, + "io.k8s.api.core.v1.ContainerUser": { + "description": "ContainerUser represents user identity information", + "properties": { + "linux": { + "$ref": "#/definitions/io.k8s.api.core.v1.LinuxContainerUser", + "description": "Linux holds user identity information initially attached to the first process of the containers in Linux. Note that the actual running identity can be changed if the process has enough privilege to do so." + } + }, + "type": "object" + }, "io.k8s.api.core.v1.DaemonEndpoint": { "description": "DaemonEndpoint contains information about a single Daemon endpoint.", "properties": { @@ -5975,7 +6751,8 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.DownwardAPIVolumeFile" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "type": "object" @@ -5985,7 +6762,7 @@ "properties": { "fieldRef": { "$ref": "#/definitions/io.k8s.api.core.v1.ObjectFieldSelector", - "description": "Required: Selects a field of the pod: only annotations, labels, name and namespace are supported." 
+ "description": "Required: Selects a field of the pod: only annotations, labels, name, namespace and uid are supported." }, "mode": { "description": "Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", @@ -6019,7 +6796,8 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.DownwardAPIVolumeFile" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "type": "object" @@ -6099,21 +6877,24 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.EndpointAddress" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "notReadyAddresses": { "description": "IP addresses which offer the related ports but are not currently marked as ready because they have not yet finished starting, have recently failed a readiness check, or have recently failed a liveness check.", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.EndpointAddress" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "ports": { "description": "Port numbers available on the related IP addresses.", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.EndpointPort" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "type": "object" @@ -6138,7 +6919,8 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.EndpointSubset" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "type": "object", @@ -6254,14 +7036,16 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "command": { "description": "Entrypoint array. Not executed within a shell. 
The image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "env": { "description": "List of environment variables to set in the container. Cannot be updated.", @@ -6269,6 +7053,10 @@ "$ref": "#/definitions/io.k8s.api.core.v1.EnvVar" }, "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge" }, @@ -6277,7 +7065,8 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.EnvFromSource" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "image": { "description": "Container image name. 
More info: https://kubernetes.io/docs/concepts/containers/images", @@ -6371,6 +7160,10 @@ "$ref": "#/definitions/io.k8s.api.core.v1.VolumeDevice" }, "type": "array", + "x-kubernetes-list-map-keys": [ + "devicePath" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "devicePath", "x-kubernetes-patch-strategy": "merge" }, @@ -6380,6 +7173,10 @@ "$ref": "#/definitions/io.k8s.api.core.v1.VolumeMount" }, "type": "array", + "x-kubernetes-list-map-keys": [ + "mountPath" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "mountPath", "x-kubernetes-patch-strategy": "merge" }, @@ -6561,7 +7358,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "type": "object" @@ -6587,14 +7385,16 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "wwids": { "description": "wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.", "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "type": "object" @@ -6801,7 +7601,8 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.HTTPHeader" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "path": { "description": "Path to access on the HTTP server.", @@ -6847,13 +7648,17 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "ip": { "description": "IP address of the host file entry.", "type": "string" } }, + "required": [ + "ip" + ], "type": "object" }, "io.k8s.api.core.v1.HostIP": { @@ -6864,6 +7669,9 @@ "type": "string" } }, + "required": [ + "ip" + ], "type": "object" }, "io.k8s.api.core.v1.HostPathVolumeSource": { @@ -6920,7 +7728,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, 
"readOnly": { "description": "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", @@ -6979,7 +7788,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "readOnly": { "description": "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", @@ -7001,6 +7811,20 @@ ], "type": "object" }, + "io.k8s.api.core.v1.ImageVolumeSource": { + "description": "ImageVolumeSource represents a image volume resource.", + "properties": { + "pullPolicy": { + "description": "Policy for pulling OCI objects. Possible values are: Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.", + "type": "string" + }, + "reference": { + "description": "Required: Image or artifact reference to be used. Behaves in the same way as pod.spec.containers[*].image. Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. 
More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + "type": "string" + } + }, + "type": "object" + }, "io.k8s.api.core.v1.KeyToPath": { "description": "Maps a string key to a path within a volume.", "properties": { @@ -7180,7 +8004,8 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.LimitRangeItem" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "required": [ @@ -7188,6 +8013,35 @@ ], "type": "object" }, + "io.k8s.api.core.v1.LinuxContainerUser": { + "description": "LinuxContainerUser represents user identity information in Linux containers", + "properties": { + "gid": { + "description": "GID is the primary gid initially attached to the first process in the container", + "format": "int64", + "type": "integer" + }, + "supplementalGroups": { + "description": "SupplementalGroups are the supplemental groups initially attached to the first process in the container", + "items": { + "format": "int64", + "type": "integer" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "uid": { + "description": "UID is the primary uid initially attached to the first process in the container", + "format": "int64", + "type": "integer" + } + }, + "required": [ + "uid", + "gid" + ], + "type": "object" + }, "io.k8s.api.core.v1.LoadBalancerIngress": { "description": "LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.", "properties": { @@ -7222,7 +8076,8 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.LoadBalancerIngress" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "type": "object" @@ -7231,7 +8086,7 @@ "description": "LocalObjectReference contains enough information to let you locate the referenced object inside the same 
namespace.", "properties": { "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "description": "Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", "type": "string" } }, @@ -7397,7 +8252,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "type": "object" @@ -7411,6 +8267,10 @@ "$ref": "#/definitions/io.k8s.api.core.v1.NamespaceCondition" }, "type": "array", + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "type", "x-kubernetes-patch-strategy": "merge" }, @@ -7480,7 +8340,8 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.PreferredSchedulingTerm" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "requiredDuringSchedulingIgnoredDuringExecution": { "$ref": "#/definitions/io.k8s.api.core.v1.NodeSelector", @@ -7565,6 +8426,16 @@ }, "type": "object" }, + "io.k8s.api.core.v1.NodeFeatures": { + "description": "NodeFeatures describes the set of features implemented by the CRI implementation. 
The features contained in the NodeFeatures should depend only on the cri implementation independent of runtime handlers.", + "properties": { + "supplementalGroupsPolicy": { + "description": "SupplementalGroupsPolicy is set to true if the runtime supports SupplementalGroupsPolicy and ContainerUser.", + "type": "boolean" + } + }, + "type": "object" + }, "io.k8s.api.core.v1.NodeList": { "description": "NodeList is the whole list of all Nodes which have been registered with master.", "properties": { @@ -7600,6 +8471,34 @@ } ] }, + "io.k8s.api.core.v1.NodeRuntimeHandler": { + "description": "NodeRuntimeHandler is a set of runtime handler information.", + "properties": { + "features": { + "$ref": "#/definitions/io.k8s.api.core.v1.NodeRuntimeHandlerFeatures", + "description": "Supported features." + }, + "name": { + "description": "Runtime handler name. Empty for the default runtime handler.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.NodeRuntimeHandlerFeatures": { + "description": "NodeRuntimeHandlerFeatures is a set of features implemented by the runtime handler.", + "properties": { + "recursiveReadOnlyMounts": { + "description": "RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts.", + "type": "boolean" + }, + "userNamespaces": { + "description": "UserNamespaces is set to true if the runtime handler supports UserNamespaces, including for volumes.", + "type": "boolean" + } + }, + "type": "object" + }, "io.k8s.api.core.v1.NodeSelector": { "description": "A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms.", "properties": { @@ -7608,7 +8507,8 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.NodeSelectorTerm" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "required": [ @@ -7633,7 +8533,8 @@ "items": { "type": "string" }, - 
"type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "required": [ @@ -7650,14 +8551,16 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.NodeSelectorRequirement" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "matchFields": { "description": "A list of node selector requirements by node's fields.", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.NodeSelectorRequirement" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "type": "object", @@ -7684,6 +8587,7 @@ "type": "string" }, "type": "array", + "x-kubernetes-list-type": "set", "x-kubernetes-patch-strategy": "merge" }, "providerID": { @@ -7695,7 +8599,8 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.Taint" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "unschedulable": { "description": "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration", @@ -7713,6 +8618,10 @@ "$ref": "#/definitions/io.k8s.api.core.v1.NodeAddress" }, "type": "array", + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "type", "x-kubernetes-patch-strategy": "merge" }, @@ -7727,7 +8636,7 @@ "additionalProperties": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" }, - "description": "Capacity represents the total resources of a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity", + "description": "Capacity represents the total resources of a node. 
More info: https://kubernetes.io/docs/reference/node/node-status/#capacity", "type": "object" }, "conditions": { @@ -7736,6 +8645,10 @@ "$ref": "#/definitions/io.k8s.api.core.v1.NodeCondition" }, "type": "array", + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "type", "x-kubernetes-patch-strategy": "merge" }, @@ -7747,12 +8660,17 @@ "$ref": "#/definitions/io.k8s.api.core.v1.NodeDaemonEndpoints", "description": "Endpoints of daemons running on the Node." }, + "features": { + "$ref": "#/definitions/io.k8s.api.core.v1.NodeFeatures", + "description": "Features describes the set of features implemented by the CRI implementation." + }, "images": { "description": "List of container images on this node", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.ContainerImage" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "nodeInfo": { "$ref": "#/definitions/io.k8s.api.core.v1.NodeSystemInfo", @@ -7762,19 +8680,29 @@ "description": "NodePhase is the recently observed lifecycle phase of the node. 
More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.", "type": "string" }, + "runtimeHandlers": { + "description": "The available runtime handlers.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.NodeRuntimeHandler" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, "volumesAttached": { "description": "List of volumes that are attached to the node.", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.AttachedVolume" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "volumesInUse": { "description": "List of attachable volumes in use (mounted) by the node.", "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "type": "object" @@ -7799,7 +8727,7 @@ "type": "string" }, "kubeProxyVersion": { - "description": "KubeProxy Version reported by the node.", + "description": "Deprecated: KubeProxy Version reported by the node.", "type": "string" }, "kubeletVersion": { @@ -7972,7 +8900,7 @@ "type": "string" }, "reason": { - "description": "reason is a unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \"ResizeStarted\" that means the underlying persistent volume is being resized.", + "description": "reason is a unique, this should be a short, machine understandable string that gives the reason for condition's last transition. 
If it reports \"Resizing\" that means the underlying persistent volume is being resized.", "type": "string" }, "status": { @@ -8031,7 +8959,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "dataSource": { "$ref": "#/definitions/io.k8s.api.core.v1.TypedLocalObjectReference", @@ -8054,7 +8983,7 @@ "type": "string" }, "volumeAttributesClassName": { - "description": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.", + "description": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. 
If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).", "type": "string" }, "volumeMode": { @@ -8076,7 +9005,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "allocatedResourceStatuses": { "additionalProperties": { @@ -8101,21 +9031,25 @@ "type": "object" }, "conditions": { - "description": "conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.", + "description": "conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'Resizing'.", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeClaimCondition" }, "type": "array", + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "type", "x-kubernetes-patch-strategy": "merge" }, "currentVolumeAttributesClassName": { - "description": "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim This is an alpha field and requires enabling VolumeAttributesClass feature.", + "description": "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. 
When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim This is a beta field and requires enabling VolumeAttributesClass feature (off by default).", "type": "string" }, "modifyVolumeStatus": { "$ref": "#/definitions/io.k8s.api.core.v1.ModifyVolumeStatus", - "description": "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted. This is an alpha field and requires enabling VolumeAttributesClass feature." + "description": "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted. This is a beta field and requires enabling VolumeAttributesClass feature (off by default)." }, "phase": { "description": "phase represents the current phase of PersistentVolumeClaim.", @@ -8201,7 +9135,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "awsElasticBlockStore": { "$ref": "#/definitions/io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource", @@ -8276,7 +9211,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "nfs": { "$ref": "#/definitions/io.k8s.api.core.v1.NFSVolumeSource", @@ -8319,7 +9255,7 @@ "description": "storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://examples.k8s.io/volumes/storageos/README.md" }, "volumeAttributesClassName": { - "description": "Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. 
For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is an alpha field and requires enabling VolumeAttributesClass feature.", + "description": "Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is a beta field and requires enabling VolumeAttributesClass feature (off by default).", "type": "string" }, "volumeMode": { @@ -8338,7 +9274,7 @@ "properties": { "lastPhaseTransitionTime": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", - "description": "lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time everytime a volume phase transitions. This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default)." + "description": "lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time everytime a volume phase transitions." }, "message": { "description": "message is a human-readable message indicating details about why the volume is in this state.", @@ -8413,14 +9349,16 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.WeightedPodAffinityTerm" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "requiredDuringSchedulingIgnoredDuringExecution": { "description": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. 
If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.PodAffinityTerm" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "type": "object" @@ -8433,7 +9371,7 @@ "description": "A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods." }, "matchLabelKeys": { - "description": "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", + "description": "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. 
Also, matchLabelKeys cannot be set when labelSelector isn't set. This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).", "items": { "type": "string" }, @@ -8441,7 +9379,7 @@ "x-kubernetes-list-type": "atomic" }, "mismatchLabelKeys": { - "description": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", + "description": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).", "items": { "type": "string" }, @@ -8457,7 +9395,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "topologyKey": { "description": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.", @@ -8477,14 +9416,16 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.WeightedPodAffinityTerm" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "requiredDuringSchedulingIgnoredDuringExecution": { "description": "If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.PodAffinityTerm" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "type": "object" @@ -8531,21 +9472,24 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "options": { "description": "A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. 
Resolution options given in Options will override those that appear in the base DNSPolicy.", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.PodDNSConfigOption" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "searches": { "description": "A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed.", "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "type": "object" @@ -8571,6 +9515,9 @@ "type": "string" } }, + "required": [ + "ip" + ], "type": "object" }, "io.k8s.api.core.v1.PodList": { @@ -8635,15 +9582,19 @@ "type": "object" }, "io.k8s.api.core.v1.PodResourceClaim": { - "description": "PodResourceClaim references exactly one ResourceClaim through a ClaimSource. It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name.", + "description": "PodResourceClaim references exactly one ResourceClaim, either directly or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim for the pod.\n\nIt adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name.", "properties": { "name": { "description": "Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL.", "type": "string" }, - "source": { - "$ref": "#/definitions/io.k8s.api.core.v1.ClaimSource", - "description": "Source describes where to find the ResourceClaim." 
+ "resourceClaimName": { + "description": "ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod.\n\nExactly one of ResourceClaimName and ResourceClaimTemplateName must be set.", + "type": "string" + }, + "resourceClaimTemplateName": { + "description": "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod.\n\nThe template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.\n\nThis field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim.\n\nExactly one of ResourceClaimName and ResourceClaimTemplateName must be set.", + "type": "string" } }, "required": [ @@ -8659,7 +9610,7 @@ "type": "string" }, "resourceClaimName": { - "description": "ResourceClaimName is the name of the ResourceClaim that was generated for the Pod in the namespace of the Pod. It this is unset, then generating a ResourceClaim was not necessary. The pod.spec.resourceClaims entry can be ignored in this case.", + "description": "ResourceClaimName is the name of the ResourceClaim that was generated for the Pod in the namespace of the Pod. If this is unset, then generating a ResourceClaim was not necessary. The pod.spec.resourceClaims entry can be ignored in this case.", "type": "string" } }, @@ -8684,6 +9635,10 @@ "io.k8s.api.core.v1.PodSecurityContext": { "description": "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. 
Field values of container.securityContext take precedence over field values of PodSecurityContext.", "properties": { + "appArmorProfile": { + "$ref": "#/definitions/io.k8s.api.core.v1.AppArmorProfile", + "description": "appArmorProfile is the AppArmor options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows." + }, "fsGroup": { "description": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\n\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.", "format": "int64", @@ -8716,19 +9671,25 @@ "description": "The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows." }, "supplementalGroups": { - "description": "A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.", + "description": "A list of groups applied to the first process run in each container, in addition to the container's primary GID and fsGroup (if specified). 
If the SupplementalGroupsPolicy feature is enabled, the supplementalGroupsPolicy field determines whether these are in addition to or instead of any group memberships defined in the container image. If unspecified, no additional groups are added, though group memberships defined in the container image may still be used, depending on the supplementalGroupsPolicy field. Note that this field cannot be set when spec.os.name is windows.", "items": { "format": "int64", "type": "integer" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "supplementalGroupsPolicy": { + "description": "Defines how supplemental groups of the first container processes are calculated. Valid values are \"Merge\" and \"Strict\". If not specified, \"Merge\" is used. (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled and the container runtime must implement support for this feature. Note that this field cannot be set when spec.os.name is windows.", + "type": "string" }, "sysctls": { "description": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. 
Note that this field cannot be set when spec.os.name is windows.", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.Sysctl" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "windowsOptions": { "$ref": "#/definitions/io.k8s.api.core.v1.WindowsSecurityContextOptions", @@ -8759,6 +9720,10 @@ "$ref": "#/definitions/io.k8s.api.core.v1.Container" }, "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge" }, @@ -8780,15 +9745,23 @@ "$ref": "#/definitions/io.k8s.api.core.v1.EphemeralContainer" }, "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge" }, "hostAliases": { - "description": "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.", + "description": "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified.", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.HostAlias" }, "type": "array", + "x-kubernetes-list-map-keys": [ + "ip" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "ip", "x-kubernetes-patch-strategy": "merge" }, @@ -8818,6 +9791,10 @@ "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" }, "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge" }, @@ -8827,11 +9804,15 @@ "$ref": "#/definitions/io.k8s.api.core.v1.Container" }, "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge" }, "nodeName": { - "description": "NodeName is a request to 
schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.", + "description": "NodeName indicates in which node this pod is scheduled. If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. This field should not be used to express a desire for the pod to be scheduled on a specific node. https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename", "type": "string" }, "nodeSelector": { @@ -8844,7 +9825,7 @@ }, "os": { "$ref": "#/definitions/io.k8s.api.core.v1.PodOS", - "description": "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup" + "description": "Specifies the OS of the containers in the pod. 
Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.appArmorProfile - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.securityContext.supplementalGroupsPolicy - spec.containers[*].securityContext.appArmorProfile - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup" }, "overhead": { "additionalProperties": { @@ -8871,7 +9852,8 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.PodReadinessGate" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "resourceClaims": { "description": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable.", @@ -8899,7 +9881,7 @@ "type": "string" }, "schedulingGates": { - "description": "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. 
If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\n\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate.", + "description": "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.PodSchedulingGate" }, @@ -8916,7 +9898,7 @@ "description": "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field." }, "serviceAccount": { - "description": "DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.", + "description": "DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.", "type": "string" }, "serviceAccountName": { @@ -8945,7 +9927,8 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.Toleration" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "topologySpreadConstraints": { "description": "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. 
All topologySpreadConstraints are ANDed.", @@ -8967,6 +9950,10 @@ "$ref": "#/definitions/io.k8s.api.core.v1.Volume" }, "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge,retainKeys" } @@ -8985,6 +9972,10 @@ "$ref": "#/definitions/io.k8s.api.core.v1.PodCondition" }, "type": "array", + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "type", "x-kubernetes-patch-strategy": "merge" }, @@ -8993,14 +9984,16 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.ContainerStatus" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "ephemeralContainerStatuses": { "description": "Status for any ephemeral containers that have run in this pod.", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.ContainerStatus" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "hostIP": { "description": "hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet. 
A pod can be assigned to a node that has a problem in kubelet which in turns mean that HostIP will not be updated even if there is a node is assigned to pod", @@ -9021,7 +10014,8 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.ContainerStatus" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "message": { "description": "A human readable message indicating details about why the pod is in this condition.", @@ -9045,6 +10039,10 @@ "$ref": "#/definitions/io.k8s.api.core.v1.PodIP" }, "type": "array", + "x-kubernetes-list-map-keys": [ + "ip" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "ip", "x-kubernetes-patch-strategy": "merge" }, @@ -9281,11 +10279,12 @@ "type": "integer" }, "sources": { - "description": "sources is the list of volume projections", + "description": "sources is the list of volume projections. Each entry in this list handles one source.", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.VolumeProjection" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "type": "object" @@ -9344,7 +10343,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "pool": { "description": "pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", @@ -9389,7 +10389,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "pool": { "description": "pool is the rados pool name. Default is rbd. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", @@ -9554,6 +10555,10 @@ "$ref": "#/definitions/io.k8s.api.core.v1.ReplicationControllerCondition" }, "type": "array", + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "type", "x-kubernetes-patch-strategy": "merge" }, @@ -9589,6 +10594,10 @@ "name": { "description": "Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.", "type": "string" + }, + "request": { + "description": "Request is the name chosen for a request in the referenced claim. If empty, everything from the claim is made available, otherwise only the result of this request.", + "type": "string" } }, "required": [ @@ -9618,6 +10627,23 @@ "type": "object", "x-kubernetes-map-type": "atomic" }, + "io.k8s.api.core.v1.ResourceHealth": { + "description": "ResourceHealth represents the health of a resource. It has the latest device health information. This is a part of KEP https://kep.k8s.io/4680 and historical health changes are planned to be added in future iterations of a KEP.", + "properties": { + "health": { + "description": "Health of the resource. can be one of:\n - Healthy: operates as normal\n - Unhealthy: reported unhealthy. We consider this a temporary health issue\n since we do not have a mechanism today to distinguish\n temporary and permanent issues.\n - Unknown: The status cannot be determined.\n For example, Device Plugin got unregistered and hasn't been re-registered since.\n\nIn future we may want to introduce the PermanentlyUnhealthy Status.", + "type": "string" + }, + "resourceID": { + "description": "ResourceID is the unique identifier of the resource. 
See the ResourceID type for more information.", + "type": "string" + } + }, + "required": [ + "resourceID" + ], + "type": "object" + }, "io.k8s.api.core.v1.ResourceQuota": { "description": "ResourceQuota sets aggregate quota restrictions enforced per namespace", "properties": { @@ -9705,7 +10731,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "type": "object" @@ -9761,6 +10788,29 @@ }, "type": "object" }, + "io.k8s.api.core.v1.ResourceStatus": { + "properties": { + "name": { + "description": "Name of the resource. Must be unique within the pod and match one of the resources from the pod spec.", + "type": "string" + }, + "resources": { + "description": "List of unique Resources health. Each element in the list contains an unique resource ID and resource health. At a minimum, ResourceID must uniquely identify the Resource allocated to the Pod on the Node for the lifetime of a Pod. See ResourceID type for it's definition.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ResourceHealth" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "resourceID" + ], + "x-kubernetes-list-type": "map" + } + }, + "required": [ + "name" + ], + "type": "object" + }, "io.k8s.api.core.v1.SELinuxOptions": { "description": "SELinuxOptions are the labels to be applied to the container", "properties": { @@ -9893,7 +10943,8 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.ScopedResourceSelectorRequirement" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "type": "object", @@ -9915,7 +10966,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "required": [ @@ -10001,7 +11053,7 @@ "description": "SecretEnvSource selects a Secret to populate the environment variables with.\n\nThe contents of the target Secret's Data field will represent the key-value pairs as environment variables.", "properties": { "name": { - 
"description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "description": "Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", "type": "string" }, "optional": { @@ -10019,7 +11071,7 @@ "type": "string" }, "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "description": "Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", "type": "string" }, "optional": { @@ -10076,10 +11128,11 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.KeyToPath" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "description": "Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", "type": "string" }, "optional": { @@ -10117,7 +11170,8 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.KeyToPath" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "optional": { "description": "optional field specify whether the Secret or its keys must be defined", @@ -10137,6 +11191,10 @@ "description": "AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.", "type": "boolean" }, + "appArmorProfile": { + "$ref": "#/definitions/io.k8s.api.core.v1.AppArmorProfile", + "description": "appArmorProfile is the AppArmor options to use by this container. If set, this profile overrides the pod's appArmorProfile. Note that this field cannot be set when spec.os.name is windows." + }, "capabilities": { "$ref": "#/definitions/io.k8s.api.core.v1.Capabilities", "description": "The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows." @@ -10146,7 +11204,7 @@ "type": "boolean" }, "procMount": { - "description": "procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows.", + "description": "procMount denotes the type of proc mount to use for the containers. The default value is Default which uses the container runtime defaults for readonly paths and masked paths. 
This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows.", "type": "string" }, "readOnlyRootFilesystem": { @@ -10231,7 +11289,8 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "kind": { "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", @@ -10247,6 +11306,10 @@ "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference" }, "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge" } @@ -10411,7 +11474,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "externalName": { "description": "externalName is the external reference that discovery mechanisms will return as an alias for this service (e.g. a DNS CNAME record). No proxying will be involved. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires `type` to be \"ExternalName\".", @@ -10455,7 +11519,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "ports": { "description": "The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", @@ -10491,6 +11556,10 @@ "$ref": "#/definitions/io.k8s.api.core.v1.SessionAffinityConfig", "description": "sessionAffinityConfig contains the configurations of session affinity." 
}, + "trafficDistribution": { + "description": "TrafficDistribution offers a way to express preferences for how traffic is distributed to Service endpoints. Implementations can use this field as a hint, but are not required to guarantee strict adherence. If the field is not set, the implementation will apply its default routing strategy. If set to \"PreferClose\", implementations should prioritize endpoints that are topologically close (e.g., same zone). This is an alpha field and requires enabling ServiceTrafficDistribution feature.", + "type": "string" + }, "type": { "description": "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object or EndpointSlice objects. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a virtual IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the same endpoints as the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the same endpoints as the clusterIP. \"ExternalName\" aliases this service to the specified externalName. Several other fields do not apply to ExternalName services. 
More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types", "type": "string" @@ -10697,7 +11766,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "required": [ @@ -10714,7 +11784,8 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.TopologySelectorLabelRequirement" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "type": "object", @@ -10741,7 +11812,7 @@ "type": "integer" }, "minDomains": { - "description": "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule.\n\nFor example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew.\n\nThis is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default).", + "description": "MinDomains indicates a minimum number of eligible domains. 
When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule.\n\nFor example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew.", "format": "int32", "type": "integer" }, @@ -10888,6 +11959,10 @@ "$ref": "#/definitions/io.k8s.api.core.v1.HostPathVolumeSource", "description": "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath" }, + "image": { + "$ref": "#/definitions/io.k8s.api.core.v1.ImageVolumeSource", + "description": "image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. The volume is resolved at pod startup depending on which PullPolicy value is provided:\n\n- Always: the kubelet always attempts to pull the reference. 
Container creation will fail If the pull fails. - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.\n\nThe volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type." + }, "iscsi": { "$ref": "#/definitions/io.k8s.api.core.v1.ISCSIVolumeSource", "description": "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md" @@ -10972,7 +12047,7 @@ "type": "string" }, "mountPropagation": { - "description": "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. 
This field is beta in 1.10.", + "description": "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified (which defaults to None).", "type": "string" }, "name": { @@ -10983,6 +12058,10 @@ "description": "Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.", "type": "boolean" }, + "recursiveReadOnly": { + "description": "RecursiveReadOnly specifies whether read-only mounts should be handled recursively.\n\nIf ReadOnly is false, this field has no meaning and must be unspecified.\n\nIf ReadOnly is true, and this field is set to Disabled, the mount is not made recursively read-only. If this field is set to IfPossible, the mount is made recursively read-only, if it is supported by the container runtime. If this field is set to Enabled, the mount is made recursively read-only if it is supported by the container runtime, otherwise the pod will not be started and an error will be generated to indicate the reason.\n\nIf this field is set to IfPossible or Enabled, MountPropagation must be set to None (or be unspecified, which defaults to None).\n\nIf this field is not specified, it is treated as an equivalent of Disabled.", + "type": "string" + }, "subPath": { "description": "Path within the volume from which the container's volume should be mounted. 
Defaults to \"\" (volume's root).", "type": "string" @@ -10998,6 +12077,32 @@ ], "type": "object" }, + "io.k8s.api.core.v1.VolumeMountStatus": { + "description": "VolumeMountStatus shows status of volume mounts.", + "properties": { + "mountPath": { + "description": "MountPath corresponds to the original VolumeMount.", + "type": "string" + }, + "name": { + "description": "Name corresponds to the name of the original VolumeMount.", + "type": "string" + }, + "readOnly": { + "description": "ReadOnly corresponds to the original VolumeMount.", + "type": "boolean" + }, + "recursiveReadOnly": { + "description": "RecursiveReadOnly must be set to Disabled, Enabled, or unspecified (for non-readonly mounts). An IfPossible value in the original VolumeMount must be translated to Disabled or Enabled, depending on the mount result.", + "type": "string" + } + }, + "required": [ + "name", + "mountPath" + ], + "type": "object" + }, "io.k8s.api.core.v1.VolumeNodeAffinity": { "description": "VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from.", "properties": { @@ -11009,7 +12114,7 @@ "type": "object" }, "io.k8s.api.core.v1.VolumeProjection": { - "description": "Projection that may be projected along with other supported volume types", + "description": "Projection that may be projected along with other supported volume types. 
Exactly one of these fields must be set.", "properties": { "clusterTrustBundle": { "$ref": "#/definitions/io.k8s.api.core.v1.ClusterTrustBundleProjection", @@ -12679,7 +13784,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "required": [ @@ -12907,7 +14013,8 @@ "items": { "$ref": "#/definitions/io.k8s.api.networking.v1.IngressLoadBalancerIngress" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "type": "object" @@ -13060,14 +14167,16 @@ "items": { "$ref": "#/definitions/io.k8s.api.networking.v1.NetworkPolicyPort" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "to": { "description": "to is a list of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list.", "items": { "$ref": "#/definitions/io.k8s.api.networking.v1.NetworkPolicyPeer" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "type": "object" @@ -13080,14 +14189,16 @@ "items": { "$ref": "#/definitions/io.k8s.api.networking.v1.NetworkPolicyPeer" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "ports": { "description": "ports is a list of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). 
If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", "items": { "$ref": "#/definitions/io.k8s.api.networking.v1.NetworkPolicyPort" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "type": "object" @@ -13172,14 +14283,16 @@ "items": { "$ref": "#/definitions/io.k8s.api.networking.v1.NetworkPolicyEgressRule" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "ingress": { "description": "ingress is a list of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)", "items": { "$ref": "#/definitions/io.k8s.api.networking.v1.NetworkPolicyIngressRule" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "podSelector": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", @@ -13190,7 +14303,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "required": [ @@ -13211,9 +14325,10 @@ "type": "integer" } }, - "type": "object" + "type": "object", + "x-kubernetes-map-type": "atomic" }, - "io.k8s.api.networking.v1alpha1.IPAddress": { + "io.k8s.api.networking.v1beta1.IPAddress": { "description": "IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. 
An IP address can be represented in different formats, to guarantee the uniqueness of the IP, the name of the object is the IP address in canonical format, four decimal digits separated by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 Invalid: 10.01.2.3 or 2001:db8:0:0:0::1", "properties": { "apiVersion": { @@ -13229,7 +14344,7 @@ "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" }, "spec": { - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.IPAddressSpec", + "$ref": "#/definitions/io.k8s.api.networking.v1beta1.IPAddressSpec", "description": "spec is the desired state of the IPAddress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" } }, @@ -13238,11 +14353,11 @@ { "group": "networking.k8s.io", "kind": "IPAddress", - "version": "v1alpha1" + "version": "v1beta1" } ] }, - "io.k8s.api.networking.v1alpha1.IPAddressList": { + "io.k8s.api.networking.v1beta1.IPAddressList": { "description": "IPAddressList contains a list of IPAddress.", "properties": { "apiVersion": { @@ -13252,7 +14367,7 @@ "items": { "description": "items is the list of IPAddresses.", "items": { - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.IPAddress" + "$ref": "#/definitions/io.k8s.api.networking.v1beta1.IPAddress" }, "type": "array" }, @@ -13273,21 +14388,24 @@ { "group": "networking.k8s.io", "kind": "IPAddressList", - "version": "v1alpha1" + "version": "v1beta1" } ] }, - "io.k8s.api.networking.v1alpha1.IPAddressSpec": { + "io.k8s.api.networking.v1beta1.IPAddressSpec": { "description": "IPAddressSpec describe the attributes in an IP Address.", "properties": { "parentRef": { - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ParentReference", + "$ref": 
"#/definitions/io.k8s.api.networking.v1beta1.ParentReference", "description": "ParentRef references the resource that an IPAddress is attached to. An IPAddress must reference a parent object." } }, + "required": [ + "parentRef" + ], "type": "object" }, - "io.k8s.api.networking.v1alpha1.ParentReference": { + "io.k8s.api.networking.v1beta1.ParentReference": { "description": "ParentReference describes a reference to a parent object.", "properties": { "group": { @@ -13307,9 +14425,13 @@ "type": "string" } }, + "required": [ + "resource", + "name" + ], "type": "object" }, - "io.k8s.api.networking.v1alpha1.ServiceCIDR": { + "io.k8s.api.networking.v1beta1.ServiceCIDR": { "description": "ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). This range is used to allocate ClusterIPs to Service objects.", "properties": { "apiVersion": { @@ -13325,11 +14447,11 @@ "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" }, "spec": { - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ServiceCIDRSpec", + "$ref": "#/definitions/io.k8s.api.networking.v1beta1.ServiceCIDRSpec", "description": "spec is the desired state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" }, "status": { - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ServiceCIDRStatus", + "$ref": "#/definitions/io.k8s.api.networking.v1beta1.ServiceCIDRStatus", "description": "status represents the current state of the ServiceCIDR. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" } }, @@ -13338,11 +14460,11 @@ { "group": "networking.k8s.io", "kind": "ServiceCIDR", - "version": "v1alpha1" + "version": "v1beta1" } ] }, - "io.k8s.api.networking.v1alpha1.ServiceCIDRList": { + "io.k8s.api.networking.v1beta1.ServiceCIDRList": { "description": "ServiceCIDRList contains a list of ServiceCIDR objects.", "properties": { "apiVersion": { @@ -13352,7 +14474,7 @@ "items": { "description": "items is the list of ServiceCIDRs.", "items": { - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ServiceCIDR" + "$ref": "#/definitions/io.k8s.api.networking.v1beta1.ServiceCIDR" }, "type": "array" }, @@ -13373,11 +14495,11 @@ { "group": "networking.k8s.io", "kind": "ServiceCIDRList", - "version": "v1alpha1" + "version": "v1beta1" } ] }, - "io.k8s.api.networking.v1alpha1.ServiceCIDRSpec": { + "io.k8s.api.networking.v1beta1.ServiceCIDRSpec": { "description": "ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.", "properties": { "cidrs": { @@ -13385,12 +14507,13 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "type": "object" }, - "io.k8s.api.networking.v1alpha1.ServiceCIDRStatus": { + "io.k8s.api.networking.v1beta1.ServiceCIDRStatus": { "description": "ServiceCIDRStatus describes the current state of the ServiceCIDR.", "properties": { "conditions": { @@ -13639,83 +14762,276 @@ }, "type": "object" }, - "io.k8s.api.policy.v1.PodDisruptionBudgetStatus": { - "description": "PodDisruptionBudgetStatus represents information about the status of a PodDisruptionBudget. Status may trail the actual state of a system.", + "io.k8s.api.policy.v1.PodDisruptionBudgetStatus": { + "description": "PodDisruptionBudgetStatus represents information about the status of a PodDisruptionBudget. 
Status may trail the actual state of a system.", + "properties": { + "conditions": { + "description": "Conditions contain conditions for PDB. The disruption controller sets the DisruptionAllowed condition. The following are known values for the reason field (additional reasons could be added in the future): - SyncFailed: The controller encountered an error and wasn't able to compute\n the number of allowed disruptions. Therefore no disruptions are\n allowed and the status of the condition will be False.\n- InsufficientPods: The number of pods are either at or below the number\n required by the PodDisruptionBudget. No disruptions are\n allowed and the status of the condition will be False.\n- SufficientPods: There are more pods than required by the PodDisruptionBudget.\n The condition will be True, and the number of allowed\n disruptions are provided by the disruptionsAllowed property.", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Condition" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "currentHealthy": { + "description": "current number of healthy pods", + "format": "int32", + "type": "integer" + }, + "desiredHealthy": { + "description": "minimum desired number of healthy pods", + "format": "int32", + "type": "integer" + }, + "disruptedPods": { + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "description": "DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by PDB controller as having been marked for deletion (or after a timeout). 
The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there it will be removed from the list automatically by PodDisruptionBudget controller after some time. If everything goes smooth this map should be empty for the most of the time. Large number of entries in the map may indicate problems with pod deletions.", + "type": "object" + }, + "disruptionsAllowed": { + "description": "Number of pod disruptions that are currently allowed.", + "format": "int32", + "type": "integer" + }, + "expectedPods": { + "description": "total number of pods counted by this disruption budget", + "format": "int32", + "type": "integer" + }, + "observedGeneration": { + "description": "Most recent generation observed when updating this PDB status. DisruptionsAllowed and other status information is valid only if observedGeneration equals to PDB's object generation.", + "format": "int64", + "type": "integer" + } + }, + "required": [ + "disruptionsAllowed", + "currentHealthy", + "desiredHealthy", + "expectedPods" + ], + "type": "object" + }, + "io.k8s.api.rbac.v1.AggregationRule": { + "description": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", + "properties": { + "clusterRoleSelectors": { + "description": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. 
If any of the selectors match, then the ClusterRole's permissions will be added", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "type": "object" + }, + "io.k8s.api.rbac.v1.ClusterRole": { + "description": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", + "properties": { + "aggregationRule": { + "$ref": "#/definitions/io.k8s.api.rbac.v1.AggregationRule", + "description": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller." + }, + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata." 
+ }, + "rules": { + "description": "Rules holds all the PolicyRules for this ClusterRole", + "items": { + "$ref": "#/definitions/io.k8s.api.rbac.v1.PolicyRule" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "rbac.authorization.k8s.io", + "kind": "ClusterRole", + "version": "v1" + } + ] + }, + "io.k8s.api.rbac.v1.ClusterRoleBinding": { + "description": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata." + }, + "roleRef": { + "$ref": "#/definitions/io.k8s.api.rbac.v1.RoleRef", + "description": "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error. This field is immutable." 
+ }, + "subjects": { + "description": "Subjects holds references to the objects the role applies to.", + "items": { + "$ref": "#/definitions/io.k8s.api.rbac.v1.Subject" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "required": [ + "roleRef" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "rbac.authorization.k8s.io", + "kind": "ClusterRoleBinding", + "version": "v1" + } + ] + }, + "io.k8s.api.rbac.v1.ClusterRoleBindingList": { + "description": "ClusterRoleBindingList is a collection of ClusterRoleBindings", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is a list of ClusterRoleBindings", + "items": { + "$ref": "#/definitions/io.k8s.api.rbac.v1.ClusterRoleBinding" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard object's metadata." 
+ } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "rbac.authorization.k8s.io", + "kind": "ClusterRoleBindingList", + "version": "v1" + } + ] + }, + "io.k8s.api.rbac.v1.ClusterRoleList": { + "description": "ClusterRoleList is a collection of ClusterRoles", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is a list of ClusterRoles", + "items": { + "$ref": "#/definitions/io.k8s.api.rbac.v1.ClusterRole" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard object's metadata." + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "rbac.authorization.k8s.io", + "kind": "ClusterRoleList", + "version": "v1" + } + ] + }, + "io.k8s.api.rbac.v1.PolicyRule": { + "description": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", "properties": { - "conditions": { - "description": "Conditions contain conditions for PDB. The disruption controller sets the DisruptionAllowed condition. 
The following are known values for the reason field (additional reasons could be added in the future): - SyncFailed: The controller encountered an error and wasn't able to compute\n the number of allowed disruptions. Therefore no disruptions are\n allowed and the status of the condition will be False.\n- InsufficientPods: The number of pods are either at or below the number\n required by the PodDisruptionBudget. No disruptions are\n allowed and the status of the condition will be False.\n- SufficientPods: There are more pods than required by the PodDisruptionBudget.\n The condition will be True, and the number of allowed\n disruptions are provided by the disruptionsAllowed property.", + "apiGroups": { + "description": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. \"\" represents the core API group and \"*\" represents all API groups.", "items": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Condition" + "type": "string" }, "type": "array", - "x-kubernetes-list-map-keys": [ - "type" - ], - "x-kubernetes-list-type": "map", - "x-kubernetes-patch-merge-key": "type", - "x-kubernetes-patch-strategy": "merge" - }, - "currentHealthy": { - "description": "current number of healthy pods", - "format": "int32", - "type": "integer" - }, - "desiredHealthy": { - "description": "minimum desired number of healthy pods", - "format": "int32", - "type": "integer" + "x-kubernetes-list-type": "atomic" }, - "disruptedPods": { - "additionalProperties": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + "nonResourceURLs": { + "description": "NonResourceURLs is a set of partial urls that a user should have access to. 
*s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.", + "items": { + "type": "string" }, - "description": "DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there it will be removed from the list automatically by PodDisruptionBudget controller after some time. If everything goes smooth this map should be empty for the most of the time. Large number of entries in the map may indicate problems with pod deletions.", - "type": "object" + "type": "array", + "x-kubernetes-list-type": "atomic" }, - "disruptionsAllowed": { - "description": "Number of pod disruptions that are currently allowed.", - "format": "int32", - "type": "integer" + "resourceNames": { + "description": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" }, - "expectedPods": { - "description": "total number of pods counted by this disruption budget", - "format": "int32", - "type": "integer" + "resources": { + "description": "Resources is a list of resources this rule applies to. 
'*' represents all resources.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" }, - "observedGeneration": { - "description": "Most recent generation observed when updating this PDB status. DisruptionsAllowed and other status information is valid only if observedGeneration equals to PDB's object generation.", - "format": "int64", - "type": "integer" - } - }, - "required": [ - "disruptionsAllowed", - "currentHealthy", - "desiredHealthy", - "expectedPods" - ], - "type": "object" - }, - "io.k8s.api.rbac.v1.AggregationRule": { - "description": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", - "properties": { - "clusterRoleSelectors": { - "description": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added", + "verbs": { + "description": "Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs.", "items": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, + "required": [ + "verbs" + ], "type": "object" }, - "io.k8s.api.rbac.v1.ClusterRole": { - "description": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", + "io.k8s.api.rbac.v1.Role": { + "description": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.", "properties": { - "aggregationRule": { - "$ref": "#/definitions/io.k8s.api.rbac.v1.AggregationRule", - "description": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. 
If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller." - }, "apiVersion": { "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", "type": "string" @@ -13729,24 +15045,25 @@ "description": "Standard object's metadata." }, "rules": { - "description": "Rules holds all the PolicyRules for this ClusterRole", + "description": "Rules holds all the PolicyRules for this Role", "items": { "$ref": "#/definitions/io.k8s.api.rbac.v1.PolicyRule" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "type": "object", "x-kubernetes-group-version-kind": [ { "group": "rbac.authorization.k8s.io", - "kind": "ClusterRole", + "kind": "Role", "version": "v1" } ] }, - "io.k8s.api.rbac.v1.ClusterRoleBinding": { - "description": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject.", + "io.k8s.api.rbac.v1.RoleBinding": { + "description": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace.", "properties": { "apiVersion": { "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", @@ -13762,14 +15079,15 @@ }, "roleRef": { "$ref": "#/definitions/io.k8s.api.rbac.v1.RoleRef", - "description": "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error. This field is immutable." + "description": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error. This field is immutable." }, "subjects": { "description": "Subjects holds references to the objects the role applies to.", "items": { "$ref": "#/definitions/io.k8s.api.rbac.v1.Subject" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "required": [ @@ -13779,22 +15097,22 @@ "x-kubernetes-group-version-kind": [ { "group": "rbac.authorization.k8s.io", - "kind": "ClusterRoleBinding", + "kind": "RoleBinding", "version": "v1" } ] }, - "io.k8s.api.rbac.v1.ClusterRoleBindingList": { - "description": "ClusterRoleBindingList is a collection of ClusterRoleBindings", + "io.k8s.api.rbac.v1.RoleBindingList": { + "description": "RoleBindingList is a collection of RoleBindings", "properties": { "apiVersion": { "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", "type": "string" }, "items": { - "description": "Items is a list of ClusterRoleBindings", + "description": "Items is a list of RoleBindings", "items": { - "$ref": "#/definitions/io.k8s.api.rbac.v1.ClusterRoleBinding" + "$ref": "#/definitions/io.k8s.api.rbac.v1.RoleBinding" }, "type": "array" }, @@ -13814,22 +15132,22 @@ "x-kubernetes-group-version-kind": [ { "group": "rbac.authorization.k8s.io", - "kind": "ClusterRoleBindingList", + "kind": "RoleBindingList", "version": "v1" } ] }, - "io.k8s.api.rbac.v1.ClusterRoleList": { - "description": "ClusterRoleList is a collection of ClusterRoles", + "io.k8s.api.rbac.v1.RoleList": { + "description": "RoleList is a collection of Roles", "properties": { "apiVersion": { "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", "type": "string" }, "items": { - "description": "Items is a list of ClusterRoles", + "description": "Items is a list of Roles", "items": { - "$ref": "#/definitions/io.k8s.api.rbac.v1.ClusterRole" + "$ref": "#/definitions/io.k8s.api.rbac.v1.Role" }, "type": "array" }, @@ -13849,89 +15167,250 @@ "x-kubernetes-group-version-kind": [ { "group": "rbac.authorization.k8s.io", - "kind": "ClusterRoleList", + "kind": "RoleList", "version": "v1" } ] }, - "io.k8s.api.rbac.v1.PolicyRule": { - "description": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", + "io.k8s.api.rbac.v1.RoleRef": { + "description": "RoleRef contains information that points to the role being used", "properties": { - "apiGroups": { - "description": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. \"\" represents the core API group and \"*\" represents all API groups.", + "apiGroup": { + "description": "APIGroup is the group for the resource being referenced", + "type": "string" + }, + "kind": { + "description": "Kind is the type of resource being referenced", + "type": "string" + }, + "name": { + "description": "Name is the name of resource being referenced", + "type": "string" + } + }, + "required": [ + "apiGroup", + "kind", + "name" + ], + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.rbac.v1.Subject": { + "description": "Subject contains a reference to the object or user identities a role binding applies to. 
This can either hold a direct API object reference, or a value for non-objects such as user and group names.", + "properties": { + "apiGroup": { + "description": "APIGroup holds the API group of the referenced subject. Defaults to \"\" for ServiceAccount subjects. Defaults to \"rbac.authorization.k8s.io\" for User and Group subjects.", + "type": "string" + }, + "kind": { + "description": "Kind of object being referenced. Values defined by this API group are \"User\", \"Group\", and \"ServiceAccount\". If the Authorizer does not recognized the kind value, the Authorizer should report an error.", + "type": "string" + }, + "name": { + "description": "Name of the object being referenced.", + "type": "string" + }, + "namespace": { + "description": "Namespace of the referenced object. If the object kind is non-namespace, such as \"User\" or \"Group\", and this value is not empty the Authorizer should report an error.", + "type": "string" + } + }, + "required": [ + "kind", + "name" + ], + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.resource.v1alpha3.AllocationResult": { + "description": "AllocationResult contains attributes of an allocated resource.", + "properties": { + "controller": { + "description": "Controller is the name of the DRA driver which handled the allocation. That driver is also responsible for deallocating the claim. It is empty when the claim can be deallocated without involving a driver.\n\nA driver may allocate devices provided by other drivers, so this driver name here can be different from the driver names listed for the results.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.", + "type": "string" + }, + "devices": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.DeviceAllocationResult", + "description": "Devices is the result of allocating devices." 
+ }, + "nodeSelector": { + "$ref": "#/definitions/io.k8s.api.core.v1.NodeSelector", + "description": "NodeSelector defines where the allocated resources are available. If unset, they are available everywhere." + } + }, + "type": "object" + }, + "io.k8s.api.resource.v1alpha3.BasicDevice": { + "description": "BasicDevice defines one device instance.", + "properties": { + "attributes": { + "additionalProperties": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.DeviceAttribute" + }, + "description": "Attributes defines the set of attributes for this device. The name of each attribute must be unique in that set.\n\nThe maximum number of attributes and capacities combined is 32.", + "type": "object" + }, + "capacity": { + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + }, + "description": "Capacity defines the set of capacities for this device. The name of each capacity must be unique in that set.\n\nThe maximum number of attributes and capacities combined is 32.", + "type": "object" + } + }, + "type": "object" + }, + "io.k8s.api.resource.v1alpha3.CELDeviceSelector": { + "description": "CELDeviceSelector contains a CEL expression for selecting a device.", + "properties": { + "expression": { + "description": "Expression is a CEL expression which evaluates a single device. It must evaluate to true when the device under consideration satisfies the desired criteria, and false when it does not. Any other result is an error and causes allocation of devices to abort.\n\nThe expression's input is an object named \"device\", which carries the following properties:\n - driver (string): the name of the driver which defines this device.\n - attributes (map[string]object): the device's attributes, grouped by prefix\n (e.g. 
device.attributes[\"dra.example.com\"] evaluates to an object with all\n of the attributes which were prefixed by \"dra.example.com\".\n - capacity (map[string]object): the device's capacities, grouped by prefix.\n\nExample: Consider a device with driver=\"dra.example.com\", which exposes two attributes named \"model\" and \"ext.example.com/family\" and which exposes one capacity named \"modules\". This input to this expression would have the following fields:\n\n device.driver\n device.attributes[\"dra.example.com\"].model\n device.attributes[\"ext.example.com\"].family\n device.capacity[\"dra.example.com\"].modules\n\nThe device.driver field can be used to check for a specific driver, either as a high-level precondition (i.e. you only want to consider devices from this driver) or as part of a multi-clause expression that is meant to consider devices from different drivers.\n\nThe value type of each attribute is defined by the device definition, and users who write these expressions must consult the documentation for their specific drivers. The value type of each capacity is Quantity.\n\nIf an unknown prefix is used as a lookup in either device.attributes or device.capacity, an empty map will be returned. Any reference to an unknown field will cause an evaluation error and allocation to abort.\n\nA robust expression should check for the existence of attributes before referencing them.\n\nFor ease of use, the cel.bind() function is enabled, and can be used to simplify expressions that access multiple attributes with the same domain. For example:\n\n cel.bind(dra, device.attributes[\"dra.example.com\"], dra.someBool \u0026\u0026 dra.anotherBool)", + "type": "string" + } + }, + "required": [ + "expression" + ], + "type": "object" + }, + "io.k8s.api.resource.v1alpha3.Device": { + "description": "Device represents one individual hardware instance that can be selected based on its attributes. 
Besides the name, exactly one field must be set.", + "properties": { + "basic": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.BasicDevice", + "description": "Basic defines one device instance." + }, + "name": { + "description": "Name is unique identifier among all devices managed by the driver in the pool. It must be a DNS label.", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "io.k8s.api.resource.v1alpha3.DeviceAllocationConfiguration": { + "description": "DeviceAllocationConfiguration gets embedded in an AllocationResult.", + "properties": { + "opaque": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.OpaqueDeviceConfiguration", + "description": "Opaque provides driver-specific configuration parameters." + }, + "requests": { + "description": "Requests lists the names of requests where the configuration applies. If empty, its applies to all requests.", "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "source": { + "description": "Source records whether the configuration comes from a class and thus is not something that a normal user would have been able to set or from a claim.", + "type": "string" + } + }, + "required": [ + "source" + ], + "type": "object" + }, + "io.k8s.api.resource.v1alpha3.DeviceAllocationResult": { + "description": "DeviceAllocationResult is the result of allocating devices.", + "properties": { + "config": { + "description": "This field is a combination of all the claim and class configuration parameters. Drivers can distinguish between those based on a flag.\n\nThis includes configuration parameters for drivers which have no allocated devices in the result because it is up to the drivers which configuration parameters they support. 
They can silently ignore unknown configuration parameters.", + "items": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.DeviceAllocationConfiguration" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" }, - "nonResourceURLs": { - "description": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.", + "results": { + "description": "Results lists all allocated devices.", "items": { - "type": "string" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.DeviceRequestAllocationResult" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "type": "object" + }, + "io.k8s.api.resource.v1alpha3.DeviceAttribute": { + "description": "DeviceAttribute must have exactly one field set.", + "properties": { + "bool": { + "description": "BoolValue is a true/false value.", + "type": "boolean" }, - "resourceNames": { - "description": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", + "int": { + "description": "IntValue is a number.", + "format": "int64", + "type": "integer" + }, + "string": { + "description": "StringValue is a string. Must not be longer than 64 characters.", + "type": "string" + }, + "version": { + "description": "VersionValue is a semantic version according to semver.org spec 2.0.0. 
Must not be longer than 64 characters.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.resource.v1alpha3.DeviceClaim": { + "description": "DeviceClaim defines how to request devices with a ResourceClaim.", + "properties": { + "config": { + "description": "This field holds configuration for multiple potential drivers which could satisfy requests in this claim. It is ignored while allocating the claim.", "items": { - "type": "string" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.DeviceClaimConfiguration" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, - "resources": { - "description": "Resources is a list of resources this rule applies to. '*' represents all resources.", + "constraints": { + "description": "These constraints must be satisfied by the set of devices that get allocated for the claim.", "items": { - "type": "string" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.DeviceConstraint" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, - "verbs": { - "description": "Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs.", + "requests": { + "description": "Requests represent individual requests for distinct devices which must all be satisfied. 
If empty, nothing needs to be allocated.", "items": { - "type": "string" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.DeviceRequest" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, - "required": [ - "verbs" - ], "type": "object" }, - "io.k8s.api.rbac.v1.Role": { - "description": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.", + "io.k8s.api.resource.v1alpha3.DeviceClaimConfiguration": { + "description": "DeviceClaimConfiguration is used for configuration parameters in DeviceClaim.", "properties": { - "apiVersion": { - "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - "type": "string" - }, - "kind": { - "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "type": "string" - }, - "metadata": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", - "description": "Standard object's metadata." + "opaque": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.OpaqueDeviceConfiguration", + "description": "Opaque provides driver-specific configuration parameters." }, - "rules": { - "description": "Rules holds all the PolicyRules for this Role", + "requests": { + "description": "Requests lists the names of requests where the configuration applies. 
If empty, it applies to all requests.", "items": { - "$ref": "#/definitions/io.k8s.api.rbac.v1.PolicyRule" + "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, - "type": "object", - "x-kubernetes-group-version-kind": [ - { - "group": "rbac.authorization.k8s.io", - "kind": "Role", - "version": "v1" - } - ] + "type": "object" }, - "io.k8s.api.rbac.v1.RoleBinding": { - "description": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace.", + "io.k8s.api.resource.v1alpha3.DeviceClass": { + "description": "DeviceClass is a vendor- or admin-provided resource that contains device configuration and selectors. It can be referenced in the device requests of a claim to apply these presets. Cluster scoped.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", "properties": { "apiVersion": { "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", @@ -13943,43 +15422,46 @@ }, "metadata": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", - "description": "Standard object's metadata." - }, - "roleRef": { - "$ref": "#/definitions/io.k8s.api.rbac.v1.RoleRef", - "description": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error. This field is immutable." 
+ "description": "Standard object metadata" }, - "subjects": { - "description": "Subjects holds references to the objects the role applies to.", - "items": { - "$ref": "#/definitions/io.k8s.api.rbac.v1.Subject" - }, - "type": "array" + "spec": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.DeviceClassSpec", + "description": "Spec defines what can be allocated and how to configure it.\n\nThis is mutable. Consumers have to be prepared for classes changing at any time, either because they get updated or replaced. Claim allocations are done once based on whatever was set in classes at the time of allocation.\n\nChanging the spec automatically increments the metadata.generation number." } }, "required": [ - "roleRef" + "spec" ], "type": "object", "x-kubernetes-group-version-kind": [ { - "group": "rbac.authorization.k8s.io", - "kind": "RoleBinding", - "version": "v1" + "group": "resource.k8s.io", + "kind": "DeviceClass", + "version": "v1alpha3" } ] }, - "io.k8s.api.rbac.v1.RoleBindingList": { - "description": "RoleBindingList is a collection of RoleBindings", + "io.k8s.api.resource.v1alpha3.DeviceClassConfiguration": { + "description": "DeviceClassConfiguration is used in DeviceClass.", + "properties": { + "opaque": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.OpaqueDeviceConfiguration", + "description": "Opaque provides driver-specific configuration parameters." + } + }, + "type": "object" + }, + "io.k8s.api.resource.v1alpha3.DeviceClassList": { + "description": "DeviceClassList is a collection of classes.", "properties": { "apiVersion": { "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", "type": "string" }, "items": { - "description": "Items is a list of RoleBindings", + "description": "Items is the list of resource classes.", "items": { - "$ref": "#/definitions/io.k8s.api.rbac.v1.RoleBinding" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.DeviceClass" }, "type": "array" }, @@ -13989,7 +15471,7 @@ }, "metadata": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", - "description": "Standard object's metadata." + "description": "Standard list metadata" } }, "required": [ @@ -13998,122 +15480,153 @@ "type": "object", "x-kubernetes-group-version-kind": [ { - "group": "rbac.authorization.k8s.io", - "kind": "RoleBindingList", - "version": "v1" + "group": "resource.k8s.io", + "kind": "DeviceClassList", + "version": "v1alpha3" } ] }, - "io.k8s.api.rbac.v1.RoleList": { - "description": "RoleList is a collection of Roles", + "io.k8s.api.resource.v1alpha3.DeviceClassSpec": { + "description": "DeviceClassSpec is used in a [DeviceClass] to define what can be allocated and how to configure it.", "properties": { - "apiVersion": { - "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - "type": "string" + "config": { + "description": "Config defines configuration parameters that apply to each device that is claimed via this class. 
Some classses may potentially be satisfied by multiple drivers, so each instance of a vendor configuration applies to exactly one driver.\n\nThey are passed to the driver, but are not considered while allocating the claim.", + "items": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.DeviceClassConfiguration" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" }, - "items": { - "description": "Items is a list of Roles", + "selectors": { + "description": "Each selector must be satisfied by a device which is claimed via this class.", "items": { - "$ref": "#/definitions/io.k8s.api.rbac.v1.Role" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.DeviceSelector" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, - "kind": { - "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "suitableNodes": { + "$ref": "#/definitions/io.k8s.api.core.v1.NodeSelector", + "description": "Only nodes matching the selector will be considered by the scheduler when trying to find a Node that fits a Pod when that Pod uses a claim that has not been allocated yet *and* that claim gets allocated through a control plane controller. It is ignored when the claim does not use a control plane controller for allocation.\n\nSetting this field is optional. If unset, all Nodes are candidates.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate." 
+ } + }, + "type": "object" + }, + "io.k8s.api.resource.v1alpha3.DeviceConstraint": { + "description": "DeviceConstraint must have exactly one field set besides Requests.", + "properties": { + "matchAttribute": { + "description": "MatchAttribute requires that all devices in question have this attribute and that its type and value are the same across those devices.\n\nFor example, if you specified \"dra.example.com/numa\" (a hypothetical example!), then only devices in the same NUMA node will be chosen. A device which does not have that attribute will not be chosen. All devices should use a value of the same type for this attribute because that is part of its specification, but if one device doesn't, then it also will not be chosen.\n\nMust include the domain qualifier.", "type": "string" }, - "metadata": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", - "description": "Standard object's metadata." + "requests": { + "description": "Requests is a list of the one or more requests in this claim which must co-satisfy this constraint. If a request is fulfilled by multiple devices, then all of the devices must satisfy the constraint. If this is not specified, this constraint applies to all requests in this claim.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" } }, - "required": [ - "items" - ], - "type": "object", - "x-kubernetes-group-version-kind": [ - { - "group": "rbac.authorization.k8s.io", - "kind": "RoleList", - "version": "v1" - } - ] + "type": "object" }, - "io.k8s.api.rbac.v1.RoleRef": { - "description": "RoleRef contains information that points to the role being used", + "io.k8s.api.resource.v1alpha3.DeviceRequest": { + "description": "DeviceRequest is a request for devices required for a claim. This is typically a request for a single resource like a device, but can also ask for several identical devices.\n\nA DeviceClassName is currently required. 
Clients must check that it is indeed set. It's absence indicates that something changed in a way that is not supported by the client yet, in which case it must refuse to handle the request.", "properties": { - "apiGroup": { - "description": "APIGroup is the group for the resource being referenced", + "adminAccess": { + "description": "AdminAccess indicates that this is a claim for administrative access to the device(s). Claims with AdminAccess are expected to be used for monitoring or other management services for a device. They ignore all ordinary claims to the device with respect to access modes and any resource allocations.", + "type": "boolean" + }, + "allocationMode": { + "description": "AllocationMode and its related fields define how devices are allocated to satisfy this request. Supported values are:\n\n- ExactCount: This request is for a specific number of devices.\n This is the default. The exact number is provided in the\n count field.\n\n- All: This request is for all of the matching devices in a pool.\n Allocation will fail if some devices are already allocated,\n unless adminAccess is requested.\n\nIf AlloctionMode is not specified, the default mode is ExactCount. If the mode is ExactCount and count is not specified, the default count is one. Any other requests must specify this field.\n\nMore modes may get added in the future. Clients must refuse to handle requests with unknown modes.", "type": "string" }, - "kind": { - "description": "Kind is the type of resource being referenced", + "count": { + "description": "Count is used only when the count mode is \"ExactCount\". Must be greater than zero. If AllocationMode is ExactCount and this field is not specified, the default is one.", + "format": "int64", + "type": "integer" + }, + "deviceClassName": { + "description": "DeviceClassName references a specific DeviceClass, which can define additional configuration and selectors to be inherited by this request.\n\nA class is required. 
Which classes are available depends on the cluster.\n\nAdministrators may use this to restrict which devices may get requested by only installing classes with selectors for permitted devices. If users are free to request anything without restrictions, then administrators can create an empty DeviceClass for users to reference.", "type": "string" }, "name": { - "description": "Name is the name of resource being referenced", + "description": "Name can be used to reference this request in a pod.spec.containers[].resources.claims entry and in a constraint of the claim.\n\nMust be a DNS label.", "type": "string" + }, + "selectors": { + "description": "Selectors define criteria which must be satisfied by a specific device in order for that device to be considered for this request. All selectors must be satisfied for a device to be considered.", + "items": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.DeviceSelector" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "required": [ - "apiGroup", - "kind", - "name" + "name", + "deviceClassName" ], - "type": "object", - "x-kubernetes-map-type": "atomic" + "type": "object" }, - "io.k8s.api.rbac.v1.Subject": { - "description": "Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, or a value for non-objects such as user and group names.", + "io.k8s.api.resource.v1alpha3.DeviceRequestAllocationResult": { + "description": "DeviceRequestAllocationResult contains the allocation result for one request.", "properties": { - "apiGroup": { - "description": "APIGroup holds the API group of the referenced subject. Defaults to \"\" for ServiceAccount subjects. Defaults to \"rbac.authorization.k8s.io\" for User and Group subjects.", + "device": { + "description": "Device references one device instance via its name in the driver's resource pool. 
It must be a DNS label.", "type": "string" }, - "kind": { - "description": "Kind of object being referenced. Values defined by this API group are \"User\", \"Group\", and \"ServiceAccount\". If the Authorizer does not recognized the kind value, the Authorizer should report an error.", + "driver": { + "description": "Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.", "type": "string" }, - "name": { - "description": "Name of the object being referenced.", + "pool": { + "description": "This name together with the driver name and the device name field identify which device was allocated (`\u003cdriver name\u003e/\u003cpool name\u003e/\u003cdevice name\u003e`).\n\nMust not be longer than 253 characters and may contain one or more DNS sub-domains separated by slashes.", "type": "string" }, - "namespace": { - "description": "Namespace of the referenced object. If the object kind is non-namespace, such as \"User\" or \"Group\", and this value is not empty the Authorizer should report an error.", + "request": { + "description": "Request is the name of the request in the claim which caused this device to be allocated. 
Multiple devices may have been allocated per request.", "type": "string" } }, "required": [ - "kind", - "name" + "request", + "driver", + "pool", + "device" ], - "type": "object", - "x-kubernetes-map-type": "atomic" + "type": "object" }, - "io.k8s.api.resource.v1alpha2.AllocationResult": { - "description": "AllocationResult contains attributes of an allocated resource.", + "io.k8s.api.resource.v1alpha3.DeviceSelector": { + "description": "DeviceSelector must have exactly one field set.", "properties": { - "availableOnNodes": { - "$ref": "#/definitions/io.k8s.api.core.v1.NodeSelector", - "description": "This field will get set by the resource driver after it has allocated the resource to inform the scheduler where it can schedule Pods using the ResourceClaim.\n\nSetting this field is optional. If null, the resource is available everywhere." - }, - "resourceHandles": { - "description": "ResourceHandles contain the state associated with an allocation that should be maintained throughout the lifetime of a claim. Each ResourceHandle contains data that should be passed to a specific kubelet plugin once it lands on a node. This data is returned by the driver after a successful allocation and is opaque to Kubernetes. Driver documentation may explain to users how to interpret this data if needed.\n\nSetting this field is optional. It has a maximum size of 32 entries. If null (or empty), it is assumed this allocation will be processed by a single kubelet plugin with no ResourceHandle data attached. The name of the kubelet plugin invoked will match the DriverName set in the ResourceClaimStatus this AllocationResult is embedded in.", - "items": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceHandle" - }, - "type": "array", - "x-kubernetes-list-type": "atomic" + "cel": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.CELDeviceSelector", + "description": "CEL contains a CEL expression for selecting a device." 
+ } + }, + "type": "object" + }, + "io.k8s.api.resource.v1alpha3.OpaqueDeviceConfiguration": { + "description": "OpaqueDeviceConfiguration contains configuration parameters for a driver in a format defined by the driver vendor.", + "properties": { + "driver": { + "description": "Driver is used to determine which kubelet plugin needs to be passed these configuration parameters.\n\nAn admission policy provided by the driver developer could use this to decide whether it needs to validate them.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.", + "type": "string" }, - "shareable": { - "description": "Shareable determines whether the resource supports more than one consumer at a time.", - "type": "boolean" + "parameters": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.runtime.RawExtension", + "description": "Parameters can contain arbitrary data. It is the responsibility of the driver developer to handle validation and versioning. Typically this includes self-identification and a version (\"kind\" + \"apiVersion\" for Kubernetes types), with conversion between different versions." } }, + "required": [ + "driver", + "parameters" + ], "type": "object" }, - "io.k8s.api.resource.v1alpha2.PodSchedulingContext": { - "description": "PodSchedulingContext objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "io.k8s.api.resource.v1alpha3.PodSchedulingContext": { + "description": "PodSchedulingContext objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DRAControlPlaneController feature gate.", "properties": { "apiVersion": { "description": "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", @@ -14128,11 +15641,11 @@ "description": "Standard object metadata" }, "spec": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingContextSpec", + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.PodSchedulingContextSpec", "description": "Spec describes where resources for the Pod are needed." }, "status": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingContextStatus", + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.PodSchedulingContextStatus", "description": "Status describes where resources for the Pod can be allocated." } }, @@ -14144,11 +15657,11 @@ { "group": "resource.k8s.io", "kind": "PodSchedulingContext", - "version": "v1alpha2" + "version": "v1alpha3" } ] }, - "io.k8s.api.resource.v1alpha2.PodSchedulingContextList": { + "io.k8s.api.resource.v1alpha3.PodSchedulingContextList": { "description": "PodSchedulingContextList is a collection of Pod scheduling objects.", "properties": { "apiVersion": { @@ -14158,7 +15671,7 @@ "items": { "description": "Items is the list of PodSchedulingContext objects.", "items": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingContext" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.PodSchedulingContext" }, "type": "array" }, @@ -14179,11 +15692,11 @@ { "group": "resource.k8s.io", "kind": "PodSchedulingContextList", - "version": "v1alpha2" + "version": "v1alpha3" } ] }, - "io.k8s.api.resource.v1alpha2.PodSchedulingContextSpec": { + "io.k8s.api.resource.v1alpha3.PodSchedulingContextSpec": { "description": "PodSchedulingContextSpec describes where resources for the Pod are needed.", "properties": { "potentialNodes": { @@ -14201,13 +15714,13 @@ }, "type": "object" }, - "io.k8s.api.resource.v1alpha2.PodSchedulingContextStatus": { + 
"io.k8s.api.resource.v1alpha3.PodSchedulingContextStatus": { "description": "PodSchedulingContextStatus describes where resources for the Pod can be allocated.", "properties": { "resourceClaims": { "description": "ResourceClaims describes resource availability for each pod.spec.resourceClaim entry where the corresponding ResourceClaim uses \"WaitForFirstConsumer\" allocation mode.", "items": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimSchedulingStatus" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.ResourceClaimSchedulingStatus" }, "type": "array", "x-kubernetes-list-map-keys": [ @@ -14218,8 +15731,8 @@ }, "type": "object" }, - "io.k8s.api.resource.v1alpha2.ResourceClaim": { - "description": "ResourceClaim describes which resources are needed by a resource consumer. Its status tracks whether the resource has been allocated and what the resulting attributes are.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "io.k8s.api.resource.v1alpha3.ResourceClaim": { + "description": "ResourceClaim describes a request for access to resources in the cluster, for use by workloads. For example, if a workload needs an accelerator device with specific properties, this is how that request is expressed. The status stanza tracks whether this claim has been satisfied and what specific resources have been allocated.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", "properties": { "apiVersion": { "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", @@ -14234,12 +15747,12 @@ "description": "Standard object metadata" }, "spec": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimSpec", - "description": "Spec describes the desired attributes of a resource that then needs to be allocated. It can only be set once when creating the ResourceClaim." + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.ResourceClaimSpec", + "description": "Spec describes what is being requested and how to configure it. The spec is immutable." }, "status": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimStatus", - "description": "Status describes whether the resource is available and with which attributes." + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.ResourceClaimStatus", + "description": "Status describes whether the claim is ready to use and what has been allocated." } }, "required": [ @@ -14250,11 +15763,11 @@ { "group": "resource.k8s.io", "kind": "ResourceClaim", - "version": "v1alpha2" + "version": "v1alpha3" } ] }, - "io.k8s.api.resource.v1alpha2.ResourceClaimConsumerReference": { + "io.k8s.api.resource.v1alpha3.ResourceClaimConsumerReference": { "description": "ResourceClaimConsumerReference contains enough information to let you locate the consumer of a ResourceClaim. 
The user must be a resource in the same namespace as the ResourceClaim.", "properties": { "apiGroup": { @@ -14281,7 +15794,7 @@ ], "type": "object" }, - "io.k8s.api.resource.v1alpha2.ResourceClaimList": { + "io.k8s.api.resource.v1alpha3.ResourceClaimList": { "description": "ResourceClaimList is a collection of claims.", "properties": { "apiVersion": { @@ -14291,7 +15804,7 @@ "items": { "description": "Items is the list of resource claims.", "items": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaim" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.ResourceClaim" }, "type": "array" }, @@ -14312,33 +15825,11 @@ { "group": "resource.k8s.io", "kind": "ResourceClaimList", - "version": "v1alpha2" + "version": "v1alpha3" } ] }, - "io.k8s.api.resource.v1alpha2.ResourceClaimParametersReference": { - "description": "ResourceClaimParametersReference contains enough information to let you locate the parameters for a ResourceClaim. The object must be in the same namespace as the ResourceClaim.", - "properties": { - "apiGroup": { - "description": "APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources.", - "type": "string" - }, - "kind": { - "description": "Kind is the type of resource being referenced. 
This is the same value as in the parameter object's metadata, for example \"ConfigMap\".", - "type": "string" - }, - "name": { - "description": "Name is the name of resource being referenced.", - "type": "string" - } - }, - "required": [ - "kind", - "name" - ], - "type": "object" - }, - "io.k8s.api.resource.v1alpha2.ResourceClaimSchedulingStatus": { + "io.k8s.api.resource.v1alpha3.ResourceClaimSchedulingStatus": { "description": "ResourceClaimSchedulingStatus contains information about one particular ResourceClaim with \"WaitForFirstConsumer\" allocation mode.", "properties": { "name": { @@ -14354,60 +15845,54 @@ "x-kubernetes-list-type": "atomic" } }, + "required": [ + "name" + ], "type": "object" }, - "io.k8s.api.resource.v1alpha2.ResourceClaimSpec": { - "description": "ResourceClaimSpec defines how a resource is to be allocated.", + "io.k8s.api.resource.v1alpha3.ResourceClaimSpec": { + "description": "ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it.", "properties": { - "allocationMode": { - "description": "Allocation can start immediately or when a Pod wants to use the resource. \"WaitForFirstConsumer\" is the default.", - "type": "string" - }, - "parametersRef": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimParametersReference", - "description": "ParametersRef references a separate object with arbitrary parameters that will be used by the driver when allocating a resource for the claim.\n\nThe object must be in the same namespace as the ResourceClaim." - }, - "resourceClassName": { - "description": "ResourceClassName references the driver and additional parameters via the name of a ResourceClass that was created as part of the driver deployment.", + "controller": { + "description": "Controller is the name of the DRA driver that is meant to handle allocation of this claim. 
If empty, allocation is handled by the scheduler while scheduling a pod.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.", "type": "string" + }, + "devices": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.DeviceClaim", + "description": "Devices defines how to request devices." } }, - "required": [ - "resourceClassName" - ], "type": "object" }, - "io.k8s.api.resource.v1alpha2.ResourceClaimStatus": { - "description": "ResourceClaimStatus tracks whether the resource has been allocated and what the resulting attributes are.", + "io.k8s.api.resource.v1alpha3.ResourceClaimStatus": { + "description": "ResourceClaimStatus tracks whether the resource has been allocated and what the result of that was.", "properties": { "allocation": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.AllocationResult", - "description": "Allocation is set by the resource driver once a resource or set of resources has been allocated successfully. If this is not specified, the resources have not been allocated yet." + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.AllocationResult", + "description": "Allocation is set once the claim has been allocated successfully." }, "deallocationRequested": { - "description": "DeallocationRequested indicates that a ResourceClaim is to be deallocated.\n\nThe driver then must deallocate this claim and reset the field together with clearing the Allocation field.\n\nWhile DeallocationRequested is set, no new consumers may be added to ReservedFor.", + "description": "Indicates that a claim is to be deallocated. While this is set, no new consumers may be added to ReservedFor.\n\nThis is only used if the claim needs to be deallocated by a DRA driver. 
That driver then must deallocate this claim and reset the field together with clearing the Allocation field.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.", "type": "boolean" }, - "driverName": { - "description": "DriverName is a copy of the driver name from the ResourceClass at the time when allocation started.", - "type": "string" - }, "reservedFor": { - "description": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started.\n\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced.", + "description": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 32 such reservations. 
This may get increased in the future, but not reduced.", "items": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimConsumerReference" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.ResourceClaimConsumerReference" }, "type": "array", "x-kubernetes-list-map-keys": [ "uid" ], - "x-kubernetes-list-type": "map" + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "uid", + "x-kubernetes-patch-strategy": "merge" } }, "type": "object" }, - "io.k8s.api.resource.v1alpha2.ResourceClaimTemplate": { - "description": "ResourceClaimTemplate is used to produce ResourceClaim objects.", + "io.k8s.api.resource.v1alpha3.ResourceClaimTemplate": { + "description": "ResourceClaimTemplate is used to produce ResourceClaim objects.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", "properties": { "apiVersion": { "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", @@ -14422,7 +15907,7 @@ "description": "Standard object metadata" }, "spec": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimTemplateSpec", + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.ResourceClaimTemplateSpec", "description": "Describes the ResourceClaim that is to be generated.\n\nThis field is immutable. A ResourceClaim will get created by the control plane for a Pod when needed and then not get updated anymore." 
} }, @@ -14434,11 +15919,11 @@ { "group": "resource.k8s.io", "kind": "ResourceClaimTemplate", - "version": "v1alpha2" + "version": "v1alpha3" } ] }, - "io.k8s.api.resource.v1alpha2.ResourceClaimTemplateList": { + "io.k8s.api.resource.v1alpha3.ResourceClaimTemplateList": { "description": "ResourceClaimTemplateList is a collection of claim templates.", "properties": { "apiVersion": { @@ -14448,7 +15933,7 @@ "items": { "description": "Items is the list of resource claim templates.", "items": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.ResourceClaimTemplate" }, "type": "array" }, @@ -14469,11 +15954,11 @@ { "group": "resource.k8s.io", "kind": "ResourceClaimTemplateList", - "version": "v1alpha2" + "version": "v1alpha3" } ] }, - "io.k8s.api.resource.v1alpha2.ResourceClaimTemplateSpec": { + "io.k8s.api.resource.v1alpha3.ResourceClaimTemplateSpec": { "description": "ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim.", "properties": { "metadata": { @@ -14481,7 +15966,7 @@ "description": "ObjectMeta may contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation." }, "spec": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimSpec", + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.ResourceClaimSpec", "description": "Spec for the ResourceClaim. The entire content is copied unchanged into the ResourceClaim that gets created from this template. The same fields as in a ResourceClaim are also valid here." 
} }, @@ -14490,15 +15975,36 @@ ], "type": "object" }, - "io.k8s.api.resource.v1alpha2.ResourceClass": { - "description": "ResourceClass is used by administrators to influence how resources are allocated.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "io.k8s.api.resource.v1alpha3.ResourcePool": { + "description": "ResourcePool describes the pool that ResourceSlices belong to.", "properties": { - "apiVersion": { - "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "generation": { + "description": "Generation tracks the change in a pool over time. Whenever a driver changes something about one or more of the resources in a pool, it must change the generation in all ResourceSlices which are part of that pool. Consumers of ResourceSlices should only consider resources from the pool with the highest generation number. The generation may be reset by drivers, which should be fine for consumers, assuming that all ResourceSlices in a pool are updated to match or deleted.\n\nCombined with ResourceSliceCount, this mechanism enables consumers to detect pools which are comprised of multiple ResourceSlices and are in an incomplete state.", + "format": "int64", + "type": "integer" + }, + "name": { + "description": "Name is used to identify the pool. For node-local devices, this is often the node name, but this is not required.\n\nIt must not be longer than 253 characters and must consist of one or more DNS sub-domains separated by slashes. 
This field is immutable.", "type": "string" }, - "driverName": { - "description": "DriverName defines the name of the dynamic resource driver that is used for allocation of a ResourceClaim that uses this class.\n\nResource drivers have a unique name in forward domain order (acme.example.com).", + "resourceSliceCount": { + "description": "ResourceSliceCount is the total number of ResourceSlices in the pool at this generation number. Must be greater than zero.\n\nConsumers can use this to check whether they have seen all ResourceSlices belonging to the same pool.", + "format": "int64", + "type": "integer" + } + }, + "required": [ + "name", + "generation", + "resourceSliceCount" + ], + "type": "object" + }, + "io.k8s.api.resource.v1alpha3.ResourceSlice": { + "description": "ResourceSlice represents one or more resources in a pool of similar resources, managed by a common driver. A pool may span more than one ResourceSlice, and exactly how many ResourceSlices comprise a pool is determined by the driver.\n\nAt the moment, the only supported resources are devices with attributes and capacities. Each device in a given pool, regardless of how many ResourceSlices, must have a unique name. The ResourceSlice in which a device gets published may change over time. The unique identifier for a device is the tuple \u003cdriver name\u003e, \u003cpool name\u003e, \u003cdevice name\u003e.\n\nWhenever a driver needs to update a pool, it increments the pool.Spec.Pool.Generation number and updates all ResourceSlices with that new number and new resource definitions. 
A consumer must only use ResourceSlices with the highest generation number and ignore all others.\n\nWhen allocating all resources in a pool matching certain criteria or when looking for the best solution among several different alternatives, a consumer should check the number of ResourceSlices in a pool (included in each ResourceSlice) to determine whether its view of a pool is complete and if not, should wait until the driver has completed updating the pool.\n\nFor resources that are not local to a node, the node name is not set. Instead, the driver may use a node selector to specify where the devices are available.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", "type": "string" }, "kind": { @@ -14509,38 +16015,34 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", "description": "Standard object metadata" }, - "parametersRef": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClassParametersReference", - "description": "ParametersRef references an arbitrary separate object that may hold parameters that will be used by the driver when allocating a resource that uses this class. A dynamic resource driver can distinguish between parameters stored here and and those stored in ResourceClaimSpec." - }, - "suitableNodes": { - "$ref": "#/definitions/io.k8s.api.core.v1.NodeSelector", - "description": "Only nodes matching the selector will be considered by the scheduler when trying to find a Node that fits a Pod when that Pod uses a ResourceClaim that has not been allocated yet.\n\nSetting this field is optional. If null, all nodes are candidates." 
+ "spec": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.ResourceSliceSpec", + "description": "Contains the information published by the driver.\n\nChanging the spec automatically increments the metadata.generation number." } }, "required": [ - "driverName" + "spec" ], "type": "object", "x-kubernetes-group-version-kind": [ { "group": "resource.k8s.io", - "kind": "ResourceClass", - "version": "v1alpha2" + "kind": "ResourceSlice", + "version": "v1alpha3" } ] }, - "io.k8s.api.resource.v1alpha2.ResourceClassList": { - "description": "ResourceClassList is a collection of classes.", + "io.k8s.api.resource.v1alpha3.ResourceSliceList": { + "description": "ResourceSliceList is a collection of ResourceSlices.", "properties": { "apiVersion": { "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", "type": "string" }, "items": { - "description": "Items is the list of resource classes.", + "description": "Items is the list of resource ResourceSlices.", "items": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClass" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.ResourceSlice" }, "type": "array" }, @@ -14560,51 +16062,49 @@ "x-kubernetes-group-version-kind": [ { "group": "resource.k8s.io", - "kind": "ResourceClassList", - "version": "v1alpha2" + "kind": "ResourceSliceList", + "version": "v1alpha3" } ] }, - "io.k8s.api.resource.v1alpha2.ResourceClassParametersReference": { - "description": "ResourceClassParametersReference contains enough information to let you locate the parameters for a ResourceClass.", + "io.k8s.api.resource.v1alpha3.ResourceSliceSpec": { + "description": "ResourceSliceSpec contains the information published by the driver in one ResourceSlice.", "properties": { - "apiGroup": { - 
"description": "APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources.", - "type": "string" + "allNodes": { + "description": "AllNodes indicates that all nodes have access to the resources in the pool.\n\nExactly one of NodeName, NodeSelector and AllNodes must be set.", + "type": "boolean" }, - "kind": { - "description": "Kind is the type of resource being referenced. This is the same value as in the parameter object's metadata.", - "type": "string" + "devices": { + "description": "Devices lists some or all of the devices in this pool.\n\nMust not have more than 128 entries.", + "items": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.Device" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" }, - "name": { - "description": "Name is the name of resource being referenced.", + "driver": { + "description": "Driver identifies the DRA driver providing the capacity information. A field selector can be used to list only ResourceSlice objects with a certain driver name.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver. This field is immutable.", "type": "string" }, - "namespace": { - "description": "Namespace that contains the referenced resource. Must be empty for cluster-scoped resources and non-empty for namespaced resources.", + "nodeName": { + "description": "NodeName identifies the node which provides the resources in this pool. A field selector can be used to list only ResourceSlice objects belonging to a certain node.\n\nThis field can be used to limit access from nodes to ResourceSlices with the same node name. It also indicates to autoscalers that adding new nodes of the same type as some old node might also make new resources available.\n\nExactly one of NodeName, NodeSelector and AllNodes must be set. 
This field is immutable.", "type": "string" + }, + "nodeSelector": { + "$ref": "#/definitions/io.k8s.api.core.v1.NodeSelector", + "description": "NodeSelector defines which nodes have access to the resources in the pool, when that pool is not limited to a single node.\n\nMust use exactly one term.\n\nExactly one of NodeName, NodeSelector and AllNodes must be set." + }, + "pool": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.ResourcePool", + "description": "Pool describes the pool that this ResourceSlice belongs to." } }, "required": [ - "kind", - "name" + "driver", + "pool" ], "type": "object" }, - "io.k8s.api.resource.v1alpha2.ResourceHandle": { - "description": "ResourceHandle holds opaque resource data for processing by a specific kubelet plugin.", - "properties": { - "data": { - "description": "Data contains the opaque data associated with this ResourceHandle. It is set by the controller component of the resource driver whose name matches the DriverName set in the ResourceClaimStatus this ResourceHandle is embedded in. It is set at allocation time and is intended for processing by the kubelet plugin whose name matches the DriverName set in this ResourceHandle.\n\nThe maximum size of this field is 16KiB. This may get increased in the future, but not reduced.", - "type": "string" - }, - "driverName": { - "description": "DriverName specifies the name of the resource driver whose kubelet plugin should be invoked to process this ResourceHandle's data once it lands on a node. This may differ from the DriverName set in ResourceClaimStatus this ResourceHandle is embedded in.", - "type": "string" - } - }, - "type": "object" - }, "io.k8s.api.scheduling.v1.PriorityClass": { "description": "PriorityClass defines mapping from a priority class name to the priority integer value. 
The value can be any valid integer.", "properties": { @@ -14760,11 +16260,11 @@ "type": "boolean" }, "fsGroupPolicy": { - "description": "fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", + "description": "fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field was immutable in Kubernetes \u003c 1.29 and now is mutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", "type": "string" }, "podInfoOnMount": { - "description": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. 
\"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", + "description": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. 
As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field was immutable in Kubernetes \u003c 1.29 and now is mutable.", "type": "boolean" }, "requiresRepublish": { @@ -14850,7 +16350,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "required": [ @@ -14903,6 +16404,10 @@ "$ref": "#/definitions/io.k8s.api.storage.v1.CSINodeDriver" }, "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge" } @@ -14968,11 +16473,7 @@ "items": { "$ref": "#/definitions/io.k8s.api.storage.v1.CSIStorageCapacity" }, - "type": "array", - "x-kubernetes-list-map-keys": [ - "name" - ], - "x-kubernetes-list-type": "map" + "type": "array" }, "kind": { "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", @@ -15027,7 +16528,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "parameters": { "additionalProperties": { @@ -15257,76 +16759,272 @@ "description": "message represents the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.", "type": "string" }, - "time": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", - "description": "time represents the time the error was encountered." + "time": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "time represents the time the error was encountered." 
+ } + }, + "type": "object" + }, + "io.k8s.api.storage.v1.VolumeNodeResources": { + "description": "VolumeNodeResources is a set of resource limits for scheduling of volumes.", + "properties": { + "count": { + "description": "count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is not specified, then the supported number of volumes on this node is unbounded.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "io.k8s.api.storage.v1alpha1.VolumeAttributesClass": { + "description": "VolumeAttributesClass represents a specification of mutable volume attributes defined by the CSI driver. The class can be specified during dynamic provisioning of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "driverName": { + "description": "Name of the CSI driver This field is immutable.", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "parameters": { + "additionalProperties": { + "type": "string" + }, + "description": "parameters hold volume attributes defined by the CSI driver. These values are opaque to the Kubernetes and are passed directly to the CSI driver. The underlying storage provider supports changing these attributes on an existing volume, however the parameters field itself is immutable. To invoke a volume update, a new VolumeAttributesClass should be created with new parameters, and the PersistentVolumeClaim should be updated to reference the new VolumeAttributesClass.\n\nThis field is required and must contain at least one key/value pair. The keys cannot be empty, and the maximum number of parameters is 512, with a cumulative max size of 256K. If the CSI driver rejects invalid parameters, the target PersistentVolumeClaim will be set to an \"Infeasible\" state in the modifyVolumeStatus field.", + "type": "object" + } + }, + "required": [ + "driverName" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "storage.k8s.io", + "kind": "VolumeAttributesClass", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.storage.v1alpha1.VolumeAttributesClassList": { + "description": "VolumeAttributesClassList is a collection of VolumeAttributesClass objects.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is the list of VolumeAttributesClass objects.", + "items": { + "$ref": "#/definitions/io.k8s.api.storage.v1alpha1.VolumeAttributesClass" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "storage.k8s.io", + "kind": "VolumeAttributesClassList", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.storage.v1beta1.VolumeAttributesClass": { + "description": "VolumeAttributesClass represents a specification of mutable volume attributes defined by the CSI driver. The class can be specified during dynamic provisioning of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "driverName": { + "description": "Name of the CSI driver This field is immutable.", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "parameters": { + "additionalProperties": { + "type": "string" + }, + "description": "parameters hold volume attributes defined by the CSI driver. These values are opaque to the Kubernetes and are passed directly to the CSI driver. The underlying storage provider supports changing these attributes on an existing volume, however the parameters field itself is immutable. To invoke a volume update, a new VolumeAttributesClass should be created with new parameters, and the PersistentVolumeClaim should be updated to reference the new VolumeAttributesClass.\n\nThis field is required and must contain at least one key/value pair. The keys cannot be empty, and the maximum number of parameters is 512, with a cumulative max size of 256K. 
If the CSI driver rejects invalid parameters, the target PersistentVolumeClaim will be set to an \"Infeasible\" state in the modifyVolumeStatus field.", + "type": "object" + } + }, + "required": [ + "driverName" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "storage.k8s.io", + "kind": "VolumeAttributesClass", + "version": "v1beta1" + } + ] + }, + "io.k8s.api.storage.v1beta1.VolumeAttributesClassList": { + "description": "VolumeAttributesClassList is a collection of VolumeAttributesClass objects.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is the list of VolumeAttributesClass objects.", + "items": { + "$ref": "#/definitions/io.k8s.api.storage.v1beta1.VolumeAttributesClass" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "storage.k8s.io", + "kind": "VolumeAttributesClassList", + "version": "v1beta1" + } + ] + }, + "io.k8s.api.storagemigration.v1alpha1.GroupVersionResource": { + "description": "The names of the group, the version, and the resource.", + "properties": { + "group": { + "description": "The name of the group.", + "type": "string" + }, + "resource": { + "description": "The name of the resource.", + "type": "string" + }, + "version": { + "description": "The name of the version.", + "type": "string" } }, "type": "object" }, - "io.k8s.api.storage.v1.VolumeNodeResources": { - "description": "VolumeNodeResources is a set of resource limits for scheduling of volumes.", + "io.k8s.api.storagemigration.v1alpha1.MigrationCondition": { + "description": "Describes the state of a migration at a certain point.", "properties": { - "count": { - "description": "count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is not specified, then the supported number of volumes on this node is unbounded.", - "format": "int32", - "type": "integer" + "lastUpdateTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "The last time this condition was updated." 
+ }, + "message": { + "description": "A human readable message indicating details about the transition.", + "type": "string" + }, + "reason": { + "description": "The reason for the condition's last transition.", + "type": "string" + }, + "status": { + "description": "Status of the condition, one of True, False, Unknown.", + "type": "string" + }, + "type": { + "description": "Type of the condition.", + "type": "string" } }, + "required": [ + "type", + "status" + ], "type": "object" }, - "io.k8s.api.storage.v1alpha1.VolumeAttributesClass": { - "description": "VolumeAttributesClass represents a specification of mutable volume attributes defined by the CSI driver. The class can be specified during dynamic provisioning of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning.", + "io.k8s.api.storagemigration.v1alpha1.StorageVersionMigration": { + "description": "StorageVersionMigration represents a migration of stored data to the latest storage version.", "properties": { "apiVersion": { "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", "type": "string" }, - "driverName": { - "description": "Name of the CSI driver This field is immutable.", - "type": "string" - }, "kind": { "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "type": "string" }, "metadata": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", - "description": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + "description": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" }, - "parameters": { - "additionalProperties": { - "type": "string" - }, - "description": "parameters hold volume attributes defined by the CSI driver. These values are opaque to the Kubernetes and are passed directly to the CSI driver. The underlying storage provider supports changing these attributes on an existing volume, however the parameters field itself is immutable. To invoke a volume update, a new VolumeAttributesClass should be created with new parameters, and the PersistentVolumeClaim should be updated to reference the new VolumeAttributesClass.\n\nThis field is required and must contain at least one key/value pair. The keys cannot be empty, and the maximum number of parameters is 512, with a cumulative max size of 256K. If the CSI driver rejects invalid parameters, the target PersistentVolumeClaim will be set to an \"Infeasible\" state in the modifyVolumeStatus field.", - "type": "object" + "spec": { + "$ref": "#/definitions/io.k8s.api.storagemigration.v1alpha1.StorageVersionMigrationSpec", + "description": "Specification of the migration." + }, + "status": { + "$ref": "#/definitions/io.k8s.api.storagemigration.v1alpha1.StorageVersionMigrationStatus", + "description": "Status of the migration." 
} }, - "required": [ - "driverName" - ], "type": "object", "x-kubernetes-group-version-kind": [ { - "group": "storage.k8s.io", - "kind": "VolumeAttributesClass", + "group": "storagemigration.k8s.io", + "kind": "StorageVersionMigration", "version": "v1alpha1" } ] }, - "io.k8s.api.storage.v1alpha1.VolumeAttributesClassList": { - "description": "VolumeAttributesClassList is a collection of VolumeAttributesClass objects.", + "io.k8s.api.storagemigration.v1alpha1.StorageVersionMigrationList": { + "description": "StorageVersionMigrationList is a collection of storage version migrations.", "properties": { "apiVersion": { "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", "type": "string" }, "items": { - "description": "items is the list of VolumeAttributesClass objects.", + "description": "Items is the list of StorageVersionMigration", "items": { - "$ref": "#/definitions/io.k8s.api.storage.v1alpha1.VolumeAttributesClass" + "$ref": "#/definitions/io.k8s.api.storagemigration.v1alpha1.StorageVersionMigration" }, - "type": "array" + "type": "array", + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" }, "kind": { "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", @@ -15343,12 +17041,52 @@ "type": "object", "x-kubernetes-group-version-kind": [ { - "group": "storage.k8s.io", - "kind": "VolumeAttributesClassList", + "group": "storagemigration.k8s.io", + "kind": "StorageVersionMigrationList", "version": "v1alpha1" } ] }, + "io.k8s.api.storagemigration.v1alpha1.StorageVersionMigrationSpec": { + "description": "Spec of the storage version migration.", + "properties": { + "continueToken": { + "description": "The token used in the list options to get the next chunk of objects to migrate. When the .status.conditions indicates the migration is \"Running\", users can use this token to check the progress of the migration.", + "type": "string" + }, + "resource": { + "$ref": "#/definitions/io.k8s.api.storagemigration.v1alpha1.GroupVersionResource", + "description": "The resource that is being migrated. The migrator sends requests to the endpoint serving the resource. Immutable." + } + }, + "required": [ + "resource" + ], + "type": "object" + }, + "io.k8s.api.storagemigration.v1alpha1.StorageVersionMigrationStatus": { + "description": "Status of the storage version migration.", + "properties": { + "conditions": { + "description": "The latest available observations of the migration's current state.", + "items": { + "$ref": "#/definitions/io.k8s.api.storagemigration.v1alpha1.MigrationCondition" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "resourceVersion": { + "description": "ResourceVersion to compare with the GC cache for performing the migration. 
This is the current resource version of given group, version and resource when kube-controller-manager first observes this StorageVersionMigration resource.", + "type": "string" + } + }, + "type": "object" + }, "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceColumnDefinition": { "description": "CustomResourceColumnDefinition specifies a column for server side printing.", "properties": { @@ -15511,7 +17249,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "kind": { "description": "kind is the serialized kind of the resource. It is normally CamelCase and singular. Custom resource instances will use this value as the `kind` attribute in API calls.", @@ -15530,7 +17269,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "singular": { "description": "singular is the singular name of the resource. It must be all lowercase. Defaults to lowercased `kind`.", @@ -15571,7 +17311,8 @@ "items": { "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionVersion" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "required": [ @@ -15605,7 +17346,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "type": "object" @@ -15618,7 +17360,8 @@ "items": { "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceColumnDefinition" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "deprecated": { "description": "deprecated indicates this version of the custom resource API is deprecated. When set to true, API requests to this version receive a warning header in the server response. 
Defaults to false.", @@ -15636,6 +17379,14 @@ "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceValidation", "description": "schema describes the schema used for validation, pruning, and defaulting of this version of the custom resource." }, + "selectableFields": { + "description": "selectableFields specifies paths to fields that may be used as field selectors. A maximum of 8 selectable fields are allowed. See https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors", + "items": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.SelectableField" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, "served": { "description": "served is a flag enabling/disabling this version from being served via REST APIs", "type": "boolean" @@ -15740,13 +17491,15 @@ "items": { "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "anyOf": { "items": { "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "default": { "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSON", @@ -15771,7 +17524,8 @@ "items": { "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSON" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "example": { "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSON" @@ -15841,7 +17595,8 @@ "items": { "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "pattern": { "type": "string" @@ -15862,7 +17617,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + 
"x-kubernetes-list-type": "atomic" }, "title": { "type": "string" @@ -15886,7 +17642,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "x-kubernetes-list-type": { "description": "x-kubernetes-list-type annotates an array to further describe its topology. This extension must only be used on lists and may have 3 possible values:\n\n1) `atomic`: the list is treated as a single entity, like a scalar.\n Atomic lists will be entirely replaced when updated. This extension\n may be used on any type of list (struct, scalar, ...).\n2) `set`:\n Sets are lists that must not have multiple items with the same value. Each\n value must be a scalar, an object with x-kubernetes-map-type `atomic` or an\n array with x-kubernetes-list-type `atomic`.\n3) `map`:\n These lists are like maps in that their elements have a non-index key\n used to identify them. Order is preserved upon merge. The map tag\n must only be used on a list with elements of type object.\nDefaults to atomic for arrays.", @@ -15901,7 +17658,7 @@ "type": "boolean" }, "x-kubernetes-validations": { - "description": "x-kubernetes-validations describes a list of validation rules written in the CEL expression language. This field is an alpha-level. Using this field requires the feature gate `CustomResourceValidationExpressions` to be enabled.", + "description": "x-kubernetes-validations describes a list of validation rules written in the CEL expression language.", "items": { "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.ValidationRule" }, @@ -15925,6 +17682,19 @@ "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrStringArray": { "description": "JSONSchemaPropsOrStringArray represents a JSONSchemaProps or a string array." 
}, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.SelectableField": { + "description": "SelectableField specifies the JSON path of a field that may be used with field selectors.", + "properties": { + "jsonPath": { + "description": "jsonPath is a simple JSON path which is evaluated against each custom resource to produce a field selector value. Only JSON paths without the array notation are allowed. Must point to a field of type string, boolean or integer. Types with enum values and strings with formats are allowed. If jsonPath refers to absent field in a resource, the jsonPath evaluates to an empty string. Must not point to metdata fields. Required.", + "type": "string" + } + }, + "required": [ + "jsonPath" + ], + "type": "object" + }, "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.ServiceReference": { "description": "ServiceReference holds a reference to Service.legacy.k8s.io", "properties": { @@ -16016,7 +17786,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "required": [ @@ -16052,14 +17823,16 @@ "items": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "versions": { "description": "versions are the versions supported in this group.", "items": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "required": [ @@ -16087,7 +17860,8 @@ "items": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "kind": { "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", @@ -16114,7 +17888,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "group": { "description": "group is the preferred group of the resource. Empty implies the group of the containing resource list. For subresources, this may have a different value, for example: Scale\".", @@ -16137,7 +17912,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "singularName": { "description": "singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface.", @@ -16188,7 +17964,8 @@ "items": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResource" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "required": [ @@ -16220,14 +17997,16 @@ "items": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "versions": { "description": "versions are the api versions that are available.", "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "required": [ @@ -16293,7 +18072,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "gracePeriodSeconds": { "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. 
zero means delete immediately.", @@ -16459,6 +18239,11 @@ "kind": "DeleteOptions", "version": "v1" }, + { + "group": "coordination.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, { "group": "coordination.k8s.io", "kind": "DeleteOptions", @@ -16577,7 +18362,7 @@ { "group": "resource.k8s.io", "kind": "DeleteOptions", - "version": "v1alpha2" + "version": "v1alpha3" }, { "group": "scheduling.k8s.io", @@ -16608,6 +18393,11 @@ "group": "storage.k8s.io", "kind": "DeleteOptions", "version": "v1beta1" + }, + { + "group": "storagemigration.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" } ] }, @@ -16615,6 +18405,32 @@ "description": "Duration is a wrapper around time.Duration which supports correct marshaling to YAML and JSON. In particular, it marshals into strings, which can be used as map keys in json.", "type": "string" }, + "io.k8s.apimachinery.pkg.apis.meta.v1.FieldSelectorRequirement": { + "description": "FieldSelectorRequirement is a selector that contains values, a key, and an operator that relates the key and values.", + "properties": { + "key": { + "description": "key is the field selector key that the requirement applies to.", + "type": "string" + }, + "operator": { + "description": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. The list of operators may grow in the future.", + "type": "string" + }, + "values": { + "description": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, "io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1": { "description": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' 
representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:\u003cname\u003e', where \u003cname\u003e is the name of a field in a struct, or key in a map 'v:\u003cvalue\u003e', where \u003cvalue\u003e is the exact json formatted value of a list item 'i:\u003cindex\u003e', where \u003cindex\u003e is position of a item in a list 'k:\u003ckeys\u003e', where \u003ckeys\u003e is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff", "type": "object" @@ -16645,7 +18461,8 @@ "items": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "matchLabels": { "additionalProperties": { @@ -16674,7 +18491,8 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" } }, "required": [ @@ -16774,6 +18592,7 @@ "type": "string" }, "type": "array", + "x-kubernetes-list-type": "set", "x-kubernetes-patch-strategy": "merge" }, "generateName": { @@ -16797,7 +18616,8 @@ "items": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "name": { "description": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names", @@ -16813,6 +18633,10 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference" }, "type": "array", + "x-kubernetes-list-map-keys": [ + "uid" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "uid", "x-kubernetes-patch-strategy": "merge" }, @@ -16918,7 +18742,8 @@ }, "details": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.StatusDetails", - "description": "Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type." + "description": "Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.", + "x-kubernetes-list-type": "atomic" }, "kind": { "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", @@ -16947,11 +18772,6 @@ "group": "", "kind": "Status", "version": "v1" - }, - { - "group": "resource.k8s.io", - "kind": "Status", - "version": "v1alpha2" } ] }, @@ -16981,7 +18801,8 @@ "items": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.StatusCause" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "group": { "description": "The group attribute of the resource associated with the status StatusReason.", @@ -17169,6 +18990,11 @@ "kind": "WatchEvent", "version": "v1" }, + { + "group": "coordination.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, { "group": "coordination.k8s.io", "kind": "WatchEvent", @@ -17287,7 +19113,7 @@ { "group": "resource.k8s.io", "kind": "WatchEvent", - "version": "v1alpha2" + "version": "v1alpha3" }, { "group": "scheduling.k8s.io", @@ -17318,6 +19144,11 @@ "group": "storage.k8s.io", "kind": "WatchEvent", "version": "v1beta1" + }, + { + "group": "storagemigration.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" } ] }, @@ -20884,14 +22715,16 @@ "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "command": { "description": "Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", "items": { "type": "string" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "env": { "description": "List of environment variables to set in the container. Cannot be updated.", @@ -20899,6 +22732,10 @@ "$ref": "#/definitions/io.k8s.api.core.v1.EnvVar" }, "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge" }, @@ -20907,7 +22744,8 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.EnvFromSource" }, - "type": "array" + "type": "array", + "x-kubernetes-list-type": "atomic" }, "image": { "description": "Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", @@ -20997,6 +22835,10 @@ "$ref": "#/definitions/io.k8s.api.core.v1.VolumeDevice" }, "type": "array", + "x-kubernetes-list-map-keys": [ + "devicePath" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "devicePath", "x-kubernetes-patch-strategy": "merge" }, @@ -21006,6 +22848,10 @@ "$ref": "#/definitions/io.k8s.api.core.v1.VolumeMount" }, "type": "array", + "x-kubernetes-list-map-keys": [ + "mountPath" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "mountPath", "x-kubernetes-patch-strategy": "merge" }, diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 4bf6fb8758..6e852cf2a0 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -6,6 +6,42 @@ }, "paths": {}, "definitions": { + "io.k8s.api.admissionregistration.v1.AuditAnnotation": { + "description": "AuditAnnotation describes how to produce an audit annotation for an API request.", 
+ "type": "object", + "required": [ + "key", + "valueExpression" + ], + "properties": { + "key": { + "description": "key specifies the audit annotation key. The audit annotation keys of a ValidatingAdmissionPolicy must be unique. The key must be a qualified name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.\n\nThe key is combined with the resource name of the ValidatingAdmissionPolicy to construct an audit annotation key: \"{ValidatingAdmissionPolicy name}/{key}\".\n\nIf an admission webhook uses the same resource name as this ValidatingAdmissionPolicy and the same audit annotation key, the annotation key will be identical. In this case, the first annotation written with the key will be included in the audit event and all subsequent annotations with the same key will be discarded.\n\nRequired.", + "type": "string" + }, + "valueExpression": { + "description": "valueExpression represents the expression which is evaluated by CEL to produce an audit annotation value. The expression must evaluate to either a string or null value. If the expression evaluates to a string, the audit annotation is included with the string value. If the expression evaluates to null or empty string the audit annotation will be omitted. The valueExpression may be no longer than 5kb in length. If the result of the valueExpression is more than 10kb in length, it will be truncated to 10kb.\n\nIf multiple ValidatingAdmissionPolicyBinding resources match an API request, then the valueExpression will be evaluated for each binding. 
All unique values produced by the valueExpressions will be joined together in a comma-separated list.\n\nRequired.", + "type": "string" + } + } + }, + "io.k8s.api.admissionregistration.v1.ExpressionWarning": { + "description": "ExpressionWarning is a warning information that targets a specific expression.", + "type": "object", + "required": [ + "fieldRef", + "warning" + ], + "properties": { + "fieldRef": { + "description": "The path to the field that refers the expression. For example, the reference to the expression of the first item of validations is \"spec.validations[0].expression\"", + "type": "string" + }, + "warning": { + "description": "The content of type checking information in a human-readable form. Each line of the warning contains the type that the expression is checked against, followed by the type check error from the compiler.", + "type": "string" + } + } + }, "io.k8s.api.admissionregistration.v1.MatchCondition": { "description": "MatchCondition represents a condition which must by fulfilled for a request to be sent to a webhook.", "type": "object", @@ -24,6 +60,41 @@ } } }, + "io.k8s.api.admissionregistration.v1.MatchResources": { + "description": "MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)", + "type": "object", + "properties": { + "excludeResourceRules": { + "description": "ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. 
The exclude rules take precedence over include rules (if a resource matches both, it is excluded)", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.NamedRuleWithOperations" + }, + "x-kubernetes-list-type": "atomic" + }, + "matchPolicy": { + "description": "matchPolicy defines how the \"MatchResources\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.\n\nDefaults to \"Equivalent\"", + "type": "string" + }, + "namespaceSelector": { + "description": "NamespaceSelector decides whether to run the admission control policy on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. 
If the object is another cluster scoped resource, it never skips the policy.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the policy on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + }, + "objectSelector": { + "description": "ObjectSelector decides whether to run the validation based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the cel validation, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + }, + "resourceRules": { + "description": "ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. 
The policy cares about an operation if it matches _any_ Rule.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.NamedRuleWithOperations" + }, + "x-kubernetes-list-type": "atomic" + } + }, + "x-kubernetes-map-type": "atomic" + }, "io.k8s.api.admissionregistration.v1.MutatingWebhook": { "description": "MutatingWebhook describes an admission webhook and the resources and operations it applies to.", "type": "object", @@ -39,7 +110,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "clientConfig": { "description": "ClientConfig defines how to communicate with the hook. Required", @@ -50,7 +122,7 @@ "type": "string" }, "matchConditions": { - "description": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.", + "description": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. 
If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped", "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.MatchCondition" @@ -87,7 +159,8 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.RuleWithOperations" - } + }, + "x-kubernetes-list-type": "atomic" }, "sideEffects": { "description": "SideEffects states whether this webhook has side effects. Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some.", @@ -122,6 +195,10 @@ "items": { "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.MutatingWebhook" }, + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge" } @@ -169,6 +246,95 @@ } ] }, + "io.k8s.api.admissionregistration.v1.NamedRuleWithOperations": { + "description": "NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.", + "type": "object", + "properties": { + "apiGroups": { + "description": "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "atomic" + }, + "apiVersions": { + "description": "APIVersions is the API versions the resources belong to. 
'*' is all versions. If '*' is present, the length of the slice must be one. Required.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "atomic" + }, + "operations": { + "description": "Operations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or * for all of those operations and any future admission operations that are added. If '*' is present, the length of the slice must be one. Required.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "atomic" + }, + "resourceNames": { + "description": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "atomic" + }, + "resources": { + "description": "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. Required.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "atomic" + }, + "scope": { + "description": "scope specifies the scope of this rule. Valid values are \"Cluster\", \"Namespaced\", and \"*\" \"Cluster\" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. \"Namespaced\" means that only namespaced resources will match this rule. \"*\" means that there are no scope restrictions. Subresources match the scope of their parent resource. 
Default is \"*\".", + "type": "string" + } + }, + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.admissionregistration.v1.ParamKind": { + "description": "ParamKind is a tuple of Group Kind and Version.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion is the API group version the resources belong to. In format of \"group/version\". Required.", + "type": "string" + }, + "kind": { + "description": "Kind is the API kind the resources belong to. Required.", + "type": "string" + } + }, + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.admissionregistration.v1.ParamRef": { + "description": "ParamRef describes how to locate the params to be used as input to expressions of rules applied by a policy binding.", + "type": "object", + "properties": { + "name": { + "description": "name is the name of the resource being referenced.\n\nOne of `name` or `selector` must be set, but `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset.\n\nA single parameter used for all admission requests can be configured by setting the `name` field, leaving `selector` blank, and setting namespace if `paramKind` is namespace-scoped.", + "type": "string" + }, + "namespace": { + "description": "namespace is the namespace of the referenced resource. Allows limiting the search for params to a specific namespace. Applies to both `name` and `selector` fields.\n\nA per-namespace parameter may be used by specifying a namespace-scoped `paramKind` in the policy and leaving this field empty.\n\n- If `paramKind` is cluster-scoped, this field MUST be unset. Setting this field results in a configuration error.\n\n- If `paramKind` is namespace-scoped, the namespace of the object being evaluated for admission will be used when this field is left unset. 
Take care that if this is left empty the binding must not match any cluster-scoped resources, which will result in an error.", + "type": "string" + }, + "parameterNotFoundAction": { + "description": "`parameterNotFoundAction` controls the behavior of the binding when the resource exists, and name or selector is valid, but there are no parameters matched by the binding. If the value is set to `Allow`, then no matched parameters will be treated as successful validation by the binding. If set to `Deny`, then no matched parameters will be subject to the `failurePolicy` of the policy.\n\nAllowed values are `Allow` or `Deny`\n\nRequired", + "type": "string" + }, + "selector": { + "description": "selector can be used to match multiple param objects based on their labels. Supply selector: {} to match all resources of the ParamKind.\n\nIf multiple params are found, they are all evaluated with the policy expressions and the results are ANDed together.\n\nOne of `name` or `selector` must be set, but `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + } + }, + "x-kubernetes-map-type": "atomic" + }, "io.k8s.api.admissionregistration.v1.RuleWithOperations": { "description": "RuleWithOperations is a tuple of Operations and Resources. 
It is recommended to make sure that all the tuple expansions are valid.", "type": "object", @@ -238,80 +404,22 @@ } } }, - "io.k8s.api.admissionregistration.v1.ValidatingWebhook": { - "description": "ValidatingWebhook describes an admission webhook and the resources and operations it applies to.", + "io.k8s.api.admissionregistration.v1.TypeChecking": { + "description": "TypeChecking contains results of type checking the expressions in the ValidatingAdmissionPolicy", "type": "object", - "required": [ - "name", - "clientConfig", - "sideEffects", - "admissionReviewVersions" - ], "properties": { - "admissionReviewVersions": { - "description": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy.", - "type": "array", - "items": { - "type": "string" - } - }, - "clientConfig": { - "description": "ClientConfig defines how to communicate with the hook. Required", - "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.WebhookClientConfig" - }, - "failurePolicy": { - "description": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Fail.", - "type": "string" - }, - "matchConditions": { - "description": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. 
If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.", + "expressionWarnings": { + "description": "The type checking warnings for each expression.", "type": "array", "items": { - "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.MatchCondition" + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.ExpressionWarning" }, - "x-kubernetes-list-map-keys": [ - "name" - ], - "x-kubernetes-list-type": "map", - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge" - }, - "matchPolicy": { - "description": "matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\n\nDefaults to \"Equivalent\"", - "type": "string" - }, - "name": { - "description": "The name of the admission webhook. 
Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", - "type": "string" - }, - "namespaceSelector": { - "description": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" - }, - "objectSelector": { - "description": "ObjectSelector decides whether to run the webhook based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the webhook, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. 
Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.", - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" - }, - "rules": { - "description": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.", - "type": "array", - "items": { - "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.RuleWithOperations" - } - }, - "sideEffects": { - "description": "SideEffects states whether this webhook has side effects. Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some.", - "type": "string" - }, - "timeoutSeconds": { - "description": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. 
Default to 10 seconds.", - "type": "integer", - "format": "int32" + "x-kubernetes-list-type": "atomic" } } }, - "io.k8s.api.admissionregistration.v1.ValidatingWebhookConfiguration": { - "description": "ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it.", + "io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicy": { + "description": "ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it.", "type": "object", "properties": { "apiVersion": { @@ -326,38 +434,360 @@ "description": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, - "webhooks": { - "description": "Webhooks is a list of webhooks and the affected resources and operations.", - "type": "array", - "items": { - "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.ValidatingWebhook" - }, - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge" + "spec": { + "description": "Specification of the desired behavior of the ValidatingAdmissionPolicy.", + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicySpec" + }, + "status": { + "description": "The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy behaves in the expected way. Populated by the system. 
Read-only.", + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicyStatus" } }, "x-kubernetes-group-version-kind": [ { "group": "admissionregistration.k8s.io", - "kind": "ValidatingWebhookConfiguration", + "kind": "ValidatingAdmissionPolicy", "version": "v1" } ] }, - "io.k8s.api.admissionregistration.v1.ValidatingWebhookConfigurationList": { - "description": "ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration.", + "io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicyBinding": { + "description": "ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with paramerized resources. ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters.\n\nFor a given admission request, each binding will cause its policy to be evaluated N times, where N is 1 for policies/bindings that don't use params, otherwise N is the number of parameters selected by the binding.\n\nThe CEL expressions of a policy must have a computed CEL cost below the maximum CEL budget. Each evaluation of the policy is given an independent CEL cost budget. Adding/removing policies, bindings, or params can not affect whether a given (policy, binding, param) combination is within its own CEL budget.", "type": "object", - "required": [ - "items" - ], "properties": { "apiVersion": { "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", "type": "string" }, - "items": { - "description": "List of ValidatingWebhookConfiguration.", - "type": "array", + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "Specification of the desired behavior of the ValidatingAdmissionPolicyBinding.", + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicyBindingSpec" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "admissionregistration.k8s.io", + "kind": "ValidatingAdmissionPolicyBinding", + "version": "v1" + } + ] + }, + "io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicyBindingList": { + "description": "ValidatingAdmissionPolicyBindingList is a list of ValidatingAdmissionPolicyBinding.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "List of PolicyBinding.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicyBinding" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "admissionregistration.k8s.io", + "kind": "ValidatingAdmissionPolicyBindingList", + "version": "v1" + } + ] + }, + "io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicyBindingSpec": { + "description": "ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding.", + "type": "object", + "properties": { + "matchResources": { + "description": "MatchResources declares what resources match this binding and will be validated by it. Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this. If this is unset, all resources matched by the policy are validated by this binding When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated. Note that this is differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required.", + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.MatchResources" + }, + "paramRef": { + "description": "paramRef specifies the parameter resource used to configure the admission control policy. It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy. If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy applied. 
If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.", + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.ParamRef" + }, + "policyName": { + "description": "PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required.", + "type": "string" + }, + "validationActions": { + "description": "validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions.\n\nFailures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.\n\nvalidationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action.\n\nThe supported actions values are:\n\n\"Deny\" specifies that a validation failure results in a denied request.\n\n\"Warn\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses.\n\n\"Audit\" specifies that a validation failure is included in the published audit event for the request. 
The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\"validation.policy.admission.k8s.io/validation_failure\": \"[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]\"`\n\nClients should expect to handle additional values by ignoring any values not recognized.\n\n\"Deny\" and \"Warn\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers.\n\nRequired.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "set" + } + } + }, + "io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicyList": { + "description": "ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "List of ValidatingAdmissionPolicy.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicy" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "admissionregistration.k8s.io", + "kind": "ValidatingAdmissionPolicyList", + "version": "v1" + } + ] + }, + "io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicySpec": { + "description": "ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy.", + "type": "object", + "properties": { + "auditAnnotations": { + "description": "auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request. validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is required.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.AuditAnnotation" + }, + "x-kubernetes-list-type": "atomic" + }, + "failurePolicy": { + "description": "failurePolicy defines how to handle failures for the admission policy. 
Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings.\n\nA policy is invalid if spec.paramKind refers to a non-existent Kind. A binding is invalid if spec.paramRef.name refers to a non-existent resource.\n\nfailurePolicy does not define how validations that evaluate to false are handled.\n\nWhen failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions define how failures are enforced.\n\nAllowed values are Ignore or Fail. Defaults to Fail.", + "type": "string" + }, + "matchConditions": { + "description": "MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the policy is skipped", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.MatchCondition" + }, + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "matchConstraints": { + "description": "MatchConstraints specifies what resources this policy is designed to validate. The AdmissionPolicy cares about a request if it matches _all_ Constraints. 
However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API ValidatingAdmissionPolicy cannot match ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding. Required.", + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.MatchResources" + }, + "paramKind": { + "description": "ParamKind specifies the kind of resources used to parameterize this policy. If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. If ParamKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. If paramKind is specified but paramRef is unset in ValidatingAdmissionPolicyBinding, the params variable will be null.", + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.ParamKind" + }, + "validations": { + "description": "Validations contain CEL expressions which is used to apply the validation. Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is required.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.Validation" + }, + "x-kubernetes-list-type": "atomic" + }, + "variables": { + "description": "Variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except MatchConditions because MatchConditions are evaluated before the rest of the policy.\n\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. 
Thus, Variables must be sorted by the order of first appearance and acyclic.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.Variable" + }, + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + } + } + }, + "io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicyStatus": { + "description": "ValidatingAdmissionPolicyStatus represents the status of an admission validation policy.", + "type": "object", + "properties": { + "conditions": { + "description": "The conditions represent the latest available observations of a policy's current state.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Condition" + }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map" + }, + "observedGeneration": { + "description": "The generation observed by the controller.", + "type": "integer", + "format": "int64" + }, + "typeChecking": { + "description": "The results of type checking for each expression. Presence of this field indicates the completion of the type checking.", + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.TypeChecking" + } + } + }, + "io.k8s.api.admissionregistration.v1.ValidatingWebhook": { + "description": "ValidatingWebhook describes an admission webhook and the resources and operations it applies to.", + "type": "object", + "required": [ + "name", + "clientConfig", + "sideEffects", + "admissionReviewVersions" + ], + "properties": { + "admissionReviewVersions": { + "description": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. 
If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "atomic" + }, + "clientConfig": { + "description": "ClientConfig defines how to communicate with the hook. Required", + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.WebhookClientConfig" + }, + "failurePolicy": { + "description": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Fail.", + "type": "string" + }, + "matchConditions": { + "description": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.MatchCondition" + }, + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "matchPolicy": { + "description": "matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. 
For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\n\nDefaults to \"Equivalent\"", + "type": "string" + }, + "name": { + "description": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", + "type": "string" + }, + "namespaceSelector": { + "description": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. 
If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + }, + "objectSelector": { + "description": "ObjectSelector decides whether to run the webhook based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the webhook, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + }, + "rules": { + "description": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. 
However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.RuleWithOperations" + }, + "x-kubernetes-list-type": "atomic" + }, + "sideEffects": { + "description": "SideEffects states whether this webhook has side effects. Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some.", + "type": "string" + }, + "timeoutSeconds": { + "description": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 10 seconds.", + "type": "integer", + "format": "int32" + } + } + }, + "io.k8s.api.admissionregistration.v1.ValidatingWebhookConfiguration": { + "description": "ValidatingWebhookConfiguration describes the configuration of an admission webhook that accepts or rejects an object without changing it.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "webhooks": { + "description": "Webhooks is a list of webhooks and the affected resources and operations.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.ValidatingWebhook" + }, + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "admissionregistration.k8s.io", + "kind": "ValidatingWebhookConfiguration", + "version": "v1" + } + ] + }, + "io.k8s.api.admissionregistration.v1.ValidatingWebhookConfigurationList": { + "description": "ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "List of ValidatingWebhookConfiguration.", + "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.ValidatingWebhookConfiguration" } @@ -379,6 +809,50 @@ } ] }, + "io.k8s.api.admissionregistration.v1.Validation": { + "description": "Validation specifies the CEL expression which is used to apply the validation.", + "type": "object", + "required": [ + "expression" + ], + "properties": { + "expression": { + "description": "Expression represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\n For example, a variable named 'foo' can be accessed as 'variables.foo'.\n- 'authorizer' - A CEL Authorizer. 
May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\n\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible.\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:\n\t \"true\", \"false\", \"null\", \"in\", \"as\", \"break\", \"const\", \"continue\", \"else\", \"for\", \"function\", \"if\",\n\t \"import\", \"let\", \"loop\", \"package\", \"namespace\", \"return\".\nExamples:\n - Expression accessing a property named \"namespace\": {\"Expression\": \"object.__namespace__ \u003e 0\"}\n - Expression accessing a property named \"x-prop\": {\"Expression\": \"object.x__dash__prop \u003e 0\"}\n - Expression accessing a property named \"redact__d\": {\"Expression\": \"object.redact__underscores__d \u003e 0\"}\n\nEquality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:\n - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and\n non-intersecting elements in `Y` are appended, retaining their partial order.\n - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values\n are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. 
Elements in `Y` with\n non-intersecting keys are appended, retaining their partial order.\nRequired.", + "type": "string" + }, + "message": { + "description": "Message represents the message displayed when validation fails. The message is required if the Expression contains line breaks. The message must not contain line breaks. If unset, the message is \"failed rule: {Rule}\". e.g. \"must be a URL with the host matching spec.host\" If the Expression contains line breaks. Message is required. The message must not contain line breaks. If unset, the message is \"failed Expression: {Expression}\".", + "type": "string" + }, + "messageExpression": { + "description": "messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. Example: \"object.x must be less than max (\"+string(params.max)+\")\"", + "type": "string" + }, + "reason": { + "description": "Reason represents a machine-readable description of why this validation failed. 
If this is the first validation in the list to fail, this reason, as well as the corresponding HTTP response code, are used in the HTTP response to the client. The currently supported reasons are: \"Unauthorized\", \"Forbidden\", \"Invalid\", \"RequestEntityTooLarge\". If not set, StatusReasonInvalid is used in the response to the client.", + "type": "string" + } + } + }, + "io.k8s.api.admissionregistration.v1.Variable": { + "description": "Variable is the definition of a variable that is used for composition. A variable is defined as a named expression.", + "type": "object", + "required": [ + "name", + "expression" + ], + "properties": { + "expression": { + "description": "Expression is the expression that will be evaluated as the value of the variable. The CEL expression has access to the same identifiers as the CEL expressions in Validation.", + "type": "string" + }, + "name": { + "description": "Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. 
The variable can be accessed in other expressions through `variables` For example, if name is \"foo\", the variable will be available as `variables.foo`", + "type": "string" + } + }, + "x-kubernetes-map-type": "atomic" + }, "io.k8s.api.admissionregistration.v1.WebhookClientConfig": { "description": "WebhookClientConfig contains the information to make a TLS connection with the webhook", "type": "object", @@ -1429,7 +1903,8 @@ "required": [ "type", "status", - "reason" + "reason", + "message" ], "properties": { "lastTransitionTime": { @@ -1753,6 +2228,10 @@ "items": { "$ref": "#/definitions/io.k8s.api.apps.v1.DaemonSetCondition" }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "type", "x-kubernetes-patch-strategy": "merge" }, @@ -1981,6 +2460,10 @@ "items": { "$ref": "#/definitions/io.k8s.api.apps.v1.DeploymentCondition" }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "type", "x-kubernetes-patch-strategy": "merge" }, @@ -2168,6 +2651,10 @@ "items": { "$ref": "#/definitions/io.k8s.api.apps.v1.ReplicaSetCondition" }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "type", "x-kubernetes-patch-strategy": "merge" }, @@ -2374,11 +2861,11 @@ "format": "int32" }, "ordinals": { - "description": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is beta.", + "description": "ordinals controls the numbering of replica indices in a StatefulSet. 
The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested.", "$ref": "#/definitions/io.k8s.api.apps.v1.StatefulSetOrdinals" }, "persistentVolumeClaimRetentionPolicy": { - "description": "persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent volume claims created from volumeClaimTemplates. By default, all persistent volume claims are created as needed and retained until manually deleted. This policy allows the lifecycle to be altered, for example by deleting persistent volume claims when their stateful set is deleted, or when their pod is scaled down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha. +optional", + "description": "persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent volume claims created from volumeClaimTemplates. By default, all persistent volume claims are created as needed and retained until manually deleted. This policy allows the lifecycle to be altered, for example by deleting persistent volume claims when their stateful set is deleted, or when their pod is scaled down. 
This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is beta.", "$ref": "#/definitions/io.k8s.api.apps.v1.StatefulSetPersistentVolumeClaimRetentionPolicy" }, "podManagementPolicy": { @@ -2416,7 +2903,8 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeClaim" - } + }, + "x-kubernetes-list-type": "atomic" } } }, @@ -2443,6 +2931,10 @@ "items": { "$ref": "#/definitions/io.k8s.api.apps.v1.StatefulSetCondition" }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "type", "x-kubernetes-patch-strategy": "merge" }, @@ -2604,7 +3096,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "boundObjectRef": { "description": "BoundObjectRef is a reference to an object that the token will be bound to. The token will only be valid for as long as the bound object exists. NOTE: The API server's TokenReview endpoint will validate the BoundObjectRef, but other audiences may not. Keep ExpirationSeconds small if you want prompt revocation.", @@ -2680,7 +3173,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "token": { "description": "Token is the opaque bearer token.", @@ -2697,7 +3191,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "authenticated": { "description": "Authenticated indicates that the token was associated with a known user.", @@ -2732,7 +3227,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "uid": { "description": "A unique value that identifies this user across time. If this user is deleted and another user by the same name is added, they will have different UIDs.", @@ -2822,6 +3318,42 @@ } } }, + "io.k8s.api.authorization.v1.FieldSelectorAttributes": { + "description": "FieldSelectorAttributes indicates a field limited access. 
Webhook authors are encouraged to * ensure rawSelector and requirements are not both set * consider the requirements field if set * not try to parse or consider the rawSelector field if set. This is to avoid another CVE-2022-2880 (i.e. getting different systems to agree on how exactly to parse a query is not something we want), see https://www.oxeye.io/resources/golang-parameter-smuggling-attack for more details. For the *SubjectAccessReview endpoints of the kube-apiserver: * If rawSelector is empty and requirements are empty, the request is not limited. * If rawSelector is present and requirements are empty, the rawSelector will be parsed and limited if the parsing succeeds. * If rawSelector is empty and requirements are present, the requirements should be honored * If rawSelector is present and requirements are present, the request is invalid.", + "type": "object", + "properties": { + "rawSelector": { + "description": "rawSelector is the serialization of a field selector that would be included in a query parameter. Webhook implementations are encouraged to ignore rawSelector. The kube-apiserver's *SubjectAccessReview will parse the rawSelector as long as the requirements are not present.", + "type": "string" + }, + "requirements": { + "description": "requirements is the parsed interpretation of a field selector. All requirements must be met for a resource instance to match the selector. Webhook implementations should handle requirements, but how to handle them is up to the webhook. Since requirements can only limit the request, it is safe to authorize as unlimited request if the requirements are not understood.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.FieldSelectorRequirement" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "io.k8s.api.authorization.v1.LabelSelectorAttributes": { + "description": "LabelSelectorAttributes indicates a label limited access. 
Webhook authors are encouraged to * ensure rawSelector and requirements are not both set * consider the requirements field if set * not try to parse or consider the rawSelector field if set. This is to avoid another CVE-2022-2880 (i.e. getting different systems to agree on how exactly to parse a query is not something we want), see https://www.oxeye.io/resources/golang-parameter-smuggling-attack for more details. For the *SubjectAccessReview endpoints of the kube-apiserver: * If rawSelector is empty and requirements are empty, the request is not limited. * If rawSelector is present and requirements are empty, the rawSelector will be parsed and limited if the parsing succeeds. * If rawSelector is empty and requirements are present, the requirements should be honored * If rawSelector is present and requirements are present, the request is invalid.", + "type": "object", + "properties": { + "rawSelector": { + "description": "rawSelector is the serialization of a field selector that would be included in a query parameter. Webhook implementations are encouraged to ignore rawSelector. The kube-apiserver's *SubjectAccessReview will parse the rawSelector as long as the requirements are not present.", + "type": "string" + }, + "requirements": { + "description": "requirements is the parsed interpretation of a label selector. All requirements must be met for a resource instance to match the selector. Webhook implementations should handle requirements, but how to handle them is up to the webhook. Since requirements can only limit the request, it is safe to authorize as unlimited request if the requirements are not understood.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, "io.k8s.api.authorization.v1.LocalSubjectAccessReview": { "description": "LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. 
Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking.", "type": "object", @@ -2884,14 +3416,16 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "verbs": { "description": "Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. \"*\" means all.", "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" } } }, @@ -2899,10 +3433,18 @@ "description": "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface", "type": "object", "properties": { + "fieldSelector": { + "description": "fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it.\n\nThis field is alpha-level. To use this field, you must enable the `AuthorizeWithSelectors` feature gate (disabled by default).", + "$ref": "#/definitions/io.k8s.api.authorization.v1.FieldSelectorAttributes" + }, "group": { "description": "Group is the API Group of the Resource. \"*\" means all.", "type": "string" }, + "labelSelector": { + "description": "labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it.\n\nThis field is alpha-level. To use this field, you must enable the `AuthorizeWithSelectors` feature gate (disabled by default).", + "$ref": "#/definitions/io.k8s.api.authorization.v1.LabelSelectorAttributes" + }, "name": { "description": "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.", "type": "string" @@ -2941,28 +3483,32 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "resourceNames": { "description": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. 
\"*\" means all.", "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "resources": { "description": "Resources is a list of resources this rule applies to. \"*\" means all in the specified apiGroups.\n \"*/foo\" represents the subresource 'foo' for all resources in the specified apiGroups.", "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "verbs": { "description": "Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" } } }, @@ -3117,7 +3663,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "nonResourceAttributes": { "description": "NonResourceAttributes describes information for a non-resource access request", @@ -3184,14 +3731,16 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.authorization.v1.NonResourceRule" - } + }, + "x-kubernetes-list-type": "atomic" }, "resourceRules": { "description": "ResourceRules is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.", "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.authorization.v1.ResourceRule" - } + }, + "x-kubernetes-list-type": "atomic" } } }, @@ -4274,6 +4823,10 @@ "type": "integer", "format": "int32" }, + "managedBy": { + "description": "ManagedBy field indicates the controller that manages a Job. The k8s Job controller reconciles jobs which don't have this field at all or the field value is the reserved string `kubernetes.io/job-controller`, but skips reconciling Jobs with a custom value for this field. The value must be a valid domain-prefixed path (e.g. acme.io/foo) - all characters before the first \"/\" must be a valid subdomain as defined by RFC 1123. 
All characters trailing the first \"/\" must be valid HTTP Path characters as defined by RFC 3986. The value cannot exceed 63 characters. This field is immutable.\n\nThis field is alpha-level. The job controller accepts setting the field when the feature gate JobManagedBy is enabled (disabled by default).", + "type": "string" + }, "manualSelector": { "description": "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector", "type": "boolean" @@ -4289,7 +4842,7 @@ "format": "int32" }, "podFailurePolicy": { - "description": "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.\n\nThis field is beta-level. It can be used when the `JobPodFailurePolicy` feature gate is enabled (enabled by default).", + "description": "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. 
If empty, the default behaviour applies - the counter of failed pods, represented by the job's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.", + "$ref": "#/definitions/io.k8s.api.batch.v1.PodFailurePolicy" + }, + "podReplacementPolicy": { @@ -4300,6 +4853,10 @@ "description": "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + }, + "successPolicy": { + "description": "successPolicy specifies the policy when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated.\n\nThis field is beta-level. To use this field, you must enable the `JobSuccessPolicy` feature gate (enabled by default).", + "$ref": "#/definitions/io.k8s.api.batch.v1.SuccessPolicy" + }, + "suspend": { + "description": "suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. 
Defaults to false.", "type": "boolean" @@ -4320,7 +4877,7 @@ "type": "object", "properties": { "active": { - "description": "The number of pending and running pods.", + "description": "The number of pending and running pods which are not terminating (without a deletionTimestamp). The value is zero for finished jobs.", "type": "integer", "format": "int32" }, @@ -4329,11 +4886,11 @@ "type": "string" }, "completionTime": { - "description": "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC. The completion time is only set when the job finishes successfully.", + "description": "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC. The completion time is set when the job finishes successfully, and only then. The value cannot be updated or removed. The value indicates the same or later point in time as the startTime field.", "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" }, "conditions": { - "description": "The latest available observations of an object's current state. When a Job fails, one of the conditions will have type \"Failed\" and status true. When a Job is suspended, one of the conditions will have type \"Suspended\" and status true; when the Job is resumed, the status of this condition will become false. When a Job is completed, one of the conditions will have type \"Complete\" and status true. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "description": "The latest available observations of an object's current state. When a Job fails, one of the conditions will have type \"Failed\" and status true. 
When a Job is suspended, one of the conditions will have type \"Suspended\" and status true; when the Job is resumed, the status of this condition will become false. When a Job is completed, one of the conditions will have type \"Complete\" and status true.\n\nA job is considered finished when it is in a terminal condition, either \"Complete\" or \"Failed\". A Job cannot have both the \"Complete\" and \"Failed\" conditions. Additionally, it cannot be in the \"Complete\" and \"FailureTarget\" conditions. The \"Complete\", \"Failed\" and \"FailureTarget\" conditions cannot be disabled.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.batch.v1.JobCondition" @@ -4343,25 +4900,25 @@ "x-kubernetes-patch-strategy": "merge" }, "failed": { - "description": "The number of pods which reached phase Failed.", + "description": "The number of pods which reached phase Failed. The value increases monotonically.", "type": "integer", "format": "int32" }, "failedIndexes": { - "description": "FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", + "description": "FailedIndexes holds the failed indexes when spec.backoffLimitPerIndex is set. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. 
The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". The set of failed indexes cannot overlap with the set of completed indexes.\n\nThis field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", "type": "string" }, "ready": { - "description": "The number of pods which have a Ready condition.", + "description": "The number of active pods which have a Ready condition and are not terminating (without a deletionTimestamp).", "type": "integer", "format": "int32" }, "startTime": { - "description": "Represents time when the job controller started processing a job. When a Job is created in the suspended state, this field is not set until the first time it is resumed. This field is reset every time a Job is resumed from suspension. It is represented in RFC3339 form and is in UTC.", + "description": "Represents time when the job controller started processing a job. When a Job is created in the suspended state, this field is not set until the first time it is resumed. This field is reset every time a Job is resumed from suspension. It is represented in RFC3339 form and is in UTC.\n\nOnce set, the field can only be removed when the job is suspended. The field cannot be modified while the job is unsuspended or finished.", "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" }, "succeeded": { - "description": "The number of pods which reached phase Succeeded.", + "description": "The number of pods which reached phase Succeeded. The value increases monotonically for a given spec. 
However, it may decrease in reaction to scale down of elastic indexed jobs.", "type": "integer", "format": "int32" }, @@ -4371,7 +4928,7 @@ "format": "int32" }, "uncountedTerminatedPods": { - "description": "uncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status:\n\n1. Add the pod UID to the arrays in this field. 2. Remove the pod finalizer. 3. Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null.", + "description": "uncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status:\n\n1. Add the pod UID to the arrays in this field. 2. Remove the pod finalizer. 3. Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null. The structure is empty for finished jobs.", "$ref": "#/definitions/io.k8s.api.batch.v1.UncountedTerminatedPods" } } @@ -4477,6 +5034,38 @@ } } }, + "io.k8s.api.batch.v1.SuccessPolicy": { + "description": "SuccessPolicy describes when a Job can be declared as succeeded based on the success of some indexes.", + "type": "object", + "required": [ + "rules" + ], + "properties": { + "rules": { + "description": "rules represents the list of alternative rules for the declaring the Jobs as successful before `.status.succeeded \u003e= .spec.completions`. 
Once any of the rules are met, the \"SucceededCriteriaMet\" condition is added, and the lingering pods are removed. The terminal state for such a Job has the \"Complete\" condition. Additionally, these rules are evaluated in order; Once the Job meets one of the rules, other rules are ignored. At most 20 elements are allowed.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.batch.v1.SuccessPolicyRule" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "io.k8s.api.batch.v1.SuccessPolicyRule": { + "description": "SuccessPolicyRule describes rule for declaring a Job as succeeded. Each rule must have at least one of the \"succeededIndexes\" or \"succeededCount\" specified.", + "type": "object", + "properties": { + "succeededCount": { + "description": "succeededCount specifies the minimal required size of the actual set of the succeeded indexes for the Job. When succeededCount is used along with succeededIndexes, the check is constrained only to the set of indexes specified by succeededIndexes. For example, given that succeededIndexes is \"1-4\", succeededCount is \"3\", and completed indexes are \"1\", \"3\", and \"5\", the Job isn't declared as succeeded because only \"1\" and \"3\" indexes are considered in that rules. When this field is null, this doesn't default to any value and is never evaluated at any time. When specified it needs to be a positive integer.", + "type": "integer", + "format": "int32" + }, + "succeededIndexes": { + "description": "succeededIndexes specifies the set of indexes which need to be contained in the actual set of the succeeded indexes for the Job. The list of indexes must be within 0 to \".spec.completions-1\" and must not contain duplicates. At least one element is required. The indexes are represented as intervals separated by commas. The intervals can be a decimal integer or a pair of decimal integers separated by a hyphen. 
The number are listed in represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". When this field is null, this field doesn't default to any value and is never evaluated at any time.", + "type": "string" + } + } + }, "io.k8s.api.batch.v1.UncountedTerminatedPods": { "description": "UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't been accounted in Job status counters.", "type": "object", @@ -4840,22 +5429,132 @@ "description": "acquireTime is a time when the current lease was acquired.", "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime" }, - "holderIdentity": { - "description": "holderIdentity contains the identity of the holder of a current lease.", + "holderIdentity": { + "description": "holderIdentity contains the identity of the holder of a current lease. If Coordinated Leader Election is used, the holder identity must be equal to the elected LeaseCandidate.metadata.name field.", + "type": "string" + }, + "leaseDurationSeconds": { + "description": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measured against the time of last observed renewTime.", + "type": "integer", + "format": "int32" + }, + "leaseTransitions": { + "description": "leaseTransitions is the number of transitions of a lease between holders.", + "type": "integer", + "format": "int32" + }, + "preferredHolder": { + "description": "PreferredHolder signals to a lease holder that the lease has a more optimal holder and should be given up. 
This field can only be set if Strategy is also set.", + "type": "string" + }, + "renewTime": { + "description": "renewTime is a time when the current holder of a lease has last updated the lease.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime" + }, + "strategy": { + "description": "Strategy indicates the strategy for picking the leader for coordinated leader election. If the field is not specified, there is no active coordination for this lease. (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.", + "type": "string" + } + } + }, + "io.k8s.api.coordination.v1alpha1.LeaseCandidate": { + "description": "LeaseCandidate defines a candidate for a Lease object. Candidates are created such that coordinated leader election will pick the best leader from the list of candidates.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "spec contains the specification of the Lease. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.coordination.v1alpha1.LeaseCandidateSpec" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "coordination.k8s.io", + "kind": "LeaseCandidate", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.coordination.v1alpha1.LeaseCandidateList": { + "description": "LeaseCandidateList is a list of Lease objects.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is a list of schema objects.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.coordination.v1alpha1.LeaseCandidate" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "coordination.k8s.io", + "kind": "LeaseCandidateList", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.coordination.v1alpha1.LeaseCandidateSpec": { + "description": "LeaseCandidateSpec is a specification of a Lease.", + "type": "object", + "required": [ + "leaseName", + "preferredStrategies" + ], + "properties": { + "binaryVersion": { + "description": "BinaryVersion is the binary version. It must be in a semver format without leading `v`. This field is required when strategy is \"OldestEmulationVersion\"", + "type": "string" + }, + "emulationVersion": { + "description": "EmulationVersion is the emulation version. It must be in a semver format without leading `v`. EmulationVersion must be less than or equal to BinaryVersion. This field is required when strategy is \"OldestEmulationVersion\"", "type": "string" }, - "leaseDurationSeconds": { - "description": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed renewTime.", - "type": "integer", - "format": "int32" + "leaseName": { + "description": "LeaseName is the name of the lease for which this candidate is contending. This field is immutable.", + "type": "string" }, - "leaseTransitions": { - "description": "leaseTransitions is the number of transitions of a lease between holders.", - "type": "integer", - "format": "int32" + "pingTime": { + "description": "PingTime is the last time that the server has requested the LeaseCandidate to renew. It is only done during leader election to check if any LeaseCandidates have become ineligible. 
When PingTime is updated, the LeaseCandidate will respond by updating RenewTime.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime" + }, + "preferredStrategies": { + "description": "PreferredStrategies indicates the list of strategies for picking the leader for coordinated leader election. The list is ordered, and the first strategy supersedes all other strategies. The list is used by coordinated leader election to make a decision about the final election strategy. This follows as - If all clients have strategy X as the first element in this list, strategy X will be used. - If a candidate has strategy [X] and another candidate has strategy [Y, X], Y supersedes X and strategy Y\n will be used.\n- If a candidate has strategy [X, Y] and another candidate has strategy [Y, X], this is a user error and leader\n election will not operate the Lease until resolved.\n(Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "atomic" }, "renewTime": { - "description": "renewTime is a time when the current holder of a lease has last updated the lease.", + "description": "RenewTime is the time that the LeaseCandidate was last updated. Any time a Lease needs to do leader election, the PingTime field is updated to signal to the LeaseCandidate that they should update the RenewTime. Old LeaseCandidate objects are also garbage collected if it has been hours since the last renew. 
The PingTime field is updated regularly to prevent garbage collection for still active LeaseCandidates.", "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime" } } @@ -4904,6 +5603,31 @@ } } }, + "io.k8s.api.core.v1.AppArmorProfile": { + "description": "AppArmorProfile defines a pod or container's AppArmor settings.", + "type": "object", + "required": [ + "type" + ], + "properties": { + "localhostProfile": { + "description": "localhostProfile indicates a profile loaded on the node that should be used. The profile must be preconfigured on the node to work. Must match the loaded name of the profile. Must be set if and only if type is \"Localhost\".", + "type": "string" + }, + "type": { + "description": "type indicates which kind of AppArmor profile will be applied. Valid options are:\n Localhost - a profile pre-loaded on the node.\n RuntimeDefault - the container runtime's default profile.\n Unconfined - no AppArmor enforcement.", + "type": "string" + } + }, + "x-kubernetes-unions": [ + { + "discriminator": "type", + "fields-to-discriminateBy": { + "localhostProfile": "LocalhostProfile" + } + } + ] + }, "io.k8s.api.core.v1.AttachedVolume": { "description": "AttachedVolume describes a volume attached to a node", "type": "object", @@ -5130,14 +5854,16 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "drop": { "description": "Removed capabilities", "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" } } }, @@ -5153,7 +5879,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "path": { "description": "path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /", @@ -5189,7 +5916,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "path": { "description": "path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /", @@ 
-5263,20 +5991,6 @@ } } }, - "io.k8s.api.core.v1.ClaimSource": { - "description": "ClaimSource describes a reference to a ResourceClaim.\n\nExactly one of these fields should be set. Consumers of this type must treat an empty object as if it has an unknown value.", - "type": "object", - "properties": { - "resourceClaimName": { - "description": "ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod.", - "type": "string" - }, - "resourceClaimTemplateName": { - "description": "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod.\n\nThe template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.\n\nThis field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim.", - "type": "string" - } - } - }, "io.k8s.api.core.v1.ClientIPConfig": { "description": "ClientIPConfig represents the configurations of Client IP based session affinity.", "type": "object", @@ -5357,6 +6071,10 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.ComponentCondition" }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "type", "x-kubernetes-patch-strategy": "merge" }, @@ -5461,7 +6179,7 @@ "type": "object", "properties": { "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "description": "Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", "type": "string" }, "optional": { @@ -5482,7 +6200,7 @@ "type": "string" }, "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "description": "Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", "type": "string" }, "optional": { @@ -5567,10 +6285,11 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.KeyToPath" - } + }, + "x-kubernetes-list-type": "atomic" }, "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "description": "Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", "type": "string" }, "optional": { @@ -5593,10 +6312,11 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.KeyToPath" - } + }, + "x-kubernetes-list-type": "atomic" }, "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "description": "Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", "type": "string" }, "optional": { @@ -5617,14 +6337,16 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "command": { "description": "Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "env": { "description": "List of environment variables to set in the container. Cannot be updated.", @@ -5632,6 +6354,10 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.EnvVar" }, + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge" }, @@ -5640,7 +6366,8 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.EnvFromSource" - } + }, + "x-kubernetes-list-type": "atomic" }, "image": { "description": "Container image name. 
More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", @@ -5730,6 +6457,10 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.VolumeDevice" }, + "x-kubernetes-list-map-keys": [ + "devicePath" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "devicePath", "x-kubernetes-patch-strategy": "merge" }, @@ -5739,6 +6470,10 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.VolumeMount" }, + "x-kubernetes-list-map-keys": [ + "mountPath" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "mountPath", "x-kubernetes-patch-strategy": "merge" }, @@ -5757,7 +6492,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "sizeBytes": { "description": "The size of the image in bytes.", @@ -5914,6 +6650,19 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" } }, + "allocatedResourcesStatus": { + "description": "AllocatedResourcesStatus represents the status of various resources allocated for this Pod.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ResourceStatus" + }, + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, "containerID": { "description": "ContainerID is the ID of the container in the format '\u003ctype\u003e://\u003ccontainer_id\u003e'. 
Where type is a container runtime identifier, returned from Version call of CRI API (for example \"containerd\").", "type": "string" @@ -5954,6 +6703,33 @@ "state": { "description": "State holds details about the container's current condition.", "$ref": "#/definitions/io.k8s.api.core.v1.ContainerState" + }, + "user": { + "description": "User represents user identity information initially attached to the first process of the container", + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerUser" + }, + "volumeMounts": { + "description": "Status of volume mounts.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.VolumeMountStatus" + }, + "x-kubernetes-list-map-keys": [ + "mountPath" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "mountPath", + "x-kubernetes-patch-strategy": "merge" + } + } + }, + "io.k8s.api.core.v1.ContainerUser": { + "description": "ContainerUser represents user identity information", + "type": "object", + "properties": { + "linux": { + "description": "Linux holds user identity information initially attached to the first process of the containers in Linux. 
Note that the actual running identity can be changed if the process has enough privilege to do so.", + "$ref": "#/definitions/io.k8s.api.core.v1.LinuxContainerUser" } } }, @@ -5980,7 +6756,8 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.DownwardAPIVolumeFile" - } + }, + "x-kubernetes-list-type": "atomic" } } }, @@ -5992,7 +6769,7 @@ ], "properties": { "fieldRef": { - "description": "Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.", + "description": "Required: Selects a field of the pod: only annotations, labels, name, namespace and uid are supported.", "$ref": "#/definitions/io.k8s.api.core.v1.ObjectFieldSelector" }, "mode": { @@ -6024,7 +6801,8 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.DownwardAPIVolumeFile" - } + }, + "x-kubernetes-list-type": "atomic" } } }, @@ -6104,21 +6882,24 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.EndpointAddress" - } + }, + "x-kubernetes-list-type": "atomic" }, "notReadyAddresses": { "description": "IP addresses which offer the related ports but are not currently marked as ready because they have not yet finished starting, have recently failed a readiness check, or have recently failed a liveness check.", "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.EndpointAddress" - } + }, + "x-kubernetes-list-type": "atomic" }, "ports": { "description": "Port numbers available on the related IP addresses.", "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.EndpointPort" - } + }, + "x-kubernetes-list-type": "atomic" } } }, @@ -6143,7 +6924,8 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.EndpointSubset" - } + }, + "x-kubernetes-list-type": "atomic" } }, "x-kubernetes-group-version-kind": [ @@ -6262,14 +7044,16 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "command": { "description": "Entrypoint 
array. Not executed within a shell. The image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "env": { "description": "List of environment variables to set in the container. Cannot be updated.", @@ -6277,6 +7061,10 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.EnvVar" }, + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge" }, @@ -6285,7 +7073,8 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.EnvFromSource" - } + }, + "x-kubernetes-list-type": "atomic" }, "image": { "description": "Container image name. 
More info: https://kubernetes.io/docs/concepts/containers/images", @@ -6379,6 +7168,10 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.VolumeDevice" }, + "x-kubernetes-list-map-keys": [ + "devicePath" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "devicePath", "x-kubernetes-patch-strategy": "merge" }, @@ -6388,6 +7181,10 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.VolumeMount" }, + "x-kubernetes-list-map-keys": [ + "mountPath" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "mountPath", "x-kubernetes-patch-strategy": "merge" }, @@ -6566,7 +7363,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" } } }, @@ -6592,14 +7390,16 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "wwids": { "description": "wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.", "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" } } }, @@ -6809,7 +7609,8 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.HTTPHeader" - } + }, + "x-kubernetes-list-type": "atomic" }, "path": { "description": "Path to access on the HTTP server.", @@ -6846,13 +7647,17 @@ "io.k8s.api.core.v1.HostAlias": { "description": "HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.", "type": "object", + "required": [ + "ip" + ], "properties": { "hostnames": { "description": "Hostnames for the above IP address.", "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "ip": { "description": "IP address of the host file entry.", @@ -6863,6 +7668,9 @@ "io.k8s.api.core.v1.HostIP": { "description": "HostIP represents a single IP address allocated to the host.", "type": "object", + "required": [ + "ip" + ], "properties": 
{ "ip": { "description": "IP is the IP address assigned to the host", @@ -6930,7 +7738,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "readOnly": { "description": "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", @@ -6989,7 +7798,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "readOnly": { "description": "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", @@ -7005,6 +7815,20 @@ } } }, + "io.k8s.api.core.v1.ImageVolumeSource": { + "description": "ImageVolumeSource represents a image volume resource.", + "type": "object", + "properties": { + "pullPolicy": { + "description": "Policy for pulling OCI objects. Possible values are: Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.", + "type": "string" + }, + "reference": { + "description": "Required: Image or artifact reference to be used. Behaves in the same way as pod.spec.containers[*].image. Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. 
More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + "type": "string" + } + } + }, "io.k8s.api.core.v1.KeyToPath": { "description": "Maps a string key to a path within a volume.", "type": "object", @@ -7188,7 +8012,37 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.LimitRangeItem" - } + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "io.k8s.api.core.v1.LinuxContainerUser": { + "description": "LinuxContainerUser represents user identity information in Linux containers", + "type": "object", + "required": [ + "uid", + "gid" + ], + "properties": { + "gid": { + "description": "GID is the primary gid initially attached to the first process in the container", + "type": "integer", + "format": "int64" + }, + "supplementalGroups": { + "description": "SupplementalGroups are the supplemental groups initially attached to the first process in the container", + "type": "array", + "items": { + "type": "integer", + "format": "int64" + }, + "x-kubernetes-list-type": "atomic" + }, + "uid": { + "description": "UID is the primary uid initially attached to the first process in the container", + "type": "integer", + "format": "int64" } } }, @@ -7227,7 +8081,8 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.LoadBalancerIngress" - } + }, + "x-kubernetes-list-type": "atomic" } } }, @@ -7236,7 +8091,7 @@ "type": "object", "properties": { "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "description": "Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", "type": "string" } }, @@ -7402,7 +8257,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" } } }, @@ -7416,6 +8272,10 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.NamespaceCondition" }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "type", "x-kubernetes-patch-strategy": "merge" }, @@ -7485,7 +8345,8 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.PreferredSchedulingTerm" - } + }, + "x-kubernetes-list-type": "atomic" }, "requiredDuringSchedulingIgnoredDuringExecution": { "description": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.", @@ -7569,6 +8430,16 @@ } } }, + "io.k8s.api.core.v1.NodeFeatures": { + "description": "NodeFeatures describes the set of features implemented by the CRI implementation. 
The features contained in the NodeFeatures should depend only on the cri implementation independent of runtime handlers.", + "type": "object", + "properties": { + "supplementalGroupsPolicy": { + "description": "SupplementalGroupsPolicy is set to true if the runtime supports SupplementalGroupsPolicy and ContainerUser.", + "type": "boolean" + } + } + }, "io.k8s.api.core.v1.NodeList": { "description": "NodeList is the whole list of all Nodes which have been registered with master.", "type": "object", @@ -7602,7 +8473,35 @@ "kind": "NodeList", "version": "v1" } - ] + ] + }, + "io.k8s.api.core.v1.NodeRuntimeHandler": { + "description": "NodeRuntimeHandler is a set of runtime handler information.", + "type": "object", + "properties": { + "features": { + "description": "Supported features.", + "$ref": "#/definitions/io.k8s.api.core.v1.NodeRuntimeHandlerFeatures" + }, + "name": { + "description": "Runtime handler name. Empty for the default runtime handler.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.NodeRuntimeHandlerFeatures": { + "description": "NodeRuntimeHandlerFeatures is a set of features implemented by the runtime handler.", + "type": "object", + "properties": { + "recursiveReadOnlyMounts": { + "description": "RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts.", + "type": "boolean" + }, + "userNamespaces": { + "description": "UserNamespaces is set to true if the runtime handler supports UserNamespaces, including for volumes.", + "type": "boolean" + } + } }, "io.k8s.api.core.v1.NodeSelector": { "description": "A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms.", @@ -7616,7 +8515,8 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.NodeSelectorTerm" - } + }, + "x-kubernetes-list-type": "atomic" } }, "x-kubernetes-map-type": "atomic" @@ -7642,7 +8542,8 @@ 
"type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" } } }, @@ -7655,14 +8556,16 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.NodeSelectorRequirement" - } + }, + "x-kubernetes-list-type": "atomic" }, "matchFields": { "description": "A list of node selector requirements by node's fields.", "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.NodeSelectorRequirement" - } + }, + "x-kubernetes-list-type": "atomic" } }, "x-kubernetes-map-type": "atomic" @@ -7689,6 +8592,7 @@ "items": { "type": "string" }, + "x-kubernetes-list-type": "set", "x-kubernetes-patch-strategy": "merge" }, "providerID": { @@ -7700,7 +8604,8 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.Taint" - } + }, + "x-kubernetes-list-type": "atomic" }, "unschedulable": { "description": "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration", @@ -7718,6 +8623,10 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.NodeAddress" }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "type", "x-kubernetes-patch-strategy": "merge" }, @@ -7729,7 +8638,7 @@ } }, "capacity": { - "description": "Capacity represents the total resources of a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity", + "description": "Capacity represents the total resources of a node. 
More info: https://kubernetes.io/docs/reference/node/node-status/#capacity", "type": "object", "additionalProperties": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" @@ -7741,6 +8650,10 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.NodeCondition" }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "type", "x-kubernetes-patch-strategy": "merge" }, @@ -7752,12 +8665,17 @@ "description": "Endpoints of daemons running on the Node.", "$ref": "#/definitions/io.k8s.api.core.v1.NodeDaemonEndpoints" }, + "features": { + "description": "Features describes the set of features implemented by the CRI implementation.", + "$ref": "#/definitions/io.k8s.api.core.v1.NodeFeatures" + }, "images": { "description": "List of container images on this node", "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.ContainerImage" - } + }, + "x-kubernetes-list-type": "atomic" }, "nodeInfo": { "description": "Set of ids/uuids to uniquely identify the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#info", @@ -7767,19 +8685,29 @@ "description": "NodePhase is the recently observed lifecycle phase of the node. 
More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.", "type": "string" }, + "runtimeHandlers": { + "description": "The available runtime handlers.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.NodeRuntimeHandler" + }, + "x-kubernetes-list-type": "atomic" + }, "volumesAttached": { "description": "List of volumes that are attached to the node.", "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.AttachedVolume" - } + }, + "x-kubernetes-list-type": "atomic" }, "volumesInUse": { "description": "List of attachable volumes in use (mounted) by the node.", "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" } } }, @@ -7816,7 +8744,7 @@ "type": "string" }, "kubeProxyVersion": { - "description": "KubeProxy Version reported by the node.", + "description": "Deprecated: KubeProxy Version reported by the node.", "type": "string" }, "kubeletVersion": { @@ -7981,7 +8909,7 @@ "type": "string" }, "reason": { - "description": "reason is a unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \"ResizeStarted\" that means the underlying persistent volume is being resized.", + "description": "reason is a unique, this should be a short, machine understandable string that gives the reason for condition's last transition. 
If it reports \"Resizing\" that means the underlying persistent volume is being resized.", "type": "string" }, "status": { @@ -8036,7 +8964,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "dataSource": { "description": "dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.", @@ -8059,7 +8988,7 @@ "type": "string" }, "volumeAttributesClassName": { - "description": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.", + "description": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).", "type": "string" }, "volumeMode": { @@ -8081,7 +9010,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "allocatedResourceStatuses": { "description": "allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. 
Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nClaimResourceStatus can be in any of following states:\n\t- ControllerResizeInProgress:\n\t\tState set when resize controller starts resizing the volume in control-plane.\n\t- ControllerResizeFailed:\n\t\tState set when resize has failed in resize controller with a terminal error.\n\t- NodeResizePending:\n\t\tState set when resize controller has finished resizing the volume but further resizing of\n\t\tvolume is needed on the node.\n\t- NodeResizeInProgress:\n\t\tState set when kubelet starts resizing the volume.\n\t- NodeResizeFailed:\n\t\tState set when resizing has failed in kubelet with a terminal error. Transient errors don't set\n\t\tNodeResizeFailed.\nFor example: if expanding a PVC for more capacity - this field can be one of the following states:\n\t- pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeFailed\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizePending\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeFailed\"\nWhen this field is not set, it means that no resize operation is in progress for the given PVC.\n\nA controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. 
For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", @@ -8106,20 +9036,24 @@ } }, "conditions": { - "description": "conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.", + "description": "conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'Resizing'.", "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeClaimCondition" }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "type", "x-kubernetes-patch-strategy": "merge" }, "currentVolumeAttributesClassName": { - "description": "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim This is an alpha field and requires enabling VolumeAttributesClass feature.", + "description": "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim This is a beta field and requires enabling VolumeAttributesClass feature (off by default).", "type": "string" }, "modifyVolumeStatus": { - "description": "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted. This is an alpha field and requires enabling VolumeAttributesClass feature.", + "description": "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. 
When this is unset, there is no ModifyVolume operation being attempted. This is a beta field and requires enabling VolumeAttributesClass feature (off by default).", "$ref": "#/definitions/io.k8s.api.core.v1.ModifyVolumeStatus" }, "phase": { @@ -8206,7 +9140,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "awsElasticBlockStore": { "description": "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", @@ -8281,7 +9216,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "nfs": { "description": "nfs represents an NFS mount on the host. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", @@ -8324,7 +9260,7 @@ "$ref": "#/definitions/io.k8s.api.core.v1.StorageOSPersistentVolumeSource" }, "volumeAttributesClassName": { - "description": "Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is an alpha field and requires enabling VolumeAttributesClass feature.", + "description": "Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. 
For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is a beta field and requires enabling VolumeAttributesClass feature (off by default).", "type": "string" }, "volumeMode": { @@ -8342,7 +9278,7 @@ "type": "object", "properties": { "lastPhaseTransitionTime": { - "description": "lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time everytime a volume phase transitions. This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default).", + "description": "lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time everytime a volume phase transitions.", "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" }, "message": { @@ -8418,14 +9354,16 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.WeightedPodAffinityTerm" - } + }, + "x-kubernetes-list-type": "atomic" }, "requiredDuringSchedulingIgnoredDuringExecution": { "description": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. 
all terms must be satisfied.", "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.PodAffinityTerm" - } + }, + "x-kubernetes-list-type": "atomic" } } }, @@ -8441,7 +9379,7 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" }, "matchLabelKeys": { - "description": "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", + "description": "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).", "type": "array", "items": { "type": "string" @@ -8449,7 +9387,7 @@ "x-kubernetes-list-type": "atomic" }, "mismatchLabelKeys": { - "description": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. 
The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", + "description": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).", "type": "array", "items": { "type": "string" @@ -8465,7 +9403,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "topologyKey": { "description": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. 
Empty topologyKey is not allowed.", @@ -8482,14 +9421,16 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.WeightedPodAffinityTerm" - } + }, + "x-kubernetes-list-type": "atomic" }, "requiredDuringSchedulingIgnoredDuringExecution": { "description": "If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.PodAffinityTerm" - } + }, + "x-kubernetes-list-type": "atomic" } } }, @@ -8536,21 +9477,24 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "options": { "description": "A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy.", "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.PodDNSConfigOption" - } + }, + "x-kubernetes-list-type": "atomic" }, "searches": { "description": "A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. 
Duplicated search paths will be removed.", "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" } } }, @@ -8570,6 +9514,9 @@ "io.k8s.api.core.v1.PodIP": { "description": "PodIP represents a single IP address allocated to the pod.", "type": "object", + "required": [ + "ip" + ], "properties": { "ip": { "description": "IP is the IP address assigned to the pod", @@ -8639,7 +9586,7 @@ } }, "io.k8s.api.core.v1.PodResourceClaim": { - "description": "PodResourceClaim references exactly one ResourceClaim through a ClaimSource. It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name.", + "description": "PodResourceClaim references exactly one ResourceClaim, either directly or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim for the pod.\n\nIt adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name.", "type": "object", "required": [ "name" @@ -8649,9 +9596,13 @@ "description": "Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL.", "type": "string" }, - "source": { - "description": "Source describes where to find the ResourceClaim.", - "$ref": "#/definitions/io.k8s.api.core.v1.ClaimSource" + "resourceClaimName": { + "description": "ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod.\n\nExactly one of ResourceClaimName and ResourceClaimTemplateName must be set.", + "type": "string" + }, + "resourceClaimTemplateName": { + "description": "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod.\n\nThe template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. 
The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.\n\nThis field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim.\n\nExactly one of ResourceClaimName and ResourceClaimTemplateName must be set.", + "type": "string" } } }, @@ -8667,7 +9618,7 @@ "type": "string" }, "resourceClaimName": { - "description": "ResourceClaimName is the name of the ResourceClaim that was generated for the Pod in the namespace of the Pod. It this is unset, then generating a ResourceClaim was not necessary. The pod.spec.resourceClaims entry can be ignored in this case.", + "description": "ResourceClaimName is the name of the ResourceClaim that was generated for the Pod in the namespace of the Pod. If this is unset, then generating a ResourceClaim was not necessary. The pod.spec.resourceClaims entry can be ignored in this case.", "type": "string" } } @@ -8689,6 +9640,10 @@ "description": "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.", "type": "object", "properties": { + "appArmorProfile": { + "description": "appArmorProfile is the AppArmor options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.", + "$ref": "#/definitions/io.k8s.api.core.v1.AppArmorProfile" + }, "fsGroup": { "description": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. 
The permission bits are OR'd with rw-rw----\n\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.", "type": "integer", @@ -8721,19 +9676,25 @@ "$ref": "#/definitions/io.k8s.api.core.v1.SeccompProfile" }, "supplementalGroups": { - "description": "A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.", + "description": "A list of groups applied to the first process run in each container, in addition to the container's primary GID and fsGroup (if specified). If the SupplementalGroupsPolicy feature is enabled, the supplementalGroupsPolicy field determines whether these are in addition to or instead of any group memberships defined in the container image. If unspecified, no additional groups are added, though group memberships defined in the container image may still be used, depending on the supplementalGroupsPolicy field. Note that this field cannot be set when spec.os.name is windows.", "type": "array", "items": { "type": "integer", "format": "int64" - } + }, + "x-kubernetes-list-type": "atomic" + }, + "supplementalGroupsPolicy": { + "description": "Defines how supplemental groups of the first container processes are calculated. Valid values are \"Merge\" and \"Strict\". If not specified, \"Merge\" is used. (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled and the container runtime must implement support for this feature. 
Note that this field cannot be set when spec.os.name is windows.", + "type": "string" }, "sysctls": { "description": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows.", "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.Sysctl" - } + }, + "x-kubernetes-list-type": "atomic" }, "windowsOptions": { "description": "The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux.", @@ -8767,6 +9728,10 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.Container" }, + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge" }, @@ -8788,15 +9753,23 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.EphemeralContainer" }, + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge" }, "hostAliases": { - "description": "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. 
This is only valid for non-hostNetwork pods.", + "description": "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified.", "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.HostAlias" }, + "x-kubernetes-list-map-keys": [ + "ip" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "ip", "x-kubernetes-patch-strategy": "merge" }, @@ -8826,6 +9799,10 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" }, + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge" }, @@ -8835,11 +9812,15 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.Container" }, + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge" }, "nodeName": { - "description": "NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.", + "description": "NodeName indicates in which node this pod is scheduled. If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. This field should not be used to express a desire for the pod to be scheduled on a specific node. https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename", "type": "string" }, "nodeSelector": { @@ -8851,7 +9832,7 @@ "x-kubernetes-map-type": "atomic" }, "os": { - "description": "Specifies the OS of the containers in the pod. 
Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup", + "description": "Specifies the OS of the containers in the pod. 
Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.appArmorProfile - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.securityContext.supplementalGroupsPolicy - spec.containers[*].securityContext.appArmorProfile - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup", "$ref": "#/definitions/io.k8s.api.core.v1.PodOS" }, "overhead": { @@ -8879,7 +9860,8 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.PodReadinessGate" - } + }, + "x-kubernetes-list-type": "atomic" }, "resourceClaims": { "description": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable.", @@ -8907,7 +9889,7 @@ "type": "string" }, "schedulingGates": { - "description": "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. 
If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\n\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate.", + "description": "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.", "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.PodSchedulingGate" @@ -8924,7 +9906,7 @@ "$ref": "#/definitions/io.k8s.api.core.v1.PodSecurityContext" }, "serviceAccount": { - "description": "DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.", + "description": "DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.", "type": "string" }, "serviceAccountName": { @@ -8953,7 +9935,8 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.Toleration" - } + }, + "x-kubernetes-list-type": "atomic" }, "topologySpreadConstraints": { "description": "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. 
All topologySpreadConstraints are ANDed.", @@ -8975,6 +9958,10 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.Volume" }, + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge,retainKeys" } @@ -8990,6 +9977,10 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.PodCondition" }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "type", "x-kubernetes-patch-strategy": "merge" }, @@ -8998,14 +9989,16 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.ContainerStatus" - } + }, + "x-kubernetes-list-type": "atomic" }, "ephemeralContainerStatuses": { "description": "Status for any ephemeral containers that have run in this pod.", "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.ContainerStatus" - } + }, + "x-kubernetes-list-type": "atomic" }, "hostIP": { "description": "hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns mean that HostIP will not be updated even if there is a node is assigned to pod", @@ -9026,7 +10019,8 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.ContainerStatus" - } + }, + "x-kubernetes-list-type": "atomic" }, "message": { "description": "A human readable message indicating details about why the pod is in this condition.", @@ -9050,6 +10044,10 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.PodIP" }, + "x-kubernetes-list-map-keys": [ + "ip" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "ip", "x-kubernetes-patch-strategy": "merge" }, @@ -9286,11 +10284,12 @@ "format": "int32" }, "sources": { - "description": "sources is the list of volume projections", + "description": "sources is the list of volume projections. 
Each entry in this list handles one source.", "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.VolumeProjection" - } + }, + "x-kubernetes-list-type": "atomic" } } }, @@ -9353,7 +10352,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "pool": { "description": "pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", @@ -9398,7 +10398,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "pool": { "description": "pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", @@ -9562,6 +10563,10 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.ReplicationControllerCondition" }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "type", "x-kubernetes-patch-strategy": "merge" }, @@ -9597,6 +10602,10 @@ "name": { "description": "Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.", "type": "string" + }, + "request": { + "description": "Request is the name chosen for a request in the referenced claim. If empty, everything from the claim is made available, otherwise only the result of this request.", + "type": "string" } } }, @@ -9622,6 +10631,23 @@ }, "x-kubernetes-map-type": "atomic" }, + "io.k8s.api.core.v1.ResourceHealth": { + "description": "ResourceHealth represents the health of a resource. It has the latest device health information. This is a part of KEP https://kep.k8s.io/4680 and historical health changes are planned to be added in future iterations of a KEP.", + "type": "object", + "required": [ + "resourceID" + ], + "properties": { + "health": { + "description": "Health of the resource. 
can be one of:\n - Healthy: operates as normal\n - Unhealthy: reported unhealthy. We consider this a temporary health issue\n since we do not have a mechanism today to distinguish\n temporary and permanent issues.\n - Unknown: The status cannot be determined.\n For example, Device Plugin got unregistered and hasn't been re-registered since.\n\nIn future we may want to introduce the PermanentlyUnhealthy Status.", + "type": "string" + }, + "resourceID": { + "description": "ResourceID is the unique identifier of the resource. See the ResourceID type for more information.", + "type": "string" + } + } + }, "io.k8s.api.core.v1.ResourceQuota": { "description": "ResourceQuota sets aggregate quota restrictions enforced per namespace", "type": "object", @@ -9710,7 +10736,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" } } }, @@ -9765,6 +10792,29 @@ } } }, + "io.k8s.api.core.v1.ResourceStatus": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "description": "Name of the resource. Must be unique within the pod and match one of the resources from the pod spec.", + "type": "string" + }, + "resources": { + "description": "List of unique Resources health. Each element in the list contains an unique resource ID and resource health. At a minimum, ResourceID must uniquely identify the Resource allocated to the Pod on the Node for the lifetime of a Pod. 
See ResourceID type for it's definition.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ResourceHealth" + }, + "x-kubernetes-list-map-keys": [ + "resourceID" + ], + "x-kubernetes-list-type": "map" + } + } + }, "io.k8s.api.core.v1.SELinuxOptions": { "description": "SELinuxOptions are the labels to be applied to the container", "type": "object", @@ -9898,7 +10948,8 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.ScopedResourceSelectorRequirement" - } + }, + "x-kubernetes-list-type": "atomic" } }, "x-kubernetes-map-type": "atomic" @@ -9924,7 +10975,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" } } }, @@ -10006,7 +11058,7 @@ "type": "object", "properties": { "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "description": "Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", "type": "string" }, "optional": { @@ -10027,7 +11079,7 @@ "type": "string" }, "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "description": "Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", "type": "string" }, "optional": { @@ -10081,10 +11133,11 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.KeyToPath" - } + }, + "x-kubernetes-list-type": "atomic" }, "name": { - "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "description": "Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", "type": "string" }, "optional": { @@ -10122,7 +11175,8 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.KeyToPath" - } + }, + "x-kubernetes-list-type": "atomic" }, "optional": { "description": "optional field specify whether the Secret or its keys must be defined", @@ -10142,6 +11196,10 @@ "description": "AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.", "type": "boolean" }, + "appArmorProfile": { + "description": "appArmorProfile is the AppArmor options to use by this container. If set, this profile overrides the pod's appArmorProfile. Note that this field cannot be set when spec.os.name is windows.", + "$ref": "#/definitions/io.k8s.api.core.v1.AppArmorProfile" + }, "capabilities": { "description": "The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows.", "$ref": "#/definitions/io.k8s.api.core.v1.Capabilities" @@ -10151,7 +11209,7 @@ "type": "boolean" }, "procMount": { - "description": "procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. 
This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows.", + "description": "procMount denotes the type of proc mount to use for the containers. The default value is Default which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows.", "type": "string" }, "readOnlyRootFilesystem": { @@ -10236,7 +11294,8 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" - } + }, + "x-kubernetes-list-type": "atomic" }, "kind": { "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", @@ -10252,6 +11311,10 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference" }, + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge" } @@ -10416,7 +11479,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "externalName": { "description": "externalName is the external reference that discovery mechanisms will return as an alias for this service (e.g. a DNS CNAME record). No proxying will be involved. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires `type` to be \"ExternalName\".", @@ -10460,7 +11524,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "ports": { "description": "The list of ports that are exposed by this service. 
More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", @@ -10496,6 +11561,10 @@ "description": "sessionAffinityConfig contains the configurations of session affinity.", "$ref": "#/definitions/io.k8s.api.core.v1.SessionAffinityConfig" }, + "trafficDistribution": { + "description": "TrafficDistribution offers a way to express preferences for how traffic is distributed to Service endpoints. Implementations can use this field as a hint, but are not required to guarantee strict adherence. If the field is not set, the implementation will apply its default routing strategy. If set to \"PreferClose\", implementations should prioritize endpoints that are topologically close (e.g., same zone). This is an alpha field and requires enabling ServiceTrafficDistribution feature.", + "type": "string" + }, "type": { "description": "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object or EndpointSlice objects. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a virtual IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the same endpoints as the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the same endpoints as the clusterIP. \"ExternalName\" aliases this service to the specified externalName. Several other fields do not apply to ExternalName services. 
More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types", "type": "string" @@ -10706,7 +11775,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" } } }, @@ -10719,7 +11789,8 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.TopologySelectorLabelRequirement" - } + }, + "x-kubernetes-list-type": "atomic" } }, "x-kubernetes-map-type": "atomic" @@ -10751,7 +11822,7 @@ "format": "int32" }, "minDomains": { - "description": "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule.\n\nFor example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew.\n\nThis is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default).", + "description": "MinDomains indicates a minimum number of eligible domains. 
When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule.\n\nFor example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew.", "type": "integer", "format": "int32" }, @@ -10896,6 +11967,10 @@ "description": "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", "$ref": "#/definitions/io.k8s.api.core.v1.HostPathVolumeSource" }, + "image": { + "description": "image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. The volume is resolved at pod startup depending on which PullPolicy value is provided:\n\n- Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. 
- Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.\n\nThe volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.", + "$ref": "#/definitions/io.k8s.api.core.v1.ImageVolumeSource" + }, "iscsi": { "description": "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md", "$ref": "#/definitions/io.k8s.api.core.v1.ISCSIVolumeSource" @@ -10981,7 +12056,7 @@ "type": "string" }, "mountPropagation": { - "description": "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. 
This field is beta in 1.10.", + "description": "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified (which defaults to None).", "type": "string" }, "name": { @@ -10992,6 +12067,10 @@ "description": "Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.", "type": "boolean" }, + "recursiveReadOnly": { + "description": "RecursiveReadOnly specifies whether read-only mounts should be handled recursively.\n\nIf ReadOnly is false, this field has no meaning and must be unspecified.\n\nIf ReadOnly is true, and this field is set to Disabled, the mount is not made recursively read-only. If this field is set to IfPossible, the mount is made recursively read-only, if it is supported by the container runtime. If this field is set to Enabled, the mount is made recursively read-only if it is supported by the container runtime, otherwise the pod will not be started and an error will be generated to indicate the reason.\n\nIf this field is set to IfPossible or Enabled, MountPropagation must be set to None (or be unspecified, which defaults to None).\n\nIf this field is not specified, it is treated as an equivalent of Disabled.", + "type": "string" + }, "subPath": { "description": "Path within the volume from which the container's volume should be mounted. 
Defaults to \"\" (volume's root).", "type": "string" @@ -11002,6 +12081,32 @@ } } }, + "io.k8s.api.core.v1.VolumeMountStatus": { + "description": "VolumeMountStatus shows status of volume mounts.", + "type": "object", + "required": [ + "name", + "mountPath" + ], + "properties": { + "mountPath": { + "description": "MountPath corresponds to the original VolumeMount.", + "type": "string" + }, + "name": { + "description": "Name corresponds to the name of the original VolumeMount.", + "type": "string" + }, + "readOnly": { + "description": "ReadOnly corresponds to the original VolumeMount.", + "type": "boolean" + }, + "recursiveReadOnly": { + "description": "RecursiveReadOnly must be set to Disabled, Enabled, or unspecified (for non-readonly mounts). An IfPossible value in the original VolumeMount must be translated to Disabled or Enabled, depending on the mount result.", + "type": "string" + } + } + }, "io.k8s.api.core.v1.VolumeNodeAffinity": { "description": "VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from.", "type": "object", @@ -11013,7 +12118,7 @@ } }, "io.k8s.api.core.v1.VolumeProjection": { - "description": "Projection that may be projected along with other supported volume types", + "description": "Projection that may be projected along with other supported volume types. 
Exactly one of these fields must be set.", "type": "object", "properties": { "clusterTrustBundle": { @@ -12687,7 +13792,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" } } }, @@ -12912,7 +14018,8 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.networking.v1.IngressLoadBalancerIngress" - } + }, + "x-kubernetes-list-type": "atomic" } } }, @@ -13065,14 +14172,16 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.networking.v1.NetworkPolicyPort" - } + }, + "x-kubernetes-list-type": "atomic" }, "to": { "description": "to is a list of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list.", "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.networking.v1.NetworkPolicyPeer" - } + }, + "x-kubernetes-list-type": "atomic" } } }, @@ -13085,14 +14194,16 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.networking.v1.NetworkPolicyPeer" - } + }, + "x-kubernetes-list-type": "atomic" }, "ports": { "description": "ports is a list of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). 
If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.networking.v1.NetworkPolicyPort" - } + }, + "x-kubernetes-list-type": "atomic" } } }, @@ -13180,14 +14291,16 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.networking.v1.NetworkPolicyEgressRule" - } + }, + "x-kubernetes-list-type": "atomic" }, "ingress": { "description": "ingress is a list of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)", "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.networking.v1.NetworkPolicyIngressRule" - } + }, + "x-kubernetes-list-type": "atomic" }, "podSelector": { "description": "podSelector selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. 
An empty podSelector matches all pods in this namespace.", @@ -13198,7 +14311,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" } } }, @@ -13215,9 +14329,10 @@ "type": "integer", "format": "int32" } - } + }, + "x-kubernetes-map-type": "atomic" }, - "io.k8s.api.networking.v1alpha1.IPAddress": { + "io.k8s.api.networking.v1beta1.IPAddress": { "description": "IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. An IP address can be represented in different formats, to guarantee the uniqueness of the IP, the name of the object is the IP address in canonical format, four decimal digits separated by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 Invalid: 10.01.2.3 or 2001:db8:0:0:0::1", "type": "object", "properties": { @@ -13235,18 +14350,18 @@ }, "spec": { "description": "spec is the desired state of the IPAddress. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.IPAddressSpec" + "$ref": "#/definitions/io.k8s.api.networking.v1beta1.IPAddressSpec" } }, "x-kubernetes-group-version-kind": [ { "group": "networking.k8s.io", "kind": "IPAddress", - "version": "v1alpha1" + "version": "v1beta1" } ] }, - "io.k8s.api.networking.v1alpha1.IPAddressList": { + "io.k8s.api.networking.v1beta1.IPAddressList": { "description": "IPAddressList contains a list of IPAddress.", "type": "object", "required": [ @@ -13261,7 +14376,7 @@ "description": "items is the list of IPAddresses.", "type": "array", "items": { - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.IPAddress" + "$ref": "#/definitions/io.k8s.api.networking.v1beta1.IPAddress" } }, "kind": { @@ -13277,23 +14392,30 @@ { "group": "networking.k8s.io", "kind": "IPAddressList", - "version": "v1alpha1" + "version": "v1beta1" } ] }, - "io.k8s.api.networking.v1alpha1.IPAddressSpec": { + "io.k8s.api.networking.v1beta1.IPAddressSpec": { "description": "IPAddressSpec describe the attributes in an IP Address.", "type": "object", + "required": [ + "parentRef" + ], "properties": { "parentRef": { "description": "ParentRef references the resource that an IPAddress is attached to. 
An IPAddress must reference a parent object.", - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ParentReference" + "$ref": "#/definitions/io.k8s.api.networking.v1beta1.ParentReference" } } }, - "io.k8s.api.networking.v1alpha1.ParentReference": { + "io.k8s.api.networking.v1beta1.ParentReference": { "description": "ParentReference describes a reference to a parent object.", "type": "object", + "required": [ + "resource", + "name" + ], "properties": { "group": { "description": "Group is the group of the object being referenced.", @@ -13313,7 +14435,7 @@ } } }, - "io.k8s.api.networking.v1alpha1.ServiceCIDR": { + "io.k8s.api.networking.v1beta1.ServiceCIDR": { "description": "ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). This range is used to allocate ClusterIPs to Service objects.", "type": "object", "properties": { @@ -13331,22 +14453,22 @@ }, "spec": { "description": "spec is the desired state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ServiceCIDRSpec" + "$ref": "#/definitions/io.k8s.api.networking.v1beta1.ServiceCIDRSpec" }, "status": { "description": "status represents the current state of the ServiceCIDR. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ServiceCIDRStatus" + "$ref": "#/definitions/io.k8s.api.networking.v1beta1.ServiceCIDRStatus" } }, "x-kubernetes-group-version-kind": [ { "group": "networking.k8s.io", "kind": "ServiceCIDR", - "version": "v1alpha1" + "version": "v1beta1" } ] }, - "io.k8s.api.networking.v1alpha1.ServiceCIDRList": { + "io.k8s.api.networking.v1beta1.ServiceCIDRList": { "description": "ServiceCIDRList contains a list of ServiceCIDR objects.", "type": "object", "required": [ @@ -13361,7 +14483,7 @@ "description": "items is the list of ServiceCIDRs.", "type": "array", "items": { - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ServiceCIDR" + "$ref": "#/definitions/io.k8s.api.networking.v1beta1.ServiceCIDR" } }, "kind": { @@ -13377,11 +14499,11 @@ { "group": "networking.k8s.io", "kind": "ServiceCIDRList", - "version": "v1alpha1" + "version": "v1beta1" } ] }, - "io.k8s.api.networking.v1alpha1.ServiceCIDRSpec": { + "io.k8s.api.networking.v1beta1.ServiceCIDRSpec": { "description": "ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.", "type": "object", "properties": { @@ -13390,11 +14512,12 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" } } }, - "io.k8s.api.networking.v1alpha1.ServiceCIDRStatus": { + "io.k8s.api.networking.v1beta1.ServiceCIDRStatus": { "description": "ServiceCIDRStatus describes the current state of the ServiceCIDR.", "type": "object", "properties": { @@ -13641,86 +14764,279 @@ "description": "UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\".\n\nValid policies are IfHealthyBudget and AlwaysAllow. 
If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy.\n\nIfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction.\n\nAlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction.\n\nAdditional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field.\n\nThis field is beta-level. The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default).", "type": "string" } - } + } + }, + "io.k8s.api.policy.v1.PodDisruptionBudgetStatus": { + "description": "PodDisruptionBudgetStatus represents information about the status of a PodDisruptionBudget. Status may trail the actual state of a system.", + "type": "object", + "required": [ + "disruptionsAllowed", + "currentHealthy", + "desiredHealthy", + "expectedPods" + ], + "properties": { + "conditions": { + "description": "Conditions contain conditions for PDB. The disruption controller sets the DisruptionAllowed condition. The following are known values for the reason field (additional reasons could be added in the future): - SyncFailed: The controller encountered an error and wasn't able to compute\n the number of allowed disruptions. 
Therefore no disruptions are\n allowed and the status of the condition will be False.\n- InsufficientPods: The number of pods are either at or below the number\n required by the PodDisruptionBudget. No disruptions are\n allowed and the status of the condition will be False.\n- SufficientPods: There are more pods than required by the PodDisruptionBudget.\n The condition will be True, and the number of allowed\n disruptions are provided by the disruptionsAllowed property.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Condition" + }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "currentHealthy": { + "description": "current number of healthy pods", + "type": "integer", + "format": "int32" + }, + "desiredHealthy": { + "description": "minimum desired number of healthy pods", + "type": "integer", + "format": "int32" + }, + "disruptedPods": { + "description": "DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there it will be removed from the list automatically by PodDisruptionBudget controller after some time. If everything goes smooth this map should be empty for the most of the time. 
Large number of entries in the map may indicate problems with pod deletions.", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + } + }, + "disruptionsAllowed": { + "description": "Number of pod disruptions that are currently allowed.", + "type": "integer", + "format": "int32" + }, + "expectedPods": { + "description": "total number of pods counted by this disruption budget", + "type": "integer", + "format": "int32" + }, + "observedGeneration": { + "description": "Most recent generation observed when updating this PDB status. DisruptionsAllowed and other status information is valid only if observedGeneration equals to PDB's object generation.", + "type": "integer", + "format": "int64" + } + } + }, + "io.k8s.api.rbac.v1.AggregationRule": { + "description": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", + "type": "object", + "properties": { + "clusterRoleSelectors": { + "description": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "io.k8s.api.rbac.v1.ClusterRole": { + "description": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", + "type": "object", + "properties": { + "aggregationRule": { + "description": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. 
If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.", + "$ref": "#/definitions/io.k8s.api.rbac.v1.AggregationRule" + }, + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "rules": { + "description": "Rules holds all the PolicyRules for this ClusterRole", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.rbac.v1.PolicyRule" + }, + "x-kubernetes-list-type": "atomic" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "rbac.authorization.k8s.io", + "kind": "ClusterRole", + "version": "v1" + } + ] + }, + "io.k8s.api.rbac.v1.ClusterRoleBinding": { + "description": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject.", + "type": "object", + "required": [ + "roleRef" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "roleRef": { + "description": "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error. This field is immutable.", + "$ref": "#/definitions/io.k8s.api.rbac.v1.RoleRef" + }, + "subjects": { + "description": "Subjects holds references to the objects the role applies to.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.rbac.v1.Subject" + }, + "x-kubernetes-list-type": "atomic" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "rbac.authorization.k8s.io", + "kind": "ClusterRoleBinding", + "version": "v1" + } + ] + }, + "io.k8s.api.rbac.v1.ClusterRoleBindingList": { + "description": "ClusterRoleBindingList is a collection of ClusterRoleBindings", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is a list of ClusterRoleBindings", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.rbac.v1.ClusterRoleBinding" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "rbac.authorization.k8s.io", + "kind": "ClusterRoleBindingList", + "version": "v1" + } + ] + }, + "io.k8s.api.rbac.v1.ClusterRoleList": { + "description": "ClusterRoleList is a collection of ClusterRoles", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is a list of ClusterRoles", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.rbac.v1.ClusterRole" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "rbac.authorization.k8s.io", + "kind": "ClusterRoleList", + "version": "v1" + } + ] }, - "io.k8s.api.policy.v1.PodDisruptionBudgetStatus": { - "description": "PodDisruptionBudgetStatus represents information about the status of a PodDisruptionBudget. Status may trail the actual state of a system.", + "io.k8s.api.rbac.v1.PolicyRule": { + "description": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", "type": "object", "required": [ - "disruptionsAllowed", - "currentHealthy", - "desiredHealthy", - "expectedPods" + "verbs" ], "properties": { - "conditions": { - "description": "Conditions contain conditions for PDB. The disruption controller sets the DisruptionAllowed condition. The following are known values for the reason field (additional reasons could be added in the future): - SyncFailed: The controller encountered an error and wasn't able to compute\n the number of allowed disruptions. Therefore no disruptions are\n allowed and the status of the condition will be False.\n- InsufficientPods: The number of pods are either at or below the number\n required by the PodDisruptionBudget. No disruptions are\n allowed and the status of the condition will be False.\n- SufficientPods: There are more pods than required by the PodDisruptionBudget.\n The condition will be True, and the number of allowed\n disruptions are provided by the disruptionsAllowed property.", + "apiGroups": { + "description": "APIGroups is the name of the APIGroup that contains the resources. 
If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. \"\" represents the core API group and \"*\" represents all API groups.", "type": "array", "items": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Condition" + "type": "string" }, - "x-kubernetes-list-map-keys": [ - "type" - ], - "x-kubernetes-list-type": "map", - "x-kubernetes-patch-merge-key": "type", - "x-kubernetes-patch-strategy": "merge" - }, - "currentHealthy": { - "description": "current number of healthy pods", - "type": "integer", - "format": "int32" - }, - "desiredHealthy": { - "description": "minimum desired number of healthy pods", - "type": "integer", - "format": "int32" + "x-kubernetes-list-type": "atomic" }, - "disruptedPods": { - "description": "DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there it will be removed from the list automatically by PodDisruptionBudget controller after some time. If everything goes smooth this map should be empty for the most of the time. Large number of entries in the map may indicate problems with pod deletions.", - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" - } + "nonResourceURLs": { + "description": "NonResourceURLs is a set of partial urls that a user should have access to. 
*s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "atomic" }, - "disruptionsAllowed": { - "description": "Number of pod disruptions that are currently allowed.", - "type": "integer", - "format": "int32" + "resourceNames": { + "description": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "atomic" }, - "expectedPods": { - "description": "total number of pods counted by this disruption budget", - "type": "integer", - "format": "int32" + "resources": { + "description": "Resources is a list of resources this rule applies to. '*' represents all resources.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "atomic" }, - "observedGeneration": { - "description": "Most recent generation observed when updating this PDB status. DisruptionsAllowed and other status information is valid only if observedGeneration equals to PDB's object generation.", - "type": "integer", - "format": "int64" - } - } - }, - "io.k8s.api.rbac.v1.AggregationRule": { - "description": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", - "type": "object", - "properties": { - "clusterRoleSelectors": { - "description": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added", + "verbs": { + "description": "Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. 
'*' represents all verbs.", "type": "array", "items": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" - } + "type": "string" + }, + "x-kubernetes-list-type": "atomic" } } }, - "io.k8s.api.rbac.v1.ClusterRole": { - "description": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", + "io.k8s.api.rbac.v1.Role": { + "description": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.", "type": "object", "properties": { - "aggregationRule": { - "description": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.", - "$ref": "#/definitions/io.k8s.api.rbac.v1.AggregationRule" - }, "apiVersion": { "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", "type": "string" @@ -13734,23 +15050,24 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "rules": { - "description": "Rules holds all the PolicyRules for this ClusterRole", + "description": "Rules holds all the PolicyRules for this Role", "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.rbac.v1.PolicyRule" - } + }, + "x-kubernetes-list-type": "atomic" } }, "x-kubernetes-group-version-kind": [ { "group": "rbac.authorization.k8s.io", - "kind": "ClusterRole", + "kind": "Role", "version": "v1" } ] }, - "io.k8s.api.rbac.v1.ClusterRoleBinding": { - "description": "ClusterRoleBinding references a ClusterRole, but not contain it. 
It can reference a ClusterRole in the global namespace, and adds who information via Subject.", + "io.k8s.api.rbac.v1.RoleBinding": { + "description": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace.", "type": "object", "required": [ "roleRef" @@ -13769,7 +15086,7 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "roleRef": { - "description": "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error. This field is immutable.", + "description": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error. This field is immutable.", "$ref": "#/definitions/io.k8s.api.rbac.v1.RoleRef" }, "subjects": { @@ -13777,19 +15094,20 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.rbac.v1.Subject" - } + }, + "x-kubernetes-list-type": "atomic" } }, "x-kubernetes-group-version-kind": [ { "group": "rbac.authorization.k8s.io", - "kind": "ClusterRoleBinding", + "kind": "RoleBinding", "version": "v1" } ] }, - "io.k8s.api.rbac.v1.ClusterRoleBindingList": { - "description": "ClusterRoleBindingList is a collection of ClusterRoleBindings", + "io.k8s.api.rbac.v1.RoleBindingList": { + "description": "RoleBindingList is a collection of RoleBindings", "type": "object", "required": [ "items" @@ -13800,10 +15118,10 @@ "type": "string" }, "items": { - "description": "Items is a list of ClusterRoleBindings", + "description": "Items is a list of RoleBindings", "type": "array", "items": { - "$ref": "#/definitions/io.k8s.api.rbac.v1.ClusterRoleBinding" + "$ref": "#/definitions/io.k8s.api.rbac.v1.RoleBinding" } }, 
"kind": { @@ -13818,13 +15136,13 @@ "x-kubernetes-group-version-kind": [ { "group": "rbac.authorization.k8s.io", - "kind": "ClusterRoleBindingList", + "kind": "RoleBindingList", "version": "v1" } ] }, - "io.k8s.api.rbac.v1.ClusterRoleList": { - "description": "ClusterRoleList is a collection of ClusterRoles", + "io.k8s.api.rbac.v1.RoleList": { + "description": "RoleList is a collection of Roles", "type": "object", "required": [ "items" @@ -13835,10 +15153,10 @@ "type": "string" }, "items": { - "description": "Items is a list of ClusterRoles", + "description": "Items is a list of Roles", "type": "array", "items": { - "$ref": "#/definitions/io.k8s.api.rbac.v1.ClusterRole" + "$ref": "#/definitions/io.k8s.api.rbac.v1.Role" } }, "kind": { @@ -13853,92 +15171,253 @@ "x-kubernetes-group-version-kind": [ { "group": "rbac.authorization.k8s.io", - "kind": "ClusterRoleList", + "kind": "RoleList", "version": "v1" } ] }, - "io.k8s.api.rbac.v1.PolicyRule": { - "description": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", + "io.k8s.api.rbac.v1.RoleRef": { + "description": "RoleRef contains information that points to the role being used", + "type": "object", + "required": [ + "apiGroup", + "kind", + "name" + ], + "properties": { + "apiGroup": { + "description": "APIGroup is the group for the resource being referenced", + "type": "string" + }, + "kind": { + "description": "Kind is the type of resource being referenced", + "type": "string" + }, + "name": { + "description": "Name is the name of resource being referenced", + "type": "string" + } + }, + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.rbac.v1.Subject": { + "description": "Subject contains a reference to the object or user identities a role binding applies to. 
This can either hold a direct API object reference, or a value for non-objects such as user and group names.", + "type": "object", + "required": [ + "kind", + "name" + ], + "properties": { + "apiGroup": { + "description": "APIGroup holds the API group of the referenced subject. Defaults to \"\" for ServiceAccount subjects. Defaults to \"rbac.authorization.k8s.io\" for User and Group subjects.", + "type": "string" + }, + "kind": { + "description": "Kind of object being referenced. Values defined by this API group are \"User\", \"Group\", and \"ServiceAccount\". If the Authorizer does not recognized the kind value, the Authorizer should report an error.", + "type": "string" + }, + "name": { + "description": "Name of the object being referenced.", + "type": "string" + }, + "namespace": { + "description": "Namespace of the referenced object. If the object kind is non-namespace, such as \"User\" or \"Group\", and this value is not empty the Authorizer should report an error.", + "type": "string" + } + }, + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.resource.v1alpha3.AllocationResult": { + "description": "AllocationResult contains attributes of an allocated resource.", + "type": "object", + "properties": { + "controller": { + "description": "Controller is the name of the DRA driver which handled the allocation. That driver is also responsible for deallocating the claim. 
It is empty when the claim can be deallocated without involving a driver.\n\nA driver may allocate devices provided by other drivers, so this driver name here can be different from the driver names listed for the results.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.", + "type": "string" + }, + "devices": { + "description": "Devices is the result of allocating devices.", + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.DeviceAllocationResult" + }, + "nodeSelector": { + "description": "NodeSelector defines where the allocated resources are available. If unset, they are available everywhere.", + "$ref": "#/definitions/io.k8s.api.core.v1.NodeSelector" + } + } + }, + "io.k8s.api.resource.v1alpha3.BasicDevice": { + "description": "BasicDevice defines one device instance.", + "type": "object", + "properties": { + "attributes": { + "description": "Attributes defines the set of attributes for this device. The name of each attribute must be unique in that set.\n\nThe maximum number of attributes and capacities combined is 32.", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.DeviceAttribute" + } + }, + "capacity": { + "description": "Capacity defines the set of capacities for this device. The name of each capacity must be unique in that set.\n\nThe maximum number of attributes and capacities combined is 32.", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + } + } + } + }, + "io.k8s.api.resource.v1alpha3.CELDeviceSelector": { + "description": "CELDeviceSelector contains a CEL expression for selecting a device.", + "type": "object", + "required": [ + "expression" + ], + "properties": { + "expression": { + "description": "Expression is a CEL expression which evaluates a single device. 
It must evaluate to true when the device under consideration satisfies the desired criteria, and false when it does not. Any other result is an error and causes allocation of devices to abort.\n\nThe expression's input is an object named \"device\", which carries the following properties:\n - driver (string): the name of the driver which defines this device.\n - attributes (map[string]object): the device's attributes, grouped by prefix\n (e.g. device.attributes[\"dra.example.com\"] evaluates to an object with all\n of the attributes which were prefixed by \"dra.example.com\".\n - capacity (map[string]object): the device's capacities, grouped by prefix.\n\nExample: Consider a device with driver=\"dra.example.com\", which exposes two attributes named \"model\" and \"ext.example.com/family\" and which exposes one capacity named \"modules\". This input to this expression would have the following fields:\n\n device.driver\n device.attributes[\"dra.example.com\"].model\n device.attributes[\"ext.example.com\"].family\n device.capacity[\"dra.example.com\"].modules\n\nThe device.driver field can be used to check for a specific driver, either as a high-level precondition (i.e. you only want to consider devices from this driver) or as part of a multi-clause expression that is meant to consider devices from different drivers.\n\nThe value type of each attribute is defined by the device definition, and users who write these expressions must consult the documentation for their specific drivers. The value type of each capacity is Quantity.\n\nIf an unknown prefix is used as a lookup in either device.attributes or device.capacity, an empty map will be returned. 
Any reference to an unknown field will cause an evaluation error and allocation to abort.\n\nA robust expression should check for the existence of attributes before referencing them.\n\nFor ease of use, the cel.bind() function is enabled, and can be used to simplify expressions that access multiple attributes with the same domain. For example:\n\n cel.bind(dra, device.attributes[\"dra.example.com\"], dra.someBool \u0026\u0026 dra.anotherBool)", + "type": "string" + } + } + }, + "io.k8s.api.resource.v1alpha3.Device": { + "description": "Device represents one individual hardware instance that can be selected based on its attributes. Besides the name, exactly one field must be set.", + "type": "object", + "required": [ + "name" + ], + "properties": { + "basic": { + "description": "Basic defines one device instance.", + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.BasicDevice" + }, + "name": { + "description": "Name is unique identifier among all devices managed by the driver in the pool. It must be a DNS label.", + "type": "string" + } + } + }, + "io.k8s.api.resource.v1alpha3.DeviceAllocationConfiguration": { + "description": "DeviceAllocationConfiguration gets embedded in an AllocationResult.", + "type": "object", + "required": [ + "source" + ], + "properties": { + "opaque": { + "description": "Opaque provides driver-specific configuration parameters.", + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.OpaqueDeviceConfiguration" + }, + "requests": { + "description": "Requests lists the names of requests where the configuration applies. 
If empty, its applies to all requests.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "atomic" + }, + "source": { + "description": "Source records whether the configuration comes from a class and thus is not something that a normal user would have been able to set or from a claim.", + "type": "string" + } + } + }, + "io.k8s.api.resource.v1alpha3.DeviceAllocationResult": { + "description": "DeviceAllocationResult is the result of allocating devices.", "type": "object", - "required": [ - "verbs" - ], "properties": { - "apiGroups": { - "description": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. \"\" represents the core API group and \"*\" represents all API groups.", + "config": { + "description": "This field is a combination of all the claim and class configuration parameters. Drivers can distinguish between those based on a flag.\n\nThis includes configuration parameters for drivers which have no allocated devices in the result because it is up to the drivers which configuration parameters they support. They can silently ignore unknown configuration parameters.", "type": "array", "items": { - "type": "string" - } + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.DeviceAllocationConfiguration" + }, + "x-kubernetes-list-type": "atomic" }, - "nonResourceURLs": { - "description": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. 
Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.", + "results": { + "description": "Results lists all allocated devices.", "type": "array", "items": { - "type": "string" - } + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.DeviceRequestAllocationResult" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "io.k8s.api.resource.v1alpha3.DeviceAttribute": { + "description": "DeviceAttribute must have exactly one field set.", + "type": "object", + "properties": { + "bool": { + "description": "BoolValue is a true/false value.", + "type": "boolean" }, - "resourceNames": { - "description": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", + "int": { + "description": "IntValue is a number.", + "type": "integer", + "format": "int64" + }, + "string": { + "description": "StringValue is a string. Must not be longer than 64 characters.", + "type": "string" + }, + "version": { + "description": "VersionValue is a semantic version according to semver.org spec 2.0.0. Must not be longer than 64 characters.", + "type": "string" + } + } + }, + "io.k8s.api.resource.v1alpha3.DeviceClaim": { + "description": "DeviceClaim defines how to request devices with a ResourceClaim.", + "type": "object", + "properties": { + "config": { + "description": "This field holds configuration for multiple potential drivers which could satisfy requests in this claim. It is ignored while allocating the claim.", "type": "array", "items": { - "type": "string" - } + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.DeviceClaimConfiguration" + }, + "x-kubernetes-list-type": "atomic" }, - "resources": { - "description": "Resources is a list of resources this rule applies to. 
'*' represents all resources.", + "constraints": { + "description": "These constraints must be satisfied by the set of devices that get allocated for the claim.", "type": "array", "items": { - "type": "string" - } + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.DeviceConstraint" + }, + "x-kubernetes-list-type": "atomic" }, - "verbs": { - "description": "Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs.", + "requests": { + "description": "Requests represent individual requests for distinct devices which must all be satisfied. If empty, nothing needs to be allocated.", "type": "array", "items": { - "type": "string" - } + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.DeviceRequest" + }, + "x-kubernetes-list-type": "atomic" } } }, - "io.k8s.api.rbac.v1.Role": { - "description": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.", + "io.k8s.api.resource.v1alpha3.DeviceClaimConfiguration": { + "description": "DeviceClaimConfiguration is used for configuration parameters in DeviceClaim.", "type": "object", "properties": { - "apiVersion": { - "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - "type": "string" - }, - "kind": { - "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "type": "string" - }, - "metadata": { - "description": "Standard object's metadata.", - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + "opaque": { + "description": "Opaque provides driver-specific configuration parameters.", + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.OpaqueDeviceConfiguration" }, - "rules": { - "description": "Rules holds all the PolicyRules for this Role", + "requests": { + "description": "Requests lists the names of requests where the configuration applies. If empty, it applies to all requests.", "type": "array", "items": { - "$ref": "#/definitions/io.k8s.api.rbac.v1.PolicyRule" - } - } - }, - "x-kubernetes-group-version-kind": [ - { - "group": "rbac.authorization.k8s.io", - "kind": "Role", - "version": "v1" + "type": "string" + }, + "x-kubernetes-list-type": "atomic" } - ] + } }, - "io.k8s.api.rbac.v1.RoleBinding": { - "description": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace.", + "io.k8s.api.resource.v1alpha3.DeviceClass": { + "description": "DeviceClass is a vendor- or admin-provided resource that contains device configuration and selectors. It can be referenced in the device requests of a claim to apply these presets. 
Cluster scoped.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", "type": "object", "required": [ - "roleRef" + "spec" ], "properties": { "apiVersion": { @@ -13950,31 +15429,34 @@ "type": "string" }, "metadata": { - "description": "Standard object's metadata.", + "description": "Standard object metadata", "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, - "roleRef": { - "description": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error. This field is immutable.", - "$ref": "#/definitions/io.k8s.api.rbac.v1.RoleRef" - }, - "subjects": { - "description": "Subjects holds references to the objects the role applies to.", - "type": "array", - "items": { - "$ref": "#/definitions/io.k8s.api.rbac.v1.Subject" - } + "spec": { + "description": "Spec defines what can be allocated and how to configure it.\n\nThis is mutable. Consumers have to be prepared for classes changing at any time, either because they get updated or replaced. 
Claim allocations are done once based on whatever was set in classes at the time of allocation.\n\nChanging the spec automatically increments the metadata.generation number.", + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.DeviceClassSpec" } }, "x-kubernetes-group-version-kind": [ { - "group": "rbac.authorization.k8s.io", - "kind": "RoleBinding", - "version": "v1" + "group": "resource.k8s.io", + "kind": "DeviceClass", + "version": "v1alpha3" } ] }, - "io.k8s.api.rbac.v1.RoleBindingList": { - "description": "RoleBindingList is a collection of RoleBindings", + "io.k8s.api.resource.v1alpha3.DeviceClassConfiguration": { + "description": "DeviceClassConfiguration is used in DeviceClass.", + "type": "object", + "properties": { + "opaque": { + "description": "Opaque provides driver-specific configuration parameters.", + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.OpaqueDeviceConfiguration" + } + } + }, + "io.k8s.api.resource.v1alpha3.DeviceClassList": { + "description": "DeviceClassList is a collection of classes.", "type": "object", "required": [ "items" @@ -13985,10 +15467,10 @@ "type": "string" }, "items": { - "description": "Items is a list of RoleBindings", + "description": "Items is the list of resource classes.", "type": "array", "items": { - "$ref": "#/definitions/io.k8s.api.rbac.v1.RoleBinding" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.DeviceClass" } }, "kind": { @@ -13996,128 +15478,159 @@ "type": "string" }, "metadata": { - "description": "Standard object's metadata.", + "description": "Standard list metadata", "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" } }, "x-kubernetes-group-version-kind": [ { - "group": "rbac.authorization.k8s.io", - "kind": "RoleBindingList", - "version": "v1" + "group": "resource.k8s.io", + "kind": "DeviceClassList", + "version": "v1alpha3" } ] }, - "io.k8s.api.rbac.v1.RoleList": { - "description": "RoleList is a collection of Roles", + "io.k8s.api.resource.v1alpha3.DeviceClassSpec": { 
+ "description": "DeviceClassSpec is used in a [DeviceClass] to define what can be allocated and how to configure it.", "type": "object", - "required": [ - "items" - ], "properties": { - "apiVersion": { - "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - "type": "string" + "config": { + "description": "Config defines configuration parameters that apply to each device that is claimed via this class. Some classses may potentially be satisfied by multiple drivers, so each instance of a vendor configuration applies to exactly one driver.\n\nThey are passed to the driver, but are not considered while allocating the claim.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.DeviceClassConfiguration" + }, + "x-kubernetes-list-type": "atomic" }, - "items": { - "description": "Items is a list of Roles", + "selectors": { + "description": "Each selector must be satisfied by a device which is claimed via this class.", "type": "array", "items": { - "$ref": "#/definitions/io.k8s.api.rbac.v1.Role" - } + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.DeviceSelector" + }, + "x-kubernetes-list-type": "atomic" }, - "kind": { - "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "suitableNodes": { + "description": "Only nodes matching the selector will be considered by the scheduler when trying to find a Node that fits a Pod when that Pod uses a claim that has not been allocated yet *and* that claim gets allocated through a control plane controller. It is ignored when the claim does not use a control plane controller for allocation.\n\nSetting this field is optional. If unset, all Nodes are candidates.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.", + "$ref": "#/definitions/io.k8s.api.core.v1.NodeSelector" + } + } + }, + "io.k8s.api.resource.v1alpha3.DeviceConstraint": { + "description": "DeviceConstraint must have exactly one field set besides Requests.", + "type": "object", + "properties": { + "matchAttribute": { + "description": "MatchAttribute requires that all devices in question have this attribute and that its type and value are the same across those devices.\n\nFor example, if you specified \"dra.example.com/numa\" (a hypothetical example!), then only devices in the same NUMA node will be chosen. A device which does not have that attribute will not be chosen. All devices should use a value of the same type for this attribute because that is part of its specification, but if one device doesn't, then it also will not be chosen.\n\nMust include the domain qualifier.", "type": "string" }, - "metadata": { - "description": "Standard object's metadata.", - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" - } - }, - "x-kubernetes-group-version-kind": [ - { - "group": "rbac.authorization.k8s.io", - "kind": "RoleList", - "version": "v1" + "requests": { + "description": "Requests is a list of the one or more requests in this claim which must co-satisfy this constraint. If a request is fulfilled by multiple devices, then all of the devices must satisfy the constraint. 
If this is not specified, this constraint applies to all requests in this claim.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "atomic" } - ] + } }, - "io.k8s.api.rbac.v1.RoleRef": { - "description": "RoleRef contains information that points to the role being used", + "io.k8s.api.resource.v1alpha3.DeviceRequest": { + "description": "DeviceRequest is a request for devices required for a claim. This is typically a request for a single resource like a device, but can also ask for several identical devices.\n\nA DeviceClassName is currently required. Clients must check that it is indeed set. It's absence indicates that something changed in a way that is not supported by the client yet, in which case it must refuse to handle the request.", "type": "object", "required": [ - "apiGroup", - "kind", - "name" + "name", + "deviceClassName" ], "properties": { - "apiGroup": { - "description": "APIGroup is the group for the resource being referenced", + "adminAccess": { + "description": "AdminAccess indicates that this is a claim for administrative access to the device(s). Claims with AdminAccess are expected to be used for monitoring or other management services for a device. They ignore all ordinary claims to the device with respect to access modes and any resource allocations.", + "type": "boolean" + }, + "allocationMode": { + "description": "AllocationMode and its related fields define how devices are allocated to satisfy this request. Supported values are:\n\n- ExactCount: This request is for a specific number of devices.\n This is the default. The exact number is provided in the\n count field.\n\n- All: This request is for all of the matching devices in a pool.\n Allocation will fail if some devices are already allocated,\n unless adminAccess is requested.\n\nIf AlloctionMode is not specified, the default mode is ExactCount. If the mode is ExactCount and count is not specified, the default count is one. 
Any other requests must specify this field.\n\nMore modes may get added in the future. Clients must refuse to handle requests with unknown modes.", "type": "string" }, - "kind": { - "description": "Kind is the type of resource being referenced", + "count": { + "description": "Count is used only when the count mode is \"ExactCount\". Must be greater than zero. If AllocationMode is ExactCount and this field is not specified, the default is one.", + "type": "integer", + "format": "int64" + }, + "deviceClassName": { + "description": "DeviceClassName references a specific DeviceClass, which can define additional configuration and selectors to be inherited by this request.\n\nA class is required. Which classes are available depends on the cluster.\n\nAdministrators may use this to restrict which devices may get requested by only installing classes with selectors for permitted devices. If users are free to request anything without restrictions, then administrators can create an empty DeviceClass for users to reference.", "type": "string" }, "name": { - "description": "Name is the name of resource being referenced", + "description": "Name can be used to reference this request in a pod.spec.containers[].resources.claims entry and in a constraint of the claim.\n\nMust be a DNS label.", "type": "string" + }, + "selectors": { + "description": "Selectors define criteria which must be satisfied by a specific device in order for that device to be considered for this request. All selectors must be satisfied for a device to be considered.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.DeviceSelector" + }, + "x-kubernetes-list-type": "atomic" } - }, - "x-kubernetes-map-type": "atomic" + } }, - "io.k8s.api.rbac.v1.Subject": { - "description": "Subject contains a reference to the object or user identities a role binding applies to. 
This can either hold a direct API object reference, or a value for non-objects such as user and group names.", + "io.k8s.api.resource.v1alpha3.DeviceRequestAllocationResult": { + "description": "DeviceRequestAllocationResult contains the allocation result for one request.", "type": "object", "required": [ - "kind", - "name" + "request", + "driver", + "pool", + "device" ], "properties": { - "apiGroup": { - "description": "APIGroup holds the API group of the referenced subject. Defaults to \"\" for ServiceAccount subjects. Defaults to \"rbac.authorization.k8s.io\" for User and Group subjects.", + "device": { + "description": "Device references one device instance via its name in the driver's resource pool. It must be a DNS label.", "type": "string" }, - "kind": { - "description": "Kind of object being referenced. Values defined by this API group are \"User\", \"Group\", and \"ServiceAccount\". If the Authorizer does not recognized the kind value, the Authorizer should report an error.", + "driver": { + "description": "Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.", "type": "string" }, - "name": { - "description": "Name of the object being referenced.", + "pool": { + "description": "This name together with the driver name and the device name field identify which device was allocated (`\u003cdriver name\u003e/\u003cpool name\u003e/\u003cdevice name\u003e`).\n\nMust not be longer than 253 characters and may contain one or more DNS sub-domains separated by slashes.", "type": "string" }, - "namespace": { - "description": "Namespace of the referenced object. 
If the object kind is non-namespace, such as \"User\" or \"Group\", and this value is not empty the Authorizer should report an error.", + "request": { + "description": "Request is the name of the request in the claim which caused this device to be allocated. Multiple devices may have been allocated per request.", "type": "string" } - }, - "x-kubernetes-map-type": "atomic" + } }, - "io.k8s.api.resource.v1alpha2.AllocationResult": { - "description": "AllocationResult contains attributes of an allocated resource.", + "io.k8s.api.resource.v1alpha3.DeviceSelector": { + "description": "DeviceSelector must have exactly one field set.", "type": "object", "properties": { - "availableOnNodes": { - "description": "This field will get set by the resource driver after it has allocated the resource to inform the scheduler where it can schedule Pods using the ResourceClaim.\n\nSetting this field is optional. If null, the resource is available everywhere.", - "$ref": "#/definitions/io.k8s.api.core.v1.NodeSelector" - }, - "resourceHandles": { - "description": "ResourceHandles contain the state associated with an allocation that should be maintained throughout the lifetime of a claim. Each ResourceHandle contains data that should be passed to a specific kubelet plugin once it lands on a node. This data is returned by the driver after a successful allocation and is opaque to Kubernetes. Driver documentation may explain to users how to interpret this data if needed.\n\nSetting this field is optional. It has a maximum size of 32 entries. If null (or empty), it is assumed this allocation will be processed by a single kubelet plugin with no ResourceHandle data attached. 
The name of the kubelet plugin invoked will match the DriverName set in the ResourceClaimStatus this AllocationResult is embedded in.", - "type": "array", - "items": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceHandle" - }, - "x-kubernetes-list-type": "atomic" + "cel": { + "description": "CEL contains a CEL expression for selecting a device.", + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.CELDeviceSelector" + } + } + }, + "io.k8s.api.resource.v1alpha3.OpaqueDeviceConfiguration": { + "description": "OpaqueDeviceConfiguration contains configuration parameters for a driver in a format defined by the driver vendor.", + "type": "object", + "required": [ + "driver", + "parameters" + ], + "properties": { + "driver": { + "description": "Driver is used to determine which kubelet plugin needs to be passed these configuration parameters.\n\nAn admission policy provided by the driver developer could use this to decide whether it needs to validate them.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.", + "type": "string" }, - "shareable": { - "description": "Shareable determines whether the resource supports more than one consumer at a time.", - "type": "boolean" + "parameters": { + "description": "Parameters can contain arbitrary data. It is the responsibility of the driver developer to handle validation and versioning. 
Typically this includes self-identification and a version (\"kind\" + \"apiVersion\" for Kubernetes types), with conversion between different versions.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.runtime.RawExtension" } } }, - "io.k8s.api.resource.v1alpha2.PodSchedulingContext": { - "description": "PodSchedulingContext objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "io.k8s.api.resource.v1alpha3.PodSchedulingContext": { + "description": "PodSchedulingContext objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DRAControlPlaneController feature gate.", "type": "object", "required": [ "spec" @@ -14137,22 +15650,22 @@ }, "spec": { "description": "Spec describes where resources for the Pod are needed.", - "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingContextSpec" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.PodSchedulingContextSpec" }, "status": { "description": "Status describes where resources for the Pod can be allocated.", - "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingContextStatus" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.PodSchedulingContextStatus" } }, "x-kubernetes-group-version-kind": [ { "group": "resource.k8s.io", "kind": "PodSchedulingContext", - "version": "v1alpha2" + "version": "v1alpha3" } ] }, - "io.k8s.api.resource.v1alpha2.PodSchedulingContextList": { + "io.k8s.api.resource.v1alpha3.PodSchedulingContextList": { "description": "PodSchedulingContextList is a collection of Pod scheduling objects.", "type": "object", "required": [ @@ -14167,7 +15680,7 @@ "description": "Items is the list of PodSchedulingContext objects.", "type": "array", "items": { - "$ref": 
"#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingContext" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.PodSchedulingContext" } }, "kind": { @@ -14183,11 +15696,11 @@ { "group": "resource.k8s.io", "kind": "PodSchedulingContextList", - "version": "v1alpha2" + "version": "v1alpha3" } ] }, - "io.k8s.api.resource.v1alpha2.PodSchedulingContextSpec": { + "io.k8s.api.resource.v1alpha3.PodSchedulingContextSpec": { "description": "PodSchedulingContextSpec describes where resources for the Pod are needed.", "type": "object", "properties": { @@ -14205,7 +15718,7 @@ } } }, - "io.k8s.api.resource.v1alpha2.PodSchedulingContextStatus": { + "io.k8s.api.resource.v1alpha3.PodSchedulingContextStatus": { "description": "PodSchedulingContextStatus describes where resources for the Pod can be allocated.", "type": "object", "properties": { @@ -14213,7 +15726,7 @@ "description": "ResourceClaims describes resource availability for each pod.spec.resourceClaim entry where the corresponding ResourceClaim uses \"WaitForFirstConsumer\" allocation mode.", "type": "array", "items": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimSchedulingStatus" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.ResourceClaimSchedulingStatus" }, "x-kubernetes-list-map-keys": [ "name" @@ -14222,8 +15735,8 @@ } } }, - "io.k8s.api.resource.v1alpha2.ResourceClaim": { - "description": "ResourceClaim describes which resources are needed by a resource consumer. Its status tracks whether the resource has been allocated and what the resulting attributes are.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "io.k8s.api.resource.v1alpha3.ResourceClaim": { + "description": "ResourceClaim describes a request for access to resources in the cluster, for use by workloads. For example, if a workload needs an accelerator device with specific properties, this is how that request is expressed. 
The status stanza tracks whether this claim has been satisfied and what specific resources have been allocated.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", "type": "object", "required": [ "spec" @@ -14242,23 +15755,23 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "spec": { - "description": "Spec describes the desired attributes of a resource that then needs to be allocated. It can only be set once when creating the ResourceClaim.", - "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimSpec" + "description": "Spec describes what is being requested and how to configure it. The spec is immutable.", + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.ResourceClaimSpec" }, "status": { - "description": "Status describes whether the resource is available and with which attributes.", - "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimStatus" + "description": "Status describes whether the claim is ready to use and what has been allocated.", + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.ResourceClaimStatus" } }, "x-kubernetes-group-version-kind": [ { "group": "resource.k8s.io", "kind": "ResourceClaim", - "version": "v1alpha2" + "version": "v1alpha3" } ] }, - "io.k8s.api.resource.v1alpha2.ResourceClaimConsumerReference": { + "io.k8s.api.resource.v1alpha3.ResourceClaimConsumerReference": { "description": "ResourceClaimConsumerReference contains enough information to let you locate the consumer of a ResourceClaim. 
The user must be a resource in the same namespace as the ResourceClaim.", "type": "object", "required": [ @@ -14285,7 +15798,7 @@ } } }, - "io.k8s.api.resource.v1alpha2.ResourceClaimList": { + "io.k8s.api.resource.v1alpha3.ResourceClaimList": { "description": "ResourceClaimList is a collection of claims.", "type": "object", "required": [ @@ -14300,7 +15813,7 @@ "description": "Items is the list of resource claims.", "type": "array", "items": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaim" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.ResourceClaim" } }, "kind": { @@ -14316,35 +15829,16 @@ { "group": "resource.k8s.io", "kind": "ResourceClaimList", - "version": "v1alpha2" + "version": "v1alpha3" } ] }, - "io.k8s.api.resource.v1alpha2.ResourceClaimParametersReference": { - "description": "ResourceClaimParametersReference contains enough information to let you locate the parameters for a ResourceClaim. The object must be in the same namespace as the ResourceClaim.", + "io.k8s.api.resource.v1alpha3.ResourceClaimSchedulingStatus": { + "description": "ResourceClaimSchedulingStatus contains information about one particular ResourceClaim with \"WaitForFirstConsumer\" allocation mode.", "type": "object", "required": [ - "kind", "name" ], - "properties": { - "apiGroup": { - "description": "APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources.", - "type": "string" - }, - "kind": { - "description": "Kind is the type of resource being referenced. 
This is the same value as in the parameter object's metadata, for example \"ConfigMap\".", - "type": "string" - }, - "name": { - "description": "Name is the name of resource being referenced.", - "type": "string" - } - } - }, - "io.k8s.api.resource.v1alpha2.ResourceClaimSchedulingStatus": { - "description": "ResourceClaimSchedulingStatus contains information about one particular ResourceClaim with \"WaitForFirstConsumer\" allocation mode.", - "type": "object", "properties": { "name": { "description": "Name matches the pod.spec.resourceClaims[*].Name field.", @@ -14360,58 +15854,49 @@ } } }, - "io.k8s.api.resource.v1alpha2.ResourceClaimSpec": { - "description": "ResourceClaimSpec defines how a resource is to be allocated.", - "type": "object", - "required": [ - "resourceClassName" - ], + "io.k8s.api.resource.v1alpha3.ResourceClaimSpec": { + "description": "ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it.", + "type": "object", "properties": { - "allocationMode": { - "description": "Allocation can start immediately or when a Pod wants to use the resource. \"WaitForFirstConsumer\" is the default.", + "controller": { + "description": "Controller is the name of the DRA driver that is meant to handle allocation of this claim. 
If empty, allocation is handled by the scheduler while scheduling a pod.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.", "type": "string" }, - "parametersRef": { - "description": "ParametersRef references a separate object with arbitrary parameters that will be used by the driver when allocating a resource for the claim.\n\nThe object must be in the same namespace as the ResourceClaim.", - "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimParametersReference" - }, - "resourceClassName": { - "description": "ResourceClassName references the driver and additional parameters via the name of a ResourceClass that was created as part of the driver deployment.", - "type": "string" + "devices": { + "description": "Devices defines how to request devices.", + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.DeviceClaim" } } }, - "io.k8s.api.resource.v1alpha2.ResourceClaimStatus": { - "description": "ResourceClaimStatus tracks whether the resource has been allocated and what the resulting attributes are.", + "io.k8s.api.resource.v1alpha3.ResourceClaimStatus": { + "description": "ResourceClaimStatus tracks whether the resource has been allocated and what the result of that was.", "type": "object", "properties": { "allocation": { - "description": "Allocation is set by the resource driver once a resource or set of resources has been allocated successfully. 
If this is not specified, the resources have not been allocated yet.", - "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.AllocationResult" + "description": "Allocation is set once the claim has been allocated successfully.", + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.AllocationResult" }, "deallocationRequested": { - "description": "DeallocationRequested indicates that a ResourceClaim is to be deallocated.\n\nThe driver then must deallocate this claim and reset the field together with clearing the Allocation field.\n\nWhile DeallocationRequested is set, no new consumers may be added to ReservedFor.", + "description": "Indicates that a claim is to be deallocated. While this is set, no new consumers may be added to ReservedFor.\n\nThis is only used if the claim needs to be deallocated by a DRA driver. That driver then must deallocate this claim and reset the field together with clearing the Allocation field.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.", "type": "boolean" }, - "driverName": { - "description": "DriverName is a copy of the driver name from the ResourceClass at the time when allocation started.", - "type": "string" - }, "reservedFor": { - "description": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started.\n\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced.", + "description": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. 
When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced.", "type": "array", "items": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimConsumerReference" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.ResourceClaimConsumerReference" }, "x-kubernetes-list-map-keys": [ "uid" ], - "x-kubernetes-list-type": "map" + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "uid", + "x-kubernetes-patch-strategy": "merge" } } }, - "io.k8s.api.resource.v1alpha2.ResourceClaimTemplate": { - "description": "ResourceClaimTemplate is used to produce ResourceClaim objects.", + "io.k8s.api.resource.v1alpha3.ResourceClaimTemplate": { + "description": "ResourceClaimTemplate is used to produce ResourceClaim objects.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", "type": "object", "required": [ "spec" @@ -14431,18 +15916,18 @@ }, "spec": { "description": "Describes the ResourceClaim that is to be generated.\n\nThis field is immutable. 
A ResourceClaim will get created by the control plane for a Pod when needed and then not get updated anymore.", - "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimTemplateSpec" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.ResourceClaimTemplateSpec" } }, "x-kubernetes-group-version-kind": [ { "group": "resource.k8s.io", "kind": "ResourceClaimTemplate", - "version": "v1alpha2" + "version": "v1alpha3" } ] }, - "io.k8s.api.resource.v1alpha2.ResourceClaimTemplateList": { + "io.k8s.api.resource.v1alpha3.ResourceClaimTemplateList": { "description": "ResourceClaimTemplateList is a collection of claim templates.", "type": "object", "required": [ @@ -14457,7 +15942,7 @@ "description": "Items is the list of resource claim templates.", "type": "array", "items": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.ResourceClaimTemplate" } }, "kind": { @@ -14473,11 +15958,11 @@ { "group": "resource.k8s.io", "kind": "ResourceClaimTemplateList", - "version": "v1alpha2" + "version": "v1alpha3" } ] }, - "io.k8s.api.resource.v1alpha2.ResourceClaimTemplateSpec": { + "io.k8s.api.resource.v1alpha3.ResourceClaimTemplateSpec": { "description": "ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim.", "type": "object", "required": [ @@ -14490,23 +15975,44 @@ }, "spec": { "description": "Spec for the ResourceClaim. The entire content is copied unchanged into the ResourceClaim that gets created from this template. 
The same fields as in a ResourceClaim are also valid here.", - "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimSpec" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.ResourceClaimSpec" } } }, - "io.k8s.api.resource.v1alpha2.ResourceClass": { - "description": "ResourceClass is used by administrators to influence how resources are allocated.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "io.k8s.api.resource.v1alpha3.ResourcePool": { + "description": "ResourcePool describes the pool that ResourceSlices belong to.", "type": "object", "required": [ - "driverName" + "name", + "generation", + "resourceSliceCount" ], "properties": { - "apiVersion": { - "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "generation": { + "description": "Generation tracks the change in a pool over time. Whenever a driver changes something about one or more of the resources in a pool, it must change the generation in all ResourceSlices which are part of that pool. Consumers of ResourceSlices should only consider resources from the pool with the highest generation number. The generation may be reset by drivers, which should be fine for consumers, assuming that all ResourceSlices in a pool are updated to match or deleted.\n\nCombined with ResourceSliceCount, this mechanism enables consumers to detect pools which are comprised of multiple ResourceSlices and are in an incomplete state.", + "type": "integer", + "format": "int64" + }, + "name": { + "description": "Name is used to identify the pool. 
For node-local devices, this is often the node name, but this is not required.\n\nIt must not be longer than 253 characters and must consist of one or more DNS sub-domains separated by slashes. This field is immutable.", "type": "string" }, - "driverName": { - "description": "DriverName defines the name of the dynamic resource driver that is used for allocation of a ResourceClaim that uses this class.\n\nResource drivers have a unique name in forward domain order (acme.example.com).", + "resourceSliceCount": { + "description": "ResourceSliceCount is the total number of ResourceSlices in the pool at this generation number. Must be greater than zero.\n\nConsumers can use this to check whether they have seen all ResourceSlices belonging to the same pool.", + "type": "integer", + "format": "int64" + } + } + }, + "io.k8s.api.resource.v1alpha3.ResourceSlice": { + "description": "ResourceSlice represents one or more resources in a pool of similar resources, managed by a common driver. A pool may span more than one ResourceSlice, and exactly how many ResourceSlices comprise a pool is determined by the driver.\n\nAt the moment, the only supported resources are devices with attributes and capacities. Each device in a given pool, regardless of how many ResourceSlices, must have a unique name. The ResourceSlice in which a device gets published may change over time. The unique identifier for a device is the tuple \u003cdriver name\u003e, \u003cpool name\u003e, \u003cdevice name\u003e.\n\nWhenever a driver needs to update a pool, it increments the pool.Spec.Pool.Generation number and updates all ResourceSlices with that new number and new resource definitions. 
A consumer must only use ResourceSlices with the highest generation number and ignore all others.\n\nWhen allocating all resources in a pool matching certain criteria or when looking for the best solution among several different alternatives, a consumer should check the number of ResourceSlices in a pool (included in each ResourceSlice) to determine whether its view of a pool is complete and if not, should wait until the driver has completed updating the pool.\n\nFor resources that are not local to a node, the node name is not set. Instead, the driver may use a node selector to specify where the devices are available.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "type": "object", + "required": [ + "spec" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", "type": "string" }, "kind": { @@ -14517,25 +16023,21 @@ "description": "Standard object metadata", "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, - "parametersRef": { - "description": "ParametersRef references an arbitrary separate object that may hold parameters that will be used by the driver when allocating a resource that uses this class. A dynamic resource driver can distinguish between parameters stored here and and those stored in ResourceClaimSpec.", - "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClassParametersReference" - }, - "suitableNodes": { - "description": "Only nodes matching the selector will be considered by the scheduler when trying to find a Node that fits a Pod when that Pod uses a ResourceClaim that has not been allocated yet.\n\nSetting this field is optional. 
If null, all nodes are candidates.", - "$ref": "#/definitions/io.k8s.api.core.v1.NodeSelector" + "spec": { + "description": "Contains the information published by the driver.\n\nChanging the spec automatically increments the metadata.generation number.", + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.ResourceSliceSpec" } }, "x-kubernetes-group-version-kind": [ { "group": "resource.k8s.io", - "kind": "ResourceClass", - "version": "v1alpha2" + "kind": "ResourceSlice", + "version": "v1alpha3" } ] }, - "io.k8s.api.resource.v1alpha2.ResourceClassList": { - "description": "ResourceClassList is a collection of classes.", + "io.k8s.api.resource.v1alpha3.ResourceSliceList": { + "description": "ResourceSliceList is a collection of ResourceSlices.", "type": "object", "required": [ "items" @@ -14546,10 +16048,10 @@ "type": "string" }, "items": { - "description": "Items is the list of resource classes.", + "description": "Items is the list of resource ResourceSlices.", "type": "array", "items": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClass" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.ResourceSlice" } }, "kind": { @@ -14564,48 +16066,46 @@ "x-kubernetes-group-version-kind": [ { "group": "resource.k8s.io", - "kind": "ResourceClassList", - "version": "v1alpha2" + "kind": "ResourceSliceList", + "version": "v1alpha3" } ] }, - "io.k8s.api.resource.v1alpha2.ResourceClassParametersReference": { - "description": "ResourceClassParametersReference contains enough information to let you locate the parameters for a ResourceClass.", + "io.k8s.api.resource.v1alpha3.ResourceSliceSpec": { + "description": "ResourceSliceSpec contains the information published by the driver in one ResourceSlice.", "type": "object", "required": [ - "kind", - "name" + "driver", + "pool" ], "properties": { - "apiGroup": { - "description": "APIGroup is the group for the resource being referenced. It is empty for the core API. 
This matches the group in the APIVersion that is used when creating the resources.", - "type": "string" + "allNodes": { + "description": "AllNodes indicates that all nodes have access to the resources in the pool.\n\nExactly one of NodeName, NodeSelector and AllNodes must be set.", + "type": "boolean" }, - "kind": { - "description": "Kind is the type of resource being referenced. This is the same value as in the parameter object's metadata.", - "type": "string" + "devices": { + "description": "Devices lists some or all of the devices in this pool.\n\nMust not have more than 128 entries.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.Device" + }, + "x-kubernetes-list-type": "atomic" }, - "name": { - "description": "Name is the name of resource being referenced.", + "driver": { + "description": "Driver identifies the DRA driver providing the capacity information. A field selector can be used to list only ResourceSlice objects with a certain driver name.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver. This field is immutable.", "type": "string" }, - "namespace": { - "description": "Namespace that contains the referenced resource. Must be empty for cluster-scoped resources and non-empty for namespaced resources.", - "type": "string" - } - } - }, - "io.k8s.api.resource.v1alpha2.ResourceHandle": { - "description": "ResourceHandle holds opaque resource data for processing by a specific kubelet plugin.", - "type": "object", - "properties": { - "data": { - "description": "Data contains the opaque data associated with this ResourceHandle. It is set by the controller component of the resource driver whose name matches the DriverName set in the ResourceClaimStatus this ResourceHandle is embedded in. It is set at allocation time and is intended for processing by the kubelet plugin whose name matches the DriverName set in this ResourceHandle.\n\nThe maximum size of this field is 16KiB. 
This may get increased in the future, but not reduced.", + "nodeName": { + "description": "NodeName identifies the node which provides the resources in this pool. A field selector can be used to list only ResourceSlice objects belonging to a certain node.\n\nThis field can be used to limit access from nodes to ResourceSlices with the same node name. It also indicates to autoscalers that adding new nodes of the same type as some old node might also make new resources available.\n\nExactly one of NodeName, NodeSelector and AllNodes must be set. This field is immutable.", "type": "string" }, - "driverName": { - "description": "DriverName specifies the name of the resource driver whose kubelet plugin should be invoked to process this ResourceHandle's data once it lands on a node. This may differ from the DriverName set in ResourceClaimStatus this ResourceHandle is embedded in.", - "type": "string" + "nodeSelector": { + "description": "NodeSelector defines which nodes have access to the resources in the pool, when that pool is not limited to a single node.\n\nMust use exactly one term.\n\nExactly one of NodeName, NodeSelector and AllNodes must be set.", + "$ref": "#/definitions/io.k8s.api.core.v1.NodeSelector" + }, + "pool": { + "description": "Pool describes the pool that this ResourceSlice belongs to.", + "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.ResourcePool" } } }, @@ -14765,11 +16265,11 @@ "type": "boolean" }, "fsGroupPolicy": { - "description": "fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. 
With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", + "description": "fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field was immutable in Kubernetes \u003c 1.29 and now is mutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", "type": "string" }, "podInfoOnMount": { - "description": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. 
As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", + "description": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. 
As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field was immutable in Kubernetes \u003c 1.29 and now is mutable.", "type": "boolean" }, "requiresRepublish": { @@ -14859,7 +16359,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" } } }, @@ -14911,6 +16412,10 @@ "items": { "$ref": "#/definitions/io.k8s.api.storage.v1.CSINodeDriver" }, + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge" } @@ -14976,11 +16481,7 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.storage.v1.CSIStorageCapacity" - }, - "x-kubernetes-list-map-keys": [ - "name" - ], - "x-kubernetes-list-type": "map" + } }, "kind": { "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", @@ -15035,7 +16536,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "parameters": { "description": "parameters holds the parameters for the provisioner that should create volumes of this storage class.", @@ -15262,64 +16764,254 @@ "description": "message represents the error encountered during Attach or Detach operation. 
This string may be logged, so it should not contain sensitive information.", "type": "string" }, - "time": { - "description": "time represents the time the error was encountered.", - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + "time": { + "description": "time represents the time the error was encountered.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + } + } + }, + "io.k8s.api.storage.v1.VolumeNodeResources": { + "description": "VolumeNodeResources is a set of resource limits for scheduling of volumes.", + "type": "object", + "properties": { + "count": { + "description": "count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is not specified, then the supported number of volumes on this node is unbounded.", + "type": "integer", + "format": "int32" + } + } + }, + "io.k8s.api.storage.v1alpha1.VolumeAttributesClass": { + "description": "VolumeAttributesClass represents a specification of mutable volume attributes defined by the CSI driver. The class can be specified during dynamic provisioning of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning.", + "type": "object", + "required": [ + "driverName" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "driverName": { + "description": "Name of the CSI driver This field is immutable.", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "parameters": { + "description": "parameters hold volume attributes defined by the CSI driver. These values are opaque to the Kubernetes and are passed directly to the CSI driver. The underlying storage provider supports changing these attributes on an existing volume, however the parameters field itself is immutable. To invoke a volume update, a new VolumeAttributesClass should be created with new parameters, and the PersistentVolumeClaim should be updated to reference the new VolumeAttributesClass.\n\nThis field is required and must contain at least one key/value pair. The keys cannot be empty, and the maximum number of parameters is 512, with a cumulative max size of 256K. 
If the CSI driver rejects invalid parameters, the target PersistentVolumeClaim will be set to an \"Infeasible\" state in the modifyVolumeStatus field.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "storage.k8s.io", + "kind": "VolumeAttributesClass", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.storage.v1alpha1.VolumeAttributesClassList": { + "description": "VolumeAttributesClassList is a collection of VolumeAttributesClass objects.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is the list of VolumeAttributesClass objects.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.storage.v1alpha1.VolumeAttributesClass" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "storage.k8s.io", + "kind": "VolumeAttributesClassList", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.storage.v1beta1.VolumeAttributesClass": { + "description": "VolumeAttributesClass represents a specification of mutable volume attributes defined by the CSI driver. The class can be specified during dynamic provisioning of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning.", + "type": "object", + "required": [ + "driverName" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "driverName": { + "description": "Name of the CSI driver This field is immutable.", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "parameters": { + "description": "parameters hold volume attributes defined by the CSI driver. These values are opaque to the Kubernetes and are passed directly to the CSI driver. The underlying storage provider supports changing these attributes on an existing volume, however the parameters field itself is immutable. To invoke a volume update, a new VolumeAttributesClass should be created with new parameters, and the PersistentVolumeClaim should be updated to reference the new VolumeAttributesClass.\n\nThis field is required and must contain at least one key/value pair. The keys cannot be empty, and the maximum number of parameters is 512, with a cumulative max size of 256K. If the CSI driver rejects invalid parameters, the target PersistentVolumeClaim will be set to an \"Infeasible\" state in the modifyVolumeStatus field.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "storage.k8s.io", + "kind": "VolumeAttributesClass", + "version": "v1beta1" + } + ] + }, + "io.k8s.api.storage.v1beta1.VolumeAttributesClassList": { + "description": "VolumeAttributesClassList is a collection of VolumeAttributesClass objects.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is the list of VolumeAttributesClass objects.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.storage.v1beta1.VolumeAttributesClass" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "storage.k8s.io", + "kind": "VolumeAttributesClassList", + "version": "v1beta1" + } + ] + }, + "io.k8s.api.storagemigration.v1alpha1.GroupVersionResource": { + "description": "The names of the group, the version, and the resource.", + "type": "object", + "properties": { + "group": { + "description": "The name of the group.", + "type": "string" + }, + "resource": { + "description": "The name of the resource.", + "type": "string" + }, + "version": { + "description": "The name of the version.", + "type": "string" } } }, - "io.k8s.api.storage.v1.VolumeNodeResources": { - "description": "VolumeNodeResources is a set of resource limits for scheduling of volumes.", + "io.k8s.api.storagemigration.v1alpha1.MigrationCondition": { + "description": "Describes the state of a migration at a certain point.", "type": "object", + "required": [ + "type", + "status" + ], "properties": { - "count": { - "description": "count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. 
A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is not specified, then the supported number of volumes on this node is unbounded.", - "type": "integer", - "format": "int32" + "lastUpdateTime": { + "description": "The last time this condition was updated.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "message": { + "description": "A human readable message indicating details about the transition.", + "type": "string" + }, + "reason": { + "description": "The reason for the condition's last transition.", + "type": "string" + }, + "status": { + "description": "Status of the condition, one of True, False, Unknown.", + "type": "string" + }, + "type": { + "description": "Type of the condition.", + "type": "string" } } }, - "io.k8s.api.storage.v1alpha1.VolumeAttributesClass": { - "description": "VolumeAttributesClass represents a specification of mutable volume attributes defined by the CSI driver. The class can be specified during dynamic provisioning of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning.", + "io.k8s.api.storagemigration.v1alpha1.StorageVersionMigration": { + "description": "StorageVersionMigration represents a migration of stored data to the latest storage version.", "type": "object", - "required": [ - "driverName" - ], "properties": { "apiVersion": { "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", "type": "string" }, - "driverName": { - "description": "Name of the CSI driver This field is immutable.", - "type": "string" - }, "kind": { "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "type": "string" }, "metadata": { - "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "description": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, - "parameters": { - "description": "parameters hold volume attributes defined by the CSI driver. These values are opaque to the Kubernetes and are passed directly to the CSI driver. The underlying storage provider supports changing these attributes on an existing volume, however the parameters field itself is immutable. To invoke a volume update, a new VolumeAttributesClass should be created with new parameters, and the PersistentVolumeClaim should be updated to reference the new VolumeAttributesClass.\n\nThis field is required and must contain at least one key/value pair. The keys cannot be empty, and the maximum number of parameters is 512, with a cumulative max size of 256K. 
If the CSI driver rejects invalid parameters, the target PersistentVolumeClaim will be set to an \"Infeasible\" state in the modifyVolumeStatus field.", - "type": "object", - "additionalProperties": { - "type": "string" - } + "spec": { + "description": "Specification of the migration.", + "$ref": "#/definitions/io.k8s.api.storagemigration.v1alpha1.StorageVersionMigrationSpec" + }, + "status": { + "description": "Status of the migration.", + "$ref": "#/definitions/io.k8s.api.storagemigration.v1alpha1.StorageVersionMigrationStatus" } }, "x-kubernetes-group-version-kind": [ { - "group": "storage.k8s.io", - "kind": "VolumeAttributesClass", + "group": "storagemigration.k8s.io", + "kind": "StorageVersionMigration", "version": "v1alpha1" } ] }, - "io.k8s.api.storage.v1alpha1.VolumeAttributesClassList": { - "description": "VolumeAttributesClassList is a collection of VolumeAttributesClass objects.", + "io.k8s.api.storagemigration.v1alpha1.StorageVersionMigrationList": { + "description": "StorageVersionMigrationList is a collection of storage version migrations.", "type": "object", "required": [ "items" @@ -15330,11 +17022,17 @@ "type": "string" }, "items": { - "description": "items is the list of VolumeAttributesClass objects.", + "description": "Items is the list of StorageVersionMigration", "type": "array", "items": { - "$ref": "#/definitions/io.k8s.api.storage.v1alpha1.VolumeAttributesClass" - } + "$ref": "#/definitions/io.k8s.api.storagemigration.v1alpha1.StorageVersionMigration" + }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" }, "kind": { "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", @@ -15347,12 +17045,52 @@ }, "x-kubernetes-group-version-kind": [ { - "group": "storage.k8s.io", - "kind": "VolumeAttributesClassList", + "group": "storagemigration.k8s.io", + "kind": "StorageVersionMigrationList", "version": "v1alpha1" } ] }, + "io.k8s.api.storagemigration.v1alpha1.StorageVersionMigrationSpec": { + "description": "Spec of the storage version migration.", + "type": "object", + "required": [ + "resource" + ], + "properties": { + "continueToken": { + "description": "The token used in the list options to get the next chunk of objects to migrate. When the .status.conditions indicates the migration is \"Running\", users can use this token to check the progress of the migration.", + "type": "string" + }, + "resource": { + "description": "The resource that is being migrated. The migrator sends requests to the endpoint serving the resource. Immutable.", + "$ref": "#/definitions/io.k8s.api.storagemigration.v1alpha1.GroupVersionResource" + } + } + }, + "io.k8s.api.storagemigration.v1alpha1.StorageVersionMigrationStatus": { + "description": "Status of the storage version migration.", + "type": "object", + "properties": { + "conditions": { + "description": "The latest available observations of the migration's current state.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.storagemigration.v1alpha1.MigrationCondition" + }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "resourceVersion": { + "description": "ResourceVersion to compare with the GC cache for performing the migration. 
This is the current resource version of given group, version and resource when kube-controller-manager first observes this StorageVersionMigration resource.", + "type": "string" + } + } + }, "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceColumnDefinition": { "description": "CustomResourceColumnDefinition specifies a column for server side printing.", "type": "object", @@ -15520,7 +17258,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "kind": { "description": "kind is the serialized kind of the resource. It is normally CamelCase and singular. Custom resource instances will use this value as the `kind` attribute in API calls.", @@ -15539,7 +17278,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "singular": { "description": "singular is the singular name of the resource. It must be all lowercase. Defaults to lowercased `kind`.", @@ -15582,7 +17322,8 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionVersion" - } + }, + "x-kubernetes-list-type": "atomic" } } }, @@ -15610,7 +17351,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" } } }, @@ -15628,7 +17370,8 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceColumnDefinition" - } + }, + "x-kubernetes-list-type": "atomic" }, "deprecated": { "description": "deprecated indicates this version of the custom resource API is deprecated. When set to true, API requests to this version receive a warning header in the server response. 
Defaults to false.", @@ -15646,6 +17389,14 @@ "description": "schema describes the schema used for validation, pruning, and defaulting of this version of the custom resource.", "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceValidation" }, + "selectableFields": { + "description": "selectableFields specifies paths to fields that may be used as field selectors. A maximum of 8 selectable fields are allowed. See https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.SelectableField" + }, + "x-kubernetes-list-type": "atomic" + }, "served": { "description": "served is a flag enabling/disabling this version from being served via REST APIs", "type": "boolean" @@ -15745,13 +17496,15 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps" - } + }, + "x-kubernetes-list-type": "atomic" }, "anyOf": { "type": "array", "items": { "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps" - } + }, + "x-kubernetes-list-type": "atomic" }, "default": { "description": "default is a default value for undefined object fields. Defaulting is a beta feature under the CustomResourceDefaulting feature gate. 
Defaulting requires spec.preserveUnknownFields to be false.", @@ -15776,7 +17529,8 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSON" - } + }, + "x-kubernetes-list-type": "atomic" }, "example": { "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSON" @@ -15846,7 +17600,8 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps" - } + }, + "x-kubernetes-list-type": "atomic" }, "pattern": { "type": "string" @@ -15867,7 +17622,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "title": { "type": "string" @@ -15891,7 +17647,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "x-kubernetes-list-type": { "description": "x-kubernetes-list-type annotates an array to further describe its topology. This extension must only be used on lists and may have 3 possible values:\n\n1) `atomic`: the list is treated as a single entity, like a scalar.\n Atomic lists will be entirely replaced when updated. This extension\n may be used on any type of list (struct, scalar, ...).\n2) `set`:\n Sets are lists that must not have multiple items with the same value. Each\n value must be a scalar, an object with x-kubernetes-map-type `atomic` or an\n array with x-kubernetes-list-type `atomic`.\n3) `map`:\n These lists are like maps in that their elements have a non-index key\n used to identify them. Order is preserved upon merge. The map tag\n must only be used on a list with elements of type object.\nDefaults to atomic for arrays.", @@ -15906,7 +17663,7 @@ "type": "boolean" }, "x-kubernetes-validations": { - "description": "x-kubernetes-validations describes a list of validation rules written in the CEL expression language. This field is an alpha-level. 
Using this field requires the feature gate `CustomResourceValidationExpressions` to be enabled.", + "description": "x-kubernetes-validations describes a list of validation rules written in the CEL expression language.", "type": "array", "items": { "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.ValidationRule" @@ -15929,6 +17686,19 @@ "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrStringArray": { "description": "JSONSchemaPropsOrStringArray represents a JSONSchemaProps or a string array." }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.SelectableField": { + "description": "SelectableField specifies the JSON path of a field that may be used with field selectors.", + "type": "object", + "required": [ + "jsonPath" + ], + "properties": { + "jsonPath": { + "description": "jsonPath is a simple JSON path which is evaluated against each custom resource to produce a field selector value. Only JSON paths without the array notation are allowed. Must point to a field of type string, boolean or integer. Types with enum values and strings with formats are allowed. If jsonPath refers to absent field in a resource, the jsonPath evaluates to an empty string. Must not point to metdata fields. 
Required.", + "type": "string" + } + } + }, "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.ServiceReference": { "description": "ServiceReference holds a reference to Service.legacy.k8s.io", "type": "object", @@ -16024,7 +17794,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" } } }, @@ -16061,14 +17832,16 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR" - } + }, + "x-kubernetes-list-type": "atomic" }, "versions": { "description": "versions are the versions supported in this group.", "type": "array", "items": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery" - } + }, + "x-kubernetes-list-type": "atomic" } }, "x-kubernetes-group-version-kind": [ @@ -16095,7 +17868,8 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup" - } + }, + "x-kubernetes-list-type": "atomic" }, "kind": { "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", @@ -16126,7 +17900,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "group": { "description": "group is the preferred group of the resource. Empty implies the group of the containing resource list. For subresources, this may have a different value, for example: Scale\".", @@ -16149,7 +17924,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "singularName": { "description": "singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. 
The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface.", @@ -16197,7 +17973,8 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResource" - } + }, + "x-kubernetes-list-type": "atomic" } }, "x-kubernetes-group-version-kind": [ @@ -16229,14 +18006,16 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR" - } + }, + "x-kubernetes-list-type": "atomic" }, "versions": { "description": "versions are the api versions that are available.", "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" } }, "x-kubernetes-group-version-kind": [ @@ -16298,7 +18077,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "gracePeriodSeconds": { "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", @@ -16463,6 +18243,11 @@ "kind": "DeleteOptions", "version": "v1" }, + { + "group": "coordination.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, { "group": "coordination.k8s.io", "kind": "DeleteOptions", @@ -16581,7 +18366,7 @@ { "group": "resource.k8s.io", "kind": "DeleteOptions", - "version": "v1alpha2" + "version": "v1alpha3" }, { "group": "scheduling.k8s.io", @@ -16612,6 +18397,11 @@ "group": "storage.k8s.io", "kind": "DeleteOptions", "version": "v1beta1" + }, + { + "group": "storagemigration.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" } ] }, @@ -16619,6 +18409,32 @@ "description": "Duration is a wrapper around time.Duration which supports correct marshaling to YAML and JSON. 
In particular, it marshals into strings, which can be used as map keys in json.", "type": "string" }, + "io.k8s.apimachinery.pkg.apis.meta.v1.FieldSelectorRequirement": { + "description": "FieldSelectorRequirement is a selector that contains values, a key, and an operator that relates the key and values.", + "type": "object", + "required": [ + "key", + "operator" + ], + "properties": { + "key": { + "description": "key is the field selector key that the requirement applies to.", + "type": "string" + }, + "operator": { + "description": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. The list of operators may grow in the future.", + "type": "string" + }, + "values": { + "description": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, "io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1": { "description": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. 
The string will follow one of these four formats: 'f:\u003cname\u003e', where \u003cname\u003e is the name of a field in a struct, or key in a map 'v:\u003cvalue\u003e', where \u003cvalue\u003e is the exact json formatted value of a list item 'i:\u003cindex\u003e', where \u003cindex\u003e is position of a item in a list 'k:\u003ckeys\u003e', where \u003ckeys\u003e is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff", "type": "object" @@ -16650,7 +18466,8 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement" - } + }, + "x-kubernetes-list-type": "atomic" }, "matchLabels": { "description": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.", @@ -16683,7 +18500,8 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" } } }, @@ -16779,6 +18597,7 @@ "items": { "type": "string" }, + "x-kubernetes-list-type": "set", "x-kubernetes-patch-strategy": "merge" }, "generateName": { @@ -16802,7 +18621,8 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry" - } + }, + "x-kubernetes-list-type": "atomic" }, "name": { "description": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names", @@ -16818,6 +18638,10 @@ "items": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference" }, + "x-kubernetes-list-map-keys": [ + "uid" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "uid", "x-kubernetes-patch-strategy": "merge" }, @@ -16923,6 +18747,7 @@ }, "details": { "description": "Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.", + "x-kubernetes-list-type": "atomic", "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.StatusDetails" }, "kind": { @@ -16951,11 +18776,6 @@ "group": "", "kind": "Status", "version": "v1" - }, - { - "group": "resource.k8s.io", - "kind": "Status", - "version": "v1alpha2" } ] }, @@ -16986,7 +18806,8 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.StatusCause" - } + }, + "x-kubernetes-list-type": "atomic" }, "group": { "description": "The group attribute of the resource associated with the status StatusReason.", @@ -17173,6 +18994,11 @@ "kind": "WatchEvent", "version": "v1" }, + { + "group": "coordination.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, { "group": "coordination.k8s.io", "kind": "WatchEvent", @@ -17291,7 +19117,7 @@ { "group": "resource.k8s.io", "kind": "WatchEvent", - "version": "v1alpha2" + "version": "v1alpha3" }, { "group": "scheduling.k8s.io", @@ -17322,6 +19148,11 @@ "group": "storage.k8s.io", "kind": "WatchEvent", "version": "v1beta1" + }, + { + "group": "storagemigration.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" } ] }, @@ -20865,14 +22696,16 @@ "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "command": { "description": "Entrypoint array. Not executed within a shell. 
The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", "type": "array", "items": { "type": "string" - } + }, + "x-kubernetes-list-type": "atomic" }, "env": { "description": "List of environment variables to set in the container. Cannot be updated.", @@ -20880,6 +22713,10 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.EnvVar" }, + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge" }, @@ -20888,7 +22725,8 @@ "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.EnvFromSource" - } + }, + "x-kubernetes-list-type": "atomic" }, "image": { "description": "Container image name. 
More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", @@ -20978,6 +22816,10 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.VolumeDevice" }, + "x-kubernetes-list-map-keys": [ + "devicePath" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "devicePath", "x-kubernetes-patch-strategy": "merge" }, @@ -20987,6 +22829,10 @@ "items": { "$ref": "#/definitions/io.k8s.api.core.v1.VolumeMount" }, + "x-kubernetes-list-map-keys": [ + "mountPath" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "mountPath", "x-kubernetes-patch-strategy": "merge" }, diff --git a/config/base/crds/full/numaflow.numaproj.io_interstepbufferservices.yaml b/config/base/crds/full/numaflow.numaproj.io_interstepbufferservices.yaml index 0fb6ff2583..993551ebbc 100644 --- a/config/base/crds/full/numaflow.numaproj.io_interstepbufferservices.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_interstepbufferservices.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.8.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.3 name: interstepbufferservices.numaflow.numaproj.io spec: group: numaflow.numaproj.io @@ -64,11 +63,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -80,12 +81,15 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: format: int32 type: integer @@ -94,6 +98,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic 
requiredDuringSchedulingIgnoredDuringExecution: properties: nodeSelectorTerms: @@ -110,11 +115,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -126,16 +133,21 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: properties: @@ -157,16 +169,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -190,20 +205,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -217,6 +236,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -233,16 +253,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -266,26 +289,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object 
type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: properties: @@ -307,16 +335,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -340,20 +371,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -367,6 +402,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -383,16 +419,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -416,26 +455,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object 
type: array + x-kubernetes-list-type: atomic type: object type: object automountServiceAccountToken: @@ -458,12 +502,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -473,6 +519,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -488,17 +535,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -510,19 +560,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -570,6 +624,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -598,16 +654,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -662,6 +729,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic options: items: properties: @@ -671,10 +739,12 @@ spec: type: string type: object type: array + x-kubernetes-list-type: atomic searches: items: type: string type: array + x-kubernetes-list-type: atomic type: object dnsPolicy: type: string @@ -684,8 +754,10 @@ spec: items: properties: name: + default: "" type: 
string type: object + x-kubernetes-map-type: atomic type: array metadata: properties: @@ -714,12 +786,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -729,6 +803,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -744,17 +819,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -766,19 +844,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -826,6 +908,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -854,16 +938,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -950,12 +1045,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -965,6 +1062,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -980,17 +1078,20 @@ spec: required: - resource type: object + 
x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -1002,19 +1103,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -1062,6 +1167,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -1090,16 +1197,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -1157,13 +1275,10 @@ spec: properties: name: type: string - source: - properties: - resourceClaimName: - type: string - resourceClaimTemplateName: - type: string - type: object + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string required: - name type: object @@ -1172,6 +1287,15 @@ spec: type: string securityContext: properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object fsGroup: format: int64 type: integer @@ -1210,6 +1334,9 @@ spec: format: int64 type: integer type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string sysctls: items: properties: @@ -1222,6 +1349,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic windowsOptions: properties: gmsaCredentialSpec: @@ -1274,23 +1402,27 @@ spec: key: type: string name: + 
default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic sentinelPassword: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic sentinelUrl: type: string url: @@ -1320,11 +1452,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -1336,12 +1470,15 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: format: int32 type: integer @@ -1350,6 +1487,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: properties: nodeSelectorTerms: @@ -1366,11 +1504,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -1382,16 +1522,21 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: properties: @@ -1413,16 +1558,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -1446,20 +1594,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: 
object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -1473,6 +1625,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -1489,16 +1642,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -1522,26 +1678,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: properties: @@ -1563,16 +1724,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -1596,20 +1760,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -1623,6 
+1791,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -1639,16 +1808,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -1672,26 +1844,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object automountServiceAccountToken: @@ -1702,6 +1879,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic options: items: properties: @@ -1711,10 +1889,12 @@ spec: type: string type: object type: array + x-kubernetes-list-type: atomic searches: items: type: string type: array + x-kubernetes-list-type: atomic type: object dnsPolicy: type: string @@ -1722,8 +1902,10 @@ spec: items: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic type: array initContainerTemplate: properties: @@ -1741,12 +1923,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -1756,6 +1940,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -1771,17 +1956,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + 
default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -1793,19 +1981,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -1853,6 +2045,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -1881,16 +2075,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -1966,12 +2171,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -1981,6 +2188,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -1996,17 +2204,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -2018,19 +2229,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ 
-2078,6 +2293,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -2106,16 +2323,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -2202,12 +2430,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -2217,6 +2447,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -2232,17 +2463,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -2254,19 +2488,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -2314,6 +2552,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -2342,16 +2582,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: 
object privileged: type: boolean @@ -2409,13 +2660,10 @@ spec: properties: name: type: string - source: - properties: - resourceClaimName: - type: string - resourceClaimTemplateName: - type: string - type: object + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string required: - name type: object @@ -2424,6 +2672,15 @@ spec: type: string securityContext: properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object fsGroup: format: int64 type: integer @@ -2462,6 +2719,9 @@ spec: format: int64 type: integer type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string sysctls: items: properties: @@ -2474,6 +2734,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic windowsOptions: properties: gmsaCredentialSpec: @@ -2502,12 +2763,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -2517,6 +2780,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -2532,17 +2796,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -2554,19 +2821,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -2614,6 +2885,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -2642,16 +2915,27 @@ spec: 
properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -2785,46 +3069,54 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic user: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object nkey: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic token: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object streamConfig: type: string @@ -2842,23 +3134,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic sentinelPassword: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic sentinelUrl: type: string url: @@ -2889,9 +3185,3 @@ spec: storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml b/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml index e4b27b015c..13f0cd9f70 100644 --- a/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - 
controller-gen.kubebuilder.io/version: v0.8.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.3 name: monovertices.numaflow.numaproj.io spec: group: numaflow.numaproj.io @@ -71,11 +70,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -87,12 +88,15 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: format: int32 type: integer @@ -101,6 +105,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: properties: nodeSelectorTerms: @@ -117,11 +122,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -133,16 +140,21 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: properties: @@ -164,16 +176,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -197,20 +212,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + 
x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -224,6 +243,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -240,16 +260,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -273,26 +296,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: properties: @@ -314,16 +342,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -347,20 +378,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -374,6 +409,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: 
properties: @@ -390,16 +426,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -423,26 +462,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object automountServiceAccountToken: @@ -463,12 +507,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -478,6 +524,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -493,17 +540,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -515,19 +565,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -575,6 +629,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -603,16 +659,27 @@ spec: properties: 
allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -683,11 +750,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -699,12 +768,15 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: format: int32 type: integer @@ -713,6 +785,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: properties: nodeSelectorTerms: @@ -729,11 +802,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -745,16 +820,21 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: properties: @@ -776,16 +856,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -809,20 +892,24 @@ spec: items: type: string type: array + 
x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -836,6 +923,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -852,16 +940,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -885,26 +976,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: properties: @@ -926,16 +1022,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -959,20 +1058,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + 
x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -986,6 +1089,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -1002,16 +1106,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -1035,26 +1142,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object automountServiceAccountToken: @@ -1075,12 +1187,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -1090,6 +1204,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -1105,17 +1220,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -1127,19 +1245,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + 
x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -1187,6 +1309,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -1215,16 +1339,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -1279,6 +1414,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic options: items: properties: @@ -1288,10 +1424,12 @@ spec: type: string type: object type: array + x-kubernetes-list-type: atomic searches: items: type: string type: array + x-kubernetes-list-type: atomic type: object dnsPolicy: type: string @@ -1299,8 +1437,10 @@ spec: items: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic type: array initContainerTemplate: properties: @@ -1318,12 +1458,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -1333,6 +1475,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -1348,17 +1491,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -1370,19 +1516,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean 
type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -1430,6 +1580,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -1458,16 +1610,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -1544,13 +1707,10 @@ spec: properties: name: type: string - source: - properties: - resourceClaimName: - type: string - resourceClaimTemplateName: - type: string - type: object + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string required: - name type: object @@ -1559,6 +1719,15 @@ spec: type: string securityContext: properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object fsGroup: format: int64 type: integer @@ -1597,6 +1766,9 @@ spec: format: int64 type: integer type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string sysctls: items: properties: @@ -1609,6 +1781,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic windowsOptions: properties: gmsaCredentialSpec: @@ -1646,6 +1819,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic options: items: properties: @@ -1655,10 +1829,12 @@ spec: type: string type: object type: array + x-kubernetes-list-type: atomic searches: items: type: string type: array + x-kubernetes-list-type: atomic type: object dnsPolicy: type: string @@ -1666,8 +1842,10 @@ spec: items: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic type: array initContainers: items: @@ -1676,10 +1854,12 @@ spec: items: type: 
string type: array + x-kubernetes-list-type: atomic command: items: type: string type: array + x-kubernetes-list-type: atomic env: items: properties: @@ -1694,12 +1874,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -1709,6 +1891,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -1724,43 +1907,54 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map envFrom: items: properties: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array + x-kubernetes-list-type: atomic image: type: string imagePullPolicy: @@ -1775,6 +1969,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: properties: @@ -1792,6 +1987,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -1833,6 +2029,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: properties: @@ -1850,6 +2047,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -1892,6 +2090,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -1902,6 +2101,7 @@ spec: format: int32 type: integer service: + default: "" type: string 
required: - port @@ -1922,6 +2122,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -1996,6 +2197,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -2006,6 +2208,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -2026,6 +2229,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -2086,6 +2290,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -2116,16 +2322,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -2181,6 +2398,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -2191,6 +2409,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -2211,6 +2430,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -2273,6 +2493,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map volumeMounts: items: properties: @@ -2284,6 +2507,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -2293,6 +2518,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map workingDir: type: string required: @@ -2353,13 +2581,10 @@ spec: properties: name: type: string - source: - properties: - resourceClaimName: - type: string - 
resourceClaimTemplateName: - type: string - type: object + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string required: - name type: object @@ -2406,6 +2631,15 @@ spec: type: object securityContext: properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object fsGroup: format: int64 type: integer @@ -2444,6 +2678,9 @@ spec: format: int64 type: integer type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string sysctls: items: properties: @@ -2456,6 +2693,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic windowsOptions: properties: gmsaCredentialSpec: @@ -2477,10 +2715,12 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic command: items: type: string type: array + x-kubernetes-list-type: atomic env: items: properties: @@ -2495,12 +2735,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -2510,6 +2752,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -2525,43 +2768,54 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map envFrom: items: properties: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array + x-kubernetes-list-type: atomic image: 
type: string imagePullPolicy: @@ -2576,6 +2830,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: properties: @@ -2593,6 +2848,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -2634,6 +2890,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: properties: @@ -2651,6 +2908,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -2693,6 +2951,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -2703,6 +2962,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -2723,6 +2983,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -2797,6 +3058,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -2807,6 +3069,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -2827,6 +3090,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -2887,6 +3151,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -2917,16 +3183,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -2982,6 +3259,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -2992,6 +3270,7 @@ spec: format: int32 type: integer service: + default: 
"" type: string required: - port @@ -3012,6 +3291,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -3074,6 +3354,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map volumeMounts: items: properties: @@ -3085,6 +3368,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -3094,6 +3379,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map workingDir: type: string required: @@ -3127,34 +3415,40 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic keytabSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic passwordSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic realm: type: string serviceName: @@ -3164,12 +3458,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - authType - realm @@ -3187,23 +3483,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -3217,23 +3517,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object 
+ x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -3247,23 +3551,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -3278,23 +3586,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -3302,12 +3614,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object topic: type: string @@ -3342,12 +3656,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -3357,6 +3673,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -3372,17 +3689,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -3394,19 +3714,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -3456,6 +3780,8 @@ spec: 
properties: name: type: string + request: + type: string required: - name type: object @@ -3484,16 +3810,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -3552,6 +3889,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -3585,34 +3924,40 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic keytabSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic passwordSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic realm: type: string serviceName: @@ -3622,12 +3967,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - authType - realm @@ -3645,23 +3992,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -3675,23 +4026,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key 
type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -3705,23 +4060,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -3736,23 +4095,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -3760,12 +4123,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object topic: type: string @@ -3815,12 +4180,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -3830,6 +4197,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -3845,17 +4213,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -3867,19 +4238,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -3929,6 +4304,8 @@ 
spec: properties: name: type: string + request: + type: string required: - name type: object @@ -3957,16 +4334,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -4025,6 +4413,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -4075,12 +4465,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object service: type: boolean @@ -4096,46 +4488,54 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic user: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object nkey: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic token: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object stream: type: string @@ -4146,23 +4546,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -4170,12 +4574,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean 
required: - key type: object + x-kubernetes-map-type: atomic type: object url: type: string @@ -4204,34 +4610,40 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic keytabSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic passwordSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic realm: type: string serviceName: @@ -4241,12 +4653,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - authType - realm @@ -4264,23 +4678,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -4294,23 +4712,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -4324,23 +4746,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -4355,23 +4781,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean 
required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -4379,12 +4809,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object topic: type: string @@ -4402,46 +4834,54 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic user: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object nkey: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic token: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object queue: type: string @@ -4454,23 +4894,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -4478,12 +4922,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object url: type: string @@ -4501,12 +4947,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object msgIDHeaderKey: type: string @@ -4570,12 +5018,14 @@ spec: key: type: string name: + default: "" type: string 
optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -4585,6 +5035,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -4600,17 +5051,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -4622,19 +5076,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -4684,6 +5142,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -4712,16 +5172,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -4780,6 +5251,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -4817,12 +5290,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -4832,6 +5307,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -4847,17 +5323,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: 
properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -4869,19 +5348,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -4931,6 +5414,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -4959,16 +5444,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -5027,6 +5523,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -5100,10 +5598,12 @@ spec: diskURI: type: string fsType: + default: ext4 type: string kind: type: string readOnly: + default: false type: boolean required: - diskName @@ -5127,6 +5627,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic path: type: string readOnly: @@ -5136,8 +5637,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic user: type: string required: @@ -5152,8 +5655,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic volumeID: type: string required: @@ -5179,11 +5684,14 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic csi: 
properties: driver: @@ -5193,8 +5701,10 @@ spec: nodePublishSecretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic readOnly: type: boolean volumeAttributes: @@ -5221,6 +5731,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: format: int32 type: integer @@ -5241,10 +5752,12 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object emptyDir: properties: @@ -5269,6 +5782,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic dataSource: properties: apiGroup: @@ -5281,6 +5795,7 @@ spec: - kind - name type: object + x-kubernetes-map-type: atomic dataSourceRef: properties: apiGroup: @@ -5327,16 +5842,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic storageClassName: type: string volumeAttributesClassName: @@ -5363,10 +5881,12 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic wwids: items: type: string type: array + x-kubernetes-list-type: atomic type: object flexVolume: properties: @@ -5383,8 +5903,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic required: - driver type: object @@ -5441,6 +5963,13 @@ spec: required: - path type: object + image: + properties: + pullPolicy: + type: string + reference: + type: string + type: object iscsi: properties: chapAuthDiscovery: @@ -5454,6 +5983,7 @@ spec: iqn: type: string iscsiInterface: + default: default type: string lun: format: int32 @@ -5462,13 +5992,16 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic readOnly: type: boolean secretRef: properties: name: + default: "" type: string type: object + 
x-kubernetes-map-type: atomic targetPortal: type: string required: @@ -5542,16 +6075,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic name: type: string optional: @@ -5580,11 +6116,14 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic downwardAPI: properties: items: @@ -5599,6 +6138,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: format: int32 type: integer @@ -5619,10 +6159,12 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object secret: properties: @@ -5641,11 +6183,14 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic serviceAccountToken: properties: audience: @@ -5660,6 +6205,7 @@ spec: type: object type: object type: array + x-kubernetes-list-type: atomic type: object quobyte: properties: @@ -5686,21 +6232,27 @@ spec: image: type: string keyring: + default: /etc/ceph/keyring type: string monitors: items: type: string type: array + x-kubernetes-list-type: atomic pool: + default: rbd type: string readOnly: type: boolean secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic user: + default: admin type: string required: - image @@ -5709,6 +6261,7 @@ spec: scaleIO: properties: fsType: + default: xfs type: string gateway: type: string @@ -5719,11 +6272,14 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic sslEnabled: type: boolean storageMode: + default: 
ThinProvisioned type: string storagePool: type: string @@ -5756,6 +6312,7 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic optional: type: boolean secretName: @@ -5770,8 +6327,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic volumeName: type: string volumeNamespace: @@ -5888,9 +6447,3 @@ spec: specReplicasPath: .spec.replicas statusReplicasPath: .status.replicas status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml b/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml index d262a8664b..7c192a9e79 100644 --- a/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.8.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.3 name: pipelines.numaflow.numaproj.io spec: group: numaflow.numaproj.io @@ -175,12 +174,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -190,6 +191,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -205,17 +207,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -227,19 +232,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: 
string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -289,6 +298,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -317,16 +328,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -385,6 +407,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -432,10 +456,12 @@ spec: diskURI: type: string fsType: + default: ext4 type: string kind: type: string readOnly: + default: false type: boolean required: - diskName @@ -459,6 +485,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic path: type: string readOnly: @@ -468,8 +495,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic user: type: string required: @@ -484,8 +513,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic volumeID: type: string required: @@ -511,11 +542,14 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic csi: properties: driver: @@ -525,8 +559,10 @@ spec: nodePublishSecretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic readOnly: type: boolean volumeAttributes: @@ -553,6 +589,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: format: int32 type: integer @@ -573,10 +610,12 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic 
required: - path type: object type: array + x-kubernetes-list-type: atomic type: object emptyDir: properties: @@ -601,6 +640,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic dataSource: properties: apiGroup: @@ -613,6 +653,7 @@ spec: - kind - name type: object + x-kubernetes-map-type: atomic dataSourceRef: properties: apiGroup: @@ -659,16 +700,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic storageClassName: type: string volumeAttributesClassName: @@ -695,10 +739,12 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic wwids: items: type: string type: array + x-kubernetes-list-type: atomic type: object flexVolume: properties: @@ -715,8 +761,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic required: - driver type: object @@ -773,6 +821,13 @@ spec: required: - path type: object + image: + properties: + pullPolicy: + type: string + reference: + type: string + type: object iscsi: properties: chapAuthDiscovery: @@ -786,6 +841,7 @@ spec: iqn: type: string iscsiInterface: + default: default type: string lun: format: int32 @@ -794,13 +850,16 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic readOnly: type: boolean secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic targetPortal: type: string required: @@ -874,16 +933,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic name: type: string optional: @@ -912,11 +974,14 @@ spec: - path type: object type: array + 
x-kubernetes-list-type: atomic name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic downwardAPI: properties: items: @@ -931,6 +996,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: format: int32 type: integer @@ -951,10 +1017,12 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object secret: properties: @@ -973,11 +1041,14 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic serviceAccountToken: properties: audience: @@ -992,6 +1063,7 @@ spec: type: object type: object type: array + x-kubernetes-list-type: atomic type: object quobyte: properties: @@ -1018,21 +1090,27 @@ spec: image: type: string keyring: + default: /etc/ceph/keyring type: string monitors: items: type: string type: array + x-kubernetes-list-type: atomic pool: + default: rbd type: string readOnly: type: boolean secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic user: + default: admin type: string required: - image @@ -1041,6 +1119,7 @@ spec: scaleIO: properties: fsType: + default: xfs type: string gateway: type: string @@ -1051,11 +1130,14 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic sslEnabled: type: boolean storageMode: + default: ThinProvisioned type: string storagePool: type: string @@ -1088,6 +1170,7 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic optional: type: boolean secretName: @@ -1102,8 +1185,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic volumeName: type: string volumeNamespace: @@ -1156,11 +1241,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic 
required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -1172,12 +1259,15 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: format: int32 type: integer @@ -1186,6 +1276,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: properties: nodeSelectorTerms: @@ -1202,11 +1293,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -1218,16 +1311,21 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: properties: @@ -1249,16 +1347,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -1282,20 +1383,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -1309,6 +1414,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic 
requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -1325,16 +1431,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -1358,26 +1467,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: properties: @@ -1399,16 +1513,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -1432,20 +1549,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -1459,6 +1580,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -1475,16 +1597,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: 
type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -1508,26 +1633,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object automountServiceAccountToken: @@ -1548,12 +1678,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -1563,6 +1695,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -1578,17 +1711,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -1600,19 +1736,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -1660,6 +1800,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -1688,16 +1830,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + 
x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -1752,6 +1905,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic options: items: properties: @@ -1761,10 +1915,12 @@ spec: type: string type: object type: array + x-kubernetes-list-type: atomic searches: items: type: string type: array + x-kubernetes-list-type: atomic type: object dnsPolicy: type: string @@ -1772,8 +1928,10 @@ spec: items: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic type: array initContainerTemplate: properties: @@ -1791,12 +1949,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -1806,6 +1966,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -1821,17 +1982,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -1843,19 +2007,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -1903,6 +2071,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -1931,16 +2101,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string 
type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -2017,13 +2198,10 @@ spec: properties: name: type: string - source: - properties: - resourceClaimName: - type: string - resourceClaimTemplateName: - type: string - type: object + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string required: - name type: object @@ -2032,6 +2210,15 @@ spec: type: string securityContext: properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object fsGroup: format: int64 type: integer @@ -2070,6 +2257,9 @@ spec: format: int64 type: integer type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string sysctls: items: properties: @@ -2082,6 +2272,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic windowsOptions: properties: gmsaCredentialSpec: @@ -2135,11 +2326,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -2151,12 +2344,15 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: format: int32 type: integer @@ -2165,6 +2361,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: properties: nodeSelectorTerms: @@ -2181,11 +2378,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -2197,16 +2396,21 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array 
+ x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: properties: @@ -2228,16 +2432,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -2261,20 +2468,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -2288,6 +2499,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -2304,16 +2516,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -2337,26 +2552,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: properties: @@ -2378,16 +2598,19 @@ spec: items: type: 
string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -2411,20 +2634,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -2438,6 +2665,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -2454,16 +2682,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -2487,26 +2718,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object automountServiceAccountToken: @@ -2530,12 +2766,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -2545,6 +2783,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic 
resourceFieldRef: properties: containerName: @@ -2560,17 +2799,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -2582,19 +2824,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -2642,6 +2888,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -2670,16 +2918,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -2734,6 +2993,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic options: items: properties: @@ -2743,10 +3003,12 @@ spec: type: string type: object type: array + x-kubernetes-list-type: atomic searches: items: type: string type: array + x-kubernetes-list-type: atomic type: object dnsPolicy: type: string @@ -2754,8 +3016,10 @@ spec: items: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic type: array metadata: properties: @@ -2782,13 +3046,10 @@ spec: properties: name: type: string - source: - properties: - resourceClaimName: - type: string - resourceClaimTemplateName: - type: string - type: object + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string required: - name type: object @@ 
-2797,6 +3058,15 @@ spec: type: string securityContext: properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object fsGroup: format: int64 type: integer @@ -2835,6 +3105,9 @@ spec: format: int64 type: integer type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string sysctls: items: properties: @@ -2847,6 +3120,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic windowsOptions: properties: gmsaCredentialSpec: @@ -2903,11 +3177,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -2919,12 +3195,15 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: format: int32 type: integer @@ -2933,6 +3212,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: properties: nodeSelectorTerms: @@ -2949,11 +3229,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -2965,16 +3247,21 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: properties: @@ -2996,16 +3283,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: 
additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -3029,20 +3319,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -3056,6 +3350,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -3072,16 +3367,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -3105,26 +3403,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: properties: @@ -3146,16 +3449,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -3179,20 +3485,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + 
x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -3206,6 +3516,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -3222,16 +3533,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -3255,26 +3569,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object automountServiceAccountToken: @@ -3295,12 +3614,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -3310,6 +3631,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -3325,17 +3647,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -3347,19 +3672,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: 
boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -3407,6 +3736,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -3435,16 +3766,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -3499,6 +3841,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic options: items: properties: @@ -3508,10 +3851,12 @@ spec: type: string type: object type: array + x-kubernetes-list-type: atomic searches: items: type: string type: array + x-kubernetes-list-type: atomic type: object dnsPolicy: type: string @@ -3519,8 +3864,10 @@ spec: items: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic type: array initContainerTemplate: properties: @@ -3538,12 +3885,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -3553,6 +3902,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -3568,17 +3918,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -3590,19 +3943,23 @@ spec: configMapRef: properties: name: + default: "" type: string 
optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -3650,6 +4007,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -3678,16 +4037,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -3761,13 +4131,10 @@ spec: properties: name: type: string - source: - properties: - resourceClaimName: - type: string - resourceClaimTemplateName: - type: string - type: object + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string required: - name type: object @@ -3776,6 +4143,15 @@ spec: type: string securityContext: properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object fsGroup: format: int64 type: integer @@ -3814,6 +4190,9 @@ spec: format: int64 type: integer type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string sysctls: items: properties: @@ -3826,6 +4205,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic windowsOptions: properties: gmsaCredentialSpec: @@ -3879,11 +4259,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -3895,12 +4277,15 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + 
x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: format: int32 type: integer @@ -3909,6 +4294,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: properties: nodeSelectorTerms: @@ -3925,11 +4311,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -3941,16 +4329,21 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: properties: @@ -3972,16 +4365,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -4005,20 +4401,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -4032,6 +4432,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -4048,16 +4449,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: 
object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -4081,26 +4485,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: properties: @@ -4122,16 +4531,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -4155,20 +4567,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -4182,6 +4598,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -4198,16 +4615,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -4231,26 +4651,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: 
additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object automountServiceAccountToken: @@ -4271,12 +4696,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -4286,6 +4713,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -4301,17 +4729,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -4323,19 +4754,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -4383,6 +4818,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -4411,16 +4848,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -4475,6 +4923,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic options: items: properties: @@ -4484,10 +4933,12 @@ spec: type: 
string type: object type: array + x-kubernetes-list-type: atomic searches: items: type: string type: array + x-kubernetes-list-type: atomic type: object dnsPolicy: type: string @@ -4495,8 +4946,10 @@ spec: items: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic type: array initContainerTemplate: properties: @@ -4514,12 +4967,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -4529,6 +4984,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -4544,17 +5000,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -4566,19 +5025,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -4626,6 +5089,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -4654,16 +5119,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -4737,13 +5213,10 @@ spec: properties: name: type: string - source: - properties: - resourceClaimName: - type: string - 
resourceClaimTemplateName: - type: string - type: object + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string required: - name type: object @@ -4752,6 +5225,15 @@ spec: type: string securityContext: properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object fsGroup: format: int64 type: integer @@ -4790,6 +5272,9 @@ spec: format: int64 type: integer type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string sysctls: items: properties: @@ -4802,6 +5287,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic windowsOptions: properties: gmsaCredentialSpec: @@ -4857,11 +5343,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -4873,12 +5361,15 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: format: int32 type: integer @@ -4887,6 +5378,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: properties: nodeSelectorTerms: @@ -4903,11 +5395,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -4919,16 +5413,21 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: properties: @@ -4950,16 +5449,19 @@ spec: items: 
type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -4983,20 +5485,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -5010,6 +5516,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -5026,16 +5533,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -5059,26 +5569,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: properties: @@ -5100,16 +5615,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: 
string @@ -5133,20 +5651,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -5160,6 +5682,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -5176,16 +5699,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -5209,26 +5735,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object automountServiceAccountToken: @@ -5249,12 +5780,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -5264,6 +5797,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -5279,17 +5813,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + 
x-kubernetes-map-type: atomic type: object required: - name @@ -5301,19 +5838,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -5361,6 +5902,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -5389,16 +5932,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -5453,6 +6007,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic options: items: properties: @@ -5462,10 +6017,12 @@ spec: type: string type: object type: array + x-kubernetes-list-type: atomic searches: items: type: string type: array + x-kubernetes-list-type: atomic type: object dnsPolicy: type: string @@ -5473,8 +6030,10 @@ spec: items: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic type: array initContainerTemplate: properties: @@ -5492,12 +6051,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -5507,6 +6068,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -5522,17 +6084,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key 
type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -5544,19 +6109,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -5604,6 +6173,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -5632,16 +6203,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -5697,10 +6279,12 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic command: items: type: string type: array + x-kubernetes-list-type: atomic env: items: properties: @@ -5715,12 +6299,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -5730,6 +6316,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -5745,43 +6332,54 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map envFrom: items: properties: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + 
x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array + x-kubernetes-list-type: atomic image: type: string imagePullPolicy: @@ -5796,6 +6394,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: properties: @@ -5813,6 +6412,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -5854,6 +6454,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: properties: @@ -5871,6 +6472,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -5913,6 +6515,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -5923,6 +6526,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -5943,6 +6547,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -6017,6 +6622,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -6027,6 +6633,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -6047,6 +6654,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -6107,6 +6715,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -6137,16 +6747,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object 
privileged: type: boolean @@ -6202,6 +6823,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -6212,6 +6834,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -6232,6 +6855,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -6294,6 +6918,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map volumeMounts: items: properties: @@ -6305,6 +6932,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -6314,6 +6943,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map workingDir: type: string required: @@ -6364,13 +6996,10 @@ spec: properties: name: type: string - source: - properties: - resourceClaimName: - type: string - resourceClaimTemplateName: - type: string - type: object + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string required: - name type: object @@ -6417,6 +7046,15 @@ spec: type: object securityContext: properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object fsGroup: format: int64 type: integer @@ -6455,6 +7093,9 @@ spec: format: int64 type: integer type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string sysctls: items: properties: @@ -6467,6 +7108,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic windowsOptions: properties: gmsaCredentialSpec: @@ -6501,12 +7143,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -6516,6 +7160,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic 
resourceFieldRef: properties: containerName: @@ -6531,17 +7176,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -6553,19 +7201,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -6613,6 +7265,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -6641,16 +7295,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -6706,10 +7371,12 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic command: items: type: string type: array + x-kubernetes-list-type: atomic env: items: properties: @@ -6724,12 +7391,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -6739,6 +7408,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -6754,43 +7424,54 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic 
type: object required: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map envFrom: items: properties: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array + x-kubernetes-list-type: atomic image: type: string imagePullPolicy: @@ -6805,6 +7486,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: properties: @@ -6822,6 +7504,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -6863,6 +7546,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: properties: @@ -6880,6 +7564,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -6922,6 +7607,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -6932,6 +7618,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -6952,6 +7639,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -7026,6 +7714,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -7036,6 +7725,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -7056,6 +7746,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -7116,6 +7807,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -7146,16 +7839,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + 
type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -7211,6 +7915,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -7221,6 +7926,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -7241,6 +7947,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -7303,6 +8010,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map volumeMounts: items: properties: @@ -7314,6 +8024,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -7323,6 +8035,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map workingDir: type: string required: @@ -7356,34 +8071,40 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic keytabSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic passwordSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic realm: type: string serviceName: @@ -7393,12 +8114,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - authType - realm @@ -7416,23 +8139,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: 
atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -7446,23 +8173,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -7476,23 +8207,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -7507,23 +8242,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -7531,12 +8270,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object topic: type: string @@ -7571,12 +8312,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -7586,6 +8329,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -7601,17 +8345,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: 
string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -7623,19 +8370,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -7685,6 +8436,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -7713,16 +8466,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -7781,6 +8545,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -7814,34 +8580,40 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic keytabSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic passwordSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic realm: type: string serviceName: @@ -7851,12 +8623,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - authType - realm @@ -7874,23 +8648,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + 
x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -7904,23 +8682,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -7934,23 +8716,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -7965,23 +8751,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -7989,12 +8779,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object topic: type: string @@ -8044,12 +8836,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -8059,6 +8853,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -8074,17 +8869,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string 
name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -8096,19 +8894,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -8158,6 +8960,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -8186,16 +8990,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -8254,6 +9069,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -8304,12 +9121,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object service: type: boolean @@ -8325,46 +9144,54 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic user: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object nkey: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic token: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: 
object stream: type: string @@ -8375,23 +9202,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -8399,12 +9230,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object url: type: string @@ -8433,34 +9266,40 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic keytabSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic passwordSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic realm: type: string serviceName: @@ -8470,12 +9309,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - authType - realm @@ -8493,23 +9334,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -8523,23 +9368,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: 
atomic required: - handshake - userSecret @@ -8553,23 +9402,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -8584,23 +9437,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -8608,12 +9465,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object topic: type: string @@ -8631,46 +9490,54 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic user: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object nkey: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic token: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object queue: type: string @@ -8683,23 +9550,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: 
boolean keySecret: @@ -8707,12 +9578,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object url: type: string @@ -8730,12 +9603,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object msgIDHeaderKey: type: string @@ -8799,12 +9674,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -8814,6 +9691,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -8829,17 +9707,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -8851,19 +9732,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -8913,6 +9798,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -8941,16 +9828,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -9009,6 +9907,8 @@ spec: type: string readOnly: type: boolean + 
recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -9046,12 +9946,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -9061,6 +9963,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -9076,17 +9979,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -9098,19 +10004,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -9160,6 +10070,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -9188,16 +10100,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -9256,6 +10179,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -9330,12 +10255,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -9345,6 +10272,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic 
resourceFieldRef: properties: containerName: @@ -9360,17 +10288,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -9382,19 +10313,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -9444,6 +10379,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -9472,16 +10409,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -9540,6 +10488,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -9656,10 +10606,12 @@ spec: diskURI: type: string fsType: + default: ext4 type: string kind: type: string readOnly: + default: false type: boolean required: - diskName @@ -9683,6 +10635,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic path: type: string readOnly: @@ -9692,8 +10645,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic user: type: string required: @@ -9708,8 +10663,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic volumeID: type: string required: @@ -9735,11 +10692,14 @@ spec: - path 
type: object type: array + x-kubernetes-list-type: atomic name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic csi: properties: driver: @@ -9749,8 +10709,10 @@ spec: nodePublishSecretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic readOnly: type: boolean volumeAttributes: @@ -9777,6 +10739,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: format: int32 type: integer @@ -9797,10 +10760,12 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object emptyDir: properties: @@ -9825,6 +10790,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic dataSource: properties: apiGroup: @@ -9837,6 +10803,7 @@ spec: - kind - name type: object + x-kubernetes-map-type: atomic dataSourceRef: properties: apiGroup: @@ -9883,16 +10850,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic storageClassName: type: string volumeAttributesClassName: @@ -9919,10 +10889,12 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic wwids: items: type: string type: array + x-kubernetes-list-type: atomic type: object flexVolume: properties: @@ -9939,8 +10911,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic required: - driver type: object @@ -9997,6 +10971,13 @@ spec: required: - path type: object + image: + properties: + pullPolicy: + type: string + reference: + type: string + type: object iscsi: properties: chapAuthDiscovery: @@ -10010,6 +10991,7 @@ spec: iqn: type: string iscsiInterface: + default: default type: string lun: format: int32 @@ -10018,13 
+11000,16 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic readOnly: type: boolean secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic targetPortal: type: string required: @@ -10098,16 +11083,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic name: type: string optional: @@ -10136,11 +11124,14 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic downwardAPI: properties: items: @@ -10155,6 +11146,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: format: int32 type: integer @@ -10175,10 +11167,12 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object secret: properties: @@ -10197,11 +11191,14 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic serviceAccountToken: properties: audience: @@ -10216,6 +11213,7 @@ spec: type: object type: object type: array + x-kubernetes-list-type: atomic type: object quobyte: properties: @@ -10242,21 +11240,27 @@ spec: image: type: string keyring: + default: /etc/ceph/keyring type: string monitors: items: type: string type: array + x-kubernetes-list-type: atomic pool: + default: rbd type: string readOnly: type: boolean secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic user: + default: admin type: string required: - image @@ -10265,6 +11269,7 @@ spec: scaleIO: properties: fsType: + default: xfs type: string gateway: 
type: string @@ -10275,11 +11280,14 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic sslEnabled: type: boolean storageMode: + default: ThinProvisioned type: string storagePool: type: string @@ -10312,6 +11320,7 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic optional: type: boolean secretName: @@ -10326,8 +11335,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic volumeName: type: string volumeNamespace: @@ -10459,9 +11470,3 @@ spec: storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/base/crds/full/numaflow.numaproj.io_vertices.yaml b/config/base/crds/full/numaflow.numaproj.io_vertices.yaml index a1756ec313..297ca4c99f 100644 --- a/config/base/crds/full/numaflow.numaproj.io_vertices.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_vertices.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.8.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.3 name: vertices.numaflow.numaproj.io spec: group: numaflow.numaproj.io @@ -71,11 +70,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -87,12 +88,15 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: format: int32 type: integer @@ -101,6 +105,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: properties: nodeSelectorTerms: @@ -117,11 +122,13 @@ spec: items: type: string type: array 
+ x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -133,16 +140,21 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: properties: @@ -164,16 +176,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -197,20 +212,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -224,6 +243,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -240,16 +260,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -273,26 +296,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + 
x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: properties: @@ -314,16 +342,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -347,20 +378,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -374,6 +409,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -390,16 +426,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -423,26 +462,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object automountServiceAccountToken: @@ -463,12 +507,14 
@@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -478,6 +524,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -493,17 +540,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -515,19 +565,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -575,6 +629,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -603,16 +659,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -667,6 +734,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic options: items: properties: @@ -676,10 +744,12 @@ spec: type: string type: object type: array + x-kubernetes-list-type: atomic searches: items: type: string type: array + x-kubernetes-list-type: atomic type: object dnsPolicy: type: string @@ -764,8 +834,10 @@ spec: items: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic type: array initContainerTemplate: properties: @@ -783,12 +855,14 @@ 
spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -798,6 +872,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -813,17 +888,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -835,19 +913,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -895,6 +977,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -923,16 +1007,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -988,10 +1083,12 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic command: items: type: string type: array + x-kubernetes-list-type: atomic env: items: properties: @@ -1006,12 +1103,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -1021,6 +1120,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: 
containerName: @@ -1036,43 +1136,54 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map envFrom: items: properties: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array + x-kubernetes-list-type: atomic image: type: string imagePullPolicy: @@ -1087,6 +1198,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: properties: @@ -1104,6 +1216,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -1145,6 +1258,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: properties: @@ -1162,6 +1276,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -1204,6 +1319,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -1214,6 +1330,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -1234,6 +1351,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -1308,6 +1426,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -1318,6 +1437,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -1338,6 +1458,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: 
type: string port: @@ -1398,6 +1519,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -1428,16 +1551,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -1493,6 +1627,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -1503,6 +1638,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -1523,6 +1659,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -1585,6 +1722,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map volumeMounts: items: properties: @@ -1596,6 +1736,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -1605,6 +1747,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map workingDir: type: string required: @@ -1663,13 +1808,10 @@ spec: properties: name: type: string - source: - properties: - resourceClaimName: - type: string - resourceClaimTemplateName: - type: string - type: object + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string required: - name type: object @@ -1716,6 +1858,15 @@ spec: type: object securityContext: properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object fsGroup: format: int64 type: integer @@ -1754,6 +1905,9 @@ spec: format: int64 type: integer type: array + 
x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string sysctls: items: properties: @@ -1766,6 +1920,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic windowsOptions: properties: gmsaCredentialSpec: @@ -1800,12 +1955,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -1815,6 +1972,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -1830,17 +1988,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -1852,19 +2013,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -1912,6 +2077,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -1940,16 +2107,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -2005,10 +2183,12 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic command: items: type: string type: array + x-kubernetes-list-type: atomic env: items: properties: @@ -2023,12 +2203,14 @@ spec: key: type: string name: + 
default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -2038,6 +2220,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -2053,43 +2236,54 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map envFrom: items: properties: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array + x-kubernetes-list-type: atomic image: type: string imagePullPolicy: @@ -2104,6 +2298,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: properties: @@ -2121,6 +2316,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -2162,6 +2358,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: properties: @@ -2179,6 +2376,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -2221,6 +2419,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -2231,6 +2430,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -2251,6 +2451,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -2325,6 +2526,7 @@ spec: items: type: string type: array + 
x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -2335,6 +2537,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -2355,6 +2558,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -2415,6 +2619,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -2445,16 +2651,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -2510,6 +2727,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -2520,6 +2738,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -2540,6 +2759,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -2602,6 +2822,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map volumeMounts: items: properties: @@ -2613,6 +2836,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -2622,6 +2847,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map workingDir: type: string required: @@ -2655,34 +2883,40 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic keytabSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: 
atomic passwordSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic realm: type: string serviceName: @@ -2692,12 +2926,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - authType - realm @@ -2715,23 +2951,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -2745,23 +2985,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -2775,23 +3019,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -2806,23 +3054,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -2830,12 +3082,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + 
x-kubernetes-map-type: atomic type: object topic: type: string @@ -2870,12 +3124,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -2885,6 +3141,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -2900,17 +3157,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -2922,19 +3182,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -2984,6 +3248,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -3012,16 +3278,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -3080,6 +3357,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -3113,34 +3392,40 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic keytabSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + 
x-kubernetes-map-type: atomic passwordSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic realm: type: string serviceName: @@ -3150,12 +3435,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - authType - realm @@ -3173,23 +3460,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -3203,23 +3494,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -3233,23 +3528,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -3264,23 +3563,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -3288,12 +3591,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key 
type: object + x-kubernetes-map-type: atomic type: object topic: type: string @@ -3343,12 +3648,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -3358,6 +3665,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -3373,17 +3681,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -3395,19 +3706,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -3457,6 +3772,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -3485,16 +3802,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -3553,6 +3881,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -3603,12 +3933,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object service: type: boolean @@ -3624,46 +3956,54 @@ spec: key: type: string name: + default: "" type: string 
optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic user: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object nkey: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic token: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object stream: type: string @@ -3674,23 +4014,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -3698,12 +4042,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object url: type: string @@ -3732,34 +4078,40 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic keytabSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic passwordSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic realm: type: string serviceName: @@ -3769,12 +4121,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - authType - realm @@ -3792,23 +4146,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - 
key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -3822,23 +4180,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -3852,23 +4214,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -3883,23 +4249,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -3907,12 +4277,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object topic: type: string @@ -3930,46 +4302,54 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic user: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object nkey: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + 
x-kubernetes-map-type: atomic token: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object queue: type: string @@ -3982,23 +4362,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -4006,12 +4390,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object url: type: string @@ -4029,12 +4415,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object msgIDHeaderKey: type: string @@ -4098,12 +4486,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -4113,6 +4503,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -4128,17 +4519,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -4150,19 +4544,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -4212,6 +4610,8 @@ spec: properties: 
name: type: string + request: + type: string required: - name type: object @@ -4240,16 +4640,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -4308,6 +4719,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -4345,12 +4758,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -4360,6 +4775,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -4375,17 +4791,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -4397,19 +4816,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -4459,6 +4882,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -4487,16 +4912,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + 
x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -4555,6 +4991,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -4706,12 +5144,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -4721,6 +5161,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -4736,17 +5177,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -4758,19 +5202,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -4820,6 +5268,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -4848,16 +5298,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -4916,6 +5377,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -5032,10 +5495,12 @@ spec: diskURI: type: string fsType: + default: ext4 type: string 
kind: type: string readOnly: + default: false type: boolean required: - diskName @@ -5059,6 +5524,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic path: type: string readOnly: @@ -5068,8 +5534,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic user: type: string required: @@ -5084,8 +5552,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic volumeID: type: string required: @@ -5111,11 +5581,14 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic csi: properties: driver: @@ -5125,8 +5598,10 @@ spec: nodePublishSecretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic readOnly: type: boolean volumeAttributes: @@ -5153,6 +5628,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: format: int32 type: integer @@ -5173,10 +5649,12 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object emptyDir: properties: @@ -5201,6 +5679,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic dataSource: properties: apiGroup: @@ -5213,6 +5692,7 @@ spec: - kind - name type: object + x-kubernetes-map-type: atomic dataSourceRef: properties: apiGroup: @@ -5259,16 +5739,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic storageClassName: type: string volumeAttributesClassName: @@ -5295,10 +5778,12 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic wwids: items: type: string type: array + 
x-kubernetes-list-type: atomic type: object flexVolume: properties: @@ -5315,8 +5800,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic required: - driver type: object @@ -5373,6 +5860,13 @@ spec: required: - path type: object + image: + properties: + pullPolicy: + type: string + reference: + type: string + type: object iscsi: properties: chapAuthDiscovery: @@ -5386,6 +5880,7 @@ spec: iqn: type: string iscsiInterface: + default: default type: string lun: format: int32 @@ -5394,13 +5889,16 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic readOnly: type: boolean secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic targetPortal: type: string required: @@ -5474,16 +5972,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic name: type: string optional: @@ -5512,11 +6013,14 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic downwardAPI: properties: items: @@ -5531,6 +6035,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: format: int32 type: integer @@ -5551,10 +6056,12 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object secret: properties: @@ -5573,11 +6080,14 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic serviceAccountToken: properties: audience: @@ -5592,6 +6102,7 @@ spec: type: object type: object type: array + 
x-kubernetes-list-type: atomic type: object quobyte: properties: @@ -5618,21 +6129,27 @@ spec: image: type: string keyring: + default: /etc/ceph/keyring type: string monitors: items: type: string type: array + x-kubernetes-list-type: atomic pool: + default: rbd type: string readOnly: type: boolean secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic user: + default: admin type: string required: - image @@ -5641,6 +6158,7 @@ spec: scaleIO: properties: fsType: + default: xfs type: string gateway: type: string @@ -5651,11 +6169,14 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic sslEnabled: type: boolean storageMode: + default: ThinProvisioned type: string storagePool: type: string @@ -5688,6 +6209,7 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic optional: type: boolean secretName: @@ -5702,8 +6224,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic volumeName: type: string volumeNamespace: @@ -5838,9 +6362,3 @@ spec: specReplicasPath: .spec.replicas statusReplicasPath: .status.replicas status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/install.yaml b/config/install.yaml index 367657d66f..85cd57c65e 100644 --- a/config/install.yaml +++ b/config/install.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.8.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.3 name: interstepbufferservices.numaflow.numaproj.io spec: group: numaflow.numaproj.io @@ -63,11 +62,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -79,12 +80,15 @@ spec: items: type: 
string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: format: int32 type: integer @@ -93,6 +97,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: properties: nodeSelectorTerms: @@ -109,11 +114,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -125,16 +132,21 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: properties: @@ -156,16 +168,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -189,20 +204,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -216,6 +235,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -232,16 +252,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: 
array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -265,26 +288,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: properties: @@ -306,16 +334,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -339,20 +370,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -366,6 +401,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -382,16 +418,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -415,26 +454,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - 
operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object automountServiceAccountToken: @@ -457,12 +501,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -472,6 +518,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -487,17 +534,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -509,19 +559,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -569,6 +623,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -597,16 +653,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -661,6 +728,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic 
options: items: properties: @@ -670,10 +738,12 @@ spec: type: string type: object type: array + x-kubernetes-list-type: atomic searches: items: type: string type: array + x-kubernetes-list-type: atomic type: object dnsPolicy: type: string @@ -683,8 +753,10 @@ spec: items: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic type: array metadata: properties: @@ -713,12 +785,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -728,6 +802,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -743,17 +818,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -765,19 +843,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -825,6 +907,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -853,16 +937,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -949,12 +1044,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean 
required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -964,6 +1061,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -979,17 +1077,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -1001,19 +1102,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -1061,6 +1166,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -1089,16 +1196,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -1156,13 +1274,10 @@ spec: properties: name: type: string - source: - properties: - resourceClaimName: - type: string - resourceClaimTemplateName: - type: string - type: object + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string required: - name type: object @@ -1171,6 +1286,15 @@ spec: type: string securityContext: properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object fsGroup: format: int64 type: integer @@ -1209,6 +1333,9 @@ spec: format: int64 type: integer type: array + 
x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string sysctls: items: properties: @@ -1221,6 +1348,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic windowsOptions: properties: gmsaCredentialSpec: @@ -1273,23 +1401,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic sentinelPassword: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic sentinelUrl: type: string url: @@ -1319,11 +1451,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -1335,12 +1469,15 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: format: int32 type: integer @@ -1349,6 +1486,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: properties: nodeSelectorTerms: @@ -1365,11 +1503,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -1381,16 +1521,21 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: properties: @@ -1412,16 +1557,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + 
x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -1445,20 +1593,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -1472,6 +1624,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -1488,16 +1641,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -1521,26 +1677,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: properties: @@ -1562,16 +1723,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -1595,20 +1759,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - 
key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -1622,6 +1790,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -1638,16 +1807,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -1671,26 +1843,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object automountServiceAccountToken: @@ -1701,6 +1878,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic options: items: properties: @@ -1710,10 +1888,12 @@ spec: type: string type: object type: array + x-kubernetes-list-type: atomic searches: items: type: string type: array + x-kubernetes-list-type: atomic type: object dnsPolicy: type: string @@ -1721,8 +1901,10 @@ spec: items: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic type: array initContainerTemplate: properties: @@ -1740,12 +1922,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: 
apiVersion: @@ -1755,6 +1939,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -1770,17 +1955,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -1792,19 +1980,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -1852,6 +2044,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -1880,16 +2074,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -1965,12 +2170,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -1980,6 +2187,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -1995,17 +2203,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -2017,19 +2228,23 @@ spec: configMapRef: properties: 
name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -2077,6 +2292,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -2105,16 +2322,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -2201,12 +2429,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -2216,6 +2446,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -2231,17 +2462,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -2253,19 +2487,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -2313,6 +2551,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -2341,16 +2581,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + 
appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -2408,13 +2659,10 @@ spec: properties: name: type: string - source: - properties: - resourceClaimName: - type: string - resourceClaimTemplateName: - type: string - type: object + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string required: - name type: object @@ -2423,6 +2671,15 @@ spec: type: string securityContext: properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object fsGroup: format: int64 type: integer @@ -2461,6 +2718,9 @@ spec: format: int64 type: integer type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string sysctls: items: properties: @@ -2473,6 +2733,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic windowsOptions: properties: gmsaCredentialSpec: @@ -2501,12 +2762,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -2516,6 +2779,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -2531,17 +2795,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -2553,19 +2820,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: 
name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -2613,6 +2884,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -2641,16 +2914,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -2784,46 +3068,54 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic user: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object nkey: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic token: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object streamConfig: type: string @@ -2841,23 +3133,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic sentinelPassword: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic sentinelUrl: type: string url: @@ -2888,19 +3184,12 @@ spec: storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - 
controller-gen.kubebuilder.io/version: v0.8.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.3 name: monovertices.numaflow.numaproj.io spec: group: numaflow.numaproj.io @@ -2967,11 +3256,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -2983,12 +3274,15 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: format: int32 type: integer @@ -2997,6 +3291,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: properties: nodeSelectorTerms: @@ -3013,11 +3308,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -3029,16 +3326,21 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: properties: @@ -3060,16 +3362,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -3093,20 +3398,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object 
type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -3120,6 +3429,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -3136,16 +3446,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -3169,26 +3482,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: properties: @@ -3210,16 +3528,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -3243,20 +3564,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -3270,6 +3595,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic 
requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -3286,16 +3612,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -3319,26 +3648,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object automountServiceAccountToken: @@ -3359,12 +3693,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -3374,6 +3710,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -3389,17 +3726,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -3411,19 +3751,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -3471,6 +3815,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: 
object @@ -3499,16 +3845,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -3579,11 +3936,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -3595,12 +3954,15 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: format: int32 type: integer @@ -3609,6 +3971,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: properties: nodeSelectorTerms: @@ -3625,11 +3988,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -3641,16 +4006,21 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: properties: @@ -3672,16 +4042,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -3705,20 
+4078,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -3732,6 +4109,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -3748,16 +4126,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -3781,26 +4162,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: properties: @@ -3822,16 +4208,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -3855,20 +4244,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic 
namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -3882,6 +4275,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -3898,16 +4292,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -3931,26 +4328,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object automountServiceAccountToken: @@ -3971,12 +4373,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -3986,6 +4390,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -4001,17 +4406,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -4023,19 +4431,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: 
type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -4083,6 +4495,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -4111,16 +4525,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -4175,6 +4600,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic options: items: properties: @@ -4184,10 +4610,12 @@ spec: type: string type: object type: array + x-kubernetes-list-type: atomic searches: items: type: string type: array + x-kubernetes-list-type: atomic type: object dnsPolicy: type: string @@ -4195,8 +4623,10 @@ spec: items: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic type: array initContainerTemplate: properties: @@ -4214,12 +4644,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -4229,6 +4661,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -4244,17 +4677,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -4266,19 +4702,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: 
string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -4326,6 +4766,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -4354,16 +4796,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -4440,13 +4893,10 @@ spec: properties: name: type: string - source: - properties: - resourceClaimName: - type: string - resourceClaimTemplateName: - type: string - type: object + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string required: - name type: object @@ -4455,6 +4905,15 @@ spec: type: string securityContext: properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object fsGroup: format: int64 type: integer @@ -4493,6 +4952,9 @@ spec: format: int64 type: integer type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string sysctls: items: properties: @@ -4505,6 +4967,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic windowsOptions: properties: gmsaCredentialSpec: @@ -4542,6 +5005,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic options: items: properties: @@ -4551,10 +5015,12 @@ spec: type: string type: object type: array + x-kubernetes-list-type: atomic searches: items: type: string type: array + x-kubernetes-list-type: atomic type: object dnsPolicy: type: string @@ -4562,8 +5028,10 @@ spec: items: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic type: array initContainers: items: @@ -4572,10 
+5040,12 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic command: items: type: string type: array + x-kubernetes-list-type: atomic env: items: properties: @@ -4590,12 +5060,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -4605,6 +5077,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -4620,43 +5093,54 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map envFrom: items: properties: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array + x-kubernetes-list-type: atomic image: type: string imagePullPolicy: @@ -4671,6 +5155,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: properties: @@ -4688,6 +5173,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -4729,6 +5215,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: properties: @@ -4746,6 +5233,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -4788,6 +5276,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -4798,6 +5287,7 @@ spec: format: int32 type: integer service: + 
default: "" type: string required: - port @@ -4818,6 +5308,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -4892,6 +5383,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -4902,6 +5394,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -4922,6 +5415,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -4982,6 +5476,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -5012,16 +5508,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -5077,6 +5584,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -5087,6 +5595,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -5107,6 +5616,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -5169,6 +5679,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map volumeMounts: items: properties: @@ -5180,6 +5693,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -5189,6 +5704,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map workingDir: type: string required: @@ -5249,13 +5767,10 @@ spec: properties: name: type: string - source: - properties: - 
resourceClaimName: - type: string - resourceClaimTemplateName: - type: string - type: object + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string required: - name type: object @@ -5302,6 +5817,15 @@ spec: type: object securityContext: properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object fsGroup: format: int64 type: integer @@ -5340,6 +5864,9 @@ spec: format: int64 type: integer type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string sysctls: items: properties: @@ -5352,6 +5879,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic windowsOptions: properties: gmsaCredentialSpec: @@ -5373,10 +5901,12 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic command: items: type: string type: array + x-kubernetes-list-type: atomic env: items: properties: @@ -5391,12 +5921,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -5406,6 +5938,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -5421,43 +5954,54 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map envFrom: items: properties: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array + 
x-kubernetes-list-type: atomic image: type: string imagePullPolicy: @@ -5472,6 +6016,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: properties: @@ -5489,6 +6034,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -5530,6 +6076,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: properties: @@ -5547,6 +6094,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -5589,6 +6137,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -5599,6 +6148,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -5619,6 +6169,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -5693,6 +6244,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -5703,6 +6255,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -5723,6 +6276,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -5783,6 +6337,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -5813,16 +6369,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -5878,6 +6445,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -5888,6 +6456,7 @@ spec: format: 
int32 type: integer service: + default: "" type: string required: - port @@ -5908,6 +6477,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -5970,6 +6540,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map volumeMounts: items: properties: @@ -5981,6 +6554,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -5990,6 +6565,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map workingDir: type: string required: @@ -6023,34 +6601,40 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic keytabSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic passwordSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic realm: type: string serviceName: @@ -6060,12 +6644,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - authType - realm @@ -6083,23 +6669,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -6113,23 +6703,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: 
type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -6143,23 +6737,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -6174,23 +6772,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -6198,12 +6800,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object topic: type: string @@ -6238,12 +6842,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -6253,6 +6859,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -6268,17 +6875,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -6290,19 +6900,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array 
image: @@ -6352,6 +6966,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -6380,16 +6996,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -6448,6 +7075,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -6481,34 +7110,40 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic keytabSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic passwordSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic realm: type: string serviceName: @@ -6518,12 +7153,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - authType - realm @@ -6541,23 +7178,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -6571,23 +7212,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string 
optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -6601,23 +7246,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -6632,23 +7281,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -6656,12 +7309,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object topic: type: string @@ -6711,12 +7366,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -6726,6 +7383,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -6741,17 +7399,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -6763,19 +7424,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object 
type: array image: @@ -6825,6 +7490,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -6853,16 +7520,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -6921,6 +7599,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -6971,12 +7651,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object service: type: boolean @@ -6992,46 +7674,54 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic user: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object nkey: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic token: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object stream: type: string @@ -7042,23 +7732,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -7066,12 +7760,14 @@ spec: key: type: string name: + default: 
"" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object url: type: string @@ -7100,34 +7796,40 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic keytabSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic passwordSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic realm: type: string serviceName: @@ -7137,12 +7839,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - authType - realm @@ -7160,23 +7864,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -7190,23 +7898,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -7220,23 +7932,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -7251,23 +7967,27 @@ spec: key: type: string name: + default: 
"" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -7275,12 +7995,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object topic: type: string @@ -7298,46 +8020,54 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic user: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object nkey: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic token: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object queue: type: string @@ -7350,23 +8080,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -7374,12 +8108,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object url: type: string @@ -7397,12 +8133,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object msgIDHeaderKey: type: string @@ -7466,12 +8204,14 @@ spec: key: type: 
string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -7481,6 +8221,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -7496,17 +8237,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -7518,19 +8262,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -7580,6 +8328,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -7608,16 +8358,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -7676,6 +8437,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -7713,12 +8476,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -7728,6 +8493,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -7743,17 +8509,20 @@ spec: required: - resource type: object + 
x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -7765,19 +8534,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -7827,6 +8600,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -7855,16 +8630,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -7923,6 +8709,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -7996,10 +8784,12 @@ spec: diskURI: type: string fsType: + default: ext4 type: string kind: type: string readOnly: + default: false type: boolean required: - diskName @@ -8023,6 +8813,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic path: type: string readOnly: @@ -8032,8 +8823,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic user: type: string required: @@ -8048,8 +8841,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic volumeID: type: string required: @@ -8075,11 +8870,14 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" type: string optional: type: boolean type: 
object + x-kubernetes-map-type: atomic csi: properties: driver: @@ -8089,8 +8887,10 @@ spec: nodePublishSecretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic readOnly: type: boolean volumeAttributes: @@ -8117,6 +8917,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: format: int32 type: integer @@ -8137,10 +8938,12 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object emptyDir: properties: @@ -8165,6 +8968,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic dataSource: properties: apiGroup: @@ -8177,6 +8981,7 @@ spec: - kind - name type: object + x-kubernetes-map-type: atomic dataSourceRef: properties: apiGroup: @@ -8223,16 +9028,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic storageClassName: type: string volumeAttributesClassName: @@ -8259,10 +9067,12 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic wwids: items: type: string type: array + x-kubernetes-list-type: atomic type: object flexVolume: properties: @@ -8279,8 +9089,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic required: - driver type: object @@ -8337,6 +9149,13 @@ spec: required: - path type: object + image: + properties: + pullPolicy: + type: string + reference: + type: string + type: object iscsi: properties: chapAuthDiscovery: @@ -8350,6 +9169,7 @@ spec: iqn: type: string iscsiInterface: + default: default type: string lun: format: int32 @@ -8358,13 +9178,16 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic readOnly: type: boolean secretRef: properties: name: + 
default: "" type: string type: object + x-kubernetes-map-type: atomic targetPortal: type: string required: @@ -8438,16 +9261,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic name: type: string optional: @@ -8476,11 +9302,14 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic downwardAPI: properties: items: @@ -8495,6 +9324,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: format: int32 type: integer @@ -8515,10 +9345,12 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object secret: properties: @@ -8537,11 +9369,14 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic serviceAccountToken: properties: audience: @@ -8556,6 +9391,7 @@ spec: type: object type: object type: array + x-kubernetes-list-type: atomic type: object quobyte: properties: @@ -8582,21 +9418,27 @@ spec: image: type: string keyring: + default: /etc/ceph/keyring type: string monitors: items: type: string type: array + x-kubernetes-list-type: atomic pool: + default: rbd type: string readOnly: type: boolean secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic user: + default: admin type: string required: - image @@ -8605,6 +9447,7 @@ spec: scaleIO: properties: fsType: + default: xfs type: string gateway: type: string @@ -8615,11 +9458,14 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic sslEnabled: 
type: boolean storageMode: + default: ThinProvisioned type: string storagePool: type: string @@ -8652,6 +9498,7 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic optional: type: boolean secretName: @@ -8666,8 +9513,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic volumeName: type: string volumeNamespace: @@ -8784,19 +9633,12 @@ spec: specReplicasPath: .spec.replicas statusReplicasPath: .status.replicas status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.8.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.3 name: pipelines.numaflow.numaproj.io spec: group: numaflow.numaproj.io @@ -8967,12 +9809,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -8982,6 +9826,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -8997,17 +9842,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -9019,19 +9867,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -9081,6 +9933,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -9109,16 +9963,27 @@ spec: 
properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -9177,6 +10042,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -9224,10 +10091,12 @@ spec: diskURI: type: string fsType: + default: ext4 type: string kind: type: string readOnly: + default: false type: boolean required: - diskName @@ -9251,6 +10120,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic path: type: string readOnly: @@ -9260,8 +10130,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic user: type: string required: @@ -9276,8 +10148,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic volumeID: type: string required: @@ -9303,11 +10177,14 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic csi: properties: driver: @@ -9317,8 +10194,10 @@ spec: nodePublishSecretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic readOnly: type: boolean volumeAttributes: @@ -9345,6 +10224,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: format: int32 type: integer @@ -9365,10 +10245,12 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object emptyDir: properties: @@ -9393,6 +10275,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic dataSource: 
properties: apiGroup: @@ -9405,6 +10288,7 @@ spec: - kind - name type: object + x-kubernetes-map-type: atomic dataSourceRef: properties: apiGroup: @@ -9451,16 +10335,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic storageClassName: type: string volumeAttributesClassName: @@ -9487,10 +10374,12 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic wwids: items: type: string type: array + x-kubernetes-list-type: atomic type: object flexVolume: properties: @@ -9507,8 +10396,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic required: - driver type: object @@ -9565,6 +10456,13 @@ spec: required: - path type: object + image: + properties: + pullPolicy: + type: string + reference: + type: string + type: object iscsi: properties: chapAuthDiscovery: @@ -9578,6 +10476,7 @@ spec: iqn: type: string iscsiInterface: + default: default type: string lun: format: int32 @@ -9586,13 +10485,16 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic readOnly: type: boolean secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic targetPortal: type: string required: @@ -9666,16 +10568,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic name: type: string optional: @@ -9704,11 +10609,14 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic downwardAPI: properties: items: @@ -9723,6 +10631,7 @@ spec: 
required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: format: int32 type: integer @@ -9743,10 +10652,12 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object secret: properties: @@ -9765,11 +10676,14 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic serviceAccountToken: properties: audience: @@ -9784,6 +10698,7 @@ spec: type: object type: object type: array + x-kubernetes-list-type: atomic type: object quobyte: properties: @@ -9810,21 +10725,27 @@ spec: image: type: string keyring: + default: /etc/ceph/keyring type: string monitors: items: type: string type: array + x-kubernetes-list-type: atomic pool: + default: rbd type: string readOnly: type: boolean secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic user: + default: admin type: string required: - image @@ -9833,6 +10754,7 @@ spec: scaleIO: properties: fsType: + default: xfs type: string gateway: type: string @@ -9843,11 +10765,14 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic sslEnabled: type: boolean storageMode: + default: ThinProvisioned type: string storagePool: type: string @@ -9880,6 +10805,7 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic optional: type: boolean secretName: @@ -9894,8 +10820,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic volumeName: type: string volumeNamespace: @@ -9948,11 +10876,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -9964,12 +10894,15 @@ spec: items: type: string type: array + 
x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: format: int32 type: integer @@ -9978,6 +10911,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: properties: nodeSelectorTerms: @@ -9994,11 +10928,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -10010,16 +10946,21 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: properties: @@ -10041,16 +10982,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -10074,20 +11018,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -10101,6 +11049,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -10117,16 +11066,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object 
type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -10150,26 +11102,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: properties: @@ -10191,16 +11148,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -10224,20 +11184,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -10251,6 +11215,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -10267,16 +11232,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -10300,26 +11268,31 @@ spec: items: type: string type: array + 
x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object automountServiceAccountToken: @@ -10340,12 +11313,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -10355,6 +11330,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -10370,17 +11346,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -10392,19 +11371,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -10452,6 +11435,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -10480,16 +11465,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -10544,6 +11540,7 
@@ spec: items: type: string type: array + x-kubernetes-list-type: atomic options: items: properties: @@ -10553,10 +11550,12 @@ spec: type: string type: object type: array + x-kubernetes-list-type: atomic searches: items: type: string type: array + x-kubernetes-list-type: atomic type: object dnsPolicy: type: string @@ -10564,8 +11563,10 @@ spec: items: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic type: array initContainerTemplate: properties: @@ -10583,12 +11584,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -10598,6 +11601,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -10613,17 +11617,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -10635,19 +11642,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -10695,6 +11706,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -10723,16 +11736,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: 
boolean @@ -10809,13 +11833,10 @@ spec: properties: name: type: string - source: - properties: - resourceClaimName: - type: string - resourceClaimTemplateName: - type: string - type: object + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string required: - name type: object @@ -10824,6 +11845,15 @@ spec: type: string securityContext: properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object fsGroup: format: int64 type: integer @@ -10862,6 +11892,9 @@ spec: format: int64 type: integer type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string sysctls: items: properties: @@ -10874,6 +11907,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic windowsOptions: properties: gmsaCredentialSpec: @@ -10927,11 +11961,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -10943,12 +11979,15 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: format: int32 type: integer @@ -10957,6 +11996,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: properties: nodeSelectorTerms: @@ -10973,11 +12013,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -10989,16 +12031,21 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic 
required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: properties: @@ -11020,16 +12067,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -11053,20 +12103,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -11080,6 +12134,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -11096,16 +12151,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -11129,26 +12187,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: properties: @@ -11170,16 +12233,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + 
x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -11203,20 +12269,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -11230,6 +12300,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -11246,16 +12317,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -11279,26 +12353,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object automountServiceAccountToken: @@ -11322,12 +12401,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -11337,6 +12418,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -11352,17 +12434,20 @@ spec: required: - resource type: 
object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -11374,19 +12459,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -11434,6 +12523,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -11462,16 +12553,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -11526,6 +12628,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic options: items: properties: @@ -11535,10 +12638,12 @@ spec: type: string type: object type: array + x-kubernetes-list-type: atomic searches: items: type: string type: array + x-kubernetes-list-type: atomic type: object dnsPolicy: type: string @@ -11546,8 +12651,10 @@ spec: items: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic type: array metadata: properties: @@ -11574,13 +12681,10 @@ spec: properties: name: type: string - source: - properties: - resourceClaimName: - type: string - resourceClaimTemplateName: - type: string - type: object + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string required: - name type: object @@ -11589,6 +12693,15 @@ spec: type: string securityContext: properties: + appArmorProfile: + 
properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object fsGroup: format: int64 type: integer @@ -11627,6 +12740,9 @@ spec: format: int64 type: integer type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string sysctls: items: properties: @@ -11639,6 +12755,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic windowsOptions: properties: gmsaCredentialSpec: @@ -11695,11 +12812,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -11711,12 +12830,15 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: format: int32 type: integer @@ -11725,6 +12847,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: properties: nodeSelectorTerms: @@ -11741,11 +12864,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -11757,16 +12882,21 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: properties: @@ -11788,16 +12918,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: 
atomic matchLabelKeys: items: type: string @@ -11821,20 +12954,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -11848,6 +12985,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -11864,16 +13002,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -11897,26 +13038,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: properties: @@ -11938,16 +13084,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -11971,20 +13120,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: 
string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -11998,6 +13151,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -12014,16 +13168,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -12047,26 +13204,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object automountServiceAccountToken: @@ -12087,12 +13249,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -12102,6 +13266,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -12117,17 +13282,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -12139,19 +13307,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: 
type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -12199,6 +13371,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -12227,16 +13401,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -12291,6 +13476,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic options: items: properties: @@ -12300,10 +13486,12 @@ spec: type: string type: object type: array + x-kubernetes-list-type: atomic searches: items: type: string type: array + x-kubernetes-list-type: atomic type: object dnsPolicy: type: string @@ -12311,8 +13499,10 @@ spec: items: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic type: array initContainerTemplate: properties: @@ -12330,12 +13520,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -12345,6 +13537,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -12360,17 +13553,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -12382,19 +13578,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + 
x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -12442,6 +13642,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -12470,16 +13672,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -12553,13 +13766,10 @@ spec: properties: name: type: string - source: - properties: - resourceClaimName: - type: string - resourceClaimTemplateName: - type: string - type: object + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string required: - name type: object @@ -12568,6 +13778,15 @@ spec: type: string securityContext: properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object fsGroup: format: int64 type: integer @@ -12606,6 +13825,9 @@ spec: format: int64 type: integer type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string sysctls: items: properties: @@ -12618,6 +13840,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic windowsOptions: properties: gmsaCredentialSpec: @@ -12671,11 +13894,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -12687,12 +13912,15 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic 
type: object + x-kubernetes-map-type: atomic weight: format: int32 type: integer @@ -12701,6 +13929,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: properties: nodeSelectorTerms: @@ -12717,11 +13946,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -12733,16 +13964,21 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: properties: @@ -12764,16 +14000,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -12797,20 +14036,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -12824,6 +14067,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -12840,16 +14084,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: 
object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -12873,26 +14120,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: properties: @@ -12914,16 +14166,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -12947,20 +14202,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -12974,6 +14233,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -12990,16 +14250,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -13023,26 +14286,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: 
additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object automountServiceAccountToken: @@ -13063,12 +14331,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -13078,6 +14348,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -13093,17 +14364,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -13115,19 +14389,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -13175,6 +14453,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -13203,16 +14483,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -13267,6 +14558,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic options: items: properties: @@ -13276,10 +14568,12 @@ 
spec: type: string type: object type: array + x-kubernetes-list-type: atomic searches: items: type: string type: array + x-kubernetes-list-type: atomic type: object dnsPolicy: type: string @@ -13287,8 +14581,10 @@ spec: items: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic type: array initContainerTemplate: properties: @@ -13306,12 +14602,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -13321,6 +14619,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -13336,17 +14635,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -13358,19 +14660,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -13418,6 +14724,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -13446,16 +14754,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -13529,13 +14848,10 @@ spec: properties: name: type: string - source: - properties: - resourceClaimName: - type: 
string - resourceClaimTemplateName: - type: string - type: object + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string required: - name type: object @@ -13544,6 +14860,15 @@ spec: type: string securityContext: properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object fsGroup: format: int64 type: integer @@ -13582,6 +14907,9 @@ spec: format: int64 type: integer type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string sysctls: items: properties: @@ -13594,6 +14922,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic windowsOptions: properties: gmsaCredentialSpec: @@ -13649,11 +14978,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -13665,12 +14996,15 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: format: int32 type: integer @@ -13679,6 +15013,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: properties: nodeSelectorTerms: @@ -13695,11 +15030,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -13711,16 +15048,21 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: properties: @@ -13742,16 
+15084,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -13775,20 +15120,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -13802,6 +15151,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -13818,16 +15168,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -13851,26 +15204,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: properties: @@ -13892,16 +15250,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: 
atomic matchLabelKeys: items: type: string @@ -13925,20 +15286,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -13952,6 +15317,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -13968,16 +15334,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -14001,26 +15370,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object automountServiceAccountToken: @@ -14041,12 +15415,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -14056,6 +15432,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -14071,17 +15448,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: 
boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -14093,19 +15473,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -14153,6 +15537,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -14181,16 +15567,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -14245,6 +15642,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic options: items: properties: @@ -14254,10 +15652,12 @@ spec: type: string type: object type: array + x-kubernetes-list-type: atomic searches: items: type: string type: array + x-kubernetes-list-type: atomic type: object dnsPolicy: type: string @@ -14265,8 +15665,10 @@ spec: items: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic type: array initContainerTemplate: properties: @@ -14284,12 +15686,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -14299,6 +15703,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -14314,17 +15719,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" 
type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -14336,19 +15744,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -14396,6 +15808,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -14424,16 +15838,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -14489,10 +15914,12 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic command: items: type: string type: array + x-kubernetes-list-type: atomic env: items: properties: @@ -14507,12 +15934,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -14522,6 +15951,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -14537,43 +15967,54 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map envFrom: items: properties: configMapRef: properties: name: + 
default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array + x-kubernetes-list-type: atomic image: type: string imagePullPolicy: @@ -14588,6 +16029,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: properties: @@ -14605,6 +16047,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -14646,6 +16089,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: properties: @@ -14663,6 +16107,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -14705,6 +16150,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -14715,6 +16161,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -14735,6 +16182,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -14809,6 +16257,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -14819,6 +16268,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -14839,6 +16289,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -14899,6 +16350,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -14929,16 +16382,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: 
atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -14994,6 +16458,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -15004,6 +16469,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -15024,6 +16490,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -15086,6 +16553,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map volumeMounts: items: properties: @@ -15097,6 +16567,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -15106,6 +16578,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map workingDir: type: string required: @@ -15156,13 +16631,10 @@ spec: properties: name: type: string - source: - properties: - resourceClaimName: - type: string - resourceClaimTemplateName: - type: string - type: object + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string required: - name type: object @@ -15209,6 +16681,15 @@ spec: type: object securityContext: properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object fsGroup: format: int64 type: integer @@ -15247,6 +16728,9 @@ spec: format: int64 type: integer type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string sysctls: items: properties: @@ -15259,6 +16743,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic windowsOptions: properties: gmsaCredentialSpec: @@ -15293,12 +16778,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: 
properties: apiVersion: @@ -15308,6 +16795,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -15323,17 +16811,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -15345,19 +16836,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -15405,6 +16900,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -15433,16 +16930,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -15498,10 +17006,12 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic command: items: type: string type: array + x-kubernetes-list-type: atomic env: items: properties: @@ -15516,12 +17026,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -15531,6 +17043,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -15546,43 +17059,54 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: 
properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map envFrom: items: properties: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array + x-kubernetes-list-type: atomic image: type: string imagePullPolicy: @@ -15597,6 +17121,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: properties: @@ -15614,6 +17139,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -15655,6 +17181,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: properties: @@ -15672,6 +17199,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -15714,6 +17242,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -15724,6 +17253,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -15744,6 +17274,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -15818,6 +17349,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -15828,6 +17360,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -15848,6 +17381,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -15908,6 +17442,8 @@ spec: properties: name: type: string + request: + type: string 
required: - name type: object @@ -15938,16 +17474,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -16003,6 +17550,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -16013,6 +17561,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -16033,6 +17582,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -16095,6 +17645,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map volumeMounts: items: properties: @@ -16106,6 +17659,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -16115,6 +17670,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map workingDir: type: string required: @@ -16148,34 +17706,40 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic keytabSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic passwordSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic realm: type: string serviceName: @@ -16185,12 +17749,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic 
required: - authType - realm @@ -16208,23 +17774,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -16238,23 +17808,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -16268,23 +17842,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -16299,23 +17877,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -16323,12 +17905,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object topic: type: string @@ -16363,12 +17947,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -16378,6 +17964,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic 
resourceFieldRef: properties: containerName: @@ -16393,17 +17980,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -16415,19 +18005,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -16477,6 +18071,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -16505,16 +18101,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -16573,6 +18180,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -16606,34 +18215,40 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic keytabSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic passwordSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic realm: type: string serviceName: @@ -16643,12 +18258,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key 
type: object + x-kubernetes-map-type: atomic required: - authType - realm @@ -16666,23 +18283,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -16696,23 +18317,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -16726,23 +18351,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -16757,23 +18386,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -16781,12 +18414,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object topic: type: string @@ -16836,12 +18471,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -16851,6 +18488,7 @@ spec: required: - fieldPath type: 
object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -16866,17 +18504,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -16888,19 +18529,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -16950,6 +18595,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -16978,16 +18625,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -17046,6 +18704,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -17096,12 +18756,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object service: type: boolean @@ -17117,46 +18779,54 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic user: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object nkey: properties: key: type: string name: + default: "" type: string 
optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic token: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object stream: type: string @@ -17167,23 +18837,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -17191,12 +18865,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object url: type: string @@ -17225,34 +18901,40 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic keytabSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic passwordSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic realm: type: string serviceName: @@ -17262,12 +18944,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - authType - realm @@ -17285,23 +18969,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -17315,23 +19003,27 @@ spec: key: type: string name: + default: 
"" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -17345,23 +19037,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -17376,23 +19072,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -17400,12 +19100,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object topic: type: string @@ -17423,46 +19125,54 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic user: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object nkey: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic token: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object queue: type: string @@ -17475,23 +19185,27 @@ spec: key: type: string name: + default: "" type: string 
optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -17499,12 +19213,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object url: type: string @@ -17522,12 +19238,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object msgIDHeaderKey: type: string @@ -17591,12 +19309,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -17606,6 +19326,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -17621,17 +19342,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -17643,19 +19367,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -17705,6 +19433,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -17733,16 +19463,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object 
capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -17801,6 +19542,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -17838,12 +19581,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -17853,6 +19598,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -17868,17 +19614,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -17890,19 +19639,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -17952,6 +19705,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -17980,16 +19735,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -18048,6 +19814,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ 
-18122,12 +19890,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -18137,6 +19907,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -18152,17 +19923,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -18174,19 +19948,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -18236,6 +20014,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -18264,16 +20044,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -18332,6 +20123,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -18448,10 +20241,12 @@ spec: diskURI: type: string fsType: + default: ext4 type: string kind: type: string readOnly: + default: false type: boolean required: - diskName @@ -18475,6 +20270,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic path: type: string readOnly: @@ -18484,8 +20280,10 @@ spec: secretRef: properties: name: + 
default: "" type: string type: object + x-kubernetes-map-type: atomic user: type: string required: @@ -18500,8 +20298,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic volumeID: type: string required: @@ -18527,11 +20327,14 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic csi: properties: driver: @@ -18541,8 +20344,10 @@ spec: nodePublishSecretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic readOnly: type: boolean volumeAttributes: @@ -18569,6 +20374,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: format: int32 type: integer @@ -18589,10 +20395,12 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object emptyDir: properties: @@ -18617,6 +20425,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic dataSource: properties: apiGroup: @@ -18629,6 +20438,7 @@ spec: - kind - name type: object + x-kubernetes-map-type: atomic dataSourceRef: properties: apiGroup: @@ -18675,16 +20485,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic storageClassName: type: string volumeAttributesClassName: @@ -18711,10 +20524,12 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic wwids: items: type: string type: array + x-kubernetes-list-type: atomic type: object flexVolume: properties: @@ -18731,8 +20546,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic required: - driver type: object @@ -18789,6 
+20606,13 @@ spec: required: - path type: object + image: + properties: + pullPolicy: + type: string + reference: + type: string + type: object iscsi: properties: chapAuthDiscovery: @@ -18802,6 +20626,7 @@ spec: iqn: type: string iscsiInterface: + default: default type: string lun: format: int32 @@ -18810,13 +20635,16 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic readOnly: type: boolean secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic targetPortal: type: string required: @@ -18890,16 +20718,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic name: type: string optional: @@ -18928,11 +20759,14 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic downwardAPI: properties: items: @@ -18947,6 +20781,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: format: int32 type: integer @@ -18967,10 +20802,12 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object secret: properties: @@ -18989,11 +20826,14 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic serviceAccountToken: properties: audience: @@ -19008,6 +20848,7 @@ spec: type: object type: object type: array + x-kubernetes-list-type: atomic type: object quobyte: properties: @@ -19034,21 +20875,27 @@ spec: image: type: string keyring: + default: /etc/ceph/keyring type: string monitors: items: type: string type: array + x-kubernetes-list-type: atomic 
pool: + default: rbd type: string readOnly: type: boolean secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic user: + default: admin type: string required: - image @@ -19057,6 +20904,7 @@ spec: scaleIO: properties: fsType: + default: xfs type: string gateway: type: string @@ -19067,11 +20915,14 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic sslEnabled: type: boolean storageMode: + default: ThinProvisioned type: string storagePool: type: string @@ -19104,6 +20955,7 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic optional: type: boolean secretName: @@ -19118,8 +20970,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic volumeName: type: string volumeNamespace: @@ -19251,19 +21105,12 @@ spec: storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.8.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.3 name: vertices.numaflow.numaproj.io spec: group: numaflow.numaproj.io @@ -19330,11 +21177,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -19346,12 +21195,15 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: format: int32 type: integer @@ -19360,6 +21212,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: properties: nodeSelectorTerms: @@ -19376,11 +21229,13 @@ spec: 
items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -19392,16 +21247,21 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: properties: @@ -19423,16 +21283,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -19456,20 +21319,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -19483,6 +21350,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -19499,16 +21367,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -19532,26 +21403,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: 
type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: properties: @@ -19573,16 +21449,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -19606,20 +21485,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -19633,6 +21516,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -19649,16 +21533,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -19682,26 +21569,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object 
type: object automountServiceAccountToken: @@ -19722,12 +21614,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -19737,6 +21631,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -19752,17 +21647,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -19774,19 +21672,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -19834,6 +21736,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -19862,16 +21766,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -19926,6 +21841,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic options: items: properties: @@ -19935,10 +21851,12 @@ spec: type: string type: object type: array + x-kubernetes-list-type: atomic searches: items: type: string type: array + x-kubernetes-list-type: atomic type: object dnsPolicy: type: string @@ -20023,8 +21941,10 @@ spec: items: properties: name: + default: "" type: string type: object + 
x-kubernetes-map-type: atomic type: array initContainerTemplate: properties: @@ -20042,12 +21962,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -20057,6 +21979,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -20072,17 +21995,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -20094,19 +22020,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -20154,6 +22084,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -20182,16 +22114,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -20247,10 +22190,12 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic command: items: type: string type: array + x-kubernetes-list-type: atomic env: items: properties: @@ -20265,12 +22210,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ 
-20280,6 +22227,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -20295,43 +22243,54 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map envFrom: items: properties: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array + x-kubernetes-list-type: atomic image: type: string imagePullPolicy: @@ -20346,6 +22305,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: properties: @@ -20363,6 +22323,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -20404,6 +22365,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: properties: @@ -20421,6 +22383,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -20463,6 +22426,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -20473,6 +22437,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -20493,6 +22458,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -20567,6 +22533,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -20577,6 +22544,7 @@ spec: format: int32 type: integer service: 
+ default: "" type: string required: - port @@ -20597,6 +22565,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -20657,6 +22626,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -20687,16 +22658,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -20752,6 +22734,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -20762,6 +22745,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -20782,6 +22766,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -20844,6 +22829,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map volumeMounts: items: properties: @@ -20855,6 +22843,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -20864,6 +22854,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map workingDir: type: string required: @@ -20922,13 +22915,10 @@ spec: properties: name: type: string - source: - properties: - resourceClaimName: - type: string - resourceClaimTemplateName: - type: string - type: object + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string required: - name type: object @@ -20975,6 +22965,15 @@ spec: type: object securityContext: properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: 
string + required: + - type + type: object fsGroup: format: int64 type: integer @@ -21013,6 +23012,9 @@ spec: format: int64 type: integer type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string sysctls: items: properties: @@ -21025,6 +23027,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic windowsOptions: properties: gmsaCredentialSpec: @@ -21059,12 +23062,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -21074,6 +23079,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -21089,17 +23095,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -21111,19 +23120,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -21171,6 +23184,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -21199,16 +23214,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -21264,10 +23290,12 @@ spec: items: type: string type: array + 
x-kubernetes-list-type: atomic command: items: type: string type: array + x-kubernetes-list-type: atomic env: items: properties: @@ -21282,12 +23310,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -21297,6 +23327,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -21312,43 +23343,54 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map envFrom: items: properties: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array + x-kubernetes-list-type: atomic image: type: string imagePullPolicy: @@ -21363,6 +23405,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: properties: @@ -21380,6 +23423,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -21421,6 +23465,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: properties: @@ -21438,6 +23483,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -21480,6 +23526,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -21490,6 +23537,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - 
port @@ -21510,6 +23558,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -21584,6 +23633,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -21594,6 +23644,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -21614,6 +23665,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -21674,6 +23726,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -21704,16 +23758,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -21769,6 +23834,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -21779,6 +23845,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -21799,6 +23866,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -21861,6 +23929,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map volumeMounts: items: properties: @@ -21872,6 +23943,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -21881,6 +23954,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map workingDir: type: string required: @@ -21914,34 +23990,40 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: 
- key type: object + x-kubernetes-map-type: atomic keytabSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic passwordSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic realm: type: string serviceName: @@ -21951,12 +24033,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - authType - realm @@ -21974,23 +24058,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -22004,23 +24092,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -22034,23 +24126,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -22065,23 +24161,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + 
x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -22089,12 +24189,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object topic: type: string @@ -22129,12 +24231,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -22144,6 +24248,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -22159,17 +24264,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -22181,19 +24289,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -22243,6 +24355,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -22271,16 +24385,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -22339,6 +24464,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -22372,34 +24499,40 @@ spec: key: type: string name: + default: "" type: 
string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic keytabSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic passwordSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic realm: type: string serviceName: @@ -22409,12 +24542,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - authType - realm @@ -22432,23 +24567,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -22462,23 +24601,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -22492,23 +24635,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -22523,23 +24670,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: 
boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -22547,12 +24698,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object topic: type: string @@ -22602,12 +24755,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -22617,6 +24772,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -22632,17 +24788,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -22654,19 +24813,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -22716,6 +24879,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -22744,16 +24909,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -22812,6 +24988,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -22862,12 +25040,14 @@ spec: key: 
type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object service: type: boolean @@ -22883,46 +25063,54 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic user: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object nkey: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic token: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object stream: type: string @@ -22933,23 +25121,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -22957,12 +25149,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object url: type: string @@ -22991,34 +25185,40 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic keytabSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic passwordSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic realm: type: string serviceName: @@ -23028,12 +25228,14 @@ spec: key: type: string name: 
+ default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - authType - realm @@ -23051,23 +25253,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -23081,23 +25287,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -23111,23 +25321,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -23142,23 +25356,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -23166,12 +25384,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object topic: type: string @@ -23189,46 +25409,54 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic user: properties: key: 
type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object nkey: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic token: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object queue: type: string @@ -23241,23 +25469,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -23265,12 +25497,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object url: type: string @@ -23288,12 +25522,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object msgIDHeaderKey: type: string @@ -23357,12 +25593,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -23372,6 +25610,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -23387,17 +25626,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -23409,19 +25651,23 @@ spec: configMapRef: properties: name: + default: "" type: string 
optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -23471,6 +25717,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -23499,16 +25747,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -23567,6 +25826,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -23604,12 +25865,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -23619,6 +25882,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -23634,17 +25898,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -23656,19 +25923,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -23718,6 +25989,8 @@ spec: properties: name: type: string + request: + type: string required: - name 
type: object @@ -23746,16 +26019,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -23814,6 +26098,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -23965,12 +26251,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -23980,6 +26268,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -23995,17 +26284,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -24017,19 +26309,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -24079,6 +26375,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -24107,16 +26405,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: 
array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -24175,6 +26484,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -24291,10 +26602,12 @@ spec: diskURI: type: string fsType: + default: ext4 type: string kind: type: string readOnly: + default: false type: boolean required: - diskName @@ -24318,6 +26631,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic path: type: string readOnly: @@ -24327,8 +26641,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic user: type: string required: @@ -24343,8 +26659,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic volumeID: type: string required: @@ -24370,11 +26688,14 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic csi: properties: driver: @@ -24384,8 +26705,10 @@ spec: nodePublishSecretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic readOnly: type: boolean volumeAttributes: @@ -24412,6 +26735,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: format: int32 type: integer @@ -24432,10 +26756,12 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object emptyDir: properties: @@ -24460,6 +26786,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic dataSource: properties: apiGroup: @@ -24472,6 +26799,7 @@ spec: - kind - name type: object + x-kubernetes-map-type: atomic dataSourceRef: properties: apiGroup: @@ -24518,16 +26846,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + 
x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic storageClassName: type: string volumeAttributesClassName: @@ -24554,10 +26885,12 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic wwids: items: type: string type: array + x-kubernetes-list-type: atomic type: object flexVolume: properties: @@ -24574,8 +26907,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic required: - driver type: object @@ -24632,6 +26967,13 @@ spec: required: - path type: object + image: + properties: + pullPolicy: + type: string + reference: + type: string + type: object iscsi: properties: chapAuthDiscovery: @@ -24645,6 +26987,7 @@ spec: iqn: type: string iscsiInterface: + default: default type: string lun: format: int32 @@ -24653,13 +26996,16 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic readOnly: type: boolean secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic targetPortal: type: string required: @@ -24733,16 +27079,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic name: type: string optional: @@ -24771,11 +27120,14 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic downwardAPI: properties: items: @@ -24790,6 +27142,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: format: int32 type: integer @@ -24810,10 +27163,12 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object 
secret: properties: @@ -24832,11 +27187,14 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic serviceAccountToken: properties: audience: @@ -24851,6 +27209,7 @@ spec: type: object type: object type: array + x-kubernetes-list-type: atomic type: object quobyte: properties: @@ -24877,21 +27236,27 @@ spec: image: type: string keyring: + default: /etc/ceph/keyring type: string monitors: items: type: string type: array + x-kubernetes-list-type: atomic pool: + default: rbd type: string readOnly: type: boolean secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic user: + default: admin type: string required: - image @@ -24900,6 +27265,7 @@ spec: scaleIO: properties: fsType: + default: xfs type: string gateway: type: string @@ -24910,11 +27276,14 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic sslEnabled: type: boolean storageMode: + default: ThinProvisioned type: string storagePool: type: string @@ -24947,6 +27316,7 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic optional: type: boolean secretName: @@ -24961,8 +27331,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic volumeName: type: string volumeNamespace: @@ -25097,12 +27469,6 @@ spec: specReplicasPath: .spec.replicas statusReplicasPath: .status.replicas status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] --- apiVersion: v1 kind: ServiceAccount diff --git a/config/namespace-install.yaml b/config/namespace-install.yaml index 48c6d0677c..4048cda38e 100644 --- a/config/namespace-install.yaml +++ b/config/namespace-install.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - 
controller-gen.kubebuilder.io/version: v0.8.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.3 name: interstepbufferservices.numaflow.numaproj.io spec: group: numaflow.numaproj.io @@ -63,11 +62,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -79,12 +80,15 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: format: int32 type: integer @@ -93,6 +97,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: properties: nodeSelectorTerms: @@ -109,11 +114,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -125,16 +132,21 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: properties: @@ -156,16 +168,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -189,20 +204,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: 
object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -216,6 +235,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -232,16 +252,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -265,26 +288,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: properties: @@ -306,16 +334,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -339,20 +370,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -366,6 +401,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic 
requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -382,16 +418,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -415,26 +454,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object automountServiceAccountToken: @@ -457,12 +501,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -472,6 +518,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -487,17 +534,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -509,19 +559,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -569,6 +623,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ 
-597,16 +653,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -661,6 +728,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic options: items: properties: @@ -670,10 +738,12 @@ spec: type: string type: object type: array + x-kubernetes-list-type: atomic searches: items: type: string type: array + x-kubernetes-list-type: atomic type: object dnsPolicy: type: string @@ -683,8 +753,10 @@ spec: items: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic type: array metadata: properties: @@ -713,12 +785,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -728,6 +802,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -743,17 +818,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -765,19 +843,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -825,6 +907,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -853,16 +937,27 @@ 
spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -949,12 +1044,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -964,6 +1061,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -979,17 +1077,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -1001,19 +1102,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -1061,6 +1166,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -1089,16 +1196,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -1156,13 +1274,10 @@ spec: properties: name: type: string - source: - properties: - resourceClaimName: - 
type: string - resourceClaimTemplateName: - type: string - type: object + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string required: - name type: object @@ -1171,6 +1286,15 @@ spec: type: string securityContext: properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object fsGroup: format: int64 type: integer @@ -1209,6 +1333,9 @@ spec: format: int64 type: integer type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string sysctls: items: properties: @@ -1221,6 +1348,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic windowsOptions: properties: gmsaCredentialSpec: @@ -1273,23 +1401,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic sentinelPassword: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic sentinelUrl: type: string url: @@ -1319,11 +1451,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -1335,12 +1469,15 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: format: int32 type: integer @@ -1349,6 +1486,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: properties: nodeSelectorTerms: @@ -1365,11 +1503,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -1381,16 +1521,21 @@ spec: items: type: 
string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: properties: @@ -1412,16 +1557,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -1445,20 +1593,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -1472,6 +1624,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -1488,16 +1641,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -1521,26 +1677,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + 
x-kubernetes-list-type: atomic type: object podAntiAffinity: properties: @@ -1562,16 +1723,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -1595,20 +1759,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -1622,6 +1790,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -1638,16 +1807,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -1671,26 +1843,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object automountServiceAccountToken: @@ -1701,6 +1878,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic options: items: properties: @@ -1710,10 +1888,12 @@ spec: type: string type: object type: array + 
x-kubernetes-list-type: atomic searches: items: type: string type: array + x-kubernetes-list-type: atomic type: object dnsPolicy: type: string @@ -1721,8 +1901,10 @@ spec: items: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic type: array initContainerTemplate: properties: @@ -1740,12 +1922,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -1755,6 +1939,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -1770,17 +1955,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -1792,19 +1980,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -1852,6 +2044,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -1880,16 +2074,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -1965,12 +2170,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: 
properties: apiVersion: @@ -1980,6 +2187,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -1995,17 +2203,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -2017,19 +2228,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -2077,6 +2292,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -2105,16 +2322,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -2201,12 +2429,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -2216,6 +2446,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -2231,17 +2462,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -2253,19 +2487,23 @@ spec: configMapRef: 
properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -2313,6 +2551,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -2341,16 +2581,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -2408,13 +2659,10 @@ spec: properties: name: type: string - source: - properties: - resourceClaimName: - type: string - resourceClaimTemplateName: - type: string - type: object + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string required: - name type: object @@ -2423,6 +2671,15 @@ spec: type: string securityContext: properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object fsGroup: format: int64 type: integer @@ -2461,6 +2718,9 @@ spec: format: int64 type: integer type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string sysctls: items: properties: @@ -2473,6 +2733,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic windowsOptions: properties: gmsaCredentialSpec: @@ -2501,12 +2762,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -2516,6 +2779,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: 
containerName: @@ -2531,17 +2795,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -2553,19 +2820,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -2613,6 +2884,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -2641,16 +2914,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -2784,46 +3068,54 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic user: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object nkey: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic token: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object streamConfig: type: string @@ -2841,23 +3133,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + 
x-kubernetes-map-type: atomic sentinelPassword: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic sentinelUrl: type: string url: @@ -2888,19 +3184,12 @@ spec: storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.8.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.3 name: monovertices.numaflow.numaproj.io spec: group: numaflow.numaproj.io @@ -2967,11 +3256,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -2983,12 +3274,15 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: format: int32 type: integer @@ -2997,6 +3291,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: properties: nodeSelectorTerms: @@ -3013,11 +3308,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -3029,16 +3326,21 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: properties: @@ -3060,16 +3362,19 @@ spec: items: type: string type: array + 
x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -3093,20 +3398,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -3120,6 +3429,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -3136,16 +3446,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -3169,26 +3482,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: properties: @@ -3210,16 +3528,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -3243,20 +3564,24 @@ 
spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -3270,6 +3595,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -3286,16 +3612,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -3319,26 +3648,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object automountServiceAccountToken: @@ -3359,12 +3693,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -3374,6 +3710,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -3389,17 +3726,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object 
required: - name @@ -3411,19 +3751,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -3471,6 +3815,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -3499,16 +3845,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -3579,11 +3936,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -3595,12 +3954,15 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: format: int32 type: integer @@ -3609,6 +3971,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: properties: nodeSelectorTerms: @@ -3625,11 +3988,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -3641,16 +4006,21 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + 
x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: properties: @@ -3672,16 +4042,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -3705,20 +4078,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -3732,6 +4109,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -3748,16 +4126,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -3781,26 +4162,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: properties: @@ -3822,16 +4208,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object 
type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -3855,20 +4244,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -3882,6 +4275,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -3898,16 +4292,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -3931,26 +4328,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object automountServiceAccountToken: @@ -3971,12 +4373,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -3986,6 +4390,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -4001,17 +4406,20 @@ spec: required: - resource type: 
object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -4023,19 +4431,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -4083,6 +4495,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -4111,16 +4525,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -4175,6 +4600,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic options: items: properties: @@ -4184,10 +4610,12 @@ spec: type: string type: object type: array + x-kubernetes-list-type: atomic searches: items: type: string type: array + x-kubernetes-list-type: atomic type: object dnsPolicy: type: string @@ -4195,8 +4623,10 @@ spec: items: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic type: array initContainerTemplate: properties: @@ -4214,12 +4644,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -4229,6 +4661,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -4244,17 +4677,20 @@ spec: required: - 
resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -4266,19 +4702,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -4326,6 +4766,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -4354,16 +4796,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -4440,13 +4893,10 @@ spec: properties: name: type: string - source: - properties: - resourceClaimName: - type: string - resourceClaimTemplateName: - type: string - type: object + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string required: - name type: object @@ -4455,6 +4905,15 @@ spec: type: string securityContext: properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object fsGroup: format: int64 type: integer @@ -4493,6 +4952,9 @@ spec: format: int64 type: integer type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string sysctls: items: properties: @@ -4505,6 +4967,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic windowsOptions: properties: gmsaCredentialSpec: @@ -4542,6 +5005,7 @@ spec: items: 
type: string type: array + x-kubernetes-list-type: atomic options: items: properties: @@ -4551,10 +5015,12 @@ spec: type: string type: object type: array + x-kubernetes-list-type: atomic searches: items: type: string type: array + x-kubernetes-list-type: atomic type: object dnsPolicy: type: string @@ -4562,8 +5028,10 @@ spec: items: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic type: array initContainers: items: @@ -4572,10 +5040,12 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic command: items: type: string type: array + x-kubernetes-list-type: atomic env: items: properties: @@ -4590,12 +5060,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -4605,6 +5077,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -4620,43 +5093,54 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map envFrom: items: properties: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array + x-kubernetes-list-type: atomic image: type: string imagePullPolicy: @@ -4671,6 +5155,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: properties: @@ -4688,6 +5173,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: 
type: string port: @@ -4729,6 +5215,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: properties: @@ -4746,6 +5233,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -4788,6 +5276,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -4798,6 +5287,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -4818,6 +5308,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -4892,6 +5383,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -4902,6 +5394,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -4922,6 +5415,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -4982,6 +5476,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -5012,16 +5508,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -5077,6 +5584,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -5087,6 +5595,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -5107,6 +5616,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -5169,6 +5679,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - devicePath + 
x-kubernetes-list-type: map volumeMounts: items: properties: @@ -5180,6 +5693,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -5189,6 +5704,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map workingDir: type: string required: @@ -5249,13 +5767,10 @@ spec: properties: name: type: string - source: - properties: - resourceClaimName: - type: string - resourceClaimTemplateName: - type: string - type: object + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string required: - name type: object @@ -5302,6 +5817,15 @@ spec: type: object securityContext: properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object fsGroup: format: int64 type: integer @@ -5340,6 +5864,9 @@ spec: format: int64 type: integer type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string sysctls: items: properties: @@ -5352,6 +5879,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic windowsOptions: properties: gmsaCredentialSpec: @@ -5373,10 +5901,12 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic command: items: type: string type: array + x-kubernetes-list-type: atomic env: items: properties: @@ -5391,12 +5921,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -5406,6 +5938,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -5421,43 +5954,54 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic 
type: object required: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map envFrom: items: properties: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array + x-kubernetes-list-type: atomic image: type: string imagePullPolicy: @@ -5472,6 +6016,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: properties: @@ -5489,6 +6034,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -5530,6 +6076,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: properties: @@ -5547,6 +6094,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -5589,6 +6137,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -5599,6 +6148,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -5619,6 +6169,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -5693,6 +6244,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -5703,6 +6255,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -5723,6 +6276,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -5783,6 +6337,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -5813,16 +6369,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + 
type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -5878,6 +6445,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -5888,6 +6456,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -5908,6 +6477,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -5970,6 +6540,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map volumeMounts: items: properties: @@ -5981,6 +6554,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -5990,6 +6565,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map workingDir: type: string required: @@ -6023,34 +6601,40 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic keytabSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic passwordSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic realm: type: string serviceName: @@ -6060,12 +6644,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - authType - realm @@ -6083,23 +6669,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: 
atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -6113,23 +6703,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -6143,23 +6737,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -6174,23 +6772,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -6198,12 +6800,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object topic: type: string @@ -6238,12 +6842,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -6253,6 +6859,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -6268,17 +6875,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: 
string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -6290,19 +6900,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -6352,6 +6966,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -6380,16 +6996,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -6448,6 +7075,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -6481,34 +7110,40 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic keytabSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic passwordSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic realm: type: string serviceName: @@ -6518,12 +7153,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - authType - realm @@ -6541,23 +7178,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + 
x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -6571,23 +7212,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -6601,23 +7246,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -6632,23 +7281,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -6656,12 +7309,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object topic: type: string @@ -6711,12 +7366,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -6726,6 +7383,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -6741,17 +7399,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string 
name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -6763,19 +7424,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -6825,6 +7490,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -6853,16 +7520,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -6921,6 +7599,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -6971,12 +7651,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object service: type: boolean @@ -6992,46 +7674,54 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic user: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object nkey: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic token: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: 
object stream: type: string @@ -7042,23 +7732,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -7066,12 +7760,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object url: type: string @@ -7100,34 +7796,40 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic keytabSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic passwordSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic realm: type: string serviceName: @@ -7137,12 +7839,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - authType - realm @@ -7160,23 +7864,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -7190,23 +7898,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: 
atomic required: - handshake - userSecret @@ -7220,23 +7932,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -7251,23 +7967,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -7275,12 +7995,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object topic: type: string @@ -7298,46 +8020,54 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic user: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object nkey: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic token: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object queue: type: string @@ -7350,23 +8080,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: 
boolean keySecret: @@ -7374,12 +8108,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object url: type: string @@ -7397,12 +8133,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object msgIDHeaderKey: type: string @@ -7466,12 +8204,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -7481,6 +8221,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -7496,17 +8237,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -7518,19 +8262,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -7580,6 +8328,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -7608,16 +8358,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -7676,6 +8437,8 @@ spec: type: string readOnly: type: boolean + 
recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -7713,12 +8476,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -7728,6 +8493,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -7743,17 +8509,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -7765,19 +8534,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -7827,6 +8600,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -7855,16 +8630,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -7923,6 +8709,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -7996,10 +8784,12 @@ spec: diskURI: type: string fsType: + default: ext4 type: string kind: type: string readOnly: + default: false type: boolean required: - diskName @@ -8023,6 +8813,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic path: type: string readOnly: @@ 
-8032,8 +8823,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic user: type: string required: @@ -8048,8 +8841,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic volumeID: type: string required: @@ -8075,11 +8870,14 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic csi: properties: driver: @@ -8089,8 +8887,10 @@ spec: nodePublishSecretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic readOnly: type: boolean volumeAttributes: @@ -8117,6 +8917,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: format: int32 type: integer @@ -8137,10 +8938,12 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object emptyDir: properties: @@ -8165,6 +8968,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic dataSource: properties: apiGroup: @@ -8177,6 +8981,7 @@ spec: - kind - name type: object + x-kubernetes-map-type: atomic dataSourceRef: properties: apiGroup: @@ -8223,16 +9028,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic storageClassName: type: string volumeAttributesClassName: @@ -8259,10 +9067,12 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic wwids: items: type: string type: array + x-kubernetes-list-type: atomic type: object flexVolume: properties: @@ -8279,8 +9089,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic required: - 
driver type: object @@ -8337,6 +9149,13 @@ spec: required: - path type: object + image: + properties: + pullPolicy: + type: string + reference: + type: string + type: object iscsi: properties: chapAuthDiscovery: @@ -8350,6 +9169,7 @@ spec: iqn: type: string iscsiInterface: + default: default type: string lun: format: int32 @@ -8358,13 +9178,16 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic readOnly: type: boolean secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic targetPortal: type: string required: @@ -8438,16 +9261,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic name: type: string optional: @@ -8476,11 +9302,14 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic downwardAPI: properties: items: @@ -8495,6 +9324,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: format: int32 type: integer @@ -8515,10 +9345,12 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object secret: properties: @@ -8537,11 +9369,14 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic serviceAccountToken: properties: audience: @@ -8556,6 +9391,7 @@ spec: type: object type: object type: array + x-kubernetes-list-type: atomic type: object quobyte: properties: @@ -8582,21 +9418,27 @@ spec: image: type: string keyring: + default: /etc/ceph/keyring type: string monitors: items: type: string type: array + 
x-kubernetes-list-type: atomic pool: + default: rbd type: string readOnly: type: boolean secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic user: + default: admin type: string required: - image @@ -8605,6 +9447,7 @@ spec: scaleIO: properties: fsType: + default: xfs type: string gateway: type: string @@ -8615,11 +9458,14 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic sslEnabled: type: boolean storageMode: + default: ThinProvisioned type: string storagePool: type: string @@ -8652,6 +9498,7 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic optional: type: boolean secretName: @@ -8666,8 +9513,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic volumeName: type: string volumeNamespace: @@ -8784,19 +9633,12 @@ spec: specReplicasPath: .spec.replicas statusReplicasPath: .status.replicas status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.8.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.3 name: pipelines.numaflow.numaproj.io spec: group: numaflow.numaproj.io @@ -8967,12 +9809,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -8982,6 +9826,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -8997,17 +9842,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - 
name @@ -9019,19 +9867,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -9081,6 +9933,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -9109,16 +9963,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -9177,6 +10042,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -9224,10 +10091,12 @@ spec: diskURI: type: string fsType: + default: ext4 type: string kind: type: string readOnly: + default: false type: boolean required: - diskName @@ -9251,6 +10120,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic path: type: string readOnly: @@ -9260,8 +10130,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic user: type: string required: @@ -9276,8 +10148,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic volumeID: type: string required: @@ -9303,11 +10177,14 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic csi: properties: driver: @@ -9317,8 +10194,10 @@ spec: nodePublishSecretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic 
readOnly: type: boolean volumeAttributes: @@ -9345,6 +10224,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: format: int32 type: integer @@ -9365,10 +10245,12 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object emptyDir: properties: @@ -9393,6 +10275,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic dataSource: properties: apiGroup: @@ -9405,6 +10288,7 @@ spec: - kind - name type: object + x-kubernetes-map-type: atomic dataSourceRef: properties: apiGroup: @@ -9451,16 +10335,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic storageClassName: type: string volumeAttributesClassName: @@ -9487,10 +10374,12 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic wwids: items: type: string type: array + x-kubernetes-list-type: atomic type: object flexVolume: properties: @@ -9507,8 +10396,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic required: - driver type: object @@ -9565,6 +10456,13 @@ spec: required: - path type: object + image: + properties: + pullPolicy: + type: string + reference: + type: string + type: object iscsi: properties: chapAuthDiscovery: @@ -9578,6 +10476,7 @@ spec: iqn: type: string iscsiInterface: + default: default type: string lun: format: int32 @@ -9586,13 +10485,16 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic readOnly: type: boolean secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic targetPortal: type: string required: @@ -9666,16 +10568,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: 
atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic name: type: string optional: @@ -9704,11 +10609,14 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic downwardAPI: properties: items: @@ -9723,6 +10631,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: format: int32 type: integer @@ -9743,10 +10652,12 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object secret: properties: @@ -9765,11 +10676,14 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic serviceAccountToken: properties: audience: @@ -9784,6 +10698,7 @@ spec: type: object type: object type: array + x-kubernetes-list-type: atomic type: object quobyte: properties: @@ -9810,21 +10725,27 @@ spec: image: type: string keyring: + default: /etc/ceph/keyring type: string monitors: items: type: string type: array + x-kubernetes-list-type: atomic pool: + default: rbd type: string readOnly: type: boolean secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic user: + default: admin type: string required: - image @@ -9833,6 +10754,7 @@ spec: scaleIO: properties: fsType: + default: xfs type: string gateway: type: string @@ -9843,11 +10765,14 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic sslEnabled: type: boolean storageMode: + default: ThinProvisioned type: string storagePool: type: string @@ -9880,6 +10805,7 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic 
optional: type: boolean secretName: @@ -9894,8 +10820,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic volumeName: type: string volumeNamespace: @@ -9948,11 +10876,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -9964,12 +10894,15 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: format: int32 type: integer @@ -9978,6 +10911,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: properties: nodeSelectorTerms: @@ -9994,11 +10928,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -10010,16 +10946,21 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: properties: @@ -10041,16 +10982,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -10074,20 +11018,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: 
type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -10101,6 +11049,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -10117,16 +11066,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -10150,26 +11102,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: properties: @@ -10191,16 +11148,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -10224,20 +11184,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -10251,6 +11215,7 @@ spec: - weight type: object type: array + 
x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -10267,16 +11232,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -10300,26 +11268,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object automountServiceAccountToken: @@ -10340,12 +11313,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -10355,6 +11330,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -10370,17 +11346,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -10392,19 +11371,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -10452,6 +11435,8 @@ spec: properties: name: type: string + 
request: + type: string required: - name type: object @@ -10480,16 +11465,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -10544,6 +11540,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic options: items: properties: @@ -10553,10 +11550,12 @@ spec: type: string type: object type: array + x-kubernetes-list-type: atomic searches: items: type: string type: array + x-kubernetes-list-type: atomic type: object dnsPolicy: type: string @@ -10564,8 +11563,10 @@ spec: items: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic type: array initContainerTemplate: properties: @@ -10583,12 +11584,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -10598,6 +11601,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -10613,17 +11617,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -10635,19 +11642,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -10695,6 +11706,8 @@ spec: 
properties: name: type: string + request: + type: string required: - name type: object @@ -10723,16 +11736,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -10809,13 +11833,10 @@ spec: properties: name: type: string - source: - properties: - resourceClaimName: - type: string - resourceClaimTemplateName: - type: string - type: object + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string required: - name type: object @@ -10824,6 +11845,15 @@ spec: type: string securityContext: properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object fsGroup: format: int64 type: integer @@ -10862,6 +11892,9 @@ spec: format: int64 type: integer type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string sysctls: items: properties: @@ -10874,6 +11907,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic windowsOptions: properties: gmsaCredentialSpec: @@ -10927,11 +11961,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -10943,12 +11979,15 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: format: int32 type: integer @@ -10957,6 +11996,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: properties: 
nodeSelectorTerms: @@ -10973,11 +12013,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -10989,16 +12031,21 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: properties: @@ -11020,16 +12067,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -11053,20 +12103,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -11080,6 +12134,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -11096,16 +12151,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -11129,26 +12187,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + 
x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: properties: @@ -11170,16 +12233,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -11203,20 +12269,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -11230,6 +12300,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -11246,16 +12317,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -11279,26 +12353,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: 
object type: array + x-kubernetes-list-type: atomic type: object type: object automountServiceAccountToken: @@ -11322,12 +12401,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -11337,6 +12418,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -11352,17 +12434,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -11374,19 +12459,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -11434,6 +12523,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -11462,16 +12553,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -11526,6 +12628,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic options: items: properties: @@ -11535,10 +12638,12 @@ spec: type: string type: object type: array + x-kubernetes-list-type: atomic searches: items: type: string type: array + x-kubernetes-list-type: atomic type: object dnsPolicy: type: string @@ -11546,8 +12651,10 @@ spec: items: 
properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic type: array metadata: properties: @@ -11574,13 +12681,10 @@ spec: properties: name: type: string - source: - properties: - resourceClaimName: - type: string - resourceClaimTemplateName: - type: string - type: object + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string required: - name type: object @@ -11589,6 +12693,15 @@ spec: type: string securityContext: properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object fsGroup: format: int64 type: integer @@ -11627,6 +12740,9 @@ spec: format: int64 type: integer type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string sysctls: items: properties: @@ -11639,6 +12755,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic windowsOptions: properties: gmsaCredentialSpec: @@ -11695,11 +12812,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -11711,12 +12830,15 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: format: int32 type: integer @@ -11725,6 +12847,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: properties: nodeSelectorTerms: @@ -11741,11 +12864,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -11757,16 +12882,21 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + 
x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: properties: @@ -11788,16 +12918,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -11821,20 +12954,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -11848,6 +12985,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -11864,16 +13002,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -11897,26 +13038,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: properties: @@ -11938,16 +13084,19 @@ spec: 
items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -11971,20 +13120,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -11998,6 +13151,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -12014,16 +13168,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -12047,26 +13204,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object automountServiceAccountToken: @@ -12087,12 +13249,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -12102,6 +13266,7 @@ spec: required: - fieldPath type: object + 
x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -12117,17 +13282,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -12139,19 +13307,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -12199,6 +13371,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -12227,16 +13401,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -12291,6 +13476,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic options: items: properties: @@ -12300,10 +13486,12 @@ spec: type: string type: object type: array + x-kubernetes-list-type: atomic searches: items: type: string type: array + x-kubernetes-list-type: atomic type: object dnsPolicy: type: string @@ -12311,8 +13499,10 @@ spec: items: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic type: array initContainerTemplate: properties: @@ -12330,12 +13520,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -12345,6 +13537,7 @@ spec: 
required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -12360,17 +13553,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -12382,19 +13578,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -12442,6 +13642,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -12470,16 +13672,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -12553,13 +13766,10 @@ spec: properties: name: type: string - source: - properties: - resourceClaimName: - type: string - resourceClaimTemplateName: - type: string - type: object + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string required: - name type: object @@ -12568,6 +13778,15 @@ spec: type: string securityContext: properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object fsGroup: format: int64 type: integer @@ -12606,6 +13825,9 @@ spec: format: int64 type: integer type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string sysctls: items: properties: @@ -12618,6 
+13840,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic windowsOptions: properties: gmsaCredentialSpec: @@ -12671,11 +13894,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -12687,12 +13912,15 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: format: int32 type: integer @@ -12701,6 +13929,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: properties: nodeSelectorTerms: @@ -12717,11 +13946,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -12733,16 +13964,21 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: properties: @@ -12764,16 +14000,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -12797,20 +14036,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic 
namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -12824,6 +14067,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -12840,16 +14084,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -12873,26 +14120,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: properties: @@ -12914,16 +14166,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -12947,20 +14202,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -12974,6 +14233,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: 
properties: @@ -12990,16 +14250,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -13023,26 +14286,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object automountServiceAccountToken: @@ -13063,12 +14331,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -13078,6 +14348,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -13093,17 +14364,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -13115,19 +14389,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -13175,6 +14453,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -13203,16 +14483,27 @@ spec: 
properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -13267,6 +14558,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic options: items: properties: @@ -13276,10 +14568,12 @@ spec: type: string type: object type: array + x-kubernetes-list-type: atomic searches: items: type: string type: array + x-kubernetes-list-type: atomic type: object dnsPolicy: type: string @@ -13287,8 +14581,10 @@ spec: items: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic type: array initContainerTemplate: properties: @@ -13306,12 +14602,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -13321,6 +14619,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -13336,17 +14635,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -13358,19 +14660,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -13418,6 +14724,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ 
-13446,16 +14754,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -13529,13 +14848,10 @@ spec: properties: name: type: string - source: - properties: - resourceClaimName: - type: string - resourceClaimTemplateName: - type: string - type: object + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string required: - name type: object @@ -13544,6 +14860,15 @@ spec: type: string securityContext: properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object fsGroup: format: int64 type: integer @@ -13582,6 +14907,9 @@ spec: format: int64 type: integer type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string sysctls: items: properties: @@ -13594,6 +14922,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic windowsOptions: properties: gmsaCredentialSpec: @@ -13649,11 +14978,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -13665,12 +14996,15 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: format: int32 type: integer @@ -13679,6 +15013,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: properties: nodeSelectorTerms: @@ -13695,11 +15030,13 @@ spec: items: type: string type: array + 
x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -13711,16 +15048,21 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: properties: @@ -13742,16 +15084,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -13775,20 +15120,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -13802,6 +15151,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -13818,16 +15168,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -13851,26 +15204,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: 
object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: properties: @@ -13892,16 +15250,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -13925,20 +15286,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -13952,6 +15317,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -13968,16 +15334,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -14001,26 +15370,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object 
automountServiceAccountToken: @@ -14041,12 +15415,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -14056,6 +15432,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -14071,17 +15448,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -14093,19 +15473,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -14153,6 +15537,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -14181,16 +15567,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -14245,6 +15642,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic options: items: properties: @@ -14254,10 +15652,12 @@ spec: type: string type: object type: array + x-kubernetes-list-type: atomic searches: items: type: string type: array + x-kubernetes-list-type: atomic type: object dnsPolicy: type: string @@ -14265,8 +15665,10 @@ spec: items: properties: name: + default: "" type: string type: object + 
x-kubernetes-map-type: atomic type: array initContainerTemplate: properties: @@ -14284,12 +15686,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -14299,6 +15703,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -14314,17 +15719,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -14336,19 +15744,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -14396,6 +15808,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -14424,16 +15838,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -14489,10 +15914,12 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic command: items: type: string type: array + x-kubernetes-list-type: atomic env: items: properties: @@ -14507,12 +15934,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ 
-14522,6 +15951,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -14537,43 +15967,54 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map envFrom: items: properties: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array + x-kubernetes-list-type: atomic image: type: string imagePullPolicy: @@ -14588,6 +16029,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: properties: @@ -14605,6 +16047,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -14646,6 +16089,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: properties: @@ -14663,6 +16107,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -14705,6 +16150,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -14715,6 +16161,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -14735,6 +16182,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -14809,6 +16257,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -14819,6 +16268,7 @@ spec: format: int32 type: integer service: 
+ default: "" type: string required: - port @@ -14839,6 +16289,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -14899,6 +16350,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -14929,16 +16382,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -14994,6 +16458,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -15004,6 +16469,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -15024,6 +16490,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -15086,6 +16553,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map volumeMounts: items: properties: @@ -15097,6 +16567,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -15106,6 +16578,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map workingDir: type: string required: @@ -15156,13 +16631,10 @@ spec: properties: name: type: string - source: - properties: - resourceClaimName: - type: string - resourceClaimTemplateName: - type: string - type: object + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string required: - name type: object @@ -15209,6 +16681,15 @@ spec: type: object securityContext: properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: 
string + required: + - type + type: object fsGroup: format: int64 type: integer @@ -15247,6 +16728,9 @@ spec: format: int64 type: integer type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string sysctls: items: properties: @@ -15259,6 +16743,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic windowsOptions: properties: gmsaCredentialSpec: @@ -15293,12 +16778,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -15308,6 +16795,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -15323,17 +16811,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -15345,19 +16836,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -15405,6 +16900,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -15433,16 +16930,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -15498,10 +17006,12 @@ spec: items: type: string type: array + 
x-kubernetes-list-type: atomic command: items: type: string type: array + x-kubernetes-list-type: atomic env: items: properties: @@ -15516,12 +17026,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -15531,6 +17043,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -15546,43 +17059,54 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map envFrom: items: properties: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array + x-kubernetes-list-type: atomic image: type: string imagePullPolicy: @@ -15597,6 +17121,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: properties: @@ -15614,6 +17139,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -15655,6 +17181,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: properties: @@ -15672,6 +17199,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -15714,6 +17242,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -15724,6 +17253,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - 
port @@ -15744,6 +17274,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -15818,6 +17349,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -15828,6 +17360,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -15848,6 +17381,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -15908,6 +17442,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -15938,16 +17474,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -16003,6 +17550,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -16013,6 +17561,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -16033,6 +17582,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -16095,6 +17645,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map volumeMounts: items: properties: @@ -16106,6 +17659,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -16115,6 +17670,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map workingDir: type: string required: @@ -16148,34 +17706,40 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: 
- key type: object + x-kubernetes-map-type: atomic keytabSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic passwordSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic realm: type: string serviceName: @@ -16185,12 +17749,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - authType - realm @@ -16208,23 +17774,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -16238,23 +17808,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -16268,23 +17842,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -16299,23 +17877,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + 
x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -16323,12 +17905,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object topic: type: string @@ -16363,12 +17947,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -16378,6 +17964,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -16393,17 +17980,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -16415,19 +18005,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -16477,6 +18071,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -16505,16 +18101,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -16573,6 +18180,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -16606,34 +18215,40 @@ spec: key: type: string name: + default: "" type: 
string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic keytabSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic passwordSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic realm: type: string serviceName: @@ -16643,12 +18258,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - authType - realm @@ -16666,23 +18283,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -16696,23 +18317,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -16726,23 +18351,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -16757,23 +18386,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: 
boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -16781,12 +18414,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object topic: type: string @@ -16836,12 +18471,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -16851,6 +18488,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -16866,17 +18504,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -16888,19 +18529,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -16950,6 +18595,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -16978,16 +18625,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -17046,6 +18704,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -17096,12 +18756,14 @@ spec: key: 
type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object service: type: boolean @@ -17117,46 +18779,54 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic user: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object nkey: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic token: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object stream: type: string @@ -17167,23 +18837,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -17191,12 +18865,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object url: type: string @@ -17225,34 +18901,40 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic keytabSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic passwordSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic realm: type: string serviceName: @@ -17262,12 +18944,14 @@ spec: key: type: string name: 
+ default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - authType - realm @@ -17285,23 +18969,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -17315,23 +19003,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -17345,23 +19037,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -17376,23 +19072,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -17400,12 +19100,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object topic: type: string @@ -17423,46 +19125,54 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic user: properties: key: 
type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object nkey: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic token: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object queue: type: string @@ -17475,23 +19185,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -17499,12 +19213,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object url: type: string @@ -17522,12 +19238,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object msgIDHeaderKey: type: string @@ -17591,12 +19309,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -17606,6 +19326,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -17621,17 +19342,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -17643,19 +19367,23 @@ spec: configMapRef: properties: name: + default: "" type: string 
optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -17705,6 +19433,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -17733,16 +19463,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -17801,6 +19542,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -17838,12 +19581,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -17853,6 +19598,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -17868,17 +19614,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -17890,19 +19639,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -17952,6 +19705,8 @@ spec: properties: name: type: string + request: + type: string required: - name 
type: object @@ -17980,16 +19735,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -18048,6 +19814,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -18122,12 +19890,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -18137,6 +19907,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -18152,17 +19923,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -18174,19 +19948,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -18236,6 +20014,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -18264,16 +20044,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: 
array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -18332,6 +20123,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -18448,10 +20241,12 @@ spec: diskURI: type: string fsType: + default: ext4 type: string kind: type: string readOnly: + default: false type: boolean required: - diskName @@ -18475,6 +20270,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic path: type: string readOnly: @@ -18484,8 +20280,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic user: type: string required: @@ -18500,8 +20298,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic volumeID: type: string required: @@ -18527,11 +20327,14 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic csi: properties: driver: @@ -18541,8 +20344,10 @@ spec: nodePublishSecretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic readOnly: type: boolean volumeAttributes: @@ -18569,6 +20374,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: format: int32 type: integer @@ -18589,10 +20395,12 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object emptyDir: properties: @@ -18617,6 +20425,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic dataSource: properties: apiGroup: @@ -18629,6 +20438,7 @@ spec: - kind - name type: object + x-kubernetes-map-type: atomic dataSourceRef: properties: apiGroup: @@ -18675,16 +20485,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + 
x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic storageClassName: type: string volumeAttributesClassName: @@ -18711,10 +20524,12 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic wwids: items: type: string type: array + x-kubernetes-list-type: atomic type: object flexVolume: properties: @@ -18731,8 +20546,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic required: - driver type: object @@ -18789,6 +20606,13 @@ spec: required: - path type: object + image: + properties: + pullPolicy: + type: string + reference: + type: string + type: object iscsi: properties: chapAuthDiscovery: @@ -18802,6 +20626,7 @@ spec: iqn: type: string iscsiInterface: + default: default type: string lun: format: int32 @@ -18810,13 +20635,16 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic readOnly: type: boolean secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic targetPortal: type: string required: @@ -18890,16 +20718,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic name: type: string optional: @@ -18928,11 +20759,14 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic downwardAPI: properties: items: @@ -18947,6 +20781,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: format: int32 type: integer @@ -18967,10 +20802,12 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object 
secret: properties: @@ -18989,11 +20826,14 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic serviceAccountToken: properties: audience: @@ -19008,6 +20848,7 @@ spec: type: object type: object type: array + x-kubernetes-list-type: atomic type: object quobyte: properties: @@ -19034,21 +20875,27 @@ spec: image: type: string keyring: + default: /etc/ceph/keyring type: string monitors: items: type: string type: array + x-kubernetes-list-type: atomic pool: + default: rbd type: string readOnly: type: boolean secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic user: + default: admin type: string required: - image @@ -19057,6 +20904,7 @@ spec: scaleIO: properties: fsType: + default: xfs type: string gateway: type: string @@ -19067,11 +20915,14 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic sslEnabled: type: boolean storageMode: + default: ThinProvisioned type: string storagePool: type: string @@ -19104,6 +20955,7 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic optional: type: boolean secretName: @@ -19118,8 +20970,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic volumeName: type: string volumeNamespace: @@ -19251,19 +21105,12 @@ spec: storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.8.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.3 name: vertices.numaflow.numaproj.io spec: group: numaflow.numaproj.io @@ -19330,11 +21177,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key 
- operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -19346,12 +21195,15 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: format: int32 type: integer @@ -19360,6 +21212,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: properties: nodeSelectorTerms: @@ -19376,11 +21229,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: items: properties: @@ -19392,16 +21247,21 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: properties: @@ -19423,16 +21283,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -19456,20 +21319,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -19483,6 +21350,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic 
requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -19499,16 +21367,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -19532,26 +21403,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: properties: @@ -19573,16 +21449,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -19606,20 +21485,24 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: @@ -19633,6 +21516,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: items: properties: @@ -19649,16 +21533,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: 
additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic matchLabelKeys: items: type: string @@ -19682,26 +21569,31 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic namespaces: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object automountServiceAccountToken: @@ -19722,12 +21614,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -19737,6 +21631,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -19752,17 +21647,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -19774,19 +21672,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -19834,6 +21736,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -19862,16 +21766,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: 
items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -19926,6 +21841,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic options: items: properties: @@ -19935,10 +21851,12 @@ spec: type: string type: object type: array + x-kubernetes-list-type: atomic searches: items: type: string type: array + x-kubernetes-list-type: atomic type: object dnsPolicy: type: string @@ -20023,8 +21941,10 @@ spec: items: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic type: array initContainerTemplate: properties: @@ -20042,12 +21962,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -20057,6 +21979,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -20072,17 +21995,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -20094,19 +22020,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ -20154,6 +22084,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -20182,16 +22114,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object 
capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -20247,10 +22190,12 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic command: items: type: string type: array + x-kubernetes-list-type: atomic env: items: properties: @@ -20265,12 +22210,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -20280,6 +22227,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -20295,43 +22243,54 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map envFrom: items: properties: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array + x-kubernetes-list-type: atomic image: type: string imagePullPolicy: @@ -20346,6 +22305,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: properties: @@ -20363,6 +22323,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -20404,6 +22365,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: properties: @@ -20421,6 +22383,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: 
atomic path: type: string port: @@ -20463,6 +22426,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -20473,6 +22437,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -20493,6 +22458,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -20567,6 +22533,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -20577,6 +22544,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -20597,6 +22565,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -20657,6 +22626,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -20687,16 +22658,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -20752,6 +22734,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -20762,6 +22745,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -20782,6 +22766,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -20844,6 +22829,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map volumeMounts: items: properties: @@ -20855,6 +22843,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ 
-20864,6 +22854,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map workingDir: type: string required: @@ -20922,13 +22915,10 @@ spec: properties: name: type: string - source: - properties: - resourceClaimName: - type: string - resourceClaimTemplateName: - type: string - type: object + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string required: - name type: object @@ -20975,6 +22965,15 @@ spec: type: object securityContext: properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object fsGroup: format: int64 type: integer @@ -21013,6 +23012,9 @@ spec: format: int64 type: integer type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string sysctls: items: properties: @@ -21025,6 +23027,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic windowsOptions: properties: gmsaCredentialSpec: @@ -21059,12 +23062,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -21074,6 +23079,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -21089,17 +23095,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -21111,19 +23120,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array imagePullPolicy: @@ 
-21171,6 +23184,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -21199,16 +23214,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -21264,10 +23290,12 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic command: items: type: string type: array + x-kubernetes-list-type: atomic env: items: properties: @@ -21282,12 +23310,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -21297,6 +23327,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -21312,43 +23343,54 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map envFrom: items: properties: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array + x-kubernetes-list-type: atomic image: type: string imagePullPolicy: @@ -21363,6 +23405,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: properties: @@ -21380,6 
+23423,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -21421,6 +23465,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: properties: @@ -21438,6 +23483,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -21480,6 +23526,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -21490,6 +23537,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -21510,6 +23558,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -21584,6 +23633,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -21594,6 +23644,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -21614,6 +23665,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: string port: @@ -21674,6 +23726,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -21704,16 +23758,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -21769,6 +23834,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: format: int32 @@ -21779,6 +23845,7 @@ spec: format: int32 type: integer service: + default: "" type: string required: - port @@ -21799,6 +23866,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: type: 
string port: @@ -21861,6 +23929,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map volumeMounts: items: properties: @@ -21872,6 +23943,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -21881,6 +23954,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map workingDir: type: string required: @@ -21914,34 +23990,40 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic keytabSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic passwordSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic realm: type: string serviceName: @@ -21951,12 +24033,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - authType - realm @@ -21974,23 +24058,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -22004,23 +24092,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -22034,23 +24126,27 @@ spec: key: type: string name: + 
default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -22065,23 +24161,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -22089,12 +24189,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object topic: type: string @@ -22129,12 +24231,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -22144,6 +24248,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -22159,17 +24264,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -22181,19 +24289,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -22243,6 +24355,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -22271,16 +24385,27 @@ spec: 
properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -22339,6 +24464,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -22372,34 +24499,40 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic keytabSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic passwordSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic realm: type: string serviceName: @@ -22409,12 +24542,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - authType - realm @@ -22432,23 +24567,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -22462,23 +24601,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -22492,23 +24635,27 @@ 
spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -22523,23 +24670,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -22547,12 +24698,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object topic: type: string @@ -22602,12 +24755,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -22617,6 +24772,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -22632,17 +24788,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -22654,19 +24813,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -22716,6 +24879,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object 
@@ -22744,16 +24909,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -22812,6 +24988,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -22862,12 +25040,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object service: type: boolean @@ -22883,46 +25063,54 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic user: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object nkey: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic token: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object stream: type: string @@ -22933,23 +25121,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -22957,12 +25149,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object url: 
type: string @@ -22991,34 +25185,40 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic keytabSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic passwordSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic realm: type: string serviceName: @@ -23028,12 +25228,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - authType - realm @@ -23051,23 +25253,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -23081,23 +25287,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -23111,23 +25321,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic userSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic required: - handshake - userSecret @@ -23142,23 +25356,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic 
certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -23166,12 +25384,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object topic: type: string @@ -23189,46 +25409,54 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic user: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object nkey: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic token: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object queue: type: string @@ -23241,23 +25469,27 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic certSecret: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic insecureSkipVerify: type: boolean keySecret: @@ -23265,12 +25497,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object url: type: string @@ -23288,12 +25522,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object msgIDHeaderKey: type: string @@ -23357,12 +25593,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: 
object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -23372,6 +25610,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -23387,17 +25626,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -23409,19 +25651,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -23471,6 +25717,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -23499,16 +25747,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -23567,6 +25826,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -23604,12 +25865,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -23619,6 +25882,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -23634,17 +25898,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: 
string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -23656,19 +25923,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -23718,6 +25989,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -23746,16 +26019,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -23814,6 +26098,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -23965,12 +26251,14 @@ spec: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic fieldRef: properties: apiVersion: @@ -23980,6 +26268,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: properties: containerName: @@ -23995,17 +26284,20 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: properties: key: type: string name: + default: "" type: string optional: type: boolean required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name @@ -24017,19 +26309,23 @@ spec: configMapRef: properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic prefix: type: string secretRef: 
properties: name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array image: @@ -24079,6 +26375,8 @@ spec: properties: name: type: string + request: + type: string required: - name type: object @@ -24107,16 +26405,27 @@ spec: properties: allowPrivilegeEscalation: type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: properties: add: items: type: string type: array + x-kubernetes-list-type: atomic drop: items: type: string type: array + x-kubernetes-list-type: atomic type: object privileged: type: boolean @@ -24175,6 +26484,8 @@ spec: type: string readOnly: type: boolean + recursiveReadOnly: + type: string subPath: type: string subPathExpr: @@ -24291,10 +26602,12 @@ spec: diskURI: type: string fsType: + default: ext4 type: string kind: type: string readOnly: + default: false type: boolean required: - diskName @@ -24318,6 +26631,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic path: type: string readOnly: @@ -24327,8 +26641,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic user: type: string required: @@ -24343,8 +26659,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic volumeID: type: string required: @@ -24370,11 +26688,14 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic csi: properties: driver: @@ -24384,8 +26705,10 @@ spec: nodePublishSecretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic readOnly: type: boolean volumeAttributes: @@ -24412,6 +26735,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: format: int32 type: integer @@ -24432,10 
+26756,12 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object emptyDir: properties: @@ -24460,6 +26786,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic dataSource: properties: apiGroup: @@ -24472,6 +26799,7 @@ spec: - kind - name type: object + x-kubernetes-map-type: atomic dataSourceRef: properties: apiGroup: @@ -24518,16 +26846,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + x-kubernetes-map-type: atomic storageClassName: type: string volumeAttributesClassName: @@ -24554,10 +26885,12 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic wwids: items: type: string type: array + x-kubernetes-list-type: atomic type: object flexVolume: properties: @@ -24574,8 +26907,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic required: - driver type: object @@ -24632,6 +26967,13 @@ spec: required: - path type: object + image: + properties: + pullPolicy: + type: string + reference: + type: string + type: object iscsi: properties: chapAuthDiscovery: @@ -24645,6 +26987,7 @@ spec: iqn: type: string iscsiInterface: + default: default type: string lun: format: int32 @@ -24653,13 +26996,16 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic readOnly: type: boolean secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic targetPortal: type: string required: @@ -24733,16 +27079,19 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string type: object type: object + 
x-kubernetes-map-type: atomic name: type: string optional: @@ -24771,11 +27120,14 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic downwardAPI: properties: items: @@ -24790,6 +27142,7 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: format: int32 type: integer @@ -24810,10 +27163,12 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object secret: properties: @@ -24832,11 +27187,14 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" type: string optional: type: boolean type: object + x-kubernetes-map-type: atomic serviceAccountToken: properties: audience: @@ -24851,6 +27209,7 @@ spec: type: object type: object type: array + x-kubernetes-list-type: atomic type: object quobyte: properties: @@ -24877,21 +27236,27 @@ spec: image: type: string keyring: + default: /etc/ceph/keyring type: string monitors: items: type: string type: array + x-kubernetes-list-type: atomic pool: + default: rbd type: string readOnly: type: boolean secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic user: + default: admin type: string required: - image @@ -24900,6 +27265,7 @@ spec: scaleIO: properties: fsType: + default: xfs type: string gateway: type: string @@ -24910,11 +27276,14 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: atomic sslEnabled: type: boolean storageMode: + default: ThinProvisioned type: string storagePool: type: string @@ -24947,6 +27316,7 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic optional: type: boolean secretName: @@ -24961,8 +27331,10 @@ spec: secretRef: properties: name: + default: "" type: string type: object + x-kubernetes-map-type: 
atomic volumeName: type: string volumeNamespace: @@ -25097,12 +27469,6 @@ spec: specReplicasPath: .spec.replicas statusReplicasPath: .status.replicas status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] --- apiVersion: v1 kind: ServiceAccount diff --git a/go.mod b/go.mod index ba62a6f28d..49ee6df1df 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/numaproj/numaflow -go 1.22.7 +go 1.23.1 require ( github.com/IBM/sarama v1.43.2 @@ -26,21 +26,21 @@ require ( github.com/google/uuid v1.6.0 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 + github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/imdario/mergo v0.3.16 github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe github.com/nats-io/nats-server/v2 v2.10.20 github.com/nats-io/nats.go v1.37.0 github.com/numaproj/numaflow-go v0.8.2-0.20241001031210-60188185d9c0 - github.com/prometheus/client_golang v1.18.0 - github.com/prometheus/client_model v0.5.0 - github.com/prometheus/common v0.45.0 + github.com/prometheus/client_golang v1.19.1 + github.com/prometheus/client_model v0.6.1 + github.com/prometheus/common v0.55.0 github.com/redis/go-redis/v9 v9.0.3 github.com/robfig/cron/v3 v3.0.1 github.com/soheilhy/cmux v0.1.5 github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 - github.com/spf13/cobra v1.8.0 + github.com/spf13/cobra v1.8.1 github.com/spf13/viper v1.18.2 github.com/stretchr/testify v1.9.0 github.com/xdg-go/scram v1.1.2 @@ -52,21 +52,20 @@ require ( golang.org/x/net v0.29.0 golang.org/x/oauth2 v0.21.0 golang.org/x/sync v0.8.0 - golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d + golang.org/x/tools v0.24.0 google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 google.golang.org/grpc v1.66.0 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.4.0 
google.golang.org/protobuf v1.34.2 - k8s.io/api v0.29.2 - k8s.io/apimachinery v0.29.2 - k8s.io/client-go v0.29.0 - k8s.io/code-generator v0.29.2 - k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 - k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 + k8s.io/api v0.31.0 + k8s.io/apimachinery v0.31.0 + k8s.io/client-go v0.31.0 + k8s.io/code-generator v0.31.0 + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 k8s.io/metrics v0.23.3 - k8s.io/utils v0.0.0-20230726121419-3b25d923346b - sigs.k8s.io/controller-runtime v0.17.2 - sigs.k8s.io/controller-tools v0.8.0 + k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 + sigs.k8s.io/controller-runtime v0.19.0 + sigs.k8s.io/controller-tools v0.16.3 sigs.k8s.io/yaml v1.4.0 ) @@ -89,15 +88,15 @@ require ( github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect github.com/eapache/queue v1.1.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/evanphx/json-patch v4.12.0+incompatible // indirect - github.com/evanphx/json-patch/v5 v5.8.0 // indirect - github.com/fatih/color v1.15.0 // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect + github.com/fatih/color v1.17.0 // indirect github.com/fatih/structs v1.1.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/gabriel-vasile/mimetype v1.4.3 // indirect github.com/gin-contrib/sse v0.1.0 // indirect github.com/go-jose/go-jose/v4 v4.0.1 // indirect - github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-openapi/analysis v0.23.0 // indirect github.com/go-openapi/errors v0.22.0 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect @@ -111,9 +110,8 @@ require ( github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect github.com/go-playground/validator/v10 v10.19.0 // indirect - github.com/gobuffalo/flect v0.2.3 // indirect + 
github.com/gobuffalo/flect v1.0.2 // indirect github.com/gobwas/glob v0.2.3 // indirect - github.com/golang/glog v1.2.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.4 // indirect @@ -147,13 +145,12 @@ require ( github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/minio/highwayhash v1.0.3 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/spdystream v0.2.0 // indirect + github.com/moby/spdystream v0.4.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect @@ -166,7 +163,7 @@ require ( github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/procfs v0.12.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect @@ -189,6 +186,7 @@ require ( github.com/ugorji/go/codec v1.2.12 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/fasthttp v1.37.0 // indirect + github.com/x448/float16 v0.8.4 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect github.com/xdg-go/stringprep v1.0.4 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect @@ -200,7 +198,7 
@@ require ( go.mongodb.org/mongo-driver v1.15.0 // indirect golang.org/x/arch v0.7.0 // indirect golang.org/x/exp v0.0.0-20240531132922-fd00a4e0eefc // indirect - golang.org/x/mod v0.17.0 // indirect + golang.org/x/mod v0.20.0 // indirect golang.org/x/sys v0.25.0 // indirect golang.org/x/term v0.24.0 // indirect golang.org/x/text v0.18.0 // indirect @@ -208,14 +206,16 @@ require ( golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.29.0 // indirect - k8s.io/component-base v0.29.0 // indirect + k8s.io/apiextensions-apiserver v0.31.0 // indirect + k8s.io/gengo v0.0.0-20240911193312-2b36238f13e9 // indirect + k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // indirect k8s.io/klog v1.0.0 // indirect - k8s.io/klog/v2 v2.110.1 // indirect + k8s.io/klog/v2 v2.130.1 // indirect moul.io/http2curl/v2 v2.3.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect diff --git a/go.sum b/go.sum index 9670ccac4b..c6f4ec4e73 100644 --- a/go.sum +++ b/go.sum @@ -113,7 +113,7 @@ github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnht github.com/coreos/go-oidc/v3 v3.10.0 h1:tDnXHnLyiTVyT/2zLDGj09pFPkhND8Gl8lnTRhoEaJU= github.com/coreos/go-oidc/v3 v3.10.0/go.mod h1:5j11xcw0D3+SGxn6Z/WFADsgcWVMyNAlSQupk0KK3ac= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod 
h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -142,10 +142,10 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.8.0 h1:lRj6N9Nci7MvzrXuX6HFzU8XjmhPiXPlsKEy1u0KQro= -github.com/evanphx/json-patch/v5 v5.8.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= -github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= -github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= +github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= @@ -160,6 +160,8 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= 
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= github.com/gavv/httpexpect/v2 v2.16.0 h1:Ty2favARiTYTOkCRZGX7ojXXjGyNAIohM1lZ3vqaEwI= @@ -187,9 +189,8 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU= @@ -235,10 +236,10 @@ github.com/go-playground/validator/v10 v10.19.0/go.mod h1:dbuPbCMFw/DrkbEynArYaC github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-swagger/go-swagger v0.31.0 h1:H8eOYQnY2u7vNKWDNykv2xJP3pBhRG/R+SOCAmKrLlc= github.com/go-swagger/go-swagger v0.31.0/go.mod h1:WSigRRWEig8zV6t6Sm8Y+EmUjlzA/HoaZJ5edupq7po= -github.com/go-task/slim-sprig 
v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= -github.com/gobuffalo/flect v0.2.3 h1:f/ZukRnSNA/DUpSNDadko7Qc0PhGvsew35p/2tu+CRY= -github.com/gobuffalo/flect v0.2.3/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gobuffalo/flect v1.0.2 h1:eqjPGSo2WmjgY2XlpGwo2NXgL3RucAKo4k4qQMNA5sA= +github.com/gobuffalo/flect v1.0.2/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= @@ -249,8 +250,6 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -326,8 +325,8 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe 
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM= +github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -350,8 +349,8 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaW github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod 
h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= @@ -444,8 +443,6 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/minio/highwayhash v1.0.3 h1:kbnuUMoHYyVl7szWjSxJnxw11k2U709jqFPPmIUyD6Q= github.com/minio/highwayhash v1.0.3/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= @@ -459,8 +456,9 @@ github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/spdystream v0.4.0 h1:Vy79D6mHeJJjiPdFEL2yku1kl0chZpJfZcPpb16BRl8= +github.com/moby/spdystream v0.4.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= 
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -498,13 +496,13 @@ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108 github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.14.0 h1:vSmGj2Z5YPb9JwCWT6z6ihcUvDhuXLc3sJiqd3jMKAY= -github.com/onsi/ginkgo/v2 v2.14.0/go.mod h1:JkUdW7JkN0V6rFvsHcJ478egV3XH9NxpD27Hal/PhZw= +github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= +github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= -github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8= +github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= @@ -521,15 +519,15 @@ github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 
h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= -github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= -github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= -github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod 
h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/redis/go-redis/v9 v9.0.3 h1:+7mmR26M0IvyLxGZUHxu4GiBkJkVDid0Un+j4ScYu4k= @@ -574,8 +572,8 @@ github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNo github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= @@ -624,6 +622,8 @@ github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyC github.com/valyala/fasthttp v1.37.0 h1:7WHCyI7EAkQMVmrfBhWTCOaeROb1aCBiTopx63LkMbE= github.com/valyala/fasthttp v1.37.0/go.mod h1:t/G+3rLek+CyY9bnIE+YlMRddxVAAGjhxndDB4i4C0I= github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= @@ -726,8 +726,8 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= +golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -946,8 +946,8 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= +golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1074,6 +1074,8 @@ gopkg.in/check.v1 
v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= @@ -1102,42 +1104,42 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ= -k8s.io/api v0.29.2 h1:hBC7B9+MU+ptchxEqTNW2DkUosJpp1P+Wn6YncZ474A= -k8s.io/api v0.29.2/go.mod h1:sdIaaKuU7P44aoyyLlikSLayT6Vb7bvJNCX105xZXY0= -k8s.io/apiextensions-apiserver v0.29.0 h1:0VuspFG7Hj+SxyF/Z/2T0uFbI5gb5LRgEyUVE3Q4lV0= -k8s.io/apiextensions-apiserver v0.29.0/go.mod h1:TKmpy3bTS0mr9pylH0nOt/QzQRrW7/h7yLdRForMZwc= +k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo= +k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE= +k8s.io/apiextensions-apiserver v0.31.0 h1:fZgCVhGwsclj3qCw1buVXCV6khjRzKC5eCFt24kyLSk= +k8s.io/apiextensions-apiserver v0.31.0/go.mod h1:b9aMDEYaEe5sdK+1T0KU78ApR/5ZVp4i56VacZYEHxk= k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= -k8s.io/apimachinery v0.29.2 h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8= -k8s.io/apimachinery v0.29.2/go.mod 
h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= +k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc= +k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= k8s.io/client-go v0.23.3/go.mod h1:47oMd+YvAOqZM7pcQ6neJtBiFH7alOyfunYN48VsmwE= -k8s.io/client-go v0.29.0 h1:KmlDtFcrdUzOYrBhXHgKw5ycWzc3ryPX5mQe0SkG3y8= -k8s.io/client-go v0.29.0/go.mod h1:yLkXH4HKMAywcrD82KMSmfYg2DlE8mepPR4JGSo5n38= +k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8= +k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU= k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= -k8s.io/code-generator v0.29.2 h1:c9/iw2KnNpw2IRV+wwuG/Wns2TjPSgjWzbbjTevyiHI= -k8s.io/code-generator v0.29.2/go.mod h1:FwFi3C9jCrmbPjekhaCYcYG1n07CYiW1+PAPCockaos= -k8s.io/component-base v0.29.0 h1:T7rjd5wvLnPBV1vC4zWd/iWRbV8Mdxs+nGaoaFzGw3s= -k8s.io/component-base v0.29.0/go.mod h1:sADonFTQ9Zc9yFLghpDpmNXEdHyQmFIGbiuZbqAXQ1M= +k8s.io/code-generator v0.31.0 h1:w607nrMi1KeDKB3/F/J4lIoOgAwc+gV9ZKew4XRfMp8= +k8s.io/code-generator v0.31.0/go.mod h1:84y4w3es8rOJOUUP1rLsIiGlO1JuEaPFXQPA9e/K6U0= k8s.io/gengo v0.0.0-20201203183100-97869a43a9d9/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 h1:pWEwq4Asjm4vjW7vcsmijwBhOr1/shsbSYiWXmNGlks= -k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20240911193312-2b36238f13e9 h1:B0l8GxRsVc/tP/uCLBQdAjf2nBARx6u/r2OGuL/CyXQ= +k8s.io/gengo v0.0.0-20240911193312-2b36238f13e9/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo= +k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8= 
k8s.io/klog v0.2.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= -k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= k8s.io/metrics v0.23.3 h1:rX/RBOwqi0atFlTSlpbQ7CX5s/kfqGR9zEefCx9Rv1s= k8s.io/metrics v0.23.3/go.mod h1:Ut8TvkbsO4oMVeUzaTArvPrcw9QRFLs2XNzUlORjdYE= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= +k8s.io/utils 
v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= moul.io/http2curl/v2 v2.3.0 h1:9r3JfDzWPcbIklMOs2TnIFzDYvfAZvjeavG6EzP7jYs= moul.io/http2curl/v2 v2.3.0/go.mod h1:RW4hyBjTWSYDOxapodpNEtX0g5Eb16sxklBqmd2RHcE= nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= @@ -1145,10 +1147,10 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8 rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/controller-runtime v0.17.2 h1:FwHwD1CTUemg0pW2otk7/U5/i5m2ymzvOXdbeGOUvw0= -sigs.k8s.io/controller-runtime v0.17.2/go.mod h1:+MngTvIQQQhfXtwfdGw/UOQ/aIaqsYywfCINOtwMO/s= -sigs.k8s.io/controller-tools v0.8.0 h1:uUkfTGEwrguqYYfcI2RRGUnC8mYdCFDqfwPKUcNJh1o= -sigs.k8s.io/controller-tools v0.8.0/go.mod h1:qE2DXhVOiEq5ijmINcFbqi9GZrrUjzB1TuJU0xa6eoY= +sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q= +sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= +sigs.k8s.io/controller-tools v0.16.3 h1:z48C5/d4jCVQQvtiSBL5MYyZ3EO2eFIOXrIKMgHVhFY= +sigs.k8s.io/controller-tools v0.16.3/go.mod h1:AEj6k+w1kYpLZv2einOH3mj52ips4W/6FUjnB5tkJGs= sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= diff --git a/hack/generate-proto.sh b/hack/generate-proto.sh index 7d9f19cb67..b3b8d71c97 100755 --- a/hack/generate-proto.sh +++ b/hack/generate-proto.sh @@ -52,12 +52,17 @@ go install -mod=vendor ./vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc go install -mod=vendor 
./vendor/golang.org/x/tools/cmd/goimports go install -mod=vendor ./vendor/k8s.io/code-generator/cmd/go-to-protobuf -export GO111MODULE="off" +export GO111MODULE="on" + +# go-to-protobuf expects dependency proto files to be in $GOPATH/src. Copy them there. +rm -rf "${GOPATH}/src/k8s.io/apimachinery" && mkdir -p "${GOPATH}/src/k8s.io" && cp -r "${FAKE_REPOPATH}/vendor/k8s.io/apimachinery" "${GOPATH}/src/k8s.io" +rm -rf "${GOPATH}/src/k8s.io/api" && mkdir -p "${GOPATH}/src/k8s.io" && cp -r "${FAKE_REPOPATH}/vendor/k8s.io/api" "${GOPATH}/src/k8s.io" go-to-protobuf \ --go-header-file=./hack/boilerplate/boilerplate.go.txt \ --packages=github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1 \ - --apimachinery-packages=+k8s.io/apimachinery/pkg/util/intstr,+k8s.io/apimachinery/pkg/api/resource,k8s.io/apimachinery/pkg/runtime/schema,+k8s.io/apimachinery/pkg/runtime,k8s.io/apimachinery/pkg/apis/meta/v1,k8s.io/api/core/v1,k8s.io/api/policy/v1beta1 \ + --apimachinery-packages=+k8s.io/apimachinery/pkg/util/intstr,+k8s.io/apimachinery/pkg/api/resource,+k8s.io/apimachinery/pkg/runtime/schema,+k8s.io/apimachinery/pkg/runtime,k8s.io/apimachinery/pkg/apis/meta/v1,k8s.io/api/core/v1,k8s.io/api/policy/v1beta1 \ + --output-dir="${GOPATH}/src/" \ --proto-import ./vendor # Following 2 proto files are needed diff --git a/hack/openapi-gen.sh b/hack/openapi-gen.sh index 39e6140521..4e83240308 100755 --- a/hack/openapi-gen.sh +++ b/hack/openapi-gen.sh @@ -8,18 +8,12 @@ source $(dirname $0)/library.sh header "updating open-apis" ensure_vendor -make_fake_paths - -export GOPATH="${FAKE_GOPATH}" -export GO111MODULE="off" - -CODEGEN_PKG=${FAKE_REPOPATH}/vendor/k8s.io/kube-openapi -VERSION="v1alpha1" - -cd "${FAKE_REPOPATH}" +CODEGEN_PKG=${REPO_ROOT}/vendor/k8s.io/kube-openapi go run ${CODEGEN_PKG}/cmd/openapi-gen/openapi-gen.go \ --go-header-file ${REPO_ROOT}/hack/boilerplate/boilerplate.go.txt \ - --input-dirs github.com/numaproj/numaflow/pkg/apis/numaflow/${VERSION} \ - --output-package 
github.com/numaproj/numaflow/pkg/apis/numaflow/${VERSION} \ - $@ + --output-dir pkg/apis/numaflow/v1alpha1 \ + --output-pkg v1alpha1 \ + --output-file zz_generated.openapi.go \ + github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1 + diff --git a/hack/swagger-gen.sh b/hack/swagger-gen.sh index 1b523d2acb..d5e3f0ff36 100755 --- a/hack/swagger-gen.sh +++ b/hack/swagger-gen.sh @@ -20,7 +20,7 @@ if [ "`command -v swagger`" = "" ]; then go install -mod=vendor ./vendor/github.com/go-swagger/go-swagger/cmd/swagger fi -curl -Ls https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.29/api/openapi-spec/swagger.json -o ${k8s_swagger} +curl -Ls https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.31/api/openapi-spec/swagger.json -o ${k8s_swagger} go run ./hack/gen-openapi-spec/main.go ${VERSION} ${k8s_swagger} ${kubeified_swagger} diff --git a/hack/tools.go b/hack/tools.go index 38bdc36ffe..e788f37146 100644 --- a/hack/tools.go +++ b/hack/tools.go @@ -32,18 +32,6 @@ import ( _ "google.golang.org/grpc/cmd/protoc-gen-go-grpc" _ "google.golang.org/protobuf/cmd/protoc-gen-go" _ "k8s.io/code-generator" - _ "k8s.io/code-generator/cmd/client-gen" - _ "k8s.io/code-generator/cmd/conversion-gen" - _ "k8s.io/code-generator/cmd/deepcopy-gen" - _ "k8s.io/code-generator/cmd/defaulter-gen" - _ "k8s.io/code-generator/cmd/go-to-protobuf" - _ "k8s.io/code-generator/cmd/import-boss" - _ "k8s.io/code-generator/cmd/informer-gen" - _ "k8s.io/code-generator/cmd/lister-gen" - _ "k8s.io/code-generator/cmd/openapi-gen" - _ "k8s.io/code-generator/cmd/register-gen" - _ "k8s.io/code-generator/cmd/set-gen" - _ "k8s.io/gengo/examples/deepcopy-gen" _ "k8s.io/kube-openapi/cmd/openapi-gen" _ "sigs.k8s.io/controller-tools/cmd/controller-gen" ) diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh index 3ab861b73b..3bbc029f34 100755 --- a/hack/update-codegen.sh +++ b/hack/update-codegen.sh @@ -8,28 +8,44 @@ source $(dirname $0)/library.sh header "running codegen" ensure_vendor 
-make_fake_paths -export GOPATH="${FAKE_GOPATH}" -export GO111MODULE="off" +CODEGEN_PKG=${CODEGEN_PKG:-$(cd "${REPO_ROOT}"; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)} -cd "${FAKE_REPOPATH}" +source "${CODEGEN_PKG}/kube_codegen.sh" -CODEGEN_PKG=${CODEGEN_PKG:-$(cd "${FAKE_REPOPATH}"; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)} +THIS_PKG="github.com/numaproj/numaflow" -chmod +x ${CODEGEN_PKG}/*.sh +subheader "running deepcopy gen" -subheader "running codegen" -bash -x ${CODEGEN_PKG}/generate-groups.sh "deepcopy" \ - github.com/numaproj/numaflow/pkg/client github.com/numaproj/numaflow/pkg/apis \ - "numaflow:v1alpha1" \ - --go-header-file hack/boilerplate/boilerplate.go.txt +kube::codegen::gen_helpers \ + --boilerplate "${REPO_ROOT}/hack/boilerplate/boilerplate.go.txt" \ + "${REPO_ROOT}/pkg/apis" -bash -x ${CODEGEN_PKG}/generate-groups.sh "client,informer,lister" \ - github.com/numaproj/numaflow/pkg/client github.com/numaproj/numaflow/pkg/apis \ - "numaflow:v1alpha1" \ - --plural-exceptions="Vertex:Vertices,MonoVertex:MonoVertices" \ - --go-header-file hack/boilerplate/boilerplate.go.txt +subheader "running clients gen" + +kube::codegen::gen_client \ + --with-watch \ + --output-dir "${REPO_ROOT}/pkg/client" \ + --output-pkg "${THIS_PKG}/pkg/client" \ + --boilerplate "${REPO_ROOT}/hack/boilerplate/boilerplate.go.txt" \ + --plural-exceptions "Vertex:Vertices,MonoVertex:MonoVertices" \ + --one-input-api "numaflow/v1alpha1" \ + "${REPO_ROOT}/pkg/apis" + +# +# Do not use following scripts for openapi generation, because it also +# generates apimachinery APIs, which makes trouble for swagger gen. 
+# +#subheader "running openapi gen" + +#kube::codegen::gen_openapi \ +# --output-dir "${REPO_ROOT}/pkg/apis/numaflow/v1alpha1" \ +# --output-pkg "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" \ +# --report-filename "/dev/null" \ +# --update-report \ +# --boilerplate "${REPO_ROOT}/hack/boilerplate/boilerplate.go.txt" \ +# "${REPO_ROOT}/pkg/apis" +# # gofmt the tree subheader "running gofmt" diff --git a/pkg/apis/numaflow/v1alpha1/generated.proto b/pkg/apis/numaflow/v1alpha1/generated.proto index f96a526599..308fcd4382 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.proto +++ b/pkg/apis/numaflow/v1alpha1/generated.proto @@ -45,12 +45,12 @@ message AbstractPodTemplate { // If specified, the pod's tolerations. // +optional - repeated k8s.io.api.core.v1.Toleration tolerations = 3; + repeated .k8s.io.api.core.v1.Toleration tolerations = 3; // SecurityContext holds pod-level security attributes and common container settings. // Optional: Defaults to empty. See type description for default values of each field. // +optional - optional k8s.io.api.core.v1.PodSecurityContext securityContext = 4; + optional .k8s.io.api.core.v1.PodSecurityContext securityContext = 4; // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. // If specified, these secrets will be passed to individual puller implementations for them to use. For example, @@ -59,7 +59,7 @@ message AbstractPodTemplate { // +optional // +patchMergeKey=name // +patchStrategy=merge - repeated k8s.io.api.core.v1.LocalObjectReference imagePullSecrets = 5; + repeated .k8s.io.api.core.v1.LocalObjectReference imagePullSecrets = 5; // If specified, indicates the Redis pod's priority. 
"system-node-critical" // and "system-cluster-critical" are two special keywords which indicate the @@ -83,7 +83,7 @@ message AbstractPodTemplate { // The pod's scheduling constraints // More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ // +optional - optional k8s.io.api.core.v1.Affinity affinity = 8; + optional .k8s.io.api.core.v1.Affinity affinity = 8; // ServiceAccountName applied to the pod // +optional @@ -114,7 +114,7 @@ message AbstractPodTemplate { // Parameters specified here will be merged to the generated DNS // configuration based on DNSPolicy. // +optional - optional k8s.io.api.core.v1.PodDNSConfig dnsConfig = 13; + optional .k8s.io.api.core.v1.PodDNSConfig dnsConfig = 13; // ResourceClaims defines which ResourceClaims must be allocated and reserved // before the Pod is allowed to start. The resources will be made available to those @@ -122,7 +122,7 @@ message AbstractPodTemplate { // +patchMergeKey=name // +patchStrategy=merge,retainKeys // +optional - repeated k8s.io.api.core.v1.PodResourceClaim resourceClaims = 14; + repeated .k8s.io.api.core.v1.PodResourceClaim resourceClaims = 14; } message AbstractSink { @@ -170,7 +170,7 @@ message AbstractVertex { // +optional // +patchStrategy=merge // +patchMergeKey=name - repeated k8s.io.api.core.v1.Volume volumes = 8; + repeated .k8s.io.api.core.v1.Volume volumes = 8; // Limits define the limitations such as buffer read batch size for all the vertices of a pipeline, will override pipeline level settings // +optional @@ -183,11 +183,11 @@ message AbstractVertex { // List of customized init containers belonging to the pod. // More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ // +optional - repeated k8s.io.api.core.v1.Container initContainers = 11; + repeated .k8s.io.api.core.v1.Container initContainers = 11; // List of customized sidecar containers belonging to the pod. 
// +optional - repeated k8s.io.api.core.v1.Container sidecars = 12; + repeated .k8s.io.api.core.v1.Container sidecars = 12; // Number of partitions of the vertex owned buffers. // It applies to udf and sink vertices only. @@ -212,7 +212,7 @@ message Authorization { // A secret selector which contains bearer token // To use this, the client needs to add "Authorization: Bearer " in the header // +optional - optional k8s.io.api.core.v1.SecretKeySelector token = 1; + optional .k8s.io.api.core.v1.SecretKeySelector token = 1; } // Backoff defines parameters used to systematically configure the retry strategy. @@ -220,7 +220,7 @@ message Backoff { // Interval sets the delay to wait before retry, after a failure occurs. // +kubebuilder:default="1ms" // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration interval = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Duration interval = 1; // Steps defines the number of times to try writing to a sink including retries // +optional @@ -231,11 +231,11 @@ message Backoff { message BasicAuth { // Secret for auth user // +optional - optional k8s.io.api.core.v1.SecretKeySelector user = 1; + optional .k8s.io.api.core.v1.SecretKeySelector user = 1; // Secret for auth password // +optional - optional k8s.io.api.core.v1.SecretKeySelector password = 2; + optional .k8s.io.api.core.v1.SecretKeySelector password = 2; } // Blackhole is a sink to emulate /dev/null @@ -288,19 +288,19 @@ message Container { repeated string args = 3; // +optional - repeated k8s.io.api.core.v1.EnvVar env = 4; + repeated .k8s.io.api.core.v1.EnvVar env = 4; // +optional - repeated k8s.io.api.core.v1.EnvFromSource envFrom = 5; + repeated .k8s.io.api.core.v1.EnvFromSource envFrom = 5; // +optional - repeated k8s.io.api.core.v1.VolumeMount volumeMounts = 6; + repeated .k8s.io.api.core.v1.VolumeMount volumeMounts = 6; // +optional - optional k8s.io.api.core.v1.ResourceRequirements resources = 7; + optional .k8s.io.api.core.v1.ResourceRequirements 
resources = 7; // +optional - optional k8s.io.api.core.v1.SecurityContext securityContext = 8; + optional .k8s.io.api.core.v1.SecurityContext securityContext = 8; // +optional optional string imagePullPolicy = 9; @@ -315,19 +315,19 @@ message Container { // ContainerTemplate defines customized spec for a container message ContainerTemplate { // +optional - optional k8s.io.api.core.v1.ResourceRequirements resources = 1; + optional .k8s.io.api.core.v1.ResourceRequirements resources = 1; // +optional optional string imagePullPolicy = 2; // +optional - optional k8s.io.api.core.v1.SecurityContext securityContext = 3; + optional .k8s.io.api.core.v1.SecurityContext securityContext = 3; // +optional - repeated k8s.io.api.core.v1.EnvVar env = 4; + repeated .k8s.io.api.core.v1.EnvVar env = 4; // +optional - repeated k8s.io.api.core.v1.EnvFromSource envFrom = 5; + repeated .k8s.io.api.core.v1.EnvFromSource envFrom = 5; // +optional optional Probe readinessProbe = 6; @@ -374,7 +374,7 @@ message Edge { // FixedWindow describes a fixed window message FixedWindow { // Length is the duration of the fixed window. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration length = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Duration length = 1; // +optional // Streaming should be set to true if the reduce udf is streaming. 
@@ -404,22 +404,22 @@ message GSSAPI { optional string realm = 2; // UsernameSecret refers to the secret that contains the username - optional k8s.io.api.core.v1.SecretKeySelector usernameSecret = 3; + optional .k8s.io.api.core.v1.SecretKeySelector usernameSecret = 3; // valid inputs - KRB5_USER_AUTH, KRB5_KEYTAB_AUTH optional string authType = 4; // PasswordSecret refers to the secret that contains the password // +optional - optional k8s.io.api.core.v1.SecretKeySelector passwordSecret = 5; + optional .k8s.io.api.core.v1.SecretKeySelector passwordSecret = 5; // KeytabSecret refers to the secret that contains the keytab // +optional - optional k8s.io.api.core.v1.SecretKeySelector keytabSecret = 6; + optional .k8s.io.api.core.v1.SecretKeySelector keytabSecret = 6; // KerberosConfigSecret refers to the secret that contains the kerberos config // +optional - optional k8s.io.api.core.v1.SecretKeySelector kerberosConfigSecret = 7; + optional .k8s.io.api.core.v1.SecretKeySelector kerberosConfigSecret = 7; } message GeneratorSource { @@ -429,7 +429,7 @@ message GeneratorSource { // +kubebuilder:default="1s" // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration duration = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Duration duration = 2; // Size of each generated message // +kubebuilder:default=8 @@ -449,7 +449,7 @@ message GeneratorSource { // time between 0 and 10s which will result in the message being out of order by 0 to 10s // +kubebuilder:default="0s" // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration jitter = 6; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Duration jitter = 6; // ValueBlob is an optional string which is the base64 encoding of direct payload to send. 
// This is useful for attaching a GeneratorSource to a true pipeline to test load behavior @@ -467,9 +467,9 @@ message GetDaemonDeploymentReq { optional string pullPolicy = 3; - repeated k8s.io.api.core.v1.EnvVar env = 4; + repeated .k8s.io.api.core.v1.EnvVar env = 4; - optional k8s.io.api.core.v1.ResourceRequirements defaultResources = 5; + optional .k8s.io.api.core.v1.ResourceRequirements defaultResources = 5; } message GetJetStreamServiceSpecReq { @@ -513,7 +513,7 @@ message GetJetStreamStatefulSetSpecReq { optional string startCommand = 14; - optional k8s.io.api.core.v1.ResourceRequirements defaultResources = 15; + optional .k8s.io.api.core.v1.ResourceRequirements defaultResources = 15; } message GetMonoVertexDaemonDeploymentReq { @@ -521,9 +521,9 @@ message GetMonoVertexDaemonDeploymentReq { optional string pullPolicy = 2; - repeated k8s.io.api.core.v1.EnvVar env = 3; + repeated .k8s.io.api.core.v1.EnvVar env = 3; - optional k8s.io.api.core.v1.ResourceRequirements defaultResources = 4; + optional .k8s.io.api.core.v1.ResourceRequirements defaultResources = 4; } message GetMonoVertexPodSpecReq { @@ -531,9 +531,9 @@ message GetMonoVertexPodSpecReq { optional string pullPolicy = 2; - repeated k8s.io.api.core.v1.EnvVar env = 3; + repeated .k8s.io.api.core.v1.EnvVar env = 3; - optional k8s.io.api.core.v1.ResourceRequirements defaultResources = 4; + optional .k8s.io.api.core.v1.ResourceRequirements defaultResources = 4; } message GetRedisServiceSpecReq { @@ -575,7 +575,7 @@ message GetRedisStatefulSetSpecReq { optional string healthConfigMapName = 15; - optional k8s.io.api.core.v1.ResourceRequirements defaultResources = 16; + optional .k8s.io.api.core.v1.ResourceRequirements defaultResources = 16; } message GetSideInputDeploymentReq { @@ -585,9 +585,9 @@ message GetSideInputDeploymentReq { optional string pullPolicy = 3; - repeated k8s.io.api.core.v1.EnvVar env = 4; + repeated .k8s.io.api.core.v1.EnvVar env = 4; - optional k8s.io.api.core.v1.ResourceRequirements 
defaultResources = 5; + optional .k8s.io.api.core.v1.ResourceRequirements defaultResources = 5; } message GetVertexPodSpecReq { @@ -597,7 +597,7 @@ message GetVertexPodSpecReq { optional string pullPolicy = 3; - repeated k8s.io.api.core.v1.EnvVar env = 4; + repeated .k8s.io.api.core.v1.EnvVar env = 4; optional string sideInputsStoreName = 5; @@ -605,7 +605,7 @@ message GetVertexPodSpecReq { optional PipelineSpec pipelineSpec = 7; - optional k8s.io.api.core.v1.ResourceRequirements defaultResources = 8; + optional .k8s.io.api.core.v1.ResourceRequirements defaultResources = 8; } // GroupBy indicates it is a reducer UDF @@ -619,7 +619,7 @@ message GroupBy { // AllowedLateness allows late data to be included for the Reduce operation as long as the late data is not later // than (Watermark - AllowedLateness). // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration allowedLateness = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Duration allowedLateness = 3; // Storage is used to define the PBQ storage for a reduce vertex. optional PBQStorage storage = 4; @@ -637,17 +637,17 @@ message HTTPSource { message IdleSource { // Threshold is the duration after which a source is marked as Idle due to lack of data. // Ex: If watermark found to be idle after the Threshold duration then the watermark is progressed by `IncrementBy`. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration threshold = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Duration threshold = 1; // StepInterval is the duration between the subsequent increment of the watermark as long the source remains Idle. // The default value is 0s which means that once we detect idle source, we will be incrementing the watermark by // `IncrementBy` for time we detect that we source is empty (in other words, this will be a very frequent update). 
// +kubebuilder:default="0s" // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration stepInterval = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Duration stepInterval = 2; // IncrementBy is the duration to be added to the current watermark to progress the watermark when source is idling. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration incrementBy = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Duration incrementBy = 3; } // +genclient @@ -661,7 +661,7 @@ message IdleSource { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:openapi-gen=true message InterStepBufferService { - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; optional InterStepBufferServiceSpec spec = 2; @@ -672,7 +672,7 @@ message InterStepBufferService { // InterStepBufferServiceList is the list of InterStepBufferService resources // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object message InterStepBufferServiceList { - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; repeated InterStepBufferService items = 2; } @@ -891,7 +891,7 @@ message Metadata { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:openapi-gen=true message MonoVertex { - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; optional MonoVertexSpec spec = 2; @@ -915,13 +915,13 @@ message MonoVertexLimits { // Read timeout duration from the source. 
// +kubebuilder:default= "1s" // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration readTimeout = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Duration readTimeout = 2; } // +kubebuilder:object:root=true // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object message MonoVertexList { - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; repeated MonoVertex items = 2; } @@ -945,7 +945,7 @@ message MonoVertexSpec { // +optional // +patchStrategy=merge // +patchMergeKey=name - repeated k8s.io.api.core.v1.Volume volumes = 6; + repeated .k8s.io.api.core.v1.Volume volumes = 6; // Limits define the limitations such as read batch size for the mono vertex. // +optional @@ -958,11 +958,11 @@ message MonoVertexSpec { // List of customized init containers belonging to the pod. // More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ // +optional - repeated k8s.io.api.core.v1.Container initContainers = 9; + repeated .k8s.io.api.core.v1.Container initContainers = 9; // List of customized sidecar containers belonging to the pod. // +optional - repeated k8s.io.api.core.v1.Container sidecars = 10; + repeated .k8s.io.api.core.v1.Container sidecars = 10; // Template for the daemon service deployment. // +optional @@ -1003,11 +1003,11 @@ message MonoVertexStatus { optional string message = 7; // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdated = 8; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdated = 8; // Time of last scaling operation. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaledAt = 9; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaledAt = 9; // The generation observed by the MonoVertex controller. 
// +optional @@ -1072,11 +1072,11 @@ message NatsAuth { // Token auth // +optional - optional k8s.io.api.core.v1.SecretKeySelector token = 2; + optional .k8s.io.api.core.v1.SecretKeySelector token = 2; // NKey auth // +optional - optional k8s.io.api.core.v1.SecretKeySelector nkey = 3; + optional .k8s.io.api.core.v1.SecretKeySelector nkey = 3; } message NatsSource { @@ -1109,7 +1109,7 @@ message PBQStorage { optional PersistenceStrategy persistentVolumeClaim = 1; // +optional - optional k8s.io.api.core.v1.EmptyDirVolumeSource emptyDir = 2; + optional .k8s.io.api.core.v1.EmptyDirVolumeSource emptyDir = 2; // +optional optional NoStore no_store = 3; @@ -1128,7 +1128,7 @@ message PersistenceStrategy { optional string accessMode = 2; // Volume size, e.g. 50Gi - optional k8s.io.apimachinery.pkg.api.resource.Quantity volumeSize = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity volumeSize = 3; } // +genclient @@ -1147,7 +1147,7 @@ message PersistenceStrategy { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:openapi-gen=true message Pipeline { - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; optional PipelineSpec spec = 2; @@ -1178,13 +1178,13 @@ message PipelineLimits { // Read timeout for all the vertices in the pipeline, can be overridden by the vertex's limit settings // +kubebuilder:default= "1s" // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration readTimeout = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Duration readTimeout = 4; } // +kubebuilder:object:root=true // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object message PipelineList { - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; repeated Pipeline items = 2; } @@ -1234,7 +1234,7 @@ message PipelineStatus { optional string message = 3; // 
+optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdated = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdated = 4; // +optional optional uint32 vertexCount = 5; @@ -1318,11 +1318,11 @@ message RedisConfig { // Redis password secret selector // +optional - optional k8s.io.api.core.v1.SecretKeySelector password = 5; + optional .k8s.io.api.core.v1.SecretKeySelector password = 5; // Sentinel password secret selector // +optional - optional k8s.io.api.core.v1.SecretKeySelector sentinelPassword = 6; + optional .k8s.io.api.core.v1.SecretKeySelector sentinelPassword = 6; } message RedisSettings { @@ -1368,7 +1368,7 @@ message RollingUpdateStrategy { // that the total number of pods available at all times during the update is at // least 70% of desired pods. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; } message SASL { @@ -1394,11 +1394,11 @@ message SASL { message SASLPlain { // UserSecret refers to the secret that contains the user - optional k8s.io.api.core.v1.SecretKeySelector userSecret = 1; + optional .k8s.io.api.core.v1.SecretKeySelector userSecret = 1; // PasswordSecret refers to the secret that contains the password // +optional - optional k8s.io.api.core.v1.SecretKeySelector passwordSecret = 2; + optional .k8s.io.api.core.v1.SecretKeySelector passwordSecret = 2; optional bool handshake = 3; } @@ -1488,13 +1488,13 @@ message ServingStore { // TTL for the data in the store and tracker // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration ttl = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Duration ttl = 2; } // SessionWindow describes a session window message SessionWindow { // Timeout is the duration of inactivity after which a session window closes. 
- optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration timeout = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Duration timeout = 1; } // SideInput defines information of a Side Input @@ -1506,7 +1506,7 @@ message SideInput { // +optional // +patchStrategy=merge // +patchMergeKey=name - repeated k8s.io.api.core.v1.Volume volumes = 3; + repeated .k8s.io.api.core.v1.Volume volumes = 3; optional SideInputTrigger trigger = 4; } @@ -1550,10 +1550,10 @@ message Sink { // SlidingWindow describes a sliding window message SlidingWindow { // Length is the duration of the sliding window. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration length = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Duration length = 1; // Slide is the slide parameter that controls the frequency at which the sliding window is created. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration slide = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Duration slide = 2; // +optional // Streaming should be set to true if the reduce udf is streaming. 
@@ -1592,7 +1592,7 @@ message Status { // +optional // +patchMergeKey=type // +patchStrategy=merge - repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1; } message TLS { @@ -1601,15 +1601,15 @@ message TLS { // CACertSecret refers to the secret that contains the CA cert // +optional - optional k8s.io.api.core.v1.SecretKeySelector caCertSecret = 2; + optional .k8s.io.api.core.v1.SecretKeySelector caCertSecret = 2; // CertSecret refers to the secret that contains the cert // +optional - optional k8s.io.api.core.v1.SecretKeySelector certSecret = 3; + optional .k8s.io.api.core.v1.SecretKeySelector certSecret = 3; // KeySecret refers to the secret that contains the key // +optional - optional k8s.io.api.core.v1.SecretKeySelector keySecret = 4; + optional .k8s.io.api.core.v1.SecretKeySelector keySecret = 4; } message TagConditions { @@ -1707,7 +1707,7 @@ message UpdateStrategy { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:openapi-gen=true message Vertex { - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; optional VertexSpec spec = 2; @@ -1733,7 +1733,7 @@ message VertexLimits { // Read timeout duration from the source or buffer // It overrides the settings from pipeline limits. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration readTimeout = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Duration readTimeout = 2; // BufferMaxLength is used to define the max length of a buffer. // It overrides the settings from pipeline limits. 
@@ -1749,7 +1749,7 @@ message VertexLimits { // +kubebuilder:object:root=true // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object message VertexList { - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; repeated Vertex items = 2; } @@ -1803,7 +1803,7 @@ message VertexStatus { // Time of last scaling operation. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaledAt = 8; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaledAt = 8; // The generation observed by the Vertex controller. // +optional @@ -1848,7 +1848,7 @@ message Watermark { // Maximum delay allowed for watermark calculation, defaults to "0s", which means no delay. // +kubebuilder:default="0s" // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration maxDelay = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Duration maxDelay = 2; // IdleSource defines the idle watermark properties, it could be configured in case source is idling. // +optional diff --git a/pkg/apis/numaflow/v1alpha1/openapi_generated.go b/pkg/apis/numaflow/v1alpha1/zz_generated.openapi.go similarity index 99% rename from pkg/apis/numaflow/v1alpha1/openapi_generated.go rename to pkg/apis/numaflow/v1alpha1/zz_generated.openapi.go index 83186d25b7..e28fbbe28a 100644 --- a/pkg/apis/numaflow/v1alpha1/openapi_generated.go +++ b/pkg/apis/numaflow/v1alpha1/zz_generated.openapi.go @@ -19,8 +19,6 @@ limitations under the License. // Code generated by openapi-gen. DO NOT EDIT. -// This file was autogenerated by openapi-gen. Do not edit it manually! - package v1alpha1 import ( @@ -6273,6 +6271,11 @@ func schema_pkg_apis_numaflow_v1alpha1_containerBuilder(ref common.ReferenceCall }, }, "command": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, SchemaProps: spec.SchemaProps{ Description: "Entrypoint array. 
Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", Type: []string{"array"}, @@ -6288,6 +6291,11 @@ func schema_pkg_apis_numaflow_v1alpha1_containerBuilder(ref common.ReferenceCall }, }, "args": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, SchemaProps: spec.SchemaProps{ Description: "Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", Type: []string{"array"}, @@ -6335,6 +6343,11 @@ func schema_pkg_apis_numaflow_v1alpha1_containerBuilder(ref common.ReferenceCall }, }, "envFrom": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, SchemaProps: spec.SchemaProps{ Description: "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", Type: []string{"array"}, @@ -6351,6 +6364,10 @@ func schema_pkg_apis_numaflow_v1alpha1_containerBuilder(ref common.ReferenceCall "env": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ + "x-kubernetes-list-map-keys": []interface{}{ + "name", + }, + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge", }, @@ -6404,6 +6421,10 @@ func schema_pkg_apis_numaflow_v1alpha1_containerBuilder(ref common.ReferenceCall "volumeMounts": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ + "x-kubernetes-list-map-keys": []interface{}{ + "mountPath", + }, + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "mountPath", "x-kubernetes-patch-strategy": "merge", }, @@ -6424,6 +6445,10 @@ func schema_pkg_apis_numaflow_v1alpha1_containerBuilder(ref common.ReferenceCall "volumeDevices": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ + "x-kubernetes-list-map-keys": []interface{}{ + "devicePath", + }, + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "devicePath", "x-kubernetes-patch-strategy": "merge", }, 
diff --git a/pkg/apis/proto/daemon/daemon.pb.gw.go b/pkg/apis/proto/daemon/daemon.pb.gw.go index 83114f4a96..e964701404 100644 --- a/pkg/apis/proto/daemon/daemon.pb.gw.go +++ b/pkg/apis/proto/daemon/daemon.pb.gw.go @@ -468,21 +468,21 @@ func RegisterDaemonServiceHandlerServer(ctx context.Context, mux *runtime.ServeM // RegisterDaemonServiceHandlerFromEndpoint is same as RegisterDaemonServiceHandler but // automatically dials to "endpoint" and closes the connection when "ctx" gets done. func RegisterDaemonServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.DialContext(ctx, endpoint, opts...) + conn, err := grpc.NewClient(endpoint, opts...) if err != nil { return err } defer func() { if err != nil { if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr) } return } go func() { <-ctx.Done() if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr) } }() }() diff --git a/pkg/apis/proto/mvtxdaemon/mvtxdaemon.pb.gw.go b/pkg/apis/proto/mvtxdaemon/mvtxdaemon.pb.gw.go index 80741d34f6..53badc433c 100644 --- a/pkg/apis/proto/mvtxdaemon/mvtxdaemon.pb.gw.go +++ b/pkg/apis/proto/mvtxdaemon/mvtxdaemon.pb.gw.go @@ -130,21 +130,21 @@ func RegisterMonoVertexDaemonServiceHandlerServer(ctx context.Context, mux *runt // RegisterMonoVertexDaemonServiceHandlerFromEndpoint is same as RegisterMonoVertexDaemonServiceHandler but // automatically dials to "endpoint" and closes the connection when "ctx" gets done. func RegisterMonoVertexDaemonServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.DialContext(ctx, endpoint, opts...) + conn, err := grpc.NewClient(endpoint, opts...) 
if err != nil { return err } defer func() { if err != nil { if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr) } return } go func() { <-ctx.Done() if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr) } }() }() diff --git a/pkg/client/clientset/versioned/fake/clientset_generated.go b/pkg/client/clientset/versioned/fake/clientset_generated.go index 821681f867..94573a27dd 100644 --- a/pkg/client/clientset/versioned/fake/clientset_generated.go +++ b/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -31,8 +31,12 @@ import ( // NewSimpleClientset returns a clientset that will respond with the provided objects. // It's backed by a very simple object tracker that processes creates, updates and deletions as-is, -// without applying any validations and/or defaults. It shouldn't be considered a replacement +// without applying any field management, validations and/or defaults. It shouldn't be considered a replacement // for a real clientset and is mostly useful in simple unit tests. +// +// DEPRECATED: NewClientset replaces this with support for field management, which significantly improves +// server side apply testing. NewClientset is only available when apply configurations are generated (e.g. +// via --with-applyconfig). 
func NewSimpleClientset(objects ...runtime.Object) *Clientset { o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) for _, obj := range objects { diff --git a/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/fake/fake_interstepbufferservice.go b/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/fake/fake_interstepbufferservice.go index 1cbe774451..0580e19540 100644 --- a/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/fake/fake_interstepbufferservice.go +++ b/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/fake/fake_interstepbufferservice.go @@ -41,22 +41,24 @@ var interstepbufferservicesKind = v1alpha1.SchemeGroupVersion.WithKind("InterSte // Get takes name of the interStepBufferService, and returns the corresponding interStepBufferService object, and an error if there is any. func (c *FakeInterStepBufferServices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.InterStepBufferService, err error) { + emptyResult := &v1alpha1.InterStepBufferService{} obj, err := c.Fake. - Invokes(testing.NewGetAction(interstepbufferservicesResource, c.ns, name), &v1alpha1.InterStepBufferService{}) + Invokes(testing.NewGetActionWithOptions(interstepbufferservicesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.InterStepBufferService), err } // List takes label and field selectors, and returns the list of InterStepBufferServices that match those selectors. func (c *FakeInterStepBufferServices) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.InterStepBufferServiceList, err error) { + emptyResult := &v1alpha1.InterStepBufferServiceList{} obj, err := c.Fake. 
- Invokes(testing.NewListAction(interstepbufferservicesResource, interstepbufferservicesKind, c.ns, opts), &v1alpha1.InterStepBufferServiceList{}) + Invokes(testing.NewListActionWithOptions(interstepbufferservicesResource, interstepbufferservicesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,40 +77,43 @@ func (c *FakeInterStepBufferServices) List(ctx context.Context, opts v1.ListOpti // Watch returns a watch.Interface that watches the requested interStepBufferServices. func (c *FakeInterStepBufferServices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(interstepbufferservicesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(interstepbufferservicesResource, c.ns, opts)) } // Create takes the representation of a interStepBufferService and creates it. Returns the server's representation of the interStepBufferService, and an error, if there is any. func (c *FakeInterStepBufferServices) Create(ctx context.Context, interStepBufferService *v1alpha1.InterStepBufferService, opts v1.CreateOptions) (result *v1alpha1.InterStepBufferService, err error) { + emptyResult := &v1alpha1.InterStepBufferService{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(interstepbufferservicesResource, c.ns, interStepBufferService), &v1alpha1.InterStepBufferService{}) + Invokes(testing.NewCreateActionWithOptions(interstepbufferservicesResource, c.ns, interStepBufferService, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.InterStepBufferService), err } // Update takes the representation of a interStepBufferService and updates it. Returns the server's representation of the interStepBufferService, and an error, if there is any. 
func (c *FakeInterStepBufferServices) Update(ctx context.Context, interStepBufferService *v1alpha1.InterStepBufferService, opts v1.UpdateOptions) (result *v1alpha1.InterStepBufferService, err error) { + emptyResult := &v1alpha1.InterStepBufferService{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(interstepbufferservicesResource, c.ns, interStepBufferService), &v1alpha1.InterStepBufferService{}) + Invokes(testing.NewUpdateActionWithOptions(interstepbufferservicesResource, c.ns, interStepBufferService, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.InterStepBufferService), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeInterStepBufferServices) UpdateStatus(ctx context.Context, interStepBufferService *v1alpha1.InterStepBufferService, opts v1.UpdateOptions) (*v1alpha1.InterStepBufferService, error) { +func (c *FakeInterStepBufferServices) UpdateStatus(ctx context.Context, interStepBufferService *v1alpha1.InterStepBufferService, opts v1.UpdateOptions) (result *v1alpha1.InterStepBufferService, err error) { + emptyResult := &v1alpha1.InterStepBufferService{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(interstepbufferservicesResource, "status", c.ns, interStepBufferService), &v1alpha1.InterStepBufferService{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(interstepbufferservicesResource, "status", c.ns, interStepBufferService, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.InterStepBufferService), err } @@ -123,7 +128,7 @@ func (c *FakeInterStepBufferServices) Delete(ctx context.Context, name string, o // DeleteCollection deletes a collection of objects. 
func (c *FakeInterStepBufferServices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(interstepbufferservicesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(interstepbufferservicesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.InterStepBufferServiceList{}) return err @@ -131,11 +136,12 @@ func (c *FakeInterStepBufferServices) DeleteCollection(ctx context.Context, opts // Patch applies the patch and returns the patched interStepBufferService. func (c *FakeInterStepBufferServices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.InterStepBufferService, err error) { + emptyResult := &v1alpha1.InterStepBufferService{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(interstepbufferservicesResource, c.ns, name, pt, data, subresources...), &v1alpha1.InterStepBufferService{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(interstepbufferservicesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.InterStepBufferService), err } diff --git a/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/fake/fake_monovertex.go b/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/fake/fake_monovertex.go index 4e25ccd34b..a8199da799 100644 --- a/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/fake/fake_monovertex.go +++ b/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/fake/fake_monovertex.go @@ -41,22 +41,24 @@ var monoverticesKind = v1alpha1.SchemeGroupVersion.WithKind("MonoVertex") // Get takes name of the monoVertex, and returns the corresponding monoVertex object, and an error if there is any. 
func (c *FakeMonoVertices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.MonoVertex, err error) { + emptyResult := &v1alpha1.MonoVertex{} obj, err := c.Fake. - Invokes(testing.NewGetAction(monoverticesResource, c.ns, name), &v1alpha1.MonoVertex{}) + Invokes(testing.NewGetActionWithOptions(monoverticesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.MonoVertex), err } // List takes label and field selectors, and returns the list of MonoVertices that match those selectors. func (c *FakeMonoVertices) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.MonoVertexList, err error) { + emptyResult := &v1alpha1.MonoVertexList{} obj, err := c.Fake. - Invokes(testing.NewListAction(monoverticesResource, monoverticesKind, c.ns, opts), &v1alpha1.MonoVertexList{}) + Invokes(testing.NewListActionWithOptions(monoverticesResource, monoverticesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,40 +77,43 @@ func (c *FakeMonoVertices) List(ctx context.Context, opts v1.ListOptions) (resul // Watch returns a watch.Interface that watches the requested monoVertices. func (c *FakeMonoVertices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(monoverticesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(monoverticesResource, c.ns, opts)) } // Create takes the representation of a monoVertex and creates it. Returns the server's representation of the monoVertex, and an error, if there is any. func (c *FakeMonoVertices) Create(ctx context.Context, monoVertex *v1alpha1.MonoVertex, opts v1.CreateOptions) (result *v1alpha1.MonoVertex, err error) { + emptyResult := &v1alpha1.MonoVertex{} obj, err := c.Fake. 
- Invokes(testing.NewCreateAction(monoverticesResource, c.ns, monoVertex), &v1alpha1.MonoVertex{}) + Invokes(testing.NewCreateActionWithOptions(monoverticesResource, c.ns, monoVertex, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.MonoVertex), err } // Update takes the representation of a monoVertex and updates it. Returns the server's representation of the monoVertex, and an error, if there is any. func (c *FakeMonoVertices) Update(ctx context.Context, monoVertex *v1alpha1.MonoVertex, opts v1.UpdateOptions) (result *v1alpha1.MonoVertex, err error) { + emptyResult := &v1alpha1.MonoVertex{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(monoverticesResource, c.ns, monoVertex), &v1alpha1.MonoVertex{}) + Invokes(testing.NewUpdateActionWithOptions(monoverticesResource, c.ns, monoVertex, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.MonoVertex), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeMonoVertices) UpdateStatus(ctx context.Context, monoVertex *v1alpha1.MonoVertex, opts v1.UpdateOptions) (*v1alpha1.MonoVertex, error) { +func (c *FakeMonoVertices) UpdateStatus(ctx context.Context, monoVertex *v1alpha1.MonoVertex, opts v1.UpdateOptions) (result *v1alpha1.MonoVertex, err error) { + emptyResult := &v1alpha1.MonoVertex{} obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceAction(monoverticesResource, "status", c.ns, monoVertex), &v1alpha1.MonoVertex{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(monoverticesResource, "status", c.ns, monoVertex, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.MonoVertex), err } @@ -123,7 +128,7 @@ func (c *FakeMonoVertices) Delete(ctx context.Context, name string, opts v1.Dele // DeleteCollection deletes a collection of objects. func (c *FakeMonoVertices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(monoverticesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(monoverticesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.MonoVertexList{}) return err @@ -131,11 +136,12 @@ func (c *FakeMonoVertices) DeleteCollection(ctx context.Context, opts v1.DeleteO // Patch applies the patch and returns the patched monoVertex. func (c *FakeMonoVertices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.MonoVertex, err error) { + emptyResult := &v1alpha1.MonoVertex{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(monoverticesResource, c.ns, name, pt, data, subresources...), &v1alpha1.MonoVertex{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(monoverticesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.MonoVertex), err } diff --git a/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/fake/fake_pipeline.go b/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/fake/fake_pipeline.go index 6930383ded..ac55b08c57 100644 --- a/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/fake/fake_pipeline.go +++ b/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/fake/fake_pipeline.go @@ -41,22 +41,24 @@ var pipelinesKind = v1alpha1.SchemeGroupVersion.WithKind("Pipeline") // Get takes name of the pipeline, and returns the corresponding pipeline object, and an error if there is any. func (c *FakePipelines) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Pipeline, err error) { + emptyResult := &v1alpha1.Pipeline{} obj, err := c.Fake. - Invokes(testing.NewGetAction(pipelinesResource, c.ns, name), &v1alpha1.Pipeline{}) + Invokes(testing.NewGetActionWithOptions(pipelinesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.Pipeline), err } // List takes label and field selectors, and returns the list of Pipelines that match those selectors. func (c *FakePipelines) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PipelineList, err error) { + emptyResult := &v1alpha1.PipelineList{} obj, err := c.Fake. 
- Invokes(testing.NewListAction(pipelinesResource, pipelinesKind, c.ns, opts), &v1alpha1.PipelineList{}) + Invokes(testing.NewListActionWithOptions(pipelinesResource, pipelinesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,40 +77,43 @@ func (c *FakePipelines) List(ctx context.Context, opts v1.ListOptions) (result * // Watch returns a watch.Interface that watches the requested pipelines. func (c *FakePipelines) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(pipelinesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(pipelinesResource, c.ns, opts)) } // Create takes the representation of a pipeline and creates it. Returns the server's representation of the pipeline, and an error, if there is any. func (c *FakePipelines) Create(ctx context.Context, pipeline *v1alpha1.Pipeline, opts v1.CreateOptions) (result *v1alpha1.Pipeline, err error) { + emptyResult := &v1alpha1.Pipeline{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(pipelinesResource, c.ns, pipeline), &v1alpha1.Pipeline{}) + Invokes(testing.NewCreateActionWithOptions(pipelinesResource, c.ns, pipeline, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.Pipeline), err } // Update takes the representation of a pipeline and updates it. Returns the server's representation of the pipeline, and an error, if there is any. func (c *FakePipelines) Update(ctx context.Context, pipeline *v1alpha1.Pipeline, opts v1.UpdateOptions) (result *v1alpha1.Pipeline, err error) { + emptyResult := &v1alpha1.Pipeline{} obj, err := c.Fake. 
- Invokes(testing.NewUpdateAction(pipelinesResource, c.ns, pipeline), &v1alpha1.Pipeline{}) + Invokes(testing.NewUpdateActionWithOptions(pipelinesResource, c.ns, pipeline, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.Pipeline), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePipelines) UpdateStatus(ctx context.Context, pipeline *v1alpha1.Pipeline, opts v1.UpdateOptions) (*v1alpha1.Pipeline, error) { +func (c *FakePipelines) UpdateStatus(ctx context.Context, pipeline *v1alpha1.Pipeline, opts v1.UpdateOptions) (result *v1alpha1.Pipeline, err error) { + emptyResult := &v1alpha1.Pipeline{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(pipelinesResource, "status", c.ns, pipeline), &v1alpha1.Pipeline{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(pipelinesResource, "status", c.ns, pipeline, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.Pipeline), err } @@ -123,7 +128,7 @@ func (c *FakePipelines) Delete(ctx context.Context, name string, opts v1.DeleteO // DeleteCollection deletes a collection of objects. func (c *FakePipelines) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(pipelinesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(pipelinesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.PipelineList{}) return err @@ -131,11 +136,12 @@ func (c *FakePipelines) DeleteCollection(ctx context.Context, opts v1.DeleteOpti // Patch applies the patch and returns the patched pipeline. 
func (c *FakePipelines) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Pipeline, err error) { + emptyResult := &v1alpha1.Pipeline{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(pipelinesResource, c.ns, name, pt, data, subresources...), &v1alpha1.Pipeline{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(pipelinesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.Pipeline), err } diff --git a/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/fake/fake_vertex.go b/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/fake/fake_vertex.go index 5f2ede64a7..e81af81437 100644 --- a/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/fake/fake_vertex.go +++ b/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/fake/fake_vertex.go @@ -41,22 +41,24 @@ var verticesKind = v1alpha1.SchemeGroupVersion.WithKind("Vertex") // Get takes name of the vertex, and returns the corresponding vertex object, and an error if there is any. func (c *FakeVertices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Vertex, err error) { + emptyResult := &v1alpha1.Vertex{} obj, err := c.Fake. - Invokes(testing.NewGetAction(verticesResource, c.ns, name), &v1alpha1.Vertex{}) + Invokes(testing.NewGetActionWithOptions(verticesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.Vertex), err } // List takes label and field selectors, and returns the list of Vertices that match those selectors. func (c *FakeVertices) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.VertexList, err error) { + emptyResult := &v1alpha1.VertexList{} obj, err := c.Fake. 
- Invokes(testing.NewListAction(verticesResource, verticesKind, c.ns, opts), &v1alpha1.VertexList{}) + Invokes(testing.NewListActionWithOptions(verticesResource, verticesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,40 +77,43 @@ func (c *FakeVertices) List(ctx context.Context, opts v1.ListOptions) (result *v // Watch returns a watch.Interface that watches the requested vertices. func (c *FakeVertices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(verticesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(verticesResource, c.ns, opts)) } // Create takes the representation of a vertex and creates it. Returns the server's representation of the vertex, and an error, if there is any. func (c *FakeVertices) Create(ctx context.Context, vertex *v1alpha1.Vertex, opts v1.CreateOptions) (result *v1alpha1.Vertex, err error) { + emptyResult := &v1alpha1.Vertex{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(verticesResource, c.ns, vertex), &v1alpha1.Vertex{}) + Invokes(testing.NewCreateActionWithOptions(verticesResource, c.ns, vertex, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.Vertex), err } // Update takes the representation of a vertex and updates it. Returns the server's representation of the vertex, and an error, if there is any. func (c *FakeVertices) Update(ctx context.Context, vertex *v1alpha1.Vertex, opts v1.UpdateOptions) (result *v1alpha1.Vertex, err error) { + emptyResult := &v1alpha1.Vertex{} obj, err := c.Fake. 
- Invokes(testing.NewUpdateAction(verticesResource, c.ns, vertex), &v1alpha1.Vertex{}) + Invokes(testing.NewUpdateActionWithOptions(verticesResource, c.ns, vertex, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.Vertex), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeVertices) UpdateStatus(ctx context.Context, vertex *v1alpha1.Vertex, opts v1.UpdateOptions) (*v1alpha1.Vertex, error) { +func (c *FakeVertices) UpdateStatus(ctx context.Context, vertex *v1alpha1.Vertex, opts v1.UpdateOptions) (result *v1alpha1.Vertex, err error) { + emptyResult := &v1alpha1.Vertex{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(verticesResource, "status", c.ns, vertex), &v1alpha1.Vertex{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(verticesResource, "status", c.ns, vertex, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.Vertex), err } @@ -123,7 +128,7 @@ func (c *FakeVertices) Delete(ctx context.Context, name string, opts v1.DeleteOp // DeleteCollection deletes a collection of objects. func (c *FakeVertices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(verticesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(verticesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.VertexList{}) return err @@ -131,11 +136,12 @@ func (c *FakeVertices) DeleteCollection(ctx context.Context, opts v1.DeleteOptio // Patch applies the patch and returns the patched vertex. 
func (c *FakeVertices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Vertex, err error) { + emptyResult := &v1alpha1.Vertex{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(verticesResource, c.ns, name, pt, data, subresources...), &v1alpha1.Vertex{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(verticesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.Vertex), err } diff --git a/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/interstepbufferservice.go b/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/interstepbufferservice.go index 92d4f964c9..dba00b37ef 100644 --- a/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/interstepbufferservice.go +++ b/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/interstepbufferservice.go @@ -20,14 +20,13 @@ package v1alpha1 import ( "context" - "time" v1alpha1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" scheme "github.com/numaproj/numaflow/pkg/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // InterStepBufferServicesGetter has a method to return a InterStepBufferServiceInterface. @@ -40,6 +39,7 @@ type InterStepBufferServicesGetter interface { type InterStepBufferServiceInterface interface { Create(ctx context.Context, interStepBufferService *v1alpha1.InterStepBufferService, opts v1.CreateOptions) (*v1alpha1.InterStepBufferService, error) Update(ctx context.Context, interStepBufferService *v1alpha1.InterStepBufferService, opts v1.UpdateOptions) (*v1alpha1.InterStepBufferService, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, interStepBufferService *v1alpha1.InterStepBufferService, opts v1.UpdateOptions) (*v1alpha1.InterStepBufferService, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -52,144 +52,18 @@ type InterStepBufferServiceInterface interface { // interStepBufferServices implements InterStepBufferServiceInterface type interStepBufferServices struct { - client rest.Interface - ns string + *gentype.ClientWithList[*v1alpha1.InterStepBufferService, *v1alpha1.InterStepBufferServiceList] } // newInterStepBufferServices returns a InterStepBufferServices func newInterStepBufferServices(c *NumaflowV1alpha1Client, namespace string) *interStepBufferServices { return &interStepBufferServices{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithList[*v1alpha1.InterStepBufferService, *v1alpha1.InterStepBufferServiceList]( + "interstepbufferservices", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1alpha1.InterStepBufferService { return &v1alpha1.InterStepBufferService{} }, + func() *v1alpha1.InterStepBufferServiceList { return &v1alpha1.InterStepBufferServiceList{} }), } } - -// Get takes name of the interStepBufferService, and returns the corresponding interStepBufferService object, and an error if there is any. -func (c *interStepBufferServices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.InterStepBufferService, err error) { - result = &v1alpha1.InterStepBufferService{} - err = c.client.Get(). - Namespace(c.ns). - Resource("interstepbufferservices"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of InterStepBufferServices that match those selectors. 
-func (c *interStepBufferServices) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.InterStepBufferServiceList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.InterStepBufferServiceList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("interstepbufferservices"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested interStepBufferServices. -func (c *interStepBufferServices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("interstepbufferservices"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a interStepBufferService and creates it. Returns the server's representation of the interStepBufferService, and an error, if there is any. -func (c *interStepBufferServices) Create(ctx context.Context, interStepBufferService *v1alpha1.InterStepBufferService, opts v1.CreateOptions) (result *v1alpha1.InterStepBufferService, err error) { - result = &v1alpha1.InterStepBufferService{} - err = c.client.Post(). - Namespace(c.ns). - Resource("interstepbufferservices"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(interStepBufferService). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a interStepBufferService and updates it. Returns the server's representation of the interStepBufferService, and an error, if there is any. 
-func (c *interStepBufferServices) Update(ctx context.Context, interStepBufferService *v1alpha1.InterStepBufferService, opts v1.UpdateOptions) (result *v1alpha1.InterStepBufferService, err error) { - result = &v1alpha1.InterStepBufferService{} - err = c.client.Put(). - Namespace(c.ns). - Resource("interstepbufferservices"). - Name(interStepBufferService.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(interStepBufferService). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *interStepBufferServices) UpdateStatus(ctx context.Context, interStepBufferService *v1alpha1.InterStepBufferService, opts v1.UpdateOptions) (result *v1alpha1.InterStepBufferService, err error) { - result = &v1alpha1.InterStepBufferService{} - err = c.client.Put(). - Namespace(c.ns). - Resource("interstepbufferservices"). - Name(interStepBufferService.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(interStepBufferService). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the interStepBufferService and deletes it. Returns an error if one occurs. -func (c *interStepBufferServices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("interstepbufferservices"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *interStepBufferServices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("interstepbufferservices"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). 
- Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched interStepBufferService. -func (c *interStepBufferServices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.InterStepBufferService, err error) { - result = &v1alpha1.InterStepBufferService{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("interstepbufferservices"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/monovertex.go b/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/monovertex.go index 674a3bd906..8018072af5 100644 --- a/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/monovertex.go +++ b/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/monovertex.go @@ -20,14 +20,13 @@ package v1alpha1 import ( "context" - "time" v1alpha1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" scheme "github.com/numaproj/numaflow/pkg/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // MonoVerticesGetter has a method to return a MonoVertexInterface. @@ -40,6 +39,7 @@ type MonoVerticesGetter interface { type MonoVertexInterface interface { Create(ctx context.Context, monoVertex *v1alpha1.MonoVertex, opts v1.CreateOptions) (*v1alpha1.MonoVertex, error) Update(ctx context.Context, monoVertex *v1alpha1.MonoVertex, opts v1.UpdateOptions) (*v1alpha1.MonoVertex, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, monoVertex *v1alpha1.MonoVertex, opts v1.UpdateOptions) (*v1alpha1.MonoVertex, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -52,144 +52,18 @@ type MonoVertexInterface interface { // monoVertices implements MonoVertexInterface type monoVertices struct { - client rest.Interface - ns string + *gentype.ClientWithList[*v1alpha1.MonoVertex, *v1alpha1.MonoVertexList] } // newMonoVertices returns a MonoVertices func newMonoVertices(c *NumaflowV1alpha1Client, namespace string) *monoVertices { return &monoVertices{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithList[*v1alpha1.MonoVertex, *v1alpha1.MonoVertexList]( + "monovertices", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1alpha1.MonoVertex { return &v1alpha1.MonoVertex{} }, + func() *v1alpha1.MonoVertexList { return &v1alpha1.MonoVertexList{} }), } } - -// Get takes name of the monoVertex, and returns the corresponding monoVertex object, and an error if there is any. -func (c *monoVertices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.MonoVertex, err error) { - result = &v1alpha1.MonoVertex{} - err = c.client.Get(). - Namespace(c.ns). - Resource("monovertices"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of MonoVertices that match those selectors. -func (c *monoVertices) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.MonoVertexList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.MonoVertexList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("monovertices"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). 
- Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested monoVertices. -func (c *monoVertices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("monovertices"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a monoVertex and creates it. Returns the server's representation of the monoVertex, and an error, if there is any. -func (c *monoVertices) Create(ctx context.Context, monoVertex *v1alpha1.MonoVertex, opts v1.CreateOptions) (result *v1alpha1.MonoVertex, err error) { - result = &v1alpha1.MonoVertex{} - err = c.client.Post(). - Namespace(c.ns). - Resource("monovertices"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(monoVertex). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a monoVertex and updates it. Returns the server's representation of the monoVertex, and an error, if there is any. -func (c *monoVertices) Update(ctx context.Context, monoVertex *v1alpha1.MonoVertex, opts v1.UpdateOptions) (result *v1alpha1.MonoVertex, err error) { - result = &v1alpha1.MonoVertex{} - err = c.client.Put(). - Namespace(c.ns). - Resource("monovertices"). - Name(monoVertex.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(monoVertex). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *monoVertices) UpdateStatus(ctx context.Context, monoVertex *v1alpha1.MonoVertex, opts v1.UpdateOptions) (result *v1alpha1.MonoVertex, err error) { - result = &v1alpha1.MonoVertex{} - err = c.client.Put(). - Namespace(c.ns). 
- Resource("monovertices"). - Name(monoVertex.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(monoVertex). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the monoVertex and deletes it. Returns an error if one occurs. -func (c *monoVertices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("monovertices"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *monoVertices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("monovertices"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched monoVertex. -func (c *monoVertices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.MonoVertex, err error) { - result = &v1alpha1.MonoVertex{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("monovertices"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/pipeline.go b/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/pipeline.go index 53e668a72f..22b311b22b 100644 --- a/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/pipeline.go +++ b/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/pipeline.go @@ -20,14 +20,13 @@ package v1alpha1 import ( "context" - "time" v1alpha1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" scheme "github.com/numaproj/numaflow/pkg/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // PipelinesGetter has a method to return a PipelineInterface. @@ -40,6 +39,7 @@ type PipelinesGetter interface { type PipelineInterface interface { Create(ctx context.Context, pipeline *v1alpha1.Pipeline, opts v1.CreateOptions) (*v1alpha1.Pipeline, error) Update(ctx context.Context, pipeline *v1alpha1.Pipeline, opts v1.UpdateOptions) (*v1alpha1.Pipeline, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, pipeline *v1alpha1.Pipeline, opts v1.UpdateOptions) (*v1alpha1.Pipeline, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -52,144 +52,18 @@ type PipelineInterface interface { // pipelines implements PipelineInterface type pipelines struct { - client rest.Interface - ns string + *gentype.ClientWithList[*v1alpha1.Pipeline, *v1alpha1.PipelineList] } // newPipelines returns a Pipelines func newPipelines(c *NumaflowV1alpha1Client, namespace string) *pipelines { return &pipelines{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithList[*v1alpha1.Pipeline, *v1alpha1.PipelineList]( + "pipelines", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1alpha1.Pipeline { return &v1alpha1.Pipeline{} }, + func() *v1alpha1.PipelineList { return &v1alpha1.PipelineList{} }), } } - -// Get takes name of the pipeline, and returns the corresponding pipeline object, and an error if there is any. -func (c *pipelines) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Pipeline, err error) { - result = &v1alpha1.Pipeline{} - err = c.client.Get(). - Namespace(c.ns). - Resource("pipelines"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Pipelines that match those selectors. -func (c *pipelines) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PipelineList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.PipelineList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("pipelines"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). 
- Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested pipelines. -func (c *pipelines) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("pipelines"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a pipeline and creates it. Returns the server's representation of the pipeline, and an error, if there is any. -func (c *pipelines) Create(ctx context.Context, pipeline *v1alpha1.Pipeline, opts v1.CreateOptions) (result *v1alpha1.Pipeline, err error) { - result = &v1alpha1.Pipeline{} - err = c.client.Post(). - Namespace(c.ns). - Resource("pipelines"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(pipeline). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a pipeline and updates it. Returns the server's representation of the pipeline, and an error, if there is any. -func (c *pipelines) Update(ctx context.Context, pipeline *v1alpha1.Pipeline, opts v1.UpdateOptions) (result *v1alpha1.Pipeline, err error) { - result = &v1alpha1.Pipeline{} - err = c.client.Put(). - Namespace(c.ns). - Resource("pipelines"). - Name(pipeline.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(pipeline). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *pipelines) UpdateStatus(ctx context.Context, pipeline *v1alpha1.Pipeline, opts v1.UpdateOptions) (result *v1alpha1.Pipeline, err error) { - result = &v1alpha1.Pipeline{} - err = c.client.Put(). - Namespace(c.ns). - Resource("pipelines"). - Name(pipeline.Name). - SubResource("status"). 
- VersionedParams(&opts, scheme.ParameterCodec). - Body(pipeline). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the pipeline and deletes it. Returns an error if one occurs. -func (c *pipelines) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("pipelines"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *pipelines) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("pipelines"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched pipeline. -func (c *pipelines) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Pipeline, err error) { - result = &v1alpha1.Pipeline{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("pipelines"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/vertex.go b/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/vertex.go index 40647528a3..1e0d23ead0 100644 --- a/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/vertex.go +++ b/pkg/client/clientset/versioned/typed/numaflow/v1alpha1/vertex.go @@ -20,14 +20,13 @@ package v1alpha1 import ( "context" - "time" v1alpha1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" scheme "github.com/numaproj/numaflow/pkg/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // VerticesGetter has a method to return a VertexInterface. @@ -40,6 +39,7 @@ type VerticesGetter interface { type VertexInterface interface { Create(ctx context.Context, vertex *v1alpha1.Vertex, opts v1.CreateOptions) (*v1alpha1.Vertex, error) Update(ctx context.Context, vertex *v1alpha1.Vertex, opts v1.UpdateOptions) (*v1alpha1.Vertex, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, vertex *v1alpha1.Vertex, opts v1.UpdateOptions) (*v1alpha1.Vertex, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -52,144 +52,18 @@ type VertexInterface interface { // vertices implements VertexInterface type vertices struct { - client rest.Interface - ns string + *gentype.ClientWithList[*v1alpha1.Vertex, *v1alpha1.VertexList] } // newVertices returns a Vertices func newVertices(c *NumaflowV1alpha1Client, namespace string) *vertices { return &vertices{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithList[*v1alpha1.Vertex, *v1alpha1.VertexList]( + "vertices", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1alpha1.Vertex { return &v1alpha1.Vertex{} }, + func() *v1alpha1.VertexList { return &v1alpha1.VertexList{} }), } } - -// Get takes name of the vertex, and returns the corresponding vertex object, and an error if there is any. -func (c *vertices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Vertex, err error) { - result = &v1alpha1.Vertex{} - err = c.client.Get(). - Namespace(c.ns). - Resource("vertices"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Vertices that match those selectors. -func (c *vertices) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.VertexList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.VertexList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("vertices"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested vertices. 
-func (c *vertices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("vertices"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a vertex and creates it. Returns the server's representation of the vertex, and an error, if there is any. -func (c *vertices) Create(ctx context.Context, vertex *v1alpha1.Vertex, opts v1.CreateOptions) (result *v1alpha1.Vertex, err error) { - result = &v1alpha1.Vertex{} - err = c.client.Post(). - Namespace(c.ns). - Resource("vertices"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(vertex). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a vertex and updates it. Returns the server's representation of the vertex, and an error, if there is any. -func (c *vertices) Update(ctx context.Context, vertex *v1alpha1.Vertex, opts v1.UpdateOptions) (result *v1alpha1.Vertex, err error) { - result = &v1alpha1.Vertex{} - err = c.client.Put(). - Namespace(c.ns). - Resource("vertices"). - Name(vertex.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(vertex). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *vertices) UpdateStatus(ctx context.Context, vertex *v1alpha1.Vertex, opts v1.UpdateOptions) (result *v1alpha1.Vertex, err error) { - result = &v1alpha1.Vertex{} - err = c.client.Put(). - Namespace(c.ns). - Resource("vertices"). - Name(vertex.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(vertex). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the vertex and deletes it. 
Returns an error if one occurs. -func (c *vertices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("vertices"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *vertices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("vertices"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched vertex. -func (c *vertices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Vertex, err error) { - result = &v1alpha1.Vertex{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("vertices"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/pkg/client/informers/externalversions/factory.go b/pkg/client/informers/externalversions/factory.go index d548e28e55..28d151107d 100644 --- a/pkg/client/informers/externalversions/factory.go +++ b/pkg/client/informers/externalversions/factory.go @@ -228,6 +228,7 @@ type SharedInformerFactory interface { // Start initializes all requested informers. They are handled in goroutines // which run until the stop channel gets closed. + // Warning: Start does not block. When run in a go-routine, it will race with a later WaitForCacheSync. Start(stopCh <-chan struct{}) // Shutdown marks a factory as shutting down. 
At that point no new diff --git a/pkg/client/listers/numaflow/v1alpha1/interstepbufferservice.go b/pkg/client/listers/numaflow/v1alpha1/interstepbufferservice.go index 427b1af0ba..fce8b32e66 100644 --- a/pkg/client/listers/numaflow/v1alpha1/interstepbufferservice.go +++ b/pkg/client/listers/numaflow/v1alpha1/interstepbufferservice.go @@ -20,8 +20,8 @@ package v1alpha1 import ( v1alpha1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type InterStepBufferServiceLister interface { // interStepBufferServiceLister implements the InterStepBufferServiceLister interface. type interStepBufferServiceLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha1.InterStepBufferService] } // NewInterStepBufferServiceLister returns a new InterStepBufferServiceLister. func NewInterStepBufferServiceLister(indexer cache.Indexer) InterStepBufferServiceLister { - return &interStepBufferServiceLister{indexer: indexer} -} - -// List lists all InterStepBufferServices in the indexer. -func (s *interStepBufferServiceLister) List(selector labels.Selector) (ret []*v1alpha1.InterStepBufferService, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.InterStepBufferService)) - }) - return ret, err + return &interStepBufferServiceLister{listers.New[*v1alpha1.InterStepBufferService](indexer, v1alpha1.Resource("interstepbufferservice"))} } // InterStepBufferServices returns an object that can list and get InterStepBufferServices. 
func (s *interStepBufferServiceLister) InterStepBufferServices(namespace string) InterStepBufferServiceNamespaceLister { - return interStepBufferServiceNamespaceLister{indexer: s.indexer, namespace: namespace} + return interStepBufferServiceNamespaceLister{listers.NewNamespaced[*v1alpha1.InterStepBufferService](s.ResourceIndexer, namespace)} } // InterStepBufferServiceNamespaceLister helps list and get InterStepBufferServices. @@ -74,26 +66,5 @@ type InterStepBufferServiceNamespaceLister interface { // interStepBufferServiceNamespaceLister implements the InterStepBufferServiceNamespaceLister // interface. type interStepBufferServiceNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all InterStepBufferServices in the indexer for a given namespace. -func (s interStepBufferServiceNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.InterStepBufferService, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.InterStepBufferService)) - }) - return ret, err -} - -// Get retrieves the InterStepBufferService from the indexer for a given namespace and name. 
-func (s interStepBufferServiceNamespaceLister) Get(name string) (*v1alpha1.InterStepBufferService, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("interstepbufferservice"), name) - } - return obj.(*v1alpha1.InterStepBufferService), nil + listers.ResourceIndexer[*v1alpha1.InterStepBufferService] } diff --git a/pkg/client/listers/numaflow/v1alpha1/monovertex.go b/pkg/client/listers/numaflow/v1alpha1/monovertex.go index a120405445..e5f3949295 100644 --- a/pkg/client/listers/numaflow/v1alpha1/monovertex.go +++ b/pkg/client/listers/numaflow/v1alpha1/monovertex.go @@ -20,8 +20,8 @@ package v1alpha1 import ( v1alpha1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type MonoVertexLister interface { // monoVertexLister implements the MonoVertexLister interface. type monoVertexLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha1.MonoVertex] } // NewMonoVertexLister returns a new MonoVertexLister. func NewMonoVertexLister(indexer cache.Indexer) MonoVertexLister { - return &monoVertexLister{indexer: indexer} -} - -// List lists all MonoVertices in the indexer. -func (s *monoVertexLister) List(selector labels.Selector) (ret []*v1alpha1.MonoVertex, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.MonoVertex)) - }) - return ret, err + return &monoVertexLister{listers.New[*v1alpha1.MonoVertex](indexer, v1alpha1.Resource("monovertex"))} } // MonoVertices returns an object that can list and get MonoVertices. 
func (s *monoVertexLister) MonoVertices(namespace string) MonoVertexNamespaceLister { - return monoVertexNamespaceLister{indexer: s.indexer, namespace: namespace} + return monoVertexNamespaceLister{listers.NewNamespaced[*v1alpha1.MonoVertex](s.ResourceIndexer, namespace)} } // MonoVertexNamespaceLister helps list and get MonoVertices. @@ -74,26 +66,5 @@ type MonoVertexNamespaceLister interface { // monoVertexNamespaceLister implements the MonoVertexNamespaceLister // interface. type monoVertexNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all MonoVertices in the indexer for a given namespace. -func (s monoVertexNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.MonoVertex, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.MonoVertex)) - }) - return ret, err -} - -// Get retrieves the MonoVertex from the indexer for a given namespace and name. -func (s monoVertexNamespaceLister) Get(name string) (*v1alpha1.MonoVertex, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("monovertex"), name) - } - return obj.(*v1alpha1.MonoVertex), nil + listers.ResourceIndexer[*v1alpha1.MonoVertex] } diff --git a/pkg/client/listers/numaflow/v1alpha1/pipeline.go b/pkg/client/listers/numaflow/v1alpha1/pipeline.go index 7514e2ce59..4b0d1368ee 100644 --- a/pkg/client/listers/numaflow/v1alpha1/pipeline.go +++ b/pkg/client/listers/numaflow/v1alpha1/pipeline.go @@ -20,8 +20,8 @@ package v1alpha1 import ( v1alpha1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type PipelineLister interface { // pipelineLister implements the PipelineLister interface. 
type pipelineLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha1.Pipeline] } // NewPipelineLister returns a new PipelineLister. func NewPipelineLister(indexer cache.Indexer) PipelineLister { - return &pipelineLister{indexer: indexer} -} - -// List lists all Pipelines in the indexer. -func (s *pipelineLister) List(selector labels.Selector) (ret []*v1alpha1.Pipeline, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.Pipeline)) - }) - return ret, err + return &pipelineLister{listers.New[*v1alpha1.Pipeline](indexer, v1alpha1.Resource("pipeline"))} } // Pipelines returns an object that can list and get Pipelines. func (s *pipelineLister) Pipelines(namespace string) PipelineNamespaceLister { - return pipelineNamespaceLister{indexer: s.indexer, namespace: namespace} + return pipelineNamespaceLister{listers.NewNamespaced[*v1alpha1.Pipeline](s.ResourceIndexer, namespace)} } // PipelineNamespaceLister helps list and get Pipelines. @@ -74,26 +66,5 @@ type PipelineNamespaceLister interface { // pipelineNamespaceLister implements the PipelineNamespaceLister // interface. type pipelineNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Pipelines in the indexer for a given namespace. -func (s pipelineNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Pipeline, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.Pipeline)) - }) - return ret, err -} - -// Get retrieves the Pipeline from the indexer for a given namespace and name. 
-func (s pipelineNamespaceLister) Get(name string) (*v1alpha1.Pipeline, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("pipeline"), name) - } - return obj.(*v1alpha1.Pipeline), nil + listers.ResourceIndexer[*v1alpha1.Pipeline] } diff --git a/pkg/client/listers/numaflow/v1alpha1/vertex.go b/pkg/client/listers/numaflow/v1alpha1/vertex.go index d055a85e3b..021ecd5952 100644 --- a/pkg/client/listers/numaflow/v1alpha1/vertex.go +++ b/pkg/client/listers/numaflow/v1alpha1/vertex.go @@ -20,8 +20,8 @@ package v1alpha1 import ( v1alpha1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type VertexLister interface { // vertexLister implements the VertexLister interface. type vertexLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha1.Vertex] } // NewVertexLister returns a new VertexLister. func NewVertexLister(indexer cache.Indexer) VertexLister { - return &vertexLister{indexer: indexer} -} - -// List lists all Vertices in the indexer. -func (s *vertexLister) List(selector labels.Selector) (ret []*v1alpha1.Vertex, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.Vertex)) - }) - return ret, err + return &vertexLister{listers.New[*v1alpha1.Vertex](indexer, v1alpha1.Resource("vertex"))} } // Vertices returns an object that can list and get Vertices. func (s *vertexLister) Vertices(namespace string) VertexNamespaceLister { - return vertexNamespaceLister{indexer: s.indexer, namespace: namespace} + return vertexNamespaceLister{listers.NewNamespaced[*v1alpha1.Vertex](s.ResourceIndexer, namespace)} } // VertexNamespaceLister helps list and get Vertices. 
@@ -74,26 +66,5 @@ type VertexNamespaceLister interface { // vertexNamespaceLister implements the VertexNamespaceLister // interface. type vertexNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Vertices in the indexer for a given namespace. -func (s vertexNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Vertex, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.Vertex)) - }) - return ret, err -} - -// Get retrieves the Vertex from the indexer for a given namespace and name. -func (s vertexNamespaceLister) Get(name string) (*v1alpha1.Vertex, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("vertex"), name) - } - return obj.(*v1alpha1.Vertex), nil + listers.ResourceIndexer[*v1alpha1.Vertex] } diff --git a/pkg/reconciler/cmd/start.go b/pkg/reconciler/cmd/start.go index 0e5f86542c..0b0df1847d 100644 --- a/pkg/reconciler/cmd/start.go +++ b/pkg/reconciler/cmd/start.go @@ -136,31 +136,32 @@ func Start(namespaced bool, managedNamespace string) { } // Watch ISB Services - if err := isbSvcController.Watch(source.Kind(mgr.GetCache(), &dfv1.InterStepBufferService{}), &handler.EnqueueRequestForObject{}, - predicate.Or( - predicate.GenerationChangedPredicate{}, predicate.LabelChangedPredicate{}, - )); err != nil { + if err := isbSvcController.Watch(source.Kind(mgr.GetCache(), &dfv1.InterStepBufferService{}, &handler.TypedEnqueueRequestForObject[*dfv1.InterStepBufferService]{}, + predicate.Or[*dfv1.InterStepBufferService]( + predicate.TypedGenerationChangedPredicate[*dfv1.InterStepBufferService]{}, + predicate.TypedLabelChangedPredicate[*dfv1.InterStepBufferService]{}, + ))); err != nil { logger.Fatalw("Unable to watch InterStepBuffer", zap.Error(err)) } // Watch ConfigMaps with ResourceVersion changes, and 
enqueue owning InterStepBuffer key - if err := isbSvcController.Watch(source.Kind(mgr.GetCache(), &corev1.ConfigMap{}), - handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &dfv1.InterStepBufferService{}, handler.OnlyControllerOwner()), - predicate.ResourceVersionChangedPredicate{}); err != nil { + if err := isbSvcController.Watch(source.Kind(mgr.GetCache(), &corev1.ConfigMap{}, + handler.TypedEnqueueRequestForOwner[*corev1.ConfigMap](mgr.GetScheme(), mgr.GetRESTMapper(), &dfv1.InterStepBufferService{}, handler.OnlyControllerOwner()), + predicate.TypedResourceVersionChangedPredicate[*corev1.ConfigMap]{})); err != nil { logger.Fatalw("Unable to watch ConfigMaps", zap.Error(err)) } // Watch StatefulSets with Generation changes, and enqueue owning InterStepBuffer key - if err := isbSvcController.Watch(source.Kind(mgr.GetCache(), &appv1.StatefulSet{}), - handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &dfv1.InterStepBufferService{}, handler.OnlyControllerOwner()), - predicate.ResourceVersionChangedPredicate{}); err != nil { + if err := isbSvcController.Watch(source.Kind(mgr.GetCache(), &appv1.StatefulSet{}, + handler.TypedEnqueueRequestForOwner[*appv1.StatefulSet](mgr.GetScheme(), mgr.GetRESTMapper(), &dfv1.InterStepBufferService{}, handler.OnlyControllerOwner()), + predicate.TypedResourceVersionChangedPredicate[*appv1.StatefulSet]{})); err != nil { logger.Fatalw("Unable to watch StatefulSets", zap.Error(err)) } // Watch Services with ResourceVersion changes, and enqueue owning InterStepBuffer key - if err := isbSvcController.Watch(source.Kind(mgr.GetCache(), &corev1.Service{}), - handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &dfv1.InterStepBufferService{}, handler.OnlyControllerOwner()), - predicate.ResourceVersionChangedPredicate{}); err != nil { + if err := isbSvcController.Watch(source.Kind(mgr.GetCache(), &corev1.Service{}, + handler.TypedEnqueueRequestForOwner[*corev1.Service](mgr.GetScheme(), 
mgr.GetRESTMapper(), &dfv1.InterStepBufferService{}, handler.OnlyControllerOwner()), + predicate.TypedResourceVersionChangedPredicate[*corev1.Service]{})); err != nil { logger.Fatalw("Unable to watch Services", zap.Error(err)) } @@ -173,33 +174,32 @@ func Start(namespaced bool, managedNamespace string) { } // Watch Pipelines - if err := pipelineController.Watch(source.Kind(mgr.GetCache(), &dfv1.Pipeline{}), &handler.EnqueueRequestForObject{}, - predicate.Or( - predicate.GenerationChangedPredicate{}, predicate.LabelChangedPredicate{}, - )); err != nil { + if err := pipelineController.Watch(source.Kind(mgr.GetCache(), &dfv1.Pipeline{}, &handler.TypedEnqueueRequestForObject[*dfv1.Pipeline]{}, + predicate.Or[*dfv1.Pipeline]( + predicate.TypedGenerationChangedPredicate[*dfv1.Pipeline]{}, + predicate.TypedLabelChangedPredicate[*dfv1.Pipeline]{}, + ))); err != nil { logger.Fatalw("Unable to watch Pipelines", zap.Error(err)) } // Watch Vertices with Generation changes (excluding scaling up/down) - if err := pipelineController.Watch(source.Kind(mgr.GetCache(), &dfv1.Vertex{}), - handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &dfv1.Pipeline{}, handler.OnlyControllerOwner()), - predicate.And( - predicate.ResourceVersionChangedPredicate{}, - )); err != nil { + if err := pipelineController.Watch(source.Kind(mgr.GetCache(), &dfv1.Vertex{}, + handler.TypedEnqueueRequestForOwner[*dfv1.Vertex](mgr.GetScheme(), mgr.GetRESTMapper(), &dfv1.Pipeline{}, handler.OnlyControllerOwner()), + predicate.TypedResourceVersionChangedPredicate[*dfv1.Vertex]{})); err != nil { logger.Fatalw("Unable to watch Vertices", zap.Error(err)) } // Watch Services with ResourceVersion changes - if err := pipelineController.Watch(source.Kind(mgr.GetCache(), &corev1.Service{}), - handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &dfv1.Pipeline{}, handler.OnlyControllerOwner()), - predicate.ResourceVersionChangedPredicate{}); err != nil { + if err := 
pipelineController.Watch(source.Kind(mgr.GetCache(), &corev1.Service{}, + handler.TypedEnqueueRequestForOwner[*corev1.Service](mgr.GetScheme(), mgr.GetRESTMapper(), &dfv1.Pipeline{}, handler.OnlyControllerOwner()), + predicate.TypedResourceVersionChangedPredicate[*corev1.Service]{})); err != nil { logger.Fatalw("Unable to watch Services", zap.Error(err)) } // Watch Deployments changes - if err := pipelineController.Watch(source.Kind(mgr.GetCache(), &appv1.Deployment{}), - handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &dfv1.Pipeline{}, handler.OnlyControllerOwner()), - predicate.ResourceVersionChangedPredicate{}); err != nil { + if err := pipelineController.Watch(source.Kind(mgr.GetCache(), &appv1.Deployment{}, + handler.TypedEnqueueRequestForOwner[*appv1.Deployment](mgr.GetScheme(), mgr.GetRESTMapper(), &dfv1.Pipeline{}, handler.OnlyControllerOwner()), + predicate.TypedResourceVersionChangedPredicate[*appv1.Deployment]{})); err != nil { logger.Fatalw("Unable to watch Deployments", zap.Error(err)) } @@ -213,27 +213,28 @@ func Start(namespaced bool, managedNamespace string) { } // Watch Vertices - if err := vertexController.Watch(source.Kind(mgr.GetCache(), &dfv1.Vertex{}), &handler.EnqueueRequestForObject{}, - predicate.Or( - predicate.GenerationChangedPredicate{}, predicate.LabelChangedPredicate{}, - )); err != nil { + if err := vertexController.Watch(source.Kind(mgr.GetCache(), &dfv1.Vertex{}, &handler.TypedEnqueueRequestForObject[*dfv1.Vertex]{}, + predicate.Or[*dfv1.Vertex]( + predicate.TypedGenerationChangedPredicate[*dfv1.Vertex]{}, + predicate.TypedLabelChangedPredicate[*dfv1.Vertex]{}, + ))); err != nil { logger.Fatalw("Unable to watch Vertices", zap.Error(err)) } // Watch Pods - if err := vertexController.Watch(source.Kind(mgr.GetCache(), &corev1.Pod{}), - handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &dfv1.Vertex{}, handler.OnlyControllerOwner()), - predicate.ResourceVersionChangedPredicate{}, - 
predicate.Funcs{ - CreateFunc: func(event.CreateEvent) bool { return false }, // Do not watch pod create events - }); err != nil { + if err := vertexController.Watch(source.Kind(mgr.GetCache(), &corev1.Pod{}, + handler.TypedEnqueueRequestForOwner[*corev1.Pod](mgr.GetScheme(), mgr.GetRESTMapper(), &dfv1.Vertex{}, handler.OnlyControllerOwner()), + predicate.TypedResourceVersionChangedPredicate[*corev1.Pod]{}, + predicate.TypedFuncs[*corev1.Pod]{ + CreateFunc: func(event.TypedCreateEvent[*corev1.Pod]) bool { return false }, // Do not watch pod create events + })); err != nil { logger.Fatalw("Unable to watch Pods", zap.Error(err)) } // Watch Services with ResourceVersion changes - if err := vertexController.Watch(source.Kind(mgr.GetCache(), &corev1.Service{}), - handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &dfv1.Vertex{}, handler.OnlyControllerOwner()), - predicate.ResourceVersionChangedPredicate{}); err != nil { + if err := vertexController.Watch(source.Kind(mgr.GetCache(), &corev1.Service{}, + handler.TypedEnqueueRequestForOwner[*corev1.Service](mgr.GetScheme(), mgr.GetRESTMapper(), &dfv1.Vertex{}, handler.OnlyControllerOwner()), + predicate.TypedResourceVersionChangedPredicate[*corev1.Service]{})); err != nil { logger.Fatalw("Unable to watch Services", zap.Error(err)) } @@ -247,34 +248,35 @@ func Start(namespaced bool, managedNamespace string) { } // Watch MonoVertices - if err := monoVertexController.Watch(source.Kind(mgr.GetCache(), &dfv1.MonoVertex{}), &handler.EnqueueRequestForObject{}, - predicate.Or( - predicate.GenerationChangedPredicate{}, predicate.LabelChangedPredicate{}, - )); err != nil { + if err := monoVertexController.Watch(source.Kind(mgr.GetCache(), &dfv1.MonoVertex{}, &handler.TypedEnqueueRequestForObject[*dfv1.MonoVertex]{}, + predicate.Or[*dfv1.MonoVertex]( + predicate.TypedGenerationChangedPredicate[*dfv1.MonoVertex]{}, + predicate.TypedLabelChangedPredicate[*dfv1.MonoVertex]{}, + ))); err != nil { logger.Fatalw("Unable 
to watch MonoVertices", zap.Error(err)) } // Watch Pods - if err := monoVertexController.Watch(source.Kind(mgr.GetCache(), &corev1.Pod{}), - handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &dfv1.MonoVertex{}, handler.OnlyControllerOwner()), - predicate.ResourceVersionChangedPredicate{}, - predicate.Funcs{ - CreateFunc: func(event.CreateEvent) bool { return false }, // Do not watch pod create events - }); err != nil { + if err := monoVertexController.Watch(source.Kind(mgr.GetCache(), &corev1.Pod{}, + handler.TypedEnqueueRequestForOwner[*corev1.Pod](mgr.GetScheme(), mgr.GetRESTMapper(), &dfv1.MonoVertex{}, handler.OnlyControllerOwner()), + predicate.TypedResourceVersionChangedPredicate[*corev1.Pod]{}, + predicate.TypedFuncs[*corev1.Pod]{ + CreateFunc: func(event.TypedCreateEvent[*corev1.Pod]) bool { return false }, // Do not watch pod create events + })); err != nil { logger.Fatalw("Unable to watch Pods", zap.Error(err)) } // Watch Services with ResourceVersion changes - if err := monoVertexController.Watch(source.Kind(mgr.GetCache(), &corev1.Service{}), - handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &dfv1.MonoVertex{}, handler.OnlyControllerOwner()), - predicate.ResourceVersionChangedPredicate{}); err != nil { + if err := monoVertexController.Watch(source.Kind(mgr.GetCache(), &corev1.Service{}, + handler.TypedEnqueueRequestForOwner[*corev1.Service](mgr.GetScheme(), mgr.GetRESTMapper(), &dfv1.MonoVertex{}, handler.OnlyControllerOwner()), + predicate.TypedResourceVersionChangedPredicate[*corev1.Service]{})); err != nil { logger.Fatalw("Unable to watch Services", zap.Error(err)) } // Watch Deployments changes - if err := monoVertexController.Watch(source.Kind(mgr.GetCache(), &appv1.Deployment{}), - handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &dfv1.MonoVertex{}, handler.OnlyControllerOwner()), - predicate.ResourceVersionChangedPredicate{}); err != nil { + if err := 
monoVertexController.Watch(source.Kind(mgr.GetCache(), &appv1.Deployment{}, + handler.TypedEnqueueRequestForOwner[*appv1.Deployment](mgr.GetScheme(), mgr.GetRESTMapper(), &dfv1.MonoVertex{}, handler.OnlyControllerOwner()), + predicate.TypedResourceVersionChangedPredicate[*appv1.Deployment]{})); err != nil { logger.Fatalw("Unable to watch Deployments", zap.Error(err)) } diff --git a/rust/Cargo.lock b/rust/Cargo.lock index 9837be100c..5d2d27d111 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -4,9 +4,9 @@ version = 3 [[package]] name = "addr2line" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5fb1d8e4442bd405fdfd1dacb42792696b0cf9cb15882e5d097b742a676d375" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" dependencies = [ "gimli", ] @@ -71,7 +71,7 @@ dependencies = [ "regex", "ring", "rustls-native-certs 0.7.3", - "rustls-pemfile 2.1.3", + "rustls-pemfile 2.2.0", "rustls-webpki 0.102.8", "serde", "serde_json", @@ -88,9 +88,9 @@ dependencies = [ [[package]] name = "async-stream" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" dependencies = [ "async-stream-impl", "futures-core", @@ -99,9 +99,9 @@ dependencies = [ [[package]] name = "async-stream-impl" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", @@ -127,15 +127,15 @@ checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" [[package]] name = "autocfg" -version = "1.3.0" +version = "1.4.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "aws-lc-rs" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f95446d919226d587817a7d21379e6eb099b97b45110a7f272a444ca5c54070" +checksum = "cdd82dba44d209fddb11c190e0a94b78651f95299598e472215667417a03ff1d" dependencies = [ "aws-lc-sys", "mirai-annotations", @@ -145,9 +145,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.21.2" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3ddc4a5b231dd6958b140ff3151b6412b3f4321fab354f399eec8f14b06df62" +checksum = "df7a4168111d7eb622a31b214057b8509c0a7e1794f44c546d742330dc793972" dependencies = [ "bindgen", "cc", @@ -160,9 +160,9 @@ dependencies = [ [[package]] name = "axum" -version = "0.7.6" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f43644eed690f5374f1af436ecd6aea01cd201f6fbdf0178adaf6907afb2cec" +checksum = "504e3947307ac8326a5437504c517c4b56716c9d98fac0028c2acc7ca47d70ae" dependencies = [ "async-trait", "axum-core", @@ -194,9 +194,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e6b8ba012a258d63c9adfa28b9ddcf66149da6f986c5b5452e629d5ee64bf00" +checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" dependencies = [ "async-trait", "bytes", @@ -239,8 +239,8 @@ dependencies = [ "hyper 1.4.1", "hyper-util", "pin-project-lite", - "rustls 0.23.13", - "rustls-pemfile 2.1.3", + "rustls 0.23.14", + "rustls-pemfile 2.2.0", "rustls-pki-types", "tokio", "tokio-rustls 0.26.0", @@ -359,9 +359,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.21" +version = "1.1.26" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07b1695e2c7e8fc85310cde85aeaab7e3097f593c91d209d3f9df76c928100f0" +checksum = "c4cbd4ab9fef358caa9c599eae3105af638ead5fb47a718315d8e03c852b9f0d" dependencies = [ "jobserver", "libc", @@ -706,9 +706,9 @@ checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" [[package]] name = "futures" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" dependencies = [ "futures-channel", "futures-core", @@ -721,9 +721,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", @@ -731,15 +731,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" dependencies = [ "futures-core", "futures-task", @@ -748,15 +748,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-macro" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", @@ -765,21 +765,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-util" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures-channel", "futures-core", @@ -816,9 +816,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.31.0" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32085ea23f3234fc7846555e85283ba4de91e21016dc0455a16286d87a292d64" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] name = "glob" @@ -838,7 +838,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.5.0", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -857,7 +857,7 @@ 
dependencies = [ "futures-core", "futures-sink", "http 1.1.0", - "indexmap 2.5.0", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -878,9 +878,9 @@ checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" [[package]] name = "hashbrown" -version = "0.14.5" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" [[package]] name = "headers" @@ -985,9 +985,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.9.4" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" +checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" [[package]] name = "httpdate" @@ -1085,7 +1085,7 @@ dependencies = [ "hyper 1.4.1", "hyper-util", "log", - "rustls 0.23.13", + "rustls 0.23.14", "rustls-native-certs 0.8.0", "rustls-pki-types", "tokio", @@ -1171,19 +1171,19 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" +checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "equivalent", - "hashbrown 0.14.5", + "hashbrown 0.15.0", ] [[package]] name = "ipnet" -version = "2.10.0" +version = "2.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "187674a687eed5fe42285b40c6291f9a01517d415fad1c3cbc6a9f778af7fcd4" +checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" [[package]] name = "itertools" @@ -1255,9 +1255,9 @@ dependencies = [ [[package]] name = "k8s-openapi" -version = "0.22.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "19501afb943ae5806548bc3ebd7f3374153ca057a38f480ef30adfde5ef09755" +checksum = "9c8847402328d8301354c94d605481f25a6bdc1ed65471fd96af8eca71141b13" dependencies = [ "base64 0.22.1", "chrono", @@ -1268,9 +1268,9 @@ dependencies = [ [[package]] name = "kube" -version = "0.94.2" +version = "0.95.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52ace78a62b361077505f2950bd48aa3e46596fb15350c9c993de15ddfa3cac5" +checksum = "fa21063c854820a77c5d7f8deeb7ffa55246d8304e4bcd8cce2956752c6604f8" dependencies = [ "k8s-openapi", "kube-client", @@ -1279,9 +1279,9 @@ dependencies = [ [[package]] name = "kube-client" -version = "0.94.2" +version = "0.95.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18ec0fcafd3add30b413b096a61d69b0a37f94d3f95b6f505a57ea3d27cec2a7" +checksum = "31c2355f5c9d8a11900e71a6fe1e47abd5ec45bf971eb4b162ffe97b46db9bb7" dependencies = [ "base64 0.22.1", "bytes", @@ -1301,8 +1301,8 @@ dependencies = [ "k8s-openapi", "kube-core", "pem", - "rustls 0.23.13", - "rustls-pemfile 2.1.3", + "rustls 0.23.14", + "rustls-pemfile 2.2.0", "secrecy", "serde", "serde_json", @@ -1317,9 +1317,9 @@ dependencies = [ [[package]] name = "kube-core" -version = "0.94.2" +version = "0.95.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a50c095f051dada37740d883b6d47ad0430e95082140718073b773c8a70f231c" +checksum = "f3030bd91c9db544a50247e7d48d7db9cf633c172732dce13351854526b1e666" dependencies = [ "chrono", "form_urlencoded", @@ -1597,7 +1597,7 @@ dependencies = [ "prost", "prost-types", "rcgen", - "rustls 0.23.13", + "rustls 0.23.14", "semver", "serde", "serde_json", @@ -1631,18 +1631,18 @@ dependencies = [ [[package]] name = "object" -version = "0.36.4" +version = "0.36.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "084f1a5821ac4c651660a94a7153d27ac9d8a53736203f58b31945ded098070a" +checksum = 
"aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.19.0" +version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" +checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" [[package]] name = "openssl-probe" @@ -1798,23 +1798,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", - "indexmap 2.5.0", + "indexmap 2.6.0", ] [[package]] name = "pin-project" -version = "1.1.5" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" +checksum = "baf123a161dde1e524adf36f90bc5d8d3462824a9c43553ad07a8183161189ec" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.5" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" +checksum = "a4502d8515ca9f32f1fb543d987f63d95a14934883db45bdb48060b6b69257f8" dependencies = [ "proc-macro2", "quote", @@ -1845,9 +1845,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d30538d42559de6b034bc76fd6dd4c38961b1ee5c6c56e3808c50128fdbc22ce" +checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2" [[package]] name = "powerfmt" @@ -1970,7 +1970,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.0.0", - "rustls 0.23.13", + "rustls 0.23.14", "socket2", "thiserror", "tokio", @@ -1987,7 +1987,7 @@ dependencies = [ "rand", "ring", "rustc-hash 2.0.0", - "rustls 0.23.13", + "rustls 0.23.14", "slab", 
"thiserror", "tinyvec", @@ -2086,23 +2086,23 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.5" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62871f2d65009c0256aed1b9cfeeb8ac272833c404e13d53d400cd0dad7a2ac0" +checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" dependencies = [ "bitflags 2.6.0", ] [[package]] name = "regex" -version = "1.10.6" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" +checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.7", - "regex-syntax 0.8.4", + "regex-automata 0.4.8", + "regex-syntax 0.8.5", ] [[package]] @@ -2116,13 +2116,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" +checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.4", + "regex-syntax 0.8.5", ] [[package]] @@ -2133,9 +2133,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reqwest" @@ -2181,9 +2181,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.7" +version = "0.12.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8f4955649ef5c38cc7f9e8aa41761d48fb9677197daea9984dc54f56aad5e63" +checksum = 
"f713147fbe92361e52392c73b8c9e48c04c6625bce969ef54dc901e58e042a7b" dependencies = [ "base64 0.22.1", "bytes", @@ -2203,8 +2203,8 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls 0.23.13", - "rustls-pemfile 2.1.3", + "rustls 0.23.14", + "rustls-pemfile 2.2.0", "rustls-pki-types", "serde", "serde_json", @@ -2312,9 +2312,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.13" +version = "0.23.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dabaac7466917e566adb06783a81ca48944c6898a1b08b9374106dd671f4c8" +checksum = "415d9944693cb90382053259f89fbb077ea730ad7273047ec63b19bc9b160ba8" dependencies = [ "aws-lc-rs", "log", @@ -2333,7 +2333,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5bfb394eeed242e909609f56089eecfe5fda225042e8b171791b9c95f5931e5" dependencies = [ "openssl-probe", - "rustls-pemfile 2.1.3", + "rustls-pemfile 2.2.0", "rustls-pki-types", "schannel", "security-framework", @@ -2346,7 +2346,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcaf18a4f2be7326cd874a5fa579fae794320a0f388d365dca7e480e55f83f8a" dependencies = [ "openssl-probe", - "rustls-pemfile 2.1.3", + "rustls-pemfile 2.2.0", "rustls-pki-types", "schannel", "security-framework", @@ -2363,19 +2363,18 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "2.1.3" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" dependencies = [ - "base64 0.22.1", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" +checksum = 
"0e696e35370c65c9c541198af4543ccd580cf17fc25d8e05c5a242b202488c55" [[package]] name = "rustls-webpki" @@ -2549,9 +2548,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb5b1b31579f3811bf615c144393417496f152e12ac8b7663bf664f4a815306d" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" dependencies = [ "serde", ] @@ -2574,7 +2573,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "itoa", "ryu", "serde", @@ -2586,7 +2585,7 @@ name = "servesink" version = "0.1.0" dependencies = [ "numaflow 0.1.1", - "reqwest 0.12.7", + "reqwest 0.12.8", "tokio", "tonic", "tracing", @@ -2747,9 +2746,9 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "syn" -version = "2.0.77" +version = "2.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed" +checksum = "89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590" dependencies = [ "proc-macro2", "quote", @@ -2794,9 +2793,9 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.12.0" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" +checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" dependencies = [ "cfg-if", "fastrand", @@ -2946,7 +2945,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.13", + "rustls 0.23.14", "rustls-pki-types", "tokio", ] @@ -3002,7 +3001,7 @@ 
version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "serde", "serde_spanned", "toml_datetime", @@ -3041,13 +3040,14 @@ dependencies = [ [[package]] name = "tonic-build" -version = "0.12.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe4ee8877250136bd7e3d2331632810a4df4ea5e004656990d8d66d2f5ee8a67" +checksum = "9557ce109ea773b399c9b9e5dca39294110b74f1f342cb347a80d1fce8c26a11" dependencies = [ "prettyplease", "proc-macro2", "prost-build", + "prost-types", "quote", "syn", ] @@ -3218,9 +3218,9 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "ucd-trie" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" +checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" [[package]] name = "unicase" @@ -3233,9 +3233,9 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" +checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" [[package]] name = "unicode-ident" @@ -3645,9 +3645,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.6.19" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c52ac009d615e79296318c1bcce2d422aaca15ad08515e344feeda07df67a587" +checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" dependencies = [ "memchr", ] diff --git a/rust/numaflow-core/Cargo.toml b/rust/numaflow-core/Cargo.toml index 
c94af812a9..0060ae0ebf 100644 --- a/rust/numaflow-core/Cargo.toml +++ b/rust/numaflow-core/Cargo.toml @@ -32,7 +32,7 @@ pep440_rs = "0.6.6" backoff = { path = "../backoff" } parking_lot = "0.12.3" prometheus-client = "0.22.3" -kube = "0.94.0" +kube = "0.95.0" log = "0.4.22" [dev-dependencies] diff --git a/rust/numaflow-models/Cargo.toml b/rust/numaflow-models/Cargo.toml index ecc0592efe..e1189ef665 100644 --- a/rust/numaflow-models/Cargo.toml +++ b/rust/numaflow-models/Cargo.toml @@ -7,8 +7,8 @@ license = "Apache-2.0 license" edition = "2021" [dependencies] -k8s-openapi = { version = "0.22.0", features = ["v1_29"] } -kube = "0.94.0" +k8s-openapi = { version = "0.23.0", features = ["v1_31"] } +kube = "0.95.0" serde = "^1.0" serde_derive = "^1.0" serde_json = "^1.0" diff --git a/rust/numaflow-models/templates/Cargo.mustache b/rust/numaflow-models/templates/Cargo.mustache index ecc0592efe..e1189ef665 100644 --- a/rust/numaflow-models/templates/Cargo.mustache +++ b/rust/numaflow-models/templates/Cargo.mustache @@ -7,8 +7,8 @@ license = "Apache-2.0 license" edition = "2021" [dependencies] -k8s-openapi = { version = "0.22.0", features = ["v1_29"] } -kube = "0.94.0" +k8s-openapi = { version = "0.23.0", features = ["v1_31"] } +kube = "0.95.0" serde = "^1.0" serde_derive = "^1.0" serde_json = "^1.0" From 3182db3af3a13ace6e7c2cb26d3be9df04173a5a Mon Sep 17 00:00:00 2001 From: Vigith Maurice Date: Sun, 6 Oct 2024 21:38:04 -0700 Subject: [PATCH 089/188] feat: generator based on ticker (#2126) Signed-off-by: Vigith Maurice --- rust/Cargo.lock | 2 + rust/numaflow-core/Cargo.toml | 2 + rust/numaflow-core/src/monovertex.rs | 10 + rust/numaflow-core/src/source.rs | 2 + rust/numaflow-core/src/source/generator.rs | 355 +++++++++++++++++++++ 5 files changed, 371 insertions(+) create mode 100644 rust/numaflow-core/src/source/generator.rs diff --git a/rust/Cargo.lock b/rust/Cargo.lock index 5d2d27d111..e3b1ececab 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -1586,6 +1586,7 @@ 
dependencies = [ "base64 0.22.1", "bytes", "chrono", + "futures", "hyper-util", "kube", "log", @@ -1593,6 +1594,7 @@ dependencies = [ "numaflow-models", "parking_lot", "pep440_rs", + "pin-project", "prometheus-client", "prost", "prost-types", diff --git a/rust/numaflow-core/Cargo.toml b/rust/numaflow-core/Cargo.toml index 0060ae0ebf..9766ffc4cc 100644 --- a/rust/numaflow-core/Cargo.toml +++ b/rust/numaflow-core/Cargo.toml @@ -34,6 +34,8 @@ parking_lot = "0.12.3" prometheus-client = "0.22.3" kube = "0.95.0" log = "0.4.22" +futures = "0.3.30" +pin-project = "1.1.5" [dev-dependencies] tempfile = "3.11.0" diff --git a/rust/numaflow-core/src/monovertex.rs b/rust/numaflow-core/src/monovertex.rs index 601957a94b..61eab72749 100644 --- a/rust/numaflow-core/src/monovertex.rs +++ b/rust/numaflow-core/src/monovertex.rs @@ -3,6 +3,7 @@ use crate::error; use crate::shared::utils; use crate::shared::utils::create_rpc_channel; use crate::sink::user_defined::SinkWriter; +use crate::source::generator; use crate::source::user_defined::new_source; use crate::transformer::user_defined::SourceTransformer; use forwarder::ForwarderBuilder; @@ -10,6 +11,7 @@ use metrics::MetricsState; use sink_pb::sink_client::SinkClient; use source_pb::source_client::SourceClient; use sourcetransform_pb::source_transform_client::SourceTransformClient; +use std::time::Duration; use tokio::signal; use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; @@ -102,6 +104,14 @@ async fn start_forwarder(cln_token: CancellationToken, sdk_config: SDKConfig) -> ) .await?; + // FIXME: use me and use me right :) + let _ = generator::new_generator( + bytes::Bytes::from("fix me"), + 1, + 10, + Duration::from_millis(1000), + ); + let mut source_grpc_client = SourceClient::new(create_rpc_channel(sdk_config.source_socket_path.into()).await?) 
.max_encoding_message_size(sdk_config.grpc_max_message_size) diff --git a/rust/numaflow-core/src/source.rs b/rust/numaflow-core/src/source.rs index 787f122dd7..32fea1c0ad 100644 --- a/rust/numaflow-core/src/source.rs +++ b/rust/numaflow-core/src/source.rs @@ -5,6 +5,8 @@ use crate::message::{Message, Offset}; /// [User-Defined Source]: https://numaflow.numaproj.io/user-guide/sources/user-defined-sources/ pub(crate) mod user_defined; +pub(crate) mod generator; + /// Set of Read related items that has to be implemented to become a Source. pub(crate) trait SourceReader { #[allow(dead_code)] diff --git a/rust/numaflow-core/src/source/generator.rs b/rust/numaflow-core/src/source/generator.rs new file mode 100644 index 0000000000..294f612f4f --- /dev/null +++ b/rust/numaflow-core/src/source/generator.rs @@ -0,0 +1,355 @@ +use crate::message::{Message, Offset}; +use crate::reader; +use crate::source; +use bytes::Bytes; +use futures::StreamExt; +use std::time::Duration; + +/// Stream Generator returns a set of messages for every `.next` call. It will throttle itself if +/// the call exceeds the RPU. It will return a max (batch size, RPU) till the quota for that unit of +/// time is over. If `.next` is called after the quota is over, it will park itself so that it won't +/// return more than the RPU. Once parked, it will unpark itself and return as soon as the next poll +/// happens. +/// We skip the missed ticks because there is no point to give a burst, most likely that burst cannot +/// be absorbed. +/// ```text +/// Ticks: | 1 | 2 | 3 | 4 | 5 | 6 | +/// =========================================================================> time +/// Read RPU=5: | :xxx:xx: | :xxx |:xxx:xx:| :xxx:xx: | :xxx:xx: | +/// 2 batches only 1 batch (no reread) 5 5 5 +/// +/// ``` +/// NOTE: The minimum granularity of duration is 10ms. 
+mod stream_generator { + use bytes::Bytes; + use futures::Stream; + use pin_project::pin_project; + use std::pin::Pin; + use std::task::{Context, Poll}; + use std::time::Duration; + use tokio::time::{Instant, MissedTickBehavior}; + + #[pin_project] + pub(super) struct StreamGenerator { + /// the content generated by Generator. + content: Bytes, + /// requests per unit of time-period. + rpu: usize, + /// batch size per read + batch: usize, + /// unit of time-period over which the [rpu] is defined. + unit: Duration, + /// the amount of credits used for the current time-period. + /// remaining = (rpu - used) for that time-period + used: usize, + /// the last time we generated data. now() - prev_time is compared against the duration. + prev_time: Instant, + #[pin] + tick: tokio::time::Interval, + } + + impl StreamGenerator { + pub(super) fn new(content: Bytes, rpu: usize, batch: usize, unit: Duration) -> Self { + let mut tick = tokio::time::interval(unit); + tick.set_missed_tick_behavior(MissedTickBehavior::Skip); + + // set default value of the unit to 10ms, + // and subtract 5ms due to timer precision constraints + let unit = (if unit < Duration::from_millis(10) { + Duration::from_millis(10) + } else { + unit + }) + .checked_sub(Duration::from_millis(5)) + .expect("there is +-5ms precision, so unit > 5ms"); + + Self { + content, + rpu, + // batch cannot > rpu + batch: if batch > rpu { rpu } else { batch }, + unit, + used: 0, + prev_time: Instant::now().checked_sub(unit).unwrap(), + tick, + } + } + } + + impl Stream for StreamGenerator { + type Item = Vec; + + fn poll_next( + mut self: Pin<&mut StreamGenerator>, + cx: &mut Context<'_>, + ) -> Poll> { + let this = self.as_mut().project(); + + // Calculate the elapsed time since the last poll + let elapsed = this.prev_time.elapsed(); + + // we can return the complete batch if enough time has passed, with a precision +- 5ms + if elapsed >= *this.unit { + // Reset the timer + *this.prev_time = Instant::now(); + + // 
Generate data that equals to batch data + let data = vec![this.content.clone(); *this.batch]; + // reset used quota + *this.used = *this.batch; + + Poll::Ready(Some(data)) + } else if this.used < this.rpu { + // even if enough time hasn't passed, we can still send data if we have + // quota (rpu - used) left + + // make sure we do not send more than desired + let to_send = if *this.rpu - *this.used < *this.batch { + *this.rpu - *this.used + } else { + *this.batch + }; + + // update the counters + *this.used += to_send; + + Poll::Ready(Some(vec![this.content.clone(); to_send])) + } else { + // we have to wait for the next tick because we are out of quota + let mut tick = this.tick; + match tick.poll_tick(cx) { + // we can recurse ourselves to return data since enough time has passed + Poll::Ready(_) => { + // recursively call the poll_next since we are ready to serve + self.poll_next(cx) + } + Poll::Pending => Poll::Pending, + } + } + } + + /// size is roughly what is remaining and upper bound is for sure RPU. This is a very + /// rough approximation because Duration is not taken into account for the lower bound. 
+ fn size_hint(&self) -> (usize, Option) { + (self.rpu - self.used, Some(self.rpu)) + } + } + + #[cfg(test)] + mod tests { + use super::*; + use futures::StreamExt; + + #[tokio::test] + async fn test_stream_generator() { + // Define the content to be generated + let content = Bytes::from("test_data"); + // Define requests per unit (rpu), batch size, and time unit + let rpu = 10; + let batch = 6; + let unit = Duration::from_millis(100); + + // Create a new StreamGenerator + let mut stream_generator = StreamGenerator::new(content.clone(), rpu, batch, unit); + + // Collect the first batch of data + let first_batch = stream_generator.next().await.unwrap(); + assert_eq!(first_batch.len(), batch); + for item in first_batch { + assert_eq!(item, content); + } + + // Collect the second batch of data + let second_batch = stream_generator.next().await.unwrap(); + assert_eq!(second_batch.len(), rpu - batch); + for item in second_batch { + assert_eq!(item, content); + } + + // no there is no more data left in the quota + let size = stream_generator.size_hint(); + assert_eq!(size.0, 0); + assert_eq!(size.1, Some(rpu)); + + let third_batch = stream_generator.next().await.unwrap(); + assert_eq!(third_batch.len(), 6); + for item in third_batch { + assert_eq!(item, content); + } + + // we should now have data + let size = stream_generator.size_hint(); + assert_eq!(size.0, 4); + assert_eq!(size.1, Some(rpu)); + } + } +} + +/// Creates a new generator and returns all the necessary implementation of the Source trait. +/// Generator Source is mainly used for development purpose, where you want to have self-contained +/// source to generate some messages. We mainly use generator for load testing and integration +/// testing of Numaflow. The load generated is per replica. 
+pub(crate) fn new_generator( + content: Bytes, + rpu: usize, + batch: usize, + unit: Duration, +) -> crate::Result<(GeneratorRead, GeneratorAck, GeneratorLagReader)> { + let gen_read = GeneratorRead::new(content, rpu, batch, unit); + let gen_ack = GeneratorAck::new(); + let gen_lag_reader = GeneratorLagReader::new(); + + Ok((gen_read, gen_ack, gen_lag_reader)) +} + +pub(crate) struct GeneratorRead { + stream_generator: stream_generator::StreamGenerator, +} + +impl GeneratorRead { + /// A new [GeneratorRead] is returned. It takes a static content, requests per unit-time, batch size + /// to return per [source::SourceReader::read], and the unit-time as duration. + fn new(content: Bytes, rpu: usize, batch: usize, unit: Duration) -> Self { + let stream_generator = stream_generator::StreamGenerator::new(content, rpu, batch, unit); + Self { stream_generator } + } +} + +impl source::SourceReader for GeneratorRead { + fn name(&self) -> &'static str { + "generator" + } + + async fn read(&mut self) -> crate::error::Result> { + match self.stream_generator.next().await { + None => { + panic!("Stream generator has stopped"); + } + Some(data) => Ok(data + .iter() + .map(|msg| { + // FIXME: better id? + let id = chrono::Utc::now() + .timestamp_nanos_opt() + .unwrap_or_default() + .to_string(); + + Message { + keys: vec![], + value: msg.clone().to_vec(), + // FIXME: better offset? 
+ offset: Offset { + offset: id.clone(), + partition_id: 0, + }, + event_time: Default::default(), + id, + headers: Default::default(), + } + }) + .collect::>()), + } + } + + fn partitions(&self) -> Vec { + todo!() + } +} + +pub(crate) struct GeneratorAck {} + +impl GeneratorAck { + fn new() -> Self { + Self {} + } +} + +impl source::SourceAcker for GeneratorAck { + async fn ack(&mut self, _: Vec) -> crate::error::Result<()> { + Ok(()) + } +} + +#[derive(Clone)] +pub(crate) struct GeneratorLagReader {} + +impl GeneratorLagReader { + fn new() -> Self { + Self {} + } +} + +impl reader::LagReader for GeneratorLagReader { + async fn pending(&mut self) -> crate::error::Result> { + // Generator is not meant to auto-scale. + Ok(None) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::reader::LagReader; + use crate::source::{SourceAcker, SourceReader}; + use tokio::time::Duration; + + #[tokio::test] + async fn test_generator_read() { + // Define the content to be generated + let content = Bytes::from("test_data"); + // Define requests per unit (rpu), batch size, and time unit + let rpu = 10; + let batch = 5; + let unit = Duration::from_millis(100); + + // Create a new Generator + let mut generator = GeneratorRead::new(content.clone(), rpu, batch, unit); + + // Read the first batch of messages + let messages = generator.read().await.unwrap(); + assert_eq!(messages.len(), batch); + + // Verify that each message has the expected structure + + // Read the second batch of messages + let messages = generator.read().await.unwrap(); + assert_eq!(messages.len(), rpu - batch); + } + + #[tokio::test] + async fn test_generator_lag_pending() { + // Create a new GeneratorLagReader + let mut lag_reader = GeneratorLagReader::new(); + + // Call the pending method and check the result + let pending_result = lag_reader.pending().await; + + // Assert that the result is Ok(None) + assert!(pending_result.is_ok()); + assert_eq!(pending_result.unwrap(), None); + } + + #[tokio::test] 
+ async fn test_generator_ack() { + // Create a new GeneratorAck instance + let mut generator_ack = GeneratorAck::new(); + + // Create a vector of offsets to acknowledge + let offsets = vec![ + Offset { + offset: "offset1".to_string(), + partition_id: 0, + }, + Offset { + offset: "offset2".to_string(), + partition_id: 1, + }, + ]; + + // Call the ack method and check the result + let ack_result = generator_ack.ack(offsets).await; + + // Assert that the result is Ok(()) + assert!(ack_result.is_ok()); + } +} From 8b22359c56fa4b2c008ab3fb19282186b72d00e0 Mon Sep 17 00:00:00 2001 From: Sidhant Kohli Date: Mon, 7 Oct 2024 09:40:51 -0700 Subject: [PATCH 090/188] chore: enable python source e2e (#2116) Signed-off-by: Sidhant Kohli --- test/udsource-e2e/udsource_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/udsource-e2e/udsource_test.go b/test/udsource-e2e/udsource_test.go index 8d3de49c74..fe7edf56e7 100644 --- a/test/udsource-e2e/udsource_test.go +++ b/test/udsource-e2e/udsource_test.go @@ -59,7 +59,7 @@ func (s *UserDefinedSourceSuite) TestUDSource() { wg.Add(4) go func() { defer wg.Done() - // s.testSimpleSourcePython() // FIXME: python udsource + s.testSimpleSourcePython() }() go func() { defer wg.Done() From fcef50536cf85373e0ba8ac5162a27fd4e58db5f Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Mon, 7 Oct 2024 18:34:11 -0700 Subject: [PATCH 091/188] refactor: generate static gRPC clients (#2128) --- .codecov.yml | 1 + Dockerfile | 3 +- Makefile | 6 +- hack/generate-proto.sh | 28 +- hack/library.sh | 60 ++- hack/update-api-docs.sh | 101 ++-- rust/Cargo.lock | 13 +- rust/Cargo.toml | 2 + rust/Makefile | 9 +- rust/numaflow-core/Cargo.toml | 3 +- rust/numaflow-core/build.rs | 13 - rust/numaflow-core/proto/sink.proto | 78 --- rust/numaflow-core/proto/source.proto | 188 ------- .../numaflow-core/proto/sourcetransform.proto | 66 --- rust/numaflow-core/src/message.rs | 31 +- rust/numaflow-core/src/monovertex.rs | 18 +- 
.../numaflow-core/src/monovertex/forwarder.rs | 8 +- rust/numaflow-core/src/monovertex/metrics.rs | 6 +- rust/numaflow-core/src/shared/utils.rs | 6 +- rust/numaflow-core/src/sink/user_defined.rs | 8 +- rust/numaflow-core/src/source/user_defined.rs | 16 +- .../src/transformer/user_defined.rs | 8 +- rust/numaflow-grpc/Cargo.toml | 14 + rust/numaflow-grpc/Makefile | 14 + rust/numaflow-grpc/codegen.sh | 19 + rust/numaflow-grpc/src/clients.rs | 31 ++ rust/numaflow-grpc/src/clients/map.v1.rs | 176 +++++++ .../numaflow-grpc/src/clients/mapstream.v1.rs | 185 +++++++ rust/numaflow-grpc/src/clients/reduce.v1.rs | 261 ++++++++++ .../src/clients/sessionreduce.v1.rs | 277 ++++++++++ .../numaflow-grpc/src/clients/sideinput.v1.rs | 169 ++++++ rust/numaflow-grpc/src/clients/sink.v1.rs | 246 +++++++++ rust/numaflow-grpc/src/clients/source.v1.rs | 486 ++++++++++++++++++ .../src/clients/sourcetransformer.v1.rs | 225 ++++++++ rust/numaflow-grpc/src/lib.rs | 1 + rust/numaflow-grpc/src/main.rs | 20 + 36 files changed, 2321 insertions(+), 475 deletions(-) delete mode 100644 rust/numaflow-core/build.rs delete mode 100644 rust/numaflow-core/proto/sink.proto delete mode 100644 rust/numaflow-core/proto/source.proto delete mode 100644 rust/numaflow-core/proto/sourcetransform.proto create mode 100644 rust/numaflow-grpc/Cargo.toml create mode 100644 rust/numaflow-grpc/Makefile create mode 100755 rust/numaflow-grpc/codegen.sh create mode 100644 rust/numaflow-grpc/src/clients.rs create mode 100644 rust/numaflow-grpc/src/clients/map.v1.rs create mode 100644 rust/numaflow-grpc/src/clients/mapstream.v1.rs create mode 100644 rust/numaflow-grpc/src/clients/reduce.v1.rs create mode 100644 rust/numaflow-grpc/src/clients/sessionreduce.v1.rs create mode 100644 rust/numaflow-grpc/src/clients/sideinput.v1.rs create mode 100644 rust/numaflow-grpc/src/clients/sink.v1.rs create mode 100644 rust/numaflow-grpc/src/clients/source.v1.rs create mode 100644 rust/numaflow-grpc/src/clients/sourcetransformer.v1.rs 
create mode 100644 rust/numaflow-grpc/src/lib.rs create mode 100644 rust/numaflow-grpc/src/main.rs diff --git a/.codecov.yml b/.codecov.yml index f4c0d21aa5..98604b458f 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -10,6 +10,7 @@ ignore: - "test/.*" - "rust/**/error.rs" - "rust/numaflow-models/**" # ignore generated files +- "rust/numaflow-grpc/**" # ignore generated files coverage: status: patch: off diff --git a/Dockerfile b/Dockerfile index efee5a8fd2..027dfa0376 100644 --- a/Dockerfile +++ b/Dockerfile @@ -23,7 +23,6 @@ ARG TARGETPLATFORM WORKDIR /numaflow RUN apt-get update && apt-get install -y protobuf-compiler - FROM chef AS planner COPY ./rust/ . RUN cargo chef prepare --recipe-path recipe.json @@ -90,4 +89,4 @@ RUN chmod +x /bin/e2eapi #################################################################################################### FROM scratch AS e2eapi COPY --from=testbase /bin/e2eapi . -ENTRYPOINT ["/e2eapi"] \ No newline at end of file +ENTRYPOINT ["/e2eapi"] diff --git a/Makefile b/Makefile index 27aa203aec..038e6d8e11 100644 --- a/Makefile +++ b/Makefile @@ -213,6 +213,10 @@ swagger: api/json-schema/schema.json: api/openapi-spec/swagger.json hack/json-schema/main.go go run ./hack/json-schema +.PHONY: rustgen +rustgen: + $(MAKE) --directory rust generate + .PHONY: codegen codegen: ./hack/generate-proto.sh @@ -223,7 +227,7 @@ codegen: $(MAKE) manifests rm -rf ./vendor go mod tidy - $(MAKE) --directory rust/numaflow-models generate + $(MAKE) rustgen clean: -rm -rf ${CURRENT_DIR}/dist diff --git a/hack/generate-proto.sh b/hack/generate-proto.sh index b3b8d71c97..ada70d6869 100755 --- a/hack/generate-proto.sh +++ b/hack/generate-proto.sh @@ -14,33 +14,7 @@ export GOPATH="${FAKE_GOPATH}" export PATH="${GOPATH}/bin:${PATH}" cd "${FAKE_REPOPATH}" -install-protobuf() { - # protobuf version - PROTOBUF_VERSION=27.2 - PB_REL="https://github.com/protocolbuffers/protobuf/releases" - OS=$(uname_os) - ARCH=$(uname_arch) - - echo "OS: $OS ARCH: $ARCH" - if [[ 
"$ARCH" = "amd64" ]]; then - ARCH="x86_64" - elif [[ "$ARCH" = "arm64" ]]; then - ARCH="aarch_64" - fi - BINARY_URL=$PB_REL/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-${OS}-${ARCH}.zip - if [[ "$OS" = "darwin" ]]; then - BINARY_URL=$PB_REL/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-osx-universal_binary.zip - fi - echo "Downloading $BINARY_URL" - - tmp=$(mktemp -d) - trap 'rm -rf ${tmp}' EXIT - - curl -sL -o ${tmp}/protoc-${PROTOBUF_VERSION}-${OS}-${ARCH}.zip $BINARY_URL - unzip ${tmp}/protoc-${PROTOBUF_VERSION}-${OS}-${ARCH}.zip -d ${GOPATH} -} - -install-protobuf +install-protobuf --install-dir ${GOPATH} go install -mod=vendor ./vendor/github.com/gogo/protobuf/protoc-gen-gogo go install -mod=vendor ./vendor/github.com/gogo/protobuf/protoc-gen-gogofast diff --git a/hack/library.sh b/hack/library.sh index 4b499dde19..f53ceda35e 100644 --- a/hack/library.sh +++ b/hack/library.sh @@ -59,7 +59,7 @@ uname_os() { mingw*) os="windows" ;; cygwin*) os="windows" ;; win*) os="windows" ;; - sunos) [ "$(uname -o)" = "illumos" ] && os=illumos ;; + sunos) [[ "$(uname -o)" = "illumos" ]] && os=illumos ;; esac echo "$os" } @@ -80,3 +80,61 @@ uname_arch() { esac echo "${arch}" } + +install-protobuf() { + local install_dir="" + while [[ "$#" -gt 0 ]]; do + case "$1" in + "--install-dir") + install_dir="$2" + shift 2 + ;; + *) + if [[ "$1" =~ ^-- ]]; then + echo "unknown argument: $1" >&2 + return 1 + fi + if [ -n "$install_dir" ]; then + echo "too many arguments: $1 (already have $install_dir)" >&2 + return 1 + fi + install_dir="$1" + shift + ;; + esac + done + + if [[ -z "${install_dir}" ]]; then + echo "install-dir argument is required" >&2 + return 1 + fi + + if [[ ! 
-d "${install_dir}" ]]; then + echo "${install_dir} does not exist" >&2 + return 1 + fi + + # protobuf version + local protobuf_version=27.2 + local pb_rel="https://github.com/protocolbuffers/protobuf/releases" + local os=$(uname_os) + local arch=$(uname_arch) + + echo "OS: $os ARCH: $arch" + if [[ "$arch" = "amd64" ]]; then + arch="x86_64" + elif [[ "$arch" = "arm64" ]]; then + arch="aarch_64" + fi + local binary_url=${pb_rel}/download/v${protobuf_version}/protoc-${protobuf_version}-${os}-${arch}.zip + if [[ "$os" = "darwin" ]]; then + binary_url=${pb_rel}/download/v${protobuf_version}/protoc-${protobuf_version}-osx-universal_binary.zip + fi + echo "Downloading $binary_url" + + tmp=$(mktemp -d) + curl -sL -o ${tmp}/protoc-${protobuf_version}-${os}-${arch}.zip $binary_url + unzip ${tmp}/protoc-${protobuf_version}-${os}-${arch}.zip -d ${install_dir} + rm -rf ${tmp} +} + diff --git a/hack/update-api-docs.sh b/hack/update-api-docs.sh index 6c7d7d089e..b2de22b7c3 100755 --- a/hack/update-api-docs.sh +++ b/hack/update-api-docs.sh @@ -24,57 +24,78 @@ go run ${FAKE_REPOPATH}/vendor/github.com/ahmetb/gen-crd-api-reference-docs/main -template-dir "${FAKE_REPOPATH}/hack/api-docs-template" install-pandoc() { - # pandoc version - PANDOC_VERSION=3.2.1 - - if [[ "`command -v pandoc`" != "" ]]; then - if [[ "`pandoc -v | head -1 | awk '{print $2}'`" != "${PANDOC_VERSION}" ]]; then - warning "Existing pandoc version does not match the requirement (${PANDOC_VERSION}), will download a new one..." 
- else - PANDOC_BINARY="`command -v pandoc`" - return - fi + local install_dir="" + while [[ "$#" -gt 0 ]]; do + case "$1" in + "--install-dir") + install_dir="$2" + shift 2 + ;; + *) + if [[ "$1" =~ ^-- ]]; then + echo "unknown argument: $1" >&2 + return 1 + fi + if [ -n "$install_dir" ]; then + echo "too many arguments: $1 (already have $install_dir)" >&2 + return 1 + fi + install_dir="$1" + shift + ;; + esac + done + + if [[ -z "${install_dir}" ]]; then + echo "install-dir argument is required" >&2 + return 1 fi - PD_REL="https://github.com/jgm/pandoc/releases" - OS=$(uname_os) - ARCH=$(uname_arch) - - echo "OS: $OS ARCH: $ARCH" + if [[ ! -d "${install_dir}" ]]; then + echo "${install_dir} does not exist" >&2 + return 1 + fi - BINARY_NAME="pandoc-${PANDOC_VERSION}-${OS}-${ARCH}.zip" - if [[ "$OS" = "darwin" ]]; then - if [[ "$ARCH" = "arm64" ]]; then - BINARY_NAME="pandoc-${PANDOC_VERSION}-arm64-macOS.zip" - elif [[ "$ARCH" = "amd64" ]]; then - BINARY_NAME="pandoc-${PANDOC_VERSION}-x86_64-macOS.zip" + # pandoc version + local pandoc_version=3.2.1 + local pd_rel="https://github.com/jgm/pandoc/releases" + local os=$(uname_os) + local arch=$(uname_arch) + + echo "OS: $os ARCH: $arch" + + local binary_name="pandoc-${pandoc_version}-${os}-${arch}.zip" + if [[ "$os" = "darwin" ]]; then + if [[ "$arch" = "arm64" ]]; then + binary_name="pandoc-${pandoc_version}-arm64-macOS.zip" + elif [[ "$arch" = "amd64" ]]; then + binary_name="pandoc-${pandoc_version}-x86_64-macOS.zip" fi - elif [[ "$OS" = "linux" ]]; then - if [[ "$ARCH" = "arm64" ]]; then - BINARY_NAME="pandoc-${PANDOC_VERSION}-linux-arm64.tar.gz" - elif [[ "$ARCH" = "amd64" ]]; then - BINARY_NAME="pandoc-${PANDOC_VERSION}-linux-amd64.tar.gz" + elif [[ "$os" = "linux" ]]; then + if [[ "$arch" = "arm64" ]]; then + binary_name="pandoc-${pandoc_version}-linux-arm64.tar.gz" + elif [[ "$arch" = "amd64" ]]; then + binary_name="pandoc-${pandoc_version}-linux-amd64.tar.gz" fi fi - 
BINARY_URL=$PD_REL/download/${PANDOC_VERSION}/${BINARY_NAME} - echo "Downloading $BINARY_URL" - - tmp=$(mktemp -d) - trap 'rm -rf ${tmp}' EXIT - - curl -sL -o ${tmp}/${BINARY_NAME} $BINARY_URL - if [[ "$BINARY_NAME" =~ .zip$ ]]; then - unzip ${tmp}/${BINARY_NAME} -d ${tmp} - for a in `ls -d -1 ${tmp}/* | grep pandoc | grep -v zip`; do mv $a/* ${tmp}; rmdir $a; done - elif [[ "$BINARY_NAME" =~ .tar.gz$ ]]; then - tar xvzf ${tmp}/${BINARY_NAME} --strip-components 1 -C ${tmp}/ + local binary_url=$pd_rel/download/${pandoc_version}/${binary_name} + echo "Downloading $binary_url" + + curl -sL -o ${tmp}/${binary_name} $binary_url + if [[ "$binary_name" =~ .zip$ ]]; then + unzip ${install_dir}/${binary_name} -d ${install_dir} + for a in `ls -d -1 ${install_dir}/* | grep pandoc | grep -v zip`; do mv $a/* ${install_dir}; rmdir $a; done + elif [[ "$binary_name" =~ .tar.gz$ ]]; then + tar xvzf ${install_dir}/${binary_name} --strip-components 1 -C ${install_dir}/ fi - PANDOC_BINARY="${tmp}/bin/pandoc" } -install-pandoc +tmp=$(mktemp -d) +install-pandoc ${tmp} +PANDOC_BINARY="${tmp}/bin/pandoc" ${PANDOC_BINARY} --from markdown --to gfm ${FAKE_REPOPATH}/docs/APIs.html > ${FAKE_REPOPATH}/docs/APIs.md +rm -rf ${tmp} rm ${FAKE_REPOPATH}/docs/APIs.html diff --git a/rust/Cargo.lock b/rust/Cargo.lock index e3b1ececab..8e6ff8c3d3 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -1547,6 +1547,7 @@ version = "0.1.0" dependencies = [ "backoff", "numaflow-core", + "numaflow-grpc", "servesink", "serving", "tokio", @@ -1591,6 +1592,7 @@ dependencies = [ "kube", "log", "numaflow 0.1.1", + "numaflow-grpc", "numaflow-models", "parking_lot", "pep440_rs", @@ -1609,7 +1611,6 @@ dependencies = [ "tokio-stream", "tokio-util", "tonic", - "tonic-build", "tower 0.4.13", "tracing", "tracing-subscriber", @@ -1617,6 +1618,16 @@ dependencies = [ "uuid", ] +[[package]] +name = "numaflow-grpc" +version = "0.1.0" +dependencies = [ + "prost", + "prost-types", + "tonic", + "tonic-build", +] + [[package]] 
name = "numaflow-models" version = "0.0.0-pre" diff --git a/rust/Cargo.toml b/rust/Cargo.toml index 54f4a91173..f273627ae5 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -4,6 +4,7 @@ workspace = { members = [ "servesink", "serving", "numaflow-core", + "numaflow-grpc", ] } [[bin]] @@ -22,5 +23,6 @@ backoff = { path = "backoff" } servesink = { path = "servesink" } serving = { path = "serving" } numaflow-core = { path = "numaflow-core" } +numaflow-grpc = { path = "numaflow-grpc" } tracing = "0.1.40" tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } diff --git a/rust/Makefile b/rust/Makefile index 761882a4ee..c2d29917b1 100644 --- a/rust/Makefile +++ b/rust/Makefile @@ -1,3 +1,10 @@ +SHELL:=/bin/bash + +.PHONY: generate +generate: + $(MAKE) --directory numaflow-models generate + $(MAKE) --directory numaflow-grpc generate + .PHONY: build build: cargo build --release @@ -12,4 +19,4 @@ all-tests: .PHONY: clean clean: - cargo clean \ No newline at end of file + cargo clean diff --git a/rust/numaflow-core/Cargo.toml b/rust/numaflow-core/Cargo.toml index 9766ffc4cc..28cc7007cf 100644 --- a/rust/numaflow-core/Cargo.toml +++ b/rust/numaflow-core/Cargo.toml @@ -23,6 +23,7 @@ tower = "0.4.13" uuid = { version = "1.10.0", features = ["v4"] } serde_json = "1.0.122" numaflow-models = { path = "../numaflow-models" } +numaflow-grpc = { path = "../numaflow-grpc" } trait-variant = "0.1.2" rcgen = "0.13.1" rustls = { version = "0.23.12", features = ["aws_lc_rs"] } @@ -42,4 +43,4 @@ tempfile = "3.11.0" numaflow = { git = "https://github.com/numaproj/numaflow-rs.git", rev = "30d8ce1972fd3f0c0b8059fee209516afeef0088" } [build-dependencies] -tonic-build = "0.12.1" + diff --git a/rust/numaflow-core/build.rs b/rust/numaflow-core/build.rs deleted file mode 100644 index fc30e6b678..0000000000 --- a/rust/numaflow-core/build.rs +++ /dev/null @@ -1,13 +0,0 @@ -fn main() { - tonic_build::configure() - .build_server(true) - .compile( - &[ - "proto/source.proto", - 
"proto/sourcetransform.proto", - "proto/sink.proto", - ], - &["proto"], - ) - .unwrap_or_else(|e| panic!("failed to compile the proto, {:?}", e)) -} diff --git a/rust/numaflow-core/proto/sink.proto b/rust/numaflow-core/proto/sink.proto deleted file mode 100644 index 300e570314..0000000000 --- a/rust/numaflow-core/proto/sink.proto +++ /dev/null @@ -1,78 +0,0 @@ -syntax = "proto3"; - -import "google/protobuf/empty.proto"; -import "google/protobuf/timestamp.proto"; - -package sink.v1; - -service Sink { - // SinkFn writes the request to a user defined sink. - rpc SinkFn(stream SinkRequest) returns (stream SinkResponse); - - // IsReady is the heartbeat endpoint for gRPC. - rpc IsReady(google.protobuf.Empty) returns (ReadyResponse); -} - -/** - * SinkRequest represents a request element. - */ -message SinkRequest { - message Request { - repeated string keys = 1; - bytes value = 2; - google.protobuf.Timestamp event_time = 3; - google.protobuf.Timestamp watermark = 4; - string id = 5; - map headers = 6; - } - message Status { - bool eot = 1; - } - // Required field indicating the request. - Request request = 1; - // Required field indicating the status of the request. - // If eot is set to true, it indicates the end of transmission. - Status status = 2; - // optional field indicating the handshake message. - optional Handshake handshake = 3; -} - -/* - * Handshake message between client and server to indicate the start of transmission. - */ -message Handshake { - // Required field indicating the start of transmission. - bool sot = 1; -} - -/** - * ReadyResponse is the health check result. - */ -message ReadyResponse { - bool ready = 1; -} - -/* - * Status is the status of the response. - */ -enum Status { - SUCCESS = 0; - FAILURE = 1; - FALLBACK = 2; -} - -/** - * SinkResponse is the individual response of each message written to the sink. - */ -message SinkResponse { - message Result { - // id is the ID of the message, can be used to uniquely identify the message. 
- string id = 1; - // status denotes the status of persisting to sink. It can be SUCCESS, FAILURE, or FALLBACK. - Status status = 2; - // err_msg is the error message, set it if success is set to false. - string err_msg = 3; - } - Result result = 1; - optional Handshake handshake = 2; -} \ No newline at end of file diff --git a/rust/numaflow-core/proto/source.proto b/rust/numaflow-core/proto/source.proto deleted file mode 100644 index 69ff154127..0000000000 --- a/rust/numaflow-core/proto/source.proto +++ /dev/null @@ -1,188 +0,0 @@ -syntax = "proto3"; - -import "google/protobuf/timestamp.proto"; -import "google/protobuf/empty.proto"; - -package source.v1; - -service Source { - // Read returns a stream of datum responses. - // The size of the returned responses is less than or equal to the num_records specified in each ReadRequest. - // If the request timeout is reached on the server side, the returned responses will contain all the datum that have been read (which could be an empty list). - // The server will continue to read and respond to subsequent ReadRequests until the client closes the stream. - // Once it has sent all the datum, the server will send a ReadResponse with the end of transmission flag set to true. - rpc ReadFn(stream ReadRequest) returns (stream ReadResponse); - - // AckFn acknowledges a stream of datum offsets. - // When AckFn is called, it implicitly indicates that the datum stream has been processed by the source vertex. - // The caller (numa) expects the AckFn to be successful, and it does not expect any errors. - // If there are some irrecoverable errors when the callee (UDSource) is processing the AckFn request, - // then it is best to crash because there are no other retry mechanisms possible. - // Clients sends n requests and expects n responses. - rpc AckFn(stream AckRequest) returns (stream AckResponse); - - // PendingFn returns the number of pending records at the user defined source. 
- rpc PendingFn(google.protobuf.Empty) returns (PendingResponse); - - // PartitionsFn returns the list of partitions for the user defined source. - rpc PartitionsFn(google.protobuf.Empty) returns (PartitionsResponse); - - // IsReady is the heartbeat endpoint for user defined source gRPC. - rpc IsReady(google.protobuf.Empty) returns (ReadyResponse); -} - -/* - * Handshake message between client and server to indicate the start of transmission. - */ -message Handshake { - // Required field indicating the start of transmission. - bool sot = 1; -} - -/* - * ReadRequest is the request for reading datum stream from user defined source. - */ -message ReadRequest { - message Request { - // Required field indicating the number of records to read. - uint64 num_records = 1; - // Required field indicating the request timeout in milliseconds. - // uint32 can represent 2^32 milliseconds, which is about 49 days. - // We don't use uint64 because time.Duration takes int64 as nano seconds. Using uint64 for milli will cause overflow. - uint32 timeout_in_ms = 2; - } - // Required field indicating the request. - Request request = 1; - optional Handshake handshake = 2; -} - -/* - * ReadResponse is the response for reading datum stream from user defined source. - */ -message ReadResponse { - message Result { - // Required field holding the payload of the datum. - bytes payload = 1; - // Required field indicating the offset information of the datum. - Offset offset = 2; - // Required field representing the time associated with each datum. It is used for watermarking. - google.protobuf.Timestamp event_time = 3; - // Optional list of keys associated with the datum. - // Key is the "key" attribute in (key,value) as in the map-reduce paradigm. - // We add this optional field to support the use case where the user defined source can provide keys for the datum. - // e.g. Kafka and Redis Stream message usually include information about the keys. 
- repeated string keys = 4; - // Optional list of headers associated with the datum. - // Headers are the metadata associated with the datum. - // e.g. Kafka and Redis Stream message usually include information about the headers. - map headers = 5; - } - message Status { - // Code to indicate the status of the response. - enum Code { - SUCCESS = 0; - FAILURE = 1; - } - - // Error to indicate the error type. If the code is FAILURE, then the error field will be populated. - enum Error { - UNACKED = 0; - OTHER = 1; - } - - // End of transmission flag. - bool eot = 1; - Code code = 2; - optional Error error = 3; - optional string msg = 4; - } - // Required field holding the result. - Result result = 1; - // Status of the response. Holds the end of transmission flag and the status code. - Status status = 2; - // Handshake message between client and server to indicate the start of transmission. - optional Handshake handshake = 3; -} - -/* - * AckRequest is the request for acknowledging datum. - * It takes a list of offsets to be acknowledged. - */ -message AckRequest { - message Request { - // Required field holding the offset to be acked - Offset offset = 1; - } - // Required field holding the request. The list will be ordered and will have the same order as the original Read response. - Request request = 1; - optional Handshake handshake = 2; -} - -/* - * AckResponse is the response for acknowledging datum. It contains one empty field confirming - * the batch of offsets that have been successfully acknowledged. The contract between client and server - * is that the server will only return the AckResponse if the ack request is successful. - * If the server hangs during the ack request, the client can decide to timeout and error out the data forwarder. - * The reason why we define such contract is that we always expect the server to be able to process the ack request. 
- * Client is expected to send the AckRequest to the server with offsets that are strictly - * corresponding to the previously read batch. If the client sends the AckRequest with offsets that are not, - * it is considered as a client error and the server will not return the AckResponse. - */ -message AckResponse { - message Result { - // Required field indicating the ack request is successful. - google.protobuf.Empty success = 1; - } - // Required field holding the result. - Result result = 1; - // Handshake message between client and server to indicate the start of transmission. - optional Handshake handshake = 2; -} - -/* - * ReadyResponse is the health check result for user defined source. - */ -message ReadyResponse { - // Required field holding the health check result. - bool ready = 1; -} - -/* - * PendingResponse is the response for the pending request. - */ -message PendingResponse { - message Result { - // Required field holding the number of pending records at the user defined source. - // A negative count indicates that the pending information is not available. - int64 count = 1; - } - // Required field holding the result. - Result result = 1; -} - -/* - * PartitionsResponse is the response for the partitions request. - */ -message PartitionsResponse { - message Result { - // Required field holding the list of partitions. - repeated int32 partitions = 1; - } - // Required field holding the result. - Result result = 1; -} - -/* - * Offset is the offset of the datum. - */ -message Offset { - // offset is the offset of the datum. This field is required. - // We define Offset as a byte array because different input data sources can have different representations for Offset. - // The only way to generalize it is to define it as a byte array, - // Such that we can let the UDSource to de-serialize the offset using its own interpretation logics. - bytes offset = 1; - // Optional partition_id indicates which partition of the source the datum belongs to. 
- // It is useful for sources that have multiple partitions. e.g. Kafka. - // If the partition_id is not specified, it is assumed that the source has a single partition. - int32 partition_id = 2; -} diff --git a/rust/numaflow-core/proto/sourcetransform.proto b/rust/numaflow-core/proto/sourcetransform.proto deleted file mode 100644 index 9d0a63a9dc..0000000000 --- a/rust/numaflow-core/proto/sourcetransform.proto +++ /dev/null @@ -1,66 +0,0 @@ -syntax = "proto3"; - -import "google/protobuf/timestamp.proto"; -import "google/protobuf/empty.proto"; - -package sourcetransformer.v1; - -service SourceTransform { - // SourceTransformFn applies a function to each request element. - // In addition to map function, SourceTransformFn also supports assigning a new event time to response. - // SourceTransformFn can be used only at source vertex by source data transformer. - rpc SourceTransformFn(stream SourceTransformRequest) returns (stream SourceTransformResponse); - - // IsReady is the heartbeat endpoint for gRPC. - rpc IsReady(google.protobuf.Empty) returns (ReadyResponse); -} - -/* - * Handshake message between client and server to indicate the start of transmission. - */ - message Handshake { - // Required field indicating the start of transmission. - bool sot = 1; -} - - -/** - * SourceTransformerRequest represents a request element. - */ -message SourceTransformRequest { - message Request { - repeated string keys = 1; - bytes value = 2; - google.protobuf.Timestamp event_time = 3; - google.protobuf.Timestamp watermark = 4; - map headers = 5; - // This ID is used to uniquely identify a transform request - string id = 6; - } - Request request = 1; - optional Handshake handshake = 2; -} - -/** - * SourceTransformerResponse represents a response element. 
- */ -message SourceTransformResponse { - message Result { - repeated string keys = 1; - bytes value = 2; - google.protobuf.Timestamp event_time = 3; - repeated string tags = 4; - } - repeated Result results = 1; - // This ID is used to refer the responses to the request it corresponds to. - string id = 2; - // Handshake message between client and server to indicate the start of transmission. - optional Handshake handshake = 3; -} - -/** - * ReadyResponse is the health check result. - */ -message ReadyResponse { - bool ready = 1; -} diff --git a/rust/numaflow-core/src/message.rs b/rust/numaflow-core/src/message.rs index 64f4079764..b3d1eab848 100644 --- a/rust/numaflow-core/src/message.rs +++ b/rust/numaflow-core/src/message.rs @@ -5,12 +5,11 @@ use base64::Engine; use chrono::{DateTime, Utc}; use crate::error::Error; -use crate::monovertex::sink_pb::sink_request::Request; -use crate::monovertex::sink_pb::SinkRequest; -use crate::monovertex::source_pb::{read_response, AckRequest}; -use crate::monovertex::sourcetransform_pb::SourceTransformRequest; -use crate::monovertex::{source_pb, sourcetransform_pb}; use crate::shared::utils::{prost_timestamp_from_utc, utc_from_timestamp}; +use numaflow_grpc::clients::sink::sink_request::Request; +use numaflow_grpc::clients::sink::SinkRequest; +use numaflow_grpc::clients::source::{read_response, AckRequest}; +use numaflow_grpc::clients::sourcetransformer::SourceTransformRequest; /// A message that is sent from the source to the sink. 
#[derive(Debug, Clone)] @@ -41,8 +40,8 @@ pub(crate) struct Offset { impl From for AckRequest { fn from(offset: Offset) -> Self { Self { - request: Some(source_pb::ack_request::Request { - offset: Some(source_pb::Offset { + request: Some(numaflow_grpc::clients::source::ack_request::Request { + offset: Some(numaflow_grpc::clients::source::Offset { offset: BASE64_STANDARD .decode(offset.offset) .expect("we control the encoding, so this should never fail"), @@ -58,14 +57,16 @@ impl From for AckRequest { impl From for SourceTransformRequest { fn from(message: Message) -> Self { Self { - request: Some(sourcetransform_pb::source_transform_request::Request { - id: message.id, - keys: message.keys, - value: message.value, - event_time: prost_timestamp_from_utc(message.event_time), - watermark: None, - headers: message.headers, - }), + request: Some( + numaflow_grpc::clients::sourcetransformer::source_transform_request::Request { + id: message.id, + keys: message.keys, + value: message.value, + event_time: prost_timestamp_from_utc(message.event_time), + watermark: None, + headers: message.headers, + }, + ), handshake: None, } } diff --git a/rust/numaflow-core/src/monovertex.rs b/rust/numaflow-core/src/monovertex.rs index 61eab72749..5e6c9a0b00 100644 --- a/rust/numaflow-core/src/monovertex.rs +++ b/rust/numaflow-core/src/monovertex.rs @@ -8,9 +8,9 @@ use crate::source::user_defined::new_source; use crate::transformer::user_defined::SourceTransformer; use forwarder::ForwarderBuilder; use metrics::MetricsState; -use sink_pb::sink_client::SinkClient; -use source_pb::source_client::SourceClient; -use sourcetransform_pb::source_transform_client::SourceTransformClient; +use numaflow_grpc::clients::sink::sink_client::SinkClient; +use numaflow_grpc::clients::source::source_client::SourceClient; +use numaflow_grpc::clients::sourcetransformer::source_transform_client::SourceTransformClient; use std::time::Duration; use tokio::signal; use tokio::task::JoinHandle; @@ -26,18 +26,6 @@ 
use tracing::info; mod forwarder; pub(crate) mod metrics; -pub(crate) mod source_pb { - tonic::include_proto!("source.v1"); -} - -pub(crate) mod sink_pb { - tonic::include_proto!("sink.v1"); -} - -pub(crate) mod sourcetransform_pb { - tonic::include_proto!("sourcetransformer.v1"); -} - pub async fn mono_vertex() -> error::Result<()> { let cln_token = CancellationToken::new(); let shutdown_cln_token = cln_token.clone(); diff --git a/rust/numaflow-core/src/monovertex/forwarder.rs b/rust/numaflow-core/src/monovertex/forwarder.rs index f84488907d..164864d185 100644 --- a/rust/numaflow-core/src/monovertex/forwarder.rs +++ b/rust/numaflow-core/src/monovertex/forwarder.rs @@ -10,10 +10,10 @@ use crate::error::Error; use crate::message::{Message, Offset}; use crate::monovertex::metrics; use crate::monovertex::metrics::forward_metrics; -use crate::monovertex::sink_pb::Status::{Failure, Fallback, Success}; use crate::sink::user_defined::SinkWriter; use crate::transformer::user_defined::SourceTransformer; use crate::{error, source}; +use numaflow_grpc::clients::sink::Status::{Failure, Fallback, Success}; /// Forwarder is responsible for reading messages from the source, applying transformation if /// transformer is present, writing the messages to the sink, and then acknowledging the messages @@ -548,9 +548,6 @@ mod tests { use crate::config::config; use crate::monovertex::forwarder::ForwarderBuilder; - use crate::monovertex::sink_pb::sink_client::SinkClient; - use crate::monovertex::source_pb::source_client::SourceClient; - use crate::monovertex::sourcetransform_pb::source_transform_client::SourceTransformClient; use crate::shared::utils::create_rpc_channel; use crate::sink::user_defined::SinkWriter; use crate::source::user_defined::new_source; @@ -558,6 +555,9 @@ mod tests { use chrono::Utc; use numaflow::source::{Message, Offset, SourceReadRequest}; use numaflow::{sink, source, sourcetransform}; + use numaflow_grpc::clients::sink::sink_client::SinkClient; + use 
numaflow_grpc::clients::source::source_client::SourceClient; + use numaflow_grpc::clients::sourcetransformer::source_transform_client::SourceTransformClient; use tokio::sync::mpsc; use tokio::sync::mpsc::Sender; use tokio_util::sync::CancellationToken; diff --git a/rust/numaflow-core/src/monovertex/metrics.rs b/rust/numaflow-core/src/monovertex/metrics.rs index bf6ee4d30b..0ed0e09680 100644 --- a/rust/numaflow-core/src/monovertex/metrics.rs +++ b/rust/numaflow-core/src/monovertex/metrics.rs @@ -25,10 +25,10 @@ use tracing::{debug, error, info}; use crate::config::config; use crate::error::Error; -use crate::monovertex::sink_pb::sink_client::SinkClient; -use crate::monovertex::source_pb::source_client::SourceClient; -use crate::monovertex::sourcetransform_pb::source_transform_client::SourceTransformClient; use crate::reader; +use numaflow_grpc::clients::sink::sink_client::SinkClient; +use numaflow_grpc::clients::source::source_client::SourceClient; +use numaflow_grpc::clients::sourcetransformer::source_transform_client::SourceTransformClient; // Define the labels for the metrics // Note: Please keep consistent with the definitions in MonoVertex daemon diff --git a/rust/numaflow-core/src/shared/utils.rs b/rust/numaflow-core/src/shared/utils.rs index 4068aaf9b6..23d1fc3eef 100644 --- a/rust/numaflow-core/src/shared/utils.rs +++ b/rust/numaflow-core/src/shared/utils.rs @@ -7,11 +7,11 @@ use crate::error::Error; use crate::monovertex::metrics::{ start_metrics_https_server, MetricsState, PendingReader, PendingReaderBuilder, }; -use crate::monovertex::sink_pb::sink_client::SinkClient; -use crate::monovertex::source_pb::source_client::SourceClient; -use crate::monovertex::sourcetransform_pb::source_transform_client::SourceTransformClient; use crate::shared::server_info; use crate::{error, reader}; +use numaflow_grpc::clients::sink::sink_client::SinkClient; +use numaflow_grpc::clients::source::source_client::SourceClient; +use 
numaflow_grpc::clients::sourcetransformer::source_transform_client::SourceTransformClient; use axum::http::Uri; use backoff::retry::Retry; diff --git a/rust/numaflow-core/src/sink/user_defined.rs b/rust/numaflow-core/src/sink/user_defined.rs index 0489bacd46..54ec4bc527 100644 --- a/rust/numaflow-core/src/sink/user_defined.rs +++ b/rust/numaflow-core/src/sink/user_defined.rs @@ -1,9 +1,9 @@ use crate::error; use crate::error::Error; use crate::message::Message; -use crate::monovertex::sink_pb::sink_client::SinkClient; -use crate::monovertex::sink_pb::sink_request::Status; -use crate::monovertex::sink_pb::{Handshake, SinkRequest, SinkResponse}; +use numaflow_grpc::clients::sink::sink_client::SinkClient; +use numaflow_grpc::clients::sink::sink_request::Status; +use numaflow_grpc::clients::sink::{Handshake, SinkRequest, SinkResponse}; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; use tonic::transport::Channel; @@ -109,9 +109,9 @@ mod tests { use crate::error::Result; use crate::message::{Message, Offset}; - use crate::monovertex::sink_pb::sink_client::SinkClient; use crate::shared::utils::create_rpc_channel; use crate::sink::user_defined::SinkWriter; + use numaflow_grpc::clients::sink::sink_client::SinkClient; struct Logger; #[tonic::async_trait] diff --git a/rust/numaflow-core/src/source/user_defined.rs b/rust/numaflow-core/src/source/user_defined.rs index 67b77aa309..3eb0846667 100644 --- a/rust/numaflow-core/src/source/user_defined.rs +++ b/rust/numaflow-core/src/source/user_defined.rs @@ -2,13 +2,13 @@ use crate::config::config; use crate::error; use crate::error::Error::SourceError; use crate::message::{Message, Offset}; -use crate::monovertex::source_pb; -use crate::monovertex::source_pb::source_client::SourceClient; -use crate::monovertex::source_pb::{ - read_request, AckRequest, AckResponse, ReadRequest, ReadResponse, -}; use crate::reader::LagReader; use crate::source::{SourceAcker, SourceReader}; +use numaflow_grpc::clients::source; +use 
numaflow_grpc::clients::source::source_client::SourceClient; +use numaflow_grpc::clients::source::{ + read_request, AckRequest, AckResponse, ReadRequest, ReadResponse, +}; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; use tonic::transport::Channel; @@ -72,7 +72,7 @@ impl UserDefinedSourceRead { // do a handshake for read with the server before we start sending read requests let handshake_request = ReadRequest { request: None, - handshake: Some(source_pb::Handshake { sot: true }), + handshake: Some(source::Handshake { sot: true }), }; read_tx .send(handshake_request) @@ -157,7 +157,7 @@ impl UserDefinedSourceAck { // do a handshake for ack with the server before we start sending ack requests let ack_handshake_request = AckRequest { request: None, - handshake: Some(source_pb::Handshake { sot: true }), + handshake: Some(source::Handshake { sot: true }), }; ack_tx .send(ack_handshake_request) @@ -235,8 +235,8 @@ mod tests { use std::collections::HashSet; - use crate::monovertex::source_pb::source_client::SourceClient; use crate::shared::utils::create_rpc_channel; + use numaflow_grpc::clients::source::source_client::SourceClient; use chrono::Utc; use numaflow::source; diff --git a/rust/numaflow-core/src/transformer/user_defined.rs b/rust/numaflow-core/src/transformer/user_defined.rs index b2564b0e72..f06346053e 100644 --- a/rust/numaflow-core/src/transformer/user_defined.rs +++ b/rust/numaflow-core/src/transformer/user_defined.rs @@ -3,11 +3,11 @@ use std::collections::HashMap; use crate::config::config; use crate::error::{Error, Result}; use crate::message::{Message, Offset}; -use crate::monovertex::sourcetransform_pb::{ +use crate::shared::utils::utc_from_timestamp; +use numaflow_grpc::clients::sourcetransformer::{ self, source_transform_client::SourceTransformClient, SourceTransformRequest, SourceTransformResponse, }; -use crate::shared::utils::utc_from_timestamp; use tokio::sync::mpsc; use tokio::task::JoinHandle; use 
tokio_stream::wrappers::ReceiverStream; @@ -32,7 +32,7 @@ impl SourceTransformer { // do a handshake for read with the server before we start sending read requests let handshake_request = SourceTransformRequest { request: None, - handshake: Some(sourcetransform_pb::Handshake { sot: true }), + handshake: Some(sourcetransformer::Handshake { sot: true }), }; read_tx.send(handshake_request).await.map_err(|e| { Error::TransformerError(format!("failed to send handshake request: {}", e)) @@ -175,9 +175,9 @@ mod tests { use std::time::Duration; use crate::shared::utils::create_rpc_channel; - use crate::transformer::user_defined::sourcetransform_pb::source_transform_client::SourceTransformClient; use crate::transformer::user_defined::SourceTransformer; use numaflow::sourcetransform; + use numaflow_grpc::clients::sourcetransformer::source_transform_client::SourceTransformClient; use tempfile::TempDir; struct NowCat; diff --git a/rust/numaflow-grpc/Cargo.toml b/rust/numaflow-grpc/Cargo.toml new file mode 100644 index 0000000000..4b970d45c2 --- /dev/null +++ b/rust/numaflow-grpc/Cargo.toml @@ -0,0 +1,14 @@ +[[bin]] +name = "numaflow-grpc" +path = "src/main.rs" + +[package] +name = "numaflow-grpc" +version = "0.1.0" +edition = "2021" + +[dependencies] +tonic-build = "0.12.3" +prost = "0.13.2" +prost-types = "0.13.1" +tonic = "0.12.3" diff --git a/rust/numaflow-grpc/Makefile b/rust/numaflow-grpc/Makefile new file mode 100644 index 0000000000..369b0ff6f7 --- /dev/null +++ b/rust/numaflow-grpc/Makefile @@ -0,0 +1,14 @@ +SHELL:=/bin/bash + +clean: + rm -rf proto + +.PHONY: generate +generate: clean + rm -rf src/clients/*.rs + cp -r ../../pkg/apis/proto proto + mv src/clients.rs /tmp/clients.rs.bak + > src/clients.rs + -./codegen.sh + mv /tmp/clients.rs.bak src/clients.rs + $(MAKE) clean diff --git a/rust/numaflow-grpc/codegen.sh b/rust/numaflow-grpc/codegen.sh new file mode 100755 index 0000000000..0e0a1bc457 --- /dev/null +++ b/rust/numaflow-grpc/codegen.sh @@ -0,0 +1,19 @@ 
+#!/bin/bash + +set -o errexit +set -o nounset +set -o pipefail + +source $(dirname $0)/../../hack/library.sh +header "generating pb files" + +tmpath=$(mktemp -d) +trap 'rm -rf ${tmpath}' EXIT + +export PATH="${tmpath}/bin:${PATH}" + +install-protobuf --install-dir ${tmpath} + +RUST_BACKTRACE=1 cargo run + +cargo fmt diff --git a/rust/numaflow-grpc/src/clients.rs b/rust/numaflow-grpc/src/clients.rs new file mode 100644 index 0000000000..31c76191fa --- /dev/null +++ b/rust/numaflow-grpc/src/clients.rs @@ -0,0 +1,31 @@ +#[path = "clients/sourcetransformer.v1.rs"] +#[rustfmt::skip] +pub mod sourcetransformer; + +#[path = "clients/source.v1.rs"] +#[rustfmt::skip] +pub mod source; + +#[path = "clients/sink.v1.rs"] +#[rustfmt::skip] +pub mod sink; + +#[path = "clients/map.v1.rs"] +#[rustfmt::skip] +pub mod map; + +#[path = "clients/mapstream.v1.rs"] +#[rustfmt::skip] +pub mod mapstream; + +#[path = "clients/reduce.v1.rs"] +#[rustfmt::skip] +pub mod reduce; + +#[path = "clients/sessionreduce.v1.rs"] +#[rustfmt::skip] +pub mod sessionreduce; + +#[path = "clients/sideinput.v1.rs"] +#[rustfmt::skip] +pub mod sideinput; diff --git a/rust/numaflow-grpc/src/clients/map.v1.rs b/rust/numaflow-grpc/src/clients/map.v1.rs new file mode 100644 index 0000000000..e0016e1963 --- /dev/null +++ b/rust/numaflow-grpc/src/clients/map.v1.rs @@ -0,0 +1,176 @@ +// This file is @generated by prost-build. +/// * +/// MapRequest represents a request element. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MapRequest { + #[prost(string, repeated, tag = "1")] + pub keys: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + #[prost(bytes = "vec", tag = "2")] + pub value: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "3")] + pub event_time: ::core::option::Option<::prost_types::Timestamp>, + #[prost(message, optional, tag = "4")] + pub watermark: ::core::option::Option<::prost_types::Timestamp>, + #[prost(map = "string, string", tag = "5")] + pub headers: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, +} +/// * +/// MapResponse represents a response element. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MapResponse { + #[prost(message, repeated, tag = "1")] + pub results: ::prost::alloc::vec::Vec, +} +/// Nested message and enum types in `MapResponse`. +pub mod map_response { + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Result { + #[prost(string, repeated, tag = "1")] + pub keys: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + #[prost(bytes = "vec", tag = "2")] + pub value: ::prost::alloc::vec::Vec, + #[prost(string, repeated, tag = "3")] + pub tags: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + } +} +/// * +/// ReadyResponse is the health check result. +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct ReadyResponse { + #[prost(bool, tag = "1")] + pub ready: bool, +} +/// Generated client implementations. +pub mod map_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + #[derive(Debug, Clone)] + pub struct MapClient { + inner: tonic::client::Grpc, + } + impl MapClient { + /// Attempt to create a new client by connecting to a given endpoint. 
+ pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl MapClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> MapClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + MapClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// MapFn applies a function to each map request element. + pub async fn map_fn( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/map.v1.Map/MapFn"); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("map.v1.Map", "MapFn")); + self.inner.unary(req, path, codec).await + } + /// IsReady is the heartbeat endpoint for gRPC. + pub async fn is_ready( + &mut self, + request: impl tonic::IntoRequest<()>, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/map.v1.Map/IsReady"); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("map.v1.Map", "IsReady")); + self.inner.unary(req, path, codec).await + } + } +} diff --git a/rust/numaflow-grpc/src/clients/mapstream.v1.rs b/rust/numaflow-grpc/src/clients/mapstream.v1.rs new file mode 100644 index 0000000000..e37f47d4b2 --- /dev/null +++ b/rust/numaflow-grpc/src/clients/mapstream.v1.rs @@ -0,0 +1,185 @@ +// This file is @generated by prost-build. +/// * +/// MapStreamRequest represents a request element. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MapStreamRequest { + #[prost(string, repeated, tag = "1")] + pub keys: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + #[prost(bytes = "vec", tag = "2")] + pub value: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "3")] + pub event_time: ::core::option::Option<::prost_types::Timestamp>, + #[prost(message, optional, tag = "4")] + pub watermark: ::core::option::Option<::prost_types::Timestamp>, + #[prost(map = "string, string", tag = "5")] + pub headers: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, +} +/// * +/// MapStreamResponse represents a response element. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MapStreamResponse { + #[prost(message, optional, tag = "1")] + pub result: ::core::option::Option, +} +/// Nested message and enum types in `MapStreamResponse`. +pub mod map_stream_response { + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Result { + #[prost(string, repeated, tag = "1")] + pub keys: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + #[prost(bytes = "vec", tag = "2")] + pub value: ::prost::alloc::vec::Vec, + #[prost(string, repeated, tag = "3")] + pub tags: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + } +} +/// * +/// ReadyResponse is the health check result. +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct ReadyResponse { + #[prost(bool, tag = "1")] + pub ready: bool, +} +/// Generated client implementations. +pub mod map_stream_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + #[derive(Debug, Clone)] + pub struct MapStreamClient { + inner: tonic::client::Grpc, + } + impl MapStreamClient { + /// Attempt to create a new client by connecting to a given endpoint. 
+ pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl MapStreamClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> MapStreamClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + MapStreamClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// MapStreamFn applies a function to each request element and returns a stream. + pub async fn map_stream_fn( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/mapstream.v1.MapStream/MapStreamFn", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("mapstream.v1.MapStream", "MapStreamFn")); + self.inner.server_streaming(req, path, codec).await + } + /// IsReady is the heartbeat endpoint for gRPC. + pub async fn is_ready( + &mut self, + request: impl tonic::IntoRequest<()>, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/mapstream.v1.MapStream/IsReady", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("mapstream.v1.MapStream", "IsReady")); + self.inner.unary(req, path, codec).await + } + } +} diff --git a/rust/numaflow-grpc/src/clients/reduce.v1.rs b/rust/numaflow-grpc/src/clients/reduce.v1.rs new file mode 100644 index 0000000000..40a2771f20 --- /dev/null +++ b/rust/numaflow-grpc/src/clients/reduce.v1.rs @@ -0,0 +1,261 @@ +// This file is @generated by prost-build. +/// * +/// ReduceRequest represents a request element. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReduceRequest { + #[prost(message, optional, tag = "1")] + pub payload: ::core::option::Option, + #[prost(message, optional, tag = "2")] + pub operation: ::core::option::Option, +} +/// Nested message and enum types in `ReduceRequest`. +pub mod reduce_request { + /// WindowOperation represents a window operation. + /// For Aligned windows, OPEN, APPEND and CLOSE events are sent. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct WindowOperation { + #[prost(enumeration = "window_operation::Event", tag = "1")] + pub event: i32, + #[prost(message, repeated, tag = "2")] + pub windows: ::prost::alloc::vec::Vec, + } + /// Nested message and enum types in `WindowOperation`. + pub mod window_operation { + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum Event { + Open = 0, + Close = 1, + Append = 4, + } + impl Event { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Open => "OPEN", + Self::Close => "CLOSE", + Self::Append => "APPEND", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "OPEN" => Some(Self::Open), + "CLOSE" => Some(Self::Close), + "APPEND" => Some(Self::Append), + _ => None, + } + } + } + } + /// Payload represents a payload element. 
+ #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Payload { + #[prost(string, repeated, tag = "1")] + pub keys: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + #[prost(bytes = "vec", tag = "2")] + pub value: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "3")] + pub event_time: ::core::option::Option<::prost_types::Timestamp>, + #[prost(message, optional, tag = "4")] + pub watermark: ::core::option::Option<::prost_types::Timestamp>, + #[prost(map = "string, string", tag = "5")] + pub headers: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, + } +} +/// Window represents a window. +/// Since the client doesn't track keys, window doesn't have a keys field. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Window { + #[prost(message, optional, tag = "1")] + pub start: ::core::option::Option<::prost_types::Timestamp>, + #[prost(message, optional, tag = "2")] + pub end: ::core::option::Option<::prost_types::Timestamp>, + #[prost(string, tag = "3")] + pub slot: ::prost::alloc::string::String, +} +/// * +/// ReduceResponse represents a response element. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReduceResponse { + #[prost(message, optional, tag = "1")] + pub result: ::core::option::Option, + /// window represents a window to which the result belongs. + #[prost(message, optional, tag = "2")] + pub window: ::core::option::Option, + /// EOF represents the end of the response for a window. + #[prost(bool, tag = "3")] + pub eof: bool, +} +/// Nested message and enum types in `ReduceResponse`. +pub mod reduce_response { + /// Result represents a result element. It contains the result of the reduce function. 
+ #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Result { + #[prost(string, repeated, tag = "1")] + pub keys: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + #[prost(bytes = "vec", tag = "2")] + pub value: ::prost::alloc::vec::Vec, + #[prost(string, repeated, tag = "3")] + pub tags: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + } +} +/// * +/// ReadyResponse is the health check result. +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct ReadyResponse { + #[prost(bool, tag = "1")] + pub ready: bool, +} +/// Generated client implementations. +pub mod reduce_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + #[derive(Debug, Clone)] + pub struct ReduceClient { + inner: tonic::client::Grpc, + } + impl ReduceClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl ReduceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> ReduceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + ReduceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with 
the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// ReduceFn applies a reduce function to a request stream. + pub async fn reduce_fn( + &mut self, + request: impl tonic::IntoStreamingRequest, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/reduce.v1.Reduce/ReduceFn", + ); + let mut req = request.into_streaming_request(); + req.extensions_mut().insert(GrpcMethod::new("reduce.v1.Reduce", "ReduceFn")); + self.inner.streaming(req, path, codec).await + } + /// IsReady is the heartbeat endpoint for gRPC. 
+ pub async fn is_ready( + &mut self, + request: impl tonic::IntoRequest<()>, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/reduce.v1.Reduce/IsReady"); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("reduce.v1.Reduce", "IsReady")); + self.inner.unary(req, path, codec).await + } + } +} diff --git a/rust/numaflow-grpc/src/clients/sessionreduce.v1.rs b/rust/numaflow-grpc/src/clients/sessionreduce.v1.rs new file mode 100644 index 0000000000..c5d08e7873 --- /dev/null +++ b/rust/numaflow-grpc/src/clients/sessionreduce.v1.rs @@ -0,0 +1,277 @@ +// This file is @generated by prost-build. +/// KeyedWindow represents a window with keys. +/// since the client track the keys, we use keyed window. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct KeyedWindow { + #[prost(message, optional, tag = "1")] + pub start: ::core::option::Option<::prost_types::Timestamp>, + #[prost(message, optional, tag = "2")] + pub end: ::core::option::Option<::prost_types::Timestamp>, + #[prost(string, tag = "3")] + pub slot: ::prost::alloc::string::String, + #[prost(string, repeated, tag = "4")] + pub keys: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +/// * +/// SessionReduceRequest represents a request element. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SessionReduceRequest { + #[prost(message, optional, tag = "1")] + pub payload: ::core::option::Option, + #[prost(message, optional, tag = "2")] + pub operation: ::core::option::Option, +} +/// Nested message and enum types in `SessionReduceRequest`. +pub mod session_reduce_request { + /// WindowOperation represents a window operation. + /// For Aligned window values can be one of OPEN, CLOSE, EXPAND, MERGE and APPEND. 
+ #[derive(Clone, PartialEq, ::prost::Message)] + pub struct WindowOperation { + #[prost(enumeration = "window_operation::Event", tag = "1")] + pub event: i32, + #[prost(message, repeated, tag = "2")] + pub keyed_windows: ::prost::alloc::vec::Vec, + } + /// Nested message and enum types in `WindowOperation`. + pub mod window_operation { + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum Event { + Open = 0, + Close = 1, + Expand = 2, + Merge = 3, + Append = 4, + } + impl Event { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Open => "OPEN", + Self::Close => "CLOSE", + Self::Expand => "EXPAND", + Self::Merge => "MERGE", + Self::Append => "APPEND", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "OPEN" => Some(Self::Open), + "CLOSE" => Some(Self::Close), + "EXPAND" => Some(Self::Expand), + "MERGE" => Some(Self::Merge), + "APPEND" => Some(Self::Append), + _ => None, + } + } + } + } + /// Payload represents a payload element. 
+ #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Payload { + #[prost(string, repeated, tag = "1")] + pub keys: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + #[prost(bytes = "vec", tag = "2")] + pub value: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "3")] + pub event_time: ::core::option::Option<::prost_types::Timestamp>, + #[prost(message, optional, tag = "4")] + pub watermark: ::core::option::Option<::prost_types::Timestamp>, + #[prost(map = "string, string", tag = "5")] + pub headers: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, + } +} +/// * +/// SessionReduceResponse represents a response element. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SessionReduceResponse { + #[prost(message, optional, tag = "1")] + pub result: ::core::option::Option, + /// keyedWindow represents a window to which the result belongs. + #[prost(message, optional, tag = "2")] + pub keyed_window: ::core::option::Option, + /// EOF represents the end of the response for a window. + #[prost(bool, tag = "3")] + pub eof: bool, +} +/// Nested message and enum types in `SessionReduceResponse`. +pub mod session_reduce_response { + /// Result represents a result element. It contains the result of the reduce function. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Result { + #[prost(string, repeated, tag = "1")] + pub keys: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + #[prost(bytes = "vec", tag = "2")] + pub value: ::prost::alloc::vec::Vec, + #[prost(string, repeated, tag = "3")] + pub tags: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + } +} +/// * +/// ReadyResponse is the health check result. +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct ReadyResponse { + #[prost(bool, tag = "1")] + pub ready: bool, +} +/// Generated client implementations. 
+pub mod session_reduce_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + #[derive(Debug, Clone)] + pub struct SessionReduceClient { + inner: tonic::client::Grpc, + } + impl SessionReduceClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl SessionReduceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> SessionReduceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + SessionReduceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. 
+ /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// SessionReduceFn applies a reduce function to a request stream. + pub async fn session_reduce_fn( + &mut self, + request: impl tonic::IntoStreamingRequest< + Message = super::SessionReduceRequest, + >, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/sessionreduce.v1.SessionReduce/SessionReduceFn", + ); + let mut req = request.into_streaming_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("sessionreduce.v1.SessionReduce", "SessionReduceFn"), + ); + self.inner.streaming(req, path, codec).await + } + /// IsReady is the heartbeat endpoint for gRPC. 
+ pub async fn is_ready( + &mut self, + request: impl tonic::IntoRequest<()>, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/sessionreduce.v1.SessionReduce/IsReady", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("sessionreduce.v1.SessionReduce", "IsReady")); + self.inner.unary(req, path, codec).await + } + } +} diff --git a/rust/numaflow-grpc/src/clients/sideinput.v1.rs b/rust/numaflow-grpc/src/clients/sideinput.v1.rs new file mode 100644 index 0000000000..29d0e34f20 --- /dev/null +++ b/rust/numaflow-grpc/src/clients/sideinput.v1.rs @@ -0,0 +1,169 @@ +// This file is @generated by prost-build. +/// * +/// SideInputResponse represents a response to a given side input retrieval request. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SideInputResponse { + /// value represents the latest value of the side input payload + #[prost(bytes = "vec", tag = "1")] + pub value: ::prost::alloc::vec::Vec, + /// noBroadcast indicates whether the side input value should be broadcasted to all + /// True if value should not be broadcasted + /// False if value should be broadcasted + #[prost(bool, tag = "2")] + pub no_broadcast: bool, +} +/// * +/// ReadyResponse is the health check result. +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct ReadyResponse { + #[prost(bool, tag = "1")] + pub ready: bool, +} +/// Generated client implementations. +pub mod side_input_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// SideInput is the gRPC service for user-defined Side Inputs. 
+ /// It is used to propagate changes in the values of the provided Side Inputs + /// which allows access to slow updated data or configuration without needing to retrieve + /// it during each message processing. + /// Through this service we should should be able to:- + /// 1) Invoke retrieval request for a single Side Input parameter, which in turn should + /// check for updates and return its latest value. + /// 2) Provide a health check endpoint to indicate whether the service is ready to be used. + #[derive(Debug, Clone)] + pub struct SideInputClient { + inner: tonic::client::Grpc, + } + impl SideInputClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl SideInputClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> SideInputClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + SideInputClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. 
+ #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// RetrieveSideInput is the endpoint to retrieve the latest value of a given Side Input. + pub async fn retrieve_side_input( + &mut self, + request: impl tonic::IntoRequest<()>, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/sideinput.v1.SideInput/RetrieveSideInput", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("sideinput.v1.SideInput", "RetrieveSideInput")); + self.inner.unary(req, path, codec).await + } + /// IsReady is the health check endpoint to indicate whether the service is ready to be used. 
+ pub async fn is_ready( + &mut self, + request: impl tonic::IntoRequest<()>, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/sideinput.v1.SideInput/IsReady", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("sideinput.v1.SideInput", "IsReady")); + self.inner.unary(req, path, codec).await + } + } +} diff --git a/rust/numaflow-grpc/src/clients/sink.v1.rs b/rust/numaflow-grpc/src/clients/sink.v1.rs new file mode 100644 index 0000000000..2316cc0a9a --- /dev/null +++ b/rust/numaflow-grpc/src/clients/sink.v1.rs @@ -0,0 +1,246 @@ +// This file is @generated by prost-build. +/// * +/// SinkRequest represents a request element. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SinkRequest { + /// Required field indicating the request. + #[prost(message, optional, tag = "1")] + pub request: ::core::option::Option, + /// Required field indicating the status of the request. + /// If eot is set to true, it indicates the end of transmission. + #[prost(message, optional, tag = "2")] + pub status: ::core::option::Option, + /// optional field indicating the handshake message. + #[prost(message, optional, tag = "3")] + pub handshake: ::core::option::Option, +} +/// Nested message and enum types in `SinkRequest`. 
+pub mod sink_request { + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Request { + #[prost(string, repeated, tag = "1")] + pub keys: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + #[prost(bytes = "vec", tag = "2")] + pub value: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "3")] + pub event_time: ::core::option::Option<::prost_types::Timestamp>, + #[prost(message, optional, tag = "4")] + pub watermark: ::core::option::Option<::prost_types::Timestamp>, + #[prost(string, tag = "5")] + pub id: ::prost::alloc::string::String, + #[prost(map = "string, string", tag = "6")] + pub headers: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, + } + #[derive(Clone, Copy, PartialEq, ::prost::Message)] + pub struct Status { + #[prost(bool, tag = "1")] + pub eot: bool, + } +} +/// +/// Handshake message between client and server to indicate the start of transmission. +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct Handshake { + /// Required field indicating the start of transmission. + #[prost(bool, tag = "1")] + pub sot: bool, +} +/// * +/// ReadyResponse is the health check result. +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct ReadyResponse { + #[prost(bool, tag = "1")] + pub ready: bool, +} +/// * +/// SinkResponse is the individual response of each message written to the sink. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SinkResponse { + #[prost(message, optional, tag = "1")] + pub result: ::core::option::Option, + #[prost(message, optional, tag = "2")] + pub handshake: ::core::option::Option, +} +/// Nested message and enum types in `SinkResponse`. +pub mod sink_response { + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Result { + /// id is the ID of the message, can be used to uniquely identify the message. 
+ #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// status denotes the status of persisting to sink. It can be SUCCESS, FAILURE, or FALLBACK. + #[prost(enumeration = "super::Status", tag = "2")] + pub status: i32, + /// err_msg is the error message, set it if success is set to false. + #[prost(string, tag = "3")] + pub err_msg: ::prost::alloc::string::String, + } +} +/// +/// Status is the status of the response. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum Status { + Success = 0, + Failure = 1, + Fallback = 2, +} +impl Status { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Success => "SUCCESS", + Self::Failure => "FAILURE", + Self::Fallback => "FALLBACK", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "SUCCESS" => Some(Self::Success), + "FAILURE" => Some(Self::Failure), + "FALLBACK" => Some(Self::Fallback), + _ => None, + } + } +} +/// Generated client implementations. +pub mod sink_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + #[derive(Debug, Clone)] + pub struct SinkClient { + inner: tonic::client::Grpc, + } + impl SinkClient { + /// Attempt to create a new client by connecting to a given endpoint. 
+ pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl SinkClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> SinkClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + SinkClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// SinkFn writes the request to a user defined sink. + pub async fn sink_fn( + &mut self, + request: impl tonic::IntoStreamingRequest, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/sink.v1.Sink/SinkFn"); + let mut req = request.into_streaming_request(); + req.extensions_mut().insert(GrpcMethod::new("sink.v1.Sink", "SinkFn")); + self.inner.streaming(req, path, codec).await + } + /// IsReady is the heartbeat endpoint for gRPC. + pub async fn is_ready( + &mut self, + request: impl tonic::IntoRequest<()>, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/sink.v1.Sink/IsReady"); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("sink.v1.Sink", "IsReady")); + self.inner.unary(req, path, codec).await + } + } +} diff --git a/rust/numaflow-grpc/src/clients/source.v1.rs b/rust/numaflow-grpc/src/clients/source.v1.rs new file mode 100644 index 0000000000..f60a48315c --- /dev/null +++ b/rust/numaflow-grpc/src/clients/source.v1.rs @@ -0,0 +1,486 @@ +// This file is @generated by prost-build. +/// +/// Handshake message between client and server to indicate the start of transmission. +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct Handshake { + /// Required field indicating the start of transmission. 
+ #[prost(bool, tag = "1")] + pub sot: bool, +} +/// +/// ReadRequest is the request for reading datum stream from user defined source. +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct ReadRequest { + /// Required field indicating the request. + #[prost(message, optional, tag = "1")] + pub request: ::core::option::Option, + #[prost(message, optional, tag = "2")] + pub handshake: ::core::option::Option, +} +/// Nested message and enum types in `ReadRequest`. +pub mod read_request { + #[derive(Clone, Copy, PartialEq, ::prost::Message)] + pub struct Request { + /// Required field indicating the number of records to read. + #[prost(uint64, tag = "1")] + pub num_records: u64, + /// Required field indicating the request timeout in milliseconds. + /// uint32 can represent 2^32 milliseconds, which is about 49 days. + /// We don't use uint64 because time.Duration takes int64 as nano seconds. Using uint64 for milli will cause overflow. + #[prost(uint32, tag = "2")] + pub timeout_in_ms: u32, + } +} +/// +/// ReadResponse is the response for reading datum stream from user defined source. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReadResponse { + /// Required field holding the result. + #[prost(message, optional, tag = "1")] + pub result: ::core::option::Option, + /// Status of the response. Holds the end of transmission flag and the status code. + #[prost(message, optional, tag = "2")] + pub status: ::core::option::Option, + /// Handshake message between client and server to indicate the start of transmission. + #[prost(message, optional, tag = "3")] + pub handshake: ::core::option::Option, +} +/// Nested message and enum types in `ReadResponse`. +pub mod read_response { + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Result { + /// Required field holding the payload of the datum. + #[prost(bytes = "vec", tag = "1")] + pub payload: ::prost::alloc::vec::Vec, + /// Required field indicating the offset information of the datum. 
+ #[prost(message, optional, tag = "2")] + pub offset: ::core::option::Option, + /// Required field representing the time associated with each datum. It is used for watermarking. + #[prost(message, optional, tag = "3")] + pub event_time: ::core::option::Option<::prost_types::Timestamp>, + /// Optional list of keys associated with the datum. + /// Key is the "key" attribute in (key,value) as in the map-reduce paradigm. + /// We add this optional field to support the use case where the user defined source can provide keys for the datum. + /// e.g. Kafka and Redis Stream message usually include information about the keys. + #[prost(string, repeated, tag = "4")] + pub keys: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// Optional list of headers associated with the datum. + /// Headers are the metadata associated with the datum. + /// e.g. Kafka and Redis Stream message usually include information about the headers. + #[prost(map = "string, string", tag = "5")] + pub headers: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, + } + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Status { + /// End of transmission flag. + #[prost(bool, tag = "1")] + pub eot: bool, + #[prost(enumeration = "status::Code", tag = "2")] + pub code: i32, + #[prost(enumeration = "status::Error", optional, tag = "3")] + pub error: ::core::option::Option, + #[prost(string, optional, tag = "4")] + pub msg: ::core::option::Option<::prost::alloc::string::String>, + } + /// Nested message and enum types in `Status`. + pub mod status { + /// Code to indicate the status of the response. + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum Code { + Success = 0, + Failure = 1, + } + impl Code { + /// String value of the enum field names used in the ProtoBuf definition. 
+ /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Success => "SUCCESS", + Self::Failure => "FAILURE", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "SUCCESS" => Some(Self::Success), + "FAILURE" => Some(Self::Failure), + _ => None, + } + } + } + /// Error to indicate the error type. If the code is FAILURE, then the error field will be populated. + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum Error { + Unacked = 0, + Other = 1, + } + impl Error { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Unacked => "UNACKED", + Self::Other => "OTHER", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "UNACKED" => Some(Self::Unacked), + "OTHER" => Some(Self::Other), + _ => None, + } + } + } + } +} +/// +/// AckRequest is the request for acknowledging datum. +/// It takes a list of offsets to be acknowledged. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AckRequest { + /// Required field holding the request. The list will be ordered and will have the same order as the original Read response. + #[prost(message, optional, tag = "1")] + pub request: ::core::option::Option, + #[prost(message, optional, tag = "2")] + pub handshake: ::core::option::Option, +} +/// Nested message and enum types in `AckRequest`. 
+pub mod ack_request { + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Request { + /// Required field holding the offset to be acked + #[prost(message, optional, tag = "1")] + pub offset: ::core::option::Option, + } +} +/// +/// AckResponse is the response for acknowledging datum. It contains one empty field confirming +/// the batch of offsets that have been successfully acknowledged. The contract between client and server +/// is that the server will only return the AckResponse if the ack request is successful. +/// If the server hangs during the ack request, the client can decide to timeout and error out the data forwarder. +/// The reason why we define such contract is that we always expect the server to be able to process the ack request. +/// Client is expected to send the AckRequest to the server with offsets that are strictly +/// corresponding to the previously read batch. If the client sends the AckRequest with offsets that are not, +/// it is considered as a client error and the server will not return the AckResponse. +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct AckResponse { + /// Required field holding the result. + #[prost(message, optional, tag = "1")] + pub result: ::core::option::Option, + /// Handshake message between client and server to indicate the start of transmission. + #[prost(message, optional, tag = "2")] + pub handshake: ::core::option::Option, +} +/// Nested message and enum types in `AckResponse`. +pub mod ack_response { + #[derive(Clone, Copy, PartialEq, ::prost::Message)] + pub struct Result { + /// Required field indicating the ack request is successful. + #[prost(message, optional, tag = "1")] + pub success: ::core::option::Option<()>, + } +} +/// +/// ReadyResponse is the health check result for user defined source. +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct ReadyResponse { + /// Required field holding the health check result. 
+ #[prost(bool, tag = "1")] + pub ready: bool, +} +/// +/// PendingResponse is the response for the pending request. +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct PendingResponse { + /// Required field holding the result. + #[prost(message, optional, tag = "1")] + pub result: ::core::option::Option, +} +/// Nested message and enum types in `PendingResponse`. +pub mod pending_response { + #[derive(Clone, Copy, PartialEq, ::prost::Message)] + pub struct Result { + /// Required field holding the number of pending records at the user defined source. + /// A negative count indicates that the pending information is not available. + #[prost(int64, tag = "1")] + pub count: i64, + } +} +/// +/// PartitionsResponse is the response for the partitions request. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PartitionsResponse { + /// Required field holding the result. + #[prost(message, optional, tag = "1")] + pub result: ::core::option::Option, +} +/// Nested message and enum types in `PartitionsResponse`. +pub mod partitions_response { + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Result { + /// Required field holding the list of partitions. + #[prost(int32, repeated, tag = "1")] + pub partitions: ::prost::alloc::vec::Vec, + } +} +/// +/// Offset is the offset of the datum. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Offset { + /// offset is the offset of the datum. This field is required. + /// We define Offset as a byte array because different input data sources can have different representations for Offset. + /// The only way to generalize it is to define it as a byte array, + /// Such that we can let the UDSource to de-serialize the offset using its own interpretation logics. + #[prost(bytes = "vec", tag = "1")] + pub offset: ::prost::alloc::vec::Vec, + /// Optional partition_id indicates which partition of the source the datum belongs to. + /// It is useful for sources that have multiple partitions. e.g. Kafka. 
+ /// If the partition_id is not specified, it is assumed that the source has a single partition. + #[prost(int32, tag = "2")] + pub partition_id: i32, +} +/// Generated client implementations. +pub mod source_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + #[derive(Debug, Clone)] + pub struct SourceClient { + inner: tonic::client::Grpc, + } + impl SourceClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl SourceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> SourceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + SourceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. 
+ #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// Read returns a stream of datum responses. + /// The size of the returned responses is less than or equal to the num_records specified in each ReadRequest. + /// If the request timeout is reached on the server side, the returned responses will contain all the datum that have been read (which could be an empty list). + /// The server will continue to read and respond to subsequent ReadRequests until the client closes the stream. + /// Once it has sent all the datum, the server will send a ReadResponse with the end of transmission flag set to true. + pub async fn read_fn( + &mut self, + request: impl tonic::IntoStreamingRequest, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/source.v1.Source/ReadFn"); + let mut req = request.into_streaming_request(); + req.extensions_mut().insert(GrpcMethod::new("source.v1.Source", "ReadFn")); + self.inner.streaming(req, path, codec).await + } + /// AckFn acknowledges a stream of datum offsets. + /// When AckFn is called, it implicitly indicates that the datum stream has been processed by the source vertex. 
+ /// The caller (numa) expects the AckFn to be successful, and it does not expect any errors. + /// If there are some irrecoverable errors when the callee (UDSource) is processing the AckFn request, + /// then it is best to crash because there are no other retry mechanisms possible. + /// Clients sends n requests and expects n responses. + pub async fn ack_fn( + &mut self, + request: impl tonic::IntoStreamingRequest, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/source.v1.Source/AckFn"); + let mut req = request.into_streaming_request(); + req.extensions_mut().insert(GrpcMethod::new("source.v1.Source", "AckFn")); + self.inner.streaming(req, path, codec).await + } + /// PendingFn returns the number of pending records at the user defined source. + pub async fn pending_fn( + &mut self, + request: impl tonic::IntoRequest<()>, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/source.v1.Source/PendingFn", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("source.v1.Source", "PendingFn")); + self.inner.unary(req, path, codec).await + } + /// PartitionsFn returns the list of partitions for the user defined source. 
+ pub async fn partitions_fn( + &mut self, + request: impl tonic::IntoRequest<()>, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/source.v1.Source/PartitionsFn", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("source.v1.Source", "PartitionsFn")); + self.inner.unary(req, path, codec).await + } + /// IsReady is the heartbeat endpoint for user defined source gRPC. + pub async fn is_ready( + &mut self, + request: impl tonic::IntoRequest<()>, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/source.v1.Source/IsReady"); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("source.v1.Source", "IsReady")); + self.inner.unary(req, path, codec).await + } + } +} diff --git a/rust/numaflow-grpc/src/clients/sourcetransformer.v1.rs b/rust/numaflow-grpc/src/clients/sourcetransformer.v1.rs new file mode 100644 index 0000000000..ed30cf79f7 --- /dev/null +++ b/rust/numaflow-grpc/src/clients/sourcetransformer.v1.rs @@ -0,0 +1,225 @@ +// This file is @generated by prost-build. +/// +/// Handshake message between client and server to indicate the start of transmission. +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct Handshake { + /// Required field indicating the start of transmission. + #[prost(bool, tag = "1")] + pub sot: bool, +} +/// * +/// SourceTransformerRequest represents a request element. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SourceTransformRequest { + #[prost(message, optional, tag = "1")] + pub request: ::core::option::Option, + #[prost(message, optional, tag = "2")] + pub handshake: ::core::option::Option, +} +/// Nested message and enum types in `SourceTransformRequest`. +pub mod source_transform_request { + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Request { + #[prost(string, repeated, tag = "1")] + pub keys: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + #[prost(bytes = "vec", tag = "2")] + pub value: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "3")] + pub event_time: ::core::option::Option<::prost_types::Timestamp>, + #[prost(message, optional, tag = "4")] + pub watermark: ::core::option::Option<::prost_types::Timestamp>, + #[prost(map = "string, string", tag = "5")] + pub headers: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, + /// This ID is used to uniquely identify a transform request + #[prost(string, tag = "6")] + pub id: ::prost::alloc::string::String, + } +} +/// * +/// SourceTransformerResponse represents a response element. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SourceTransformResponse { + #[prost(message, repeated, tag = "1")] + pub results: ::prost::alloc::vec::Vec, + /// This ID is used to refer the responses to the request it corresponds to. + #[prost(string, tag = "2")] + pub id: ::prost::alloc::string::String, + /// Handshake message between client and server to indicate the start of transmission. + #[prost(message, optional, tag = "3")] + pub handshake: ::core::option::Option, +} +/// Nested message and enum types in `SourceTransformResponse`. 
+pub mod source_transform_response { + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Result { + #[prost(string, repeated, tag = "1")] + pub keys: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + #[prost(bytes = "vec", tag = "2")] + pub value: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "3")] + pub event_time: ::core::option::Option<::prost_types::Timestamp>, + #[prost(string, repeated, tag = "4")] + pub tags: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + } +} +/// * +/// ReadyResponse is the health check result. +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct ReadyResponse { + #[prost(bool, tag = "1")] + pub ready: bool, +} +/// Generated client implementations. +pub mod source_transform_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + #[derive(Debug, Clone)] + pub struct SourceTransformClient { + inner: tonic::client::Grpc, + } + impl SourceTransformClient { + /// Attempt to create a new client by connecting to a given endpoint. 
+ pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl SourceTransformClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> SourceTransformClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + SourceTransformClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// SourceTransformFn applies a function to each request element. + /// In addition to map function, SourceTransformFn also supports assigning a new event time to response. + /// SourceTransformFn can be used only at source vertex by source data transformer. + pub async fn source_transform_fn( + &mut self, + request: impl tonic::IntoStreamingRequest< + Message = super::SourceTransformRequest, + >, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/sourcetransformer.v1.SourceTransform/SourceTransformFn", + ); + let mut req = request.into_streaming_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "sourcetransformer.v1.SourceTransform", + "SourceTransformFn", + ), + ); + self.inner.streaming(req, path, codec).await + } + /// IsReady is the heartbeat endpoint for gRPC. 
+ pub async fn is_ready( + &mut self, + request: impl tonic::IntoRequest<()>, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/sourcetransformer.v1.SourceTransform/IsReady", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("sourcetransformer.v1.SourceTransform", "IsReady"), + ); + self.inner.unary(req, path, codec).await + } + } +} diff --git a/rust/numaflow-grpc/src/lib.rs b/rust/numaflow-grpc/src/lib.rs new file mode 100644 index 0000000000..705f46dba5 --- /dev/null +++ b/rust/numaflow-grpc/src/lib.rs @@ -0,0 +1 @@ +pub mod clients; diff --git a/rust/numaflow-grpc/src/main.rs b/rust/numaflow-grpc/src/main.rs new file mode 100644 index 0000000000..6c95d2b793 --- /dev/null +++ b/rust/numaflow-grpc/src/main.rs @@ -0,0 +1,20 @@ +fn main() { + tonic_build::configure() + .build_client(true) + .build_server(false) + .out_dir("src/clients") + .compile_protos( + &[ + "proto/source/v1/source.proto", + "proto/sourcetransform/v1/sourcetransform.proto", + "proto/sink/v1/sink.proto", + "proto/map/v1/map.proto", + "proto/mapstream/v1/mapstream.proto", + "proto/reduce/v1/reduce.proto", + "proto/sessionreduce/v1/sessionreduce.proto", + "proto/sideinput/v1/sideinput.proto", + ], + &["proto"], + ) + .expect("failed to compile protos"); +} From ceb8f5b721c097310a5b91d89d1bd3df8648f284 Mon Sep 17 00:00:00 2001 From: Yashash H L Date: Tue, 8 Oct 2024 10:06:06 +0530 Subject: [PATCH 092/188] feat: Make Generator Support Leaky Bucket (#2129) Signed-off-by: Yashash H L Signed-off-by: Vigith Maurice Co-authored-by: Vigith Maurice --- rust/numaflow-core/src/source/generator.rs | 26 ++++++++++------------ 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/rust/numaflow-core/src/source/generator.rs 
b/rust/numaflow-core/src/source/generator.rs index 294f612f4f..588df6126d 100644 --- a/rust/numaflow-core/src/source/generator.rs +++ b/rust/numaflow-core/src/source/generator.rs @@ -19,7 +19,7 @@ use std::time::Duration; /// 2 batches only 1 batch (no reread) 5 5 5 /// /// ``` -/// NOTE: The minimum granularity of duration is 10ms. +/// NOTE: The minimum granularity of duration is 5ms. mod stream_generator { use bytes::Bytes; use futures::Stream; @@ -53,16 +53,6 @@ mod stream_generator { let mut tick = tokio::time::interval(unit); tick.set_missed_tick_behavior(MissedTickBehavior::Skip); - // set default value of the unit to 10ms, - // and subtract 5ms due to timer precision constraints - let unit = (if unit < Duration::from_millis(10) { - Duration::from_millis(10) - } else { - unit - }) - .checked_sub(Duration::from_millis(5)) - .expect("there is +-5ms precision, so unit > 5ms"); - Self { content, rpu, @@ -70,7 +60,10 @@ mod stream_generator { batch: if batch > rpu { rpu } else { batch }, unit, used: 0, - prev_time: Instant::now().checked_sub(unit).unwrap(), + // rewind a bit to return on the first call + prev_time: Instant::now() + .checked_sub(unit) + .expect("subtraction cannot fail"), tick, } } @@ -118,9 +111,14 @@ mod stream_generator { // we have to wait for the next tick because we are out of quota let mut tick = this.tick; match tick.poll_tick(cx) { - // we can recurse ourselves to return data since enough time has passed Poll::Ready(_) => { - // recursively call the poll_next since we are ready to serve + // reset the prev_time as we are quite certain that we should be returning + // data, else we would have been in Pending + *this.prev_time = (*this.prev_time) + .checked_sub(elapsed) + .expect("subtraction cannot fail"); + + // we can recurse ourselves to return data since enough time has passed self.poll_next(cx) } Poll::Pending => Poll::Pending, From f02c699d7c7007dba4fb001d53f092d7c12719b1 Mon Sep 17 00:00:00 2001 From: Vigith Maurice Date: Mon, 7 Oct 
2024 22:36:07 -0700 Subject: [PATCH 093/188] chore: simplified generator relying purely on Polling (#2132) Signed-off-by: Vigith Maurice --- rust/numaflow-core/src/source/generator.rs | 84 ++++++++-------------- 1 file changed, 28 insertions(+), 56 deletions(-) diff --git a/rust/numaflow-core/src/source/generator.rs b/rust/numaflow-core/src/source/generator.rs index 588df6126d..611969c740 100644 --- a/rust/numaflow-core/src/source/generator.rs +++ b/rust/numaflow-core/src/source/generator.rs @@ -19,7 +19,7 @@ use std::time::Duration; /// 2 batches only 1 batch (no reread) 5 5 5 /// /// ``` -/// NOTE: The minimum granularity of duration is 5ms. +/// NOTE: The minimum granularity of duration is 10ms. mod stream_generator { use bytes::Bytes; use futures::Stream; @@ -27,7 +27,7 @@ mod stream_generator { use std::pin::Pin; use std::task::{Context, Poll}; use std::time::Duration; - use tokio::time::{Instant, MissedTickBehavior}; + use tokio::time::MissedTickBehavior; #[pin_project] pub(super) struct StreamGenerator { @@ -37,13 +37,9 @@ mod stream_generator { rpu: usize, /// batch size per read batch: usize, - /// unit of time-period over which the [rpu] is defined. - unit: Duration, /// the amount of credits used for the current time-period. /// remaining = (rpu - used) for that time-period used: usize, - /// the last time we generated data. now() - prev_time is compared against the duration. 
- prev_time: Instant, #[pin] tick: tokio::time::Interval, } @@ -58,12 +54,7 @@ mod stream_generator { rpu, // batch cannot > rpu batch: if batch > rpu { rpu } else { batch }, - unit, used: 0, - // rewind a bit to return on the first call - prev_time: Instant::now() - .checked_sub(unit) - .expect("subtraction cannot fail"), tick, } } @@ -76,52 +67,33 @@ mod stream_generator { mut self: Pin<&mut StreamGenerator>, cx: &mut Context<'_>, ) -> Poll> { - let this = self.as_mut().project(); - - // Calculate the elapsed time since the last poll - let elapsed = this.prev_time.elapsed(); - - // we can return the complete batch if enough time has passed, with a precision +- 5ms - if elapsed >= *this.unit { - // Reset the timer - *this.prev_time = Instant::now(); - - // Generate data that equals to batch data - let data = vec![this.content.clone(); *this.batch]; - // reset used quota - *this.used = *this.batch; - - Poll::Ready(Some(data)) - } else if this.used < this.rpu { - // even if enough time hasn't passed, we can still send data if we have - // quota (rpu - used) left - - // make sure we do not send more than desired - let to_send = if *this.rpu - *this.used < *this.batch { - *this.rpu - *this.used - } else { - *this.batch - }; - - // update the counters - *this.used += to_send; - - Poll::Ready(Some(vec![this.content.clone(); to_send])) - } else { - // we have to wait for the next tick because we are out of quota - let mut tick = this.tick; - match tick.poll_tick(cx) { - Poll::Ready(_) => { - // reset the prev_time as we are quite certain that we should be returning - // data, else we would have been in Pending - *this.prev_time = (*this.prev_time) - .checked_sub(elapsed) - .expect("subtraction cannot fail"); - - // we can recurse ourselves to return data since enough time has passed - self.poll_next(cx) + let mut this = self.as_mut().project(); + + match this.tick.poll_tick(cx) { + // Poll::Ready means we are ready to send data the whole batch since enough time + // has 
passed. + Poll::Ready(_) => { + // generate data that equals to batch data + let data = vec![this.content.clone(); *this.batch]; + // reset used quota + *this.used = *this.batch; + + Poll::Ready(Some(data)) + } + Poll::Pending => { + // even if enough time hasn't passed, we can still send data if we have + // quota (rpu - used) left + if this.used < this.rpu { + // make sure we do not send more than desired + let to_send = std::cmp::min(*this.rpu - *this.used, *this.batch); + + // update the counters + *this.used += to_send; + + Poll::Ready(Some(vec![this.content.clone(); to_send])) + } else { + Poll::Pending } - Poll::Pending => Poll::Pending, } } } From d5c96fd9538d6eebe7267f89f677c837b309d6a0 Mon Sep 17 00:00:00 2001 From: Yashash H L Date: Wed, 9 Oct 2024 13:16:13 +0530 Subject: [PATCH 094/188] feat: Use gRPC bidirectional streaming for map (#2120) --- go.mod | 2 +- go.sum | 4 +- pkg/apis/proto/map/v1/map.proto | 31 +- pkg/isb/message.go | 2 - pkg/isb/stores/jetstream/writer_test.go | 4 +- pkg/isb/stores/redis/read_test.go | 9 +- pkg/isb/stores/redis/write_test.go | 6 +- pkg/isb/testutils/udf.go | 49 +-- pkg/sdkclient/batchmapper/client.go | 2 +- pkg/sdkclient/batchmapper/interface.go | 2 +- pkg/sdkclient/mapper/client.go | 121 +++++- pkg/sdkclient/mapper/client_test.go | 164 +++++--- pkg/sdkclient/mapper/interface.go | 4 +- pkg/sdkclient/mapstreamer/client.go | 2 +- pkg/sdkclient/mapstreamer/interface.go | 2 +- .../sourcetransformer/client_test.go | 16 +- pkg/sources/forward/data_forward.go | 33 +- pkg/sources/forward/data_forward_test.go | 40 +- pkg/sources/forward/shutdown_test.go | 10 +- pkg/sources/transformer/grpc_transformer.go | 1 - .../transformer/grpc_transformer_test.go | 8 +- pkg/udf/forward/applier/mapper.go | 6 +- pkg/udf/forward/forward.go | 106 ++--- pkg/udf/forward/forward_test.go | 79 ++-- pkg/udf/forward/shutdown_test.go | 4 +- pkg/udf/map_udf.go | 132 ++++--- pkg/udf/rpc/grpc_batch_map.go | 7 +- pkg/udf/rpc/grpc_map.go | 154 +++----- 
pkg/udf/rpc/grpc_map_test.go | 369 +++++------------- pkg/udf/rpc/grpc_mapstream.go | 6 +- pkg/udf/rpc/grpc_mapstream_test.go | 17 + rust/numaflow-grpc/src/clients/map.v1.rs | 61 ++- test/diamond-e2e/testdata/join-on-reduce.yaml | 2 +- test/transformer-e2e/transformer_test.go | 10 +- 34 files changed, 691 insertions(+), 774 deletions(-) diff --git a/go.mod b/go.mod index 49ee6df1df..657849e773 100644 --- a/go.mod +++ b/go.mod @@ -32,7 +32,7 @@ require ( github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe github.com/nats-io/nats-server/v2 v2.10.20 github.com/nats-io/nats.go v1.37.0 - github.com/numaproj/numaflow-go v0.8.2-0.20241001031210-60188185d9c0 + github.com/numaproj/numaflow-go v0.8.2-0.20241003055702-9179ac584a4a github.com/prometheus/client_golang v1.19.1 github.com/prometheus/client_model v0.6.1 github.com/prometheus/common v0.55.0 diff --git a/go.sum b/go.sum index c6f4ec4e73..80a55220bb 100644 --- a/go.sum +++ b/go.sum @@ -483,8 +483,8 @@ github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDm github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/numaproj/numaflow-go v0.8.2-0.20241001031210-60188185d9c0 h1:MN4Q36mPrXqPrv2dNoK3gyV7c1CGwUF3wNJxTZSw1lk= -github.com/numaproj/numaflow-go v0.8.2-0.20241001031210-60188185d9c0/go.mod h1:FaCMeV0V9SiLcVf2fwT+GeTJHNaK2gdQsTAIqQ4x7oc= +github.com/numaproj/numaflow-go v0.8.2-0.20241003055702-9179ac584a4a h1:KnpALzELgzX7GR2FDvADTDsauGW/B1fzFw9b+kXYkFc= +github.com/numaproj/numaflow-go v0.8.2-0.20241003055702-9179ac584a4a/go.mod h1:FaCMeV0V9SiLcVf2fwT+GeTJHNaK2gdQsTAIqQ4x7oc= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= 
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= diff --git a/pkg/apis/proto/map/v1/map.proto b/pkg/apis/proto/map/v1/map.proto index a7fd7d4ebd..34a189f08b 100644 --- a/pkg/apis/proto/map/v1/map.proto +++ b/pkg/apis/proto/map/v1/map.proto @@ -26,7 +26,7 @@ package map.v1; service Map { // MapFn applies a function to each map request element. - rpc MapFn(MapRequest) returns (MapResponse); + rpc MapFn(stream MapRequest) returns (stream MapResponse); // IsReady is the heartbeat endpoint for gRPC. rpc IsReady(google.protobuf.Empty) returns (ReadyResponse); @@ -36,11 +36,25 @@ service Map { * MapRequest represents a request element. */ message MapRequest { - repeated string keys = 1; - bytes value = 2; - google.protobuf.Timestamp event_time = 3; - google.protobuf.Timestamp watermark = 4; - map headers = 5; + message Request { + repeated string keys = 1; + bytes value = 2; + google.protobuf.Timestamp event_time = 3; + google.protobuf.Timestamp watermark = 4; + map headers = 5; + } + Request request = 1; + // This ID is used to uniquely identify a map request + string id = 2; + optional Handshake handshake = 3; +} + +/* + * Handshake message between client and server to indicate the start of transmission. + */ +message Handshake { + // Required field indicating the start of transmission. + bool sot = 1; } /** @@ -53,6 +67,9 @@ message MapResponse { repeated string tags = 3; } repeated Result results = 1; + // This ID is used to refer the responses to the request it corresponds to. 
+ string id = 2; + optional Handshake handshake = 3; } /** @@ -60,4 +77,4 @@ message MapResponse { */ message ReadyResponse { bool ready = 1; -} +} \ No newline at end of file diff --git a/pkg/isb/message.go b/pkg/isb/message.go index 1c3de9c29e..5d1f8b7e6b 100644 --- a/pkg/isb/message.go +++ b/pkg/isb/message.go @@ -126,9 +126,7 @@ type WriteMessage struct { // ReadWriteMessagePair is a pair of ReadMessage and a list of WriteMessage which will be used // to map the read message to a list of write messages that the udf returns. -// The error field is used to capture any error that occurs during the processing of the message. type ReadWriteMessagePair struct { ReadMessage *ReadMessage WriteMessages []*WriteMessage - Err error } diff --git a/pkg/isb/stores/jetstream/writer_test.go b/pkg/isb/stores/jetstream/writer_test.go index 4928e2110a..78afb7738f 100644 --- a/pkg/isb/stores/jetstream/writer_test.go +++ b/pkg/isb/stores/jetstream/writer_test.go @@ -45,8 +45,8 @@ func (f myForwardJetStreamTest) WhereTo(_ []string, _ []string, s string) ([]for }}, nil } -func (f myForwardJetStreamTest) ApplyMap(ctx context.Context, message *isb.ReadMessage) ([]*isb.WriteMessage, error) { - return testutils.CopyUDFTestApply(ctx, "test-vertex", message) +func (f myForwardJetStreamTest) ApplyMap(ctx context.Context, messages []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { + return testutils.CopyUDFTestApply(ctx, "test-vertex", messages) } func (f myForwardJetStreamTest) ApplyMapStream(ctx context.Context, message *isb.ReadMessage, writeMessageCh chan<- isb.WriteMessage) error { diff --git a/pkg/isb/stores/redis/read_test.go b/pkg/isb/stores/redis/read_test.go index 562fa82751..6f42dc79c5 100644 --- a/pkg/isb/stores/redis/read_test.go +++ b/pkg/isb/stores/redis/read_test.go @@ -141,6 +141,7 @@ func TestRedisCheckBacklog(t *testing.T) { fetchWatermark, publishWatermark := generic.BuildNoOpWatermarkProgressorsFromBufferMap(toSteps) f, err := 
forward.NewInterStepDataForward(vertexInstance, rqr, toSteps, forwardReadWritePerformance{}, fetchWatermark, publishWatermark, wmb.NewNoOpIdleManager(), forward.WithReadBatchSize(10), forward.WithUDFUnaryMap(forwardReadWritePerformance{})) + assert.NoError(t, err) stopped := f.Start() // validate the length of the toStep stream. @@ -307,7 +308,7 @@ func (f forwardReadWritePerformance) WhereTo(_ []string, _ []string, _ string) ( }}, nil } -func (f forwardReadWritePerformance) ApplyMap(ctx context.Context, message *isb.ReadMessage) ([]*isb.WriteMessage, error) { +func (f forwardReadWritePerformance) ApplyMap(ctx context.Context, message []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { return testutils.CopyUDFTestApply(ctx, "testVertex", message) } @@ -473,7 +474,7 @@ func (suite *ReadWritePerformance) TestReadWriteLatencyPipelining() { } func getPercentiles(t *testing.T, latency []float64) string { - min, err := stats.Min(latency) + mi, err := stats.Min(latency) assert.NoError(t, err) p50, err := stats.Percentile(latency, 50) assert.NoError(t, err) @@ -485,10 +486,10 @@ func getPercentiles(t *testing.T, latency []float64) string { assert.NoError(t, err) p99, err := stats.Percentile(latency, 99) assert.NoError(t, err) - max, err := stats.Max(latency) + mx, err := stats.Max(latency) assert.NoError(t, err) - return fmt.Sprintf("min=%f p50=%f p75=%f p90=%f p95=%f p99=%f max=%f", min, p50, p75, p90, p95, p99, max) + return fmt.Sprintf("min=%f p50=%f p75=%f p90=%f p95=%f p99=%f max=%f", mi, p50, p75, p90, p95, p99, mx) } // writeTestMessages is used to add some dummy messages using XADD diff --git a/pkg/isb/stores/redis/write_test.go b/pkg/isb/stores/redis/write_test.go index e3fa3e6241..92693a5890 100644 --- a/pkg/isb/stores/redis/write_test.go +++ b/pkg/isb/stores/redis/write_test.go @@ -351,7 +351,7 @@ func (f myForwardRedisTest) WhereTo(_ []string, _ []string, _ string) ([]forward }}, nil } -func (f myForwardRedisTest) ApplyMap(ctx context.Context, message 
*isb.ReadMessage) ([]*isb.WriteMessage, error) { +func (f myForwardRedisTest) ApplyMap(ctx context.Context, message []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { return testutils.CopyUDFTestApply(ctx, "", message) } @@ -419,7 +419,7 @@ func TestNewInterStepDataForwardRedis(t *testing.T) { assert.False(t, to1.IsFull()) // forwardDataAndVerify is used to verify if data is forwarded from the from and received in the to buffer. - forwardDataAndVerify(ctx, t, fromStepWrite, to1Read, to1, fromStep, f, writeMessages, count) + forwardDataAndVerify(ctx, t, fromStepWrite, to1Read, to1, f, writeMessages, count) } // TestReadTimeout tests that even though we have a blocking read, our Stop function exits cleanly @@ -590,7 +590,7 @@ func TestSetWriteInfo(t *testing.T) { } // forwardDataAndVerify start the forwarder and verify the data. -func forwardDataAndVerify(ctx context.Context, t *testing.T, fromStepWrite *BufferWrite, to1Read *BufferRead, to1 *BufferWrite, fromStep *BufferRead, f *forward.InterStepDataForward, writeMessages []isb.Message, count int64) { +func forwardDataAndVerify(ctx context.Context, t *testing.T, fromStepWrite *BufferWrite, to1Read *BufferRead, to1 *BufferWrite, f *forward.InterStepDataForward, writeMessages []isb.Message, count int64) { stopped := f.Start() // write some data _, errs := fromStepWrite.Write(ctx, writeMessages[0:5]) diff --git a/pkg/isb/testutils/udf.go b/pkg/isb/testutils/udf.go index b17255373f..66221e5b50 100644 --- a/pkg/isb/testutils/udf.go +++ b/pkg/isb/testutils/udf.go @@ -23,32 +23,35 @@ import ( ) // CopyUDFTestApply applies a copy UDF that simply copies the input to output. 
-func CopyUDFTestApply(ctx context.Context, vertexName string, readMessage *isb.ReadMessage) ([]*isb.WriteMessage, error) { - _ = ctx - offset := readMessage.ReadOffset - payload := readMessage.Body.Payload - parentPaneInfo := readMessage.MessageInfo - - // apply UDF - _ = payload - // copy the payload - result := payload - var keys []string +func CopyUDFTestApply(ctx context.Context, vertexName string, readMessages []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { + udfResults := make([]isb.ReadWriteMessagePair, len(readMessages)) + for i, readMessage := range readMessages { + offset := readMessage.ReadOffset + payload := readMessage.Body.Payload + parentPaneInfo := readMessage.MessageInfo + // copy the payload + result := payload + var keys []string - writeMessage := isb.Message{ - Header: isb.Header{ - MessageInfo: parentPaneInfo, - ID: isb.MessageID{ - VertexName: vertexName, - Offset: offset.String(), + writeMessage := isb.Message{ + Header: isb.Header{ + MessageInfo: parentPaneInfo, + ID: isb.MessageID{ + VertexName: vertexName, + Offset: offset.String(), + }, + Keys: keys, }, - Keys: keys, - }, - Body: isb.Body{ - Payload: result, - }, + Body: isb.Body{ + Payload: result, + }, + } + udfResults[i] = isb.ReadWriteMessagePair{ + ReadMessage: readMessage, + WriteMessages: []*isb.WriteMessage{{Message: writeMessage}}, + } } - return []*isb.WriteMessage{{Message: writeMessage}}, nil + return udfResults, nil } func CopyUDFTestApplyStream(ctx context.Context, vertexName string, writeMessageCh chan<- isb.WriteMessage, readMessage *isb.ReadMessage) error { diff --git a/pkg/sdkclient/batchmapper/client.go b/pkg/sdkclient/batchmapper/client.go index 5cc1718492..74d1cb49f5 100644 --- a/pkg/sdkclient/batchmapper/client.go +++ b/pkg/sdkclient/batchmapper/client.go @@ -64,7 +64,7 @@ func NewFromClient(c batchmappb.BatchMapClient) (Client, error) { } // CloseConn closes the grpc client connection. 
-func (c *client) CloseConn(ctx context.Context) error { +func (c *client) CloseConn() error { return c.conn.Close() } diff --git a/pkg/sdkclient/batchmapper/interface.go b/pkg/sdkclient/batchmapper/interface.go index 3831f856ac..4f0e40013d 100644 --- a/pkg/sdkclient/batchmapper/interface.go +++ b/pkg/sdkclient/batchmapper/interface.go @@ -25,7 +25,7 @@ import ( // Client contains methods to call a gRPC client. type Client interface { - CloseConn(ctx context.Context) error + CloseConn() error IsReady(ctx context.Context, in *emptypb.Empty) (bool, error) BatchMapFn(ctx context.Context, inputCh <-chan *batchmappb.BatchMapRequest) (<-chan *batchmappb.BatchMapResponse, <-chan error) } diff --git a/pkg/sdkclient/mapper/client.go b/pkg/sdkclient/mapper/client.go index 07ef848a09..8e3e3d24d6 100644 --- a/pkg/sdkclient/mapper/client.go +++ b/pkg/sdkclient/mapper/client.go @@ -18,7 +18,10 @@ package mapper import ( "context" + "fmt" + "time" + "golang.org/x/sync/errgroup" "google.golang.org/grpc" "google.golang.org/protobuf/types/known/emptypb" @@ -28,16 +31,18 @@ import ( sdkerror "github.com/numaproj/numaflow/pkg/sdkclient/error" grpcutil "github.com/numaproj/numaflow/pkg/sdkclient/grpc" "github.com/numaproj/numaflow/pkg/sdkclient/serverinfo" + "github.com/numaproj/numaflow/pkg/shared/logging" ) // client contains the grpc connection and the grpc client. type client struct { conn *grpc.ClientConn grpcClt mappb.MapClient + stream mappb.Map_MapFnClient } // New creates a new client object. 
-func New(serverInfo *serverinfo.ServerInfo, inputOptions ...sdkclient.Option) (Client, error) { +func New(ctx context.Context, serverInfo *serverinfo.ServerInfo, inputOptions ...sdkclient.Option) (Client, error) { var opts = sdkclient.DefaultOptions(sdkclient.MapAddr) for _, inputOption := range inputOptions { @@ -53,18 +58,81 @@ func New(serverInfo *serverinfo.ServerInfo, inputOptions ...sdkclient.Option) (C c := new(client) c.conn = conn c.grpcClt = mappb.NewMapClient(conn) + + var logger = logging.FromContext(ctx) + +waitUntilReady: + for { + select { + case <-ctx.Done(): + return nil, fmt.Errorf("waiting for mapper gRPC server to be ready: %w", ctx.Err()) + default: + _, err := c.IsReady(ctx, &emptypb.Empty{}) + if err != nil { + logger.Warnf("Mapper server is not ready: %v", err) + time.Sleep(100 * time.Millisecond) + continue waitUntilReady + } + break waitUntilReady + } + } + + c.stream, err = c.grpcClt.MapFn(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create a gRPC stream for map: %w", err) + } + + if err := doHandshake(c.stream); err != nil { + return nil, err + } + return c, nil } +func doHandshake(stream mappb.Map_MapFnClient) error { + // Send handshake request + handshakeReq := &mappb.MapRequest{ + Handshake: &mappb.Handshake{ + Sot: true, + }, + } + if err := stream.Send(handshakeReq); err != nil { + return fmt.Errorf("failed to send handshake request for map: %w", err) + } + + handshakeResp, err := stream.Recv() + if err != nil { + return fmt.Errorf("failed to receive handshake response from map stream: %w", err) + } + if resp := handshakeResp.GetHandshake(); resp == nil || !resp.GetSot() { + return fmt.Errorf("invalid handshake response for map. Received='%+v'", resp) + } + return nil +} + // NewFromClient creates a new client object from a grpc client. This is used for testing. 
-func NewFromClient(c mappb.MapClient) (Client, error) { +func NewFromClient(ctx context.Context, c mappb.MapClient) (Client, error) { + stream, err := c.MapFn(ctx) + if err != nil { + return nil, err + } + + if err := doHandshake(stream); err != nil { + return nil, err + } + return &client{ grpcClt: c, + stream: stream, }, nil } // CloseConn closes the grpc client connection. -func (c *client) CloseConn(ctx context.Context) error { +func (c *client) CloseConn() error { + err := c.stream.CloseSend() + if err != nil { + return err + } if c.conn == nil { return nil } @@ -80,12 +148,47 @@ func (c *client) IsReady(ctx context.Context, in *emptypb.Empty) (bool, error) { return resp.GetReady(), nil } -// MapFn applies a function to each datum element. -func (c *client) MapFn(ctx context.Context, request *mappb.MapRequest) (*mappb.MapResponse, error) { - mapResponse, err := c.grpcClt.MapFn(ctx, request) - err = sdkerror.ToUDFErr("c.grpcClt.MapFn", err) - if err != nil { +// MapFn applies a function to each map request element. 
+func (c *client) MapFn(ctx context.Context, requests []*mappb.MapRequest) ([]*mappb.MapResponse, error) { + var eg errgroup.Group + // send n requests + eg.Go(func() error { + for _, req := range requests { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + if err := c.stream.Send(req); err != nil { + return sdkerror.ToUDFErr("c.grpcClt.MapFn stream.Send", err) + } + } + return nil + }) + + // receive n responses + responses := make([]*mappb.MapResponse, len(requests)) + eg.Go(func() error { + for i := 0; i < len(requests); i++ { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + resp, err := c.stream.Recv() + if err != nil { + return sdkerror.ToUDFErr("c.grpcClt.MapFn stream.Recv", err) + } + responses[i] = resp + } + return nil + }) + + // wait for the send and receive goroutines to finish + // if any of the goroutines return an error, the error will be caught here + if err := eg.Wait(); err != nil { return nil, err } - return mapResponse, nil + + return responses, nil } diff --git a/pkg/sdkclient/mapper/client_test.go b/pkg/sdkclient/mapper/client_test.go index d9165841c6..1fea6b4abb 100644 --- a/pkg/sdkclient/mapper/client_test.go +++ b/pkg/sdkclient/mapper/client_test.go @@ -18,80 +18,130 @@ package mapper import ( "context" + "errors" "fmt" - "reflect" + "net" "testing" + "time" - "github.com/golang/mock/gomock" mappb "github.com/numaproj/numaflow-go/pkg/apis/proto/map/v1" - "github.com/numaproj/numaflow-go/pkg/apis/proto/map/v1/mapmock" - "github.com/stretchr/testify/assert" + "github.com/numaproj/numaflow-go/pkg/mapper" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/test/bufconn" "google.golang.org/protobuf/types/known/emptypb" ) func TestClient_IsReady(t *testing.T) { var ctx = context.Background() + svc := &mapper.Service{ + Mapper: mapper.MapperFunc(func(ctx context.Context, keys []string, datum mapper.Datum) mapper.Messages { 
+ return mapper.MessagesBuilder() + }), + } + + // Start the gRPC server + conn := newServer(t, func(server *grpc.Server) { + mappb.RegisterMapServer(server, svc) + }) + defer func(conn *grpc.ClientConn) { + _ = conn.Close() + }(conn) + + // Create a client connection to the server + client := mappb.NewMapClient(conn) - ctrl := gomock.NewController(t) - defer ctrl.Finish() + testClient, err := NewFromClient(ctx, client) + require.NoError(t, err) - mockClient := mapmock.NewMockMapClient(ctrl) - mockClient.EXPECT().IsReady(gomock.Any(), gomock.Any()).Return(&mappb.ReadyResponse{Ready: true}, nil) - mockClient.EXPECT().IsReady(gomock.Any(), gomock.Any()).Return(&mappb.ReadyResponse{Ready: false}, fmt.Errorf("mock connection refused")) + ready, err := testClient.IsReady(ctx, &emptypb.Empty{}) + require.True(t, ready) + require.NoError(t, err) +} - testClient, err := NewFromClient(mockClient) - assert.NoError(t, err) - reflect.DeepEqual(testClient, &client{ - grpcClt: mockClient, +func newServer(t *testing.T, register func(server *grpc.Server)) *grpc.ClientConn { + lis := bufconn.Listen(100) + t.Cleanup(func() { + _ = lis.Close() }) - ready, err := testClient.IsReady(ctx, &emptypb.Empty{}) - assert.True(t, ready) - assert.NoError(t, err) + server := grpc.NewServer() + t.Cleanup(func() { + server.Stop() + }) - ready, err = testClient.IsReady(ctx, &emptypb.Empty{}) - assert.False(t, ready) - assert.EqualError(t, err, "mock connection refused") -} + register(server) -func TestClient_MapFn(t *testing.T) { - var ctx = context.Background() + errChan := make(chan error, 1) + go func() { + // t.Fatal should only be called from the goroutine running the test + if err := server.Serve(lis); err != nil { + errChan <- err + } + }() - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockClient := mapmock.NewMockMapClient(ctrl) - mockClient.EXPECT().MapFn(gomock.Any(), gomock.Any()).Return(&mappb.MapResponse{Results: []*mappb.MapResponse_Result{ - { - Keys: 
[]string{"temp-key"}, - Value: []byte("mock result"), - Tags: nil, - }, - }}, nil) - mockClient.EXPECT().MapFn(gomock.Any(), gomock.Any()).Return(&mappb.MapResponse{Results: []*mappb.MapResponse_Result{ - { - Keys: []string{"temp-key"}, - Value: []byte("mock result"), - Tags: nil, - }, - }}, fmt.Errorf("mock connection refused")) - - testClient, err := NewFromClient(mockClient) - assert.NoError(t, err) - reflect.DeepEqual(testClient, &client{ - grpcClt: mockClient, + dialer := func(context.Context, string) (net.Conn, error) { + return lis.Dial() + } + + conn, err := grpc.NewClient("passthrough://", grpc.WithContextDialer(dialer), grpc.WithTransportCredentials(insecure.NewCredentials())) + t.Cleanup(func() { + _ = conn.Close() }) + if err != nil { + t.Fatalf("Creating new gRPC client connection: %v", err) + } + + var grpcServerErr error + select { + case grpcServerErr = <-errChan: + case <-time.After(500 * time.Millisecond): + grpcServerErr = errors.New("gRPC server didn't start in 500ms") + } + if err != nil { + t.Fatalf("Failed to start gRPC server: %v", grpcServerErr) + } + + return conn +} - result, err := testClient.MapFn(ctx, &mappb.MapRequest{}) - assert.Equal(t, &mappb.MapResponse{Results: []*mappb.MapResponse_Result{ - { - Keys: []string{"temp-key"}, - Value: []byte("mock result"), - Tags: nil, - }, - }}, result) - assert.NoError(t, err) - - _, err = testClient.MapFn(ctx, &mappb.MapRequest{}) - assert.EqualError(t, err, "NonRetryable: mock connection refused") +func TestClient_MapFn(t *testing.T) { + svc := &mapper.Service{ + Mapper: mapper.MapperFunc(func(ctx context.Context, keys []string, datum mapper.Datum) mapper.Messages { + msg := datum.Value() + return mapper.MessagesBuilder().Append(mapper.NewMessage(msg).WithKeys([]string{keys[0] + "_test"})) + }), + } + conn := newServer(t, func(server *grpc.Server) { + mappb.RegisterMapServer(server, svc) + }) + mapClient := mappb.NewMapClient(conn) + var ctx = context.Background() + client, _ := 
NewFromClient(ctx, mapClient) + + requests := make([]*mappb.MapRequest, 5) + for i := 0; i < 5; i++ { + requests[i] = &mappb.MapRequest{ + Request: &mappb.MapRequest_Request{ + Keys: []string{fmt.Sprintf("client_key_%d", i)}, + Value: []byte("test"), + }, + } + } + + responses, err := client.MapFn(ctx, requests) + require.NoError(t, err) + var results [][]*mappb.MapResponse_Result + for _, resp := range responses { + results = append(results, resp.GetResults()) + } + expected := [][]*mappb.MapResponse_Result{ + {{Keys: []string{"client_key_0_test"}, Value: []byte("test")}}, + {{Keys: []string{"client_key_1_test"}, Value: []byte("test")}}, + {{Keys: []string{"client_key_2_test"}, Value: []byte("test")}}, + {{Keys: []string{"client_key_3_test"}, Value: []byte("test")}}, + {{Keys: []string{"client_key_4_test"}, Value: []byte("test")}}, + } + require.ElementsMatch(t, expected, results) } diff --git a/pkg/sdkclient/mapper/interface.go b/pkg/sdkclient/mapper/interface.go index 94ec1310a9..e079833767 100644 --- a/pkg/sdkclient/mapper/interface.go +++ b/pkg/sdkclient/mapper/interface.go @@ -25,7 +25,7 @@ import ( // Client contains methods to call a gRPC client. type Client interface { - CloseConn(ctx context.Context) error + CloseConn() error IsReady(ctx context.Context, in *emptypb.Empty) (bool, error) - MapFn(ctx context.Context, mapRequest *mappb.MapRequest) (*mappb.MapResponse, error) + MapFn(ctx context.Context, mapRequest []*mappb.MapRequest) ([]*mappb.MapResponse, error) } diff --git a/pkg/sdkclient/mapstreamer/client.go b/pkg/sdkclient/mapstreamer/client.go index ff5d07a7a6..405372d4af 100644 --- a/pkg/sdkclient/mapstreamer/client.go +++ b/pkg/sdkclient/mapstreamer/client.go @@ -64,7 +64,7 @@ func NewFromClient(c mapstreampb.MapStreamClient) (Client, error) { } // CloseConn closes the grpc client connection. 
-func (c *client) CloseConn(ctx context.Context) error { +func (c *client) CloseConn() error { return c.conn.Close() } diff --git a/pkg/sdkclient/mapstreamer/interface.go b/pkg/sdkclient/mapstreamer/interface.go index 515c85b0dd..f7543227f8 100644 --- a/pkg/sdkclient/mapstreamer/interface.go +++ b/pkg/sdkclient/mapstreamer/interface.go @@ -25,7 +25,7 @@ import ( // Client contains methods to call a gRPC client. type Client interface { - CloseConn(ctx context.Context) error + CloseConn() error IsReady(ctx context.Context, in *emptypb.Empty) (bool, error) MapStreamFn(ctx context.Context, request *v1.MapStreamRequest, responseCh chan<- *v1.MapStreamResponse) error } diff --git a/pkg/sdkclient/sourcetransformer/client_test.go b/pkg/sdkclient/sourcetransformer/client_test.go index c66abbd6ea..bab7ac2c2a 100644 --- a/pkg/sdkclient/sourcetransformer/client_test.go +++ b/pkg/sdkclient/sourcetransformer/client_test.go @@ -121,16 +121,14 @@ func TestClient_SourceTransformFn(t *testing.T) { client, _ := NewFromClient(ctx, transformClient) requests := make([]*transformpb.SourceTransformRequest, 5) - go func() { - for i := 0; i < 5; i++ { - requests[i] = &transformpb.SourceTransformRequest{ - Request: &transformpb.SourceTransformRequest_Request{ - Keys: []string{fmt.Sprintf("client_key_%d", i)}, - Value: []byte("test"), - }, - } + for i := 0; i < 5; i++ { + requests[i] = &transformpb.SourceTransformRequest{ + Request: &transformpb.SourceTransformRequest_Request{ + Keys: []string{fmt.Sprintf("client_key_%d", i)}, + Value: []byte("test"), + }, } - }() + } responses, err := client.SourceTransformFn(ctx, requests) require.NoError(t, err) diff --git a/pkg/sources/forward/data_forward.go b/pkg/sources/forward/data_forward.go index 913be67939..fc22d8ac0b 100644 --- a/pkg/sources/forward/data_forward.go +++ b/pkg/sources/forward/data_forward.go @@ -310,20 +310,24 @@ func (df *DataForward) forwardAChunk(ctx context.Context) { m.Watermark = time.Time(processorWM) } - 
concurrentTransformerProcessingStart := time.Now() - readWriteMessagePairs = df.applyTransformer(ctx, readMessages) + transformerProcessingStart := time.Now() + readWriteMessagePairs, err = df.applyTransformer(ctx, readMessages) + if err != nil { + df.opts.logger.Errorw("failed to apply source transformer", zap.Error(err)) + return + } df.opts.logger.Debugw("concurrent applyTransformer completed", zap.Int("concurrency", df.opts.transformerConcurrency), - zap.Duration("took", time.Since(concurrentTransformerProcessingStart)), + zap.Duration("took", time.Since(transformerProcessingStart)), ) - metrics.SourceTransformerConcurrentProcessingTime.With(map[string]string{ + metrics.SourceTransformerProcessingTime.With(map[string]string{ metrics.LabelVertex: df.vertexName, metrics.LabelPipeline: df.pipelineName, metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), metrics.LabelPartitionName: df.reader.GetName(), - }).Observe(float64(time.Since(concurrentTransformerProcessingStart).Microseconds())) + }).Observe(float64(time.Since(transformerProcessingStart).Microseconds())) } else { for idx, m := range readMessages { @@ -373,19 +377,6 @@ func (df *DataForward) forwardAChunk(ctx context.Context) { // let's figure out which vertex to send the results to. // update the toBuffer(s) with writeMessages. for _, m := range readWriteMessagePairs { - // Look for errors in transformer processing if we see even 1 error we return. - // Handling partial retrying is not worth ATM. - if m.Err != nil { - metrics.SourceTransformerError.With(map[string]string{ - metrics.LabelVertex: df.vertexName, - metrics.LabelPipeline: df.pipelineName, - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), - metrics.LabelPartitionName: df.reader.GetName(), - }).Inc() - - df.opts.logger.Errorw("failed to apply source transformer", zap.Error(m.Err)) - return - } // for each message, we will determine where to send the message. 
for _, message := range m.WriteMessages { if err = df.whereToStep(message, messageToStep); err != nil { @@ -646,7 +637,7 @@ func (df *DataForward) writeToBuffer(ctx context.Context, toBufferPartition isb. // applyTransformer applies the transformer and will block if there is any InternalErr. On the other hand, if this is a UserError // the skip flag is set. The ShutDown flag will only if there is an InternalErr and ForceStop has been invoked. // The UserError retry will be done on the applyTransformer. -func (df *DataForward) applyTransformer(ctx context.Context, messages []*isb.ReadMessage) []isb.ReadWriteMessagePair { +func (df *DataForward) applyTransformer(ctx context.Context, messages []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { for { transformResults, err := df.opts.transformer.ApplyTransform(ctx, messages) if err != nil { @@ -664,11 +655,11 @@ func (df *DataForward) applyTransformer(ctx context.Context, messages []*isb.Rea metrics.LabelVertexType: string(dfv1.VertexTypeSource), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), }).Inc() - return []isb.ReadWriteMessagePair{{Err: err}} + return nil, err } continue } - return transformResults + return transformResults, nil } } diff --git a/pkg/sources/forward/data_forward_test.go b/pkg/sources/forward/data_forward_test.go index 96cb6760e6..59797dec2b 100644 --- a/pkg/sources/forward/data_forward_test.go +++ b/pkg/sources/forward/data_forward_test.go @@ -122,15 +122,7 @@ func (f myForwardTest) WhereTo(_ []string, _ []string, s string) ([]forwarder.Ve } func (f myForwardTest) ApplyTransform(ctx context.Context, messages []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { - out := make([]isb.ReadWriteMessagePair, len(messages)) - for i, msg := range messages { - writeMsg, _ := testutils.CopyUDFTestApply(ctx, "test-vertex", msg) - out[i] = isb.ReadWriteMessagePair{ - ReadMessage: msg, - WriteMessages: writeMsg, - } - } - return out, nil + return testutils.CopyUDFTestApply(ctx, 
"test-vertex", messages) } func TestNewDataForward(t *testing.T) { @@ -1157,15 +1149,7 @@ func (f myForwardDropTest) WhereTo(_ []string, _ []string, s string) ([]forwarde } func (f myForwardDropTest) ApplyTransform(ctx context.Context, messages []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { - results := make([]isb.ReadWriteMessagePair, len(messages)) - for i, message := range messages { - writeMsg, _ := testutils.CopyUDFTestApply(ctx, "test-vertex", message) - results[i] = isb.ReadWriteMessagePair{ - ReadMessage: message, - WriteMessages: writeMsg, - } - } - return results, nil + return testutils.CopyUDFTestApply(ctx, "test-vertex", messages) } type myForwardToAllTest struct { @@ -1186,15 +1170,7 @@ func (f *myForwardToAllTest) WhereTo(_ []string, _ []string, s string) ([]forwar } func (f *myForwardToAllTest) ApplyTransform(ctx context.Context, messages []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { - results := make([]isb.ReadWriteMessagePair, len(messages)) - for i, message := range messages { - writeMsg, _ := testutils.CopyUDFTestApply(ctx, "test-vertex", message) - results[i] = isb.ReadWriteMessagePair{ - ReadMessage: message, - WriteMessages: writeMsg, - } - } - return results, nil + return testutils.CopyUDFTestApply(ctx, "test-vertex", messages) } type myForwardInternalErrTest struct { @@ -1229,15 +1205,7 @@ func (f myForwardApplyWhereToErrTest) WhereTo(_ []string, _ []string, s string) } func (f myForwardApplyWhereToErrTest) ApplyTransform(ctx context.Context, messages []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { - results := make([]isb.ReadWriteMessagePair, len(messages)) - for i, message := range messages { - writeMsg, _ := testutils.CopyUDFTestApply(ctx, "test-vertex", message) - results[i] = isb.ReadWriteMessagePair{ - ReadMessage: message, - WriteMessages: writeMsg, - } - } - return results, nil + return testutils.CopyUDFTestApply(ctx, "test-vertex", messages) } type myForwardApplyTransformerErrTest struct { diff --git 
a/pkg/sources/forward/shutdown_test.go b/pkg/sources/forward/shutdown_test.go index 34003e729f..45e18f35e3 100644 --- a/pkg/sources/forward/shutdown_test.go +++ b/pkg/sources/forward/shutdown_test.go @@ -44,15 +44,7 @@ func (s myShutdownTest) WhereTo([]string, []string, string) ([]forwarder.VertexB } func (f myShutdownTest) ApplyTransform(ctx context.Context, messages []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { - results := make([]isb.ReadWriteMessagePair, len(messages)) - for i, message := range messages { - writeMsg, _ := testutils.CopyUDFTestApply(ctx, "", message) - results[i] = isb.ReadWriteMessagePair{ - ReadMessage: message, - WriteMessages: writeMsg, - } - } - return results, nil + return testutils.CopyUDFTestApply(ctx, "", messages) } func (s myShutdownTest) ApplyMapStream(ctx context.Context, message *isb.ReadMessage, writeMessageCh chan<- isb.WriteMessage) error { diff --git a/pkg/sources/transformer/grpc_transformer.go b/pkg/sources/transformer/grpc_transformer.go index 459e99f21b..3becd414cb 100644 --- a/pkg/sources/transformer/grpc_transformer.go +++ b/pkg/sources/transformer/grpc_transformer.go @@ -145,7 +145,6 @@ func (u *GRPCBasedTransformer) ApplyTransform(ctx context.Context, messages []*i responsePair := isb.ReadWriteMessagePair{ ReadMessage: parentMessage, WriteMessages: taggedMessages, - Err: nil, } transformResults[i] = responsePair } diff --git a/pkg/sources/transformer/grpc_transformer_test.go b/pkg/sources/transformer/grpc_transformer_test.go index cd8ccbe852..9d08f3d0f7 100644 --- a/pkg/sources/transformer/grpc_transformer_test.go +++ b/pkg/sources/transformer/grpc_transformer_test.go @@ -29,13 +29,14 @@ import ( "google.golang.org/grpc/test/bufconn" transformpb "github.com/numaproj/numaflow-go/pkg/apis/proto/sourcetransform/v1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "github.com/numaproj/numaflow/pkg/isb" "github.com/numaproj/numaflow/pkg/isb/testutils" 
sourcetransformerSdk "github.com/numaproj/numaflow/pkg/sdkclient/sourcetransformer" "github.com/numaproj/numaflow/pkg/udf/rpc" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" ) func TestGRPCBasedTransformer_WaitUntilReadyWithServer(t *testing.T) { @@ -280,7 +281,6 @@ func TestGRPCBasedTransformer_Apply_ChangeEventTime(t *testing.T) { apply, err := u.ApplyTransform(context.TODO(), messages) assert.NoError(t, err) for _, pair := range apply { - assert.NoError(t, pair.Err) assert.Equal(t, testEventTime, pair.WriteMessages[0].EventTime) } } diff --git a/pkg/udf/forward/applier/mapper.go b/pkg/udf/forward/applier/mapper.go index 28c48fb54e..8c38120f36 100644 --- a/pkg/udf/forward/applier/mapper.go +++ b/pkg/udf/forward/applier/mapper.go @@ -25,12 +25,12 @@ import ( // MapApplier applies the map UDF on the read message and gives back a new message. Any UserError will be retried here, while // InternalErr can be returned and could be retried by the callee. 
type MapApplier interface { - ApplyMap(ctx context.Context, message *isb.ReadMessage) ([]*isb.WriteMessage, error) + ApplyMap(ctx context.Context, message []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) } // ApplyMapFunc utility function used to create a MapApplier implementation -type ApplyMapFunc func(context.Context, *isb.ReadMessage) ([]*isb.WriteMessage, error) +type ApplyMapFunc func(context.Context, []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) -func (f ApplyMapFunc) ApplyMap(ctx context.Context, message *isb.ReadMessage) ([]*isb.WriteMessage, error) { +func (f ApplyMapFunc) ApplyMap(ctx context.Context, message []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { return f(ctx, message) } diff --git a/pkg/udf/forward/forward.go b/pkg/udf/forward/forward.go index e768808cc3..b3adb91e01 100644 --- a/pkg/udf/forward/forward.go +++ b/pkg/udf/forward/forward.go @@ -272,23 +272,26 @@ func (isdf *InterStepDataForward) forwardAChunk(ctx context.Context) { // ie Batch Map or unary map // This will be a blocking call until the all the UDF results for the batch are received. if isdf.opts.batchMapUdfApplier != nil { - udfResults = isdf.processBatchMessages(ctx, dataMessages) + udfResults, err = isdf.processBatchMessages(ctx, dataMessages) + if err != nil { + isdf.opts.logger.Errorw("failed to processBatchMessages", zap.Error(err)) + // As there's no partial failure, non-ack all the readOffsets + isdf.fromBufferPartition.NoAck(ctx, readOffsets) + return + } } else { - udfResults = isdf.processConcurrentMap(ctx, dataMessages) + udfResults, err = isdf.applyUDF(ctx, dataMessages) + if err != nil { + isdf.opts.logger.Errorw("failed to applyUDF", zap.Error(err)) + // As there's no partial failure, non-ack all the readOffsets + isdf.fromBufferPartition.NoAck(ctx, readOffsets) + return + } } // let's figure out which vertex to send the results to. // update the toBuffer(s) with writeMessages. 
for _, m := range udfResults { - // look for errors in udf processing, if we see even 1 error NoAck all messages - // then return. Handling partial retrying is not worth ATM. - if m.Err != nil { - metrics.UDFError.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica))}).Inc() - isdf.opts.logger.Errorw("failed to applyUDF", zap.Error(m.Err)) - // As there's no partial failure, non-ack all the readOffsets - isdf.fromBufferPartition.NoAck(ctx, readOffsets) - return - } // update toBuffers for _, message := range m.WriteMessages { if err := isdf.whereToStep(message, messageToStep, m.ReadMessage); err != nil { @@ -374,43 +377,6 @@ func (isdf *InterStepDataForward) forwardAChunk(ctx context.Context) { metrics.ForwardAChunkProcessingTime.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica))}).Observe(float64(time.Since(start).Microseconds())) } -// processConcurrentMap is used for concurrently processing the inputs using the traditional map mode -// here each input request is handled separately and the response for only request is received through one UDF call. -func (isdf *InterStepDataForward) processConcurrentMap(ctx context.Context, dataMessages []*isb.ReadMessage) []isb.ReadWriteMessagePair { - udfResults := make([]isb.ReadWriteMessagePair, len(dataMessages)) - // udf concurrent processing request channel - udfCh := make(chan *isb.ReadWriteMessagePair) - // udfResults stores the results after map UDF processing for all read messages. It indexes - // a read message to the corresponding write message - // applyUDF, if there is an Internal error it is a blocking call and will return only if shutdown has been initiated. 
- - // create a pool of map UDF Processors - var wg sync.WaitGroup - for i := 0; i < isdf.opts.udfConcurrency; i++ { - wg.Add(1) - go func() { - defer wg.Done() - isdf.concurrentApplyUDF(ctx, udfCh) - }() - } - concurrentUDFProcessingStart := time.Now() - - // send to map UDF only the data messages - for idx, m := range dataMessages { - // send map UDF processing work to the channel - udfResults[idx].ReadMessage = m - udfCh <- &udfResults[idx] - } - // let the go routines know that there is no more work - close(udfCh) - // wait till the processing is done. this will not be an infinite wait because the map UDF processing will exit if - // context.Done() is closed. - wg.Wait() - isdf.opts.logger.Debugw("concurrent applyUDF completed", zap.Int("concurrency", isdf.opts.udfConcurrency), zap.Duration("took", time.Since(concurrentUDFProcessingStart))) - metrics.ConcurrentUDFProcessingTime.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica))}).Observe(float64(time.Since(concurrentUDFProcessingStart).Microseconds())) - return udfResults -} - // processBatchMessages is used for processing the Batch Map mode UDF // batch map processing we send a list of N input requests together to the UDF and get the consolidated // response for all of them. @@ -418,15 +384,11 @@ func (isdf *InterStepDataForward) processConcurrentMap(ctx context.Context, data // - if there is a success while retrying // - if shutdown has been initiated. 
// - if context is cancelled -func (isdf *InterStepDataForward) processBatchMessages(ctx context.Context, dataMessages []*isb.ReadMessage) []isb.ReadWriteMessagePair { +func (isdf *InterStepDataForward) processBatchMessages(ctx context.Context, dataMessages []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { concurrentUDFProcessingStart := time.Now() var udfResults []isb.ReadWriteMessagePair var err error - errorPair := isb.ReadWriteMessagePair{ - ReadMessage: nil, - WriteMessages: nil, - Err: nil, - } + for { // invoke the UDF call udfResults, err = isdf.opts.batchMapUdfApplier.ApplyBatchMap(ctx, dataMessages) @@ -437,15 +399,13 @@ func (isdf *InterStepDataForward) processBatchMessages(ctx context.Context, data metrics.PlatformError.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica))}).Inc() - errorPair.Err = err - return []isb.ReadWriteMessagePair{errorPair} + return nil, err } isdf.opts.logger.Errorw("batchMapUDF.Apply got error during batch udf processing", zap.Error(err)) select { case <-ctx.Done(): // no point in retrying if the context is cancelled - errorPair.Err = err - return []isb.ReadWriteMessagePair{errorPair} + return nil, err case <-time.After(time.Second): // sleep for 1 second and keep retrying after that // Keeping one second of timeout for consistency with other map modes (unary and stream) @@ -462,7 +422,7 @@ func (isdf *InterStepDataForward) processBatchMessages(ctx context.Context, data } isdf.opts.logger.Debugw("batch map applyUDF completed", zap.Int("concurrency", isdf.opts.udfConcurrency), zap.Duration("took", time.Since(concurrentUDFProcessingStart))) metrics.ConcurrentUDFProcessingTime.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), 
metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica))}).Observe(float64(time.Since(concurrentUDFProcessingStart).Microseconds())) - return udfResults + return udfResults, nil } // streamMessage streams the data messages to the next step. @@ -702,33 +662,21 @@ func (isdf *InterStepDataForward) writeToBuffer(ctx context.Context, toBufferPar return writeOffsets, nil } -// concurrentApplyUDF applies the map UDF based on the request from the channel -func (isdf *InterStepDataForward) concurrentApplyUDF(ctx context.Context, readMessagePair <-chan *isb.ReadWriteMessagePair) { - for message := range readMessagePair { - start := time.Now() - metrics.UDFReadMessagesCount.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), metrics.LabelPartitionName: isdf.fromBufferPartition.GetName()}).Inc() - writeMessages, err := isdf.applyUDF(ctx, message.ReadMessage) - metrics.UDFWriteMessagesCount.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), metrics.LabelPartitionName: isdf.fromBufferPartition.GetName()}).Add(float64(len(writeMessages))) - // set the headers for the write messages - for _, m := range writeMessages { - m.Headers = message.ReadMessage.Headers - } - message.WriteMessages = append(message.WriteMessages, writeMessages...) 
- message.Err = err - metrics.UDFProcessingTime.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica))}).Observe(float64(time.Since(start).Microseconds())) - } -} - // applyUDF applies the map UDF and will block if there is any InternalErr. On the other hand, if this is a UserError // the skip flag is set. ShutDown flag will only if there is an InternalErr and ForceStop has been invoked. // The UserError retry will be done on the ApplyUDF. -func (isdf *InterStepDataForward) applyUDF(ctx context.Context, readMessage *isb.ReadMessage) ([]*isb.WriteMessage, error) { +func (isdf *InterStepDataForward) applyUDF(ctx context.Context, readMessages []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { for { - writeMessages, err := isdf.opts.unaryMapUdfApplier.ApplyMap(ctx, readMessage) + writeMessages, err := isdf.opts.unaryMapUdfApplier.ApplyMap(ctx, readMessages) if err != nil { isdf.opts.logger.Errorw("mapUDF.Apply error", zap.Error(err)) // TODO: implement retry with backoff etc. - time.Sleep(isdf.opts.retryInterval) + select { + case <-ctx.Done(): + // no point in retrying if the context is cancelled + return nil, err + case <-time.After(isdf.opts.retryInterval): + } // keep retrying, I cannot think of a use case where a user could say, errors are fine :-) // as a platform we should not lose or corrupt data. // this does not mean we should prohibit this from a shutdown. 
diff --git a/pkg/udf/forward/forward_test.go b/pkg/udf/forward/forward_test.go index e4ca74e3d2..da94970da0 100644 --- a/pkg/udf/forward/forward_test.go +++ b/pkg/udf/forward/forward_test.go @@ -18,6 +18,7 @@ package forward import ( "context" + "errors" "fmt" "strings" "sync" @@ -64,7 +65,7 @@ func TestMain(m *testing.M) { goleak.VerifyTestMain(m) } -func (t *testForwardFetcher) ComputeWatermark(offset isb.Offset, partition int32) wmb.Watermark { +func (t *testForwardFetcher) ComputeWatermark(isb.Offset, int32) wmb.Watermark { return t.getWatermark() } @@ -86,15 +87,15 @@ func (f myForwardTest) ApplyBatchMap(ctx context.Context, messages []*isb.ReadMe return testutils.CopyUDFTestApplyBatchMap(ctx, "test-vertex", messages) } -func (f myForwardTest) WhereTo(_ []string, _ []string, s string) ([]forwarder.VertexBuffer, error) { +func (f myForwardTest) WhereTo(_ []string, _ []string, _ string) ([]forwarder.VertexBuffer, error) { return []forwarder.VertexBuffer{{ ToVertexName: "to1", ToVertexPartitionIdx: 0, }}, nil } -func (f myForwardTest) ApplyMap(ctx context.Context, message *isb.ReadMessage) ([]*isb.WriteMessage, error) { - return testutils.CopyUDFTestApply(ctx, "", message) +func (f myForwardTest) ApplyMap(ctx context.Context, messages []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { + return testutils.CopyUDFTestApply(ctx, "", messages) } func (f myForwardTest) ApplyMapStream(ctx context.Context, message *isb.ReadMessage, writeMessageCh chan<- isb.WriteMessage) error { @@ -1061,7 +1062,7 @@ func (t *testWMBFetcher) RevertBoolValue() { t.WMBTestDiffHeadWMB = !t.WMBTestDiffHeadWMB } -func (t *testWMBFetcher) ComputeWatermark(offset isb.Offset, partition int32) wmb.Watermark { +func (t *testWMBFetcher) ComputeWatermark(isb.Offset, int32) wmb.Watermark { return t.getWatermark() } @@ -1179,7 +1180,7 @@ func TestNewInterStepDataForwardIdleWatermark(t *testing.T) { for !fromStep.IsEmpty() { select { case <-ctx.Done(): - if ctx.Err() == context.DeadlineExceeded 
{ + if errors.Is(ctx.Err(), context.DeadlineExceeded) { t.Fatal("expected the buffer to be empty", ctx.Err()) } default: @@ -1213,7 +1214,7 @@ func TestNewInterStepDataForwardIdleWatermark(t *testing.T) { for otDecode1.Offset != 0 { // the first ctrl message written to isb. can't use idle because default idle=false select { case <-ctx.Done(): - if ctx.Err() == context.DeadlineExceeded { + if errors.Is(ctx.Err(), context.DeadlineExceeded) { t.Fatal("expected to have idle watermark in to1 timeline", ctx.Err()) } default: @@ -1241,7 +1242,7 @@ func TestNewInterStepDataForwardIdleWatermark(t *testing.T) { for otDecode1.Idle { select { case <-ctx.Done(): - if ctx.Err() == context.DeadlineExceeded { + if errors.Is(ctx.Err(), context.DeadlineExceeded) { t.Fatal("expected to have active watermark in to1 timeline", ctx.Err()) } default: @@ -1370,7 +1371,7 @@ func TestNewInterStepDataForwardIdleWatermark_Reset(t *testing.T) { for otDecode1.Offset != 0 { // the first ctrl message written to isb. can't use idle because default idle=false select { case <-ctx.Done(): - if ctx.Err() == context.DeadlineExceeded { + if errors.Is(ctx.Err(), context.DeadlineExceeded) { t.Fatal("expected to have idle watermark in to1 timeline", ctx.Err()) } default: @@ -1398,7 +1399,7 @@ func TestNewInterStepDataForwardIdleWatermark_Reset(t *testing.T) { for otDecode1.Idle { select { case <-ctx.Done(): - if ctx.Err() == context.DeadlineExceeded { + if errors.Is(ctx.Err(), context.DeadlineExceeded) { t.Fatal("expected to have active watermark in to1 timeline", ctx.Err()) } default: @@ -1421,7 +1422,7 @@ func TestNewInterStepDataForwardIdleWatermark_Reset(t *testing.T) { for otDecode1.Offset != 3 { // the second ctrl message written to isb. 
can't use idle because default idle=false select { case <-ctx.Done(): - if ctx.Err() == context.DeadlineExceeded { + if errors.Is(ctx.Err(), context.DeadlineExceeded) { t.Fatal("expected to have idle watermark in to1 timeline", ctx.Err()) } default: @@ -1459,7 +1460,7 @@ func TestNewInterStepDataForwardIdleWatermark_Reset(t *testing.T) { type mySourceForwardTest struct { } -func (f mySourceForwardTest) WhereTo(_ []string, _ []string, s string) ([]forwarder.VertexBuffer, error) { +func (f mySourceForwardTest) WhereTo(_ []string, _ []string, _ string) ([]forwarder.VertexBuffer, error) { return []forwarder.VertexBuffer{{ ToVertexName: "to1", ToVertexPartitionIdx: 0, @@ -1470,7 +1471,7 @@ type mySourceForwardTestRoundRobin struct { count int } -func (f *mySourceForwardTestRoundRobin) WhereTo(_ []string, _ []string, s string) ([]forwarder.VertexBuffer, error) { +func (f *mySourceForwardTestRoundRobin) WhereTo(_ []string, _ []string, _ string) ([]forwarder.VertexBuffer, error) { var output = []forwarder.VertexBuffer{{ ToVertexName: "to1", ToVertexPartitionIdx: int32(f.count % 2), @@ -1483,8 +1484,9 @@ func (f mySourceForwardTest) ApplyBatchMap(ctx context.Context, messages []*isb. 
return testutils.CopyUDFTestApplyBatchMap(ctx, "test-vertex", messages) } -func (f mySourceForwardTest) ApplyMap(ctx context.Context, message *isb.ReadMessage) ([]*isb.WriteMessage, error) { - return func(ctx context.Context, readMessage *isb.ReadMessage) ([]*isb.WriteMessage, error) { +func (f mySourceForwardTest) ApplyMap(ctx context.Context, readMessages []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { + resp := make([]isb.ReadWriteMessagePair, 0) + for _, readMessage := range readMessages { _ = ctx offset := readMessage.ReadOffset payload := readMessage.Body.Payload @@ -1509,8 +1511,11 @@ func (f mySourceForwardTest) ApplyMap(ctx context.Context, message *isb.ReadMess Payload: result, }, } - return []*isb.WriteMessage{{Message: writeMessage}}, nil - }(ctx, message) + writeMessage.Header.Headers = readMessage.Header.Headers + resp = append(resp, isb.ReadWriteMessagePair{ReadMessage: readMessage, WriteMessages: []*isb.WriteMessage{{Message: writeMessage}}}) + } + + return resp, nil } func (f mySourceForwardTest) ApplyMapStream(ctx context.Context, message *isb.ReadMessage, writeMessageCh chan<- isb.WriteMessage) error { @@ -1547,14 +1552,6 @@ func (f mySourceForwardTest) ApplyMapStream(ctx context.Context, message *isb.Re }(ctx, message, writeMessageCh) } -// TestSourceWatermarkPublisher is a dummy implementation of isb.SourcePublisher interface -type TestSourceWatermarkPublisher struct { -} - -func (p TestSourceWatermarkPublisher) PublishSourceWatermarks([]*isb.ReadMessage) { - // PublishSourceWatermarks is not tested in forwarder_test.go -} - func TestInterStepDataForwardSinglePartition(t *testing.T) { fromStep := simplebuffer.NewInMemoryBuffer("from", 25, 0) to1 := simplebuffer.NewInMemoryBuffer("to1", 10, 0, simplebuffer.WithReadTimeOut(time.Second*10)) @@ -1859,12 +1856,12 @@ func TestWriteToBuffer(t *testing.T) { type myForwardDropTest struct { } -func (f myForwardDropTest) WhereTo(_ []string, _ []string, s string) ([]forwarder.VertexBuffer, error) 
{ +func (f myForwardDropTest) WhereTo(_ []string, _ []string, _ string) ([]forwarder.VertexBuffer, error) { return []forwarder.VertexBuffer{}, nil } -func (f myForwardDropTest) ApplyMap(ctx context.Context, message *isb.ReadMessage) ([]*isb.WriteMessage, error) { - return testutils.CopyUDFTestApply(ctx, "", message) +func (f myForwardDropTest) ApplyMap(ctx context.Context, messages []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { + return testutils.CopyUDFTestApply(ctx, "", messages) } func (f myForwardDropTest) ApplyMapStream(ctx context.Context, message *isb.ReadMessage, writeMessageCh chan<- isb.WriteMessage) error { @@ -1879,7 +1876,7 @@ type myForwardToAllTest struct { count int } -func (f *myForwardToAllTest) WhereTo(_ []string, _ []string, s string) ([]forwarder.VertexBuffer, error) { +func (f *myForwardToAllTest) WhereTo(_ []string, _ []string, _ string) ([]forwarder.VertexBuffer, error) { var output = []forwarder.VertexBuffer{{ ToVertexName: "to1", ToVertexPartitionIdx: int32(f.count % 2), @@ -1892,11 +1889,11 @@ func (f *myForwardToAllTest) WhereTo(_ []string, _ []string, s string) ([]forwar return output, nil } -func (f *myForwardToAllTest) ApplyMap(ctx context.Context, message *isb.ReadMessage) ([]*isb.WriteMessage, error) { - return testutils.CopyUDFTestApply(ctx, "", message) +func (f *myForwardToAllTest) ApplyMap(ctx context.Context, messages []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { + return testutils.CopyUDFTestApply(ctx, "", messages) } -func (f myForwardToAllTest) ApplyMapStream(ctx context.Context, message *isb.ReadMessage, writeMessageCh chan<- isb.WriteMessage) error { +func (f *myForwardToAllTest) ApplyMapStream(ctx context.Context, message *isb.ReadMessage, writeMessageCh chan<- isb.WriteMessage) error { return testutils.CopyUDFTestApplyStream(ctx, "", writeMessageCh, message) } @@ -1907,14 +1904,14 @@ func (f *myForwardToAllTest) ApplyBatchMap(ctx context.Context, messages []*isb. 
type myForwardInternalErrTest struct { } -func (f myForwardInternalErrTest) WhereTo(_ []string, _ []string, s string) ([]forwarder.VertexBuffer, error) { +func (f myForwardInternalErrTest) WhereTo(_ []string, _ []string, _ string) ([]forwarder.VertexBuffer, error) { return []forwarder.VertexBuffer{{ ToVertexName: "to1", ToVertexPartitionIdx: 0, }}, nil } -func (f myForwardInternalErrTest) ApplyMap(_ context.Context, _ *isb.ReadMessage) ([]*isb.WriteMessage, error) { +func (f myForwardInternalErrTest) ApplyMap(_ context.Context, _ []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { return nil, &udfapplier.ApplyUDFErr{ UserUDFErr: false, InternalErr: struct { @@ -1925,7 +1922,7 @@ func (f myForwardInternalErrTest) ApplyMap(_ context.Context, _ *isb.ReadMessage } } -func (f myForwardInternalErrTest) ApplyBatchMap(ctx context.Context, messages []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { +func (f myForwardInternalErrTest) ApplyBatchMap(context.Context, []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { return nil, &udfapplier.ApplyUDFErr{ UserUDFErr: false, InternalErr: struct { @@ -1951,15 +1948,15 @@ func (f myForwardInternalErrTest) ApplyMapStream(_ context.Context, _ *isb.ReadM type myForwardApplyWhereToErrTest struct { } -func (f myForwardApplyWhereToErrTest) WhereTo(_ []string, _ []string, s string) ([]forwarder.VertexBuffer, error) { +func (f myForwardApplyWhereToErrTest) WhereTo(_ []string, _ []string, _ string) ([]forwarder.VertexBuffer, error) { return []forwarder.VertexBuffer{{ ToVertexName: "to1", ToVertexPartitionIdx: 0, }}, fmt.Errorf("whereToStep failed") } -func (f myForwardApplyWhereToErrTest) ApplyMap(ctx context.Context, message *isb.ReadMessage) ([]*isb.WriteMessage, error) { - return testutils.CopyUDFTestApply(ctx, "", message) +func (f myForwardApplyWhereToErrTest) ApplyMap(ctx context.Context, messages []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { + return testutils.CopyUDFTestApply(ctx, "", messages) } func 
(f myForwardApplyWhereToErrTest) ApplyMapStream(ctx context.Context, message *isb.ReadMessage, writeMessageCh chan<- isb.WriteMessage) error { @@ -1973,14 +1970,14 @@ func (f myForwardApplyWhereToErrTest) ApplyBatchMap(ctx context.Context, message type myForwardApplyUDFErrTest struct { } -func (f myForwardApplyUDFErrTest) WhereTo(_ []string, _ []string, s string) ([]forwarder.VertexBuffer, error) { +func (f myForwardApplyUDFErrTest) WhereTo(_ []string, _ []string, _ string) ([]forwarder.VertexBuffer, error) { return []forwarder.VertexBuffer{{ ToVertexName: "to1", ToVertexPartitionIdx: 0, }}, nil } -func (f myForwardApplyUDFErrTest) ApplyMap(_ context.Context, _ *isb.ReadMessage) ([]*isb.WriteMessage, error) { +func (f myForwardApplyUDFErrTest) ApplyMap(context.Context, []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { return nil, fmt.Errorf("UDF error") } @@ -1989,7 +1986,7 @@ func (f myForwardApplyUDFErrTest) ApplyMapStream(_ context.Context, _ *isb.ReadM return fmt.Errorf("UDF error") } -func (f myForwardApplyUDFErrTest) ApplyBatchMap(ctx context.Context, messages []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { +func (f myForwardApplyUDFErrTest) ApplyBatchMap(context.Context, []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { return nil, fmt.Errorf("UDF error") } diff --git a/pkg/udf/forward/shutdown_test.go b/pkg/udf/forward/shutdown_test.go index 42f547fbfd..338feaf941 100644 --- a/pkg/udf/forward/shutdown_test.go +++ b/pkg/udf/forward/shutdown_test.go @@ -38,8 +38,8 @@ func (s myShutdownTest) WhereTo(_ []string, _ []string, _ string) ([]forwarder.V return []forwarder.VertexBuffer{}, nil } -func (s myShutdownTest) ApplyMap(ctx context.Context, message *isb.ReadMessage) ([]*isb.WriteMessage, error) { - return testutils.CopyUDFTestApply(ctx, "", message) +func (s myShutdownTest) ApplyMap(ctx context.Context, messages []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { + return testutils.CopyUDFTestApply(ctx, "", messages) } func (s 
myShutdownTest) ApplyBatchMap(ctx context.Context, messages []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { diff --git a/pkg/udf/map_udf.go b/pkg/udf/map_udf.go index 5d926751f0..3303738449 100644 --- a/pkg/udf/map_udf.go +++ b/pkg/udf/map_udf.go @@ -19,6 +19,7 @@ package udf import ( "context" "fmt" + "io" "os" "strconv" "sync" @@ -105,7 +106,7 @@ func (u *MapUDFProcessor) Start(ctx context.Context) error { } // created watermark related components only if watermark is enabled - // otherwise no op will used + // otherwise no op will be used if !u.VertexInstance.Vertex.Spec.Watermark.Disabled { // create from vertex watermark stores fromVertexWmStores, err = jetstream.BuildFromVertexWatermarkStores(ctx, u.VertexInstance, natsClientPool.NextAvailableClient()) @@ -143,83 +144,77 @@ func (u *MapUDFProcessor) Start(ctx context.Context) error { return err } - // Read the server info file to read which map mode is enabled - // Based on the value set, we will create the corresponding handler and clients - mapMode, ok := serverInfo.Metadata[serverinfo.MapModeKey] + // track all the resources that need to be closed + var resourcesToClose []io.Closer - if ok && (serverinfo.MapMode(mapMode) == serverinfo.StreamMap) { - log.Info("Map mode enabled: Stream Map") - // Map Stream mode - enableMapUdfStream = true - - mapStreamClient, err := mapstreamer.New(serverInfo, sdkclient.WithMaxMessageSize(maxMessageSize)) - if err != nil { - return fmt.Errorf("failed to create map stream client, %w", err) - } - mapStreamHandler = rpc.NewUDSgRPCBasedMapStream(vertexName, mapStreamClient) - - // Readiness check - if err := mapStreamHandler.WaitUntilReady(ctx); err != nil { - return fmt.Errorf("failed on map stream UDF readiness check, %w", err) - } - defer func() { - err = mapStreamHandler.CloseConn(ctx) + for index, bufferPartition := range fromBuffer { + // Read the server info file to read which map mode is enabled + // Based on the value set, we will create the corresponding 
handler and clients + // we create a new client and handler for each partition because + // the client is not thread safe since we use one common gRPC Bidirectional stream + // to communicate with the server + mapMode, ok := serverInfo.Metadata[serverinfo.MapModeKey] + + if ok && (serverinfo.MapMode(mapMode) == serverinfo.StreamMap) { + log.Info("Map mode enabled: Stream Map") + // Map Stream mode + enableMapUdfStream = true + + mapStreamClient, err := mapstreamer.New(serverInfo, sdkclient.WithMaxMessageSize(maxMessageSize)) if err != nil { - log.Warnw("Failed to close gRPC client conn", zap.Error(err)) + return fmt.Errorf("failed to create map stream client, %w", err) } - }() - opts = append(opts, forward.WithUDFStreamingMap(mapStreamHandler)) + mapStreamHandler = rpc.NewUDSgRPCBasedMapStream(vertexName, mapStreamClient) - } else if ok && (serverinfo.MapMode(mapMode) == serverinfo.BatchMap) { - log.Info("Map mode enabled: Batch Map") - // if Batch Map mode is enabled, create the client and handler for that accordingly - enableBatchMapUdf = true + // Readiness check + if err := mapStreamHandler.WaitUntilReady(ctx); err != nil { + return fmt.Errorf("failed on map stream UDF readiness check, %w", err) + } - // create the client and handler for batch map interface - batchMapClient, err := batchmapper.New(serverInfo, sdkclient.WithMaxMessageSize(maxMessageSize)) - if err != nil { - return fmt.Errorf("failed to create batch map client, %w", err) - } - batchMapHandler = rpc.NewUDSgRPCBasedBatchMap(vertexName, batchMapClient) - // Readiness check - if err := batchMapHandler.WaitUntilReady(ctx); err != nil { - return fmt.Errorf("failed on batch map UDF readiness check, %w", err) - } - defer func() { - err = batchMapHandler.CloseConn(ctx) + resourcesToClose = append(resourcesToClose, mapStreamHandler) + opts = append(opts, forward.WithUDFStreamingMap(mapStreamHandler)) + + } else if ok && (serverinfo.MapMode(mapMode) == serverinfo.BatchMap) { + log.Info("Map mode enabled: 
Batch Map") + // if Batch Map mode is enabled, create the client and handler for that accordingly + enableBatchMapUdf = true + + // create the client and handler for batch map interface + batchMapClient, err := batchmapper.New(serverInfo, sdkclient.WithMaxMessageSize(maxMessageSize)) if err != nil { - log.Warnw("Failed to close gRPC client conn", zap.Error(err)) + return fmt.Errorf("failed to create batch map client, %w", err) + } + batchMapHandler = rpc.NewUDSgRPCBasedBatchMap(vertexName, batchMapClient) + // Readiness check + if err := batchMapHandler.WaitUntilReady(ctx); err != nil { + return fmt.Errorf("failed on batch map UDF readiness check, %w", err) } - }() - opts = append(opts, forward.WithUDFBatchMap(batchMapHandler)) - } else { - log.Info("Map mode enabled: Unary Map") - // Default is to enable unary map mode - // If the MapMode metadata is not available, we will start map by default this will ensure - // backward compatibility in case of version mismatch for map + resourcesToClose = append(resourcesToClose, batchMapHandler) + opts = append(opts, forward.WithUDFBatchMap(batchMapHandler)) - // create the client and handler for map interface - mapClient, err := mapper.New(serverInfo, sdkclient.WithMaxMessageSize(maxMessageSize)) - if err != nil { - return fmt.Errorf("failed to create map client, %w", err) - } - mapHandler = rpc.NewUDSgRPCBasedMap(vertexName, mapClient) + } else { + log.Info("Map mode enabled: Unary Map") + // Default is to enable unary map mode + // If the MapMode metadata is not available, we will start map by default this will ensure + // backward compatibility in case of version mismatch for map - // Readiness check - if err := mapHandler.WaitUntilReady(ctx); err != nil { - return fmt.Errorf("failed on map UDF readiness check, %w", err) - } - defer func() { - err = mapHandler.CloseConn(ctx) + // create the client and handler for map interface + mapClient, err := mapper.New(ctx, serverInfo, sdkclient.WithMaxMessageSize(maxMessageSize)) 
if err != nil { - log.Warnw("Failed to close gRPC client conn", zap.Error(err)) + return fmt.Errorf("failed to create map client, %w", err) } - }() - opts = append(opts, forward.WithUDFUnaryMap(mapHandler)) - } + mapHandler = rpc.NewUDSgRPCBasedMap(vertexName, mapClient) + + // Readiness check + if err := mapHandler.WaitUntilReady(ctx); err != nil { + return fmt.Errorf("failed on map UDF readiness check, %w", err) + } + + resourcesToClose = append(resourcesToClose, mapHandler) + opts = append(opts, forward.WithUDFUnaryMap(mapHandler)) + } - for index, bufferPartition := range fromBuffer { // Populate shuffle function map shuffleFuncMap := make(map[string]*shuffle.Shuffle) for _, edge := range u.VertexInstance.Vertex.Spec.ToEdges { @@ -370,6 +365,13 @@ func (u *MapUDFProcessor) Start(ctx context.Context) error { _ = wmStore.Close() } + // close the handlers + for _, r := range resourcesToClose { + if err := r.Close(); err != nil { + log.Errorw("Failed to close the resource", zap.Error(err)) + } + } + log.Info("All udf data processors exited...") return nil } diff --git a/pkg/udf/rpc/grpc_batch_map.go b/pkg/udf/rpc/grpc_batch_map.go index ce65d201fb..0be7336f13 100644 --- a/pkg/udf/rpc/grpc_batch_map.go +++ b/pkg/udf/rpc/grpc_batch_map.go @@ -44,9 +44,9 @@ func NewUDSgRPCBasedBatchMap(vertexName string, client batchmapper.Client) *GRPC } } -// CloseConn closes the gRPC client connection. -func (u *GRPCBasedBatchMap) CloseConn(ctx context.Context) error { - return u.client.CloseConn(ctx) +// Close closes the gRPC client connection. +func (u *GRPCBasedBatchMap) Close() error { + return u.client.CloseConn() } // IsHealthy checks if the map udf is healthy. 
@@ -150,7 +150,6 @@ loop: responsePair := isb.ReadWriteMessagePair{ ReadMessage: parentMessage, WriteMessages: parsedResp, - Err: nil, } udfResults = append(udfResults, responsePair) } diff --git a/pkg/udf/rpc/grpc_map.go b/pkg/udf/rpc/grpc_map.go index 2ace71e4d1..65f9fcdb24 100644 --- a/pkg/udf/rpc/grpc_map.go +++ b/pkg/udf/rpc/grpc_map.go @@ -24,10 +24,8 @@ import ( mappb "github.com/numaproj/numaflow-go/pkg/apis/proto/map/v1" "google.golang.org/protobuf/types/known/emptypb" "google.golang.org/protobuf/types/known/timestamppb" - "k8s.io/apimachinery/pkg/util/wait" "github.com/numaproj/numaflow/pkg/isb" - sdkerr "github.com/numaproj/numaflow/pkg/sdkclient/error" "github.com/numaproj/numaflow/pkg/sdkclient/mapper" "github.com/numaproj/numaflow/pkg/shared/logging" ) @@ -45,9 +43,9 @@ func NewUDSgRPCBasedMap(vertexName string, client mapper.Client) *GRPCBasedMap { } } -// CloseConn closes the gRPC client connection. -func (u *GRPCBasedMap) CloseConn(ctx context.Context) error { - return u.client.CloseConn(ctx) +// Close closes the gRPC client connection. +func (u *GRPCBasedMap) Close() error { + return u.client.CloseConn() } // IsHealthy checks if the map udf is healthy. 
@@ -73,98 +71,76 @@ func (u *GRPCBasedMap) WaitUntilReady(ctx context.Context) error { } } -func (u *GRPCBasedMap) ApplyMap(ctx context.Context, readMessage *isb.ReadMessage) ([]*isb.WriteMessage, error) { - keys := readMessage.Keys - payload := readMessage.Body.Payload - parentMessageInfo := readMessage.MessageInfo - var req = &mappb.MapRequest{ - Keys: keys, - Value: payload, - EventTime: timestamppb.New(parentMessageInfo.EventTime), - Watermark: timestamppb.New(readMessage.Watermark), - Headers: readMessage.Headers, +func (u *GRPCBasedMap) ApplyMap(ctx context.Context, readMessages []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { + requests := make([]*mappb.MapRequest, len(readMessages)) + results := make([]isb.ReadWriteMessagePair, len(readMessages)) + idToMsgMapping := make(map[string]*isb.ReadMessage) + + for i, msg := range readMessages { + // we track the id to the message mapping to be able to match the response with the original message. + // message info of response should be the same as the message info of the request. 
+ id := msg.ReadOffset.String() + idToMsgMapping[id] = msg + req := &mappb.MapRequest{ + Request: &mappb.MapRequest_Request{ + Keys: msg.Keys, + Value: msg.Body.Payload, + EventTime: timestamppb.New(msg.MessageInfo.EventTime), + Watermark: timestamppb.New(msg.Watermark), + Headers: msg.Headers, + }, + Id: id, + } + requests[i] = req } - response, err := u.client.MapFn(ctx, req) + responses, err := u.client.MapFn(ctx, requests) + if err != nil { - udfErr, _ := sdkerr.FromError(err) - switch udfErr.ErrorKind() { - case sdkerr.Retryable: - var success bool - _ = wait.ExponentialBackoffWithContext(ctx, wait.Backoff{ - // retry every "duration * factor + [0, jitter]" interval for 5 times - Duration: 1 * time.Second, - Factor: 1, - Jitter: 0.1, - Steps: 5, - }, func(_ context.Context) (done bool, err error) { - response, err = u.client.MapFn(ctx, req) - if err != nil { - udfErr, _ = sdkerr.FromError(err) - switch udfErr.ErrorKind() { - case sdkerr.Retryable: - return false, nil - case sdkerr.NonRetryable: - return true, nil - default: - return true, nil - } - } - success = true - return true, nil - }) - if !success { - return nil, &ApplyUDFErr{ - UserUDFErr: false, - Message: fmt.Sprintf("gRPC client.MapFn failed, %s", err), - InternalErr: InternalErr{ - Flag: true, - MainCarDown: false, - }, - } - } - case sdkerr.NonRetryable: - return nil, &ApplyUDFErr{ - UserUDFErr: false, - Message: fmt.Sprintf("gRPC client.MapFn failed, %s", err), - InternalErr: InternalErr{ - Flag: true, - MainCarDown: false, - }, - } - default: - return nil, &ApplyUDFErr{ - UserUDFErr: false, - Message: fmt.Sprintf("gRPC client.MapFn failed, %s", err), - InternalErr: InternalErr{ - Flag: true, - MainCarDown: false, - }, - } + println("gRPC client.mapFn failed, ", err.Error()) + err = &ApplyUDFErr{ + UserUDFErr: false, + Message: fmt.Sprintf("gRPC client.MapFn failed, %s", err), + InternalErr: InternalErr{ + Flag: true, + MainCarDown: false, + }, } + return nil, err } - writeMessages := 
make([]*isb.WriteMessage, 0) - for index, result := range response.GetResults() { - keys := result.Keys - taggedMessage := &isb.WriteMessage{ - Message: isb.Message{ - Header: isb.Header{ - MessageInfo: parentMessageInfo, - Keys: keys, - ID: isb.MessageID{ - VertexName: u.vertexName, - Offset: readMessage.ReadOffset.String(), - Index: int32(index), + for i, resp := range responses { + parentMessage, ok := idToMsgMapping[resp.GetId()] + if !ok { + panic(fmt.Sprintf("tracker doesn't contain the message ID received from the response: %s", resp.GetId())) + } + taggedMessages := make([]*isb.WriteMessage, len(resp.GetResults())) + for j, result := range resp.GetResults() { + keys := result.Keys + taggedMessage := &isb.WriteMessage{ + Message: isb.Message{ + Header: isb.Header{ + MessageInfo: parentMessage.MessageInfo, + ID: isb.MessageID{ + VertexName: u.vertexName, + Offset: parentMessage.ReadOffset.String(), + Index: int32(j), + }, + Keys: keys, + }, + Body: isb.Body{ + Payload: result.Value, }, }, - Body: isb.Body{ - Payload: result.Value, - }, - }, - Tags: result.Tags, + Tags: result.Tags, + } + taggedMessage.Headers = parentMessage.Headers + taggedMessages[j] = taggedMessage + } + results[i] = isb.ReadWriteMessagePair{ + ReadMessage: parentMessage, + WriteMessages: taggedMessages, } - writeMessages = append(writeMessages, taggedMessage) } - return writeMessages, nil + return results, nil } diff --git a/pkg/udf/rpc/grpc_map_test.go b/pkg/udf/rpc/grpc_map_test.go index fc657a892b..50930aadb3 100644 --- a/pkg/udf/rpc/grpc_map_test.go +++ b/pkg/udf/rpc/grpc_map_test.go @@ -19,103 +19,103 @@ package rpc import ( "context" "errors" - "fmt" + "net" "testing" "time" - "github.com/golang/mock/gomock" mappb "github.com/numaproj/numaflow-go/pkg/apis/proto/map/v1" - "github.com/numaproj/numaflow-go/pkg/apis/proto/map/v1/mapmock" + "github.com/numaproj/numaflow-go/pkg/mapper" "github.com/stretchr/testify/assert" - "go.uber.org/goleak" - "google.golang.org/grpc/codes" - 
"google.golang.org/grpc/status" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/timestamppb" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/test/bufconn" "github.com/numaproj/numaflow/pkg/isb" - "github.com/numaproj/numaflow/pkg/sdkclient/mapper" + mapper2 "github.com/numaproj/numaflow/pkg/sdkclient/mapper" ) -func TestMain(m *testing.M) { - goleak.VerifyTestMain(m) -} - -type rpcMsg struct { - msg proto.Message -} - -func (r *rpcMsg) Matches(msg interface{}) bool { - m, ok := msg.(proto.Message) - if !ok { - return false +func TestGRPCBasedMap_WaitUntilReadyWithServer(t *testing.T) { + svc := &mapper.Service{ + Mapper: mapper.MapperFunc(func(ctx context.Context, keys []string, d mapper.Datum) mapper.Messages { + return mapper.Messages{} + }), } - return proto.Equal(m, r.msg) -} -func (r *rpcMsg) String() string { - return fmt.Sprintf("is %s", r.msg) + conn := newServer(t, func(server *grpc.Server) { + mappb.RegisterMapServer(server, svc) + }) + mapClient := mappb.NewMapClient(conn) + client, _ := mapper2.NewFromClient(context.Background(), mapClient) + u := NewUDSgRPCBasedMap("testVertex", client) + err := u.WaitUntilReady(context.Background()) + assert.NoError(t, err) } -func NewMockUDSGRPCBasedMap(mockClient *mapmock.MockMapClient) *GRPCBasedMap { - c, _ := mapper.NewFromClient(mockClient) - return &GRPCBasedMap{"test-vertex", c} -} +func newServer(t *testing.T, register func(server *grpc.Server)) *grpc.ClientConn { + lis := bufconn.Listen(100) + t.Cleanup(func() { + _ = lis.Close() + }) -func TestGRPCBasedMap_WaitUntilReadyWithMockClient(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() + server := grpc.NewServer() + t.Cleanup(func() { + server.Stop() + }) - mockClient := mapmock.NewMockMapClient(ctrl) - mockClient.EXPECT().IsReady(gomock.Any(), gomock.Any()).Return(&mappb.ReadyResponse{Ready: true}, nil) + 
register(server) - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() + errChan := make(chan error, 1) go func() { - <-ctx.Done() - if errors.Is(ctx.Err(), context.DeadlineExceeded) { - t.Log(t.Name(), "test timeout") + // t.Fatal should only be called from the goroutine running the test + if err := server.Serve(lis); err != nil { + errChan <- err } }() - u := NewMockUDSGRPCBasedMap(mockClient) - err := u.WaitUntilReady(ctx) - assert.NoError(t, err) -} + dialer := func(context.Context, string) (net.Conn, error) { + return lis.Dial() + } -func TestGRPCBasedMap_BasicApplyWithMockClient(t *testing.T) { - t.Run("test success", func(t *testing.T) { + conn, err := grpc.NewClient("passthrough://", grpc.WithContextDialer(dialer), grpc.WithTransportCredentials(insecure.NewCredentials())) + t.Cleanup(func() { + _ = conn.Close() + }) + if err != nil { + t.Fatalf("Creating new gRPC client connection: %v", err) + } - ctrl := gomock.NewController(t) - defer ctrl.Finish() + var grpcServerErr error + select { + case grpcServerErr = <-errChan: + case <-time.After(500 * time.Millisecond): + grpcServerErr = errors.New("gRPC server didn't start in 500ms") + } + if err != nil { + t.Fatalf("Failed to start gRPC server: %v", grpcServerErr) + } + + return conn +} - mockClient := mapmock.NewMockMapClient(ctrl) - req := &mappb.MapRequest{ - Keys: []string{"test_success_key"}, - Value: []byte(`forward_message`), - EventTime: timestamppb.New(time.Unix(1661169600, 0)), - Watermark: timestamppb.New(time.Time{}), +func TestGRPCBasedMap_ApplyMapWithServer(t *testing.T) { + t.Run("test success", func(t *testing.T) { + svc := &mapper.Service{ + Mapper: mapper.MapperFunc(func(ctx context.Context, keys []string, d mapper.Datum) mapper.Messages { + return mapper.MessagesBuilder().Append(mapper.NewMessage(d.Value()).WithKeys(keys)) + }), } - mockClient.EXPECT().MapFn(gomock.Any(), &rpcMsg{msg: req}).Return(&mappb.MapResponse{ - Results: []*mappb.MapResponse_Result{ - 
{ - Keys: []string{"test_success_key"}, - Value: []byte(`forward_message`), - }, - }, - }, nil) - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - go func() { - <-ctx.Done() - if errors.Is(ctx.Err(), context.DeadlineExceeded) { - t.Log(t.Name(), "test timeout") - } - }() + conn := newServer(t, func(server *grpc.Server) { + mappb.RegisterMapServer(server, svc) + }) + mapClient := mappb.NewMapClient(conn) + ctx := context.Background() + client, err := mapper2.NewFromClient(ctx, mapClient) + require.NoError(t, err, "creating map client") + u := NewUDSgRPCBasedMap("testVertex", client) - u := NewMockUDSGRPCBasedMap(mockClient) - got, err := u.ApplyMap(ctx, &isb.ReadMessage{ + got, err := u.ApplyMap(ctx, []*isb.ReadMessage{{ Message: isb.Message{ Header: isb.Header{ MessageInfo: isb.MessageInfo{ @@ -123,7 +123,7 @@ func TestGRPCBasedMap_BasicApplyWithMockClient(t *testing.T) { }, ID: isb.MessageID{ VertexName: "test-vertex", - Offset: "test-offset", + Offset: "0-0", }, Keys: []string{"test_success_key"}, }, @@ -132,98 +132,32 @@ func TestGRPCBasedMap_BasicApplyWithMockClient(t *testing.T) { }, }, ReadOffset: isb.SimpleStringOffset(func() string { return "0" }), - Metadata: isb.MessageMetadata{ - NumDelivered: 1, - }, - }, - ) + }}) assert.NoError(t, err) - assert.Equal(t, req.Keys, got[0].Keys) - assert.Equal(t, req.Value, got[0].Payload) + assert.Equal(t, []string{"test_success_key"}, got[0].WriteMessages[0].Keys) + assert.Equal(t, []byte(`forward_message`), got[0].WriteMessages[0].Payload) }) - t.Run("test retryable error: failed after 5 retries", func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockClient := mapmock.NewMockMapClient(ctrl) - req := &mappb.MapRequest{ - Keys: []string{"test_error_key"}, - Value: []byte(`forward_message`), - EventTime: timestamppb.New(time.Unix(1661169660, 0)), - Watermark: timestamppb.New(time.Time{}), + t.Run("test error", func(t *testing.T) { + svc := 
&mapper.Service{ + Mapper: mapper.MapperFunc(func(ctx context.Context, keys []string, d mapper.Datum) mapper.Messages { + return mapper.Messages{} + }), } - mockClient.EXPECT().MapFn(gomock.Any(), &rpcMsg{msg: req}).Return(nil, status.New(codes.DeadlineExceeded, "mock test err").Err()) - mockClient.EXPECT().MapFn(gomock.Any(), &rpcMsg{msg: req}).Return(nil, status.New(codes.DeadlineExceeded, "mock test err").Err()) - mockClient.EXPECT().MapFn(gomock.Any(), &rpcMsg{msg: req}).Return(nil, status.New(codes.DeadlineExceeded, "mock test err").Err()) - mockClient.EXPECT().MapFn(gomock.Any(), &rpcMsg{msg: req}).Return(nil, status.New(codes.DeadlineExceeded, "mock test err").Err()) - mockClient.EXPECT().MapFn(gomock.Any(), &rpcMsg{msg: req}).Return(nil, status.New(codes.DeadlineExceeded, "mock test err").Err()) - mockClient.EXPECT().MapFn(gomock.Any(), &rpcMsg{msg: req}).Return(nil, status.New(codes.DeadlineExceeded, "mock test err").Err()) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - go func() { - <-ctx.Done() - if errors.Is(ctx.Err(), context.DeadlineExceeded) { - t.Log(t.Name(), "test timeout") - } - }() - - u := NewMockUDSGRPCBasedMap(mockClient) - _, err := u.ApplyMap(ctx, &isb.ReadMessage{ - Message: isb.Message{ - Header: isb.Header{ - MessageInfo: isb.MessageInfo{ - EventTime: time.Unix(1661169660, 0), - }, - ID: isb.MessageID{ - VertexName: "test-vertex", - Offset: "test-offset", - }, - Keys: []string{"test_error_key"}, - }, - Body: isb.Body{ - Payload: []byte(`forward_message`), - }, - }, - ReadOffset: isb.SimpleStringOffset(func() string { return "0" }), - }, - ) - assert.ErrorIs(t, err, &ApplyUDFErr{ - UserUDFErr: false, - Message: fmt.Sprintf("%s", err), - InternalErr: InternalErr{ - Flag: true, - MainCarDown: false, - }, + conn := newServer(t, func(server *grpc.Server) { + mappb.RegisterMapServer(server, svc) }) - }) - - t.Run("test retryable error: failed after 1 retry", func(t *testing.T) { - ctrl := 
gomock.NewController(t) - defer ctrl.Finish() + mapClient := mappb.NewMapClient(conn) + ctx, cancel := context.WithCancel(context.Background()) + client, err := mapper2.NewFromClient(ctx, mapClient) + require.NoError(t, err, "creating map client") + u := NewUDSgRPCBasedMap("testVertex", client) - mockClient := mapmock.NewMockMapClient(ctrl) - req := &mappb.MapRequest{ - Keys: []string{"test_error_key"}, - Value: []byte(`forward_message`), - EventTime: timestamppb.New(time.Unix(1661169660, 0)), - Watermark: timestamppb.New(time.Time{}), - } - mockClient.EXPECT().MapFn(gomock.Any(), &rpcMsg{msg: req}).Return(nil, status.New(codes.DeadlineExceeded, "mock test err").Err()) - mockClient.EXPECT().MapFn(gomock.Any(), &rpcMsg{msg: req}).Return(nil, status.New(codes.InvalidArgument, "mock test err: non retryable").Err()) - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - go func() { - <-ctx.Done() - if errors.Is(ctx.Err(), context.DeadlineExceeded) { - t.Log(t.Name(), "test timeout") - } - }() + // This cancelled context is passed to the ApplyMap function to simulate failure + cancel() - u := NewMockUDSGRPCBasedMap(mockClient) - _, err := u.ApplyMap(ctx, &isb.ReadMessage{ + _, err = u.ApplyMap(ctx, []*isb.ReadMessage{{ Message: isb.Message{ Header: isb.Header{ MessageInfo: isb.MessageInfo{ @@ -231,7 +165,7 @@ func TestGRPCBasedMap_BasicApplyWithMockClient(t *testing.T) { }, ID: isb.MessageID{ VertexName: "test-vertex", - Offset: "test-offset", + Offset: "0-0", }, Keys: []string{"test_error_key"}, }, @@ -240,125 +174,18 @@ func TestGRPCBasedMap_BasicApplyWithMockClient(t *testing.T) { }, }, ReadOffset: isb.SimpleStringOffset(func() string { return "0" }), - }, - ) - assert.ErrorIs(t, err, &ApplyUDFErr{ - UserUDFErr: false, - Message: fmt.Sprintf("%s", err), - InternalErr: InternalErr{ - Flag: true, - MainCarDown: false, - }, - }) - }) - - t.Run("test retryable error: succeed after 1 retry", func(t *testing.T) { - ctrl := 
gomock.NewController(t) - defer ctrl.Finish() + }}) - mockClient := mapmock.NewMockMapClient(ctrl) - req := &mappb.MapRequest{ - Keys: []string{"test_success_key"}, - Value: []byte(`forward_message`), - EventTime: timestamppb.New(time.Unix(1661169720, 0)), - Watermark: timestamppb.New(time.Time{}), - } - mockClient.EXPECT().MapFn(gomock.Any(), &rpcMsg{msg: req}).Return(nil, status.New(codes.DeadlineExceeded, "mock test err").Err()) - mockClient.EXPECT().MapFn(gomock.Any(), &rpcMsg{msg: req}).Return(&mappb.MapResponse{ - Results: []*mappb.MapResponse_Result{ - { - Keys: []string{"test_success_key"}, - Value: []byte(`forward_message`), - }, - }, - }, nil) - - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - go func() { - <-ctx.Done() - if errors.Is(ctx.Err(), context.DeadlineExceeded) { - t.Log(t.Name(), "test timeout") - } - }() - - u := NewMockUDSGRPCBasedMap(mockClient) - got, err := u.ApplyMap(ctx, &isb.ReadMessage{ - Message: isb.Message{ - Header: isb.Header{ - MessageInfo: isb.MessageInfo{ - EventTime: time.Unix(1661169720, 0), - }, - ID: isb.MessageID{ - VertexName: "test-vertex", - Offset: "test-offset", - }, - Keys: []string{"test_success_key"}, - }, - Body: isb.Body{ - Payload: []byte(`forward_message`), - }, - }, - ReadOffset: isb.SimpleStringOffset(func() string { return "0" }), - Metadata: isb.MessageMetadata{ - NumDelivered: 1, - }, - }, - ) - assert.NoError(t, err) - assert.Equal(t, req.Keys, got[0].Keys) - assert.Equal(t, req.Value, got[0].Payload) - }) - - t.Run("test non retryable error", func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockClient := mapmock.NewMockMapClient(ctrl) - req := &mappb.MapRequest{ - Keys: []string{"test_error_key"}, - Value: []byte(`forward_message`), - EventTime: timestamppb.New(time.Unix(1661169660, 0)), - Watermark: timestamppb.New(time.Time{}), - } - mockClient.EXPECT().MapFn(gomock.Any(), &rpcMsg{msg: req}).Return(nil, 
status.New(codes.InvalidArgument, "mock test err: non retryable").Err()) - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - go func() { - <-ctx.Done() - if errors.Is(ctx.Err(), context.DeadlineExceeded) { - t.Log(t.Name(), "test timeout") - } - }() - - u := NewMockUDSGRPCBasedMap(mockClient) - _, err := u.ApplyMap(ctx, &isb.ReadMessage{ - Message: isb.Message{ - Header: isb.Header{ - MessageInfo: isb.MessageInfo{ - EventTime: time.Unix(1661169660, 0), - }, - ID: isb.MessageID{ - VertexName: "test-vertex", - Offset: "test-offset", - }, - Keys: []string{"test_error_key"}, - }, - Body: isb.Body{ - Payload: []byte(`forward_message`), - }, - }, - ReadOffset: isb.SimpleStringOffset(func() string { return "0" }), - }, - ) - assert.ErrorIs(t, err, &ApplyUDFErr{ + expectedUDFErr := &ApplyUDFErr{ UserUDFErr: false, - Message: fmt.Sprintf("%s", err), + Message: "gRPC client.MapFn failed, context canceled", InternalErr: InternalErr{ Flag: true, MainCarDown: false, }, - }) + } + var receivedErr *ApplyUDFErr + assert.ErrorAs(t, err, &receivedErr) + assert.Equal(t, expectedUDFErr, receivedErr) }) } diff --git a/pkg/udf/rpc/grpc_mapstream.go b/pkg/udf/rpc/grpc_mapstream.go index bbbf7a4937..8c5e610063 100644 --- a/pkg/udf/rpc/grpc_mapstream.go +++ b/pkg/udf/rpc/grpc_mapstream.go @@ -44,9 +44,9 @@ func NewUDSgRPCBasedMapStream(vertexName string, client mapstreamer.Client) *GRP } } -// CloseConn closes the gRPC client connection. -func (u *GRPCBasedMapStream) CloseConn(ctx context.Context) error { - return u.client.CloseConn(ctx) +// Close closes the gRPC client connection. +func (u *GRPCBasedMapStream) Close() error { + return u.client.CloseConn() } // IsHealthy checks if the map stream udf is healthy. 
diff --git a/pkg/udf/rpc/grpc_mapstream_test.go b/pkg/udf/rpc/grpc_mapstream_test.go index 148f9b5874..40385ed3f0 100644 --- a/pkg/udf/rpc/grpc_mapstream_test.go +++ b/pkg/udf/rpc/grpc_mapstream_test.go @@ -28,6 +28,7 @@ import ( mapstreampb "github.com/numaproj/numaflow-go/pkg/apis/proto/mapstream/v1" "github.com/numaproj/numaflow-go/pkg/apis/proto/mapstream/v1/mapstreammock" "github.com/stretchr/testify/assert" + "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/timestamppb" "github.com/numaproj/numaflow/pkg/isb" @@ -60,6 +61,22 @@ func TestGRPCBasedMapStream_WaitUntilReadyWithMockClient(t *testing.T) { assert.NoError(t, err) } +type rpcMsg struct { + msg proto.Message +} + +func (r *rpcMsg) Matches(msg interface{}) bool { + m, ok := msg.(proto.Message) + if !ok { + return false + } + return proto.Equal(m, r.msg) +} + +func (r *rpcMsg) String() string { + return fmt.Sprintf("is %s", r.msg) +} + func TestGRPCBasedUDF_BasicApplyStreamWithMockClient(t *testing.T) { t.Run("test success", func(t *testing.T) { diff --git a/rust/numaflow-grpc/src/clients/map.v1.rs b/rust/numaflow-grpc/src/clients/map.v1.rs index e0016e1963..93e372cbe3 100644 --- a/rust/numaflow-grpc/src/clients/map.v1.rs +++ b/rust/numaflow-grpc/src/clients/map.v1.rs @@ -3,19 +3,40 @@ /// MapRequest represents a request element. 
#[derive(Clone, PartialEq, ::prost::Message)] pub struct MapRequest { - #[prost(string, repeated, tag = "1")] - pub keys: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - #[prost(bytes = "vec", tag = "2")] - pub value: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "1")] + pub request: ::core::option::Option, + /// This ID is used to uniquely identify a map request + #[prost(string, tag = "2")] + pub id: ::prost::alloc::string::String, #[prost(message, optional, tag = "3")] - pub event_time: ::core::option::Option<::prost_types::Timestamp>, - #[prost(message, optional, tag = "4")] - pub watermark: ::core::option::Option<::prost_types::Timestamp>, - #[prost(map = "string, string", tag = "5")] - pub headers: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost::alloc::string::String, - >, + pub handshake: ::core::option::Option, +} +/// Nested message and enum types in `MapRequest`. +pub mod map_request { + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Request { + #[prost(string, repeated, tag = "1")] + pub keys: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + #[prost(bytes = "vec", tag = "2")] + pub value: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "3")] + pub event_time: ::core::option::Option<::prost_types::Timestamp>, + #[prost(message, optional, tag = "4")] + pub watermark: ::core::option::Option<::prost_types::Timestamp>, + #[prost(map = "string, string", tag = "5")] + pub headers: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, + } +} +/// +/// Handshake message between client and server to indicate the start of transmission. +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct Handshake { + /// Required field indicating the start of transmission. + #[prost(bool, tag = "1")] + pub sot: bool, } /// * /// MapResponse represents a response element. 
@@ -23,6 +44,11 @@ pub struct MapRequest { pub struct MapResponse { #[prost(message, repeated, tag = "1")] pub results: ::prost::alloc::vec::Vec, + /// This ID is used to refer the responses to the request it corresponds to. + #[prost(string, tag = "2")] + pub id: ::prost::alloc::string::String, + #[prost(message, optional, tag = "3")] + pub handshake: ::core::option::Option, } /// Nested message and enum types in `MapResponse`. pub mod map_response { @@ -137,8 +163,11 @@ pub mod map_client { /// MapFn applies a function to each map request element. pub async fn map_fn( &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + request: impl tonic::IntoStreamingRequest, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { self.inner .ready() .await @@ -149,9 +178,9 @@ pub mod map_client { })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static("/map.v1.Map/MapFn"); - let mut req = request.into_request(); + let mut req = request.into_streaming_request(); req.extensions_mut().insert(GrpcMethod::new("map.v1.Map", "MapFn")); - self.inner.unary(req, path, codec).await + self.inner.streaming(req, path, codec).await } /// IsReady is the heartbeat endpoint for gRPC. 
pub async fn is_ready( diff --git a/test/diamond-e2e/testdata/join-on-reduce.yaml b/test/diamond-e2e/testdata/join-on-reduce.yaml index e1562dd133..3f4475fef7 100644 --- a/test/diamond-e2e/testdata/join-on-reduce.yaml +++ b/test/diamond-e2e/testdata/join-on-reduce.yaml @@ -67,4 +67,4 @@ spec: - from: atoi-1 to: compute-sum - from: compute-sum - to: sink + to: sink \ No newline at end of file diff --git a/test/transformer-e2e/transformer_test.go b/test/transformer-e2e/transformer_test.go index e6b727fcb9..a8a4905a31 100644 --- a/test/transformer-e2e/transformer_test.go +++ b/test/transformer-e2e/transformer_test.go @@ -188,10 +188,12 @@ func (s *TransformerSuite) TestSourceTransformer() { defer wg.Done() s.testSourceTransformer("go") }() - //go func() { - // defer wg.Done() - // s.testSourceTransformer("rust") - //}() + + wg.Add(1) + go func() { + defer wg.Done() + s.testSourceTransformer("rust") + }() wg.Wait() } From a70bd33daec462b39e93d9238978f819ed4290f2 Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Wed, 9 Oct 2024 01:08:44 -0700 Subject: [PATCH 095/188] chore: always install specified codegen tools (#2133) --- hack/crdgen.sh | 4 +--- hack/swagger-gen.sh | 8 +++----- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/hack/crdgen.sh b/hack/crdgen.sh index 69d871490f..a40e719325 100755 --- a/hack/crdgen.sh +++ b/hack/crdgen.sh @@ -4,9 +4,7 @@ set -eu -o pipefail source $(dirname $0)/library.sh ensure_vendor -if [ "$(command -v controller-gen)" = "" ]; then - go install sigs.k8s.io/controller-tools/cmd/controller-gen -fi +go install -mod=vendor ./vendor/sigs.k8s.io/controller-tools/cmd/controller-gen header "Generating CRDs" # maxDescLen=0 avoids `kubectl apply` failing due to annotations being too long diff --git a/hack/swagger-gen.sh b/hack/swagger-gen.sh index d5e3f0ff36..3ddd12afca 100755 --- a/hack/swagger-gen.sh +++ b/hack/swagger-gen.sh @@ -16,14 +16,12 @@ k8s_swagger="dist/kubernetes.swagger.json" kubeified_swagger="dist/kubefied.swagger.json" 
output="api/openapi-spec/swagger.json" -if [ "`command -v swagger`" = "" ]; then - go install -mod=vendor ./vendor/github.com/go-swagger/go-swagger/cmd/swagger -fi +go install -mod=vendor ./vendor/github.com/go-swagger/go-swagger/cmd/swagger curl -Ls https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.31/api/openapi-spec/swagger.json -o ${k8s_swagger} go run ./hack/gen-openapi-spec/main.go ${VERSION} ${k8s_swagger} ${kubeified_swagger} -swagger flatten --with-flatten minimal ${kubeified_swagger} -o ${output} +$(go env GOPATH)/bin/swagger flatten --with-flatten minimal ${kubeified_swagger} -o ${output} -swagger validate ${output} +$(go env GOPATH)/bin/swagger validate ${output} From fae53fa2dcef7ef18d0d068368aaf6b832410c1e Mon Sep 17 00:00:00 2001 From: Yashash H L Date: Wed, 9 Oct 2024 22:44:40 +0530 Subject: [PATCH 096/188] feat: integrate tickgen with monovertex (#2136) Signed-off-by: Yashash H L Signed-off-by: Vigith Maurice Co-authored-by: Vigith Maurice --- rust/numaflow-core/src/config.rs | 179 ++++++++++++---- rust/numaflow-core/src/monovertex.rs | 210 +++++++++++++------ rust/numaflow-core/src/monovertex/metrics.rs | 35 ++-- rust/numaflow-core/src/shared/utils.rs | 54 +++-- 4 files changed, 339 insertions(+), 139 deletions(-) diff --git a/rust/numaflow-core/src/config.rs b/rust/numaflow-core/src/config.rs index 6310295f46..48c4e597b5 100644 --- a/rust/numaflow-core/src/config.rs +++ b/rust/numaflow-core/src/config.rs @@ -1,6 +1,7 @@ use crate::error::Error; use base64::prelude::BASE64_STANDARD; use base64::Engine; +use bytes::Bytes; use numaflow_models::models::{Backoff, MonoVertex, RetryStrategy}; use std::env; use std::fmt::Display; @@ -17,7 +18,6 @@ const DEFAULT_TRANSFORMER_SOCKET: &str = "/var/run/numaflow/sourcetransform.sock const DEFAULT_TRANSFORMER_SERVER_INFO_FILE: &str = "/var/run/numaflow/sourcetransformer-server-info"; const ENV_MONO_VERTEX_OBJ: &str = "NUMAFLOW_MONO_VERTEX_OBJECT"; -const ENV_GRPC_MAX_MESSAGE_SIZE: &str = 
"NUMAFLOW_GRPC_MAX_MESSAGE_SIZE"; const ENV_POD_REPLICA: &str = "NUMAFLOW_REPLICA"; const DEFAULT_GRPC_MAX_MESSAGE_SIZE: usize = 64 * 1024 * 1024; // 64 MB const DEFAULT_METRICS_PORT: u16 = 2469; @@ -88,38 +88,87 @@ pub struct Settings { pub sink_retry_interval_in_ms: u32, pub sink_retry_on_fail_strategy: OnFailureStrategy, pub sink_default_retry_strategy: RetryStrategy, - pub sdk_config: SDKConfig, + pub transformer_config: Option, + pub udsource_config: Option, + pub udsink_config: UDSinkConfig, + pub fallback_config: Option, + pub generator_config: Option, } #[derive(Debug, Clone)] -pub struct SDKConfig { +pub struct TransformerConfig { pub grpc_max_message_size: usize, - pub is_transformer_enabled: bool, - pub is_fallback_enabled: bool, - pub source_socket_path: String, - pub sink_socket_path: String, - pub transformer_socket_path: String, - pub fallback_socket_path: String, - pub source_server_info_path: String, - pub sink_server_info_path: String, - pub transformer_server_info_path: String, - pub fallback_server_info_path: String, + pub socket_path: String, + pub server_info_path: String, } -impl Default for SDKConfig { +impl Default for TransformerConfig { fn default() -> Self { Self { grpc_max_message_size: DEFAULT_GRPC_MAX_MESSAGE_SIZE, - is_transformer_enabled: false, - is_fallback_enabled: false, - source_socket_path: DEFAULT_SOURCE_SOCKET.to_string(), - sink_socket_path: DEFAULT_SINK_SOCKET.to_string(), - transformer_socket_path: DEFAULT_TRANSFORMER_SOCKET.to_string(), - fallback_socket_path: DEFAULT_FB_SINK_SOCKET.to_string(), - source_server_info_path: DEFAULT_SOURCE_SERVER_INFO_FILE.to_string(), - sink_server_info_path: DEFAULT_SINK_SERVER_INFO_FILE.to_string(), - transformer_server_info_path: DEFAULT_TRANSFORMER_SERVER_INFO_FILE.to_string(), - fallback_server_info_path: DEFAULT_FB_SINK_SERVER_INFO_FILE.to_string(), + socket_path: DEFAULT_TRANSFORMER_SOCKET.to_string(), + server_info_path: DEFAULT_TRANSFORMER_SERVER_INFO_FILE.to_string(), + } + } +} 
+ +#[derive(Debug, Clone)] +pub struct UDSourceConfig { + pub grpc_max_message_size: usize, + pub socket_path: String, + pub server_info_path: String, +} + +impl Default for UDSourceConfig { + fn default() -> Self { + Self { + grpc_max_message_size: DEFAULT_GRPC_MAX_MESSAGE_SIZE, + socket_path: DEFAULT_SOURCE_SOCKET.to_string(), + server_info_path: DEFAULT_SOURCE_SERVER_INFO_FILE.to_string(), + } + } +} + +#[derive(Debug, Clone)] +pub struct UDSinkConfig { + pub grpc_max_message_size: usize, + pub socket_path: String, + pub server_info_path: String, +} + +impl Default for UDSinkConfig { + fn default() -> Self { + Self { + grpc_max_message_size: DEFAULT_GRPC_MAX_MESSAGE_SIZE, + socket_path: DEFAULT_SINK_SOCKET.to_string(), + server_info_path: DEFAULT_SINK_SERVER_INFO_FILE.to_string(), + } + } +} + +impl UDSinkConfig { + fn fallback_default() -> Self { + Self { + grpc_max_message_size: DEFAULT_GRPC_MAX_MESSAGE_SIZE, + socket_path: DEFAULT_FB_SINK_SOCKET.to_string(), + server_info_path: DEFAULT_FB_SINK_SERVER_INFO_FILE.to_string(), + } + } +} + +#[derive(Debug, Clone)] +pub struct GeneratorConfig { + pub rpu: usize, + pub content: Bytes, + pub duration: usize, +} + +impl Default for GeneratorConfig { + fn default() -> Self { + Self { + rpu: 1, + content: bytes::Bytes::from("5"), + duration: 1000, } } } @@ -148,7 +197,11 @@ impl Default for Settings { sink_retry_interval_in_ms: DEFAULT_SINK_RETRY_INTERVAL_IN_MS, sink_retry_on_fail_strategy: DEFAULT_SINK_RETRY_ON_FAIL_STRATEGY, sink_default_retry_strategy: default_retry_strategy, - sdk_config: Default::default(), + transformer_config: None, + udsource_config: None, + udsink_config: Default::default(), + fallback_config: None, + generator_config: None, } } } @@ -192,20 +245,77 @@ impl Settings { .and_then(|metadata| metadata.name) .ok_or_else(|| Error::ConfigError("Mono vertex name not found".to_string()))?; - settings.sdk_config.is_transformer_enabled = mono_vertex_obj + settings.transformer_config = match 
mono_vertex_obj .spec .source + .as_deref() .ok_or(Error::ConfigError("Source not found".to_string()))? .transformer - .is_some(); + { + Some(_) => Some(TransformerConfig::default()), + _ => None, + }; - settings.sdk_config.is_fallback_enabled = mono_vertex_obj + settings.udsource_config = match mono_vertex_obj + .spec + .source + .as_deref() + .ok_or(Error::ConfigError("Source not found".to_string()))? + .udsource + { + Some(_) => Some(UDSourceConfig::default()), + _ => None, + }; + + settings.udsink_config = match mono_vertex_obj + .spec + .sink + .as_deref() + .ok_or(Error::ConfigError("Sink not found".to_string()))? + .udsink + { + Some(_) => UDSinkConfig::default(), + _ => UDSinkConfig::default(), + }; + + settings.fallback_config = match mono_vertex_obj .spec .sink .as_deref() .ok_or(Error::ConfigError("Sink not found".to_string()))? .fallback - .is_some(); + { + Some(_) => Some(UDSinkConfig::fallback_default()), + _ => None, + }; + + settings.generator_config = match mono_vertex_obj + .spec + .source + .as_deref() + .ok_or(Error::ConfigError("Source not found".to_string()))? 
+ .generator + .as_deref() + { + Some(generator_source) => { + let mut config = GeneratorConfig::default(); + + if let Some(value_blob) = &generator_source.value_blob { + config.content = Bytes::from(value_blob.clone()); + } + + if let Some(rpu) = generator_source.rpu { + config.rpu = rpu as usize; + } + + if let Some(d) = generator_source.duration { + config.duration = std::time::Duration::from(d).as_millis() as usize; + } + + Some(config) + } + None => None, + }; if let Some(retry_strategy) = mono_vertex_obj .spec @@ -245,7 +355,7 @@ impl Settings { // check if the sink retry strategy is set to fallback and there is no fallback sink configured // then we should return an error if settings.sink_retry_on_fail_strategy == OnFailureStrategy::Fallback - && !settings.sdk_config.is_fallback_enabled + && settings.fallback_config.is_none() { return Err(Error::ConfigError( "Retry Strategy given as fallback but Fallback sink not configured" @@ -255,13 +365,6 @@ impl Settings { } } - settings.sdk_config.grpc_max_message_size = env::var(ENV_GRPC_MAX_MESSAGE_SIZE) - .unwrap_or_else(|_| DEFAULT_GRPC_MAX_MESSAGE_SIZE.to_string()) - .parse() - .map_err(|e| { - Error::ConfigError(format!("Failed to parse grpc max message size: {:?}", e)) - })?; - settings.replica = env::var(ENV_POD_REPLICA) .unwrap_or_else(|_| "0".to_string()) .parse() @@ -592,8 +695,6 @@ mod tests { assert!(Settings::load().is_err()); env::remove_var(ENV_MONO_VERTEX_OBJ); } - // General cleanup - env::remove_var(ENV_GRPC_MAX_MESSAGE_SIZE); } #[test] diff --git a/rust/numaflow-core/src/monovertex.rs b/rust/numaflow-core/src/monovertex.rs index 5e6c9a0b00..374a10e52d 100644 --- a/rust/numaflow-core/src/monovertex.rs +++ b/rust/numaflow-core/src/monovertex.rs @@ -1,13 +1,17 @@ -use crate::config::{config, SDKConfig}; +use crate::config::{config, Settings}; use crate::error; +use crate::reader::LagReader; use crate::shared::utils; use crate::shared::utils::create_rpc_channel; use 
crate::sink::user_defined::SinkWriter; -use crate::source::generator; -use crate::source::user_defined::new_source; +use crate::source::generator::{new_generator, GeneratorAck, GeneratorLagReader, GeneratorRead}; +use crate::source::user_defined::{ + new_source, UserDefinedSourceAck, UserDefinedSourceLagReader, UserDefinedSourceRead, +}; +use crate::source::{SourceAcker, SourceReader}; use crate::transformer::user_defined::SourceTransformer; use forwarder::ForwarderBuilder; -use metrics::MetricsState; +use metrics::UserDefinedContainerState; use numaflow_grpc::clients::sink::sink_client::SinkClient; use numaflow_grpc::clients::source::source_client::SourceClient; use numaflow_grpc::clients::sourcetransformer::source_transform_client::SourceTransformClient; @@ -15,6 +19,7 @@ use std::time::Duration; use tokio::signal; use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; +use tonic::transport::Channel; use tracing::info; /// [forwarder] orchestrates data movement from the Source to the Sink via the optional SourceTransformer. @@ -38,7 +43,7 @@ pub async fn mono_vertex() -> error::Result<()> { }); // Run the forwarder with cancellation token. 
- if let Err(e) = start_forwarder(cln_token, config().sdk_config.clone()).await { + if let Err(e) = start_forwarder(cln_token, config()).await { error!("Application error: {:?}", e); // abort the signal handler task since we have an error and we are shutting down @@ -73,60 +78,67 @@ async fn shutdown_signal() { } } -async fn start_forwarder(cln_token: CancellationToken, sdk_config: SDKConfig) -> error::Result<()> { +enum SourceType { + UdSource( + UserDefinedSourceRead, + UserDefinedSourceAck, + UserDefinedSourceLagReader, + ), + Generator(GeneratorRead, GeneratorAck, GeneratorLagReader), +} + +async fn start_forwarder(cln_token: CancellationToken, config: &Settings) -> error::Result<()> { // make sure that we have compatibility with the server utils::check_compatibility( &cln_token, - sdk_config.source_server_info_path.into(), - sdk_config.sink_server_info_path.into(), - if sdk_config.is_transformer_enabled { - Some(sdk_config.transformer_server_info_path.into()) - } else { - None - }, - if sdk_config.is_fallback_enabled { - Some(sdk_config.fallback_server_info_path.into()) - } else { - None - }, + config + .udsource_config + .as_ref() + .map(|source_config| source_config.server_info_path.clone().into()), + config.udsink_config.server_info_path.clone().into(), + config + .transformer_config + .as_ref() + .map(|transformer_config| transformer_config.server_info_path.clone().into()), + config + .fallback_config + .as_ref() + .map(|fallback_config| fallback_config.server_info_path.clone().into()), ) .await?; - // FIXME: use me and use me right :) - let _ = generator::new_generator( - bytes::Bytes::from("fix me"), - 1, - 10, - Duration::from_millis(1000), - ); - - let mut source_grpc_client = - SourceClient::new(create_rpc_channel(sdk_config.source_socket_path.into()).await?) 
- .max_encoding_message_size(sdk_config.grpc_max_message_size) - .max_encoding_message_size(sdk_config.grpc_max_message_size); + let mut source_grpc_client = if let Some(source_config) = &config.udsource_config { + Some( + SourceClient::new(create_rpc_channel(source_config.socket_path.clone().into()).await?) + .max_encoding_message_size(source_config.grpc_max_message_size) + .max_encoding_message_size(source_config.grpc_max_message_size), + ) + } else { + None + }; let mut sink_grpc_client = - SinkClient::new(create_rpc_channel(sdk_config.sink_socket_path.into()).await?) - .max_encoding_message_size(sdk_config.grpc_max_message_size) - .max_encoding_message_size(sdk_config.grpc_max_message_size); + SinkClient::new(create_rpc_channel(config.udsink_config.socket_path.clone().into()).await?) + .max_encoding_message_size(config.udsink_config.grpc_max_message_size) + .max_encoding_message_size(config.udsink_config.grpc_max_message_size); - let mut transformer_grpc_client = if sdk_config.is_transformer_enabled { + let mut transformer_grpc_client = if let Some(transformer_config) = &config.transformer_config { let transformer_grpc_client = SourceTransformClient::new( - create_rpc_channel(sdk_config.transformer_socket_path.into()).await?, + create_rpc_channel(transformer_config.socket_path.clone().into()).await?, ) - .max_encoding_message_size(sdk_config.grpc_max_message_size) - .max_encoding_message_size(sdk_config.grpc_max_message_size); + .max_encoding_message_size(transformer_config.grpc_max_message_size) + .max_encoding_message_size(transformer_config.grpc_max_message_size); Some(transformer_grpc_client.clone()) } else { None }; - let mut fb_sink_grpc_client = if sdk_config.is_fallback_enabled { + let mut fb_sink_grpc_client = if let Some(fb_sink_config) = &config.fallback_config { let fb_sink_grpc_client = - SinkClient::new(create_rpc_channel(sdk_config.fallback_socket_path.into()).await?) 
- .max_encoding_message_size(sdk_config.grpc_max_message_size) - .max_encoding_message_size(sdk_config.grpc_max_message_size); + SinkClient::new(create_rpc_channel(fb_sink_config.socket_path.clone().into()).await?) + .max_encoding_message_size(fb_sink_config.grpc_max_message_size) + .max_encoding_message_size(fb_sink_config.grpc_max_message_size); Some(fb_sink_grpc_client.clone()) } else { @@ -143,17 +155,12 @@ async fn start_forwarder(cln_token: CancellationToken, sdk_config: SDKConfig) -> ) .await?; - let (source_read, source_ack, lag_reader) = new_source( - source_grpc_client.clone(), - config().batch_size as usize, - config().timeout_in_ms as u16, - ) - .await?; + let source_type = fetch_source(&config, &mut source_grpc_client).await?; // Start the metrics server in a separate background async spawn, // This should be running throughout the lifetime of the application, hence the handle is not // joined. - let metrics_state = MetricsState { + let metrics_state = UserDefinedContainerState { source_client: source_grpc_client.clone(), sink_client: sink_grpc_client.clone(), transformer_client: transformer_grpc_client.clone(), @@ -164,26 +171,98 @@ async fn start_forwarder(cln_token: CancellationToken, sdk_config: SDKConfig) -> // FIXME: what to do with the handle utils::start_metrics_server(metrics_state).await; + match source_type { + SourceType::UdSource(udsource_reader, udsource_acker, udsource_lag_reader) => { + start_forwarder_with_source( + udsource_reader, + udsource_acker, + udsource_lag_reader, + sink_grpc_client, + transformer_grpc_client, + fb_sink_grpc_client, + cln_token, + ) + .await?; + } + SourceType::Generator(generator_reader, generator_acker, generator_lag_reader) => { + start_forwarder_with_source( + generator_reader, + generator_acker, + generator_lag_reader, + sink_grpc_client, + transformer_grpc_client, + fb_sink_grpc_client, + cln_token, + ) + .await?; + } + } + + info!("Forwarder stopped gracefully"); + Ok(()) +} + +async fn fetch_source( + 
config: &Settings, + source_grpc_client: &mut Option>, +) -> crate::Result { + let source_type = if let Some(source_grpc_client) = source_grpc_client.clone() { + let (source_read, source_ack, lag_reader) = new_source( + source_grpc_client, + config.batch_size as usize, + config.timeout_in_ms as u16, + ) + .await?; + SourceType::UdSource(source_read, source_ack, lag_reader) + } else if let Some(generator_config) = &config.generator_config { + let (source_read, source_ack, lag_reader) = new_generator( + generator_config.content.clone(), + generator_config.rpu, + config.batch_size as usize, + Duration::from_millis(generator_config.duration as u64), + )?; + SourceType::Generator(source_read, source_ack, lag_reader) + } else { + return Err(error::Error::ConfigError( + "No valid source configuration found".into(), + )); + }; + Ok(source_type) +} + +async fn start_forwarder_with_source( + source_reader: R, + source_acker: A, + source_lag_reader: L, + sink_grpc_client: SinkClient, + transformer_client: Option>, + fallback_sink_client: Option>, + cln_token: CancellationToken, +) -> error::Result<()> +where + R: SourceReader, + A: SourceAcker, + L: LagReader + Clone + 'static, +{ // start the pending reader to publish pending metrics - let mut pending_reader = utils::create_pending_reader(lag_reader).await; + let mut pending_reader = utils::create_pending_reader(source_lag_reader).await; pending_reader.start().await; // build the forwarder - - let sink_writer = SinkWriter::new(sink_grpc_client.clone()).await?; + let sink_writer = SinkWriter::new(sink_grpc_client).await?; let mut forwarder_builder = - ForwarderBuilder::new(source_read, source_ack, sink_writer, cln_token); + ForwarderBuilder::new(source_reader, source_acker, sink_writer, cln_token); // add transformer if exists - if let Some(transformer_grpc_client) = transformer_grpc_client { - let transformer = SourceTransformer::new(transformer_grpc_client).await?; + if let Some(transformer_client) = transformer_client { + 
let transformer = SourceTransformer::new(transformer_client).await?; forwarder_builder = forwarder_builder.source_transformer(transformer); } // add fallback sink if exists - if let Some(fb_sink_grpc_client) = fb_sink_grpc_client { - let fallback_writer = SinkWriter::new(fb_sink_grpc_client).await?; + if let Some(fallback_sink_client) = fallback_sink_client { + let fallback_writer = SinkWriter::new(fallback_sink_client).await?; forwarder_builder = forwarder_builder.fallback_sink_writer(fallback_writer); } // build the final forwarder @@ -198,7 +277,7 @@ async fn start_forwarder(cln_token: CancellationToken, sdk_config: SDKConfig) -> #[cfg(test)] mod tests { - use crate::config::SDKConfig; + use crate::config::{Settings, UDSourceConfig}; use crate::error; use crate::monovertex::start_forwarder; use crate::shared::server_info::ServerInfo; @@ -307,16 +386,17 @@ mod tests { token_clone.cancel(); }); - let sdk_config = SDKConfig { - source_socket_path: src_sock_file.to_str().unwrap().to_string(), - sink_socket_path: sink_sock_file.to_str().unwrap().to_string(), - source_server_info_path: src_info_file.to_str().unwrap().to_string(), - sink_server_info_path: sink_server_info.to_str().unwrap().to_string(), + let mut config = Settings::default(); + config.udsink_config.socket_path = sink_sock_file.to_str().unwrap().to_string(); + config.udsink_config.server_info_path = sink_server_info.to_str().unwrap().to_string(); + + config.udsource_config = Some(UDSourceConfig { + socket_path: src_sock_file.to_str().unwrap().to_string(), + server_info_path: src_info_file.to_str().unwrap().to_string(), grpc_max_message_size: 1024, - ..Default::default() - }; + }); - let result = start_forwarder(cln_token.clone(), sdk_config).await; + let result = start_forwarder(cln_token.clone(), &config).await; assert!(result.is_ok()); // stop the source and sink servers diff --git a/rust/numaflow-core/src/monovertex/metrics.rs b/rust/numaflow-core/src/monovertex/metrics.rs index 0ed0e09680..9ee6f5c65e 
100644 --- a/rust/numaflow-core/src/monovertex/metrics.rs +++ b/rust/numaflow-core/src/monovertex/metrics.rs @@ -60,9 +60,13 @@ const TRANSFORM_TIME: &str = "monovtx_transformer_time"; const ACK_TIME: &str = "monovtx_ack_time"; const SINK_TIME: &str = "monovtx_sink_time"; +/// Only used defined functions will have containers since rest +/// are builtins. We save the gRPC clients to retrieve metrics and also +/// to do liveness checks. This means, these will be optionals since +/// we do not require these for builtins. #[derive(Clone)] -pub(crate) struct MetricsState { - pub source_client: SourceClient, +pub(crate) struct UserDefinedContainerState { + pub source_client: Option>, pub sink_client: SinkClient, pub transformer_client: Option>, pub fb_sink_client: Option>, @@ -264,7 +268,7 @@ pub async fn metrics_handler() -> impl IntoResponse { pub(crate) async fn start_metrics_https_server( addr: SocketAddr, - metrics_state: MetricsState, + metrics_state: UserDefinedContainerState, ) -> crate::Result<()> { let _ = rustls::crypto::aws_lc_rs::default_provider().install_default(); @@ -287,7 +291,7 @@ pub(crate) async fn start_metrics_https_server( } /// router for metrics and k8s health endpoints -fn metrics_router(metrics_state: MetricsState) -> Router { +fn metrics_router(metrics_state: UserDefinedContainerState) -> Router { Router::new() .route("/metrics", get(metrics_handler)) .route("/livez", get(livez)) @@ -300,15 +304,12 @@ async fn livez() -> impl IntoResponse { StatusCode::NO_CONTENT } -async fn sidecar_livez(State(mut state): State) -> impl IntoResponse { - if state - .source_client - .is_ready(Request::new(())) - .await - .is_err() - { - error!("Source client is not available"); - return StatusCode::SERVICE_UNAVAILABLE; +async fn sidecar_livez(State(mut state): State) -> impl IntoResponse { + if let Some(mut source_client) = state.source_client { + if source_client.is_ready(Request::new(())).await.is_err() { + error!("Source client is not available"); + return 
StatusCode::SERVICE_UNAVAILABLE; + } } if state.sink_client.is_ready(Request::new(())).await.is_err() { error!("Sink client is not available"); @@ -537,7 +538,7 @@ mod tests { use tokio::sync::mpsc::Sender; use super::*; - use crate::monovertex::metrics::MetricsState; + use crate::monovertex::metrics::UserDefinedContainerState; use crate::shared::utils::create_rpc_channel; struct SimpleSource; @@ -646,8 +647,10 @@ mod tests { // wait for the servers to start // FIXME: we need to have a better way, this is flaky tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; - let metrics_state = MetricsState { - source_client: SourceClient::new(create_rpc_channel(src_sock_file).await.unwrap()), + let metrics_state = UserDefinedContainerState { + source_client: Some(SourceClient::new( + create_rpc_channel(src_sock_file).await.unwrap(), + )), sink_client: SinkClient::new(create_rpc_channel(sink_sock_file).await.unwrap()), transformer_client: Some(SourceTransformClient::new( create_rpc_channel(sock_file).await.unwrap(), diff --git a/rust/numaflow-core/src/shared/utils.rs b/rust/numaflow-core/src/shared/utils.rs index 23d1fc3eef..de4c79187b 100644 --- a/rust/numaflow-core/src/shared/utils.rs +++ b/rust/numaflow-core/src/shared/utils.rs @@ -5,7 +5,7 @@ use std::time::Duration; use crate::config::config; use crate::error::Error; use crate::monovertex::metrics::{ - start_metrics_https_server, MetricsState, PendingReader, PendingReaderBuilder, + start_metrics_https_server, PendingReader, PendingReaderBuilder, UserDefinedContainerState, }; use crate::shared::server_info; use crate::{error, reader}; @@ -29,17 +29,19 @@ use tracing::{info, warn}; pub(crate) async fn check_compatibility( cln_token: &CancellationToken, - source_file_path: PathBuf, + source_file_path: Option, sink_file_path: PathBuf, transformer_file_path: Option, fb_sink_file_path: Option, ) -> error::Result<()> { - server_info::check_for_server_compatibility(source_file_path, cln_token.clone()) - .await - 
.map_err(|e| { - warn!("Error waiting for source server info file: {:?}", e); - Error::ForwarderError("Error waiting for server info file".to_string()) - })?; + if let Some(source_file_path) = source_file_path { + server_info::check_for_server_compatibility(source_file_path, cln_token.clone()) + .await + .map_err(|e| { + warn!("Error waiting for source server info file: {:?}", e); + Error::ForwarderError("Error waiting for server info file".to_string()) + })?; + } server_info::check_for_server_compatibility(sink_file_path, cln_token.clone()) .await @@ -68,7 +70,9 @@ pub(crate) async fn check_compatibility( Ok(()) } -pub(crate) async fn start_metrics_server(metrics_state: MetricsState) -> JoinHandle<()> { +pub(crate) async fn start_metrics_server( + metrics_state: UserDefinedContainerState, +) -> JoinHandle<()> { tokio::spawn(async { // Start the metrics server, which server the prometheus metrics. let metrics_addr: SocketAddr = format!("0.0.0.0:{}", &config().metrics_server_listen_port) @@ -96,7 +100,7 @@ pub(crate) async fn create_pending_reader( pub(crate) async fn wait_until_ready( cln_token: CancellationToken, - source_client: &mut SourceClient, + source_client: &mut Option>, sink_client: &mut SinkClient, transformer_client: &mut Option>, fb_sink_client: &mut Option>, @@ -107,10 +111,16 @@ pub(crate) async fn wait_until_ready( "Cancellation token is cancelled".to_string(), )); } - let source_ready = source_client.is_ready(Request::new(())).await.is_ok(); - if !source_ready { - info!("UDSource is not ready, waiting..."); - } + + let source_ready = if let Some(client) = source_client { + let ready = client.is_ready(Request::new(())).await.is_ok(); + if !ready { + info!("UDSource is not ready, waiting..."); + } + ready + } else { + true + }; let sink_ready = sink_client.is_ready(Request::new(())).await.is_ok(); if !sink_ready { @@ -243,8 +253,14 @@ mod tests { .unwrap(); let cln_token = CancellationToken::new(); - let result = - check_compatibility(&cln_token, 
source_file_path, sink_file_path, None, None).await; + let result = check_compatibility( + &cln_token, + Some(source_file_path), + sink_file_path, + None, + None, + ) + .await; assert!(result.is_ok()); } @@ -267,7 +283,7 @@ mod tests { }); let result = check_compatibility( &cln_token, - source_file_path, + Some(source_file_path), sink_file_path, Some(transformer_file_path), Some(fb_sink_file_path), @@ -371,7 +387,7 @@ mod tests { // Wait for the servers to start sleep(Duration::from_millis(100)).await; - let mut source_grpc_client = + let source_grpc_client = SourceClient::new(create_rpc_channel(source_sock_file.clone()).await.unwrap()); let mut sink_grpc_client = SinkClient::new(create_rpc_channel(sink_sock_file.clone()).await.unwrap()); @@ -386,7 +402,7 @@ mod tests { let cln_token = CancellationToken::new(); let result = wait_until_ready( cln_token, - &mut source_grpc_client, + &mut Some(source_grpc_client), &mut sink_grpc_client, &mut transformer_grpc_client, &mut fb_sink_grpc_client, From d340a4e83311d487c2a1b3a75447168a01e3943e Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Wed, 9 Oct 2024 10:30:09 -0700 Subject: [PATCH 097/188] feat: expose ports for user defined containers (#2135) Signed-off-by: Derek Wang --- api/json-schema/schema.json | 13 + api/openapi-spec/swagger.json | 13 + .../numaflow.numaproj.io_monovertices.yaml | 96 ++ .../full/numaflow.numaproj.io_pipelines.yaml | 144 +++ .../full/numaflow.numaproj.io_vertices.yaml | 120 ++ config/install.yaml | 360 ++++++ config/namespace-install.yaml | 360 ++++++ docs/APIs.md | 16 + .../numaflow/v1alpha1/container_builder.go | 5 + .../v1alpha1/container_builder_test.go | 16 +- pkg/apis/numaflow/v1alpha1/generated.pb.go | 1075 +++++++++-------- pkg/apis/numaflow/v1alpha1/generated.proto | 8 + pkg/apis/numaflow/v1alpha1/side_inputs.go | 2 +- pkg/apis/numaflow/v1alpha1/sink.go | 4 +- pkg/apis/numaflow/v1alpha1/source.go | 4 +- pkg/apis/numaflow/v1alpha1/udf.go | 2 +- .../v1alpha1/user_defined_container.go | 7 + 
.../v1alpha1/zz_generated.deepcopy.go | 5 + .../numaflow/v1alpha1/zz_generated.openapi.go | 26 +- rust/numaflow-models/src/models/container.rs | 3 + 20 files changed, 1763 insertions(+), 516 deletions(-) diff --git a/api/json-schema/schema.json b/api/json-schema/schema.json index 2366c1b53a..a44b78c881 100644 --- a/api/json-schema/schema.json +++ b/api/json-schema/schema.json @@ -19773,6 +19773,19 @@ "livenessProbe": { "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.Probe" }, + "ports": { + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerPort" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "containerPort", + "protocol" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "containerPort", + "x-kubernetes-patch-strategy": "merge" + }, "readinessProbe": { "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.Probe" }, diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 6e852cf2a0..43d2dfe2bd 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -19778,6 +19778,19 @@ "livenessProbe": { "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.Probe" }, + "ports": { + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerPort" + }, + "x-kubernetes-list-map-keys": [ + "containerPort", + "protocol" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "containerPort", + "x-kubernetes-patch-strategy": "merge" + }, "readinessProbe": { "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.Probe" }, diff --git a/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml b/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml index 13f0cd9f70..b3c73cdb3b 100644 --- a/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml @@ -3755,6 +3755,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + 
format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -4279,6 +4303,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -5117,6 +5165,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -5389,6 +5461,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: diff --git a/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml b/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml index 7c192a9e79..3abdbe43bc 100644 --- 
a/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml @@ -273,6 +273,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -8411,6 +8435,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -8935,6 +8983,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -9773,6 +9845,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map 
readinessProbe: properties: failureThreshold: @@ -10045,6 +10141,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -10354,6 +10474,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: diff --git a/config/base/crds/full/numaflow.numaproj.io_vertices.yaml b/config/base/crds/full/numaflow.numaproj.io_vertices.yaml index 297ca4c99f..945d1a17bd 100644 --- a/config/base/crds/full/numaflow.numaproj.io_vertices.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_vertices.yaml @@ -3223,6 +3223,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -3747,6 +3771,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: 
integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -4585,6 +4633,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -4857,6 +4929,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -5243,6 +5339,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: diff --git a/config/install.yaml b/config/install.yaml index 85cd57c65e..73d53b7756 100644 --- a/config/install.yaml +++ b/config/install.yaml @@ -6941,6 +6941,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: 
integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -7465,6 +7489,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -8303,6 +8351,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -8575,6 +8647,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -9908,6 +10004,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: 
integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -18046,6 +18166,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -18570,6 +18714,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -19408,6 +19576,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -19680,6 +19872,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: 
string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -19989,6 +20205,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -24330,6 +24570,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -24854,6 +25118,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -25692,6 +25980,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + 
x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -25964,6 +26276,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -26350,6 +26686,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: diff --git a/config/namespace-install.yaml b/config/namespace-install.yaml index 4048cda38e..0c0e287f7f 100644 --- a/config/namespace-install.yaml +++ b/config/namespace-install.yaml @@ -6941,6 +6941,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -7465,6 +7489,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + 
type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -8303,6 +8351,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -8575,6 +8647,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -9908,6 +10004,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -18046,6 +18166,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - 
containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -18570,6 +18714,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -19408,6 +19576,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -19680,6 +19872,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -19989,6 +20205,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + 
x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -24330,6 +24570,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -24854,6 +25118,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -25692,6 +25980,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -25964,6 +26276,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + 
x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: @@ -26350,6 +26686,30 @@ spec: format: int32 type: integer type: object + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: properties: failureThreshold: diff --git a/docs/APIs.md b/docs/APIs.md index eb9b923b4f..8c48a00aad 100644 --- a/docs/APIs.md +++ b/docs/APIs.md @@ -1627,6 +1627,22 @@ Kubernetes core/v1.PullPolicy + + + + +ports
+ +\[\]Kubernetes core/v1.ContainerPort + + + + +(Optional) + + + + diff --git a/pkg/apis/numaflow/v1alpha1/container_builder.go b/pkg/apis/numaflow/v1alpha1/container_builder.go index dcded47e9c..df09d250cd 100644 --- a/pkg/apis/numaflow/v1alpha1/container_builder.go +++ b/pkg/apis/numaflow/v1alpha1/container_builder.go @@ -70,6 +70,11 @@ func (b containerBuilder) appendEnv(x ...corev1.EnvVar) containerBuilder { return b } +func (b containerBuilder) appendPorts(x ...corev1.ContainerPort) containerBuilder { + b.Ports = append(b.Ports, x...) + return b +} + func (b containerBuilder) appendVolumeMounts(x ...corev1.VolumeMount) containerBuilder { b.VolumeMounts = append(b.VolumeMounts, x...) return b diff --git a/pkg/apis/numaflow/v1alpha1/container_builder_test.go b/pkg/apis/numaflow/v1alpha1/container_builder_test.go index 3503f40dc6..b641918b6b 100644 --- a/pkg/apis/numaflow/v1alpha1/container_builder_test.go +++ b/pkg/apis/numaflow/v1alpha1/container_builder_test.go @@ -41,9 +41,21 @@ func Test_containerBuilder(t *testing.T) { c := containerBuilder{}. init(getContainerReq{ resources: testResources, - }). + }).args("numa", "args"). + image("image"). + imagePullPolicy(corev1.PullIfNotPresent). + command("cmd"). + appendVolumeMounts(corev1.VolumeMount{Name: "vol", MountPath: "/vol"}). + appendEnv(corev1.EnvVar{ + Name: "env", Value: "value"}). + appendPorts(corev1.ContainerPort{Name: "port", ContainerPort: 8080}). 
build() assert.Equal(t, "numa", c.Name) - assert.Len(t, c.VolumeMounts, 0) + assert.Len(t, c.VolumeMounts, 1) assert.Equal(t, testResources, c.Resources) + assert.Equal(t, []string{"numa", "args"}, c.Args) + assert.Equal(t, "image", c.Image) + assert.Equal(t, corev1.PullIfNotPresent, c.ImagePullPolicy) + assert.Equal(t, []corev1.EnvVar{{Name: "env", Value: "value"}}, c.Env) + assert.Equal(t, []corev1.ContainerPort{{Name: "port", ContainerPort: 8080}}, c.Ports) } diff --git a/pkg/apis/numaflow/v1alpha1/generated.pb.go b/pkg/apis/numaflow/v1alpha1/generated.pb.go index 8905ee00c2..8c1b846d4f 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.pb.go +++ b/pkg/apis/numaflow/v1alpha1/generated.pb.go @@ -2880,513 +2880,514 @@ func init() { } var fileDescriptor_9d0d1b17d3865563 = []byte{ - // 8084 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x5d, 0x6c, 0x24, 0x57, - 0x76, 0x9e, 0xfa, 0xbf, 0xfb, 0x34, 0xff, 0x74, 0x67, 0x34, 0xe2, 0xcc, 0x4a, 0xd3, 0xe3, 0x5a, - 0xef, 0xee, 0x38, 0xb6, 0xc9, 0x88, 0x5e, 0x69, 0xb5, 0xb6, 0x77, 0x25, 0x36, 0x39, 0xe4, 0x50, - 0x43, 0xce, 0x70, 0x4f, 0x93, 0x23, 0xad, 0x15, 0xaf, 0x52, 0xac, 0xba, 0x6c, 0x96, 0x58, 0x5d, - 0xd5, 0x5b, 0x55, 0xcd, 0x19, 0xca, 0x31, 0xd6, 0xde, 0x4d, 0xa0, 0x0d, 0x92, 0x20, 0x81, 0x9f, - 0x0c, 0x04, 0x4e, 0x90, 0x20, 0x80, 0x1f, 0x0c, 0xe7, 0x21, 0xc8, 0xe6, 0x21, 0x40, 0x7e, 0x1c, - 0x04, 0xc9, 0xe6, 0x7f, 0x11, 0x04, 0x88, 0xf2, 0x42, 0x64, 0x19, 0xe4, 0x21, 0x01, 0x1c, 0x18, - 0x31, 0x12, 0x3b, 0x03, 0x23, 0x1b, 0xdc, 0xbf, 0xfa, 0xeb, 0xea, 0x19, 0xb2, 0xab, 0x39, 0x1a, - 0xc5, 0x7a, 0xeb, 0xbe, 0xe7, 0xdc, 0xef, 0xdc, 0xba, 0x75, 0xeb, 0xde, 0x73, 0xcf, 0x39, 0xf7, - 0x5c, 0x58, 0xef, 0x5a, 0xc1, 0xc1, 0x60, 0x6f, 0xc1, 0x70, 0x7b, 0x8b, 0xce, 0xa0, 0xa7, 0xf7, - 0x3d, 0xf7, 0x7d, 0xfe, 0x63, 0xdf, 0x76, 0x1f, 0x2c, 0xf6, 0x0f, 0xbb, 0x8b, 0x7a, 0xdf, 0xf2, - 0xa3, 0x92, 0xa3, 0x57, 0x74, 0xbb, 0x7f, 0xa0, 0xbf, 0xb2, 0xd8, 0xa5, 0x0e, 
0xf5, 0xf4, 0x80, - 0x9a, 0x0b, 0x7d, 0xcf, 0x0d, 0x5c, 0xf2, 0xa5, 0x08, 0x68, 0x41, 0x01, 0x2d, 0xa8, 0x6a, 0x0b, - 0xfd, 0xc3, 0xee, 0x02, 0x03, 0x8a, 0x4a, 0x14, 0xd0, 0xb5, 0x9f, 0x8e, 0xb5, 0xa0, 0xeb, 0x76, - 0xdd, 0x45, 0x8e, 0xb7, 0x37, 0xd8, 0xe7, 0xff, 0xf8, 0x1f, 0xfe, 0x4b, 0xc8, 0xb9, 0xa6, 0x1d, - 0xbe, 0xee, 0x2f, 0x58, 0x2e, 0x6b, 0xd6, 0xa2, 0xe1, 0x7a, 0x74, 0xf1, 0x68, 0xa8, 0x2d, 0xd7, - 0xbe, 0x18, 0xf1, 0xf4, 0x74, 0xe3, 0xc0, 0x72, 0xa8, 0x77, 0xac, 0x9e, 0x65, 0xd1, 0xa3, 0xbe, - 0x3b, 0xf0, 0x0c, 0x7a, 0xae, 0x5a, 0xfe, 0x62, 0x8f, 0x06, 0x7a, 0x96, 0xac, 0xc5, 0x51, 0xb5, - 0xbc, 0x81, 0x13, 0x58, 0xbd, 0x61, 0x31, 0xaf, 0x3d, 0xa9, 0x82, 0x6f, 0x1c, 0xd0, 0x9e, 0x3e, - 0x54, 0xef, 0x67, 0x46, 0xd5, 0x1b, 0x04, 0x96, 0xbd, 0x68, 0x39, 0x81, 0x1f, 0x78, 0xe9, 0x4a, - 0xda, 0xef, 0x00, 0x5c, 0x5a, 0xde, 0xf3, 0x03, 0x4f, 0x37, 0x82, 0x6d, 0xd7, 0xdc, 0xa1, 0xbd, - 0xbe, 0xad, 0x07, 0x94, 0x1c, 0x42, 0x9d, 0x3d, 0x90, 0xa9, 0x07, 0xfa, 0x7c, 0xe1, 0x46, 0xe1, - 0x66, 0x73, 0x69, 0x79, 0x61, 0xcc, 0x17, 0xb8, 0xb0, 0x25, 0x81, 0xda, 0x53, 0xa7, 0x27, 0xad, - 0xba, 0xfa, 0x87, 0xa1, 0x00, 0xf2, 0xeb, 0x05, 0x98, 0x72, 0x5c, 0x93, 0x76, 0xa8, 0x4d, 0x8d, - 0xc0, 0xf5, 0xe6, 0x8b, 0x37, 0x4a, 0x37, 0x9b, 0x4b, 0xdf, 0x18, 0x5b, 0x62, 0xc6, 0x13, 0x2d, - 0xdc, 0x8d, 0x09, 0xb8, 0xe5, 0x04, 0xde, 0x71, 0xfb, 0xf2, 0xf7, 0x4f, 0x5a, 0xcf, 0x9d, 0x9e, - 0xb4, 0xa6, 0xe2, 0x24, 0x4c, 0xb4, 0x84, 0xec, 0x42, 0x33, 0x70, 0x6d, 0xd6, 0x65, 0x96, 0xeb, - 0xf8, 0xf3, 0x25, 0xde, 0xb0, 0xeb, 0x0b, 0xa2, 0xab, 0x99, 0xf8, 0x05, 0x36, 0xc6, 0x16, 0x8e, - 0x5e, 0x59, 0xd8, 0x09, 0xd9, 0xda, 0x97, 0x24, 0x70, 0x33, 0x2a, 0xf3, 0x31, 0x8e, 0x43, 0x28, - 0xcc, 0xfa, 0xd4, 0x18, 0x78, 0x56, 0x70, 0xbc, 0xe2, 0x3a, 0x01, 0x7d, 0x18, 0xcc, 0x97, 0x79, - 0x2f, 0x7f, 0x3e, 0x0b, 0x7a, 0xdb, 0x35, 0x3b, 0x49, 0xee, 0xf6, 0xa5, 0xd3, 0x93, 0xd6, 0x6c, - 0xaa, 0x10, 0xd3, 0x98, 0xc4, 0x81, 0x39, 0xab, 0xa7, 0x77, 0xe9, 0xf6, 0xc0, 0xb6, 0x3b, 0xd4, - 0xf0, 0x68, 0xe0, 
0xcf, 0x57, 0xf8, 0x23, 0xdc, 0xcc, 0x92, 0xb3, 0xe9, 0x1a, 0xba, 0x7d, 0x6f, - 0xef, 0x7d, 0x6a, 0x04, 0x48, 0xf7, 0xa9, 0x47, 0x1d, 0x83, 0xb6, 0xe7, 0xe5, 0xc3, 0xcc, 0x6d, - 0xa4, 0x90, 0x70, 0x08, 0x9b, 0xac, 0xc3, 0xf3, 0x7d, 0xcf, 0x72, 0x79, 0x13, 0x6c, 0xdd, 0xf7, - 0xef, 0xea, 0x3d, 0x3a, 0x5f, 0xbd, 0x51, 0xb8, 0xd9, 0x68, 0x5f, 0x95, 0x30, 0xcf, 0x6f, 0xa7, - 0x19, 0x70, 0xb8, 0x0e, 0xb9, 0x09, 0x75, 0x55, 0x38, 0x5f, 0xbb, 0x51, 0xb8, 0x59, 0x11, 0x63, - 0x47, 0xd5, 0xc5, 0x90, 0x4a, 0xd6, 0xa0, 0xae, 0xef, 0xef, 0x5b, 0x0e, 0xe3, 0xac, 0xf3, 0x2e, - 0x7c, 0x29, 0xeb, 0xd1, 0x96, 0x25, 0x8f, 0xc0, 0x51, 0xff, 0x30, 0xac, 0x4b, 0xde, 0x02, 0xe2, - 0x53, 0xef, 0xc8, 0x32, 0xe8, 0xb2, 0x61, 0xb8, 0x03, 0x27, 0xe0, 0x6d, 0x6f, 0xf0, 0xb6, 0x5f, - 0x93, 0x6d, 0x27, 0x9d, 0x21, 0x0e, 0xcc, 0xa8, 0x45, 0xde, 0x84, 0x39, 0xf9, 0xad, 0x46, 0xbd, - 0x00, 0x1c, 0xe9, 0x32, 0xeb, 0x48, 0x4c, 0xd1, 0x70, 0x88, 0x9b, 0x98, 0xf0, 0x92, 0x3e, 0x08, - 0xdc, 0x1e, 0x83, 0x4c, 0x0a, 0xdd, 0x71, 0x0f, 0xa9, 0x33, 0xdf, 0xbc, 0x51, 0xb8, 0x59, 0x6f, - 0xdf, 0x38, 0x3d, 0x69, 0xbd, 0xb4, 0xfc, 0x18, 0x3e, 0x7c, 0x2c, 0x0a, 0xb9, 0x07, 0x0d, 0xd3, - 0xf1, 0xb7, 0x5d, 0xdb, 0x32, 0x8e, 0xe7, 0xa7, 0x78, 0x03, 0x5f, 0x91, 0x8f, 0xda, 0x58, 0xbd, - 0xdb, 0x11, 0x84, 0x47, 0x27, 0xad, 0x97, 0x86, 0xa7, 0xd4, 0x85, 0x90, 0x8e, 0x11, 0x06, 0xd9, - 0xe2, 0x80, 0x2b, 0xae, 0xb3, 0x6f, 0x75, 0xe7, 0xa7, 0xf9, 0xdb, 0xb8, 0x31, 0x62, 0x40, 0xaf, - 0xde, 0xed, 0x08, 0xbe, 0xf6, 0xb4, 0x14, 0x27, 0xfe, 0x62, 0x84, 0x40, 0x4c, 0x98, 0x51, 0x93, - 0xf1, 0x8a, 0xad, 0x5b, 0x3d, 0x7f, 0x7e, 0x86, 0x0f, 0xde, 0x1f, 0x1f, 0x81, 0x89, 0x71, 0xe6, - 0xf6, 0x15, 0xf9, 0x28, 0x33, 0x89, 0x62, 0x1f, 0x53, 0x98, 0xd7, 0xde, 0x80, 0xe7, 0x87, 0xe6, - 0x06, 0x32, 0x07, 0xa5, 0x43, 0x7a, 0xcc, 0xa7, 0xbe, 0x06, 0xb2, 0x9f, 0xe4, 0x32, 0x54, 0x8e, - 0x74, 0x7b, 0x40, 0xe7, 0x8b, 0xbc, 0x4c, 0xfc, 0xf9, 0xd9, 0xe2, 0xeb, 0x05, 0xed, 0x6f, 0x96, - 0x60, 0x4a, 0xcd, 0x38, 0x1d, 0xcb, 0x39, 0x24, 0x6f, 
0x43, 0xc9, 0x76, 0xbb, 0x72, 0xde, 0xfc, - 0xf9, 0xb1, 0x67, 0xb1, 0x4d, 0xb7, 0xdb, 0xae, 0x9d, 0x9e, 0xb4, 0x4a, 0x9b, 0x6e, 0x17, 0x19, - 0x22, 0x31, 0xa0, 0x72, 0xa8, 0xef, 0x1f, 0xea, 0xbc, 0x0d, 0xcd, 0xa5, 0xf6, 0xd8, 0xd0, 0x77, - 0x18, 0x0a, 0x6b, 0x6b, 0xbb, 0x71, 0x7a, 0xd2, 0xaa, 0xf0, 0xbf, 0x28, 0xb0, 0x89, 0x0b, 0x8d, - 0x3d, 0x5b, 0x37, 0x0e, 0x0f, 0x5c, 0x9b, 0xce, 0x97, 0x72, 0x0a, 0x6a, 0x2b, 0x24, 0xf1, 0x9a, - 0xc3, 0xbf, 0x18, 0xc9, 0x20, 0x06, 0x54, 0x07, 0xa6, 0x6f, 0x39, 0x87, 0x72, 0x0e, 0x7c, 0x63, - 0x6c, 0x69, 0xbb, 0xab, 0xfc, 0x99, 0xe0, 0xf4, 0xa4, 0x55, 0x15, 0xbf, 0x51, 0x42, 0x6b, 0x7f, - 0x38, 0x05, 0x33, 0xea, 0x25, 0xdd, 0xa7, 0x5e, 0x40, 0x1f, 0x92, 0x1b, 0x50, 0x76, 0xd8, 0xa7, - 0xc9, 0x5f, 0x72, 0x7b, 0x4a, 0x0e, 0x97, 0x32, 0xff, 0x24, 0x39, 0x85, 0xb5, 0x4c, 0x0c, 0x15, - 0xd9, 0xe1, 0xe3, 0xb7, 0xac, 0xc3, 0x61, 0x44, 0xcb, 0xc4, 0x6f, 0x94, 0xd0, 0xe4, 0x5d, 0x28, - 0xf3, 0x87, 0x17, 0x5d, 0xfd, 0x95, 0xf1, 0x45, 0xb0, 0x47, 0xaf, 0xb3, 0x27, 0xe0, 0x0f, 0xce, - 0x41, 0xd9, 0x50, 0x1c, 0x98, 0xfb, 0xb2, 0x63, 0x7f, 0x3e, 0x47, 0xc7, 0xae, 0x89, 0xa1, 0xb8, - 0xbb, 0xba, 0x86, 0x0c, 0x91, 0xfc, 0xe5, 0x02, 0x3c, 0x6f, 0xb8, 0x4e, 0xa0, 0x33, 0x3d, 0x43, - 0x2d, 0xb2, 0xf3, 0x15, 0x2e, 0xe7, 0xad, 0xb1, 0xe5, 0xac, 0xa4, 0x11, 0xdb, 0x2f, 0xb0, 0x35, - 0x63, 0xa8, 0x18, 0x87, 0x65, 0x93, 0xbf, 0x5a, 0x80, 0x17, 0xd8, 0x5c, 0x3e, 0xc4, 0xcc, 0x57, - 0xa0, 0xc9, 0xb6, 0xea, 0xea, 0xe9, 0x49, 0xeb, 0x85, 0x8d, 0x2c, 0x61, 0x98, 0xdd, 0x06, 0xd6, - 0xba, 0x4b, 0xfa, 0xb0, 0x5a, 0xc2, 0x57, 0xb7, 0xe6, 0xd2, 0xe6, 0x24, 0x55, 0x9d, 0xf6, 0x67, - 0xe4, 0x50, 0xce, 0xd2, 0xec, 0x30, 0xab, 0x15, 0xe4, 0x16, 0xd4, 0x8e, 0x5c, 0x7b, 0xd0, 0xa3, - 0xfe, 0x7c, 0x9d, 0x4f, 0xb1, 0xd7, 0xb2, 0xa6, 0xd8, 0xfb, 0x9c, 0xa5, 0x3d, 0x2b, 0xe1, 0x6b, - 0xe2, 0xbf, 0x8f, 0xaa, 0x2e, 0xb1, 0xa0, 0x6a, 0x5b, 0x3d, 0x2b, 0xf0, 0xf9, 0xc2, 0xd9, 0x5c, - 0xba, 0x35, 0xf6, 0x63, 0x89, 0x4f, 0x74, 0x93, 0x83, 0x89, 0xaf, 0x46, 0xfc, 0x46, 0x29, 
0x80, - 0x4d, 0x85, 0xbe, 0xa1, 0xdb, 0x62, 0x61, 0x6d, 0x2e, 0x7d, 0x75, 0xfc, 0xcf, 0x86, 0xa1, 0xb4, - 0xa7, 0xe5, 0x33, 0x55, 0xf8, 0x5f, 0x14, 0xd8, 0xe4, 0x17, 0x61, 0x26, 0xf1, 0x36, 0xfd, 0xf9, - 0x26, 0xef, 0x9d, 0x97, 0xb3, 0x7a, 0x27, 0xe4, 0x8a, 0x56, 0x9e, 0xc4, 0x08, 0xf1, 0x31, 0x05, - 0x46, 0xee, 0x40, 0xdd, 0xb7, 0x4c, 0x6a, 0xe8, 0x9e, 0x3f, 0x3f, 0x75, 0x16, 0xe0, 0x39, 0x09, - 0x5c, 0xef, 0xc8, 0x6a, 0x18, 0x02, 0x90, 0x05, 0x80, 0xbe, 0xee, 0x05, 0x96, 0x50, 0x54, 0xa7, - 0xb9, 0xd2, 0x34, 0x73, 0x7a, 0xd2, 0x82, 0xed, 0xb0, 0x14, 0x63, 0x1c, 0x8c, 0x9f, 0xd5, 0xdd, - 0x70, 0xfa, 0x83, 0x40, 0x2c, 0xac, 0x0d, 0xc1, 0xdf, 0x09, 0x4b, 0x31, 0xc6, 0x41, 0x7e, 0xbb, - 0x00, 0x9f, 0x89, 0xfe, 0x0e, 0x7f, 0x64, 0xb3, 0x13, 0xff, 0xc8, 0x5a, 0xa7, 0x27, 0xad, 0xcf, - 0x74, 0x46, 0x8b, 0xc4, 0xc7, 0xb5, 0x87, 0x7c, 0x58, 0x80, 0x99, 0x41, 0xdf, 0xd4, 0x03, 0xda, - 0x09, 0xd8, 0x8e, 0xa7, 0x7b, 0x3c, 0x3f, 0xc7, 0x9b, 0xb8, 0x3e, 0xfe, 0x2c, 0x98, 0x80, 0x8b, - 0x5e, 0x73, 0xb2, 0x1c, 0x53, 0x62, 0xb5, 0xb7, 0x61, 0x7a, 0x79, 0x10, 0x1c, 0xb8, 0x9e, 0xf5, - 0x01, 0x57, 0xff, 0xc9, 0x1a, 0x54, 0x02, 0xae, 0xc6, 0x09, 0x0d, 0xe1, 0x73, 0x59, 0x2f, 0x5d, - 0xa8, 0xd4, 0x77, 0xe8, 0xb1, 0xd2, 0x4b, 0xc4, 0x4a, 0x2d, 0xd4, 0x3a, 0x51, 0x5d, 0xfb, 0xb3, - 0x05, 0xa8, 0xb5, 0x75, 0xe3, 0xd0, 0xdd, 0xdf, 0x27, 0xef, 0x40, 0xdd, 0x72, 0x02, 0xea, 0x1d, - 0xe9, 0xb6, 0x84, 0x5d, 0x88, 0xc1, 0x86, 0x1b, 0xc2, 0xe8, 0xf1, 0xd8, 0xee, 0x8b, 0x09, 0x5a, - 0x1d, 0xc8, 0x5d, 0x0b, 0xd7, 0x8c, 0x37, 0x24, 0x06, 0x86, 0x68, 0xa4, 0x05, 0x15, 0x3f, 0xa0, - 0x7d, 0x9f, 0xaf, 0x81, 0xd3, 0xa2, 0x19, 0x1d, 0x56, 0x80, 0xa2, 0x5c, 0xfb, 0x1b, 0x05, 0x68, - 0xb4, 0x75, 0xdf, 0x32, 0xd8, 0x53, 0x92, 0x15, 0x28, 0x0f, 0x7c, 0xea, 0x9d, 0xef, 0xd9, 0xf8, - 0xb2, 0xb5, 0xeb, 0x53, 0x0f, 0x79, 0x65, 0x72, 0x0f, 0xea, 0x7d, 0xdd, 0xf7, 0x1f, 0xb8, 0x9e, - 0x29, 0x97, 0xde, 0x33, 0x02, 0x89, 0x6d, 0x82, 0xac, 0x8a, 0x21, 0x88, 0xd6, 0x84, 0x48, 0xf7, - 0xd0, 0x7e, 0xbf, 0x00, 0x97, 
0xda, 0x83, 0xfd, 0x7d, 0xea, 0x49, 0xad, 0x58, 0xea, 0x9b, 0x14, - 0x2a, 0x1e, 0x35, 0x2d, 0x5f, 0xb6, 0x7d, 0x75, 0xec, 0x81, 0x82, 0x0c, 0x45, 0xaa, 0xb7, 0xbc, - 0xbf, 0x78, 0x01, 0x0a, 0x74, 0x32, 0x80, 0xc6, 0xfb, 0x94, 0xed, 0xc6, 0xa9, 0xde, 0x93, 0x4f, - 0x77, 0x7b, 0x6c, 0x51, 0x6f, 0xd1, 0xa0, 0xc3, 0x91, 0xe2, 0xda, 0x74, 0x58, 0x88, 0x91, 0x24, - 0xed, 0x77, 0x2a, 0x30, 0xb5, 0xe2, 0xf6, 0xf6, 0x2c, 0x87, 0x9a, 0xb7, 0xcc, 0x2e, 0x25, 0xef, - 0x41, 0x99, 0x9a, 0x5d, 0x2a, 0x9f, 0x76, 0x7c, 0xc5, 0x83, 0x81, 0x45, 0xea, 0x13, 0xfb, 0x87, - 0x1c, 0x98, 0x6c, 0xc2, 0xcc, 0xbe, 0xe7, 0xf6, 0xc4, 0x5c, 0xbe, 0x73, 0xdc, 0x97, 0xba, 0x73, - 0xfb, 0xc7, 0xd5, 0x87, 0xb3, 0x96, 0xa0, 0x3e, 0x3a, 0x69, 0x41, 0xf4, 0x0f, 0x53, 0x75, 0xc9, - 0x3b, 0x30, 0x1f, 0x95, 0x84, 0x93, 0xda, 0x0a, 0xdb, 0xce, 0x70, 0xdd, 0xa9, 0xd2, 0x7e, 0xe9, - 0xf4, 0xa4, 0x35, 0xbf, 0x36, 0x82, 0x07, 0x47, 0xd6, 0x66, 0x53, 0xc5, 0x5c, 0x44, 0x14, 0x0b, - 0x8d, 0x54, 0x99, 0x26, 0xb4, 0x82, 0xf1, 0x7d, 0xdf, 0x5a, 0x4a, 0x04, 0x0e, 0x09, 0x25, 0x6b, - 0x30, 0x15, 0xb8, 0xb1, 0xfe, 0xaa, 0xf0, 0xfe, 0xd2, 0x94, 0xa1, 0x62, 0xc7, 0x1d, 0xd9, 0x5b, - 0x89, 0x7a, 0x04, 0xe1, 0x8a, 0xfa, 0x9f, 0xea, 0xa9, 0x2a, 0xef, 0xa9, 0x6b, 0xa7, 0x27, 0xad, - 0x2b, 0x3b, 0x99, 0x1c, 0x38, 0xa2, 0x26, 0xf9, 0xd5, 0x02, 0xcc, 0x28, 0x92, 0xec, 0xa3, 0xda, - 0x24, 0xfb, 0x88, 0xb0, 0x11, 0xb1, 0x93, 0x10, 0x80, 0x29, 0x81, 0xda, 0xef, 0x56, 0xa1, 0x11, - 0x4e, 0xf5, 0xe4, 0xb3, 0x50, 0xe1, 0x26, 0x08, 0xa9, 0xc1, 0x87, 0x6b, 0x38, 0xb7, 0x54, 0xa0, - 0xa0, 0x91, 0xcf, 0x41, 0xcd, 0x70, 0x7b, 0x3d, 0xdd, 0x31, 0xb9, 0x59, 0xa9, 0xd1, 0x6e, 0x32, - 0xd5, 0x65, 0x45, 0x14, 0xa1, 0xa2, 0x91, 0x97, 0xa0, 0xac, 0x7b, 0x5d, 0x61, 0xe1, 0x69, 0x88, - 0xf9, 0x68, 0xd9, 0xeb, 0xfa, 0xc8, 0x4b, 0xc9, 0x97, 0xa1, 0x44, 0x9d, 0xa3, 0xf9, 0xf2, 0x68, - 0xdd, 0xe8, 0x96, 0x73, 0x74, 0x5f, 0xf7, 0xda, 0x4d, 0xd9, 0x86, 0xd2, 0x2d, 0xe7, 0x08, 0x59, - 0x1d, 0xb2, 0x09, 0x35, 0xea, 0x1c, 0xb1, 0x77, 0x2f, 0x4d, 0x2f, 
0x3f, 0x36, 0xa2, 0x3a, 0x63, - 0x91, 0xdb, 0x84, 0x50, 0xc3, 0x92, 0xc5, 0xa8, 0x20, 0xc8, 0xd7, 0x61, 0x4a, 0x28, 0x5b, 0x5b, - 0xec, 0x9d, 0xf8, 0xf3, 0x55, 0x0e, 0xd9, 0x1a, 0xad, 0xad, 0x71, 0xbe, 0xc8, 0xd4, 0x15, 0x2b, - 0xf4, 0x31, 0x01, 0x45, 0xbe, 0x0e, 0x0d, 0xb5, 0x33, 0x56, 0x6f, 0x36, 0xd3, 0x4a, 0xa4, 0xb6, - 0xd3, 0x48, 0xbf, 0x39, 0xb0, 0x3c, 0xda, 0xa3, 0x4e, 0xe0, 0xb7, 0x9f, 0x57, 0x76, 0x03, 0x45, - 0xf5, 0x31, 0x42, 0x23, 0x7b, 0xc3, 0xe6, 0x2e, 0x61, 0xab, 0xf9, 0xec, 0x88, 0x59, 0x7d, 0x0c, - 0x5b, 0xd7, 0x37, 0x60, 0x36, 0xb4, 0x47, 0x49, 0x93, 0x86, 0xb0, 0xde, 0x7c, 0x91, 0x55, 0xdf, - 0x48, 0x92, 0x1e, 0x9d, 0xb4, 0x5e, 0xce, 0x30, 0x6a, 0x44, 0x0c, 0x98, 0x06, 0x23, 0x1f, 0xc0, - 0x8c, 0x47, 0x75, 0xd3, 0x72, 0xa8, 0xef, 0x6f, 0x7b, 0xee, 0x5e, 0x7e, 0xcd, 0x93, 0xa3, 0x88, - 0x61, 0x8f, 0x09, 0x64, 0x4c, 0x49, 0x22, 0x0f, 0x60, 0xda, 0xb6, 0x8e, 0x68, 0x24, 0xba, 0x39, - 0x11, 0xd1, 0xcf, 0x9f, 0x9e, 0xb4, 0xa6, 0x37, 0xe3, 0xc0, 0x98, 0x94, 0xa3, 0xfd, 0xdd, 0x0a, - 0x0c, 0x6f, 0xbe, 0x92, 0x23, 0xa5, 0x30, 0xe9, 0x91, 0x92, 0x7e, 0x8b, 0x62, 0xcd, 0x78, 0x5d, - 0x56, 0x9b, 0xc0, 0x9b, 0xcc, 0x18, 0x8d, 0xa5, 0x49, 0x8f, 0xc6, 0x67, 0x66, 0xc2, 0x18, 0x1e, - 0xb6, 0xd5, 0x8f, 0x6f, 0xd8, 0xd6, 0x9e, 0xd2, 0xb0, 0xfd, 0x6e, 0x19, 0x66, 0x56, 0x75, 0xda, - 0x73, 0x9d, 0x27, 0xee, 0xbf, 0x0b, 0xcf, 0xc4, 0xfe, 0xfb, 0x26, 0xd4, 0x3d, 0xda, 0xb7, 0x2d, - 0x43, 0x17, 0x6a, 0xb6, 0xb4, 0x77, 0xa3, 0x2c, 0xc3, 0x90, 0x3a, 0xc2, 0xee, 0x52, 0x7a, 0x26, - 0xed, 0x2e, 0xe5, 0x8f, 0xdf, 0xee, 0xa2, 0xfd, 0x6a, 0x11, 0xb8, 0x4a, 0x4a, 0x6e, 0x40, 0x99, - 0xa9, 0x5b, 0x69, 0x6b, 0x1f, 0xff, 0x5a, 0x38, 0x85, 0x5c, 0x83, 0x62, 0xe0, 0xca, 0xe9, 0x06, - 0x24, 0xbd, 0xb8, 0xe3, 0x62, 0x31, 0x70, 0xc9, 0x07, 0x00, 0x86, 0xeb, 0x98, 0x96, 0x72, 0x03, - 0xe5, 0x7b, 0xb0, 0x35, 0xd7, 0x7b, 0xa0, 0x7b, 0xe6, 0x4a, 0x88, 0x28, 0x76, 0xde, 0xd1, 0x7f, - 0x8c, 0x49, 0x23, 0x6f, 0x40, 0xd5, 0x75, 0xd6, 0x06, 0xb6, 0xcd, 0x3b, 0xb4, 0xd1, 0xfe, 0xc2, - 0xe9, 
0x49, 0xab, 0x7a, 0x8f, 0x97, 0x3c, 0x3a, 0x69, 0x5d, 0x15, 0x3b, 0x19, 0xf6, 0xef, 0x6d, - 0xcf, 0x0a, 0x2c, 0xa7, 0x1b, 0x6e, 0x44, 0x65, 0x35, 0xed, 0xd7, 0x0a, 0xd0, 0x5c, 0xb3, 0x1e, - 0x52, 0xf3, 0x6d, 0xcb, 0x31, 0xdd, 0x07, 0x04, 0xa1, 0x6a, 0x53, 0xa7, 0x1b, 0x1c, 0x8c, 0xb9, - 0x53, 0x14, 0xf6, 0x18, 0x8e, 0x80, 0x12, 0x89, 0x2c, 0x42, 0x43, 0xec, 0x33, 0x2c, 0xa7, 0xcb, - 0xfb, 0xb0, 0x1e, 0xcd, 0xf4, 0x1d, 0x45, 0xc0, 0x88, 0x47, 0x3b, 0x86, 0xe7, 0x87, 0xba, 0x81, - 0x98, 0x50, 0x0e, 0xf4, 0xae, 0x5a, 0x54, 0xd6, 0xc6, 0xee, 0xe0, 0x1d, 0xbd, 0x1b, 0xeb, 0x5c, - 0xae, 0xcd, 0xed, 0xe8, 0x4c, 0x9b, 0x63, 0xe8, 0xda, 0x1f, 0x15, 0xa0, 0xbe, 0x36, 0x70, 0x0c, - 0xbe, 0x19, 0x7f, 0xb2, 0x15, 0x58, 0xa9, 0x86, 0xc5, 0x4c, 0xd5, 0x70, 0x00, 0xd5, 0xc3, 0x07, - 0xa1, 0xea, 0xd8, 0x5c, 0xda, 0x1a, 0x7f, 0x54, 0xc8, 0x26, 0x2d, 0xdc, 0xe1, 0x78, 0xc2, 0x49, - 0x39, 0x23, 0x1b, 0x54, 0xbd, 0xf3, 0x36, 0x17, 0x2a, 0x85, 0x5d, 0xfb, 0x32, 0x34, 0x63, 0x6c, - 0xe7, 0xf2, 0x57, 0xfc, 0xbd, 0x32, 0x54, 0xd7, 0x3b, 0x9d, 0xe5, 0xed, 0x0d, 0xf2, 0x2a, 0x34, - 0xa5, 0xff, 0xea, 0x6e, 0xd4, 0x07, 0xa1, 0xfb, 0xb2, 0x13, 0x91, 0x30, 0xce, 0xc7, 0x14, 0x6f, - 0x8f, 0xea, 0x76, 0x4f, 0x7e, 0x2c, 0xa1, 0xe2, 0x8d, 0xac, 0x10, 0x05, 0x8d, 0xe8, 0x30, 0xc3, - 0xf6, 0xf2, 0xac, 0x0b, 0xc5, 0x3e, 0x5d, 0x7e, 0x36, 0x67, 0xdc, 0xc9, 0xf3, 0x05, 0x66, 0x37, - 0x01, 0x80, 0x29, 0x40, 0xf2, 0x3a, 0xd4, 0xf5, 0x41, 0x70, 0xc0, 0xb7, 0x4a, 0xe2, 0xdb, 0x78, - 0x89, 0xbb, 0xf7, 0x64, 0xd9, 0xa3, 0x93, 0xd6, 0xd4, 0x1d, 0x6c, 0xbf, 0xaa, 0xfe, 0x63, 0xc8, - 0xcd, 0x1a, 0xa7, 0x6c, 0x03, 0xb2, 0x71, 0x95, 0x73, 0x37, 0x6e, 0x3b, 0x01, 0x80, 0x29, 0x40, - 0xf2, 0x2e, 0x4c, 0x1d, 0xd2, 0xe3, 0x40, 0xdf, 0x93, 0x02, 0xaa, 0xe7, 0x11, 0x30, 0xc7, 0x94, - 0xf5, 0x3b, 0xb1, 0xea, 0x98, 0x00, 0x23, 0x3e, 0x5c, 0x3e, 0xa4, 0xde, 0x1e, 0xf5, 0x5c, 0x69, - 0x67, 0x90, 0x42, 0x6a, 0xe7, 0x11, 0x32, 0x7f, 0x7a, 0xd2, 0xba, 0x7c, 0x27, 0x03, 0x06, 0x33, - 0xc1, 0xb5, 0xff, 0x53, 0x84, 0xd9, 0x75, 
0x11, 0x40, 0xe0, 0x7a, 0x42, 0xf3, 0x20, 0x57, 0xa1, - 0xe4, 0xf5, 0x07, 0x7c, 0xe4, 0x94, 0x84, 0x8b, 0x00, 0xb7, 0x77, 0x91, 0x95, 0x91, 0x77, 0xa0, - 0x6e, 0xca, 0x29, 0x43, 0x9a, 0x39, 0xc6, 0x32, 0x49, 0xa9, 0x7f, 0x18, 0xa2, 0xb1, 0x3d, 0x5d, - 0xcf, 0xef, 0x76, 0xac, 0x0f, 0xa8, 0xdc, 0xf9, 0xf3, 0x3d, 0xdd, 0x96, 0x28, 0x42, 0x45, 0x63, - 0xab, 0xea, 0x21, 0x3d, 0x16, 0xfb, 0xde, 0x72, 0xb4, 0xaa, 0xde, 0x91, 0x65, 0x18, 0x52, 0x49, - 0x4b, 0x7d, 0x2c, 0x6c, 0x14, 0x94, 0x85, 0xcd, 0xe6, 0x3e, 0x2b, 0x90, 0xdf, 0x0d, 0x9b, 0x32, - 0xdf, 0xb7, 0x82, 0x80, 0x7a, 0xf2, 0x35, 0x8e, 0x35, 0x65, 0xbe, 0xc5, 0x11, 0x50, 0x22, 0x91, - 0x9f, 0x84, 0x06, 0x07, 0x6f, 0xdb, 0xee, 0x1e, 0x7f, 0x71, 0x0d, 0x61, 0xbd, 0xb9, 0xaf, 0x0a, - 0x31, 0xa2, 0x6b, 0x3f, 0x2a, 0xc2, 0x95, 0x75, 0x1a, 0x08, 0xad, 0x66, 0x95, 0xf6, 0x6d, 0xf7, - 0x98, 0xe9, 0xd3, 0x48, 0xbf, 0x49, 0xde, 0x04, 0xb0, 0xfc, 0xbd, 0xce, 0x91, 0xc1, 0xbf, 0x03, - 0xf1, 0x0d, 0xdf, 0x90, 0x9f, 0x24, 0x6c, 0x74, 0xda, 0x92, 0xf2, 0x28, 0xf1, 0x0f, 0x63, 0x75, - 0xa2, 0x8d, 0x74, 0xf1, 0x31, 0x1b, 0xe9, 0x0e, 0x40, 0x3f, 0xd2, 0xca, 0x4b, 0x9c, 0xf3, 0x67, - 0x94, 0x98, 0xf3, 0x28, 0xe4, 0x31, 0x98, 0x3c, 0x7a, 0xb2, 0x03, 0x73, 0x26, 0xdd, 0xd7, 0x07, - 0x76, 0x10, 0xee, 0x24, 0xe4, 0x47, 0x7c, 0xf6, 0xcd, 0x48, 0x18, 0xdc, 0xb0, 0x9a, 0x42, 0xc2, - 0x21, 0x6c, 0xed, 0xef, 0x97, 0xe0, 0xda, 0x3a, 0x0d, 0x42, 0xdb, 0x9a, 0x9c, 0x1d, 0x3b, 0x7d, - 0x6a, 0xb0, 0xb7, 0xf0, 0x61, 0x01, 0xaa, 0xb6, 0xbe, 0x47, 0x6d, 0xb6, 0x7a, 0xb1, 0xa7, 0x79, - 0x6f, 0xec, 0x85, 0x60, 0xb4, 0x94, 0x85, 0x4d, 0x2e, 0x21, 0xb5, 0x34, 0x88, 0x42, 0x94, 0xe2, - 0xd9, 0xa4, 0x6e, 0xd8, 0x03, 0x3f, 0xa0, 0xde, 0xb6, 0xeb, 0x05, 0x52, 0x9f, 0x0c, 0x27, 0xf5, - 0x95, 0x88, 0x84, 0x71, 0x3e, 0xb2, 0x04, 0x60, 0xd8, 0x16, 0x75, 0x02, 0x5e, 0x4b, 0x7c, 0x57, - 0x44, 0xbd, 0xdf, 0x95, 0x90, 0x82, 0x31, 0x2e, 0x26, 0xaa, 0xe7, 0x3a, 0x56, 0xe0, 0x0a, 0x51, - 0xe5, 0xa4, 0xa8, 0xad, 0x88, 0x84, 0x71, 0x3e, 0x5e, 0x8d, 0x06, 0x9e, 0x65, 
0xf8, 0xbc, 0x5a, - 0x25, 0x55, 0x2d, 0x22, 0x61, 0x9c, 0x8f, 0xad, 0x79, 0xb1, 0xe7, 0x3f, 0xd7, 0x9a, 0xf7, 0x5b, - 0x0d, 0xb8, 0x9e, 0xe8, 0xd6, 0x40, 0x0f, 0xe8, 0xfe, 0xc0, 0xee, 0xd0, 0x40, 0xbd, 0xc0, 0x31, - 0xd7, 0xc2, 0xbf, 0x10, 0xbd, 0x77, 0x11, 0xb6, 0x64, 0x4c, 0xe6, 0xbd, 0x0f, 0x35, 0xf0, 0x4c, - 0xef, 0x7e, 0x11, 0x1a, 0x8e, 0x1e, 0xf8, 0xfc, 0xc3, 0x95, 0xdf, 0x68, 0xa8, 0x86, 0xdd, 0x55, - 0x04, 0x8c, 0x78, 0xc8, 0x36, 0x5c, 0x96, 0x5d, 0x7c, 0xeb, 0x61, 0xdf, 0xf5, 0x02, 0xea, 0x89, - 0xba, 0x72, 0x39, 0x95, 0x75, 0x2f, 0x6f, 0x65, 0xf0, 0x60, 0x66, 0x4d, 0xb2, 0x05, 0x97, 0x0c, - 0x11, 0xca, 0x41, 0x6d, 0x57, 0x37, 0x15, 0xa0, 0x30, 0x65, 0x86, 0x5b, 0xa3, 0x95, 0x61, 0x16, - 0xcc, 0xaa, 0x97, 0x1e, 0xcd, 0xd5, 0xb1, 0x46, 0x73, 0x6d, 0x9c, 0xd1, 0x5c, 0x1f, 0x6f, 0x34, - 0x37, 0xce, 0x36, 0x9a, 0x59, 0xcf, 0xb3, 0x71, 0x44, 0x3d, 0xa6, 0x9e, 0x88, 0x15, 0x36, 0x16, - 0x29, 0x14, 0xf6, 0x7c, 0x27, 0x83, 0x07, 0x33, 0x6b, 0x92, 0x3d, 0xb8, 0x26, 0xca, 0x6f, 0x39, - 0x86, 0x77, 0xdc, 0x67, 0x0b, 0x4f, 0x0c, 0xb7, 0x99, 0xb0, 0x25, 0x5f, 0xeb, 0x8c, 0xe4, 0xc4, - 0xc7, 0xa0, 0x90, 0x9f, 0x83, 0x69, 0xf1, 0x96, 0xb6, 0xf4, 0x3e, 0x87, 0x15, 0x71, 0x43, 0x2f, - 0x48, 0xd8, 0xe9, 0x95, 0x38, 0x11, 0x93, 0xbc, 0x64, 0x19, 0x66, 0xfb, 0x47, 0x06, 0xfb, 0xb9, - 0xb1, 0x7f, 0x97, 0x52, 0x93, 0x9a, 0xdc, 0x51, 0xd9, 0x68, 0xbf, 0xa8, 0xac, 0x3b, 0xdb, 0x49, - 0x32, 0xa6, 0xf9, 0xc9, 0xeb, 0x30, 0xe5, 0x07, 0xba, 0x17, 0x48, 0x03, 0xee, 0xfc, 0x8c, 0x88, - 0xab, 0x52, 0xf6, 0xcd, 0x4e, 0x8c, 0x86, 0x09, 0xce, 0xcc, 0xf5, 0x62, 0xf6, 0xe2, 0xd6, 0x8b, - 0x3c, 0xb3, 0xd5, 0x3f, 0x2f, 0xc2, 0x8d, 0x75, 0x1a, 0x6c, 0xb9, 0x8e, 0x34, 0x7f, 0x67, 0x2d, - 0xfb, 0x67, 0xb2, 0x7e, 0x27, 0x17, 0xed, 0xe2, 0x44, 0x17, 0xed, 0xd2, 0x84, 0x16, 0xed, 0xf2, - 0x05, 0x2e, 0xda, 0xff, 0xb0, 0x08, 0x2f, 0x26, 0x7a, 0x72, 0xdb, 0x35, 0xd5, 0x84, 0xff, 0x69, - 0x07, 0x9e, 0xa1, 0x03, 0x1f, 0x09, 0xbd, 0x93, 0x3b, 0x30, 0x53, 0x1a, 0xcf, 0x77, 0xd2, 0x1a, - 0xcf, 0xbb, 0x79, 
0x56, 0xbe, 0x0c, 0x09, 0x67, 0x5a, 0xf1, 0xde, 0x02, 0xe2, 0x49, 0x77, 0xab, - 0x30, 0xfd, 0xc4, 0x94, 0x9e, 0x30, 0x70, 0x13, 0x87, 0x38, 0x30, 0xa3, 0x16, 0xe9, 0xc0, 0x0b, - 0x3e, 0x75, 0x02, 0xcb, 0xa1, 0x76, 0x12, 0x4e, 0x68, 0x43, 0x2f, 0x4b, 0xb8, 0x17, 0x3a, 0x59, - 0x4c, 0x98, 0x5d, 0x37, 0xcf, 0x3c, 0xf0, 0xaf, 0x81, 0xab, 0x9c, 0xa2, 0x6b, 0x26, 0xa6, 0xb1, - 0x7c, 0x98, 0xd6, 0x58, 0xde, 0xcb, 0xff, 0xde, 0xc6, 0xd3, 0x56, 0x96, 0x00, 0xf8, 0x5b, 0x88, - 0xab, 0x2b, 0xe1, 0x22, 0x8d, 0x21, 0x05, 0x63, 0x5c, 0x6c, 0x01, 0x52, 0xfd, 0x1c, 0xd7, 0x54, - 0xc2, 0x05, 0xa8, 0x13, 0x27, 0x62, 0x92, 0x77, 0xa4, 0xb6, 0x53, 0x19, 0x5b, 0xdb, 0x79, 0x0b, - 0x48, 0xc2, 0xf0, 0x28, 0xf0, 0xaa, 0xc9, 0xb8, 0xe1, 0x8d, 0x21, 0x0e, 0xcc, 0xa8, 0x35, 0x62, - 0x28, 0xd7, 0x26, 0x3b, 0x94, 0xeb, 0xe3, 0x0f, 0x65, 0xf2, 0x1e, 0x5c, 0xe5, 0xa2, 0x64, 0xff, - 0x24, 0x81, 0x85, 0xde, 0xf3, 0x63, 0x12, 0xf8, 0x2a, 0x8e, 0x62, 0xc4, 0xd1, 0x18, 0xec, 0xfd, - 0x18, 0x1e, 0x35, 0x99, 0x70, 0xdd, 0x1e, 0xad, 0x13, 0xad, 0x64, 0xf0, 0x60, 0x66, 0x4d, 0x36, - 0xc4, 0x02, 0x36, 0x0c, 0xf5, 0x3d, 0x9b, 0x9a, 0x32, 0x6e, 0x3a, 0x1c, 0x62, 0x3b, 0x9b, 0x1d, - 0x49, 0xc1, 0x18, 0x57, 0x96, 0x9a, 0x32, 0x75, 0x4e, 0x35, 0x65, 0x9d, 0x5b, 0xe9, 0xf7, 0x13, - 0xda, 0x90, 0xd4, 0x75, 0xc2, 0x48, 0xf8, 0x95, 0x34, 0x03, 0x0e, 0xd7, 0xe1, 0x5a, 0xa2, 0xe1, - 0x59, 0xfd, 0xc0, 0x4f, 0x62, 0xcd, 0xa4, 0xb4, 0xc4, 0x0c, 0x1e, 0xcc, 0xac, 0xc9, 0xf4, 0xf3, - 0x03, 0xaa, 0xdb, 0xc1, 0x41, 0x12, 0x70, 0x36, 0xa9, 0x9f, 0xdf, 0x1e, 0x66, 0xc1, 0xac, 0x7a, - 0x99, 0x0b, 0xd2, 0xdc, 0xb3, 0xa9, 0x56, 0x7d, 0xbb, 0x04, 0x57, 0xd7, 0x69, 0x10, 0x86, 0x94, - 0x7d, 0x6a, 0x46, 0xf9, 0x18, 0xcc, 0x28, 0xbf, 0x59, 0x81, 0x4b, 0xeb, 0x34, 0x18, 0xd2, 0xc6, - 0xfe, 0x98, 0x76, 0xff, 0x16, 0x5c, 0x8a, 0xa2, 0x18, 0x3b, 0x81, 0xeb, 0x89, 0xb5, 0x3c, 0xb5, - 0x5b, 0xee, 0x0c, 0xb3, 0x60, 0x56, 0x3d, 0xf2, 0x75, 0x78, 0x91, 0x2f, 0xf5, 0x4e, 0x57, 0xd8, - 0x67, 0x85, 0x31, 0x21, 0x76, 0x0e, 0xa7, 0x25, 0x21, 
0x5f, 0xec, 0x64, 0xb3, 0xe1, 0xa8, 0xfa, - 0xe4, 0x5b, 0x30, 0xd5, 0xb7, 0xfa, 0xd4, 0xb6, 0x1c, 0xae, 0x9f, 0xe5, 0x0e, 0xfe, 0xd9, 0x8e, - 0x81, 0x45, 0x1b, 0xb8, 0x78, 0x29, 0x26, 0x04, 0x66, 0x8e, 0xd4, 0xfa, 0x05, 0x8e, 0xd4, 0xff, - 0x59, 0x84, 0xda, 0xba, 0xe7, 0x0e, 0xfa, 0xed, 0x63, 0xd2, 0x85, 0xea, 0x03, 0xee, 0x3c, 0x93, - 0xae, 0xa9, 0xf1, 0x4f, 0x02, 0x08, 0x1f, 0x5c, 0xa4, 0x12, 0x89, 0xff, 0x28, 0xe1, 0xd9, 0x20, - 0x3e, 0xa4, 0xc7, 0xd4, 0x94, 0x3e, 0xb4, 0x70, 0x10, 0xdf, 0x61, 0x85, 0x28, 0x68, 0xa4, 0x07, - 0xb3, 0xba, 0x6d, 0xbb, 0x0f, 0xa8, 0xb9, 0xa9, 0x07, 0xdc, 0xef, 0x2d, 0x7d, 0x2b, 0xe7, 0x35, - 0x4b, 0xf3, 0x60, 0x86, 0xe5, 0x24, 0x14, 0xa6, 0xb1, 0xc9, 0xfb, 0x50, 0xf3, 0x03, 0xd7, 0x53, - 0xca, 0x56, 0x73, 0x69, 0x65, 0xfc, 0x97, 0xde, 0xfe, 0x5a, 0x47, 0x40, 0x09, 0x9b, 0xbd, 0xfc, - 0x83, 0x4a, 0x80, 0xf6, 0x1b, 0x05, 0x80, 0xdb, 0x3b, 0x3b, 0xdb, 0xd2, 0xbd, 0x60, 0x42, 0x59, - 0x1f, 0x84, 0x8e, 0xca, 0xf1, 0x1d, 0x82, 0x89, 0x00, 0x5c, 0xe9, 0xc3, 0x1b, 0x04, 0x07, 0xc8, - 0xd1, 0xc9, 0x4f, 0x40, 0x4d, 0x2a, 0xc8, 0xb2, 0xdb, 0xc3, 0x78, 0x0a, 0xa9, 0x44, 0xa3, 0xa2, - 0x6b, 0x7f, 0xa7, 0x08, 0xb0, 0x61, 0xda, 0xb4, 0xa3, 0x0e, 0x6f, 0x34, 0x82, 0x03, 0x8f, 0xfa, - 0x07, 0xae, 0x6d, 0x8e, 0xe9, 0x4d, 0xe5, 0x36, 0xff, 0x1d, 0x05, 0x82, 0x11, 0x1e, 0x31, 0x61, - 0xca, 0x0f, 0x68, 0x5f, 0xc5, 0xe4, 0x8e, 0xe9, 0x44, 0x99, 0x13, 0x76, 0x91, 0x08, 0x07, 0x13, - 0xa8, 0x44, 0x87, 0xa6, 0xe5, 0x18, 0xe2, 0x03, 0x69, 0x1f, 0x8f, 0x39, 0x90, 0x66, 0xd9, 0x8e, - 0x63, 0x23, 0x82, 0xc1, 0x38, 0xa6, 0xf6, 0x7b, 0x45, 0xb8, 0xc2, 0xe5, 0xb1, 0x66, 0x24, 0x22, - 0x6f, 0xc9, 0x9f, 0x1e, 0x3a, 0x68, 0xfa, 0x27, 0xcf, 0x26, 0x5a, 0x9c, 0x53, 0xdc, 0xa2, 0x81, - 0x1e, 0xe9, 0x73, 0x51, 0x59, 0xec, 0x74, 0xe9, 0x00, 0xca, 0x3e, 0x9b, 0xaf, 0x44, 0xef, 0x75, - 0xc6, 0x1e, 0x42, 0xd9, 0x0f, 0xc0, 0x67, 0xaf, 0xd0, 0x6b, 0xcc, 0x67, 0x2d, 0x2e, 0x8e, 0xfc, - 0x32, 0x54, 0xfd, 0x40, 0x0f, 0x06, 0xea, 0xd3, 0xdc, 0x9d, 0xb4, 0x60, 0x0e, 0x1e, 0xcd, 
0x23, - 0xe2, 0x3f, 0x4a, 0xa1, 0xda, 0xef, 0x15, 0xe0, 0x5a, 0x76, 0xc5, 0x4d, 0xcb, 0x0f, 0xc8, 0x9f, - 0x1a, 0xea, 0xf6, 0x33, 0xbe, 0x71, 0x56, 0x9b, 0x77, 0x7a, 0x78, 0x16, 0x41, 0x95, 0xc4, 0xba, - 0x3c, 0x80, 0x8a, 0x15, 0xd0, 0x9e, 0xda, 0x5f, 0xde, 0x9b, 0xf0, 0xa3, 0xc7, 0x96, 0x76, 0x26, - 0x05, 0x85, 0x30, 0xed, 0xbb, 0xc5, 0x51, 0x8f, 0xcc, 0x97, 0x0f, 0x3b, 0x19, 0xdd, 0x7d, 0x27, - 0x5f, 0x74, 0x77, 0xb2, 0x41, 0xc3, 0x41, 0xde, 0x7f, 0x66, 0x38, 0xc8, 0xfb, 0x5e, 0xfe, 0x20, - 0xef, 0x54, 0x37, 0x8c, 0x8c, 0xf5, 0xfe, 0xa8, 0x04, 0x2f, 0x3d, 0x6e, 0xd8, 0xb0, 0xf5, 0x4c, - 0x8e, 0xce, 0xbc, 0xeb, 0xd9, 0xe3, 0xc7, 0x21, 0x59, 0x82, 0x4a, 0xff, 0x40, 0xf7, 0x95, 0x52, - 0xa6, 0x36, 0x2c, 0x95, 0x6d, 0x56, 0xf8, 0x88, 0x4d, 0x1a, 0x5c, 0x99, 0xe3, 0x7f, 0x51, 0xb0, - 0xb2, 0xe9, 0xb8, 0x47, 0x7d, 0x3f, 0xb2, 0x09, 0x84, 0xd3, 0xf1, 0x96, 0x28, 0x46, 0x45, 0x27, - 0x01, 0x54, 0x85, 0x89, 0x59, 0xae, 0x4c, 0xe3, 0x07, 0x72, 0x65, 0x1c, 0x08, 0x88, 0x1e, 0x4a, - 0x7a, 0x2b, 0xa4, 0x2c, 0xb2, 0x00, 0xe5, 0x20, 0x0a, 0xcf, 0x56, 0x5b, 0xf3, 0x72, 0x86, 0x7e, - 0xca, 0xf9, 0xd8, 0xc6, 0xde, 0xdd, 0xe3, 0x46, 0x75, 0x53, 0xfa, 0xcf, 0x2d, 0xd7, 0xe1, 0x0a, - 0x59, 0x29, 0xda, 0xd8, 0xdf, 0x1b, 0xe2, 0xc0, 0x8c, 0x5a, 0xda, 0xbf, 0xab, 0xc3, 0x95, 0xec, - 0xf1, 0xc0, 0xfa, 0xed, 0x88, 0x7a, 0x3e, 0xc3, 0x2e, 0x24, 0xfb, 0xed, 0xbe, 0x28, 0x46, 0x45, - 0xff, 0x44, 0x07, 0x9c, 0xfd, 0x66, 0x01, 0xae, 0x7a, 0xd2, 0x47, 0xf4, 0x34, 0x82, 0xce, 0x5e, - 0x16, 0xe6, 0x8c, 0x11, 0x02, 0x71, 0x74, 0x5b, 0xc8, 0xdf, 0x2a, 0xc0, 0x7c, 0x2f, 0x65, 0xe7, - 0xb8, 0xc0, 0xb3, 0x92, 0xfc, 0xfc, 0xc3, 0xd6, 0x08, 0x79, 0x38, 0xb2, 0x25, 0xe4, 0x5b, 0xd0, - 0xec, 0xb3, 0x71, 0xe1, 0x07, 0xd4, 0x31, 0x54, 0x80, 0xe8, 0xf8, 0x5f, 0xd2, 0x76, 0x84, 0x15, - 0x9e, 0x95, 0xe2, 0xfa, 0x41, 0x8c, 0x80, 0x71, 0x89, 0xcf, 0xf8, 0xe1, 0xc8, 0x9b, 0x50, 0xf7, - 0x69, 0x10, 0x58, 0x4e, 0x57, 0xec, 0x37, 0x1a, 0xe2, 0x5b, 0xe9, 0xc8, 0x32, 0x0c, 0xa9, 0xe4, - 0x27, 0xa1, 0xc1, 0x5d, 0x4e, 
0xcb, 0x5e, 0xd7, 0x9f, 0x6f, 0xf0, 0x70, 0xb1, 0x69, 0x11, 0x00, - 0x27, 0x0b, 0x31, 0xa2, 0x93, 0x2f, 0xc2, 0xd4, 0x1e, 0xff, 0x7c, 0xe5, 0x79, 0x79, 0x61, 0xe3, - 0xe2, 0xda, 0x5a, 0x3b, 0x56, 0x8e, 0x09, 0x2e, 0xb2, 0x04, 0x40, 0x43, 0xbf, 0x5c, 0xda, 0x9e, - 0x15, 0x79, 0xec, 0x30, 0xc6, 0x45, 0x5e, 0x86, 0x52, 0x60, 0xfb, 0xdc, 0x86, 0x55, 0x8f, 0xb6, - 0xa0, 0x3b, 0x9b, 0x1d, 0x64, 0xe5, 0xda, 0x8f, 0x0a, 0x30, 0x9b, 0x3a, 0x46, 0xc4, 0xaa, 0x0c, - 0x3c, 0x5b, 0x4e, 0x23, 0x61, 0x95, 0x5d, 0xdc, 0x44, 0x56, 0x4e, 0xde, 0x93, 0x6a, 0x79, 0x31, - 0x67, 0x6a, 0x90, 0xbb, 0x7a, 0xe0, 0x33, 0x3d, 0x7c, 0x48, 0x23, 0xe7, 0x6e, 0xbe, 0xa8, 0x3d, - 0x72, 0x1d, 0x88, 0xb9, 0xf9, 0x22, 0x1a, 0x26, 0x38, 0x53, 0x06, 0xbf, 0xf2, 0x59, 0x0c, 0x7e, - 0xda, 0xaf, 0x15, 0x63, 0x3d, 0x20, 0x35, 0xfb, 0x27, 0xf4, 0xc0, 0xe7, 0xd9, 0x02, 0x1a, 0x2e, - 0xee, 0x8d, 0xf8, 0xfa, 0xc7, 0x17, 0x63, 0x49, 0x25, 0x6f, 0x8b, 0xbe, 0x2f, 0xe5, 0x3c, 0x80, - 0xbd, 0xb3, 0xd9, 0x11, 0xd1, 0x55, 0xea, 0xad, 0x85, 0xaf, 0xa0, 0x7c, 0x41, 0xaf, 0x40, 0xfb, - 0x97, 0x25, 0x68, 0xbe, 0xe5, 0xee, 0x7d, 0x42, 0x22, 0xa8, 0xb3, 0x97, 0xa9, 0xe2, 0xc7, 0xb8, - 0x4c, 0xed, 0xc2, 0x8b, 0x41, 0x60, 0x77, 0xa8, 0xe1, 0x3a, 0xa6, 0xbf, 0xbc, 0x1f, 0x50, 0x6f, - 0xcd, 0x72, 0x2c, 0xff, 0x80, 0x9a, 0xd2, 0x9d, 0xf4, 0x99, 0xd3, 0x93, 0xd6, 0x8b, 0x3b, 0x3b, - 0x9b, 0x59, 0x2c, 0x38, 0xaa, 0x2e, 0x9f, 0x36, 0xc4, 0x99, 0x4f, 0x7e, 0x26, 0x4a, 0xc6, 0xdc, - 0x88, 0x69, 0x23, 0x56, 0x8e, 0x09, 0x2e, 0xed, 0x7b, 0x45, 0x68, 0x84, 0x49, 0x1f, 0xc8, 0xe7, - 0xa0, 0xb6, 0xe7, 0xb9, 0x87, 0xd4, 0x13, 0x9e, 0x3b, 0x79, 0x26, 0xaa, 0x2d, 0x8a, 0x50, 0xd1, - 0xc8, 0x67, 0xa1, 0x12, 0xb8, 0x7d, 0xcb, 0x48, 0x1b, 0xd4, 0x76, 0x58, 0x21, 0x0a, 0xda, 0xc5, - 0x0d, 0xf0, 0xcf, 0x27, 0x54, 0xbb, 0xc6, 0x48, 0x65, 0xec, 0x5d, 0x28, 0xfb, 0xba, 0x6f, 0xcb, - 0xf5, 0x34, 0x47, 0xfe, 0x84, 0xe5, 0xce, 0xa6, 0xcc, 0x9f, 0xb0, 0xdc, 0xd9, 0x44, 0x0e, 0xaa, - 0xfd, 0x61, 0x11, 0x9a, 0xa2, 0xdf, 0xc4, 0xac, 0x30, 0xc9, 0x9e, 
0x7b, 0x83, 0x87, 0x52, 0xf8, - 0x83, 0x1e, 0xf5, 0xb8, 0x99, 0x49, 0x4e, 0x72, 0x71, 0xff, 0x40, 0x44, 0x0c, 0xc3, 0x29, 0xa2, - 0x22, 0xd5, 0xf5, 0xe5, 0x0b, 0xec, 0xfa, 0xca, 0x99, 0xba, 0xbe, 0x7a, 0x11, 0x5d, 0xff, 0x61, - 0x11, 0x1a, 0x9b, 0xd6, 0x3e, 0x35, 0x8e, 0x0d, 0x9b, 0x9f, 0xfe, 0x34, 0xa9, 0x4d, 0x03, 0xba, - 0xee, 0xe9, 0x06, 0xdd, 0xa6, 0x9e, 0xc5, 0x93, 0x22, 0xb1, 0xef, 0x83, 0xcf, 0x40, 0xf2, 0xf4, - 0xe7, 0xea, 0x08, 0x1e, 0x1c, 0x59, 0x9b, 0x6c, 0xc0, 0x94, 0x49, 0x7d, 0xcb, 0xa3, 0xe6, 0x76, - 0x6c, 0xa3, 0xf2, 0x39, 0xb5, 0xd4, 0xac, 0xc6, 0x68, 0x8f, 0x4e, 0x5a, 0xd3, 0xca, 0x40, 0x29, - 0x76, 0x2c, 0x89, 0xaa, 0xec, 0x93, 0xef, 0xeb, 0x03, 0x3f, 0xab, 0x8d, 0xb1, 0x4f, 0x7e, 0x3b, - 0x9b, 0x05, 0x47, 0xd5, 0xd5, 0x2a, 0x50, 0xda, 0x74, 0xbb, 0xda, 0x77, 0x4b, 0x10, 0x66, 0xcf, - 0x22, 0x7f, 0xbe, 0x00, 0x4d, 0xdd, 0x71, 0xdc, 0x40, 0x66, 0xa6, 0x12, 0x1e, 0x78, 0xcc, 0x9d, - 0xa4, 0x6b, 0x61, 0x39, 0x02, 0x15, 0xce, 0xdb, 0xd0, 0xa1, 0x1c, 0xa3, 0x60, 0x5c, 0x36, 0x19, - 0xa4, 0xfc, 0xc9, 0x5b, 0xf9, 0x5b, 0x71, 0x06, 0xef, 0xf1, 0xb5, 0xaf, 0xc2, 0x5c, 0xba, 0xb1, - 0xe7, 0x71, 0x07, 0xe5, 0x72, 0xcc, 0x17, 0x01, 0xa2, 0x98, 0x92, 0xa7, 0x60, 0xc4, 0xb2, 0x12, - 0x46, 0xac, 0xf1, 0x53, 0x18, 0x44, 0x8d, 0x1e, 0x69, 0xb8, 0xfa, 0x66, 0xca, 0x70, 0xb5, 0x31, - 0x09, 0x61, 0x8f, 0x37, 0x56, 0xed, 0xc1, 0xa5, 0x88, 0x37, 0xfa, 0xe6, 0xef, 0xa4, 0xbe, 0x4c, - 0xa1, 0x8b, 0x7d, 0x61, 0xc4, 0x97, 0x39, 0x1b, 0x0b, 0xf2, 0x19, 0xfe, 0x36, 0xb5, 0xbf, 0x5d, - 0x80, 0xb9, 0xb8, 0x10, 0x7e, 0xde, 0xfa, 0x4b, 0x30, 0xed, 0x51, 0xdd, 0x6c, 0xeb, 0x81, 0x71, - 0xc0, 0xc3, 0xc9, 0x0b, 0x3c, 0xfe, 0x9b, 0x9f, 0x30, 0xc3, 0x38, 0x01, 0x93, 0x7c, 0x44, 0x87, - 0x26, 0x2b, 0xd8, 0xb1, 0x7a, 0xd4, 0x1d, 0x04, 0x63, 0x5a, 0x66, 0xf9, 0xa6, 0x08, 0x23, 0x18, - 0x8c, 0x63, 0x6a, 0x1f, 0x15, 0x60, 0x26, 0xde, 0xe0, 0x0b, 0xb7, 0xda, 0x1d, 0x24, 0xad, 0x76, - 0x2b, 0x13, 0x78, 0xef, 0x23, 0x2c, 0x75, 0xdf, 0x6e, 0xc6, 0x1f, 0x8d, 0x5b, 0xe7, 0xe2, 0x06, - 0x89, 
0xc2, 0x63, 0x0d, 0x12, 0x9f, 0xfc, 0xa4, 0x4c, 0xa3, 0x34, 0xe9, 0xf2, 0x33, 0xac, 0x49, - 0x7f, 0x9c, 0x99, 0x9d, 0x62, 0xd9, 0x89, 0xaa, 0x39, 0xb2, 0x13, 0xf5, 0xc2, 0xec, 0x44, 0xb5, - 0x89, 0x4d, 0x6c, 0x67, 0xc9, 0x50, 0x54, 0x7f, 0xaa, 0x19, 0x8a, 0x1a, 0x17, 0x95, 0xa1, 0x08, - 0xf2, 0x66, 0x28, 0xfa, 0x4e, 0x01, 0x66, 0xcc, 0xc4, 0xa9, 0x5c, 0x79, 0x8e, 0x7d, 0xfc, 0xe5, - 0x2c, 0x79, 0xc8, 0x57, 0x1c, 0xcb, 0x4a, 0x96, 0x61, 0x4a, 0x64, 0x56, 0x5e, 0xa0, 0xa9, 0x8f, - 0x25, 0x2f, 0x10, 0xf9, 0x65, 0x68, 0xd8, 0x6a, 0xad, 0x93, 0xd9, 0x12, 0x37, 0x27, 0x32, 0x24, - 0x25, 0x66, 0x14, 0xf9, 0x1f, 0x16, 0x61, 0x24, 0x51, 0xfb, 0x83, 0x5a, 0x7c, 0x41, 0x7c, 0xda, - 0x7e, 0x81, 0xd7, 0x92, 0x7e, 0x81, 0x1b, 0x69, 0xbf, 0xc0, 0xd0, 0x6a, 0x2e, 0x7d, 0x03, 0x3f, - 0x15, 0x5b, 0x27, 0x4a, 0x3c, 0x21, 0x51, 0x38, 0xe4, 0x32, 0xd6, 0x8a, 0x65, 0x98, 0x95, 0x4a, - 0x80, 0x22, 0xf2, 0x49, 0x76, 0x3a, 0x8a, 0xe4, 0x5a, 0x4d, 0x92, 0x31, 0xcd, 0xcf, 0x04, 0xfa, - 0x2a, 0x2f, 0xad, 0xd8, 0x0d, 0x45, 0x63, 0x5c, 0xe5, 0x8c, 0x0d, 0x39, 0xd8, 0xce, 0xc9, 0xa3, - 0xba, 0x2f, 0xad, 0xfb, 0xb1, 0x9d, 0x13, 0xf2, 0x52, 0x94, 0xd4, 0xb8, 0x8b, 0xa3, 0xf6, 0x04, - 0x17, 0x87, 0x0e, 0x4d, 0x5b, 0xf7, 0x03, 0x31, 0x98, 0x4c, 0x39, 0x9b, 0xfc, 0x89, 0xb3, 0xad, - 0xfb, 0x4c, 0x97, 0x88, 0x14, 0xf8, 0xcd, 0x08, 0x06, 0xe3, 0x98, 0xc4, 0x84, 0x29, 0xf6, 0x97, - 0xcf, 0x2c, 0xe6, 0x72, 0x20, 0xb3, 0xb7, 0x9d, 0x47, 0x46, 0x68, 0x99, 0xdb, 0x8c, 0xe1, 0x60, - 0x02, 0x75, 0x84, 0x17, 0x04, 0xc6, 0xf1, 0x82, 0x90, 0x9f, 0x13, 0x8a, 0xdb, 0x71, 0xf8, 0x5a, - 0x9b, 0xfc, 0xb5, 0x86, 0x51, 0xa0, 0x18, 0x27, 0x62, 0x92, 0x97, 0x8d, 0x8a, 0x81, 0xec, 0x06, - 0x55, 0x7d, 0x2a, 0x39, 0x2a, 0x76, 0x93, 0x64, 0x4c, 0xf3, 0x93, 0x6d, 0xb8, 0x1c, 0x16, 0xc5, - 0x9b, 0x31, 0xcd, 0x71, 0xc2, 0xb0, 0xbc, 0xdd, 0x0c, 0x1e, 0xcc, 0xac, 0xc9, 0xcf, 0xb9, 0x0c, - 0x3c, 0x8f, 0x3a, 0xc1, 0x6d, 0xdd, 0x3f, 0x90, 0xf1, 0x7d, 0xd1, 0x39, 0x97, 0x88, 0x84, 0x71, - 0x3e, 0xb2, 0x04, 0x20, 0xe0, 0x78, 0xad, 
0xd9, 0x64, 0x08, 0xed, 0x6e, 0x48, 0xc1, 0x18, 0x97, - 0xf6, 0x9d, 0x06, 0x34, 0xef, 0xea, 0x81, 0x75, 0x44, 0xb9, 0xcb, 0xf2, 0x62, 0xfc, 0x46, 0x7f, - 0xad, 0x00, 0x57, 0x92, 0x71, 0xa9, 0x17, 0xe8, 0x3c, 0xe2, 0xf9, 0x8c, 0x30, 0x53, 0x1a, 0x8e, - 0x68, 0x05, 0x77, 0x23, 0x0d, 0x85, 0xb9, 0x5e, 0xb4, 0x1b, 0xa9, 0x33, 0x4a, 0x20, 0x8e, 0x6e, - 0xcb, 0x27, 0xc5, 0x8d, 0xf4, 0x6c, 0x27, 0xe0, 0x4c, 0x39, 0xb9, 0x6a, 0xcf, 0x8c, 0x93, 0xab, - 0xfe, 0x4c, 0x68, 0xfd, 0xfd, 0x98, 0x93, 0xab, 0x91, 0x33, 0xd8, 0x4a, 0x1e, 0xe5, 0x10, 0x68, - 0xa3, 0x9c, 0x65, 0x3c, 0x0b, 0x83, 0x72, 0x3e, 0x30, 0x65, 0x79, 0x4f, 0xf7, 0x2d, 0x43, 0xaa, - 0x1d, 0x39, 0x12, 0x0e, 0xab, 0x44, 0x84, 0x22, 0x26, 0x83, 0xff, 0x45, 0x81, 0x1d, 0xe5, 0x5d, - 0x2c, 0xe6, 0xca, 0xbb, 0x48, 0x56, 0xa0, 0xec, 0x1c, 0xd2, 0xe3, 0xf3, 0xe5, 0x33, 0xe0, 0x9b, - 0xc0, 0xbb, 0x77, 0xe8, 0x31, 0xf2, 0xca, 0xda, 0xf7, 0x8a, 0x00, 0xec, 0xf1, 0xcf, 0xe6, 0x6e, - 0xfa, 0x09, 0xa8, 0xf9, 0x03, 0x6e, 0x18, 0x92, 0x0a, 0x53, 0x14, 0xa1, 0x26, 0x8a, 0x51, 0xd1, - 0xc9, 0x67, 0xa1, 0xf2, 0xcd, 0x01, 0x1d, 0xa8, 0xd8, 0x89, 0x70, 0xdf, 0xf0, 0x35, 0x56, 0x88, - 0x82, 0x76, 0x71, 0xa6, 0x63, 0xe5, 0x96, 0xaa, 0x5c, 0x94, 0x5b, 0xaa, 0x01, 0xb5, 0xbb, 0x2e, - 0x0f, 0x78, 0xd5, 0xfe, 0x7b, 0x11, 0x20, 0x0a, 0x28, 0x24, 0xbf, 0x51, 0x80, 0x17, 0xc2, 0x0f, - 0x2e, 0x10, 0xdb, 0x3f, 0x9e, 0xe3, 0x3b, 0xb7, 0x8b, 0x2a, 0xeb, 0x63, 0xe7, 0x33, 0xd0, 0x76, - 0x96, 0x38, 0xcc, 0x6e, 0x05, 0x41, 0xa8, 0xd3, 0x5e, 0x3f, 0x38, 0x5e, 0xb5, 0x3c, 0x39, 0x02, - 0x33, 0xe3, 0x56, 0x6f, 0x49, 0x1e, 0x51, 0x55, 0xda, 0x28, 0xf8, 0x47, 0xa4, 0x28, 0x18, 0xe2, - 0x90, 0x03, 0xa8, 0x3b, 0xee, 0x7b, 0x3e, 0xeb, 0x0e, 0x39, 0x1c, 0xdf, 0x1c, 0xbf, 0xcb, 0x45, - 0xb7, 0x0a, 0x97, 0x86, 0xfc, 0x83, 0x35, 0x47, 0x76, 0xf6, 0xaf, 0x17, 0xe1, 0x52, 0x46, 0x3f, - 0x90, 0x37, 0x61, 0x4e, 0xc6, 0x6e, 0x46, 0xc9, 0xee, 0x0b, 0x51, 0xb2, 0xfb, 0x4e, 0x8a, 0x86, - 0x43, 0xdc, 0xe4, 0x3d, 0x00, 0xdd, 0x30, 0xa8, 0xef, 0x6f, 0xb9, 0xa6, 0xda, 
0x0f, 0xbc, 0xc1, - 0xd4, 0x97, 0xe5, 0xb0, 0xf4, 0xd1, 0x49, 0xeb, 0xa7, 0xb3, 0xc2, 0xb1, 0x53, 0xfd, 0x1c, 0x55, - 0xc0, 0x18, 0x24, 0xf9, 0x06, 0x80, 0xb0, 0x01, 0x84, 0x19, 0x23, 0x9e, 0x60, 0x38, 0x5b, 0x50, - 0x09, 0xc9, 0x16, 0xbe, 0x36, 0xd0, 0x9d, 0xc0, 0x0a, 0x8e, 0x45, 0x82, 0x9e, 0xfb, 0x21, 0x0a, - 0xc6, 0x10, 0xb5, 0x7f, 0x56, 0x84, 0xba, 0x72, 0x0b, 0x3c, 0x05, 0x5b, 0x70, 0x37, 0x61, 0x0b, - 0x9e, 0x50, 0x00, 0x76, 0x96, 0x25, 0xd8, 0x4d, 0x59, 0x82, 0xd7, 0xf3, 0x8b, 0x7a, 0xbc, 0x1d, - 0xf8, 0xb7, 0x8b, 0x30, 0xa3, 0x58, 0xf3, 0x5a, 0x68, 0xbf, 0x02, 0xb3, 0x22, 0x70, 0x62, 0x4b, - 0x7f, 0x28, 0x72, 0x15, 0xf1, 0x0e, 0x2b, 0x8b, 0x98, 0xe7, 0x76, 0x92, 0x84, 0x69, 0x5e, 0x36, - 0xac, 0x45, 0xd1, 0x2e, 0xdb, 0x84, 0x09, 0x57, 0xab, 0xd8, 0x6f, 0xf2, 0x61, 0xdd, 0x4e, 0xd1, - 0x70, 0x88, 0x3b, 0x6d, 0x22, 0x2e, 0x5f, 0x80, 0x89, 0xf8, 0x3f, 0x14, 0x60, 0x2a, 0xea, 0xaf, - 0x0b, 0x37, 0x10, 0xef, 0x27, 0x0d, 0xc4, 0xcb, 0xb9, 0x87, 0xc3, 0x08, 0xf3, 0xf0, 0x5f, 0xaa, - 0x41, 0xe2, 0x1c, 0x00, 0xd9, 0x83, 0x6b, 0x56, 0x66, 0x34, 0x63, 0x6c, 0xb6, 0x09, 0x0f, 0xb6, - 0x6f, 0x8c, 0xe4, 0xc4, 0xc7, 0xa0, 0x90, 0x01, 0xd4, 0x8f, 0xa8, 0x17, 0x58, 0x06, 0x55, 0xcf, - 0xb7, 0x9e, 0x5b, 0x25, 0x93, 0x46, 0xf0, 0xb0, 0x4f, 0xef, 0x4b, 0x01, 0x18, 0x8a, 0x22, 0x7b, - 0x50, 0xa1, 0x66, 0x97, 0xaa, 0xec, 0x51, 0x39, 0xb3, 0xf0, 0x86, 0xfd, 0xc9, 0xfe, 0xf9, 0x28, - 0xa0, 0x89, 0x1f, 0x37, 0x34, 0x95, 0x73, 0x2a, 0x58, 0x67, 0x34, 0x2f, 0x91, 0xc3, 0xd0, 0xda, - 0x5a, 0x99, 0xd0, 0xe4, 0xf1, 0x18, 0x5b, 0xab, 0x0f, 0x8d, 0x07, 0x7a, 0x40, 0xbd, 0x9e, 0xee, - 0x1d, 0xca, 0xdd, 0xc6, 0xf8, 0x4f, 0xf8, 0xb6, 0x42, 0x8a, 0x9e, 0x30, 0x2c, 0xc2, 0x48, 0x0e, - 0x71, 0xa1, 0x11, 0x48, 0xf5, 0x59, 0x99, 0x94, 0xc7, 0x17, 0xaa, 0x14, 0x71, 0x5f, 0x9e, 0x07, - 0x50, 0x7f, 0x31, 0x92, 0x41, 0x8e, 0x12, 0x29, 0xdb, 0x45, 0xa2, 0xfe, 0x76, 0x0e, 0xd7, 0x84, - 0x84, 0x8a, 0x96, 0x9b, 0xec, 0xd4, 0xef, 0xda, 0xff, 0xaa, 0x44, 0xd3, 0xf2, 0xd3, 0xb6, 0x13, - 0x7e, 0x31, 0x69, 
0x27, 0xbc, 0x9e, 0xb6, 0x13, 0xa6, 0xfc, 0xf1, 0xe7, 0x8f, 0x20, 0x4e, 0x99, - 0xd7, 0xca, 0x17, 0x60, 0x5e, 0x7b, 0x05, 0x9a, 0x47, 0x7c, 0x26, 0x10, 0xa9, 0xa8, 0x2a, 0x7c, - 0x19, 0xe1, 0x33, 0xfb, 0xfd, 0xa8, 0x18, 0xe3, 0x3c, 0xac, 0x8a, 0xbc, 0xa4, 0x26, 0xcc, 0xda, - 0x2c, 0xab, 0x74, 0xa2, 0x62, 0x8c, 0xf3, 0xf0, 0xe0, 0x43, 0xcb, 0x39, 0x14, 0x15, 0x6a, 0xbc, - 0x82, 0x08, 0x3e, 0x54, 0x85, 0x18, 0xd1, 0xc9, 0x4d, 0xa8, 0x0f, 0xcc, 0x7d, 0xc1, 0x5b, 0xe7, - 0xbc, 0x5c, 0xc3, 0xdc, 0x5d, 0x5d, 0x93, 0xa9, 0xb1, 0x14, 0x95, 0xb5, 0xa4, 0xa7, 0xf7, 0x15, - 0x81, 0xef, 0x0d, 0x65, 0x4b, 0xb6, 0xa2, 0x62, 0x8c, 0xf3, 0x90, 0x9f, 0x85, 0x19, 0x8f, 0x9a, - 0x03, 0x83, 0x86, 0xb5, 0x80, 0xd7, 0x92, 0x39, 0x43, 0xe3, 0x14, 0x4c, 0x71, 0x8e, 0x30, 0x12, - 0x36, 0xc7, 0x32, 0x12, 0x7e, 0x15, 0x66, 0x4c, 0x4f, 0xb7, 0x1c, 0x6a, 0xde, 0x73, 0x78, 0xd0, - 0x85, 0x0c, 0x81, 0x0c, 0x0d, 0xf4, 0xab, 0x09, 0x2a, 0xa6, 0xb8, 0xb5, 0x7f, 0x55, 0x84, 0x8a, - 0xc8, 0x64, 0xba, 0x01, 0x97, 0x2c, 0xc7, 0x0a, 0x2c, 0xdd, 0x5e, 0xa5, 0xb6, 0x7e, 0x9c, 0x0c, - 0x3c, 0x79, 0x91, 0x6d, 0xb4, 0x37, 0x86, 0xc9, 0x98, 0x55, 0x87, 0x75, 0x4e, 0x20, 0x96, 0x6f, - 0x85, 0x22, 0xec, 0x68, 0x22, 0xfd, 0x75, 0x82, 0x82, 0x29, 0x4e, 0xa6, 0x0c, 0xf5, 0x33, 0xa2, - 0x4a, 0xb8, 0x32, 0x94, 0x8c, 0x25, 0x49, 0xf2, 0x71, 0x25, 0x7d, 0xc0, 0x15, 0xe2, 0xf0, 0xa0, - 0x91, 0x0c, 0x1c, 0x13, 0x4a, 0x7a, 0x8a, 0x86, 0x43, 0xdc, 0x0c, 0x61, 0x5f, 0xb7, 0xec, 0x81, - 0x47, 0x23, 0x84, 0x4a, 0x84, 0xb0, 0x96, 0xa2, 0xe1, 0x10, 0xb7, 0xf6, 0x3f, 0x0a, 0x40, 0x86, - 0x8f, 0x4e, 0x90, 0x03, 0xa8, 0x3a, 0xdc, 0x16, 0x99, 0x3b, 0xeb, 0x7e, 0xcc, 0xa4, 0x29, 0x16, - 0x09, 0x59, 0x20, 0xf1, 0x89, 0x03, 0x75, 0xfa, 0x30, 0xa0, 0x9e, 0x13, 0x1e, 0xa5, 0x9a, 0x4c, - 0x86, 0x7f, 0xb1, 0x37, 0x93, 0xc8, 0x18, 0xca, 0xd0, 0x7e, 0xbf, 0x08, 0xcd, 0x18, 0xdf, 0x93, - 0xb6, 0xf8, 0x3c, 0x9b, 0x83, 0x30, 0x01, 0xee, 0x7a, 0xb6, 0x9c, 0xef, 0x62, 0xd9, 0x1c, 0x24, - 0x09, 0x37, 0x31, 0xce, 0x47, 0x96, 0x00, 0x7a, 0xba, 
0x1f, 0x50, 0x8f, 0xeb, 0x42, 0xa9, 0x1c, - 0x0a, 0x5b, 0x21, 0x05, 0x63, 0x5c, 0xe4, 0x86, 0xbc, 0xa3, 0xa1, 0x9c, 0xcc, 0x79, 0x39, 0xe2, - 0x02, 0x86, 0xca, 0x04, 0x2e, 0x60, 0x20, 0x5d, 0x98, 0x53, 0xad, 0x56, 0xd4, 0xf3, 0x65, 0x44, - 0x14, 0x03, 0x35, 0x05, 0x81, 0x43, 0xa0, 0xda, 0xf7, 0x0a, 0x30, 0x9d, 0x30, 0x40, 0x89, 0x6c, - 0x95, 0xea, 0xe0, 0x4f, 0x22, 0x5b, 0x65, 0xec, 0xbc, 0xce, 0xe7, 0xa1, 0x2a, 0x3a, 0x28, 0x1d, - 0xcf, 0x2b, 0xba, 0x10, 0x25, 0x95, 0xad, 0x2c, 0xd2, 0xc4, 0x9d, 0x5e, 0x59, 0xa4, 0x0d, 0x1c, - 0x15, 0x5d, 0x78, 0x8e, 0x44, 0xeb, 0x64, 0x4f, 0xc7, 0x3c, 0x47, 0xa2, 0x1c, 0x43, 0x0e, 0xed, - 0x1f, 0xf1, 0x76, 0x07, 0xde, 0x71, 0xb8, 0xb3, 0xee, 0x42, 0x4d, 0xc6, 0x70, 0xca, 0x4f, 0xe3, - 0xcd, 0x1c, 0x56, 0x31, 0x8e, 0x23, 0xa3, 0x15, 0x75, 0xe3, 0xf0, 0xde, 0xfe, 0x3e, 0x2a, 0x74, - 0x72, 0x0b, 0x1a, 0xae, 0x23, 0xbf, 0x60, 0xf9, 0xf8, 0x5f, 0x60, 0x2b, 0xc7, 0x3d, 0x55, 0xf8, - 0xe8, 0xa4, 0x75, 0x25, 0xfc, 0x93, 0x68, 0x24, 0x46, 0x35, 0xb5, 0x3f, 0x57, 0x80, 0x17, 0xd0, - 0xb5, 0x6d, 0xcb, 0xe9, 0x26, 0x3d, 0x9f, 0xc4, 0x86, 0x99, 0x9e, 0xfe, 0x70, 0xd7, 0xd1, 0x8f, - 0x74, 0xcb, 0xd6, 0xf7, 0x6c, 0xfa, 0xc4, 0x9d, 0xf1, 0x20, 0xb0, 0xec, 0x05, 0x71, 0x67, 0xe5, - 0xc2, 0x86, 0x13, 0xdc, 0xf3, 0x3a, 0x81, 0x67, 0x39, 0x5d, 0x31, 0x4b, 0x6e, 0x25, 0xb0, 0x30, - 0x85, 0xad, 0xfd, 0x41, 0x09, 0x78, 0x1c, 0x21, 0xf9, 0x12, 0x34, 0x7a, 0xd4, 0x38, 0xd0, 0x1d, - 0xcb, 0x57, 0x79, 0x7f, 0xaf, 0xb2, 0xe7, 0xda, 0x52, 0x85, 0x8f, 0xd8, 0xab, 0x58, 0xee, 0x6c, - 0xf2, 0xa3, 0x3a, 0x11, 0x2f, 0x31, 0xa0, 0xda, 0xf5, 0x7d, 0xbd, 0x6f, 0xe5, 0x0e, 0x31, 0x11, - 0x79, 0x56, 0xc5, 0x74, 0x24, 0x7e, 0xa3, 0x84, 0x26, 0x06, 0x54, 0xfa, 0xb6, 0x6e, 0x39, 0xb9, - 0xef, 0x58, 0x63, 0x4f, 0xb0, 0xcd, 0x90, 0x84, 0xa9, 0x92, 0xff, 0x44, 0x81, 0x4d, 0x06, 0xd0, - 0xf4, 0x0d, 0x4f, 0xef, 0xf9, 0x07, 0xfa, 0xd2, 0xab, 0xaf, 0xe5, 0x56, 0xfe, 0x23, 0x51, 0x42, - 0x17, 0x59, 0xc1, 0xe5, 0xad, 0xce, 0xed, 0xe5, 0xa5, 0x57, 0x5f, 0xc3, 0xb8, 0x9c, 0xb8, 
0xd8, - 0x57, 0x5f, 0x59, 0x92, 0x33, 0xc8, 0xc4, 0xc5, 0xbe, 0xfa, 0xca, 0x12, 0xc6, 0xe5, 0x68, 0xff, - 0xbb, 0x00, 0x8d, 0x90, 0x97, 0xec, 0x02, 0xb0, 0xb9, 0x4c, 0x66, 0x46, 0x3d, 0xd7, 0x7d, 0x34, - 0xdc, 0xda, 0xb3, 0x1b, 0x56, 0xc6, 0x18, 0x50, 0x46, 0xea, 0xd8, 0xe2, 0xa4, 0x53, 0xc7, 0x2e, - 0x42, 0xe3, 0x40, 0x77, 0x4c, 0xff, 0x40, 0x3f, 0x14, 0x53, 0x7a, 0x2c, 0x99, 0xf2, 0x6d, 0x45, - 0xc0, 0x88, 0x47, 0xfb, 0x27, 0x55, 0x10, 0x71, 0x21, 0x6c, 0xd2, 0x31, 0x2d, 0x5f, 0x1c, 0x7e, - 0x28, 0xf0, 0x9a, 0xe1, 0xa4, 0xb3, 0x2a, 0xcb, 0x31, 0xe4, 0x20, 0x57, 0xa1, 0xd4, 0xb3, 0x1c, - 0xa9, 0x81, 0x70, 0x43, 0xee, 0x96, 0xe5, 0x20, 0x2b, 0xe3, 0x24, 0xfd, 0xa1, 0xd4, 0x30, 0x04, - 0x49, 0x7f, 0x88, 0xac, 0x8c, 0x7c, 0x05, 0x66, 0x6d, 0xd7, 0x3d, 0x64, 0xd3, 0x87, 0x52, 0x44, - 0x84, 0x57, 0x9d, 0x9b, 0x56, 0x36, 0x93, 0x24, 0x4c, 0xf3, 0x92, 0x5d, 0x78, 0xf1, 0x03, 0xea, - 0xb9, 0x72, 0xbe, 0xec, 0xd8, 0x94, 0xf6, 0x15, 0x8c, 0x50, 0x8d, 0x79, 0x94, 0xec, 0x2f, 0x64, - 0xb3, 0xe0, 0xa8, 0xba, 0x3c, 0xde, 0x5e, 0xf7, 0xba, 0x34, 0xd8, 0xf6, 0x5c, 0xa6, 0xbb, 0x58, - 0x4e, 0x57, 0xc1, 0x56, 0x23, 0xd8, 0x9d, 0x6c, 0x16, 0x1c, 0x55, 0x97, 0xbc, 0x03, 0xf3, 0x82, - 0x24, 0xd4, 0x96, 0x65, 0x31, 0xcd, 0x58, 0xb6, 0xba, 0x9a, 0x74, 0x5a, 0xf8, 0xcb, 0x76, 0x46, - 0xf0, 0xe0, 0xc8, 0xda, 0xe4, 0x2d, 0x98, 0x53, 0xde, 0xd2, 0x6d, 0xea, 0x75, 0xc2, 0x58, 0xa1, - 0xe9, 0xf6, 0xf5, 0xd3, 0x93, 0xd6, 0xb5, 0x55, 0xda, 0xf7, 0xa8, 0x11, 0xf7, 0x3a, 0x2b, 0x2e, - 0x1c, 0xaa, 0x47, 0x10, 0xae, 0xf0, 0x80, 0xa0, 0xdd, 0xfe, 0x8a, 0xeb, 0xda, 0xa6, 0xfb, 0xc0, - 0x51, 0xcf, 0x2e, 0x14, 0x76, 0xee, 0x20, 0xed, 0x64, 0x72, 0xe0, 0x88, 0x9a, 0xec, 0xc9, 0x39, - 0x65, 0xd5, 0x7d, 0xe0, 0xa4, 0x51, 0x21, 0x7a, 0xf2, 0xce, 0x08, 0x1e, 0x1c, 0x59, 0x9b, 0xac, - 0x01, 0x49, 0x3f, 0xc1, 0x6e, 0x5f, 0xba, 0xf0, 0xaf, 0x88, 0x24, 0x47, 0x69, 0x2a, 0x66, 0xd4, - 0x20, 0x9b, 0x70, 0x39, 0x5d, 0xca, 0xc4, 0x49, 0x6f, 0x3e, 0x4f, 0x6f, 0x8c, 0x19, 0x74, 0xcc, - 0xac, 0xa5, 0xfd, 0xd3, 0x22, 
0x4c, 0x27, 0xb2, 0x62, 0x3c, 0x73, 0xd9, 0x07, 0xd8, 0xe6, 0xa1, - 0xe7, 0x77, 0x37, 0x56, 0x6f, 0x53, 0xdd, 0xa4, 0xde, 0x1d, 0xaa, 0x32, 0x98, 0x88, 0x65, 0x31, - 0x41, 0xc1, 0x14, 0x27, 0xd9, 0x87, 0x8a, 0xf0, 0x13, 0xe4, 0xbd, 0xd9, 0x48, 0xf5, 0x11, 0x77, - 0x16, 0xc8, 0xeb, 0xc0, 0x5c, 0x8f, 0xa2, 0x80, 0xd7, 0x02, 0x98, 0x8a, 0x73, 0xb0, 0x89, 0x24, - 0x52, 0x7b, 0x6b, 0x09, 0x95, 0x77, 0x03, 0x4a, 0x41, 0x30, 0x6e, 0x5e, 0x03, 0xe1, 0x77, 0xda, - 0xd9, 0x44, 0x86, 0xa1, 0xed, 0xb3, 0x77, 0xe7, 0xfb, 0x96, 0xeb, 0xc8, 0x24, 0xf7, 0xbb, 0x50, - 0x93, 0xbb, 0xa7, 0x31, 0xf3, 0x32, 0x70, 0x5d, 0x49, 0x99, 0x5d, 0x15, 0x96, 0xf6, 0x1f, 0x8b, - 0xd0, 0x08, 0xcd, 0x24, 0x67, 0x48, 0x1e, 0xef, 0x42, 0x23, 0x0c, 0x68, 0xcc, 0x7d, 0x6d, 0x6b, - 0x14, 0x67, 0xc7, 0x77, 0xf6, 0xe1, 0x5f, 0x8c, 0x64, 0xc4, 0x83, 0x25, 0x4b, 0x39, 0x82, 0x25, - 0xfb, 0x50, 0x0b, 0x3c, 0xab, 0xdb, 0x95, 0xbb, 0x84, 0x3c, 0xd1, 0x92, 0x61, 0x77, 0xed, 0x08, - 0x40, 0xd9, 0xb3, 0xe2, 0x0f, 0x2a, 0x31, 0xda, 0xfb, 0x30, 0x97, 0xe6, 0xe4, 0x2a, 0xb4, 0x71, - 0x40, 0xcd, 0x81, 0xad, 0xfa, 0x38, 0x52, 0xa1, 0x65, 0x39, 0x86, 0x1c, 0xe4, 0x26, 0xd4, 0xd9, - 0x6b, 0xfa, 0xc0, 0x75, 0x94, 0x1a, 0xcb, 0x77, 0x23, 0x3b, 0xb2, 0x0c, 0x43, 0xaa, 0xf6, 0xdf, - 0x4a, 0x70, 0x35, 0x32, 0x76, 0x6d, 0xe9, 0x8e, 0xde, 0x3d, 0xc3, 0x5d, 0x9d, 0x9f, 0x9e, 0x74, - 0x3b, 0xef, 0x0d, 0x20, 0xa5, 0x67, 0xe0, 0x06, 0x90, 0xff, 0x5b, 0x04, 0x1e, 0x7c, 0x4d, 0xbe, - 0x05, 0x53, 0x7a, 0xec, 0x9a, 0x66, 0xf9, 0x3a, 0x6f, 0xe5, 0x7e, 0x9d, 0x3c, 0xc6, 0x3b, 0x0c, - 0x80, 0x8b, 0x97, 0x62, 0x42, 0x20, 0x71, 0xa1, 0xbe, 0xaf, 0xdb, 0x36, 0xd3, 0x85, 0x72, 0x3b, - 0xef, 0x12, 0xc2, 0xf9, 0x30, 0x5f, 0x93, 0xd0, 0x18, 0x0a, 0x21, 0xdf, 0x29, 0xc0, 0xb4, 0x17, - 0xdf, 0xae, 0xc9, 0x17, 0x92, 0x27, 0xb4, 0x23, 0x86, 0x16, 0x0f, 0xb7, 0x8b, 0xef, 0x09, 0x93, - 0x32, 0xb5, 0xff, 0x5a, 0x80, 0xe9, 0x8e, 0x6d, 0x99, 0x96, 0xd3, 0xbd, 0xc0, 0x0b, 0x48, 0xee, - 0x41, 0xc5, 0xb7, 0x2d, 0x93, 0x8e, 0xb9, 0x9a, 0x88, 0x75, 0x8c, 
0x01, 0xa0, 0xc0, 0x49, 0xde, - 0x68, 0x52, 0x3a, 0xc3, 0x8d, 0x26, 0x7f, 0x54, 0x05, 0x79, 0x8c, 0x80, 0x0c, 0xa0, 0xd1, 0x55, - 0x17, 0x25, 0xc8, 0x67, 0xbc, 0x9d, 0x23, 0xc9, 0x66, 0xe2, 0xca, 0x05, 0x31, 0xf7, 0x87, 0x85, - 0x18, 0x49, 0x22, 0x34, 0x79, 0x3f, 0xf8, 0x6a, 0xce, 0xfb, 0xc1, 0x85, 0xb8, 0xe1, 0x1b, 0xc2, - 0x75, 0x28, 0x1f, 0x04, 0x41, 0x5f, 0x0e, 0xa6, 0xf1, 0xcf, 0x89, 0x44, 0x79, 0x9e, 0x84, 0x4e, - 0xc4, 0xfe, 0x23, 0x87, 0x66, 0x22, 0x1c, 0x3d, 0xbc, 0x85, 0x71, 0x25, 0x57, 0x18, 0x49, 0x5c, - 0x04, 0xfb, 0x8f, 0x1c, 0x9a, 0xfc, 0x12, 0x34, 0x03, 0x4f, 0x77, 0xfc, 0x7d, 0xd7, 0xeb, 0x51, - 0x4f, 0xee, 0x51, 0xd7, 0x72, 0x5c, 0x91, 0xbd, 0x13, 0xa1, 0x09, 0x93, 0x6c, 0xa2, 0x08, 0xe3, - 0xd2, 0xc8, 0x21, 0xd4, 0x07, 0xa6, 0x68, 0x98, 0x34, 0x83, 0x2d, 0xe7, 0xb9, 0xf5, 0x3c, 0x16, - 0x24, 0xa2, 0xfe, 0x61, 0x28, 0x20, 0x79, 0xe1, 0x68, 0x6d, 0x52, 0x17, 0x8e, 0xc6, 0x47, 0x63, - 0x56, 0x12, 0x1a, 0xd2, 0x93, 0x7a, 0xad, 0xd3, 0x95, 0x31, 0x6e, 0x6b, 0xb9, 0x55, 0x4e, 0x21, - 0xb2, 0x19, 0xea, 0xc6, 0x4e, 0x17, 0x95, 0x0c, 0xad, 0x07, 0xd2, 0x77, 0x44, 0x8c, 0xc4, 0x65, - 0x4d, 0xe2, 0x64, 0xe4, 0xe2, 0xd9, 0xe6, 0x83, 0xf0, 0xd6, 0xa0, 0x58, 0xb2, 0xf8, 0xcc, 0x5b, - 0x99, 0xb4, 0xff, 0x54, 0x84, 0xd2, 0xce, 0x66, 0x47, 0x24, 0x80, 0xe5, 0xd7, 0xbf, 0xd1, 0xce, - 0xa1, 0xd5, 0xbf, 0x4f, 0x3d, 0x6b, 0xff, 0x58, 0x6e, 0xbd, 0x63, 0x09, 0x60, 0xd3, 0x1c, 0x98, - 0x51, 0x8b, 0xbc, 0x0b, 0x53, 0x86, 0xbe, 0x42, 0xbd, 0x60, 0x1c, 0xc3, 0x02, 0x3f, 0x02, 0xbe, - 0xb2, 0x1c, 0x55, 0xc7, 0x04, 0x18, 0xd9, 0x05, 0x30, 0x22, 0xe8, 0xd2, 0xb9, 0xcd, 0x21, 0x31, - 0xe0, 0x18, 0x10, 0x41, 0x68, 0x1c, 0x32, 0x56, 0x8e, 0x5a, 0x3e, 0x0f, 0x2a, 0x1f, 0x39, 0x77, - 0x54, 0x5d, 0x8c, 0x60, 0x34, 0x07, 0xa6, 0x13, 0x37, 0x38, 0x91, 0x2f, 0x43, 0xdd, 0xed, 0xc7, - 0xa6, 0xd3, 0x06, 0x8f, 0xa6, 0xad, 0xdf, 0x93, 0x65, 0x8f, 0x4e, 0x5a, 0xd3, 0x9b, 0x6e, 0xd7, - 0x32, 0x54, 0x01, 0x86, 0xec, 0x44, 0x83, 0x2a, 0x3f, 0xb7, 0xa9, 0xee, 0x6f, 0xe2, 0x6b, 0x07, - 0xbf, 
0x62, 0xc5, 0x47, 0x49, 0xd1, 0x7e, 0xa5, 0x0c, 0x91, 0xc7, 0x95, 0xf8, 0x50, 0x15, 0x67, - 0x46, 0xe4, 0xcc, 0x7d, 0xa1, 0xc7, 0x53, 0xa4, 0x28, 0xd2, 0x85, 0xd2, 0xfb, 0xee, 0x5e, 0xee, - 0x89, 0x3b, 0x96, 0xb0, 0x41, 0xd8, 0xca, 0x62, 0x05, 0xc8, 0x24, 0x90, 0xbf, 0x5e, 0x80, 0xe7, - 0xfd, 0xb4, 0xea, 0x2b, 0x87, 0x03, 0xe6, 0xd7, 0xf1, 0xd3, 0xca, 0xb4, 0x0c, 0x7b, 0x1e, 0x45, - 0xc6, 0xe1, 0xb6, 0xb0, 0xfe, 0x17, 0xae, 0x50, 0x39, 0x9c, 0xd6, 0x73, 0xde, 0x2f, 0x9b, 0xec, - 0xff, 0x64, 0x19, 0x4a, 0x51, 0xda, 0xb7, 0x8b, 0xd0, 0x8c, 0xcd, 0xd6, 0xb9, 0xaf, 0x05, 0x7b, - 0x98, 0xba, 0x16, 0x6c, 0x7b, 0xfc, 0xc8, 0x80, 0xa8, 0x55, 0x17, 0x7d, 0x33, 0xd8, 0xbf, 0x28, - 0x42, 0x69, 0x77, 0x75, 0x2d, 0xb9, 0x69, 0x2d, 0x3c, 0x85, 0x4d, 0xeb, 0x01, 0xd4, 0xf6, 0x06, - 0x96, 0x1d, 0x58, 0x4e, 0xee, 0x94, 0x32, 0xea, 0x16, 0x35, 0xe9, 0xeb, 0x10, 0xa8, 0xa8, 0xe0, - 0x49, 0x17, 0x6a, 0x5d, 0x91, 0xd3, 0x33, 0x77, 0xbc, 0xa4, 0xcc, 0x0d, 0x2a, 0x04, 0xc9, 0x3f, - 0xa8, 0xd0, 0xb5, 0x63, 0xa8, 0xee, 0xae, 0x4a, 0xb5, 0xff, 0xe9, 0xf6, 0xa6, 0xf6, 0x4b, 0x10, - 0x6a, 0x01, 0x4f, 0x5f, 0xf8, 0xef, 0x16, 0x20, 0xa9, 0xf8, 0x3c, 0xfd, 0xd1, 0x74, 0x98, 0x1e, - 0x4d, 0xab, 0x93, 0xf8, 0xf8, 0xb2, 0x07, 0x94, 0xf6, 0xef, 0x0b, 0x90, 0x3a, 0xe8, 0x47, 0x5e, - 0x93, 0xe9, 0xe1, 0x92, 0x81, 0x69, 0x2a, 0x3d, 0x1c, 0x49, 0x72, 0xc7, 0xd2, 0xc4, 0x7d, 0xc8, - 0xb6, 0x6b, 0x71, 0x07, 0x9a, 0x6c, 0xfe, 0xdd, 0xf1, 0xb7, 0x6b, 0x59, 0xee, 0x38, 0x19, 0x3c, - 0x19, 0x27, 0x61, 0x52, 0xae, 0xf6, 0x8f, 0x8b, 0x50, 0x7d, 0x6a, 0xb9, 0x0d, 0x68, 0x22, 0x9e, - 0x75, 0x25, 0xe7, 0x6c, 0x3f, 0x32, 0x9a, 0xb5, 0x97, 0x8a, 0x66, 0xcd, 0x7b, 0x6d, 0xf9, 0x13, - 0x62, 0x59, 0xff, 0x6d, 0x01, 0xe4, 0x5a, 0xb3, 0xe1, 0xf8, 0x81, 0xee, 0x18, 0x94, 0x18, 0xe1, - 0xc2, 0x96, 0x37, 0x68, 0x4a, 0x06, 0x16, 0x0a, 0x5d, 0x86, 0xff, 0x56, 0x0b, 0x19, 0xf9, 0x29, - 0xa8, 0x1f, 0xb8, 0x7e, 0xc0, 0x17, 0xaf, 0x62, 0xd2, 0x64, 0x76, 0x5b, 0x96, 0x63, 0xc8, 0x91, - 0x76, 0x67, 0x57, 0x46, 0xbb, 0xb3, 0xb5, 
0xdf, 0x2a, 0xc2, 0xd4, 0x27, 0x25, 0x79, 0x42, 0x56, - 0xf4, 0x6f, 0x29, 0x67, 0xf4, 0x6f, 0xf9, 0x3c, 0xd1, 0xbf, 0xda, 0x0f, 0x0a, 0x00, 0x4f, 0x2d, - 0x73, 0x83, 0x99, 0x0c, 0xcc, 0xcd, 0x3d, 0xae, 0xb2, 0xc3, 0x72, 0xff, 0x41, 0x45, 0x3d, 0x12, - 0x0f, 0xca, 0xfd, 0xb0, 0x00, 0x33, 0x7a, 0x22, 0xd0, 0x35, 0xb7, 0xbe, 0x9c, 0x8a, 0x9b, 0x0d, - 0xe3, 0xb4, 0x92, 0xe5, 0x98, 0x12, 0x4b, 0x5e, 0x8f, 0x32, 0x93, 0xdf, 0x8d, 0x86, 0xfd, 0x50, - 0x4a, 0x71, 0xae, 0xbb, 0x25, 0x38, 0x9f, 0x10, 0x58, 0x5c, 0x9a, 0x48, 0x60, 0x71, 0xfc, 0xc8, - 0x64, 0xf9, 0xb1, 0x47, 0x26, 0x8f, 0xa0, 0xb1, 0xef, 0xb9, 0x3d, 0x1e, 0xbb, 0x2b, 0xef, 0xfe, - 0xbe, 0x95, 0x63, 0xa1, 0xec, 0xed, 0x59, 0x0e, 0x35, 0x79, 0x5c, 0x70, 0x68, 0xb8, 0x5a, 0x53, - 0xf8, 0x18, 0x89, 0xe2, 0xb6, 0x7e, 0x57, 0x48, 0xad, 0x4e, 0x52, 0x6a, 0x38, 0x97, 0xec, 0x08, - 0x74, 0x54, 0x62, 0x92, 0xf1, 0xba, 0xb5, 0xa7, 0x13, 0xaf, 0xab, 0xfd, 0xc5, 0x9a, 0x9a, 0xc0, - 0x9e, 0xb9, 0x24, 0xb8, 0x9f, 0x1e, 0x74, 0xef, 0xd2, 0xa1, 0x53, 0xe8, 0xf5, 0xa7, 0x78, 0x0a, - 0xbd, 0x31, 0x99, 0x53, 0xe8, 0x90, 0xef, 0x14, 0x7a, 0x73, 0x42, 0xa7, 0xd0, 0xa7, 0x26, 0x75, - 0x0a, 0x7d, 0x7a, 0xac, 0x53, 0xe8, 0x33, 0x67, 0x3a, 0x85, 0x7e, 0x52, 0x82, 0xd4, 0x66, 0xfc, - 0x53, 0xc7, 0xdb, 0xff, 0x57, 0x8e, 0xb7, 0xef, 0x16, 0x21, 0x9a, 0x88, 0xcf, 0x19, 0x98, 0xf4, - 0x0e, 0xd4, 0x7b, 0xfa, 0x43, 0x1e, 0x38, 0x9d, 0xe7, 0xee, 0xe8, 0x2d, 0x89, 0x81, 0x21, 0x1a, - 0xf1, 0x01, 0xac, 0xf0, 0xfe, 0x86, 0xdc, 0x2e, 0x8c, 0xe8, 0x2a, 0x08, 0x61, 0x24, 0x8d, 0xfe, - 0x63, 0x4c, 0x8c, 0xf6, 0x6f, 0x8a, 0x20, 0x2f, 0xfa, 0x20, 0x14, 0x2a, 0xfb, 0xd6, 0x43, 0x6a, - 0xe6, 0x0e, 0x77, 0x8e, 0xdd, 0xe8, 0x2f, 0x7c, 0x34, 0xbc, 0x00, 0x05, 0x3a, 0x37, 0xbe, 0x0b, - 0x9f, 0x9b, 0xec, 0xbf, 0x1c, 0xc6, 0xf7, 0xb8, 0xef, 0x4e, 0x1a, 0xdf, 0x45, 0x11, 0x2a, 0x19, - 0xc2, 0xd6, 0xcf, 0xc3, 0x2f, 0x72, 0xbb, 0x18, 0x13, 0x61, 0x1c, 0xca, 0xd6, 0xef, 0x8b, 0x34, - 0x14, 0x52, 0x46, 0xfb, 0x17, 0xbf, 0xff, 0xc3, 0xeb, 0xcf, 0xfd, 0xe0, 0x87, 
0xd7, 0x9f, 0xfb, - 0xe8, 0x87, 0xd7, 0x9f, 0xfb, 0x95, 0xd3, 0xeb, 0x85, 0xef, 0x9f, 0x5e, 0x2f, 0xfc, 0xe0, 0xf4, - 0x7a, 0xe1, 0xa3, 0xd3, 0xeb, 0x85, 0xff, 0x7c, 0x7a, 0xbd, 0xf0, 0x57, 0xfe, 0xcb, 0xf5, 0xe7, - 0x7e, 0xe1, 0x4b, 0x51, 0x13, 0x16, 0x55, 0x13, 0x16, 0x95, 0xc0, 0xc5, 0xfe, 0x61, 0x77, 0x91, - 0x35, 0x21, 0x2a, 0x51, 0x4d, 0xf8, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xad, 0x0f, 0x6c, 0xf9, - 0xf5, 0x9e, 0x00, 0x00, + // 8105 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x5d, 0x6c, 0x24, 0xd9, + 0x75, 0xde, 0xf6, 0x7f, 0xf7, 0x69, 0xfe, 0xed, 0x9d, 0xd9, 0x59, 0xce, 0x68, 0x76, 0x7a, 0x54, + 0xb2, 0xa4, 0x71, 0x6c, 0x93, 0x59, 0x5a, 0xbb, 0x5a, 0xd9, 0x96, 0x76, 0xd9, 0xe4, 0x70, 0x86, + 0x33, 0xe4, 0x0c, 0x75, 0x9a, 0x9c, 0x5d, 0x79, 0x63, 0x6d, 0x8a, 0x55, 0x97, 0xcd, 0x5a, 0x56, + 0x57, 0xb5, 0xaa, 0xaa, 0x39, 0xc3, 0x75, 0x0c, 0xd9, 0x52, 0x82, 0x55, 0x90, 0x04, 0x09, 0xfc, + 0x64, 0x20, 0x70, 0x82, 0x04, 0x01, 0xfc, 0x60, 0x38, 0x0f, 0x41, 0x94, 0x87, 0x00, 0xf9, 0x71, + 0x10, 0x24, 0xca, 0xbf, 0x10, 0x04, 0xc8, 0xe6, 0x85, 0x88, 0x18, 0xe4, 0x21, 0x01, 0x12, 0x18, + 0x31, 0x12, 0x3b, 0x03, 0x23, 0x0a, 0xee, 0x5f, 0xfd, 0x75, 0xf5, 0x0c, 0xd9, 0xd5, 0x9c, 0x9d, + 0x4d, 0xf6, 0xad, 0xfb, 0x9e, 0x73, 0xbf, 0x73, 0xeb, 0xd6, 0xad, 0x7b, 0xcf, 0x3d, 0xe7, 0xdc, + 0x73, 0xe1, 0x56, 0xd7, 0x0a, 0xf6, 0x07, 0xbb, 0x0b, 0x86, 0xdb, 0x5b, 0x74, 0x06, 0x3d, 0xbd, + 0xef, 0xb9, 0xef, 0xf3, 0x1f, 0x7b, 0xb6, 0xfb, 0x70, 0xb1, 0x7f, 0xd0, 0x5d, 0xd4, 0xfb, 0x96, + 0x1f, 0x95, 0x1c, 0xbe, 0xaa, 0xdb, 0xfd, 0x7d, 0xfd, 0xd5, 0xc5, 0x2e, 0x75, 0xa8, 0xa7, 0x07, + 0xd4, 0x5c, 0xe8, 0x7b, 0x6e, 0xe0, 0x92, 0x2f, 0x47, 0x40, 0x0b, 0x0a, 0x68, 0x41, 0x55, 0x5b, + 0xe8, 0x1f, 0x74, 0x17, 0x18, 0x50, 0x54, 0xa2, 0x80, 0xae, 0xfc, 0x4c, 0xac, 0x05, 0x5d, 0xb7, + 0xeb, 0x2e, 0x72, 0xbc, 0xdd, 0xc1, 0x1e, 0xff, 0xc7, 0xff, 0xf0, 0x5f, 0x42, 0xce, 0x15, 0xed, + 0xe0, 0x0d, 0x7f, 0xc1, 0x72, 0x59, 0xb3, 
0x16, 0x0d, 0xd7, 0xa3, 0x8b, 0x87, 0x43, 0x6d, 0xb9, + 0xf2, 0xa5, 0x88, 0xa7, 0xa7, 0x1b, 0xfb, 0x96, 0x43, 0xbd, 0x23, 0xf5, 0x2c, 0x8b, 0x1e, 0xf5, + 0xdd, 0x81, 0x67, 0xd0, 0x33, 0xd5, 0xf2, 0x17, 0x7b, 0x34, 0xd0, 0xb3, 0x64, 0x2d, 0x8e, 0xaa, + 0xe5, 0x0d, 0x9c, 0xc0, 0xea, 0x0d, 0x8b, 0x79, 0xfd, 0x69, 0x15, 0x7c, 0x63, 0x9f, 0xf6, 0xf4, + 0xa1, 0x7a, 0x3f, 0x3b, 0xaa, 0xde, 0x20, 0xb0, 0xec, 0x45, 0xcb, 0x09, 0xfc, 0xc0, 0x4b, 0x57, + 0xd2, 0x7e, 0x17, 0xe0, 0xc2, 0xf2, 0xae, 0x1f, 0x78, 0xba, 0x11, 0x6c, 0xb9, 0xe6, 0x36, 0xed, + 0xf5, 0x6d, 0x3d, 0xa0, 0xe4, 0x00, 0xea, 0xec, 0x81, 0x4c, 0x3d, 0xd0, 0xe7, 0x0b, 0xd7, 0x0b, + 0x37, 0x9a, 0x4b, 0xcb, 0x0b, 0x63, 0xbe, 0xc0, 0x85, 0x4d, 0x09, 0xd4, 0x9e, 0x3a, 0x39, 0x6e, + 0xd5, 0xd5, 0x3f, 0x0c, 0x05, 0x90, 0xdf, 0x28, 0xc0, 0x94, 0xe3, 0x9a, 0xb4, 0x43, 0x6d, 0x6a, + 0x04, 0xae, 0x37, 0x5f, 0xbc, 0x5e, 0xba, 0xd1, 0x5c, 0xfa, 0xe6, 0xd8, 0x12, 0x33, 0x9e, 0x68, + 0xe1, 0x5e, 0x4c, 0xc0, 0x4d, 0x27, 0xf0, 0x8e, 0xda, 0x17, 0x7f, 0x70, 0xdc, 0x7a, 0xe1, 0xe4, + 0xb8, 0x35, 0x15, 0x27, 0x61, 0xa2, 0x25, 0x64, 0x07, 0x9a, 0x81, 0x6b, 0xb3, 0x2e, 0xb3, 0x5c, + 0xc7, 0x9f, 0x2f, 0xf1, 0x86, 0x5d, 0x5b, 0x10, 0x5d, 0xcd, 0xc4, 0x2f, 0xb0, 0x31, 0xb6, 0x70, + 0xf8, 0xea, 0xc2, 0x76, 0xc8, 0xd6, 0xbe, 0x20, 0x81, 0x9b, 0x51, 0x99, 0x8f, 0x71, 0x1c, 0x42, + 0x61, 0xd6, 0xa7, 0xc6, 0xc0, 0xb3, 0x82, 0xa3, 0x15, 0xd7, 0x09, 0xe8, 0xa3, 0x60, 0xbe, 0xcc, + 0x7b, 0xf9, 0x0b, 0x59, 0xd0, 0x5b, 0xae, 0xd9, 0x49, 0x72, 0xb7, 0x2f, 0x9c, 0x1c, 0xb7, 0x66, + 0x53, 0x85, 0x98, 0xc6, 0x24, 0x0e, 0xcc, 0x59, 0x3d, 0xbd, 0x4b, 0xb7, 0x06, 0xb6, 0xdd, 0xa1, + 0x86, 0x47, 0x03, 0x7f, 0xbe, 0xc2, 0x1f, 0xe1, 0x46, 0x96, 0x9c, 0x0d, 0xd7, 0xd0, 0xed, 0xfb, + 0xbb, 0xef, 0x53, 0x23, 0x40, 0xba, 0x47, 0x3d, 0xea, 0x18, 0xb4, 0x3d, 0x2f, 0x1f, 0x66, 0x6e, + 0x3d, 0x85, 0x84, 0x43, 0xd8, 0xe4, 0x16, 0xbc, 0xd8, 0xf7, 0x2c, 0x97, 0x37, 0xc1, 0xd6, 0x7d, + 0xff, 0x9e, 0xde, 0xa3, 0xf3, 0xd5, 0xeb, 0x85, 0x1b, 0x8d, 0xf6, 0x65, 0x09, 
0xf3, 0xe2, 0x56, + 0x9a, 0x01, 0x87, 0xeb, 0x90, 0x1b, 0x50, 0x57, 0x85, 0xf3, 0xb5, 0xeb, 0x85, 0x1b, 0x15, 0x31, + 0x76, 0x54, 0x5d, 0x0c, 0xa9, 0x64, 0x0d, 0xea, 0xfa, 0xde, 0x9e, 0xe5, 0x30, 0xce, 0x3a, 0xef, + 0xc2, 0xab, 0x59, 0x8f, 0xb6, 0x2c, 0x79, 0x04, 0x8e, 0xfa, 0x87, 0x61, 0x5d, 0x72, 0x07, 0x88, + 0x4f, 0xbd, 0x43, 0xcb, 0xa0, 0xcb, 0x86, 0xe1, 0x0e, 0x9c, 0x80, 0xb7, 0xbd, 0xc1, 0xdb, 0x7e, + 0x45, 0xb6, 0x9d, 0x74, 0x86, 0x38, 0x30, 0xa3, 0x16, 0x79, 0x0b, 0xe6, 0xe4, 0xb7, 0x1a, 0xf5, + 0x02, 0x70, 0xa4, 0x8b, 0xac, 0x23, 0x31, 0x45, 0xc3, 0x21, 0x6e, 0x62, 0xc2, 0x55, 0x7d, 0x10, + 0xb8, 0x3d, 0x06, 0x99, 0x14, 0xba, 0xed, 0x1e, 0x50, 0x67, 0xbe, 0x79, 0xbd, 0x70, 0xa3, 0xde, + 0xbe, 0x7e, 0x72, 0xdc, 0xba, 0xba, 0xfc, 0x04, 0x3e, 0x7c, 0x22, 0x0a, 0xb9, 0x0f, 0x0d, 0xd3, + 0xf1, 0xb7, 0x5c, 0xdb, 0x32, 0x8e, 0xe6, 0xa7, 0x78, 0x03, 0x5f, 0x95, 0x8f, 0xda, 0x58, 0xbd, + 0xd7, 0x11, 0x84, 0xc7, 0xc7, 0xad, 0xab, 0xc3, 0x53, 0xea, 0x42, 0x48, 0xc7, 0x08, 0x83, 0x6c, + 0x72, 0xc0, 0x15, 0xd7, 0xd9, 0xb3, 0xba, 0xf3, 0xd3, 0xfc, 0x6d, 0x5c, 0x1f, 0x31, 0xa0, 0x57, + 0xef, 0x75, 0x04, 0x5f, 0x7b, 0x5a, 0x8a, 0x13, 0x7f, 0x31, 0x42, 0x20, 0x26, 0xcc, 0xa8, 0xc9, + 0x78, 0xc5, 0xd6, 0xad, 0x9e, 0x3f, 0x3f, 0xc3, 0x07, 0xef, 0x4f, 0x8c, 0xc0, 0xc4, 0x38, 0x73, + 0xfb, 0x92, 0x7c, 0x94, 0x99, 0x44, 0xb1, 0x8f, 0x29, 0xcc, 0x2b, 0x6f, 0xc2, 0x8b, 0x43, 0x73, + 0x03, 0x99, 0x83, 0xd2, 0x01, 0x3d, 0xe2, 0x53, 0x5f, 0x03, 0xd9, 0x4f, 0x72, 0x11, 0x2a, 0x87, + 0xba, 0x3d, 0xa0, 0xf3, 0x45, 0x5e, 0x26, 0xfe, 0xfc, 0x5c, 0xf1, 0x8d, 0x82, 0xf6, 0xd7, 0x4b, + 0x30, 0xa5, 0x66, 0x9c, 0x8e, 0xe5, 0x1c, 0x90, 0xb7, 0xa1, 0x64, 0xbb, 0x5d, 0x39, 0x6f, 0xfe, + 0xc2, 0xd8, 0xb3, 0xd8, 0x86, 0xdb, 0x6d, 0xd7, 0x4e, 0x8e, 0x5b, 0xa5, 0x0d, 0xb7, 0x8b, 0x0c, + 0x91, 0x18, 0x50, 0x39, 0xd0, 0xf7, 0x0e, 0x74, 0xde, 0x86, 0xe6, 0x52, 0x7b, 0x6c, 0xe8, 0xbb, + 0x0c, 0x85, 0xb5, 0xb5, 0xdd, 0x38, 0x39, 0x6e, 0x55, 0xf8, 0x5f, 0x14, 0xd8, 0xc4, 0x85, 0xc6, + 0xae, 0xad, 0x1b, 
0x07, 0xfb, 0xae, 0x4d, 0xe7, 0x4b, 0x39, 0x05, 0xb5, 0x15, 0x92, 0x78, 0xcd, + 0xe1, 0x5f, 0x8c, 0x64, 0x10, 0x03, 0xaa, 0x03, 0xd3, 0xb7, 0x9c, 0x03, 0x39, 0x07, 0xbe, 0x39, + 0xb6, 0xb4, 0x9d, 0x55, 0xfe, 0x4c, 0x70, 0x72, 0xdc, 0xaa, 0x8a, 0xdf, 0x28, 0xa1, 0xb5, 0x3f, + 0x9c, 0x82, 0x19, 0xf5, 0x92, 0x1e, 0x50, 0x2f, 0xa0, 0x8f, 0xc8, 0x75, 0x28, 0x3b, 0xec, 0xd3, + 0xe4, 0x2f, 0xb9, 0x3d, 0x25, 0x87, 0x4b, 0x99, 0x7f, 0x92, 0x9c, 0xc2, 0x5a, 0x26, 0x86, 0x8a, + 0xec, 0xf0, 0xf1, 0x5b, 0xd6, 0xe1, 0x30, 0xa2, 0x65, 0xe2, 0x37, 0x4a, 0x68, 0xf2, 0x2e, 0x94, + 0xf9, 0xc3, 0x8b, 0xae, 0xfe, 0xea, 0xf8, 0x22, 0xd8, 0xa3, 0xd7, 0xd9, 0x13, 0xf0, 0x07, 0xe7, + 0xa0, 0x6c, 0x28, 0x0e, 0xcc, 0x3d, 0xd9, 0xb1, 0xbf, 0x90, 0xa3, 0x63, 0xd7, 0xc4, 0x50, 0xdc, + 0x59, 0x5d, 0x43, 0x86, 0x48, 0xfe, 0x62, 0x01, 0x5e, 0x34, 0x5c, 0x27, 0xd0, 0x99, 0x9e, 0xa1, + 0x16, 0xd9, 0xf9, 0x0a, 0x97, 0x73, 0x67, 0x6c, 0x39, 0x2b, 0x69, 0xc4, 0xf6, 0x4b, 0x6c, 0xcd, + 0x18, 0x2a, 0xc6, 0x61, 0xd9, 0xe4, 0x2f, 0x17, 0xe0, 0x25, 0x36, 0x97, 0x0f, 0x31, 0xf3, 0x15, + 0x68, 0xb2, 0xad, 0xba, 0x7c, 0x72, 0xdc, 0x7a, 0x69, 0x3d, 0x4b, 0x18, 0x66, 0xb7, 0x81, 0xb5, + 0xee, 0x82, 0x3e, 0xac, 0x96, 0xf0, 0xd5, 0xad, 0xb9, 0xb4, 0x31, 0x49, 0x55, 0xa7, 0xfd, 0x19, + 0x39, 0x94, 0xb3, 0x34, 0x3b, 0xcc, 0x6a, 0x05, 0xb9, 0x09, 0xb5, 0x43, 0xd7, 0x1e, 0xf4, 0xa8, + 0x3f, 0x5f, 0xe7, 0x53, 0xec, 0x95, 0xac, 0x29, 0xf6, 0x01, 0x67, 0x69, 0xcf, 0x4a, 0xf8, 0x9a, + 0xf8, 0xef, 0xa3, 0xaa, 0x4b, 0x2c, 0xa8, 0xda, 0x56, 0xcf, 0x0a, 0x7c, 0xbe, 0x70, 0x36, 0x97, + 0x6e, 0x8e, 0xfd, 0x58, 0xe2, 0x13, 0xdd, 0xe0, 0x60, 0xe2, 0xab, 0x11, 0xbf, 0x51, 0x0a, 0x60, + 0x53, 0xa1, 0x6f, 0xe8, 0xb6, 0x58, 0x58, 0x9b, 0x4b, 0x5f, 0x1b, 0xff, 0xb3, 0x61, 0x28, 0xed, + 0x69, 0xf9, 0x4c, 0x15, 0xfe, 0x17, 0x05, 0x36, 0xf9, 0x25, 0x98, 0x49, 0xbc, 0x4d, 0x7f, 0xbe, + 0xc9, 0x7b, 0xe7, 0x95, 0xac, 0xde, 0x09, 0xb9, 0xa2, 0x95, 0x27, 0x31, 0x42, 0x7c, 0x4c, 0x81, + 0x91, 0xbb, 0x50, 0xf7, 0x2d, 0x93, 0x1a, 0xba, 0xe7, 
0xcf, 0x4f, 0x9d, 0x06, 0x78, 0x4e, 0x02, + 0xd7, 0x3b, 0xb2, 0x1a, 0x86, 0x00, 0x64, 0x01, 0xa0, 0xaf, 0x7b, 0x81, 0x25, 0x14, 0xd5, 0x69, + 0xae, 0x34, 0xcd, 0x9c, 0x1c, 0xb7, 0x60, 0x2b, 0x2c, 0xc5, 0x18, 0x07, 0xe3, 0x67, 0x75, 0xd7, + 0x9d, 0xfe, 0x20, 0x10, 0x0b, 0x6b, 0x43, 0xf0, 0x77, 0xc2, 0x52, 0x8c, 0x71, 0x90, 0xdf, 0x29, + 0xc0, 0x67, 0xa2, 0xbf, 0xc3, 0x1f, 0xd9, 0xec, 0xc4, 0x3f, 0xb2, 0xd6, 0xc9, 0x71, 0xeb, 0x33, + 0x9d, 0xd1, 0x22, 0xf1, 0x49, 0xed, 0x21, 0x1f, 0x16, 0x60, 0x66, 0xd0, 0x37, 0xf5, 0x80, 0x76, + 0x02, 0xb6, 0xe3, 0xe9, 0x1e, 0xcd, 0xcf, 0xf1, 0x26, 0xde, 0x1a, 0x7f, 0x16, 0x4c, 0xc0, 0x45, + 0xaf, 0x39, 0x59, 0x8e, 0x29, 0xb1, 0xda, 0xdb, 0x30, 0xbd, 0x3c, 0x08, 0xf6, 0x5d, 0xcf, 0xfa, + 0x80, 0xab, 0xff, 0x64, 0x0d, 0x2a, 0x01, 0x57, 0xe3, 0x84, 0x86, 0xf0, 0xf9, 0xac, 0x97, 0x2e, + 0x54, 0xea, 0xbb, 0xf4, 0x48, 0xe9, 0x25, 0x62, 0xa5, 0x16, 0x6a, 0x9d, 0xa8, 0xae, 0xfd, 0xe9, + 0x02, 0xd4, 0xda, 0xba, 0x71, 0xe0, 0xee, 0xed, 0x91, 0x77, 0xa0, 0x6e, 0x39, 0x01, 0xf5, 0x0e, + 0x75, 0x5b, 0xc2, 0x2e, 0xc4, 0x60, 0xc3, 0x0d, 0x61, 0xf4, 0x78, 0x6c, 0xf7, 0xc5, 0x04, 0xad, + 0x0e, 0xe4, 0xae, 0x85, 0x6b, 0xc6, 0xeb, 0x12, 0x03, 0x43, 0x34, 0xd2, 0x82, 0x8a, 0x1f, 0xd0, + 0xbe, 0xcf, 0xd7, 0xc0, 0x69, 0xd1, 0x8c, 0x0e, 0x2b, 0x40, 0x51, 0xae, 0xfd, 0xb5, 0x02, 0x34, + 0xda, 0xba, 0x6f, 0x19, 0xec, 0x29, 0xc9, 0x0a, 0x94, 0x07, 0x3e, 0xf5, 0xce, 0xf6, 0x6c, 0x7c, + 0xd9, 0xda, 0xf1, 0xa9, 0x87, 0xbc, 0x32, 0xb9, 0x0f, 0xf5, 0xbe, 0xee, 0xfb, 0x0f, 0x5d, 0xcf, + 0x94, 0x4b, 0xef, 0x29, 0x81, 0xc4, 0x36, 0x41, 0x56, 0xc5, 0x10, 0x44, 0x6b, 0x42, 0xa4, 0x7b, + 0x68, 0xbf, 0x5f, 0x80, 0x0b, 0xed, 0xc1, 0xde, 0x1e, 0xf5, 0xa4, 0x56, 0x2c, 0xf5, 0x4d, 0x0a, + 0x15, 0x8f, 0x9a, 0x96, 0x2f, 0xdb, 0xbe, 0x3a, 0xf6, 0x40, 0x41, 0x86, 0x22, 0xd5, 0x5b, 0xde, + 0x5f, 0xbc, 0x00, 0x05, 0x3a, 0x19, 0x40, 0xe3, 0x7d, 0xca, 0x76, 0xe3, 0x54, 0xef, 0xc9, 0xa7, + 0xbb, 0x3d, 0xb6, 0xa8, 0x3b, 0x34, 0xe8, 0x70, 0xa4, 0xb8, 0x36, 0x1d, 0x16, 0x62, 0x24, 
0x49, + 0xfb, 0xdd, 0x0a, 0x4c, 0xad, 0xb8, 0xbd, 0x5d, 0xcb, 0xa1, 0xe6, 0x4d, 0xb3, 0x4b, 0xc9, 0x7b, + 0x50, 0xa6, 0x66, 0x97, 0xca, 0xa7, 0x1d, 0x5f, 0xf1, 0x60, 0x60, 0x91, 0xfa, 0xc4, 0xfe, 0x21, + 0x07, 0x26, 0x1b, 0x30, 0xb3, 0xe7, 0xb9, 0x3d, 0x31, 0x97, 0x6f, 0x1f, 0xf5, 0xa5, 0xee, 0xdc, + 0xfe, 0x09, 0xf5, 0xe1, 0xac, 0x25, 0xa8, 0x8f, 0x8f, 0x5b, 0x10, 0xfd, 0xc3, 0x54, 0x5d, 0xf2, + 0x0e, 0xcc, 0x47, 0x25, 0xe1, 0xa4, 0xb6, 0xc2, 0xb6, 0x33, 0x5c, 0x77, 0xaa, 0xb4, 0xaf, 0x9e, + 0x1c, 0xb7, 0xe6, 0xd7, 0x46, 0xf0, 0xe0, 0xc8, 0xda, 0x6c, 0xaa, 0x98, 0x8b, 0x88, 0x62, 0xa1, + 0x91, 0x2a, 0xd3, 0x84, 0x56, 0x30, 0xbe, 0xef, 0x5b, 0x4b, 0x89, 0xc0, 0x21, 0xa1, 0x64, 0x0d, + 0xa6, 0x02, 0x37, 0xd6, 0x5f, 0x15, 0xde, 0x5f, 0x9a, 0x32, 0x54, 0x6c, 0xbb, 0x23, 0x7b, 0x2b, + 0x51, 0x8f, 0x20, 0x5c, 0x52, 0xff, 0x53, 0x3d, 0x55, 0xe5, 0x3d, 0x75, 0xe5, 0xe4, 0xb8, 0x75, + 0x69, 0x3b, 0x93, 0x03, 0x47, 0xd4, 0x24, 0xbf, 0x56, 0x80, 0x19, 0x45, 0x92, 0x7d, 0x54, 0x9b, + 0x64, 0x1f, 0x11, 0x36, 0x22, 0xb6, 0x13, 0x02, 0x30, 0x25, 0x50, 0xfb, 0x7e, 0x0d, 0x1a, 0xe1, + 0x54, 0x4f, 0x3e, 0x07, 0x15, 0x6e, 0x82, 0x90, 0x1a, 0x7c, 0xb8, 0x86, 0x73, 0x4b, 0x05, 0x0a, + 0x1a, 0xf9, 0x3c, 0xd4, 0x0c, 0xb7, 0xd7, 0xd3, 0x1d, 0x93, 0x9b, 0x95, 0x1a, 0xed, 0x26, 0x53, + 0x5d, 0x56, 0x44, 0x11, 0x2a, 0x1a, 0xb9, 0x0a, 0x65, 0xdd, 0xeb, 0x0a, 0x0b, 0x4f, 0x43, 0xcc, + 0x47, 0xcb, 0x5e, 0xd7, 0x47, 0x5e, 0x4a, 0xbe, 0x02, 0x25, 0xea, 0x1c, 0xce, 0x97, 0x47, 0xeb, + 0x46, 0x37, 0x9d, 0xc3, 0x07, 0xba, 0xd7, 0x6e, 0xca, 0x36, 0x94, 0x6e, 0x3a, 0x87, 0xc8, 0xea, + 0x90, 0x0d, 0xa8, 0x51, 0xe7, 0x90, 0xbd, 0x7b, 0x69, 0x7a, 0xf9, 0xec, 0x88, 0xea, 0x8c, 0x45, + 0x6e, 0x13, 0x42, 0x0d, 0x4b, 0x16, 0xa3, 0x82, 0x20, 0xdf, 0x80, 0x29, 0xa1, 0x6c, 0x6d, 0xb2, + 0x77, 0xe2, 0xcf, 0x57, 0x39, 0x64, 0x6b, 0xb4, 0xb6, 0xc6, 0xf9, 0x22, 0x53, 0x57, 0xac, 0xd0, + 0xc7, 0x04, 0x14, 0xf9, 0x06, 0x34, 0xd4, 0xce, 0x58, 0xbd, 0xd9, 0x4c, 0x2b, 0x91, 0xda, 0x4e, + 0x23, 0xfd, 0xd6, 0xc0, 0xf2, 
0x68, 0x8f, 0x3a, 0x81, 0xdf, 0x7e, 0x51, 0xd9, 0x0d, 0x14, 0xd5, + 0xc7, 0x08, 0x8d, 0xec, 0x0e, 0x9b, 0xbb, 0x84, 0xad, 0xe6, 0x73, 0x23, 0x66, 0xf5, 0x31, 0x6c, + 0x5d, 0xdf, 0x84, 0xd9, 0xd0, 0x1e, 0x25, 0x4d, 0x1a, 0xc2, 0x7a, 0xf3, 0x25, 0x56, 0x7d, 0x3d, + 0x49, 0x7a, 0x7c, 0xdc, 0x7a, 0x25, 0xc3, 0xa8, 0x11, 0x31, 0x60, 0x1a, 0x8c, 0x7c, 0x00, 0x33, + 0x1e, 0xd5, 0x4d, 0xcb, 0xa1, 0xbe, 0xbf, 0xe5, 0xb9, 0xbb, 0xf9, 0x35, 0x4f, 0x8e, 0x22, 0x86, + 0x3d, 0x26, 0x90, 0x31, 0x25, 0x89, 0x3c, 0x84, 0x69, 0xdb, 0x3a, 0xa4, 0x91, 0xe8, 0xe6, 0x44, + 0x44, 0xbf, 0x78, 0x72, 0xdc, 0x9a, 0xde, 0x88, 0x03, 0x63, 0x52, 0x0e, 0xd3, 0x54, 0xfa, 0xae, + 0x17, 0x28, 0xf5, 0xf4, 0xb3, 0x4f, 0x54, 0x4f, 0xb7, 0x5c, 0x2f, 0x88, 0x3e, 0x42, 0xf6, 0xcf, + 0x47, 0x51, 0x5d, 0xfb, 0xdb, 0x15, 0x18, 0xde, 0xc4, 0x25, 0x47, 0x5c, 0x61, 0xd2, 0x23, 0x2e, + 0x3d, 0x1a, 0xc4, 0xda, 0xf3, 0x86, 0xac, 0x36, 0x81, 0x11, 0x91, 0x31, 0xaa, 0x4b, 0x93, 0x1e, + 0xd5, 0xcf, 0xcd, 0xc4, 0x33, 0x3c, 0xfc, 0xab, 0x1f, 0xdf, 0xf0, 0xaf, 0x3d, 0x9b, 0xe1, 0xaf, + 0x7d, 0xaf, 0x0c, 0x33, 0xab, 0x3a, 0xed, 0xb9, 0xce, 0x53, 0xf7, 0xf1, 0x85, 0xe7, 0x62, 0x1f, + 0x7f, 0x03, 0xea, 0x1e, 0xed, 0xdb, 0x96, 0xa1, 0x0b, 0x75, 0x5d, 0xda, 0xcd, 0x51, 0x96, 0x61, + 0x48, 0x1d, 0x61, 0xbf, 0x29, 0x3d, 0x97, 0xf6, 0x9b, 0xf2, 0xc7, 0x6f, 0xbf, 0xd1, 0x7e, 0xad, + 0x08, 0x5c, 0xb5, 0x25, 0xd7, 0xa1, 0xcc, 0xd4, 0xb6, 0xb4, 0xd5, 0x90, 0x7f, 0x2d, 0x9c, 0x42, + 0xae, 0x40, 0x31, 0x70, 0xe5, 0x74, 0x03, 0x92, 0x5e, 0xdc, 0x76, 0xb1, 0x18, 0xb8, 0xe4, 0x03, + 0x00, 0xc3, 0x75, 0x4c, 0x4b, 0xb9, 0x93, 0xf2, 0x3d, 0xd8, 0x9a, 0xeb, 0x3d, 0xd4, 0x3d, 0x73, + 0x25, 0x44, 0x14, 0x3b, 0xf8, 0xe8, 0x3f, 0xc6, 0xa4, 0x91, 0x37, 0xa1, 0xea, 0x3a, 0x6b, 0x03, + 0xdb, 0xe6, 0x1d, 0xda, 0x68, 0x7f, 0xf1, 0xe4, 0xb8, 0x55, 0xbd, 0xcf, 0x4b, 0x1e, 0x1f, 0xb7, + 0x2e, 0x8b, 0x1d, 0x11, 0xfb, 0xf7, 0xb6, 0x67, 0x05, 0x96, 0xd3, 0x0d, 0x37, 0xb4, 0xb2, 0x9a, + 0xf6, 0xeb, 0x05, 0x68, 0xae, 0x59, 0x8f, 0xa8, 0xf9, 0xb6, 0xe5, 
0x98, 0xee, 0x43, 0x82, 0x50, + 0xb5, 0xa9, 0xd3, 0x0d, 0xf6, 0xc7, 0xdc, 0x71, 0x0a, 0xbb, 0x0e, 0x47, 0x40, 0x89, 0x44, 0x16, + 0xa1, 0x21, 0xf6, 0x2b, 0x96, 0xd3, 0xe5, 0x7d, 0x58, 0x8f, 0x66, 0xfa, 0x8e, 0x22, 0x60, 0xc4, + 0xa3, 0x1d, 0xc1, 0x8b, 0x43, 0xdd, 0x40, 0x4c, 0x28, 0x07, 0x7a, 0x57, 0x2d, 0x2a, 0x6b, 0x63, + 0x77, 0xf0, 0xb6, 0xde, 0x8d, 0x75, 0x2e, 0xd7, 0x0a, 0xb7, 0x75, 0xa6, 0x15, 0x32, 0x74, 0xed, + 0x8f, 0x0a, 0x50, 0x5f, 0x1b, 0x38, 0x06, 0xdf, 0xd4, 0x3f, 0xdd, 0x9a, 0xac, 0x54, 0xcc, 0x62, + 0xa6, 0x8a, 0x39, 0x80, 0xea, 0xc1, 0xc3, 0x50, 0x05, 0x6d, 0x2e, 0x6d, 0x8e, 0x3f, 0x2a, 0x64, + 0x93, 0x16, 0xee, 0x72, 0x3c, 0xe1, 0xec, 0x9c, 0x91, 0x0d, 0xaa, 0xde, 0x7d, 0x9b, 0x0b, 0x95, + 0xc2, 0xae, 0x7c, 0x05, 0x9a, 0x31, 0xb6, 0x33, 0xf9, 0x3d, 0xfe, 0x4e, 0x19, 0xaa, 0xb7, 0x3a, + 0x9d, 0xe5, 0xad, 0x75, 0xf2, 0x1a, 0x34, 0xa5, 0x1f, 0xec, 0x5e, 0xd4, 0x07, 0xa1, 0x1b, 0xb4, + 0x13, 0x91, 0x30, 0xce, 0xc7, 0x14, 0x78, 0x8f, 0xea, 0x76, 0x4f, 0x7e, 0x2c, 0xa1, 0xee, 0x80, + 0xac, 0x10, 0x05, 0x8d, 0xe8, 0x30, 0x33, 0xf0, 0xa9, 0xc7, 0xba, 0x50, 0xec, 0xf7, 0xe5, 0x67, + 0x73, 0x4a, 0x8b, 0x00, 0x5f, 0x60, 0x76, 0x12, 0x00, 0x98, 0x02, 0x24, 0x6f, 0x40, 0x5d, 0x1f, + 0x04, 0xfb, 0x7c, 0xcb, 0x25, 0xbe, 0x8d, 0xab, 0xdc, 0x4d, 0x28, 0xcb, 0x1e, 0x1f, 0xb7, 0xa6, + 0xee, 0x62, 0xfb, 0x35, 0xf5, 0x1f, 0x43, 0x6e, 0xd6, 0x38, 0x65, 0x63, 0x90, 0x8d, 0xab, 0x9c, + 0xb9, 0x71, 0x5b, 0x09, 0x00, 0x4c, 0x01, 0x92, 0x77, 0x61, 0xea, 0x80, 0x1e, 0x05, 0xfa, 0xae, + 0x14, 0x50, 0x3d, 0x8b, 0x80, 0x39, 0xa6, 0xf4, 0xdf, 0x8d, 0x55, 0xc7, 0x04, 0x18, 0xf1, 0xe1, + 0xe2, 0x01, 0xf5, 0x76, 0xa9, 0xe7, 0x4a, 0x7b, 0x85, 0x14, 0x52, 0x3b, 0x8b, 0x90, 0xf9, 0x93, + 0xe3, 0xd6, 0xc5, 0xbb, 0x19, 0x30, 0x98, 0x09, 0xae, 0xfd, 0xef, 0x22, 0xcc, 0xde, 0x12, 0x81, + 0x08, 0xae, 0x27, 0x34, 0x0f, 0x72, 0x19, 0x4a, 0x5e, 0x7f, 0xc0, 0x47, 0x4e, 0x49, 0xb8, 0x1a, + 0x70, 0x6b, 0x07, 0x59, 0x19, 0x79, 0x07, 0xea, 0xa6, 0x9c, 0x32, 0xa4, 0xb9, 0x64, 0x2c, 0xd3, + 0x96, 
0xfa, 0x87, 0x21, 0x1a, 0xdb, 0x1b, 0xf6, 0xfc, 0x6e, 0xc7, 0xfa, 0x80, 0x4a, 0x0b, 0x02, + 0xdf, 0x1b, 0x6e, 0x8a, 0x22, 0x54, 0x34, 0xb6, 0xaa, 0x1e, 0xd0, 0x23, 0xb1, 0x7f, 0x2e, 0x47, + 0xab, 0xea, 0x5d, 0x59, 0x86, 0x21, 0x95, 0xb4, 0xd4, 0xc7, 0xc2, 0x46, 0x41, 0x59, 0xd8, 0x7e, + 0x1e, 0xb0, 0x02, 0xf9, 0xdd, 0xb0, 0x29, 0xf3, 0x7d, 0x2b, 0x08, 0xa8, 0x27, 0x5f, 0xe3, 0x58, + 0x53, 0xe6, 0x1d, 0x8e, 0x80, 0x12, 0x89, 0xfc, 0x14, 0x34, 0x38, 0x78, 0xdb, 0x76, 0x77, 0xf9, + 0x8b, 0x6b, 0x08, 0x2b, 0xd0, 0x03, 0x55, 0x88, 0x11, 0x5d, 0xfb, 0x71, 0x11, 0x2e, 0xdd, 0xa2, + 0x81, 0xd0, 0x6a, 0x56, 0x69, 0xdf, 0x76, 0x8f, 0x98, 0x3e, 0x8d, 0xf4, 0x5b, 0xe4, 0x2d, 0x00, + 0xcb, 0xdf, 0xed, 0x1c, 0x1a, 0xfc, 0x3b, 0x10, 0xdf, 0xf0, 0x75, 0xf9, 0x49, 0xc2, 0x7a, 0xa7, + 0x2d, 0x29, 0x8f, 0x13, 0xff, 0x30, 0x56, 0x27, 0xda, 0x90, 0x17, 0x9f, 0xb0, 0x21, 0xef, 0x00, + 0xf4, 0x23, 0xad, 0xbc, 0xc4, 0x39, 0x7f, 0x56, 0x89, 0x39, 0x8b, 0x42, 0x1e, 0x83, 0xc9, 0xa3, + 0x27, 0x3b, 0x30, 0x67, 0xd2, 0x3d, 0x7d, 0x60, 0x07, 0xe1, 0x4e, 0x42, 0x7e, 0xc4, 0xa7, 0xdf, + 0x8c, 0x84, 0x41, 0x12, 0xab, 0x29, 0x24, 0x1c, 0xc2, 0xd6, 0xfe, 0x6e, 0x09, 0xae, 0xdc, 0xa2, + 0x41, 0x68, 0xa3, 0x93, 0xb3, 0x63, 0xa7, 0x4f, 0x0d, 0xf6, 0x16, 0x3e, 0x2c, 0x40, 0xd5, 0xd6, + 0x77, 0xa9, 0xcd, 0x56, 0x2f, 0xf6, 0x34, 0xef, 0x8d, 0xbd, 0x10, 0x8c, 0x96, 0xb2, 0xb0, 0xc1, + 0x25, 0xa4, 0x96, 0x06, 0x51, 0x88, 0x52, 0x3c, 0x9b, 0xd4, 0x0d, 0x7b, 0xe0, 0x07, 0x62, 0x67, + 0x27, 0xf5, 0xc9, 0x70, 0x52, 0x5f, 0x89, 0x48, 0x18, 0xe7, 0x23, 0x4b, 0x00, 0x86, 0x6d, 0x51, + 0x27, 0xe0, 0xb5, 0xc4, 0x77, 0x45, 0xd4, 0xfb, 0x5d, 0x09, 0x29, 0x18, 0xe3, 0x62, 0xa2, 0x7a, + 0xae, 0x63, 0x05, 0xae, 0x10, 0x55, 0x4e, 0x8a, 0xda, 0x8c, 0x48, 0x18, 0xe7, 0xe3, 0xd5, 0x68, + 0xe0, 0x59, 0x86, 0xcf, 0xab, 0x55, 0x52, 0xd5, 0x22, 0x12, 0xc6, 0xf9, 0xd8, 0x9a, 0x17, 0x7b, + 0xfe, 0x33, 0xad, 0x79, 0xbf, 0xdd, 0x80, 0x6b, 0x89, 0x6e, 0x0d, 0xf4, 0x80, 0xee, 0x0d, 0xec, + 0x0e, 0x0d, 0xd4, 0x0b, 0x1c, 0x73, 0x2d, 
0xfc, 0x73, 0xd1, 0x7b, 0x17, 0xe1, 0x4f, 0xc6, 0x64, + 0xde, 0xfb, 0x50, 0x03, 0x4f, 0xf5, 0xee, 0x17, 0xa1, 0xe1, 0xe8, 0x81, 0xcf, 0x3f, 0x5c, 0xf9, + 0x8d, 0x86, 0x6a, 0xd8, 0x3d, 0x45, 0xc0, 0x88, 0x87, 0x6c, 0xc1, 0x45, 0xd9, 0xc5, 0x37, 0x1f, + 0xb1, 0x3d, 0x3f, 0xf5, 0x44, 0x5d, 0xb9, 0x9c, 0xca, 0xba, 0x17, 0x37, 0x33, 0x78, 0x30, 0xb3, + 0x26, 0xd9, 0x84, 0x0b, 0x86, 0x08, 0x09, 0xa1, 0xb6, 0xab, 0x9b, 0x0a, 0x50, 0x98, 0x44, 0xc3, + 0xad, 0xd1, 0xca, 0x30, 0x0b, 0x66, 0xd5, 0x4b, 0x8f, 0xe6, 0xea, 0x58, 0xa3, 0xb9, 0x36, 0xce, + 0x68, 0xae, 0x8f, 0x37, 0x9a, 0x1b, 0xa7, 0x1b, 0xcd, 0xac, 0xe7, 0xd9, 0x38, 0xa2, 0x1e, 0x53, + 0x4f, 0xc4, 0x0a, 0x1b, 0x8b, 0x38, 0x0a, 0x7b, 0xbe, 0x93, 0xc1, 0x83, 0x99, 0x35, 0xc9, 0x2e, + 0x5c, 0x11, 0xe5, 0x37, 0x1d, 0xc3, 0x3b, 0xea, 0xb3, 0x85, 0x27, 0x86, 0xdb, 0x4c, 0xd8, 0xa4, + 0xaf, 0x74, 0x46, 0x72, 0xe2, 0x13, 0x50, 0xc8, 0xcf, 0xc3, 0xb4, 0x78, 0x4b, 0x9b, 0x7a, 0x9f, + 0xc3, 0x8a, 0xf8, 0xa3, 0x97, 0x24, 0xec, 0xf4, 0x4a, 0x9c, 0x88, 0x49, 0x5e, 0xb2, 0x0c, 0xb3, + 0xfd, 0x43, 0x83, 0xfd, 0x5c, 0xdf, 0xbb, 0x47, 0xa9, 0x49, 0x4d, 0xee, 0xf0, 0x6c, 0xb4, 0x5f, + 0x56, 0xd6, 0x9d, 0xad, 0x24, 0x19, 0xd3, 0xfc, 0xe4, 0x0d, 0x98, 0xf2, 0x03, 0xdd, 0x0b, 0xa4, + 0x21, 0x78, 0x7e, 0x46, 0xc4, 0x67, 0x29, 0x3b, 0x69, 0x27, 0x46, 0xc3, 0x04, 0x67, 0xe6, 0x7a, + 0x31, 0x7b, 0x7e, 0xeb, 0x45, 0x9e, 0xd9, 0xea, 0x9f, 0x16, 0xe1, 0xfa, 0x2d, 0x1a, 0x6c, 0xba, + 0x8e, 0x34, 0xa3, 0x67, 0x2d, 0xfb, 0xa7, 0xb2, 0xa2, 0x27, 0x17, 0xed, 0xe2, 0x44, 0x17, 0xed, + 0xd2, 0x84, 0x16, 0xed, 0xf2, 0x39, 0x2e, 0xda, 0x7f, 0xbf, 0x08, 0x2f, 0x27, 0x7a, 0x72, 0xcb, + 0x35, 0xd5, 0x84, 0xff, 0x69, 0x07, 0x9e, 0xa2, 0x03, 0x1f, 0x0b, 0xbd, 0x93, 0x3b, 0x42, 0x53, + 0x1a, 0xcf, 0x77, 0xd3, 0x1a, 0xcf, 0xbb, 0x79, 0x56, 0xbe, 0x0c, 0x09, 0xa7, 0x5a, 0xf1, 0xee, + 0x00, 0xf1, 0xa4, 0xdb, 0x36, 0x32, 0x67, 0x4b, 0xa5, 0x27, 0x0c, 0x00, 0xc5, 0x21, 0x0e, 0xcc, + 0xa8, 0x45, 0x3a, 0xf0, 0x92, 0x4f, 0x9d, 0xc0, 0x72, 0xa8, 0x9d, 0x84, 0x13, 
0xda, 0xd0, 0x2b, + 0x12, 0xee, 0xa5, 0x4e, 0x16, 0x13, 0x66, 0xd7, 0xcd, 0x33, 0x0f, 0xfc, 0x4b, 0xe0, 0x2a, 0xa7, + 0xe8, 0x9a, 0x89, 0x69, 0x2c, 0x1f, 0xa6, 0x35, 0x96, 0xf7, 0xf2, 0xbf, 0xb7, 0xf1, 0xb4, 0x95, + 0x25, 0x00, 0xfe, 0x16, 0xe2, 0xea, 0x4a, 0xb8, 0x48, 0x63, 0x48, 0xc1, 0x18, 0x17, 0x5b, 0x80, + 0x54, 0x3f, 0xc7, 0x35, 0x95, 0x70, 0x01, 0xea, 0xc4, 0x89, 0x98, 0xe4, 0x1d, 0xa9, 0xed, 0x54, + 0xc6, 0xd6, 0x76, 0xee, 0x00, 0x49, 0x18, 0x1e, 0x05, 0x5e, 0x35, 0x19, 0x7f, 0xbc, 0x3e, 0xc4, + 0x81, 0x19, 0xb5, 0x46, 0x0c, 0xe5, 0xda, 0x64, 0x87, 0x72, 0x7d, 0xfc, 0xa1, 0x4c, 0xde, 0x83, + 0xcb, 0x5c, 0x94, 0xec, 0x9f, 0x24, 0xb0, 0xd0, 0x7b, 0x3e, 0x2b, 0x81, 0x2f, 0xe3, 0x28, 0x46, + 0x1c, 0x8d, 0xc1, 0xde, 0x8f, 0xe1, 0x51, 0x93, 0x09, 0xd7, 0xed, 0xd1, 0x3a, 0xd1, 0x4a, 0x06, + 0x0f, 0x66, 0xd6, 0x64, 0x43, 0x2c, 0x60, 0xc3, 0x50, 0xdf, 0xb5, 0xa9, 0x29, 0xe3, 0xaf, 0xc3, + 0x21, 0xb6, 0xbd, 0xd1, 0x91, 0x14, 0x8c, 0x71, 0x65, 0xa9, 0x29, 0x53, 0x67, 0x54, 0x53, 0x6e, + 0x71, 0x2b, 0xfd, 0x5e, 0x42, 0x1b, 0x92, 0xba, 0x4e, 0x18, 0x51, 0xbf, 0x92, 0x66, 0xc0, 0xe1, + 0x3a, 0x5c, 0x4b, 0x34, 0x3c, 0xab, 0x1f, 0xf8, 0x49, 0xac, 0x99, 0x94, 0x96, 0x98, 0xc1, 0x83, + 0x99, 0x35, 0x99, 0x7e, 0xbe, 0x4f, 0x75, 0x3b, 0xd8, 0x4f, 0x02, 0xce, 0x26, 0xf5, 0xf3, 0xdb, + 0xc3, 0x2c, 0x98, 0x55, 0x2f, 0x73, 0x41, 0x9a, 0x7b, 0x3e, 0xd5, 0xaa, 0xef, 0x94, 0xe0, 0xf2, + 0x2d, 0x1a, 0x84, 0xa1, 0x69, 0x9f, 0x9a, 0x51, 0x3e, 0x06, 0x33, 0xca, 0x6f, 0x55, 0xe0, 0xc2, + 0x2d, 0x1a, 0x0c, 0x69, 0x63, 0xff, 0x9f, 0x76, 0xff, 0x26, 0x5c, 0x88, 0xa2, 0x21, 0x3b, 0x81, + 0xeb, 0x89, 0xb5, 0x3c, 0xb5, 0x5b, 0xee, 0x0c, 0xb3, 0x60, 0x56, 0x3d, 0xf2, 0x0d, 0x78, 0x99, + 0x2f, 0xf5, 0x4e, 0x57, 0xd8, 0x67, 0x85, 0x31, 0x21, 0x76, 0x9e, 0xa7, 0x25, 0x21, 0x5f, 0xee, + 0x64, 0xb3, 0xe1, 0xa8, 0xfa, 0xe4, 0xdb, 0x30, 0xd5, 0xb7, 0xfa, 0xd4, 0xb6, 0x1c, 0xae, 0x9f, + 0xe5, 0x0e, 0x22, 0xda, 0x8a, 0x81, 0x45, 0x1b, 0xb8, 0x78, 0x29, 0x26, 0x04, 0x66, 0x8e, 0xd4, + 0xfa, 0x39, 0x8e, 
0xd4, 0xff, 0x51, 0x84, 0xda, 0x2d, 0xcf, 0x1d, 0xf4, 0xdb, 0x47, 0xa4, 0x0b, + 0xd5, 0x87, 0xdc, 0x79, 0x26, 0x5d, 0x53, 0xe3, 0x9f, 0x28, 0x10, 0x3e, 0xb8, 0x48, 0x25, 0x12, + 0xff, 0x51, 0xc2, 0xb3, 0x41, 0x7c, 0x40, 0x8f, 0xa8, 0x29, 0x7d, 0x68, 0xe1, 0x20, 0xbe, 0xcb, + 0x0a, 0x51, 0xd0, 0x48, 0x0f, 0x66, 0x75, 0xdb, 0x76, 0x1f, 0x52, 0x73, 0x43, 0x0f, 0xb8, 0xdf, + 0x5b, 0xfa, 0x56, 0xce, 0x6a, 0x96, 0xe6, 0xc1, 0x0c, 0xcb, 0x49, 0x28, 0x4c, 0x63, 0x93, 0xf7, + 0xa1, 0xe6, 0x07, 0xae, 0xa7, 0x94, 0xad, 0xe6, 0xd2, 0xca, 0xf8, 0x2f, 0xbd, 0xfd, 0xf5, 0x8e, + 0x80, 0x12, 0x36, 0x7b, 0xf9, 0x07, 0x95, 0x00, 0xed, 0x37, 0x0b, 0x00, 0xb7, 0xb7, 0xb7, 0xb7, + 0xa4, 0x7b, 0xc1, 0x84, 0xb2, 0x3e, 0x08, 0x1d, 0x95, 0xe3, 0x3b, 0x04, 0x13, 0x81, 0xbc, 0xd2, + 0x87, 0x37, 0x08, 0xf6, 0x91, 0xa3, 0x93, 0x9f, 0x84, 0x9a, 0x54, 0x90, 0x65, 0xb7, 0x87, 0xf1, + 0x14, 0x52, 0x89, 0x46, 0x45, 0xd7, 0xfe, 0x56, 0x11, 0x60, 0xdd, 0xb4, 0x69, 0x47, 0x1d, 0x02, + 0x69, 0x04, 0xfb, 0x1e, 0xf5, 0xf7, 0x5d, 0xdb, 0x1c, 0xd3, 0x9b, 0xca, 0x6d, 0xfe, 0xdb, 0x0a, + 0x04, 0x23, 0x3c, 0x62, 0xc2, 0x94, 0x1f, 0xd0, 0xbe, 0x8a, 0xed, 0x1d, 0xd3, 0x89, 0x32, 0x27, + 0xec, 0x22, 0x11, 0x0e, 0x26, 0x50, 0x89, 0x0e, 0x4d, 0xcb, 0x31, 0xc4, 0x07, 0xd2, 0x3e, 0x1a, + 0x73, 0x20, 0xcd, 0xb2, 0x1d, 0xc7, 0x7a, 0x04, 0x83, 0x71, 0x4c, 0xed, 0xf7, 0x8a, 0x70, 0x89, + 0xcb, 0x63, 0xcd, 0x48, 0x44, 0xf0, 0x92, 0x3f, 0x39, 0x74, 0x60, 0xf5, 0x8f, 0x9f, 0x4e, 0xb4, + 0x38, 0xef, 0xb8, 0x49, 0x03, 0x3d, 0xd2, 0xe7, 0xa2, 0xb2, 0xd8, 0x29, 0xd5, 0x01, 0x94, 0x7d, + 0x36, 0x5f, 0x89, 0xde, 0xeb, 0x8c, 0x3d, 0x84, 0xb2, 0x1f, 0x80, 0xcf, 0x5e, 0xa1, 0xd7, 0x98, + 0xcf, 0x5a, 0x5c, 0x1c, 0xf9, 0x15, 0xa8, 0xfa, 0x81, 0x1e, 0x0c, 0xd4, 0xa7, 0xb9, 0x33, 0x69, + 0xc1, 0x1c, 0x3c, 0x9a, 0x47, 0xc4, 0x7f, 0x94, 0x42, 0xb5, 0xdf, 0x2b, 0xc0, 0x95, 0xec, 0x8a, + 0x1b, 0x96, 0x1f, 0x90, 0x3f, 0x31, 0xd4, 0xed, 0xa7, 0x7c, 0xe3, 0xac, 0x36, 0xef, 0xf4, 0xf0, + 0x4c, 0x83, 0x2a, 0x89, 0x75, 0x79, 0x00, 0x15, 0x2b, 
0xa0, 0x3d, 0xb5, 0xbf, 0xbc, 0x3f, 0xe1, + 0x47, 0x8f, 0x2d, 0xed, 0x4c, 0x0a, 0x0a, 0x61, 0xda, 0xf7, 0x8a, 0xa3, 0x1e, 0x99, 0x2f, 0x1f, + 0x76, 0x32, 0x4a, 0xfc, 0x6e, 0xbe, 0x28, 0xf1, 0x64, 0x83, 0x86, 0x83, 0xc5, 0xff, 0xd4, 0x70, + 0xb0, 0xf8, 0xfd, 0xfc, 0xc1, 0xe2, 0xa9, 0x6e, 0x18, 0x19, 0x33, 0xfe, 0x51, 0x09, 0xae, 0x3e, + 0x69, 0xd8, 0xb0, 0xf5, 0x4c, 0x8e, 0xce, 0xbc, 0xeb, 0xd9, 0x93, 0xc7, 0x21, 0x59, 0x82, 0x4a, + 0x7f, 0x5f, 0xf7, 0x95, 0x52, 0x76, 0x35, 0x0c, 0x33, 0x64, 0x85, 0x8f, 0xd9, 0xa4, 0xc1, 0x95, + 0x39, 0xfe, 0x17, 0x05, 0x2b, 0x9b, 0x8e, 0x7b, 0xd4, 0xf7, 0x23, 0x9b, 0x40, 0x38, 0x1d, 0x6f, + 0x8a, 0x62, 0x54, 0x74, 0x12, 0x40, 0x55, 0x98, 0x98, 0xe5, 0xca, 0x34, 0x7e, 0x20, 0x57, 0xc6, + 0xc1, 0x82, 0xe8, 0xa1, 0xa4, 0xb7, 0x42, 0xca, 0x22, 0x0b, 0x50, 0x0e, 0xa2, 0x30, 0x6f, 0xb5, + 0x35, 0x2f, 0x67, 0xe8, 0xa7, 0x9c, 0x8f, 0x6d, 0xec, 0xdd, 0x5d, 0x6e, 0x54, 0x37, 0xa5, 0xff, + 0xdc, 0x72, 0x1d, 0xae, 0x90, 0x95, 0xa2, 0x8d, 0xfd, 0xfd, 0x21, 0x0e, 0xcc, 0xa8, 0xa5, 0xfd, + 0x9b, 0x3a, 0x5c, 0xca, 0x1e, 0x0f, 0xac, 0xdf, 0x0e, 0xa9, 0xe7, 0x33, 0xec, 0x42, 0xb2, 0xdf, + 0x1e, 0x88, 0x62, 0x54, 0xf4, 0x4f, 0x74, 0xc0, 0xd9, 0x6f, 0x15, 0xe0, 0xb2, 0x27, 0x7d, 0x44, + 0xcf, 0x22, 0xe8, 0xec, 0x15, 0x61, 0xce, 0x18, 0x21, 0x10, 0x47, 0xb7, 0x85, 0xfc, 0x8d, 0x02, + 0xcc, 0xf7, 0x52, 0x76, 0x8e, 0x73, 0x3c, 0x73, 0xc9, 0xcf, 0x51, 0x6c, 0x8e, 0x90, 0x87, 0x23, + 0x5b, 0x42, 0xbe, 0x0d, 0xcd, 0x3e, 0x1b, 0x17, 0x7e, 0x40, 0x1d, 0x43, 0x05, 0x88, 0x8e, 0xff, + 0x25, 0x6d, 0x45, 0x58, 0xe1, 0x99, 0x2b, 0xae, 0x1f, 0xc4, 0x08, 0x18, 0x97, 0xf8, 0x9c, 0x1f, + 0xb2, 0xbc, 0x01, 0x75, 0x9f, 0x06, 0x81, 0xe5, 0x74, 0xc5, 0x7e, 0xa3, 0x21, 0xbe, 0x95, 0x8e, + 0x2c, 0xc3, 0x90, 0x4a, 0x7e, 0x0a, 0x1a, 0xdc, 0xe5, 0xb4, 0xec, 0x75, 0xfd, 0xf9, 0x06, 0x0f, + 0x17, 0x9b, 0x16, 0x01, 0x70, 0xb2, 0x10, 0x23, 0x3a, 0xf9, 0x12, 0x4c, 0xed, 0xf2, 0xcf, 0x57, + 0x9e, 0xbb, 0x17, 0x36, 0x2e, 0xae, 0xad, 0xb5, 0x63, 0xe5, 0x98, 0xe0, 0x22, 0x4b, 0x00, 
0x34, + 0xf4, 0xcb, 0xa5, 0xed, 0x59, 0x91, 0xc7, 0x0e, 0x63, 0x5c, 0xe4, 0x15, 0x28, 0x05, 0xb6, 0xcf, + 0x6d, 0x58, 0xf5, 0x68, 0x0b, 0xba, 0xbd, 0xd1, 0x41, 0x56, 0xae, 0xfd, 0xb8, 0x00, 0xb3, 0xa9, + 0xe3, 0x48, 0xac, 0xca, 0xc0, 0xb3, 0xe5, 0x34, 0x12, 0x56, 0xd9, 0xc1, 0x0d, 0x64, 0xe5, 0xe4, + 0x3d, 0xa9, 0x96, 0x17, 0x73, 0xa6, 0x18, 0xb9, 0xa7, 0x07, 0x3e, 0xd3, 0xc3, 0x87, 0x34, 0x72, + 0xee, 0xe6, 0x8b, 0xda, 0x23, 0xd7, 0x81, 0x98, 0x9b, 0x2f, 0xa2, 0x61, 0x82, 0x33, 0x65, 0xf0, + 0x2b, 0x9f, 0xc6, 0xe0, 0xa7, 0xfd, 0x7a, 0x31, 0xd6, 0x03, 0x52, 0xb3, 0x7f, 0x4a, 0x0f, 0x7c, + 0x81, 0x2d, 0xa0, 0xe1, 0xe2, 0xde, 0x88, 0xaf, 0x7f, 0x7c, 0x31, 0x96, 0x54, 0xf2, 0xb6, 0xe8, + 0xfb, 0x52, 0xce, 0x83, 0xdc, 0xdb, 0x1b, 0x1d, 0x11, 0x5d, 0xa5, 0xde, 0x5a, 0xf8, 0x0a, 0xca, + 0xe7, 0xf4, 0x0a, 0xb4, 0x7f, 0x5e, 0x82, 0xe6, 0x1d, 0x77, 0xf7, 0x13, 0x12, 0x41, 0x9d, 0xbd, + 0x4c, 0x15, 0x3f, 0xc6, 0x65, 0x6a, 0x07, 0x5e, 0x0e, 0x02, 0xbb, 0x43, 0x0d, 0xd7, 0x31, 0xfd, + 0xe5, 0xbd, 0x80, 0x7a, 0x6b, 0x96, 0x63, 0xf9, 0xfb, 0xd4, 0x94, 0xee, 0xa4, 0xcf, 0x9c, 0x1c, + 0xb7, 0x5e, 0xde, 0xde, 0xde, 0xc8, 0x62, 0xc1, 0x51, 0x75, 0xf9, 0xb4, 0x21, 0xce, 0x8e, 0xf2, + 0xb3, 0x55, 0x32, 0xe6, 0x46, 0x4c, 0x1b, 0xb1, 0x72, 0x4c, 0x70, 0x69, 0xdf, 0x2f, 0x42, 0x23, + 0x4c, 0x1e, 0x41, 0x3e, 0x0f, 0xb5, 0x5d, 0xcf, 0x3d, 0xa0, 0x9e, 0xf0, 0xdc, 0xc9, 0xb3, 0x55, + 0x6d, 0x51, 0x84, 0x8a, 0x46, 0x3e, 0x07, 0x95, 0xc0, 0xed, 0x5b, 0x46, 0xda, 0xa0, 0xb6, 0xcd, + 0x0a, 0x51, 0xd0, 0xce, 0x6f, 0x80, 0x7f, 0x21, 0xa1, 0xda, 0x35, 0x46, 0x2a, 0x63, 0xef, 0x42, + 0xd9, 0xd7, 0x7d, 0x5b, 0xae, 0xa7, 0x39, 0xf2, 0x30, 0x2c, 0x77, 0x36, 0x64, 0x1e, 0x86, 0xe5, + 0xce, 0x06, 0x72, 0x50, 0xed, 0x0f, 0x8b, 0xd0, 0x14, 0xfd, 0x26, 0x66, 0x85, 0x49, 0xf6, 0xdc, + 0x9b, 0x3c, 0x94, 0xc2, 0x1f, 0xf4, 0xa8, 0xc7, 0xcd, 0x4c, 0x72, 0x92, 0x8b, 0xfb, 0x07, 0x22, + 0x62, 0x18, 0x4e, 0x11, 0x15, 0xa9, 0xae, 0x2f, 0x9f, 0x63, 0xd7, 0x57, 0x4e, 0xd5, 0xf5, 0xd5, + 0xf3, 0xe8, 0xfa, 0x0f, 0x8b, 
0xd0, 0xd8, 0xb0, 0xf6, 0xa8, 0x71, 0x64, 0xd8, 0xfc, 0x14, 0xa9, + 0x49, 0x6d, 0x1a, 0xd0, 0x5b, 0x9e, 0x6e, 0xd0, 0x2d, 0xea, 0x59, 0x3c, 0xb9, 0x12, 0xfb, 0x3e, + 0xf8, 0x0c, 0x24, 0x4f, 0x91, 0xae, 0x8e, 0xe0, 0xc1, 0x91, 0xb5, 0xc9, 0x3a, 0x4c, 0x99, 0xd4, + 0xb7, 0x3c, 0x6a, 0x6e, 0xc5, 0x36, 0x2a, 0x9f, 0x57, 0x4b, 0xcd, 0x6a, 0x8c, 0xf6, 0xf8, 0xb8, + 0x35, 0xad, 0x0c, 0x94, 0x62, 0xc7, 0x92, 0xa8, 0xca, 0x3e, 0xf9, 0xbe, 0x3e, 0xf0, 0xb3, 0xda, + 0x18, 0xfb, 0xe4, 0xb7, 0xb2, 0x59, 0x70, 0x54, 0x5d, 0xad, 0x02, 0xa5, 0x0d, 0xb7, 0xab, 0x7d, + 0xaf, 0x04, 0x61, 0x16, 0x2e, 0xf2, 0x67, 0x0b, 0xd0, 0xd4, 0x1d, 0xc7, 0x0d, 0x64, 0x86, 0x2b, + 0xe1, 0x81, 0xc7, 0xdc, 0xc9, 0xbe, 0x16, 0x96, 0x23, 0x50, 0xe1, 0xbc, 0x0d, 0x1d, 0xca, 0x31, + 0x0a, 0xc6, 0x65, 0x93, 0x41, 0xca, 0x9f, 0xbc, 0x99, 0xbf, 0x15, 0xa7, 0xf0, 0x1e, 0x5f, 0xf9, + 0x1a, 0xcc, 0xa5, 0x1b, 0x7b, 0x16, 0x77, 0x50, 0x2e, 0xc7, 0x7c, 0x11, 0x20, 0x8a, 0x29, 0x79, + 0x06, 0x46, 0x2c, 0x2b, 0x61, 0xc4, 0x1a, 0x3f, 0x15, 0x42, 0xd4, 0xe8, 0x91, 0x86, 0xab, 0x6f, + 0xa5, 0x0c, 0x57, 0xeb, 0x93, 0x10, 0xf6, 0x64, 0x63, 0xd5, 0x2e, 0x5c, 0x88, 0x78, 0xa3, 0x6f, + 0xfe, 0x6e, 0xea, 0xcb, 0x14, 0xba, 0xd8, 0x17, 0x47, 0x7c, 0x99, 0xb3, 0xb1, 0x20, 0x9f, 0xe1, + 0x6f, 0x53, 0xfb, 0x9b, 0x05, 0x98, 0x8b, 0x0b, 0xe1, 0xe7, 0xb6, 0xbf, 0x0c, 0xd3, 0x1e, 0xd5, + 0xcd, 0xb6, 0x1e, 0x18, 0xfb, 0x3c, 0x9c, 0xbc, 0xc0, 0xe3, 0xbf, 0xf9, 0x09, 0x33, 0x8c, 0x13, + 0x30, 0xc9, 0x47, 0x74, 0x68, 0xb2, 0x82, 0x6d, 0xab, 0x47, 0xdd, 0x41, 0x30, 0xa6, 0x65, 0x96, + 0x6f, 0x8a, 0x30, 0x82, 0xc1, 0x38, 0xa6, 0xf6, 0x51, 0x01, 0x66, 0xe2, 0x0d, 0x3e, 0x77, 0xab, + 0xdd, 0x7e, 0xd2, 0x6a, 0xb7, 0x32, 0x81, 0xf7, 0x3e, 0xc2, 0x52, 0xf7, 0x9d, 0x66, 0xfc, 0xd1, + 0xb8, 0x75, 0x2e, 0x6e, 0x90, 0x28, 0x3c, 0xd1, 0x20, 0xf1, 0xc9, 0x4f, 0xee, 0x34, 0x4a, 0x93, + 0x2e, 0x3f, 0xc7, 0x9a, 0xf4, 0xc7, 0x99, 0x21, 0x2a, 0x96, 0xe5, 0xa8, 0x9a, 0x23, 0xcb, 0x51, + 0x2f, 0xcc, 0x72, 0x54, 0x9b, 0xd8, 0xc4, 0x76, 0x9a, 0x4c, 0x47, 
0xf5, 0x67, 0x9a, 0xe9, 0xa8, + 0x71, 0x5e, 0x99, 0x8e, 0x20, 0x6f, 0xa6, 0xa3, 0xef, 0x16, 0x60, 0xc6, 0x4c, 0x9c, 0xca, 0x95, + 0xe7, 0xe1, 0xc7, 0x5f, 0xce, 0x92, 0x87, 0x7c, 0xc5, 0xb1, 0xac, 0x64, 0x19, 0xa6, 0x44, 0x66, + 0xe5, 0x17, 0x9a, 0xfa, 0x58, 0xf2, 0x0b, 0x91, 0x5f, 0x81, 0x86, 0xad, 0xd6, 0x3a, 0x99, 0x75, + 0x71, 0x63, 0x22, 0x43, 0x52, 0x62, 0x46, 0x91, 0xff, 0x61, 0x11, 0x46, 0x12, 0xb5, 0x3f, 0xa8, + 0xc5, 0x17, 0xc4, 0x67, 0xed, 0x17, 0x78, 0x3d, 0xe9, 0x17, 0xb8, 0x9e, 0xf6, 0x0b, 0x0c, 0xad, + 0xe6, 0xd2, 0x37, 0xf0, 0xd3, 0xb1, 0x75, 0xa2, 0xc4, 0x13, 0x1b, 0x85, 0x43, 0x2e, 0x63, 0xad, + 0x58, 0x86, 0x59, 0xa9, 0x04, 0x28, 0x22, 0x9f, 0x64, 0xa7, 0xa3, 0x48, 0xae, 0xd5, 0x24, 0x19, + 0xd3, 0xfc, 0x4c, 0xa0, 0xaf, 0xf2, 0xdb, 0x8a, 0xdd, 0x50, 0x34, 0xc6, 0x55, 0xee, 0xd9, 0x90, + 0x83, 0xed, 0x9c, 0x3c, 0xaa, 0xfb, 0xd2, 0xba, 0x1f, 0xdb, 0x39, 0x21, 0x2f, 0x45, 0x49, 0x8d, + 0xbb, 0x38, 0x6a, 0x4f, 0x71, 0x71, 0xe8, 0xd0, 0xb4, 0x75, 0x3f, 0x10, 0x83, 0xc9, 0x94, 0xb3, + 0xc9, 0x1f, 0x3b, 0xdd, 0xba, 0xcf, 0x74, 0x89, 0x48, 0x81, 0xdf, 0x88, 0x60, 0x30, 0x8e, 0x49, + 0x4c, 0x98, 0x62, 0x7f, 0xf9, 0xcc, 0x62, 0x2e, 0x07, 0x32, 0x0b, 0xdc, 0x59, 0x64, 0x84, 0x96, + 0xb9, 0x8d, 0x18, 0x0e, 0x26, 0x50, 0x47, 0x78, 0x41, 0x60, 0x1c, 0x2f, 0x08, 0xf9, 0x79, 0xa1, + 0xb8, 0x1d, 0x85, 0xaf, 0xb5, 0xc9, 0x5f, 0x6b, 0x18, 0x05, 0x8a, 0x71, 0x22, 0x26, 0x79, 0xd9, + 0xa8, 0x18, 0xc8, 0x6e, 0x50, 0xd5, 0xa7, 0x92, 0xa3, 0x62, 0x27, 0x49, 0xc6, 0x34, 0x3f, 0xd9, + 0x82, 0x8b, 0x61, 0x51, 0xbc, 0x19, 0xd3, 0x1c, 0x27, 0x0c, 0xcb, 0xdb, 0xc9, 0xe0, 0xc1, 0xcc, + 0x9a, 0xfc, 0x9c, 0xcb, 0xc0, 0xf3, 0xa8, 0x13, 0xdc, 0xd6, 0xfd, 0x7d, 0x19, 0xdf, 0x17, 0x9d, + 0x73, 0x89, 0x48, 0x18, 0xe7, 0x23, 0x4b, 0x00, 0x02, 0x8e, 0xd7, 0x9a, 0x4d, 0x86, 0xd0, 0xee, + 0x84, 0x14, 0x8c, 0x71, 0x69, 0xdf, 0x6d, 0x40, 0xf3, 0x9e, 0x1e, 0x58, 0x87, 0x94, 0xbb, 0x2c, + 0xcf, 0xc7, 0x6f, 0xf4, 0x57, 0x0a, 0x70, 0x29, 0x19, 0x97, 0x7a, 0x8e, 0xce, 0x23, 0x9e, 0x17, + 0x09, 
0x33, 0xa5, 0xe1, 0x88, 0x56, 0x70, 0x37, 0xd2, 0x50, 0x98, 0xeb, 0x79, 0xbb, 0x91, 0x3a, + 0xa3, 0x04, 0xe2, 0xe8, 0xb6, 0x7c, 0x52, 0xdc, 0x48, 0xcf, 0x77, 0x22, 0xcf, 0x94, 0x93, 0xab, + 0xf6, 0xdc, 0x38, 0xb9, 0xea, 0xcf, 0x85, 0xd6, 0xdf, 0x8f, 0x39, 0xb9, 0x1a, 0x39, 0x83, 0xad, + 0xe4, 0x51, 0x0e, 0x81, 0x36, 0xca, 0x59, 0xc6, 0xb3, 0x30, 0x28, 0xe7, 0x03, 0x53, 0x96, 0x77, + 0x75, 0xdf, 0x32, 0xa4, 0xda, 0x91, 0x23, 0x71, 0xb1, 0x4a, 0x68, 0x28, 0x62, 0x32, 0xf8, 0x5f, + 0x14, 0xd8, 0x51, 0xfe, 0xc6, 0x62, 0xae, 0xfc, 0x8d, 0x64, 0x05, 0xca, 0xce, 0x01, 0x3d, 0x3a, + 0x5b, 0x3e, 0x03, 0xbe, 0x09, 0xbc, 0x77, 0x97, 0x1e, 0x21, 0xaf, 0xac, 0x7d, 0xbf, 0x08, 0xc0, + 0x1e, 0xff, 0x74, 0xee, 0xa6, 0x9f, 0x84, 0x9a, 0x3f, 0xe0, 0x86, 0x21, 0xa9, 0x30, 0x45, 0x11, + 0x6a, 0xa2, 0x18, 0x15, 0x9d, 0x7c, 0x0e, 0x2a, 0xdf, 0x1a, 0xd0, 0x81, 0x8a, 0x9d, 0x08, 0xf7, + 0x0d, 0x5f, 0x67, 0x85, 0x28, 0x68, 0xe7, 0x67, 0x3a, 0x56, 0x6e, 0xa9, 0xca, 0x79, 0xb9, 0xa5, + 0x1a, 0x50, 0xbb, 0xe7, 0xf2, 0x80, 0x57, 0xed, 0xbf, 0x16, 0x01, 0xa2, 0x80, 0x42, 0xf2, 0x9b, + 0x05, 0x78, 0x29, 0xfc, 0xe0, 0x02, 0xb1, 0xfd, 0xe3, 0xb9, 0xc2, 0x73, 0xbb, 0xa8, 0xb2, 0x3e, + 0x76, 0x3e, 0x03, 0x6d, 0x65, 0x89, 0xc3, 0xec, 0x56, 0x10, 0x84, 0x3a, 0xed, 0xf5, 0x83, 0xa3, + 0x55, 0xcb, 0x93, 0x23, 0x30, 0x33, 0x6e, 0xf5, 0xa6, 0xe4, 0x11, 0x55, 0xa5, 0x8d, 0x82, 0x7f, + 0x44, 0x8a, 0x82, 0x21, 0x0e, 0xd9, 0x87, 0xba, 0xe3, 0xbe, 0xe7, 0xb3, 0xee, 0x90, 0xc3, 0xf1, + 0xad, 0xf1, 0xbb, 0x5c, 0x74, 0xab, 0x70, 0x69, 0xc8, 0x3f, 0x58, 0x73, 0x64, 0x67, 0xff, 0x46, + 0x11, 0x2e, 0x64, 0xf4, 0x03, 0x79, 0x0b, 0xe6, 0x64, 0xec, 0x66, 0x94, 0x34, 0xbf, 0x10, 0x25, + 0xcd, 0xef, 0xa4, 0x68, 0x38, 0xc4, 0x4d, 0xde, 0x03, 0xd0, 0x0d, 0x83, 0xfa, 0xfe, 0xa6, 0x6b, + 0xaa, 0xfd, 0xc0, 0x9b, 0x4c, 0x7d, 0x59, 0x0e, 0x4b, 0x1f, 0x1f, 0xb7, 0x7e, 0x26, 0x2b, 0x1c, + 0x3b, 0xd5, 0xcf, 0x51, 0x05, 0x8c, 0x41, 0x92, 0x6f, 0x02, 0x08, 0x1b, 0x40, 0x98, 0x31, 0xe2, + 0x29, 0x86, 0xb3, 0x05, 0x95, 0x90, 0x6c, 
0xe1, 0xeb, 0x03, 0xdd, 0x09, 0xac, 0xe0, 0x48, 0x24, + 0xe8, 0x79, 0x10, 0xa2, 0x60, 0x0c, 0x51, 0xfb, 0x27, 0x45, 0xa8, 0x2b, 0xb7, 0xc0, 0x33, 0xb0, + 0x05, 0x77, 0x13, 0xb6, 0xe0, 0x09, 0x05, 0x60, 0x67, 0x59, 0x82, 0xdd, 0x94, 0x25, 0xf8, 0x56, + 0x7e, 0x51, 0x4f, 0xb6, 0x03, 0xff, 0x4e, 0x11, 0x66, 0x14, 0x6b, 0x5e, 0x0b, 0xed, 0x57, 0x61, + 0x56, 0x04, 0x4e, 0x6c, 0xea, 0x8f, 0x44, 0xae, 0x22, 0xde, 0x61, 0x65, 0x11, 0xf3, 0xdc, 0x4e, + 0x92, 0x30, 0xcd, 0xcb, 0x86, 0xb5, 0x28, 0xda, 0x61, 0x9b, 0x30, 0xe1, 0x6a, 0x15, 0xfb, 0x4d, + 0x3e, 0xac, 0xdb, 0x29, 0x1a, 0x0e, 0x71, 0xa7, 0x4d, 0xc4, 0xe5, 0x73, 0x30, 0x11, 0xff, 0xbb, + 0x02, 0x4c, 0x45, 0xfd, 0x75, 0xee, 0x06, 0xe2, 0xbd, 0xa4, 0x81, 0x78, 0x39, 0xf7, 0x70, 0x18, + 0x61, 0x1e, 0xfe, 0x0b, 0x35, 0x48, 0x9c, 0x03, 0x20, 0xbb, 0x70, 0xc5, 0xca, 0x8c, 0x66, 0x8c, + 0xcd, 0x36, 0xe1, 0xc1, 0xf6, 0xf5, 0x91, 0x9c, 0xf8, 0x04, 0x14, 0x32, 0x80, 0xfa, 0x21, 0xf5, + 0x02, 0xcb, 0xa0, 0xea, 0xf9, 0x6e, 0xe5, 0x56, 0xc9, 0xa4, 0x11, 0x3c, 0xec, 0xd3, 0x07, 0x52, + 0x00, 0x86, 0xa2, 0xc8, 0x2e, 0x54, 0xa8, 0xd9, 0xa5, 0x2a, 0x7b, 0x54, 0xce, 0x6c, 0xbe, 0x61, + 0x7f, 0xb2, 0x7f, 0x3e, 0x0a, 0x68, 0xe2, 0xc7, 0x0d, 0x4d, 0xe5, 0x9c, 0x0a, 0xd6, 0x29, 0xcd, + 0x4b, 0xe4, 0x20, 0xb4, 0xb6, 0x56, 0x26, 0x34, 0x79, 0x3c, 0xc1, 0xd6, 0xea, 0x43, 0xe3, 0xa1, + 0x1e, 0x50, 0xaf, 0xa7, 0x7b, 0x07, 0x72, 0xb7, 0x31, 0xfe, 0x13, 0xbe, 0xad, 0x90, 0xa2, 0x27, + 0x0c, 0x8b, 0x30, 0x92, 0x43, 0x5c, 0x68, 0x04, 0x52, 0x7d, 0x56, 0x26, 0xe5, 0xf1, 0x85, 0x2a, + 0x45, 0xdc, 0x97, 0xe7, 0x01, 0xd4, 0x5f, 0x8c, 0x64, 0x90, 0xc3, 0x44, 0xea, 0x77, 0x91, 0xf0, + 0xbf, 0x9d, 0xc3, 0x35, 0x21, 0xa1, 0xa2, 0xe5, 0x26, 0x3b, 0x85, 0xbc, 0xf6, 0x3f, 0x2b, 0xd1, + 0xb4, 0xfc, 0xac, 0xed, 0x84, 0x5f, 0x4a, 0xda, 0x09, 0xaf, 0xa5, 0xed, 0x84, 0x29, 0x7f, 0xfc, + 0xd9, 0x23, 0x88, 0x53, 0xe6, 0xb5, 0xf2, 0x39, 0x98, 0xd7, 0x5e, 0x85, 0xe6, 0x21, 0x9f, 0x09, + 0x44, 0x2a, 0xaa, 0x0a, 0x5f, 0x46, 0xf8, 0xcc, 0xfe, 0x20, 0x2a, 0xc6, 0x38, 
0x0f, 0xab, 0x22, + 0x2f, 0xbb, 0x09, 0xb3, 0x3f, 0xcb, 0x2a, 0x9d, 0xa8, 0x18, 0xe3, 0x3c, 0x3c, 0xf8, 0xd0, 0x72, + 0x0e, 0x44, 0x85, 0x1a, 0xaf, 0x20, 0x82, 0x0f, 0x55, 0x21, 0x46, 0x74, 0x72, 0x03, 0xea, 0x03, + 0x73, 0x4f, 0xf0, 0xd6, 0x39, 0x2f, 0xd7, 0x30, 0x77, 0x56, 0xd7, 0x64, 0x6a, 0x2c, 0x45, 0x65, + 0x2d, 0xe9, 0xe9, 0x7d, 0x45, 0xe0, 0x7b, 0x43, 0xd9, 0x92, 0xcd, 0xa8, 0x18, 0xe3, 0x3c, 0xe4, + 0xe7, 0x60, 0xc6, 0xa3, 0xe6, 0xc0, 0xa0, 0x61, 0x2d, 0xe0, 0xb5, 0x64, 0xce, 0xd0, 0x38, 0x05, + 0x53, 0x9c, 0x23, 0x8c, 0x84, 0xcd, 0xb1, 0x8c, 0x84, 0x5f, 0x83, 0x19, 0xd3, 0xd3, 0x2d, 0x87, + 0x9a, 0xf7, 0x1d, 0x1e, 0x74, 0x21, 0x43, 0x20, 0x43, 0x03, 0xfd, 0x6a, 0x82, 0x8a, 0x29, 0x6e, + 0xed, 0x5f, 0x14, 0xa1, 0x22, 0x32, 0x99, 0xae, 0xc3, 0x05, 0xcb, 0xb1, 0x02, 0x4b, 0xb7, 0x57, + 0xa9, 0xad, 0x1f, 0x25, 0x03, 0x4f, 0x5e, 0x66, 0x1b, 0xed, 0xf5, 0x61, 0x32, 0x66, 0xd5, 0x61, + 0x9d, 0x13, 0x88, 0xe5, 0x5b, 0xa1, 0x08, 0x3b, 0x9a, 0x48, 0xa3, 0x9d, 0xa0, 0x60, 0x8a, 0x93, + 0x29, 0x43, 0xfd, 0x8c, 0xa8, 0x12, 0xae, 0x0c, 0x25, 0x63, 0x49, 0x92, 0x7c, 0x5c, 0x49, 0x1f, + 0x70, 0x85, 0x38, 0x3c, 0x68, 0x24, 0x03, 0xc7, 0x84, 0x92, 0x9e, 0xa2, 0xe1, 0x10, 0x37, 0x43, + 0xd8, 0xd3, 0x2d, 0x7b, 0xe0, 0xd1, 0x08, 0xa1, 0x12, 0x21, 0xac, 0xa5, 0x68, 0x38, 0xc4, 0xad, + 0xfd, 0xf7, 0x02, 0x90, 0xe1, 0xa3, 0x13, 0x64, 0x1f, 0xaa, 0x0e, 0xb7, 0x45, 0xe6, 0xce, 0xde, + 0x1f, 0x33, 0x69, 0x8a, 0x45, 0x42, 0x16, 0x48, 0x7c, 0xe2, 0x40, 0x9d, 0x3e, 0x0a, 0xa8, 0xe7, + 0x84, 0x47, 0xa9, 0x26, 0x73, 0x53, 0x80, 0xd8, 0x9b, 0x49, 0x64, 0x0c, 0x65, 0x68, 0xbf, 0x5f, + 0x84, 0x66, 0x8c, 0xef, 0x69, 0x5b, 0x7c, 0x9e, 0xcd, 0x41, 0x98, 0x00, 0x77, 0x3c, 0x5b, 0xce, + 0x77, 0xb1, 0x6c, 0x0e, 0x92, 0x84, 0x1b, 0x18, 0xe7, 0x23, 0x4b, 0x00, 0x3d, 0xdd, 0x0f, 0xa8, + 0xc7, 0x75, 0xa1, 0x54, 0x0e, 0x85, 0xcd, 0x90, 0x82, 0x31, 0x2e, 0x72, 0x5d, 0xde, 0xf5, 0x50, + 0x4e, 0xe6, 0xbc, 0x1c, 0x71, 0x91, 0x43, 0x65, 0x02, 0x17, 0x39, 0x90, 0x2e, 0xcc, 0xa9, 0x56, + 0x2b, 0xea, 0xd9, 
0x32, 0x22, 0x8a, 0x81, 0x9a, 0x82, 0xc0, 0x21, 0x50, 0xed, 0xfb, 0x05, 0x98, + 0x4e, 0x18, 0xa0, 0x44, 0xb6, 0x4a, 0x75, 0xf0, 0x27, 0x91, 0xad, 0x32, 0x76, 0x5e, 0xe7, 0x0b, + 0x50, 0x15, 0x1d, 0x94, 0x8e, 0xe7, 0x15, 0x5d, 0x88, 0x92, 0xca, 0x56, 0x16, 0x69, 0xe2, 0x4e, + 0xaf, 0x2c, 0xd2, 0x06, 0x8e, 0x8a, 0x2e, 0x3c, 0x47, 0xa2, 0x75, 0xb2, 0xa7, 0x63, 0x9e, 0x23, + 0x51, 0x8e, 0x21, 0x87, 0xf6, 0x0f, 0x78, 0xbb, 0x03, 0xef, 0x28, 0xdc, 0x59, 0x77, 0xa1, 0x26, + 0x63, 0x38, 0xe5, 0xa7, 0xf1, 0x56, 0x0e, 0xab, 0x18, 0xc7, 0x91, 0xd1, 0x8a, 0xba, 0x71, 0x70, + 0x7f, 0x6f, 0x0f, 0x15, 0x3a, 0xb9, 0x09, 0x0d, 0xd7, 0x91, 0x5f, 0xb0, 0x7c, 0xfc, 0x2f, 0xb2, + 0x95, 0xe3, 0xbe, 0x2a, 0x7c, 0x7c, 0xdc, 0xba, 0x14, 0xfe, 0x49, 0x34, 0x12, 0xa3, 0x9a, 0xda, + 0x9f, 0x29, 0xc0, 0x4b, 0xe8, 0xda, 0xb6, 0xe5, 0x74, 0x93, 0x9e, 0x4f, 0x62, 0xc3, 0x4c, 0x4f, + 0x7f, 0xb4, 0xe3, 0xe8, 0x87, 0xba, 0x65, 0xeb, 0xbb, 0x36, 0x7d, 0xea, 0xce, 0x78, 0x10, 0x58, + 0xf6, 0x82, 0xb8, 0xfb, 0x72, 0x61, 0xdd, 0x09, 0xee, 0x7b, 0x9d, 0xc0, 0xb3, 0x9c, 0xae, 0x98, + 0x25, 0x37, 0x13, 0x58, 0x98, 0xc2, 0xd6, 0xfe, 0xa0, 0x04, 0x3c, 0x8e, 0x90, 0x7c, 0x19, 0x1a, + 0x3d, 0x6a, 0xec, 0xeb, 0x8e, 0xe5, 0xab, 0xbc, 0xbf, 0x97, 0xd9, 0x73, 0x6d, 0xaa, 0xc2, 0xc7, + 0xec, 0x55, 0x2c, 0x77, 0x36, 0xf8, 0x51, 0x9d, 0x88, 0x97, 0x18, 0x50, 0xed, 0xfa, 0xbe, 0xde, + 0xb7, 0x72, 0x87, 0x98, 0x88, 0x3c, 0xab, 0x62, 0x3a, 0x12, 0xbf, 0x51, 0x42, 0x13, 0x03, 0x2a, + 0x7d, 0x5b, 0xb7, 0x9c, 0xdc, 0x77, 0xb5, 0xb1, 0x27, 0xd8, 0x62, 0x48, 0xc2, 0x54, 0xc9, 0x7f, + 0xa2, 0xc0, 0x26, 0x03, 0x68, 0xfa, 0x86, 0xa7, 0xf7, 0xfc, 0x7d, 0x7d, 0xe9, 0xb5, 0xd7, 0x73, + 0x2b, 0xff, 0x91, 0x28, 0xa1, 0x8b, 0xac, 0xe0, 0xf2, 0x66, 0xe7, 0xf6, 0xf2, 0xd2, 0x6b, 0xaf, + 0x63, 0x5c, 0x4e, 0x5c, 0xec, 0x6b, 0xaf, 0x2e, 0xc9, 0x19, 0x64, 0xe2, 0x62, 0x5f, 0x7b, 0x75, + 0x09, 0xe3, 0x72, 0xb4, 0xff, 0x55, 0x80, 0x46, 0xc8, 0x4b, 0x76, 0x00, 0xd8, 0x5c, 0x26, 0x33, + 0xa3, 0x9e, 0xe9, 0x5e, 0x1b, 0x6e, 0xed, 0xd9, 0x09, 
0x2b, 0x63, 0x0c, 0x28, 0x23, 0x75, 0x6c, + 0x71, 0xd2, 0xa9, 0x63, 0x17, 0xa1, 0xb1, 0xaf, 0x3b, 0xa6, 0xbf, 0xaf, 0x1f, 0x88, 0x29, 0x3d, + 0x96, 0x4c, 0xf9, 0xb6, 0x22, 0x60, 0xc4, 0xa3, 0xfd, 0xa3, 0x2a, 0x88, 0xb8, 0x10, 0x36, 0xe9, + 0x98, 0x96, 0x2f, 0x0e, 0x3f, 0x14, 0x78, 0xcd, 0x70, 0xd2, 0x59, 0x95, 0xe5, 0x18, 0x72, 0x90, + 0xcb, 0x50, 0xea, 0x59, 0x8e, 0xd4, 0x40, 0xb8, 0x21, 0x77, 0xd3, 0x72, 0x90, 0x95, 0x71, 0x92, + 0xfe, 0x48, 0x6a, 0x18, 0x82, 0xa4, 0x3f, 0x42, 0x56, 0x46, 0xbe, 0x0a, 0xb3, 0xb6, 0xeb, 0x1e, + 0xb0, 0xe9, 0x43, 0x29, 0x22, 0xc2, 0xab, 0xce, 0x4d, 0x2b, 0x1b, 0x49, 0x12, 0xa6, 0x79, 0xc9, + 0x0e, 0xbc, 0xfc, 0x01, 0xf5, 0x5c, 0x39, 0x5f, 0x76, 0x6c, 0x4a, 0xfb, 0x0a, 0x46, 0xa8, 0xc6, + 0x3c, 0x4a, 0xf6, 0x17, 0xb3, 0x59, 0x70, 0x54, 0x5d, 0x1e, 0x6f, 0xaf, 0x7b, 0x5d, 0x1a, 0x6c, + 0x79, 0x2e, 0xd3, 0x5d, 0x2c, 0xa7, 0xab, 0x60, 0xab, 0x11, 0xec, 0x76, 0x36, 0x0b, 0x8e, 0xaa, + 0x4b, 0xde, 0x81, 0x79, 0x41, 0x12, 0x6a, 0xcb, 0xb2, 0x98, 0x66, 0x2c, 0x5b, 0x5d, 0x71, 0x3a, + 0x2d, 0xfc, 0x65, 0xdb, 0x23, 0x78, 0x70, 0x64, 0x6d, 0x72, 0x07, 0xe6, 0x94, 0xb7, 0x74, 0x8b, + 0x7a, 0x9d, 0x30, 0x56, 0x68, 0xba, 0x7d, 0xed, 0xe4, 0xb8, 0x75, 0x65, 0x95, 0xf6, 0x3d, 0x6a, + 0xc4, 0xbd, 0xce, 0x8a, 0x0b, 0x87, 0xea, 0x11, 0x84, 0x4b, 0x3c, 0x20, 0x68, 0xa7, 0xbf, 0xe2, + 0xba, 0xb6, 0xe9, 0x3e, 0x74, 0xd4, 0xb3, 0x0b, 0x85, 0x9d, 0x3b, 0x48, 0x3b, 0x99, 0x1c, 0x38, + 0xa2, 0x26, 0x7b, 0x72, 0x4e, 0x59, 0x75, 0x1f, 0x3a, 0x69, 0x54, 0x88, 0x9e, 0xbc, 0x33, 0x82, + 0x07, 0x47, 0xd6, 0x26, 0x6b, 0x40, 0xd2, 0x4f, 0xb0, 0xd3, 0x97, 0x2e, 0xfc, 0x4b, 0x22, 0xc9, + 0x51, 0x9a, 0x8a, 0x19, 0x35, 0xc8, 0x06, 0x5c, 0x4c, 0x97, 0x32, 0x71, 0xd2, 0x9b, 0xcf, 0xd3, + 0x1b, 0x63, 0x06, 0x1d, 0x33, 0x6b, 0x69, 0xff, 0xb8, 0x08, 0xd3, 0x89, 0xac, 0x18, 0xcf, 0x5d, + 0xf6, 0x01, 0xb6, 0x79, 0xe8, 0xf9, 0xdd, 0xf5, 0xd5, 0xdb, 0x54, 0x37, 0xa9, 0x77, 0x97, 0xaa, + 0x0c, 0x26, 0x62, 0x59, 0x4c, 0x50, 0x30, 0xc5, 0x49, 0xf6, 0xa0, 0x22, 0xfc, 0x04, 0x79, 
0x6f, + 0x48, 0x52, 0x7d, 0xc4, 0x9d, 0x05, 0xf2, 0x5a, 0x31, 0xd7, 0xa3, 0x28, 0xe0, 0xb5, 0x00, 0xa6, + 0xe2, 0x1c, 0x6c, 0x22, 0x89, 0xd4, 0xde, 0x5a, 0x42, 0xe5, 0x5d, 0x87, 0x52, 0x10, 0x8c, 0x9b, + 0xd7, 0x40, 0xf8, 0x9d, 0xb6, 0x37, 0x90, 0x61, 0x68, 0x7b, 0xec, 0xdd, 0xf9, 0xbe, 0xe5, 0x3a, + 0x32, 0xc9, 0xfd, 0x0e, 0xd4, 0xe4, 0xee, 0x69, 0xcc, 0xbc, 0x0c, 0x5c, 0x57, 0x52, 0x66, 0x57, + 0x85, 0xa5, 0xfd, 0xfb, 0x22, 0x34, 0x42, 0x33, 0xc9, 0x29, 0x92, 0xc7, 0xbb, 0xd0, 0x08, 0x03, + 0x1a, 0x73, 0x5f, 0xff, 0x1a, 0xc5, 0xd9, 0xf1, 0x9d, 0x7d, 0xf8, 0x17, 0x23, 0x19, 0xf1, 0x60, + 0xc9, 0x52, 0x8e, 0x60, 0xc9, 0x3e, 0xd4, 0x02, 0xcf, 0xea, 0x76, 0xe5, 0x2e, 0x21, 0x4f, 0xb4, + 0x64, 0xd8, 0x5d, 0xdb, 0x02, 0x50, 0xf6, 0xac, 0xf8, 0x83, 0x4a, 0x8c, 0xf6, 0x3e, 0xcc, 0xa5, + 0x39, 0xb9, 0x0a, 0x6d, 0xec, 0x53, 0x73, 0x60, 0xab, 0x3e, 0x8e, 0x54, 0x68, 0x59, 0x8e, 0x21, + 0x07, 0xb9, 0x01, 0x75, 0xf6, 0x9a, 0x3e, 0x70, 0x1d, 0xa5, 0xc6, 0xf2, 0xdd, 0xc8, 0xb6, 0x2c, + 0xc3, 0x90, 0xaa, 0xfd, 0x97, 0x12, 0x5c, 0x8e, 0x8c, 0x5d, 0x9b, 0xba, 0xa3, 0x77, 0x4f, 0x71, + 0xe7, 0xe7, 0xa7, 0x27, 0xdd, 0xce, 0x7a, 0x03, 0x48, 0xe9, 0x39, 0xb8, 0x01, 0xe4, 0xff, 0x14, + 0x81, 0x07, 0x5f, 0x93, 0x6f, 0xc3, 0x94, 0x1e, 0xbb, 0xee, 0x59, 0xbe, 0xce, 0x9b, 0xb9, 0x5f, + 0x27, 0x8f, 0xf1, 0x0e, 0x03, 0xe0, 0xe2, 0xa5, 0x98, 0x10, 0x48, 0x5c, 0xa8, 0xef, 0xe9, 0xb6, + 0xcd, 0x74, 0xa1, 0xdc, 0xce, 0xbb, 0x84, 0x70, 0x3e, 0xcc, 0xd7, 0x24, 0x34, 0x86, 0x42, 0xc8, + 0x77, 0x0b, 0x30, 0xed, 0xc5, 0xb7, 0x6b, 0xf2, 0x85, 0xe4, 0x09, 0xed, 0x88, 0xa1, 0xc5, 0xc3, + 0xed, 0xe2, 0x7b, 0xc2, 0xa4, 0x4c, 0xed, 0x3f, 0x17, 0x60, 0xba, 0x63, 0x5b, 0xa6, 0xe5, 0x74, + 0xcf, 0xf1, 0x02, 0x92, 0xfb, 0x50, 0xf1, 0x6d, 0xcb, 0xa4, 0x63, 0xae, 0x26, 0x62, 0x1d, 0x63, + 0x00, 0x28, 0x70, 0x92, 0x37, 0x9a, 0x94, 0x4e, 0x71, 0xa3, 0xc9, 0x1f, 0x55, 0x41, 0x1e, 0x23, + 0x20, 0x03, 0x68, 0x74, 0xd5, 0x45, 0x09, 0xf2, 0x19, 0x6f, 0xe7, 0x48, 0xb2, 0x99, 0xb8, 0x72, + 0x41, 0xcc, 0xfd, 0x61, 0x21, 
0x46, 0x92, 0x08, 0x4d, 0xde, 0x33, 0xbe, 0x9a, 0xf3, 0x9e, 0x71, + 0x21, 0x6e, 0xf8, 0xa6, 0x71, 0x1d, 0xca, 0xfb, 0x41, 0xd0, 0x97, 0x83, 0x69, 0xfc, 0x73, 0x22, + 0x51, 0x9e, 0x27, 0xa1, 0x13, 0xb1, 0xff, 0xc8, 0xa1, 0x99, 0x08, 0x47, 0x0f, 0x6f, 0x73, 0x5c, + 0xc9, 0x15, 0x46, 0x12, 0x17, 0xc1, 0xfe, 0x23, 0x87, 0x26, 0xbf, 0x0c, 0xcd, 0xc0, 0xd3, 0x1d, + 0x7f, 0xcf, 0xf5, 0x7a, 0xd4, 0x93, 0x7b, 0xd4, 0xb5, 0x1c, 0x57, 0x6d, 0x6f, 0x47, 0x68, 0xc2, + 0x24, 0x9b, 0x28, 0xc2, 0xb8, 0x34, 0x72, 0x00, 0xf5, 0x81, 0x29, 0x1a, 0x26, 0xcd, 0x60, 0xcb, + 0x79, 0x6e, 0x4f, 0x8f, 0x05, 0x89, 0xa8, 0x7f, 0x18, 0x0a, 0x48, 0x5e, 0x5c, 0x5a, 0x9b, 0xd4, + 0xc5, 0xa5, 0xf1, 0xd1, 0x98, 0x95, 0x84, 0x86, 0xf4, 0xa4, 0x5e, 0xeb, 0x74, 0x65, 0x8c, 0xdb, + 0x5a, 0x6e, 0x95, 0x53, 0x88, 0x6c, 0x86, 0xba, 0xb1, 0xd3, 0x45, 0x25, 0x43, 0xeb, 0x81, 0xf4, + 0x1d, 0x11, 0x23, 0x71, 0x59, 0x93, 0x38, 0x19, 0xb9, 0x78, 0xba, 0xf9, 0x20, 0xbc, 0x35, 0x28, + 0x96, 0x2c, 0x3e, 0xf3, 0x56, 0x26, 0xed, 0x3f, 0x14, 0xa1, 0xb4, 0xbd, 0xd1, 0x11, 0x09, 0x60, + 0xf9, 0xf5, 0x6f, 0xb4, 0x73, 0x60, 0xf5, 0x1f, 0x50, 0xcf, 0xda, 0x3b, 0x92, 0x5b, 0xef, 0x58, + 0x02, 0xd8, 0x34, 0x07, 0x66, 0xd4, 0x22, 0xef, 0xc2, 0x94, 0xa1, 0xaf, 0x50, 0x2f, 0x18, 0xc7, + 0xb0, 0xc0, 0x8f, 0x80, 0xaf, 0x2c, 0x47, 0xd5, 0x31, 0x01, 0x46, 0x76, 0x00, 0x8c, 0x08, 0xba, + 0x74, 0x66, 0x73, 0x48, 0x0c, 0x38, 0x06, 0x44, 0x10, 0x1a, 0x07, 0x8c, 0x95, 0xa3, 0x96, 0xcf, + 0x82, 0xca, 0x47, 0xce, 0x5d, 0x55, 0x17, 0x23, 0x18, 0xcd, 0x81, 0xe9, 0xc4, 0x0d, 0x4e, 0xe4, + 0x2b, 0x50, 0x77, 0xfb, 0xb1, 0xe9, 0xb4, 0xc1, 0xa3, 0x69, 0xeb, 0xf7, 0x65, 0xd9, 0xe3, 0xe3, + 0xd6, 0xf4, 0x86, 0xdb, 0xb5, 0x0c, 0x55, 0x80, 0x21, 0x3b, 0xd1, 0xa0, 0xca, 0xcf, 0x6d, 0xaa, + 0xfb, 0x9b, 0xf8, 0xda, 0xc1, 0xaf, 0x58, 0xf1, 0x51, 0x52, 0xb4, 0x5f, 0x2d, 0x43, 0xe4, 0x71, + 0x25, 0x3e, 0x54, 0xc5, 0x99, 0x11, 0x39, 0x73, 0x9f, 0xeb, 0xf1, 0x14, 0x29, 0x8a, 0x74, 0xa1, + 0xf4, 0xbe, 0xbb, 0x9b, 0x7b, 0xe2, 0x8e, 0x25, 0x6c, 0x10, 0xb6, 
0xb2, 0x58, 0x01, 0x32, 0x09, + 0xe4, 0xaf, 0x16, 0xe0, 0x45, 0x3f, 0xad, 0xfa, 0xca, 0xe1, 0x80, 0xf9, 0x75, 0xfc, 0xb4, 0x32, + 0x2d, 0xc3, 0x9e, 0x47, 0x91, 0x71, 0xb8, 0x2d, 0xac, 0xff, 0x85, 0x2b, 0x54, 0x0e, 0xa7, 0x5b, + 0x39, 0xef, 0xa9, 0x4d, 0xf6, 0x7f, 0xb2, 0x0c, 0xa5, 0x28, 0xed, 0x3b, 0x45, 0x68, 0xc6, 0x66, + 0xeb, 0xdc, 0xd7, 0x82, 0x3d, 0x4a, 0x5d, 0x0b, 0xb6, 0x35, 0x7e, 0x64, 0x40, 0xd4, 0xaa, 0xf3, + 0xbe, 0x19, 0xec, 0x9f, 0x15, 0xa1, 0xb4, 0xb3, 0xba, 0x96, 0xdc, 0xb4, 0x16, 0x9e, 0xc1, 0xa6, + 0x75, 0x1f, 0x6a, 0xbb, 0x03, 0xcb, 0x0e, 0x2c, 0x27, 0x77, 0x4a, 0x19, 0x75, 0x8b, 0x9a, 0xf4, + 0x75, 0x08, 0x54, 0x54, 0xf0, 0xa4, 0x0b, 0xb5, 0xae, 0xc8, 0xe9, 0x99, 0x3b, 0x5e, 0x52, 0xe6, + 0x06, 0x15, 0x82, 0xe4, 0x1f, 0x54, 0xe8, 0xda, 0x11, 0x54, 0x77, 0x56, 0xa5, 0xda, 0xff, 0x6c, + 0x7b, 0x53, 0xfb, 0x65, 0x08, 0xb5, 0x80, 0x67, 0x2f, 0xfc, 0xbf, 0x15, 0x20, 0xa9, 0xf8, 0x3c, + 0xfb, 0xd1, 0x74, 0x90, 0x1e, 0x4d, 0xab, 0x93, 0xf8, 0xf8, 0xb2, 0x07, 0x94, 0xf6, 0x6f, 0x0b, + 0x90, 0x3a, 0xe8, 0x47, 0x5e, 0x97, 0xe9, 0xe1, 0x92, 0x81, 0x69, 0x2a, 0x3d, 0x1c, 0x49, 0x72, + 0xc7, 0xd2, 0xc4, 0x7d, 0xc8, 0xb6, 0x6b, 0x71, 0x07, 0x9a, 0x6c, 0xfe, 0xbd, 0xf1, 0xb7, 0x6b, + 0x59, 0xee, 0x38, 0x19, 0x3c, 0x19, 0x27, 0x61, 0x52, 0xae, 0xf6, 0x0f, 0x8b, 0x50, 0x7d, 0x66, + 0xb9, 0x0d, 0x68, 0x22, 0x9e, 0x75, 0x25, 0xe7, 0x6c, 0x3f, 0x32, 0x9a, 0xb5, 0x97, 0x8a, 0x66, + 0xcd, 0x7b, 0xfd, 0xf9, 0x53, 0x62, 0x59, 0xff, 0x75, 0x01, 0xe4, 0x5a, 0xb3, 0xee, 0xf8, 0x81, + 0xee, 0x18, 0x94, 0x18, 0xe1, 0xc2, 0x96, 0x37, 0x68, 0x4a, 0x06, 0x16, 0x0a, 0x5d, 0x86, 0xff, + 0x56, 0x0b, 0x19, 0xf9, 0x69, 0xa8, 0xef, 0xbb, 0x7e, 0xc0, 0x17, 0xaf, 0x62, 0xd2, 0x64, 0x76, + 0x5b, 0x96, 0x63, 0xc8, 0x91, 0x76, 0x67, 0x57, 0x46, 0xbb, 0xb3, 0xb5, 0xdf, 0x2e, 0xc2, 0xd4, + 0x27, 0x25, 0x79, 0x42, 0x56, 0xf4, 0x6f, 0x29, 0x67, 0xf4, 0x6f, 0xf9, 0x2c, 0xd1, 0xbf, 0xda, + 0x0f, 0x0b, 0x00, 0xcf, 0x2c, 0x73, 0x83, 0x99, 0x0c, 0xcc, 0xcd, 0x3d, 0xae, 0xb2, 0xc3, 0x72, + 0xff, 
0x5e, 0x45, 0x3d, 0x12, 0x0f, 0xca, 0xfd, 0xb0, 0x00, 0x33, 0x7a, 0x22, 0xd0, 0x35, 0xb7, + 0xbe, 0x9c, 0x8a, 0x9b, 0x0d, 0xe3, 0xb4, 0x92, 0xe5, 0x98, 0x12, 0x4b, 0xde, 0x88, 0x32, 0x93, + 0xdf, 0x8b, 0x86, 0xfd, 0x50, 0x4a, 0x71, 0xae, 0xbb, 0x25, 0x38, 0x9f, 0x12, 0x58, 0x5c, 0x9a, + 0x48, 0x60, 0x71, 0xfc, 0xc8, 0x64, 0xf9, 0x89, 0x47, 0x26, 0x0f, 0xa1, 0xb1, 0xe7, 0xb9, 0x3d, + 0x1e, 0xbb, 0x2b, 0xef, 0xfe, 0xbe, 0x99, 0x63, 0xa1, 0xec, 0xed, 0x5a, 0x0e, 0x35, 0x79, 0x5c, + 0x70, 0x68, 0xb8, 0x5a, 0x53, 0xf8, 0x18, 0x89, 0xe2, 0xb6, 0x7e, 0x57, 0x48, 0xad, 0x4e, 0x52, + 0x6a, 0x38, 0x97, 0x6c, 0x0b, 0x74, 0x54, 0x62, 0x92, 0xf1, 0xba, 0xb5, 0x67, 0x13, 0xaf, 0xab, + 0xfd, 0xf9, 0x9a, 0x9a, 0xc0, 0x9e, 0xbb, 0x24, 0xb8, 0x9f, 0x1e, 0x74, 0xef, 0xd2, 0xa1, 0x53, + 0xe8, 0xf5, 0x67, 0x78, 0x0a, 0xbd, 0x31, 0x99, 0x53, 0xe8, 0x90, 0xef, 0x14, 0x7a, 0x73, 0x42, + 0xa7, 0xd0, 0xa7, 0x26, 0x75, 0x0a, 0x7d, 0x7a, 0xac, 0x53, 0xe8, 0x33, 0xa7, 0x3a, 0x85, 0x7e, + 0x5c, 0x82, 0xd4, 0x66, 0xfc, 0x53, 0xc7, 0xdb, 0xff, 0x53, 0x8e, 0xb7, 0xef, 0x15, 0x21, 0x9a, + 0x88, 0xcf, 0x18, 0x98, 0xf4, 0x0e, 0xd4, 0x7b, 0xfa, 0x23, 0x1e, 0x38, 0x9d, 0xe7, 0xee, 0xe8, + 0x4d, 0x89, 0x81, 0x21, 0x1a, 0xf1, 0x01, 0xac, 0xf0, 0xfe, 0x86, 0xdc, 0x2e, 0x8c, 0xe8, 0x2a, + 0x08, 0x61, 0x24, 0x8d, 0xfe, 0x63, 0x4c, 0x8c, 0xf6, 0xaf, 0x8a, 0x20, 0x2f, 0xfa, 0x20, 0x14, + 0x2a, 0x7b, 0xd6, 0x23, 0x6a, 0xe6, 0x0e, 0x77, 0x8e, 0xdd, 0xe8, 0x2f, 0x7c, 0x34, 0xbc, 0x00, + 0x05, 0x3a, 0x37, 0xbe, 0x0b, 0x9f, 0x9b, 0xec, 0xbf, 0x1c, 0xc6, 0xf7, 0xb8, 0xef, 0x4e, 0x1a, + 0xdf, 0x45, 0x11, 0x2a, 0x19, 0xc2, 0xd6, 0xcf, 0xc3, 0x2f, 0x72, 0xbb, 0x18, 0x13, 0x61, 0x1c, + 0xca, 0xd6, 0xef, 0x8b, 0x34, 0x14, 0x52, 0x46, 0xfb, 0x97, 0x7e, 0xf0, 0xa3, 0x6b, 0x2f, 0xfc, + 0xf0, 0x47, 0xd7, 0x5e, 0xf8, 0xe8, 0x47, 0xd7, 0x5e, 0xf8, 0xd5, 0x93, 0x6b, 0x85, 0x1f, 0x9c, + 0x5c, 0x2b, 0xfc, 0xf0, 0xe4, 0x5a, 0xe1, 0xa3, 0x93, 0x6b, 0x85, 0xff, 0x78, 0x72, 0xad, 0xf0, + 0x97, 0xfe, 0xd3, 0xb5, 0x17, 0x7e, 0xf1, 
0xcb, 0x51, 0x13, 0x16, 0x55, 0x13, 0x16, 0x95, 0xc0, + 0xc5, 0xfe, 0x41, 0x77, 0x91, 0x35, 0x21, 0x2a, 0x51, 0x4d, 0xf8, 0xbf, 0x01, 0x00, 0x00, 0xff, + 0xff, 0xdf, 0xb8, 0xfb, 0xb9, 0x3d, 0x9f, 0x00, 0x00, } func (m *AbstractPodTemplate) Marshal() (dAtA []byte, err error) { @@ -4123,6 +4124,20 @@ func (m *Container) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + } if m.LivenessProbe != nil { { size, err := m.LivenessProbe.MarshalToSizedBuffer(dAtA[:i]) @@ -9927,6 +9942,12 @@ func (m *Container) Size() (n int) { l = m.LivenessProbe.Size() n += 1 + l + sovGenerated(uint64(l)) } + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -12085,6 +12106,11 @@ func (this *Container) String() string { repeatedStringForVolumeMounts += fmt.Sprintf("%v", f) + "," } repeatedStringForVolumeMounts += "}" + repeatedStringForPorts := "[]ContainerPort{" + for _, f := range this.Ports { + repeatedStringForPorts += fmt.Sprintf("%v", f) + "," + } + repeatedStringForPorts += "}" s := strings.Join([]string{`&Container{`, `Image:` + fmt.Sprintf("%v", this.Image) + `,`, `Command:` + fmt.Sprintf("%v", this.Command) + `,`, @@ -12097,6 +12123,7 @@ func (this *Container) String() string { `ImagePullPolicy:` + valueToStringGenerated(this.ImagePullPolicy) + `,`, `ReadinessProbe:` + strings.Replace(this.ReadinessProbe.String(), "Probe", "Probe", 1) + `,`, `LivenessProbe:` + strings.Replace(this.LivenessProbe.String(), "Probe", "Probe", 1) + `,`, + `Ports:` + repeatedStringForPorts + `,`, `}`, }, "") return s @@ -15982,6 +16009,40 @@ func (m *Container) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 12: + 
if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, v1.ContainerPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/pkg/apis/numaflow/v1alpha1/generated.proto b/pkg/apis/numaflow/v1alpha1/generated.proto index 308fcd4382..f967281d56 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.proto +++ b/pkg/apis/numaflow/v1alpha1/generated.proto @@ -310,6 +310,14 @@ message Container { // +optional optional Probe livenessProbe = 11; + + // +optional + // +patchMergeKey=containerPort + // +patchStrategy=merge + // +listType=map + // +listMapKey=containerPort + // +listMapKey=protocol + repeated .k8s.io.api.core.v1.ContainerPort ports = 12; } // ContainerTemplate defines customized spec for a container diff --git a/pkg/apis/numaflow/v1alpha1/side_inputs.go b/pkg/apis/numaflow/v1alpha1/side_inputs.go index 5de3b75065..f3eea9bef4 100644 --- a/pkg/apis/numaflow/v1alpha1/side_inputs.go +++ b/pkg/apis/numaflow/v1alpha1/side_inputs.go @@ -169,7 +169,7 @@ func (si SideInput) getUDContainer(req GetSideInputDeploymentReq) corev1.Contain } // Do not append the envs from req here, as they might contain sensitive information cb = cb.appendEnv(si.Container.Env...).appendVolumeMounts(si.Container.VolumeMounts...). 
- resources(si.Container.Resources).securityContext(si.Container.SecurityContext).appendEnvFrom(si.Container.EnvFrom...) + resources(si.Container.Resources).securityContext(si.Container.SecurityContext).appendEnvFrom(si.Container.EnvFrom...).appendPorts(si.Container.Ports...) cb = cb.appendEnv(corev1.EnvVar{Name: EnvUDContainerType, Value: UDContainerSideInputs}) return cb.build() } diff --git a/pkg/apis/numaflow/v1alpha1/sink.go b/pkg/apis/numaflow/v1alpha1/sink.go index d35f323e9e..b0f38aa67a 100644 --- a/pkg/apis/numaflow/v1alpha1/sink.go +++ b/pkg/apis/numaflow/v1alpha1/sink.go @@ -79,7 +79,7 @@ func (s Sink) getUDSinkContainer(mainContainerReq getContainerReq) corev1.Contai c = c.args(x.Args...) } c = c.appendEnv(corev1.EnvVar{Name: EnvUDContainerType, Value: UDContainerSink}) - c = c.appendEnv(x.Env...).appendVolumeMounts(x.VolumeMounts...).resources(x.Resources).securityContext(x.SecurityContext).appendEnvFrom(x.EnvFrom...) + c = c.appendEnv(x.Env...).appendVolumeMounts(x.VolumeMounts...).resources(x.Resources).securityContext(x.SecurityContext).appendEnvFrom(x.EnvFrom...).appendPorts(x.Ports...) if x.ImagePullPolicy != nil { c = c.imagePullPolicy(*x.ImagePullPolicy) } @@ -114,7 +114,7 @@ func (s Sink) getFallbackUDSinkContainer(mainContainerReq getContainerReq) corev c = c.args(x.Args...) } c = c.appendEnv(corev1.EnvVar{Name: EnvUDContainerType, Value: UDContainerFallbackSink}) - c = c.appendEnv(x.Env...).appendVolumeMounts(x.VolumeMounts...).resources(x.Resources).securityContext(x.SecurityContext).appendEnvFrom(x.EnvFrom...) + c = c.appendEnv(x.Env...).appendVolumeMounts(x.VolumeMounts...).resources(x.Resources).securityContext(x.SecurityContext).appendEnvFrom(x.EnvFrom...).appendPorts(x.Ports...) 
if x.ImagePullPolicy != nil { c = c.imagePullPolicy(*x.ImagePullPolicy) } diff --git a/pkg/apis/numaflow/v1alpha1/source.go b/pkg/apis/numaflow/v1alpha1/source.go index b89016d9c2..deece42e72 100644 --- a/pkg/apis/numaflow/v1alpha1/source.go +++ b/pkg/apis/numaflow/v1alpha1/source.go @@ -96,7 +96,7 @@ func (s Source) getUDTransformerContainer(mainContainerReq getContainerReq) core c = c.image(mainContainerReq.image).args(args...) // Use the same image as the main container } if x := s.UDTransformer.Container; x != nil { - c = c.appendEnv(x.Env...).appendVolumeMounts(x.VolumeMounts...).resources(x.Resources).securityContext(x.SecurityContext).appendEnvFrom(x.EnvFrom...) + c = c.appendEnv(x.Env...).appendVolumeMounts(x.VolumeMounts...).resources(x.Resources).securityContext(x.SecurityContext).appendEnvFrom(x.EnvFrom...).appendPorts(x.Ports...) if x.ImagePullPolicy != nil { c = c.imagePullPolicy(*x.ImagePullPolicy) } @@ -142,7 +142,7 @@ func (s Source) getUDSourceContainer(mainContainerReq getContainerReq) corev1.Co } } if x := s.UDSource.Container; x != nil { - c = c.appendEnv(x.Env...).appendVolumeMounts(x.VolumeMounts...).resources(x.Resources).securityContext(x.SecurityContext).appendEnvFrom(x.EnvFrom...) + c = c.appendEnv(x.Env...).appendVolumeMounts(x.VolumeMounts...).resources(x.Resources).securityContext(x.SecurityContext).appendEnvFrom(x.EnvFrom...).appendPorts(x.Ports...) if x.ImagePullPolicy != nil { c = c.imagePullPolicy(*x.ImagePullPolicy) } diff --git a/pkg/apis/numaflow/v1alpha1/udf.go b/pkg/apis/numaflow/v1alpha1/udf.go index 23e9bcf085..6f5f18ff49 100644 --- a/pkg/apis/numaflow/v1alpha1/udf.go +++ b/pkg/apis/numaflow/v1alpha1/udf.go @@ -91,7 +91,7 @@ func (in UDF) getUDFContainer(mainContainerReq getContainerReq) corev1.Container c = c.image(mainContainerReq.image).args(args...) 
// Use the same image as the main container } if x := in.Container; x != nil { - c = c.appendEnv(x.Env...).appendVolumeMounts(x.VolumeMounts...).resources(x.Resources).securityContext(x.SecurityContext).appendEnvFrom(x.EnvFrom...) + c = c.appendEnv(x.Env...).appendVolumeMounts(x.VolumeMounts...).resources(x.Resources).securityContext(x.SecurityContext).appendEnvFrom(x.EnvFrom...).appendPorts(x.Ports...) if x.ImagePullPolicy != nil { c = c.imagePullPolicy(*x.ImagePullPolicy) } diff --git a/pkg/apis/numaflow/v1alpha1/user_defined_container.go b/pkg/apis/numaflow/v1alpha1/user_defined_container.go index 5e8746097e..79d724f228 100644 --- a/pkg/apis/numaflow/v1alpha1/user_defined_container.go +++ b/pkg/apis/numaflow/v1alpha1/user_defined_container.go @@ -42,4 +42,11 @@ type Container struct { ReadinessProbe *Probe `json:"readinessProbe,omitempty" protobuf:"bytes,10,opt,name=readinessProbe"` // +optional LivenessProbe *Probe `json:"livenessProbe,omitempty" protobuf:"bytes,11,opt,name=livenessProbe"` + // +optional + // +patchMergeKey=containerPort + // +patchStrategy=merge + // +listType=map + // +listMapKey=containerPort + // +listMapKey=protocol + Ports []corev1.ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,12,rep,name=ports"` } diff --git a/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go index c131980d96..912aabc5c2 100644 --- a/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go @@ -433,6 +433,11 @@ func (in *Container) DeepCopyInto(out *Container) { *out = new(Probe) (*in).DeepCopyInto(*out) } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]v1.ContainerPort, len(*in)) + copy(*out, *in) + } return } diff --git a/pkg/apis/numaflow/v1alpha1/zz_generated.openapi.go b/pkg/apis/numaflow/v1alpha1/zz_generated.openapi.go index e28fbbe28a..3885f6e75f 100644 --- 
a/pkg/apis/numaflow/v1alpha1/zz_generated.openapi.go +++ b/pkg/apis/numaflow/v1alpha1/zz_generated.openapi.go @@ -900,11 +900,35 @@ func schema_pkg_apis_numaflow_v1alpha1_Container(ref common.ReferenceCallback) c Ref: ref("github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Probe"), }, }, + "ports": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-map-keys": []interface{}{ + "containerPort", + "protocol", + }, + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "containerPort", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.ContainerPort"), + }, + }, + }, + }, + }, }, }, }, Dependencies: []string{ - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Probe", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeMount"}, + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Probe", "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeMount"}, } } diff --git a/rust/numaflow-models/src/models/container.rs b/rust/numaflow-models/src/models/container.rs index 3599d6a243..834bf57e99 100644 --- a/rust/numaflow-models/src/models/container.rs +++ b/rust/numaflow-models/src/models/container.rs @@ -34,6 +34,8 @@ pub struct Container { pub image_pull_policy: Option, #[serde(rename = "livenessProbe", skip_serializing_if = "Option::is_none")] pub liveness_probe: Option>, + #[serde(rename = "ports", skip_serializing_if = "Option::is_none")] + pub ports: Option>, #[serde(rename = "readinessProbe", skip_serializing_if = 
"Option::is_none")] pub readiness_probe: Option>, #[serde(rename = "resources", skip_serializing_if = "Option::is_none")] @@ -55,6 +57,7 @@ impl Container { image: None, image_pull_policy: None, liveness_probe: None, + ports: None, readiness_probe: None, resources: None, security_context: None, From 206ff7f72bf83e19edf17eb36861865585b1ce9c Mon Sep 17 00:00:00 2001 From: Sidhant Kohli Date: Wed, 9 Oct 2024 11:21:59 -0700 Subject: [PATCH 098/188] fix: pipeline pausing race conditions of draining and terminating source (#2131) Signed-off-by: Sidhant Kohli --- go.mod | 2 +- pkg/reconciler/pipeline/controller.go | 79 ++++++++++++++++++--------- 2 files changed, 53 insertions(+), 28 deletions(-) diff --git a/go.mod b/go.mod index 657849e773..880e5f2bd1 100644 --- a/go.mod +++ b/go.mod @@ -49,6 +49,7 @@ require ( go.uber.org/multierr v1.11.0 go.uber.org/zap v1.26.0 golang.org/x/crypto v0.27.0 + golang.org/x/exp v0.0.0-20240531132922-fd00a4e0eefc golang.org/x/net v0.29.0 golang.org/x/oauth2 v0.21.0 golang.org/x/sync v0.8.0 @@ -197,7 +198,6 @@ require ( github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 // indirect go.mongodb.org/mongo-driver v1.15.0 // indirect golang.org/x/arch v0.7.0 // indirect - golang.org/x/exp v0.0.0-20240531132922-fd00a4e0eefc // indirect golang.org/x/mod v0.20.0 // indirect golang.org/x/sys v0.25.0 // indirect golang.org/x/term v0.24.0 // indirect diff --git a/pkg/reconciler/pipeline/controller.go b/pkg/reconciler/pipeline/controller.go index c2f9080529..3ae7c49c00 100644 --- a/pkg/reconciler/pipeline/controller.go +++ b/pkg/reconciler/pipeline/controller.go @@ -24,6 +24,7 @@ import ( "github.com/imdario/mergo" "go.uber.org/zap" + "golang.org/x/exp/maps" appv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" @@ -181,8 +182,8 @@ func (r *pipelineReconciler) reconcile(ctx context.Context, pl *dfv1.Pipeline) ( } // check if any changes related to pause/resume lifecycle for the pipeline - if isLifecycleChange(pl) { 
- oldPhase := pl.Status.Phase + oldPhase := pl.Status.Phase + if isLifecycleChange(pl) && oldPhase != pl.Spec.Lifecycle.GetDesiredPhase() { requeue, err := r.updateDesiredState(ctx, pl) if err != nil { logMsg := fmt.Sprintf("Updated desired pipeline phase failed: %v", zap.Error(err)) @@ -611,7 +612,7 @@ func buildVertices(pl *dfv1.Pipeline) map[string]dfv1.Vertex { copyVertexTemplate(pl, vCopy) copyVertexLimits(pl, vCopy) replicas := int32(1) - // If the desired phase is paused or we are in the middle of pausing we should not start any vertex replicas + // If the desired phase is paused, or we are in the middle of pausing we should not start any vertex replicas if isLifecycleChange(pl) { replicas = int32(0) } else if v.IsReduceUDF() { @@ -830,39 +831,48 @@ func (r *pipelineReconciler) resumePipeline(ctx context.Context, pl *dfv1.Pipeli } func (r *pipelineReconciler) pausePipeline(ctx context.Context, pl *dfv1.Pipeline) (bool, error) { - // check that annotations / pause timestamp annotation exist + var ( + drainCompleted = false + daemonClient daemonclient.DaemonClient + errWhileDrain error + ) + pl.Status.MarkPhasePausing() + if pl.GetAnnotations() == nil || pl.GetAnnotations()[dfv1.KeyPauseTimestamp] == "" { + _, err := r.scaleDownSourceVertices(ctx, pl) + if err != nil { + // If there's an error requeue the request + return true, err + } patchJson := `{"metadata":{"annotations":{"` + dfv1.KeyPauseTimestamp + `":"` + time.Now().Format(time.RFC3339) + `"}}}` - if err := r.client.Patch(ctx, pl, client.RawPatch(types.MergePatchType, []byte(patchJson))); err != nil && !apierrors.IsNotFound(err) { + if err = r.client.Patch(ctx, pl, client.RawPatch(types.MergePatchType, []byte(patchJson))); err != nil && !apierrors.IsNotFound(err) { return true, err } + // This is to give some time to process the new messages, + // otherwise check IsDrained directly may get incorrect information + return true, nil } - pl.Status.MarkPhasePausing() - updated, err := 
r.scaleDownSourceVertices(ctx, pl) - if err != nil || updated { - // If there's an error, or scaling down happens, requeue the request - // This is to give some time to process the new messages, otherwise check IsDrained directly may get incorrect information - return updated, err - } - - var daemonError error - var drainCompleted = false - + // Check if all the source vertex pods have scaled down to zero + sourcePodsTerminated, err := r.noSourceVertexPodsRunning(ctx, pl) + // If the sources have scaled down successfully then check for the buffer information. // Check for the daemon to obtain the buffer draining information, in case we see an error trying to // retrieve this we do not exit prematurely to allow honoring the pause timeout for a consistent error // - In case the timeout has not occurred we would trigger a requeue // - If the timeout has occurred even after getting the drained error, we will try to pause the pipeline - daemonClient, daemonError := daemonclient.NewGRPCDaemonServiceClient(pl.GetDaemonServiceURL()) - if daemonClient != nil { - defer func() { - _ = daemonClient.Close() - }() - drainCompleted, err = daemonClient.IsDrained(ctx, pl.Name) - if err != nil { - daemonError = err + if sourcePodsTerminated { + daemonClient, err = daemonclient.NewGRPCDaemonServiceClient(pl.GetDaemonServiceURL()) + if daemonClient != nil { + defer func() { + _ = daemonClient.Close() + }() + drainCompleted, err = daemonClient.IsDrained(ctx, pl.Name) } } + if err != nil { + errWhileDrain = err + } + pauseTimestamp, err := time.Parse(time.RFC3339, pl.GetAnnotations()[dfv1.KeyPauseTimestamp]) if err != nil { return false, err @@ -874,8 +884,8 @@ func (r *pipelineReconciler) pausePipeline(ctx context.Context, pl *dfv1.Pipelin if err != nil { return true, err } - if daemonError != nil { - r.logger.Errorw("Error in fetching Drained status, Pausing due to timeout", zap.Error(daemonError)) + if errWhileDrain != nil { + r.logger.Errorw("Errors encountered while pausing, moving 
to paused after timeout", zap.Error(errWhileDrain)) } // if the drain completed successfully, then set the DrainedOnPause field to true if drainCompleted { @@ -884,7 +894,20 @@ func (r *pipelineReconciler) pausePipeline(ctx context.Context, pl *dfv1.Pipelin pl.Status.MarkPhasePaused() return false, nil } - return true, daemonError + return true, err +} + +// noSourceVertexPodsRunning checks whether any source vertex has running replicas +func (r *pipelineReconciler) noSourceVertexPodsRunning(ctx context.Context, pl *dfv1.Pipeline) (bool, error) { + sources := pl.Spec.GetSourcesByName() + pods := corev1.PodList{} + label := fmt.Sprintf("%s=%s, %s in (%s)", dfv1.KeyPipelineName, pl.Name, + dfv1.KeyVertexName, strings.Join(maps.Keys(sources), ",")) + selector, _ := labels.Parse(label) + if err := r.client.List(ctx, &pods, &client.ListOptions{Namespace: pl.Namespace, LabelSelector: selector}); err != nil { + return false, err + } + return len(pods.Items) == 0, nil } func (r *pipelineReconciler) scaleDownSourceVertices(ctx context.Context, pl *dfv1.Pipeline) (bool, error) { @@ -965,6 +988,8 @@ func (r *pipelineReconciler) checkChildrenResourceStatus(ctx context.Context, pi return } } + // if all conditions are True, clear the status message. 
+ pipeline.Status.Message = "" }() // get the daemon deployment and update the status of it to the pipeline From 1f0ef3f39e22c4d15036b2857b5d4f5ec99444ae Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 9 Oct 2024 23:51:00 -0700 Subject: [PATCH 099/188] docs: updated CHANGELOG.md (#2138) Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- CHANGELOG.md | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 21035aecaa..1688eb88cf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,20 @@ # Changelog +## v1.3.3 (2024-10-09) + + * [4f31aad7](https://github.com/numaproj/numaflow/commit/4f31aad7f51cce59700ef53f363d06afeb6d6aee) Update manifests to v1.3.3 + * [d0133636](https://github.com/numaproj/numaflow/commit/d01336364b1826c9d28ff81828919b17ca8da222) fix: pipeline pausing race conditions of draining and terminating source (#2131) + * [688dd730](https://github.com/numaproj/numaflow/commit/688dd73049617511806779a4e535ad9f380af21f) feat: expose ports for user defined containers (#2135) + * [a4a4fd05](https://github.com/numaproj/numaflow/commit/a4a4fd0578f7a4e45a6435505d03061c3612ed6f) fix: create buffers and buckets before updating Vertices (#2112) + * [498583f2](https://github.com/numaproj/numaflow/commit/498583f24573649f6ed2db959742f515804a2edc) chore(deps): bump rollup from 2.79.1 to 2.79.2 in /ui (#2096) + +### Contributors + + * Derek Wang + * Julie Vogelman + * Sidhant Kohli + * dependabot[bot] + ## v1.3.2 (2024-09-26) * [cb7d17d4](https://github.com/numaproj/numaflow/commit/cb7d17d4f3e2ecfcf6a1aa413031f714c135983d) Update manifests to v1.3.2 From 271e459a5deb13f77906fb58c8308151ef6415a1 Mon Sep 17 00:00:00 2001 From: Vigith Maurice Date: Fri, 11 Oct 2024 15:17:11 -0700 Subject: [PATCH 100/188] feat: add keys into kafka header while producing (#2143) Signed-off-by: Vigith Maurice --- 
docs/user-guide/sinks/kafka.md | 10 ++++++++++ pkg/sinks/kafka/kafka.go | 22 ++++++++++++++++++++++ 2 files changed, 32 insertions(+) diff --git a/docs/user-guide/sinks/kafka.md b/docs/user-guide/sinks/kafka.md index ae4d85f8ef..6837f262ff 100644 --- a/docs/user-guide/sinks/kafka.md +++ b/docs/user-guide/sinks/kafka.md @@ -2,6 +2,16 @@ A `Kafka` sink is used to forward the messages to a Kafka topic. Kafka sink supports configuration overrides. +## Kafka Headers + +We will insert `keys` into the Kafka header, but since `keys` is an array, we will add `keys` into the header in the +following format. + +* `__keys_len` will have the number of `key` in the header. if `__keys_len` == `0`, means no `keys` are present. +* `__keys_%d` will have the `key`, e.g., `__key_0` will be the first key, and so forth. + +## Example + ```yaml spec: vertices: diff --git a/pkg/sinks/kafka/kafka.go b/pkg/sinks/kafka/kafka.go index cfbd2d27ff..b1ba1f05f3 100644 --- a/pkg/sinks/kafka/kafka.go +++ b/pkg/sinks/kafka/kafka.go @@ -150,10 +150,32 @@ func (tk *ToKafka) Write(_ context.Context, messages []isb.Message) ([]isb.Offse } } }() + for index, msg := range messages { + // insert keys in the header. + // since keys is an array, to decompose it, we need len and key at each index. + var headers []sarama.RecordHeader + // insert __key_len + keyLen := sarama.RecordHeader{ + Key: []byte("__key_len"), + Value: []byte(fmt.Sprintf("%d", len(msg.Keys))), + } + headers = append(headers, keyLen) + + // write keys into header if length > 0 + if len(msg.Keys) > 0 { + for idx, key := range msg.Keys { + headers = append(headers, sarama.RecordHeader{ + Key: []byte(fmt.Sprintf("__key_%d", idx)), + Value: []byte(key), + }) + } + } + message := &sarama.ProducerMessage{ Topic: tk.topic, Value: sarama.ByteEncoder(msg.Payload), + Headers: headers, Metadata: index, // Use metadata to identify if it succeeds or fails in the async return. 
} tk.producer.Input() <- message From dd08bcab15c7dad09930cb158b8b98caa3698d0e Mon Sep 17 00:00:00 2001 From: Yashash H L Date: Sun, 13 Oct 2024 20:48:41 +0530 Subject: [PATCH 101/188] feat: Unify Batch Map and Unary Map Operations Using a Shared gRPC Protocol (#2139) Signed-off-by: Yashash H L --- go.mod | 2 +- go.sum | 4 +- pkg/apis/proto/map/v1/map.proto | 9 + pkg/isb/stores/jetstream/writer_test.go | 8 +- pkg/isb/stores/redis/read_test.go | 6 +- pkg/isb/stores/redis/write_test.go | 4 +- pkg/reduce/pnf/pnf.go | 2 +- pkg/sdkclient/batchmapper/client.go | 180 ------------- pkg/sdkclient/batchmapper/client_test.go | 274 -------------------- pkg/sdkclient/batchmapper/interface.go | 31 --- pkg/sdkclient/mapper/client.go | 35 ++- pkg/sdkclient/options.go | 13 + pkg/sources/transformer/grpc_transformer.go | 2 +- pkg/udf/forward/forward.go | 90 +------ pkg/udf/forward/forward_test.go | 225 +++------------- pkg/udf/forward/options.go | 23 +- pkg/udf/forward/shutdown_test.go | 46 ++-- pkg/udf/map_udf.go | 30 +-- pkg/udf/rpc/grpc_batch_map.go | 207 --------------- pkg/udf/rpc/grpc_batch_map_test.go | 97 ------- pkg/udf/rpc/grpc_map.go | 8 +- pkg/udf/rpc/grpc_map_test.go | 6 +- rust/numaflow-grpc/src/clients/map.v1.rs | 11 + 23 files changed, 159 insertions(+), 1154 deletions(-) delete mode 100644 pkg/sdkclient/batchmapper/client.go delete mode 100644 pkg/sdkclient/batchmapper/client_test.go delete mode 100644 pkg/sdkclient/batchmapper/interface.go delete mode 100644 pkg/udf/rpc/grpc_batch_map.go delete mode 100644 pkg/udf/rpc/grpc_batch_map_test.go diff --git a/go.mod b/go.mod index 880e5f2bd1..07f6cc2d62 100644 --- a/go.mod +++ b/go.mod @@ -32,7 +32,7 @@ require ( github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe github.com/nats-io/nats-server/v2 v2.10.20 github.com/nats-io/nats.go v1.37.0 - github.com/numaproj/numaflow-go v0.8.2-0.20241003055702-9179ac584a4a + github.com/numaproj/numaflow-go v0.8.2-0.20241013052921-0aa35d8766f1 
github.com/prometheus/client_golang v1.19.1 github.com/prometheus/client_model v0.6.1 github.com/prometheus/common v0.55.0 diff --git a/go.sum b/go.sum index 80a55220bb..d481755086 100644 --- a/go.sum +++ b/go.sum @@ -483,8 +483,8 @@ github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDm github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/numaproj/numaflow-go v0.8.2-0.20241003055702-9179ac584a4a h1:KnpALzELgzX7GR2FDvADTDsauGW/B1fzFw9b+kXYkFc= -github.com/numaproj/numaflow-go v0.8.2-0.20241003055702-9179ac584a4a/go.mod h1:FaCMeV0V9SiLcVf2fwT+GeTJHNaK2gdQsTAIqQ4x7oc= +github.com/numaproj/numaflow-go v0.8.2-0.20241013052921-0aa35d8766f1 h1:4uHQqImTmgGkCFrgEhX7atxsAe/nRgjv/2Px0rwqw/I= +github.com/numaproj/numaflow-go v0.8.2-0.20241013052921-0aa35d8766f1/go.mod h1:FaCMeV0V9SiLcVf2fwT+GeTJHNaK2gdQsTAIqQ4x7oc= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= diff --git a/pkg/apis/proto/map/v1/map.proto b/pkg/apis/proto/map/v1/map.proto index 34a189f08b..49e88740b3 100644 --- a/pkg/apis/proto/map/v1/map.proto +++ b/pkg/apis/proto/map/v1/map.proto @@ -47,6 +47,7 @@ message MapRequest { // This ID is used to uniquely identify a map request string id = 2; optional Handshake handshake = 3; + optional Status status = 4; } /* @@ -57,6 +58,13 @@ message Handshake { bool sot = 1; } +/* + * Status message to indicate the status of the message. + */ +message Status { + bool eot = 1; +} + /** * MapResponse represents a response element. 
*/ @@ -70,6 +78,7 @@ message MapResponse { // This ID is used to refer the responses to the request it corresponds to. string id = 2; optional Handshake handshake = 3; + optional Status status = 4; } /** diff --git a/pkg/isb/stores/jetstream/writer_test.go b/pkg/isb/stores/jetstream/writer_test.go index 78afb7738f..6070f06755 100644 --- a/pkg/isb/stores/jetstream/writer_test.go +++ b/pkg/isb/stores/jetstream/writer_test.go @@ -169,14 +169,10 @@ func TestForwarderJetStreamBuffer(t *testing.T) { assert.NoError(t, err) opts := []forward.Option{forward.WithReadBatchSize(tt.batchSize)} - if tt.batchEnabled { - opts = append(opts, forward.WithUDFBatchMap(myForwardJetStreamTest{})) - } if tt.streamEnabled { opts = append(opts, forward.WithUDFStreamingMap(myForwardJetStreamTest{})) - } - if tt.unaryEnabled { - opts = append(opts, forward.WithUDFUnaryMap(myForwardJetStreamTest{})) + } else { + opts = append(opts, forward.WithUDFMap(myForwardJetStreamTest{})) } f, err := forward.NewInterStepDataForward(vertexInstance, fromStep, toSteps, myForwardJetStreamTest{}, fetchWatermark, publishWatermark, idleManager, opts...) 
diff --git a/pkg/isb/stores/redis/read_test.go b/pkg/isb/stores/redis/read_test.go index 6f42dc79c5..749010c6d4 100644 --- a/pkg/isb/stores/redis/read_test.go +++ b/pkg/isb/stores/redis/read_test.go @@ -140,7 +140,7 @@ func TestRedisCheckBacklog(t *testing.T) { } fetchWatermark, publishWatermark := generic.BuildNoOpWatermarkProgressorsFromBufferMap(toSteps) - f, err := forward.NewInterStepDataForward(vertexInstance, rqr, toSteps, forwardReadWritePerformance{}, fetchWatermark, publishWatermark, wmb.NewNoOpIdleManager(), forward.WithReadBatchSize(10), forward.WithUDFUnaryMap(forwardReadWritePerformance{})) + f, err := forward.NewInterStepDataForward(vertexInstance, rqr, toSteps, forwardReadWritePerformance{}, fetchWatermark, publishWatermark, wmb.NewNoOpIdleManager(), forward.WithReadBatchSize(10), forward.WithUDFMap(forwardReadWritePerformance{})) assert.NoError(t, err) stopped := f.Start() @@ -349,7 +349,7 @@ func (suite *ReadWritePerformance) SetupSuite() { } fetchWatermark, publishWatermark := generic.BuildNoOpWatermarkProgressorsFromBufferMap(toSteps) - isdf, _ := forward.NewInterStepDataForward(vertexInstance, rqr, toSteps, forwardReadWritePerformance{}, fetchWatermark, publishWatermark, wmb.NewNoOpIdleManager(), forward.WithUDFUnaryMap(forwardReadWritePerformance{})) + isdf, _ := forward.NewInterStepDataForward(vertexInstance, rqr, toSteps, forwardReadWritePerformance{}, fetchWatermark, publishWatermark, wmb.NewNoOpIdleManager(), forward.WithUDFMap(forwardReadWritePerformance{})) suite.ctx = ctx suite.rclient = client @@ -443,7 +443,7 @@ func (suite *ReadWritePerformance) TestReadWriteLatencyPipelining() { } fetchWatermark, publishWatermark := generic.BuildNoOpWatermarkProgressorsFromBufferMap(toSteps) - suite.isdf, _ = forward.NewInterStepDataForward(vertexInstance, suite.rqr, toSteps, forwardReadWritePerformance{}, fetchWatermark, publishWatermark, wmb.NewNoOpIdleManager(), forward.WithUDFUnaryMap(forwardReadWritePerformance{})) + suite.isdf, _ = 
forward.NewInterStepDataForward(vertexInstance, suite.rqr, toSteps, forwardReadWritePerformance{}, fetchWatermark, publishWatermark, wmb.NewNoOpIdleManager(), forward.WithUDFMap(forwardReadWritePerformance{})) suite.False(suite.rqw.IsFull()) var writeMessages = make([]isb.Message, 0, suite.count) diff --git a/pkg/isb/stores/redis/write_test.go b/pkg/isb/stores/redis/write_test.go index 92693a5890..167f060eda 100644 --- a/pkg/isb/stores/redis/write_test.go +++ b/pkg/isb/stores/redis/write_test.go @@ -414,7 +414,7 @@ func TestNewInterStepDataForwardRedis(t *testing.T) { } fetchWatermark, publishWatermark := generic.BuildNoOpWatermarkProgressorsFromBufferMap(toSteps) - f, err := forward.NewInterStepDataForward(vertexInstance, fromStep, toSteps, myForwardRedisTest{}, fetchWatermark, publishWatermark, wmb.NewNoOpIdleManager(), forward.WithUDFUnaryMap(myForwardRedisTest{})) + f, err := forward.NewInterStepDataForward(vertexInstance, fromStep, toSteps, myForwardRedisTest{}, fetchWatermark, publishWatermark, wmb.NewNoOpIdleManager(), forward.WithUDFMap(myForwardRedisTest{})) assert.NoError(t, err) assert.False(t, to1.IsFull()) @@ -463,7 +463,7 @@ func TestReadTimeout(t *testing.T) { } fetchWatermark, publishWatermark := generic.BuildNoOpWatermarkProgressorsFromBufferMap(toSteps) - f, err := forward.NewInterStepDataForward(vertexInstance, fromStep, toSteps, myForwardRedisTest{}, fetchWatermark, publishWatermark, wmb.NewNoOpIdleManager(), forward.WithUDFUnaryMap(myForwardRedisTest{})) + f, err := forward.NewInterStepDataForward(vertexInstance, fromStep, toSteps, myForwardRedisTest{}, fetchWatermark, publishWatermark, wmb.NewNoOpIdleManager(), forward.WithUDFMap(myForwardRedisTest{})) assert.NoError(t, err) stopped := f.Start() // Call stop to end the test as we have a blocking read. 
The forwarder is up and running with no messages written diff --git a/pkg/reduce/pnf/pnf.go b/pkg/reduce/pnf/pnf.go index b1fffb1494..e09a6e1bc8 100644 --- a/pkg/reduce/pnf/pnf.go +++ b/pkg/reduce/pnf/pnf.go @@ -424,7 +424,7 @@ func (pf *ProcessAndForward) writeToBuffer(ctx context.Context, edgeName string, metrics.LabelReason: writeErr.Error(), }).Add(float64(len(message.Payload))) - pf.log.Infow("Dropped message", zap.String("reason", writeErr.Error()), zap.String("vertex", pf.vertexName), zap.String("pipeline", pf.pipelineName)) + pf.log.Infow("Dropped message", zap.String("reason", writeErr.Error()), zap.String("vertex", pf.vertexName), zap.String("pipeline", pf.pipelineName), zap.String("msg_id", message.ID.String())) } else { failedMessages = append(failedMessages, message) } diff --git a/pkg/sdkclient/batchmapper/client.go b/pkg/sdkclient/batchmapper/client.go deleted file mode 100644 index 74d1cb49f5..0000000000 --- a/pkg/sdkclient/batchmapper/client.go +++ /dev/null @@ -1,180 +0,0 @@ -/* -Copyright 2022 The Numaproj Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package batchmapper - -import ( - "context" - "errors" - "io" - - batchmappb "github.com/numaproj/numaflow-go/pkg/apis/proto/batchmap/v1" - "google.golang.org/grpc" - "google.golang.org/protobuf/types/known/emptypb" - - "github.com/numaproj/numaflow/pkg/sdkclient" - sdkerr "github.com/numaproj/numaflow/pkg/sdkclient/error" - grpcutil "github.com/numaproj/numaflow/pkg/sdkclient/grpc" - "github.com/numaproj/numaflow/pkg/sdkclient/serverinfo" -) - -// client contains the grpc connection and the grpc client. -type client struct { - conn *grpc.ClientConn - grpcClt batchmappb.BatchMapClient -} - -// New creates a new client object. -func New(serverInfo *serverinfo.ServerInfo, inputOptions ...sdkclient.Option) (Client, error) { - var opts = sdkclient.DefaultOptions(sdkclient.BatchMapAddr) - - for _, inputOption := range inputOptions { - inputOption(opts) - } - - // Connect to the server - conn, err := grpcutil.ConnectToServer(opts.UdsSockAddr(), serverInfo, opts.MaxMessageSize()) - if err != nil { - return nil, err - } - - c := new(client) - c.conn = conn - c.grpcClt = batchmappb.NewBatchMapClient(conn) - return c, nil -} - -func NewFromClient(c batchmappb.BatchMapClient) (Client, error) { - return &client{ - grpcClt: c, - }, nil -} - -// CloseConn closes the grpc client connection. -func (c *client) CloseConn() error { - return c.conn.Close() -} - -// IsReady returns true if the grpc connection is ready to use. -func (c *client) IsReady(ctx context.Context, in *emptypb.Empty) (bool, error) { - resp, err := c.grpcClt.IsReady(ctx, in) - if err != nil { - return false, err - } - return resp.GetReady(), nil -} - -// BatchMapFn is the handler for the gRPC client (Numa container) -// It takes in a stream of input Requests, sends them to the gRPC server(UDF) and then streams the -// responses received back on a channel asynchronously. 
-// We spawn 2 goroutines here, one for sending the requests over the stream -// and the other one for receiving the responses -func (c *client) BatchMapFn(ctx context.Context, inputCh <-chan *batchmappb.BatchMapRequest) (<-chan *batchmappb.BatchMapResponse, <-chan error) { - // errCh is used to track and propagate any errors that might occur during the rpc lifecyle, these could include - // errors in sending, UDF errors etc - // These are propagated to the applier for further handling - errCh := make(chan error) - - // response channel for streaming back the results received from the gRPC server - responseCh := make(chan *batchmappb.BatchMapResponse) - - // BatchMapFn is a bidirectional streaming RPC - // We get a Map_BatchMapFnClient object over which we can send the requests, - // receive the responses asynchronously. - // TODO(map-batch): this creates a new gRPC stream for every batch, - // it might be useful to see the performance difference between this approach - // and a long-running RPC - stream, err := c.grpcClt.BatchMapFn(ctx) - if err != nil { - go func() { - errCh <- sdkerr.ToUDFErr("c.grpcClt.BatchMapFn", err) - }() - // passing a nil channel for responseCh to ensure that it is never selected as this is an error scenario - // and we should be reading on the error channel only. - return nil, errCh - } - - // read the response from the server stream and send it to responseCh channel - // any error is sent to errCh channel - go func() { - // close this channel to indicate that no more elements left to receive from grpc - // We do defer here on the whole go-routine as even during a error scenario, we - // want to close the channel and stop forwarding any more responses from the UDF - // as we would be replaying the current ones. 
- defer close(responseCh) - - var resp *batchmappb.BatchMapResponse - var recvErr error - for { - resp, recvErr = stream.Recv() - // check if this is EOF error, which indicates that no more responses left to process on the - // stream from the UDF, in such a case we return without any error to indicate this - if errors.Is(recvErr, io.EOF) { - // set the error channel to nil in case of no errors to ensure - // that it is not picked up - errCh = nil - return - } - // If this is some other error, propagate it to error channel, - // also close the response channel(done using the defer close) to indicate no more messages being read - errSDK := sdkerr.ToUDFErr("c.grpcClt.BatchMapFn", recvErr) - if errSDK != nil { - errCh <- errSDK - return - } - // send the response upstream - responseCh <- resp - } - }() - - // Read from the read messages and send them individually to the bi-di stream for processing - // in case there is an error in sending, send it to the error channel for handling - go func() { - for { - select { - case <-ctx.Done(): - // If the context is done we do not want to send further on the stream, - // the Recv should get an error from the server as the stream uses the same ctx - return - case inputMsg, ok := <-inputCh: - // If there are no more messages left to read on the channel, then we can - // close the stream. - if !ok { - // CloseSend closes the send direction of the stream. 
This indicates to the - // UDF that we have sent all requests from the client, and it can safely - // stop listening on the stream - sendErr := stream.CloseSend() - if sendErr != nil && !errors.Is(sendErr, io.EOF) { - errCh <- sdkerr.ToUDFErr("c.grpcClt.BatchMapFn stream.CloseSend()", sendErr) - } - // exit this routine - return - } else { - err = stream.Send(inputMsg) - if err != nil { - errCh <- sdkerr.ToUDFErr("c.grpcClt.BatchMapFn", err) - // On an error we would be stopping any further processing and go for a replay - // so return directly - return - } - } - } - } - - }() - return responseCh, errCh - -} diff --git a/pkg/sdkclient/batchmapper/client_test.go b/pkg/sdkclient/batchmapper/client_test.go deleted file mode 100644 index 60feb4d567..0000000000 --- a/pkg/sdkclient/batchmapper/client_test.go +++ /dev/null @@ -1,274 +0,0 @@ -/* -Copyright 2022 The Numaproj Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package batchmapper - -import ( - "fmt" - "io" - "reflect" - "testing" - "time" - - "github.com/golang/mock/gomock" - batchmappb "github.com/numaproj/numaflow-go/pkg/apis/proto/batchmap/v1" - "github.com/numaproj/numaflow-go/pkg/apis/proto/batchmap/v1/batchmapmock" - "github.com/stretchr/testify/assert" - "golang.org/x/net/context" - "google.golang.org/protobuf/types/known/emptypb" - "google.golang.org/protobuf/types/known/timestamppb" - - sdkerr "github.com/numaproj/numaflow/pkg/sdkclient/error" -) - -func TestClient_IsReady(t *testing.T) { - var ctx = context.Background() - - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockClient := batchmapmock.NewMockBatchMapClient(ctrl) - mockClient.EXPECT().IsReady(gomock.Any(), gomock.Any()).Return(&batchmappb.ReadyResponse{Ready: true}, nil) - mockClient.EXPECT().IsReady(gomock.Any(), gomock.Any()).Return(&batchmappb.ReadyResponse{Ready: false}, fmt.Errorf("mock connection refused")) - - testClient, err := NewFromClient(mockClient) - assert.NoError(t, err) - reflect.DeepEqual(testClient, &client{ - grpcClt: mockClient, - }) - - ready, err := testClient.IsReady(ctx, &emptypb.Empty{}) - assert.True(t, ready) - assert.NoError(t, err) - - ready, err = testClient.IsReady(ctx, &emptypb.Empty{}) - assert.False(t, ready) - assert.EqualError(t, err, "mock connection refused") -} - -func TestClient_BatchMapFn(t *testing.T) { - var ctx = context.Background() - - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockClient := batchmapmock.NewMockBatchMapClient(ctrl) - mockMapclient := batchmapmock.NewMockBatchMap_BatchMapFnClient(ctrl) - - mockMapclient.EXPECT().Send(gomock.Any()).Return(nil).AnyTimes() - mockMapclient.EXPECT().CloseSend().Return(nil).AnyTimes() - mockMapclient.EXPECT().Recv().Return(&batchmappb.BatchMapResponse{ - Results: []*batchmappb.BatchMapResponse_Result{ - { - Keys: []string{"client_test"}, - Value: []byte(`test1`), - }, - }, - Id: "test1", - }, nil) - 
mockMapclient.EXPECT().Recv().Return(&batchmappb.BatchMapResponse{ - Results: []*batchmappb.BatchMapResponse_Result{ - { - Keys: []string{"client_test"}, - Value: []byte(`test2`), - }, - }, - Id: "test2", - }, io.EOF) - - mockClient.EXPECT().BatchMapFn(gomock.Any(), gomock.Any()).Return(mockMapclient, nil) - - testClient, err := NewFromClient(mockClient) - assert.NoError(t, err) - reflect.DeepEqual(testClient, &client{ - grpcClt: mockClient, - }) - - messageCh := make(chan *batchmappb.BatchMapRequest) - close(messageCh) - responseCh, _ := testClient.BatchMapFn(ctx, messageCh) - idx := 1 - for response := range responseCh { - id := fmt.Sprintf("test%d", idx) - assert.Equal(t, &batchmappb.BatchMapResponse{ - Results: []*batchmappb.BatchMapResponse_Result{ - { - Keys: []string{"client_test"}, - Value: []byte(id), - }, - }, - Id: id, - }, response) - idx += 1 - } -} - -func TestClientStreamError_BatchMapFn(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockClient := batchmapmock.NewMockBatchMapClient(ctrl) - mockMapclient := batchmapmock.NewMockBatchMap_BatchMapFnClient(ctrl) - - mockMapclient.EXPECT().Send(gomock.Any()).Return(nil).AnyTimes() - mockMapclient.EXPECT().CloseSend().Return(nil).AnyTimes() - mockMapclient.EXPECT().Recv().Return(&batchmappb.BatchMapResponse{ - Results: []*batchmappb.BatchMapResponse_Result{ - { - Keys: []string{"client_test"}, - Value: []byte(`test1`), - }, - }, - Id: "test1", - }, fmt.Errorf("recv error")).AnyTimes() - mockMapclient.EXPECT().Recv().Return(&batchmappb.BatchMapResponse{ - Results: []*batchmappb.BatchMapResponse_Result{ - { - Keys: []string{"client_test"}, - Value: []byte(`test2`), - }, - }, - Id: "test2", - }, io.EOF).AnyTimes() - - mockClient.EXPECT().BatchMapFn(gomock.Any(), gomock.Any()).Return(mockMapclient, nil) - - testClient, err := NewFromClient(mockClient) - assert.NoError(t, err) - 
reflect.DeepEqual(testClient, &client{ - grpcClt: mockClient, - }) - - messageCh := make(chan *batchmappb.BatchMapRequest) - responseCh, errCh := testClient.BatchMapFn(ctx, messageCh) - go func() { - defer close(messageCh) - requests := []*batchmappb.BatchMapRequest{{ - Keys: []string{"client"}, - Value: []byte(`test1`), - EventTime: timestamppb.New(time.Time{}), - Watermark: timestamppb.New(time.Time{}), - Id: "test1", - }, { - Keys: []string{"client"}, - Value: []byte(`test2`), - EventTime: timestamppb.New(time.Time{}), - Watermark: timestamppb.New(time.Time{}), - Id: "test2", - }} - for _, req := range requests { - messageCh <- req - } - }() - -readLoop: - for { - select { - case err = <-errCh: - if err != nil { - assert.Equal(t, err, sdkerr.ToUDFErr("c.grpcClt.BatchMapFn", fmt.Errorf("recv error"))) - break readLoop - } - case _, ok := <-responseCh: - if !ok { - break - } - } - } - assert.Equal(t, err, sdkerr.ToUDFErr("c.grpcClt.BatchMapFn", fmt.Errorf("recv error"))) -} - -func TestClientRpcError_BatchMapFn(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockClient := batchmapmock.NewMockBatchMapClient(ctrl) - mockMapclient := batchmapmock.NewMockBatchMap_BatchMapFnClient(ctrl) - - mockMapclient.EXPECT().Send(gomock.Any()).Return(nil).AnyTimes() - mockMapclient.EXPECT().CloseSend().Return(nil).AnyTimes() - mockMapclient.EXPECT().Recv().Return(&batchmappb.BatchMapResponse{ - Results: []*batchmappb.BatchMapResponse_Result{ - { - Keys: []string{"client_test"}, - Value: []byte(`test1`), - }, - }, - Id: "test1", - }, fmt.Errorf("recv error")).AnyTimes() - mockMapclient.EXPECT().Recv().Return(&batchmappb.BatchMapResponse{ - Results: []*batchmappb.BatchMapResponse_Result{ - { - Keys: []string{"client_test"}, - Value: []byte(`test2`), - }, - }, - Id: "test2", - }, io.EOF).AnyTimes() - - mockClient.EXPECT().BatchMapFn(gomock.Any(), 
gomock.Any()).Return(mockMapclient, fmt.Errorf("error from rpc")) - - testClient, err := NewFromClient(mockClient) - assert.NoError(t, err) - reflect.DeepEqual(testClient, &client{ - grpcClt: mockClient, - }) - - messageCh := make(chan *batchmappb.BatchMapRequest) - responseCh, errCh := testClient.BatchMapFn(ctx, messageCh) - go func() { - defer close(messageCh) - requests := []*batchmappb.BatchMapRequest{{ - Keys: []string{"client"}, - Value: []byte(`test1`), - EventTime: timestamppb.New(time.Time{}), - Watermark: timestamppb.New(time.Time{}), - Id: "test1", - }, { - Keys: []string{"client"}, - Value: []byte(`test2`), - EventTime: timestamppb.New(time.Time{}), - Watermark: timestamppb.New(time.Time{}), - Id: "test2", - }} - for _, req := range requests { - messageCh <- req - } - }() - -readLoop: - for { - select { - case err = <-errCh: - if err != nil { - assert.Equal(t, err, sdkerr.ToUDFErr("c.grpcClt.BatchMapFn", fmt.Errorf("error from rpc"))) - break readLoop - } - case _, ok := <-responseCh: - if !ok { - break - } - } - } - - assert.Equal(t, err, sdkerr.ToUDFErr("c.grpcClt.BatchMapFn", fmt.Errorf("error from rpc"))) -} diff --git a/pkg/sdkclient/batchmapper/interface.go b/pkg/sdkclient/batchmapper/interface.go deleted file mode 100644 index 4f0e40013d..0000000000 --- a/pkg/sdkclient/batchmapper/interface.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright 2022 The Numaproj Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package batchmapper - -import ( - "context" - - batchmappb "github.com/numaproj/numaflow-go/pkg/apis/proto/batchmap/v1" - "google.golang.org/protobuf/types/known/emptypb" -) - -// Client contains methods to call a gRPC client. -type Client interface { - CloseConn() error - IsReady(ctx context.Context, in *emptypb.Empty) (bool, error) - BatchMapFn(ctx context.Context, inputCh <-chan *batchmappb.BatchMapRequest) (<-chan *batchmappb.BatchMapResponse, <-chan error) -} diff --git a/pkg/sdkclient/mapper/client.go b/pkg/sdkclient/mapper/client.go index 8e3e3d24d6..972e842d4e 100644 --- a/pkg/sdkclient/mapper/client.go +++ b/pkg/sdkclient/mapper/client.go @@ -21,6 +21,7 @@ import ( "fmt" "time" + "go.uber.org/zap" "golang.org/x/sync/errgroup" "google.golang.org/grpc" "google.golang.org/protobuf/types/known/emptypb" @@ -36,9 +37,11 @@ import ( // client contains the grpc connection and the grpc client. type client struct { - conn *grpc.ClientConn - grpcClt mappb.MapClient - stream mappb.Map_MapFnClient + conn *grpc.ClientConn + grpcClt mappb.MapClient + stream mappb.Map_MapFnClient + batchMapMode bool + log *zap.SugaredLogger } // New creates a new client object. @@ -58,8 +61,9 @@ func New(ctx context.Context, serverInfo *serverinfo.ServerInfo, inputOptions .. c := new(client) c.conn = conn c.grpcClt = mappb.NewMapClient(conn) + c.batchMapMode = opts.BatchMapMode() - var logger = logging.FromContext(ctx) + c.log = logging.FromContext(ctx) waitUntilReady: for { @@ -69,7 +73,7 @@ waitUntilReady: default: _, err := c.IsReady(ctx, &emptypb.Empty{}) if err != nil { - logger.Warnf("Mapper server is not ready: %v", err) + c.log.Warnf("Mapper server is not ready: %v", err) time.Sleep(100 * time.Millisecond) continue waitUntilReady } @@ -150,6 +154,11 @@ func (c *client) IsReady(ctx context.Context, in *emptypb.Empty) (bool, error) { // MapFn applies a function to each map request element. 
func (c *client) MapFn(ctx context.Context, requests []*mappb.MapRequest) ([]*mappb.MapResponse, error) { + if c.batchMapMode { + // if it is a batch map, we need to send an end of transmission message to the server + // to indicate that the batch is finished. + requests = append(requests, &mappb.MapRequest{Status: &mappb.Status{Eot: true}}) + } var eg errgroup.Group // send n requests eg.Go(func() error { @@ -166,9 +175,10 @@ func (c *client) MapFn(ctx context.Context, requests []*mappb.MapRequest) ([]*ma return nil }) - // receive n responses - responses := make([]*mappb.MapResponse, len(requests)) + // receive the responses + var responses []*mappb.MapResponse eg.Go(func() error { + // we need to receive n+1 responses because the last response will be the end of transmission message. for i := 0; i < len(requests); i++ { select { case <-ctx.Done(): @@ -179,7 +189,16 @@ func (c *client) MapFn(ctx context.Context, requests []*mappb.MapRequest) ([]*ma if err != nil { return sdkerror.ToUDFErr("c.grpcClt.MapFn stream.Recv", err) } - responses[i] = resp + if resp.GetStatus() != nil && resp.GetStatus().GetEot() { + // we might get an end of transmission message from the server before receiving all the responses. + if i < len(requests)-1 { + c.log.Errorw("Received EOT message before all responses are received, we will wait indefinitely for the remaining responses", zap.Int("received_responses", i+1), zap.Int("total_requests", len(requests))) + } else { + break + } + } else { + responses = append(responses, resp) + } } return nil }) diff --git a/pkg/sdkclient/options.go b/pkg/sdkclient/options.go index e46e9c8869..c8728cc486 100644 --- a/pkg/sdkclient/options.go +++ b/pkg/sdkclient/options.go @@ -19,6 +19,7 @@ package sdkclient type Options struct { udsSockAddr string maxMessageSize int + batchMapMode bool } // UdsSockAddr returns the UDS sock addr. 
@@ -31,6 +32,11 @@ func (o *Options) MaxMessageSize() int { return o.maxMessageSize } +// BatchMapMode returns the batch map mode. +func (o *Options) BatchMapMode() bool { + return o.batchMapMode +} + // DefaultOptions returns the default options. func DefaultOptions(address string) *Options { return &Options{ @@ -55,3 +61,10 @@ func WithMaxMessageSize(size int) Option { opts.maxMessageSize = size } } + +// WithBatchMapMode sets the client to batch map mode. +func WithBatchMapMode() Option { + return func(opts *Options) { + opts.batchMapMode = true + } +} diff --git a/pkg/sources/transformer/grpc_transformer.go b/pkg/sources/transformer/grpc_transformer.go index 3becd414cb..b619074de0 100644 --- a/pkg/sources/transformer/grpc_transformer.go +++ b/pkg/sources/transformer/grpc_transformer.go @@ -114,7 +114,7 @@ func (u *GRPCBasedTransformer) ApplyTransform(ctx context.Context, messages []*i for i, resp := range responses { parentMessage, ok := idToMsgMapping[resp.GetId()] if !ok { - panic("tracker doesn't contain the message ID received from the response") + panic("tracker doesn't contain the message ID received from the response - " + resp.GetId()) } taggedMessages := make([]*isb.WriteMessage, len(resp.GetResults())) for i, result := range resp.GetResults() { diff --git a/pkg/udf/forward/forward.go b/pkg/udf/forward/forward.go index b3adb91e01..0f91c5e33a 100644 --- a/pkg/udf/forward/forward.go +++ b/pkg/udf/forward/forward.go @@ -53,7 +53,7 @@ type InterStepDataForward struct { fromBufferPartition isb.BufferReader // toBuffers is a map of toVertex name to the toVertex's owned buffers. 
toBuffers map[string][]isb.BufferWriter - FSD forwarder.ToWhichStepDecider + fsd forwarder.ToWhichStepDecider wmFetcher fetch.Fetcher // wmPublishers stores the vertex to publisher mapping wmPublishers map[string]publish.Publisher @@ -78,11 +78,6 @@ func NewInterStepDataForward(vertexInstance *dfv1.VertexInstance, fromStep isb.B } } - // we can have all modes empty if no option was enabled, this is an invalid case - if !isValidMapMode(options) { - return nil, fmt.Errorf("no valid map mode selected") - } - // creating a context here which is managed by the forwarder's lifecycle ctx, cancel := context.WithCancel(context.Background()) @@ -91,7 +86,7 @@ func NewInterStepDataForward(vertexInstance *dfv1.VertexInstance, fromStep isb.B cancelFn: cancel, fromBufferPartition: fromStep, toBuffers: toSteps, - FSD: fsd, + fsd: fsd, wmFetcher: fetchWatermark, wmPublishers: publishWatermark, // should we do a check here for the values not being null? @@ -271,22 +266,12 @@ func (isdf *InterStepDataForward) forwardAChunk(ctx context.Context) { // Trigger the UDF processing based on the mode enabled for map // ie Batch Map or unary map // This will be a blocking call until the all the UDF results for the batch are received. 
- if isdf.opts.batchMapUdfApplier != nil { - udfResults, err = isdf.processBatchMessages(ctx, dataMessages) - if err != nil { - isdf.opts.logger.Errorw("failed to processBatchMessages", zap.Error(err)) - // As there's no partial failure, non-ack all the readOffsets - isdf.fromBufferPartition.NoAck(ctx, readOffsets) - return - } - } else { - udfResults, err = isdf.applyUDF(ctx, dataMessages) - if err != nil { - isdf.opts.logger.Errorw("failed to applyUDF", zap.Error(err)) - // As there's no partial failure, non-ack all the readOffsets - isdf.fromBufferPartition.NoAck(ctx, readOffsets) - return - } + udfResults, err = isdf.applyUDF(ctx, dataMessages) + if err != nil { + isdf.opts.logger.Errorw("failed to applyUDF", zap.Error(err)) + // As there's no partial failure, non-ack all the readOffsets + isdf.fromBufferPartition.NoAck(ctx, readOffsets) + return } // let's figure out which vertex to send the results to. @@ -377,54 +362,6 @@ func (isdf *InterStepDataForward) forwardAChunk(ctx context.Context) { metrics.ForwardAChunkProcessingTime.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica))}).Observe(float64(time.Since(start).Microseconds())) } -// processBatchMessages is used for processing the Batch Map mode UDF -// batch map processing we send a list of N input requests together to the UDF and get the consolidated -// response for all of them. -// if there is an error it will do a retry and will return when -// - if there is a success while retrying -// - if shutdown has been initiated. 
-// - if context is cancelled -func (isdf *InterStepDataForward) processBatchMessages(ctx context.Context, dataMessages []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { - concurrentUDFProcessingStart := time.Now() - var udfResults []isb.ReadWriteMessagePair - var err error - - for { - // invoke the UDF call - udfResults, err = isdf.opts.batchMapUdfApplier.ApplyBatchMap(ctx, dataMessages) - if err != nil { - // check if there is a shutdown, in this case we will not retry further - if ok, _ := isdf.IsShuttingDown(); ok { - isdf.opts.logger.Errorw("batchMapUDF.Apply, Stop called during udf processing", zap.Error(err)) - metrics.PlatformError.With(map[string]string{metrics.LabelVertex: isdf.vertexName, - metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica))}).Inc() - return nil, err - } - isdf.opts.logger.Errorw("batchMapUDF.Apply got error during batch udf processing", zap.Error(err)) - select { - case <-ctx.Done(): - // no point in retrying if the context is cancelled - return nil, err - case <-time.After(time.Second): - // sleep for 1 second and keep retrying after that - // Keeping one second of timeout for consistency with other map modes (unary and stream) - // for retrying UDF errors. - // These errors can be induced due to grpc connections, pod restarts etc. - // Hence, a conservative sleep time chosen here. - // TODO: Would be a good exercise to understand the behaviour and see what can be - // a suitable time across all modes. 
- continue - } - } - // if no error is found, then we do not need to retry - break - } - isdf.opts.logger.Debugw("batch map applyUDF completed", zap.Int("concurrency", isdf.opts.udfConcurrency), zap.Duration("took", time.Since(concurrentUDFProcessingStart))) - metrics.ConcurrentUDFProcessingTime.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica))}).Observe(float64(time.Since(concurrentUDFProcessingStart).Microseconds())) - return udfResults, nil -} - // streamMessage streams the data messages to the next step. func (isdf *InterStepDataForward) streamMessage(ctx context.Context, dataMessages []*isb.ReadMessage) (map[string][][]isb.Offset, error) { // create space for writeMessages specific to each step as we could forward to all the steps too. @@ -619,7 +556,7 @@ func (isdf *InterStepDataForward) writeToBuffer(ctx context.Context, toBufferPar metrics.LabelReason: err.Error(), }).Add(float64(len(msg.Payload))) - isdf.opts.logger.Infow("Dropped message", zap.String("reason", err.Error()), zap.String("partition", toBufferPartition.GetName()), zap.String("vertex", isdf.vertexName), zap.String("pipeline", isdf.pipelineName)) + isdf.opts.logger.Infow("Dropped message", zap.String("reason", err.Error()), zap.String("partition", toBufferPartition.GetName()), zap.String("vertex", isdf.vertexName), zap.String("pipeline", isdf.pipelineName), zap.String("msg_id", msg.ID.String())) } else { needRetry = true // we retry only failed messages @@ -694,7 +631,7 @@ func (isdf *InterStepDataForward) applyUDF(ctx context.Context, readMessages []* // whereToStep executes the WhereTo interfaces and then updates the to step's writeToBuffers buffer. 
func (isdf *InterStepDataForward) whereToStep(writeMessage *isb.WriteMessage, messageToStep map[string][][]isb.Message, readMessage *isb.ReadMessage) error { // call WhereTo and drop it on errors - to, err := isdf.FSD.WhereTo(writeMessage.Keys, writeMessage.Tags, writeMessage.ID.String()) + to, err := isdf.fsd.WhereTo(writeMessage.Keys, writeMessage.Tags, writeMessage.ID.String()) if err != nil { isdf.opts.logger.Errorw("failed in whereToStep", zap.Error(isb.MessageWriteErr{Name: isdf.fromBufferPartition.GetName(), Header: readMessage.Header, Body: readMessage.Body, Message: fmt.Sprintf("WhereTo failed, %s", err)})) // a shutdown can break the blocking loop caused due to InternalErr @@ -725,10 +662,3 @@ func errorArrayToMap(errs []error) map[string]int64 { } return result } - -// check if the options provided are for a valid map mode -// exactly one of the appliers should not be nil as only one mode can be active at a time, not more not less only 1 -func isValidMapMode(opts *options) bool { - // if all the appliers are empty, then it is an invalid scenario - return !((opts.batchMapUdfApplier == nil) && (opts.unaryMapUdfApplier == nil) && (opts.streamMapUdfApplier == nil)) -} diff --git a/pkg/udf/forward/forward_test.go b/pkg/udf/forward/forward_test.go index da94970da0..5984bf4a69 100644 --- a/pkg/udf/forward/forward_test.go +++ b/pkg/udf/forward/forward_test.go @@ -103,135 +103,31 @@ func (f myForwardTest) ApplyMapStream(ctx context.Context, message *isb.ReadMess } type testingOpts struct { - name string - batchSize int64 - streamEnabled bool - batchMapEnabled bool - unaryMapEnabled bool -} - -// Check valid initialization for map modes in forwarder -func TestValidMapModeInit(t *testing.T) { - tests := []struct { - name string - valid bool - streamEnabled bool - batchMapEnabled bool - unaryMapEnabled bool - }{ - { - name: "valid_stream", - streamEnabled: true, - batchMapEnabled: false, - unaryMapEnabled: false, - valid: true, - }, - { - name: "valid_batch", - 
streamEnabled: false, - batchMapEnabled: false, - unaryMapEnabled: true, - valid: true, - }, - { - name: "valid_unary", - streamEnabled: false, - batchMapEnabled: true, - unaryMapEnabled: false, - valid: true, - }, - { - name: "all_disabled", - streamEnabled: false, - batchMapEnabled: false, - unaryMapEnabled: false, - valid: false, - }, - { - name: "all_enabled", - streamEnabled: true, - batchMapEnabled: true, - unaryMapEnabled: true, - valid: false, - }, - { - name: "two_enabled", - streamEnabled: true, - batchMapEnabled: true, - unaryMapEnabled: false, - valid: false, - }, - } - for _, tt := range tests { - t.Run(tt.name+"_map_mode", func(t *testing.T) { - batchSize := int64(1) - fromStep := simplebuffer.NewInMemoryBuffer("from", 5*batchSize, 0) - to11 := simplebuffer.NewInMemoryBuffer("to1-1", 2*batchSize, 0) - to12 := simplebuffer.NewInMemoryBuffer("to1-2", 2*batchSize, 1) - toSteps := map[string][]isb.BufferWriter{ - "to1": {to11, to12}, - } - - vertex := &dfv1.Vertex{Spec: dfv1.VertexSpec{ - PipelineName: "testPipeline", - AbstractVertex: dfv1.AbstractVertex{ - Name: "testVertex", - }, - }} - vertexInstance := &dfv1.VertexInstance{ - Vertex: vertex, - Replica: 0, - } - - _, cancel := context.WithTimeout(context.Background(), time.Second*10) - defer cancel() - - fetchWatermark, publishWatermark := generic.BuildNoOpWatermarkProgressorsFromBufferMap(toSteps) - - opts := []Option{WithReadBatchSize(batchSize)} - if tt.batchMapEnabled { - opts = append(opts, WithUDFBatchMap(myForwardTest{})) - } - if tt.streamEnabled { - opts = append(opts, WithUDFStreamingMap(myForwardTest{})) - } - if tt.unaryMapEnabled { - opts = append(opts, WithUDFUnaryMap(myForwardTest{})) - } - - idleManager, _ := wmb.NewIdleManager(1, len(toSteps)) - _, err := NewInterStepDataForward(vertexInstance, fromStep, toSteps, &mySourceForwardTestRoundRobin{}, fetchWatermark, publishWatermark, idleManager, opts...) 
- if tt.valid { - assert.NoError(t, err, "expected no error") - } else { - assert.Error(t, err, "expected error") - } - }) - } + name string + batchSize int64 + streamEnabled bool + mapEnabled bool } func TestNewInterStepDataForward(t *testing.T) { tests := []testingOpts{ { - name: "stream_forward", - batchSize: 1, - streamEnabled: true, - batchMapEnabled: false, - unaryMapEnabled: false, + name: "stream_forward", + batchSize: 1, + streamEnabled: true, + mapEnabled: false, }, { - name: "batch_forward", - batchSize: 10, - streamEnabled: false, - batchMapEnabled: false, - unaryMapEnabled: true, + name: "batch_forward", + batchSize: 10, + streamEnabled: false, + mapEnabled: true, }, { - name: "batch_map_forward", - batchSize: 10, - streamEnabled: false, - batchMapEnabled: true, - unaryMapEnabled: false, + name: "batch_map_forward", + batchSize: 10, + streamEnabled: false, + mapEnabled: true, }, } for _, tt := range tests { @@ -264,14 +160,11 @@ func TestNewInterStepDataForward(t *testing.T) { fetchWatermark, publishWatermark := generic.BuildNoOpWatermarkProgressorsFromBufferMap(toSteps) opts := []Option{WithReadBatchSize(batchSize)} - if tt.batchMapEnabled { - opts = append(opts, WithUDFBatchMap(myForwardTest{})) - } if tt.streamEnabled { opts = append(opts, WithUDFStreamingMap(myForwardTest{})) } - if tt.unaryMapEnabled { - opts = append(opts, WithUDFUnaryMap(myForwardTest{})) + if tt.mapEnabled { + opts = append(opts, WithUDFMap(myForwardTest{})) } idleManager, _ := wmb.NewIdleManager(1, len(toSteps)) @@ -379,14 +272,11 @@ func TestNewInterStepDataForward(t *testing.T) { idleManager, _ := wmb.NewIdleManager(1, len(toSteps)) opts := []Option{WithReadBatchSize(batchSize)} - if tt.batchMapEnabled { - opts = append(opts, WithUDFBatchMap(&myForwardToAllTest{})) - } if tt.streamEnabled { opts = append(opts, WithUDFStreamingMap(&myForwardToAllTest{})) } - if tt.unaryMapEnabled { - opts = append(opts, WithUDFUnaryMap(&myForwardToAllTest{})) + if tt.mapEnabled { + opts = 
append(opts, WithUDFMap(&myForwardToAllTest{})) } f, err := NewInterStepDataForward(vertexInstance, fromStep, toSteps, &myForwardToAllTest{}, fetchWatermark, publishWatermark, idleManager, opts...) @@ -556,14 +446,11 @@ func TestNewInterStepDataForward(t *testing.T) { idleManager, _ := wmb.NewIdleManager(1, len(toSteps)) opts := []Option{WithReadBatchSize(batchSize)} - if tt.batchMapEnabled { - opts = append(opts, WithUDFBatchMap(myForwardDropTest{})) - } if tt.streamEnabled { opts = append(opts, WithUDFStreamingMap(myForwardDropTest{})) } - if tt.unaryMapEnabled { - opts = append(opts, WithUDFUnaryMap(myForwardDropTest{})) + if tt.mapEnabled { + opts = append(opts, WithUDFMap(myForwardDropTest{})) } f, err := NewInterStepDataForward(vertexInstance, fromStep, toSteps, myForwardDropTest{}, fetchWatermark, publishWatermark, idleManager, opts...) @@ -746,14 +633,11 @@ func TestNewInterStepDataForward(t *testing.T) { idleManager, _ := wmb.NewIdleManager(1, len(toSteps)) opts := []Option{WithReadBatchSize(batchSize)} - if tt.batchMapEnabled { - opts = append(opts, WithUDFBatchMap(myForwardTest{})) - } if tt.streamEnabled { opts = append(opts, WithUDFStreamingMap(myForwardTest{})) } - if tt.unaryMapEnabled { - opts = append(opts, WithUDFUnaryMap(myForwardTest{})) + if tt.mapEnabled { + opts = append(opts, WithUDFMap(myForwardTest{})) } f, err := NewInterStepDataForward(vertexInstance, fromStep, toSteps, &mySourceForwardTestRoundRobin{}, fetchWatermark, publishWatermark, idleManager, opts...) 
@@ -893,14 +777,11 @@ func TestNewInterStepDataForward(t *testing.T) { idleManager, _ := wmb.NewIdleManager(1, len(toSteps)) opts := []Option{WithReadBatchSize(batchSize)} - if tt.batchMapEnabled { - opts = append(opts, WithUDFBatchMap(myForwardApplyUDFErrTest{})) - } if tt.streamEnabled { opts = append(opts, WithUDFStreamingMap(myForwardApplyUDFErrTest{})) } - if tt.unaryMapEnabled { - opts = append(opts, WithUDFUnaryMap(myForwardApplyUDFErrTest{})) + if tt.mapEnabled { + opts = append(opts, WithUDFMap(myForwardApplyUDFErrTest{})) } f, err := NewInterStepDataForward(vertexInstance, fromStep, toSteps, myForwardApplyUDFErrTest{}, fetchWatermark, publishWatermark, idleManager, opts...) @@ -909,11 +790,6 @@ func TestNewInterStepDataForward(t *testing.T) { assert.False(t, to1.IsFull()) assert.True(t, to1.IsEmpty()) - if tt.batchMapEnabled { - // TODO(map-batch): if map-batch is enabled we panic on seeing an error, think of a way to - // gracefully test that - t.Skip() - } stopped := f.Start() // write some data _, errs := fromStep.Write(ctx, writeMessages[0:batchSize]) @@ -954,14 +830,11 @@ func TestNewInterStepDataForward(t *testing.T) { idleManager, _ := wmb.NewIdleManager(1, len(toSteps)) opts := []Option{WithReadBatchSize(batchSize)} - if tt.batchMapEnabled { - opts = append(opts, WithUDFBatchMap(myForwardApplyWhereToErrTest{})) - } if tt.streamEnabled { opts = append(opts, WithUDFStreamingMap(myForwardApplyWhereToErrTest{})) } - if tt.unaryMapEnabled { - opts = append(opts, WithUDFUnaryMap(myForwardApplyWhereToErrTest{})) + if tt.mapEnabled { + opts = append(opts, WithUDFMap(myForwardApplyWhereToErrTest{})) } f, err := NewInterStepDataForward(vertexInstance, fromStep, toSteps, myForwardApplyWhereToErrTest{}, fetchWatermark, publishWatermark, idleManager, opts...) 
@@ -1008,14 +881,11 @@ func TestNewInterStepDataForward(t *testing.T) { idleManager, _ := wmb.NewIdleManager(1, len(toSteps)) opts := []Option{WithReadBatchSize(batchSize)} - if tt.batchMapEnabled { - opts = append(opts, WithUDFBatchMap(myForwardInternalErrTest{})) - } if tt.streamEnabled { opts = append(opts, WithUDFStreamingMap(myForwardInternalErrTest{})) } - if tt.unaryMapEnabled { - opts = append(opts, WithUDFUnaryMap(myForwardInternalErrTest{})) + if tt.mapEnabled { + opts = append(opts, WithUDFMap(myForwardInternalErrTest{})) } f, err := NewInterStepDataForward(vertexInstance, fromStep, toSteps, myForwardInternalErrTest{}, fetchWatermark, publishWatermark, idleManager, opts...) @@ -1024,11 +894,6 @@ func TestNewInterStepDataForward(t *testing.T) { assert.False(t, to1.IsFull()) assert.True(t, to1.IsEmpty()) - if tt.batchMapEnabled { - // TODO(map-batch): if map-batch is enabled we panic on seeing an error, think of a way to - // gracefully test that - t.Skip() - } stopped := f.Start() // write some data @@ -1162,7 +1027,7 @@ func TestNewInterStepDataForwardIdleWatermark(t *testing.T) { idleManager, _ := wmb.NewIdleManager(1, len(toSteps)) - opts := []Option{WithReadBatchSize(2), WithUDFUnaryMap(myForwardTest{})} + opts := []Option{WithReadBatchSize(2), WithUDFMap(myForwardTest{})} f, err := NewInterStepDataForward(vertexInstance, fromStep, toSteps, myForwardTest{}, fetchWatermark, publishWatermark, idleManager, opts...) assert.NoError(t, err) assert.False(t, to1.IsFull()) @@ -1342,7 +1207,7 @@ func TestNewInterStepDataForwardIdleWatermark_Reset(t *testing.T) { }() idleManager, _ := wmb.NewIdleManager(1, len(toSteps)) - opts := []Option{WithReadBatchSize(2), WithUDFUnaryMap(myForwardTest{})} + opts := []Option{WithReadBatchSize(2), WithUDFMap(myForwardTest{})} f, err := NewInterStepDataForward(vertexInstance, fromStep, toSteps, myForwardTest{}, fetchWatermark, publishWatermark, idleManager, opts...) 
assert.NoError(t, err) @@ -1579,7 +1444,7 @@ func TestInterStepDataForwardSinglePartition(t *testing.T) { // create a forwarder idleManager, _ := wmb.NewIdleManager(1, len(toSteps)) - opts := []Option{WithReadBatchSize(5), WithUDFUnaryMap(mySourceForwardTest{})} + opts := []Option{WithReadBatchSize(5), WithUDFMap(mySourceForwardTest{})} f, err := NewInterStepDataForward(vertexInstance, fromStep, toSteps, mySourceForwardTest{}, fetchWatermark, publishWatermark, idleManager, opts...) assert.NoError(t, err) @@ -1647,7 +1512,7 @@ func TestInterStepDataForwardMultiplePartition(t *testing.T) { idleManager, _ := wmb.NewIdleManager(1, len(toSteps)) - opts := []Option{WithReadBatchSize(5), WithUDFUnaryMap(mySourceForwardTest{})} + opts := []Option{WithReadBatchSize(5), WithUDFMap(mySourceForwardTest{})} f, err := NewInterStepDataForward(vertexInstance, fromStep, toSteps, &mySourceForwardTestRoundRobin{}, fetchWatermark, publishWatermark, idleManager, opts...) assert.NoError(t, err) @@ -1717,8 +1582,6 @@ func TestWriteToBuffer(t *testing.T) { batchSize int64 strategy dfv1.BufferFullWritingStrategy streamEnabled bool - unaryEnabled bool - batchEnabled bool throwError bool }{ { @@ -1726,8 +1589,6 @@ func TestWriteToBuffer(t *testing.T) { batchSize: 10, strategy: dfv1.DiscardLatest, streamEnabled: false, - batchEnabled: false, - unaryEnabled: true, // should not throw any error as we drop messages and finish writing before context is cancelled throwError: false, }, @@ -1736,8 +1597,6 @@ func TestWriteToBuffer(t *testing.T) { batchSize: 10, strategy: dfv1.RetryUntilSuccess, streamEnabled: false, - batchEnabled: false, - unaryEnabled: true, // should throw context closed error as we keep retrying writing until context is cancelled throwError: true, }, @@ -1746,8 +1605,6 @@ func TestWriteToBuffer(t *testing.T) { batchSize: 1, strategy: dfv1.DiscardLatest, streamEnabled: true, - batchEnabled: false, - unaryEnabled: false, // should not throw any error as we drop messages and finish 
writing before context is cancelled throwError: false, }, @@ -1756,8 +1613,6 @@ func TestWriteToBuffer(t *testing.T) { batchSize: 1, strategy: dfv1.RetryUntilSuccess, streamEnabled: true, - batchEnabled: false, - unaryEnabled: false, // should throw context closed error as we keep retrying writing until context is cancelled throwError: true, }, @@ -1766,8 +1621,6 @@ func TestWriteToBuffer(t *testing.T) { batchSize: 10, strategy: dfv1.DiscardLatest, streamEnabled: false, - batchEnabled: true, - unaryEnabled: false, // should not throw any error as we drop messages and finish writing before context is cancelled throwError: false, }, @@ -1776,8 +1629,6 @@ func TestWriteToBuffer(t *testing.T) { batchSize: 10, strategy: dfv1.RetryUntilSuccess, streamEnabled: false, - batchEnabled: true, - unaryEnabled: false, // should throw context closed error as we keep retrying writing until context is cancelled throwError: true, }, @@ -1807,14 +1658,10 @@ func TestWriteToBuffer(t *testing.T) { idleManager, _ := wmb.NewIdleManager(1, len(toSteps)) opts := []Option{WithReadBatchSize(value.batchSize)} - if value.batchEnabled { - opts = append(opts, WithUDFBatchMap(myForwardApplyWhereToErrTest{})) - } if value.streamEnabled { opts = append(opts, WithUDFStreamingMap(myForwardApplyWhereToErrTest{})) - } - if value.unaryEnabled { - opts = append(opts, WithUDFUnaryMap(myForwardApplyWhereToErrTest{})) + } else { + opts = append(opts, WithUDFMap(myForwardApplyWhereToErrTest{})) } f, err := NewInterStepDataForward(vertexInstance, fromStep, toSteps, myForwardTest{}, fetchWatermark, publishWatermark, idleManager, opts...) 
diff --git a/pkg/udf/forward/options.go b/pkg/udf/forward/options.go index 31374d64ee..7eba8d8147 100644 --- a/pkg/udf/forward/options.go +++ b/pkg/udf/forward/options.go @@ -44,8 +44,6 @@ type options struct { unaryMapUdfApplier applier.MapApplier // streamMapUdfApplier is the UDF applier for a server streaming map mode streamMapUdfApplier applier.MapStreamApplier - // batchMapUdfApplier is the UDF applier for a batch map mode - batchMapUdfApplier applier.BatchMapApplier } type Option func(*options) error @@ -57,7 +55,6 @@ func DefaultOptions() *options { retryInterval: time.Millisecond, logger: logging.NewLogger(), unaryMapUdfApplier: nil, - batchMapUdfApplier: nil, streamMapUdfApplier: nil, } } @@ -114,23 +111,11 @@ func WithCallbackUploader(cp *callback.Uploader) Option { // exactly one of the appliers should not be nil as only one mode can be active at a time, not more not less only 1 // There is a case where none of them is set which cannot be allowed -// WithUDFBatchMap enables the batch map for UDF if provided with a non-nil applier -func WithUDFBatchMap(f applier.BatchMapApplier) Option { +// WithUDFMap enables the unary map for UDF if provided with a non-nil applier +func WithUDFMap(f applier.MapApplier) Option { return func(o *options) error { // only overwrite for the same option is allowed, other two cannot be set - if f != nil && o.unaryMapUdfApplier == nil && o.streamMapUdfApplier == nil { - o.batchMapUdfApplier = f - return nil - } - return fmt.Errorf("invalid option") - } -} - -// WithUDFUnaryMap enables the unary map for UDF if provided with a non-nil applier -func WithUDFUnaryMap(f applier.MapApplier) Option { - return func(o *options) error { - // only overwrite for the same option is allowed, other two cannot be set - if f != nil && o.batchMapUdfApplier == nil && o.streamMapUdfApplier == nil { + if f != nil && o.streamMapUdfApplier == nil { o.unaryMapUdfApplier = f return nil } @@ -142,7 +127,7 @@ func WithUDFUnaryMap(f applier.MapApplier) 
Option { func WithUDFStreamingMap(f applier.MapStreamApplier) Option { return func(o *options) error { // only overwrite for the same option is allowed, other two cannot be set - if f != nil && o.unaryMapUdfApplier == nil && o.batchMapUdfApplier == nil { + if f != nil && o.unaryMapUdfApplier == nil { o.streamMapUdfApplier = f return nil } diff --git a/pkg/udf/forward/shutdown_test.go b/pkg/udf/forward/shutdown_test.go index 338feaf941..d3dd4d69ce 100644 --- a/pkg/udf/forward/shutdown_test.go +++ b/pkg/udf/forward/shutdown_test.go @@ -52,32 +52,28 @@ func (s myShutdownTest) ApplyMapStream(ctx context.Context, message *isb.ReadMes func TestInterStepDataForward(t *testing.T) { tests := []struct { - name string - batchSize int64 - streamEnabled bool - batchMapEnabled bool - unaryEnabled bool + name string + batchSize int64 + streamEnabled bool + unaryEnabled bool }{ { - name: "stream_forward", - batchSize: 1, - streamEnabled: true, - batchMapEnabled: false, - unaryEnabled: false, + name: "stream_forward", + batchSize: 1, + streamEnabled: true, + unaryEnabled: false, }, { - name: "batch_forward", - batchSize: 5, - streamEnabled: false, - batchMapEnabled: false, - unaryEnabled: true, + name: "batch_forward", + batchSize: 5, + streamEnabled: false, + unaryEnabled: true, }, { - name: "batch_map_forward", - batchSize: 5, - streamEnabled: false, - batchMapEnabled: true, - unaryEnabled: false, + name: "batch_map_forward", + batchSize: 5, + streamEnabled: false, + unaryEnabled: true, }, } for _, tt := range tests { @@ -109,14 +105,11 @@ func TestInterStepDataForward(t *testing.T) { fetchWatermark, publishWatermark := generic.BuildNoOpWatermarkProgressorsFromBufferMap(toSteps) opts := []Option{WithReadBatchSize(batchSize)} - if tt.batchMapEnabled { - opts = append(opts, WithUDFBatchMap(myShutdownTest{})) - } if tt.streamEnabled { opts = append(opts, WithUDFStreamingMap(myShutdownTest{})) } if tt.unaryEnabled { - opts = append(opts, WithUDFUnaryMap(myShutdownTest{})) + opts = 
append(opts, WithUDFMap(myShutdownTest{})) } idleManager, _ := wmb.NewIdleManager(1, len(toSteps)) @@ -162,14 +155,11 @@ func TestInterStepDataForward(t *testing.T) { idleManager, _ := wmb.NewIdleManager(1, len(toSteps)) opts := []Option{WithReadBatchSize(batchSize)} - if tt.batchMapEnabled { - opts = append(opts, WithUDFBatchMap(myShutdownTest{})) - } if tt.streamEnabled { opts = append(opts, WithUDFStreamingMap(myShutdownTest{})) } if tt.unaryEnabled { - opts = append(opts, WithUDFUnaryMap(myShutdownTest{})) + opts = append(opts, WithUDFMap(myShutdownTest{})) } f, err := NewInterStepDataForward(vertexInstance, fromStep, toSteps, myShutdownTest{}, fetchWatermark, publishWatermark, idleManager, opts...) diff --git a/pkg/udf/map_udf.go b/pkg/udf/map_udf.go index 3303738449..a8262a461e 100644 --- a/pkg/udf/map_udf.go +++ b/pkg/udf/map_udf.go @@ -31,7 +31,6 @@ import ( "github.com/numaproj/numaflow/pkg/isb" "github.com/numaproj/numaflow/pkg/metrics" "github.com/numaproj/numaflow/pkg/sdkclient" - "github.com/numaproj/numaflow/pkg/sdkclient/batchmapper" "github.com/numaproj/numaflow/pkg/sdkclient/mapper" "github.com/numaproj/numaflow/pkg/sdkclient/mapstreamer" "github.com/numaproj/numaflow/pkg/sdkclient/serverinfo" @@ -71,7 +70,6 @@ func (u *MapUDFProcessor) Start(ctx context.Context) error { toVertexWmStores map[string]store.WatermarkStore mapHandler *rpc.GRPCBasedMap mapStreamHandler *rpc.GRPCBasedMapStream - batchMapHandler *rpc.GRPCBasedBatchMap idleManager wmb.IdleManager vertexName = u.VertexInstance.Vertex.Spec.Name pipelineName = u.VertexInstance.Vertex.Spec.PipelineName @@ -135,7 +133,6 @@ func (u *MapUDFProcessor) Start(ctx context.Context) error { opts := []forward.Option{forward.WithLogger(log)} enableMapUdfStream := false - enableBatchMapUdf := false maxMessageSize := sharedutil.LookupEnvIntOr(dfv1.EnvGRPCMaxMessageSize, sdkclient.DefaultGRPCMaxMessageSize) // Wait for map server info to be ready, we use the same info file for all the map modes @@ -146,6 
+143,7 @@ func (u *MapUDFProcessor) Start(ctx context.Context) error { // track all the resources that need to be closed var resourcesToClose []io.Closer + mapMode, ok := serverInfo.Metadata[serverinfo.MapModeKey] for index, bufferPartition := range fromBuffer { // Read the server info file to read which map mode is enabled @@ -153,7 +151,6 @@ func (u *MapUDFProcessor) Start(ctx context.Context) error { // we create a new client and handler for each partition because // the client is not thread safe since we use one common gRPC Bidirectional stream // to communicate with the server - mapMode, ok := serverInfo.Metadata[serverinfo.MapModeKey] if ok && (serverinfo.MapMode(mapMode) == serverinfo.StreamMap) { log.Info("Map mode enabled: Stream Map") @@ -176,22 +173,20 @@ func (u *MapUDFProcessor) Start(ctx context.Context) error { } else if ok && (serverinfo.MapMode(mapMode) == serverinfo.BatchMap) { log.Info("Map mode enabled: Batch Map") - // if Batch Map mode is enabled, create the client and handler for that accordingly - enableBatchMapUdf = true - // create the client and handler for batch map interface - batchMapClient, err := batchmapper.New(serverInfo, sdkclient.WithMaxMessageSize(maxMessageSize)) + // create the map client with batch map socket address + mapClient, err := mapper.New(ctx, serverInfo, sdkclient.WithMaxMessageSize(maxMessageSize), sdkclient.WithUdsSockAddr(sdkclient.BatchMapAddr), sdkclient.WithBatchMapMode()) if err != nil { return fmt.Errorf("failed to create batch map client, %w", err) } - batchMapHandler = rpc.NewUDSgRPCBasedBatchMap(vertexName, batchMapClient) + mapHandler = rpc.NewUDSgRPCBasedMap(ctx, mapClient, vertexName) // Readiness check - if err := batchMapHandler.WaitUntilReady(ctx); err != nil { + if err := mapHandler.WaitUntilReady(ctx); err != nil { return fmt.Errorf("failed on batch map UDF readiness check, %w", err) } - resourcesToClose = append(resourcesToClose, batchMapHandler) - opts = append(opts, 
forward.WithUDFBatchMap(batchMapHandler)) + resourcesToClose = append(resourcesToClose, mapHandler) + opts = append(opts, forward.WithUDFMap(mapHandler)) } else { log.Info("Map mode enabled: Unary Map") @@ -204,7 +199,7 @@ func (u *MapUDFProcessor) Start(ctx context.Context) error { if err != nil { return fmt.Errorf("failed to create map client, %w", err) } - mapHandler = rpc.NewUDSgRPCBasedMap(vertexName, mapClient) + mapHandler = rpc.NewUDSgRPCBasedMap(ctx, mapClient, vertexName) // Readiness check if err := mapHandler.WaitUntilReady(ctx); err != nil { @@ -212,7 +207,7 @@ func (u *MapUDFProcessor) Start(ctx context.Context) error { } resourcesToClose = append(resourcesToClose, mapHandler) - opts = append(opts, forward.WithUDFUnaryMap(mapHandler)) + opts = append(opts, forward.WithUDFMap(mapHandler)) } // Populate shuffle function map @@ -327,11 +322,8 @@ func (u *MapUDFProcessor) Start(ctx context.Context) error { // Add the correct client handler for the metrics server, based on the mode being used. if enableMapUdfStream { metricsOpts = metrics.NewMetricsOptions(ctx, u.VertexInstance.Vertex, []metrics.HealthChecker{mapStreamHandler}, lagReaders) - } else if enableBatchMapUdf { - metricsOpts = metrics.NewMetricsOptions(ctx, u.VertexInstance.Vertex, []metrics.HealthChecker{batchMapHandler}, lagReaders) } else { metricsOpts = metrics.NewMetricsOptions(ctx, u.VertexInstance.Vertex, []metrics.HealthChecker{mapHandler}, lagReaders) - } ms := metrics.NewMetricsServer(u.VertexInstance.Vertex, metricsOpts...) 
if shutdown, err := ms.Start(ctx); err != nil { @@ -353,13 +345,13 @@ func (u *MapUDFProcessor) Start(ctx context.Context) error { } } - // close the from vertex wm stores + // close the fromVertex wm stores // since we created the stores, we can close them for _, wmStore := range fromVertexWmStores { _ = wmStore.Close() } - // close the to vertex wm stores + // close the toVertex wm stores // since we created the stores, we can close them for _, wmStore := range toVertexWmStores { _ = wmStore.Close() diff --git a/pkg/udf/rpc/grpc_batch_map.go b/pkg/udf/rpc/grpc_batch_map.go deleted file mode 100644 index 0be7336f13..0000000000 --- a/pkg/udf/rpc/grpc_batch_map.go +++ /dev/null @@ -1,207 +0,0 @@ -/* -Copyright 2022 The Numaproj Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package rpc - -import ( - "context" - "fmt" - "time" - - batchmappb "github.com/numaproj/numaflow-go/pkg/apis/proto/batchmap/v1" - "google.golang.org/protobuf/types/known/emptypb" - "google.golang.org/protobuf/types/known/timestamppb" - - "github.com/numaproj/numaflow/pkg/isb" - "github.com/numaproj/numaflow/pkg/isb/tracker" - "github.com/numaproj/numaflow/pkg/sdkclient/batchmapper" - "github.com/numaproj/numaflow/pkg/shared/logging" -) - -// GRPCBasedBatchMap is a map applier that uses gRPC client to invoke the map UDF. It implements the applier.MapApplier interface. 
-type GRPCBasedBatchMap struct { - vertexName string - client batchmapper.Client -} - -func NewUDSgRPCBasedBatchMap(vertexName string, client batchmapper.Client) *GRPCBasedBatchMap { - return &GRPCBasedBatchMap{ - vertexName: vertexName, - client: client, - } -} - -// Close closes the gRPC client connection. -func (u *GRPCBasedBatchMap) Close() error { - return u.client.CloseConn() -} - -// IsHealthy checks if the map udf is healthy. -func (u *GRPCBasedBatchMap) IsHealthy(ctx context.Context) error { - return u.WaitUntilReady(ctx) -} - -// WaitUntilReady waits until the map udf is connected. -func (u *GRPCBasedBatchMap) WaitUntilReady(ctx context.Context) error { - log := logging.FromContext(ctx) - for { - select { - case <-ctx.Done(): - return fmt.Errorf("failed on readiness check: %w", ctx.Err()) - default: - if _, err := u.client.IsReady(ctx, &emptypb.Empty{}); err == nil { - return nil - } else { - log.Infof("Waiting for batch map udf to be ready: %v", err) - time.Sleep(1 * time.Second) - } - } - } -} - -// ApplyBatchMap is used to invoke the BatchMapFn RPC using the client. -// It applies the batch map udf on an array read messages and sends the responses for the whole batch. -func (u *GRPCBasedBatchMap) ApplyBatchMap(ctx context.Context, messages []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { - logger := logging.FromContext(ctx) - // udfResults is structure used to store the results corresponding to the read messages. - udfResults := make([]isb.ReadWriteMessagePair, 0) - - // inputChan is used to stream messages to the grpc client - inputChan := make(chan *batchmappb.BatchMapRequest) - - // Invoke the RPC from the client - respCh, errCh := u.client.BatchMapFn(ctx, inputChan) - - // trackerReq is used to store the read messages in a key, value manner where - // key is the read offset and the reference to read message as the value. 
- // Once the results are received from the UDF, we map the responses to the corresponding request - // using a lookup on this Tracker. - trackerReq := tracker.NewMessageTracker(messages) - - // Read routine: this goroutine iterates over the input messages and sends each - // of the read messages to the grpc client after transforming it to a BatchMapRequest. - // Once all messages are sent, it closes the input channel to indicate that all requests have been read. - // On creating a new request, we add it to a Tracker map so that the responses on the stream - // can be mapped backed to the given parent request - go func() { - defer close(inputChan) - for _, msg := range messages { - inputChan <- u.parseInputRequest(msg) - } - }() - - // Process the responses received on the response channel: - // This is an infinite loop which would exit - // 1. Once there are no more responses left to read from the channel - // 2. There is an error received on the error channel - // We have not added a case for context.Done as there is a handler for that in the client, and it should - // propagate it on the error channel itself. - // - // On getting a response, it would parse them and create a new ReadWriteMessagePair entry in the udfResults - // Any errors received from the client, are propagated back to the caller. -loop: - for { - select { - // got an error on the error channel, so return immediately - case err := <-errCh: - err = &ApplyUDFErr{ - UserUDFErr: false, - Message: fmt.Sprintf("gRPC client.BatchMapFn failed, %s", err), - InternalErr: InternalErr{ - Flag: true, - MainCarDown: false, - }, - } - return nil, err - case grpcResp, ok := <-respCh: - // if there are no more messages to read on the channel we can break - if !ok { - break loop - } - // Get the unique request ID for which these responses are meant for. 
- msgId := grpcResp.GetId() - // Fetch the request value for the given ID from the Tracker - parentMessage := trackerReq.Remove(msgId) - if parentMessage == nil { - // this case is when the given request ID was not present in the Tracker. - // This means that either the UDF added an incorrect ID - // This cannot be processed further and should result in an error - // Can there be another case for this? - logger.Error("Request missing from message tracker, ", msgId) - return nil, fmt.Errorf("incorrect ID found during batch map processing") - } - // parse the responses received - // TODO(map-batch): should we make this concurrent by using multiple goroutines, instead of sequential. - // Try and see if any perf improvements from this. - parsedResp := u.parseResponse(grpcResp, parentMessage) - responsePair := isb.ReadWriteMessagePair{ - ReadMessage: parentMessage, - WriteMessages: parsedResp, - } - udfResults = append(udfResults, responsePair) - } - } - // check if there are elements left in the Tracker. This cannot be an acceptable case as we want the - // UDF to send responses for all elements. 
- if !trackerReq.IsEmpty() { - logger.Error("BatchMap response for all requests not received from UDF") - return nil, fmt.Errorf("batchMap response for all requests not received from UDF") - } - return udfResults, nil -} - -func (u *GRPCBasedBatchMap) parseResponse(response *batchmappb.BatchMapResponse, parentMessage *isb.ReadMessage) []*isb.WriteMessage { - writeMessages := make([]*isb.WriteMessage, 0) - for index, result := range response.GetResults() { - keys := result.Keys - taggedMessage := &isb.WriteMessage{ - Message: isb.Message{ - Header: isb.Header{ - MessageInfo: parentMessage.MessageInfo, - Keys: keys, - ID: isb.MessageID{ - VertexName: u.vertexName, - Offset: response.GetId(), - Index: int32(index), - }, - }, - Body: isb.Body{ - Payload: result.Value, - }, - }, - Tags: result.Tags, - } - // set the headers for the write messages - taggedMessage.Headers = parentMessage.Headers - writeMessages = append(writeMessages, taggedMessage) - } - return writeMessages -} - -func (u *GRPCBasedBatchMap) parseInputRequest(inputMsg *isb.ReadMessage) *batchmappb.BatchMapRequest { - keys := inputMsg.Keys - payload := inputMsg.Body.Payload - parentMessageInfo := inputMsg.MessageInfo - var req = &batchmappb.BatchMapRequest{ - Id: inputMsg.ReadOffset.String(), - Keys: keys, - Value: payload, - EventTime: timestamppb.New(parentMessageInfo.EventTime), - Watermark: timestamppb.New(inputMsg.Watermark), - Headers: inputMsg.Headers, - } - return req -} diff --git a/pkg/udf/rpc/grpc_batch_map_test.go b/pkg/udf/rpc/grpc_batch_map_test.go deleted file mode 100644 index 8c8a3e15bc..0000000000 --- a/pkg/udf/rpc/grpc_batch_map_test.go +++ /dev/null @@ -1,97 +0,0 @@ -/* -Copyright 2022 The Numaproj Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package rpc - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/golang/mock/gomock" - batchmappb "github.com/numaproj/numaflow-go/pkg/apis/proto/batchmap/v1" - "github.com/numaproj/numaflow-go/pkg/apis/proto/batchmap/v1/batchmapmock" - "github.com/stretchr/testify/assert" - - "github.com/numaproj/numaflow/pkg/isb" - "github.com/numaproj/numaflow/pkg/sdkclient/batchmapper" -) - -func NewMockUDSGRPCBasedBatchMap(mockClient *batchmapmock.MockBatchMapClient) *GRPCBasedBatchMap { - c, _ := batchmapper.NewFromClient(mockClient) - return NewUDSgRPCBasedBatchMap("test-vertex", c) -} - -func TestGRPCBasedBatchMap_WaitUntilReady(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockClient := batchmapmock.NewMockBatchMapClient(ctrl) - mockClient.EXPECT().IsReady(gomock.Any(), gomock.Any()).Return(&batchmappb.ReadyResponse{Ready: true}, nil) - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - go func() { - <-ctx.Done() - if errors.Is(ctx.Err(), context.DeadlineExceeded) { - t.Log(t.Name(), "test timeout") - } - }() - - u := NewMockUDSGRPCBasedBatchMap(mockClient) - err := u.WaitUntilReady(ctx) - assert.NoError(t, err) -} - -func TestGRPCBasedBatchMap_BasicBatchMapFnWithMockClient(t *testing.T) { - t.Run("test error", func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockClient := batchmapmock.NewMockBatchMapClient(ctrl) - mockBatchMapClient := batchmapmock.NewMockBatchMap_BatchMapFnClient(ctrl) - 
mockBatchMapClient.EXPECT().Send(gomock.Any()).Return(nil).AnyTimes() - mockBatchMapClient.EXPECT().CloseSend().Return(nil).AnyTimes() - mockBatchMapClient.EXPECT().Recv().Return(nil, errors.New("mock error for map")).Times(1) - - mockClient.EXPECT().BatchMapFn(gomock.Any(), gomock.Any()).Return(mockBatchMapClient, nil) - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - go func() { - <-ctx.Done() - if errors.Is(ctx.Err(), context.DeadlineExceeded) { - t.Log(t.Name(), "test timeout") - } - }() - - u := NewMockUDSGRPCBasedBatchMap(mockClient) - - dataMessages := make([]*isb.ReadMessage, 0) - - _, err := u.ApplyBatchMap(ctx, dataMessages) - assert.ErrorIs(t, err, &ApplyUDFErr{ - UserUDFErr: false, - Message: err.Error(), - InternalErr: InternalErr{ - Flag: true, - MainCarDown: false, - }, - }) - }) -} diff --git a/pkg/udf/rpc/grpc_map.go b/pkg/udf/rpc/grpc_map.go index 65f9fcdb24..71fe0fe790 100644 --- a/pkg/udf/rpc/grpc_map.go +++ b/pkg/udf/rpc/grpc_map.go @@ -22,6 +22,7 @@ import ( "time" mappb "github.com/numaproj/numaflow-go/pkg/apis/proto/map/v1" + "go.uber.org/zap" "google.golang.org/protobuf/types/known/emptypb" "google.golang.org/protobuf/types/known/timestamppb" @@ -34,12 +35,14 @@ import ( type GRPCBasedMap struct { vertexName string client mapper.Client + log *zap.SugaredLogger } -func NewUDSgRPCBasedMap(vertexName string, client mapper.Client) *GRPCBasedMap { +func NewUDSgRPCBasedMap(ctx context.Context, client mapper.Client, vertexName string) *GRPCBasedMap { return &GRPCBasedMap{ vertexName: vertexName, client: client, + log: logging.FromContext(ctx), } } @@ -97,7 +100,6 @@ func (u *GRPCBasedMap) ApplyMap(ctx context.Context, readMessages []*isb.ReadMes responses, err := u.client.MapFn(ctx, requests) if err != nil { - println("gRPC client.mapFn failed, ", err.Error()) err = &ApplyUDFErr{ UserUDFErr: false, Message: fmt.Sprintf("gRPC client.MapFn failed, %s", err), @@ -112,7 +114,7 @@ func (u *GRPCBasedMap) 
ApplyMap(ctx context.Context, readMessages []*isb.ReadMes for i, resp := range responses { parentMessage, ok := idToMsgMapping[resp.GetId()] if !ok { - panic(fmt.Sprintf("tracker doesn't contain the message ID received from the response: %s", resp.GetId())) + panic("tracker doesn't contain the message ID received from the response - " + resp.GetId()) } taggedMessages := make([]*isb.WriteMessage, len(resp.GetResults())) for j, result := range resp.GetResults() { diff --git a/pkg/udf/rpc/grpc_map_test.go b/pkg/udf/rpc/grpc_map_test.go index 50930aadb3..4f9bed4f08 100644 --- a/pkg/udf/rpc/grpc_map_test.go +++ b/pkg/udf/rpc/grpc_map_test.go @@ -47,7 +47,7 @@ func TestGRPCBasedMap_WaitUntilReadyWithServer(t *testing.T) { }) mapClient := mappb.NewMapClient(conn) client, _ := mapper2.NewFromClient(context.Background(), mapClient) - u := NewUDSgRPCBasedMap("testVertex", client) + u := NewUDSgRPCBasedMap(context.Background(), client, "testVertex") err := u.WaitUntilReady(context.Background()) assert.NoError(t, err) } @@ -113,7 +113,7 @@ func TestGRPCBasedMap_ApplyMapWithServer(t *testing.T) { ctx := context.Background() client, err := mapper2.NewFromClient(ctx, mapClient) require.NoError(t, err, "creating map client") - u := NewUDSgRPCBasedMap("testVertex", client) + u := NewUDSgRPCBasedMap(ctx, client, "testVertex") got, err := u.ApplyMap(ctx, []*isb.ReadMessage{{ Message: isb.Message{ @@ -152,7 +152,7 @@ func TestGRPCBasedMap_ApplyMapWithServer(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) client, err := mapper2.NewFromClient(ctx, mapClient) require.NoError(t, err, "creating map client") - u := NewUDSgRPCBasedMap("testVertex", client) + u := NewUDSgRPCBasedMap(ctx, client, "testVertex") // This cancelled context is passed to the ApplyMap function to simulate failure cancel() diff --git a/rust/numaflow-grpc/src/clients/map.v1.rs b/rust/numaflow-grpc/src/clients/map.v1.rs index 93e372cbe3..1f5de6a369 100644 --- 
a/rust/numaflow-grpc/src/clients/map.v1.rs +++ b/rust/numaflow-grpc/src/clients/map.v1.rs @@ -10,6 +10,8 @@ pub struct MapRequest { pub id: ::prost::alloc::string::String, #[prost(message, optional, tag = "3")] pub handshake: ::core::option::Option, + #[prost(message, optional, tag = "4")] + pub status: ::core::option::Option, } /// Nested message and enum types in `MapRequest`. pub mod map_request { @@ -38,6 +40,13 @@ pub struct Handshake { #[prost(bool, tag = "1")] pub sot: bool, } +/// +/// Status message to indicate the status of the message. +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct Status { + #[prost(bool, tag = "1")] + pub eot: bool, +} /// * /// MapResponse represents a response element. #[derive(Clone, PartialEq, ::prost::Message)] @@ -49,6 +58,8 @@ pub struct MapResponse { pub id: ::prost::alloc::string::String, #[prost(message, optional, tag = "3")] pub handshake: ::core::option::Option, + #[prost(message, optional, tag = "4")] + pub status: ::core::option::Option, } /// Nested message and enum types in `MapResponse`. 
pub mod map_response { From bc12925f550d05732a435581570d6e1c0948f377 Mon Sep 17 00:00:00 2001 From: Vigith Maurice Date: Sun, 13 Oct 2024 10:50:19 -0700 Subject: [PATCH 102/188] feat: set kafka keys if setKey is set (#2146) Signed-off-by: Vigith Maurice --- api/json-schema/schema.json | 4 + api/openapi-spec/swagger.json | 4 + .../numaflow.numaproj.io_monovertices.yaml | 4 + .../full/numaflow.numaproj.io_pipelines.yaml | 4 + .../full/numaflow.numaproj.io_vertices.yaml | 4 + config/install.yaml | 12 + config/namespace-install.yaml | 12 + docs/APIs.md | 24 + pkg/apis/numaflow/v1alpha1/generated.pb.go | 1057 +++++++++-------- pkg/apis/numaflow/v1alpha1/generated.proto | 13 +- pkg/apis/numaflow/v1alpha1/kafka_sink.go | 12 +- .../numaflow/v1alpha1/zz_generated.openapi.go | 8 + pkg/sinks/kafka/kafka.go | 15 + rust/numaflow-models/src/models/kafka_sink.rs | 4 + 14 files changed, 658 insertions(+), 519 deletions(-) diff --git a/api/json-schema/schema.json b/api/json-schema/schema.json index a44b78c881..b840921f2e 100644 --- a/api/json-schema/schema.json +++ b/api/json-schema/schema.json @@ -20868,6 +20868,10 @@ "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.SASL", "description": "SASL user to configure SASL connection for kafka broker SASL.enable=true default for SASL." }, + "setKey": { + "description": "SetKey sets the Kafka key to the keys passed in the Message. When the key is null (default), the record is sent randomly to one of the available partitions of the topic. If a key exists, Kafka hashes the key, and the result is used to map the message to a specific partition. This ensures that messages with the same key end up in the same partition.", + "type": "boolean" + }, "tls": { "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.TLS", "description": "TLS user to configure TLS connection for kafka broker TLS.enable=true default for TLS." 
diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 43d2dfe2bd..89ac681330 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -20867,6 +20867,10 @@ "description": "SASL user to configure SASL connection for kafka broker SASL.enable=true default for SASL.", "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.SASL" }, + "setKey": { + "description": "SetKey sets the Kafka key to the keys passed in the Message. When the key is null (default), the record is sent randomly to one of the available partitions of the topic. If a key exists, Kafka hashes the key, and the result is used to map the message to a specific partition. This ensures that messages with the same key end up in the same partition.", + "type": "boolean" + }, "tls": { "description": "TLS user to configure TLS connection for kafka broker TLS.enable=true default for TLS.", "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.TLS" diff --git a/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml b/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml index b3c73cdb3b..6f777379e3 100644 --- a/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml @@ -3579,6 +3579,8 @@ spec: required: - mechanism type: object + setKey: + type: boolean tls: properties: caCertSecret: @@ -4112,6 +4114,8 @@ spec: required: - mechanism type: object + setKey: + type: boolean tls: properties: caCertSecret: diff --git a/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml b/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml index 3abdbe43bc..52a866eb2a 100644 --- a/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml @@ -8259,6 +8259,8 @@ spec: required: - mechanism type: object + setKey: + type: boolean tls: properties: caCertSecret: @@ -8792,6 +8794,8 @@ spec: required: - mechanism type: object + setKey: + 
type: boolean tls: properties: caCertSecret: diff --git a/config/base/crds/full/numaflow.numaproj.io_vertices.yaml b/config/base/crds/full/numaflow.numaproj.io_vertices.yaml index 945d1a17bd..da1af1d126 100644 --- a/config/base/crds/full/numaflow.numaproj.io_vertices.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_vertices.yaml @@ -3047,6 +3047,8 @@ spec: required: - mechanism type: object + setKey: + type: boolean tls: properties: caCertSecret: @@ -3580,6 +3582,8 @@ spec: required: - mechanism type: object + setKey: + type: boolean tls: properties: caCertSecret: diff --git a/config/install.yaml b/config/install.yaml index 73d53b7756..a130e9c144 100644 --- a/config/install.yaml +++ b/config/install.yaml @@ -6765,6 +6765,8 @@ spec: required: - mechanism type: object + setKey: + type: boolean tls: properties: caCertSecret: @@ -7298,6 +7300,8 @@ spec: required: - mechanism type: object + setKey: + type: boolean tls: properties: caCertSecret: @@ -17990,6 +17994,8 @@ spec: required: - mechanism type: object + setKey: + type: boolean tls: properties: caCertSecret: @@ -18523,6 +18529,8 @@ spec: required: - mechanism type: object + setKey: + type: boolean tls: properties: caCertSecret: @@ -24394,6 +24402,8 @@ spec: required: - mechanism type: object + setKey: + type: boolean tls: properties: caCertSecret: @@ -24927,6 +24937,8 @@ spec: required: - mechanism type: object + setKey: + type: boolean tls: properties: caCertSecret: diff --git a/config/namespace-install.yaml b/config/namespace-install.yaml index 0c0e287f7f..38053d2bfc 100644 --- a/config/namespace-install.yaml +++ b/config/namespace-install.yaml @@ -6765,6 +6765,8 @@ spec: required: - mechanism type: object + setKey: + type: boolean tls: properties: caCertSecret: @@ -7298,6 +7300,8 @@ spec: required: - mechanism type: object + setKey: + type: boolean tls: properties: caCertSecret: @@ -17990,6 +17994,8 @@ spec: required: - mechanism type: object + setKey: + type: boolean tls: properties: caCertSecret: @@ 
-18523,6 +18529,8 @@ spec: required: - mechanism type: object + setKey: + type: boolean tls: properties: caCertSecret: @@ -24394,6 +24402,8 @@ spec: required: - mechanism type: object + setKey: + type: boolean tls: properties: caCertSecret: @@ -24927,6 +24937,8 @@ spec: required: - mechanism type: object + setKey: + type: boolean tls: properties: caCertSecret: diff --git a/docs/APIs.md b/docs/APIs.md index 8c48a00aad..1261395f58 100644 --- a/docs/APIs.md +++ b/docs/APIs.md @@ -5227,6 +5227,30 @@ Description +setKey
bool + + + + +(Optional) +

+ +SetKey sets the Kafka key to the keys passed in the Message. When the +key is null (default), the record is sent randomly to one of the +available partitions of the topic. If a key exists, Kafka hashes the +key, and the result is used to map the message to a specific partition. +This ensures that messages with the same key end up in the same +partition. +

+ + + + + + + + + tls
TLS diff --git a/pkg/apis/numaflow/v1alpha1/generated.pb.go b/pkg/apis/numaflow/v1alpha1/generated.pb.go index 8c1b846d4f..c5f28f89b8 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.pb.go +++ b/pkg/apis/numaflow/v1alpha1/generated.pb.go @@ -2880,514 +2880,515 @@ func init() { } var fileDescriptor_9d0d1b17d3865563 = []byte{ - // 8105 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x5d, 0x6c, 0x24, 0xd9, - 0x75, 0xde, 0xf6, 0x7f, 0xf7, 0x69, 0xfe, 0xed, 0x9d, 0xd9, 0x59, 0xce, 0x68, 0x76, 0x7a, 0x54, - 0xb2, 0xa4, 0x71, 0x6c, 0x93, 0x59, 0x5a, 0xbb, 0x5a, 0xd9, 0x96, 0x76, 0xd9, 0xe4, 0x70, 0x86, - 0x33, 0xe4, 0x0c, 0x75, 0x9a, 0x9c, 0x5d, 0x79, 0x63, 0x6d, 0x8a, 0x55, 0x97, 0xcd, 0x5a, 0x56, - 0x57, 0xb5, 0xaa, 0xaa, 0x39, 0xc3, 0x75, 0x0c, 0xd9, 0x52, 0x82, 0x55, 0x90, 0x04, 0x09, 0xfc, - 0x64, 0x20, 0x70, 0x82, 0x04, 0x01, 0xfc, 0x60, 0x38, 0x0f, 0x41, 0x94, 0x87, 0x00, 0xf9, 0x71, - 0x10, 0x24, 0xca, 0xbf, 0x10, 0x04, 0xc8, 0xe6, 0x85, 0x88, 0x18, 0xe4, 0x21, 0x01, 0x12, 0x18, - 0x31, 0x12, 0x3b, 0x03, 0x23, 0x0a, 0xee, 0x5f, 0xfd, 0x75, 0xf5, 0x0c, 0xd9, 0xd5, 0x9c, 0x9d, - 0x4d, 0xf6, 0xad, 0xfb, 0x9e, 0x73, 0xbf, 0x73, 0xeb, 0xd6, 0xad, 0x7b, 0xcf, 0x3d, 0xe7, 0xdc, - 0x73, 0xe1, 0x56, 0xd7, 0x0a, 0xf6, 0x07, 0xbb, 0x0b, 0x86, 0xdb, 0x5b, 0x74, 0x06, 0x3d, 0xbd, - 0xef, 0xb9, 0xef, 0xf3, 0x1f, 0x7b, 0xb6, 0xfb, 0x70, 0xb1, 0x7f, 0xd0, 0x5d, 0xd4, 0xfb, 0x96, - 0x1f, 0x95, 0x1c, 0xbe, 0xaa, 0xdb, 0xfd, 0x7d, 0xfd, 0xd5, 0xc5, 0x2e, 0x75, 0xa8, 0xa7, 0x07, - 0xd4, 0x5c, 0xe8, 0x7b, 0x6e, 0xe0, 0x92, 0x2f, 0x47, 0x40, 0x0b, 0x0a, 0x68, 0x41, 0x55, 0x5b, - 0xe8, 0x1f, 0x74, 0x17, 0x18, 0x50, 0x54, 0xa2, 0x80, 0xae, 0xfc, 0x4c, 0xac, 0x05, 0x5d, 0xb7, - 0xeb, 0x2e, 0x72, 0xbc, 0xdd, 0xc1, 0x1e, 0xff, 0xc7, 0xff, 0xf0, 0x5f, 0x42, 0xce, 0x15, 0xed, - 0xe0, 0x0d, 0x7f, 0xc1, 0x72, 0x59, 0xb3, 0x16, 0x0d, 0xd7, 0xa3, 0x8b, 0x87, 0x43, 0x6d, 0xb9, - 0xf2, 0xa5, 0x88, 0xa7, 0xa7, 0x1b, 0xfb, 0x96, 0x43, 
0xbd, 0x23, 0xf5, 0x2c, 0x8b, 0x1e, 0xf5, - 0xdd, 0x81, 0x67, 0xd0, 0x33, 0xd5, 0xf2, 0x17, 0x7b, 0x34, 0xd0, 0xb3, 0x64, 0x2d, 0x8e, 0xaa, - 0xe5, 0x0d, 0x9c, 0xc0, 0xea, 0x0d, 0x8b, 0x79, 0xfd, 0x69, 0x15, 0x7c, 0x63, 0x9f, 0xf6, 0xf4, - 0xa1, 0x7a, 0x3f, 0x3b, 0xaa, 0xde, 0x20, 0xb0, 0xec, 0x45, 0xcb, 0x09, 0xfc, 0xc0, 0x4b, 0x57, - 0xd2, 0x7e, 0x17, 0xe0, 0xc2, 0xf2, 0xae, 0x1f, 0x78, 0xba, 0x11, 0x6c, 0xb9, 0xe6, 0x36, 0xed, - 0xf5, 0x6d, 0x3d, 0xa0, 0xe4, 0x00, 0xea, 0xec, 0x81, 0x4c, 0x3d, 0xd0, 0xe7, 0x0b, 0xd7, 0x0b, - 0x37, 0x9a, 0x4b, 0xcb, 0x0b, 0x63, 0xbe, 0xc0, 0x85, 0x4d, 0x09, 0xd4, 0x9e, 0x3a, 0x39, 0x6e, - 0xd5, 0xd5, 0x3f, 0x0c, 0x05, 0x90, 0xdf, 0x28, 0xc0, 0x94, 0xe3, 0x9a, 0xb4, 0x43, 0x6d, 0x6a, - 0x04, 0xae, 0x37, 0x5f, 0xbc, 0x5e, 0xba, 0xd1, 0x5c, 0xfa, 0xe6, 0xd8, 0x12, 0x33, 0x9e, 0x68, - 0xe1, 0x5e, 0x4c, 0xc0, 0x4d, 0x27, 0xf0, 0x8e, 0xda, 0x17, 0x7f, 0x70, 0xdc, 0x7a, 0xe1, 0xe4, - 0xb8, 0x35, 0x15, 0x27, 0x61, 0xa2, 0x25, 0x64, 0x07, 0x9a, 0x81, 0x6b, 0xb3, 0x2e, 0xb3, 0x5c, - 0xc7, 0x9f, 0x2f, 0xf1, 0x86, 0x5d, 0x5b, 0x10, 0x5d, 0xcd, 0xc4, 0x2f, 0xb0, 0x31, 0xb6, 0x70, - 0xf8, 0xea, 0xc2, 0x76, 0xc8, 0xd6, 0xbe, 0x20, 0x81, 0x9b, 0x51, 0x99, 0x8f, 0x71, 0x1c, 0x42, - 0x61, 0xd6, 0xa7, 0xc6, 0xc0, 0xb3, 0x82, 0xa3, 0x15, 0xd7, 0x09, 0xe8, 0xa3, 0x60, 0xbe, 0xcc, - 0x7b, 0xf9, 0x0b, 0x59, 0xd0, 0x5b, 0xae, 0xd9, 0x49, 0x72, 0xb7, 0x2f, 0x9c, 0x1c, 0xb7, 0x66, - 0x53, 0x85, 0x98, 0xc6, 0x24, 0x0e, 0xcc, 0x59, 0x3d, 0xbd, 0x4b, 0xb7, 0x06, 0xb6, 0xdd, 0xa1, - 0x86, 0x47, 0x03, 0x7f, 0xbe, 0xc2, 0x1f, 0xe1, 0x46, 0x96, 0x9c, 0x0d, 0xd7, 0xd0, 0xed, 0xfb, - 0xbb, 0xef, 0x53, 0x23, 0x40, 0xba, 0x47, 0x3d, 0xea, 0x18, 0xb4, 0x3d, 0x2f, 0x1f, 0x66, 0x6e, - 0x3d, 0x85, 0x84, 0x43, 0xd8, 0xe4, 0x16, 0xbc, 0xd8, 0xf7, 0x2c, 0x97, 0x37, 0xc1, 0xd6, 0x7d, - 0xff, 0x9e, 0xde, 0xa3, 0xf3, 0xd5, 0xeb, 0x85, 0x1b, 0x8d, 0xf6, 0x65, 0x09, 0xf3, 0xe2, 0x56, - 0x9a, 0x01, 0x87, 0xeb, 0x90, 0x1b, 0x50, 0x57, 0x85, 0xf3, 0xb5, 0xeb, 0x85, 0x1b, 0x15, 
0x31, - 0x76, 0x54, 0x5d, 0x0c, 0xa9, 0x64, 0x0d, 0xea, 0xfa, 0xde, 0x9e, 0xe5, 0x30, 0xce, 0x3a, 0xef, - 0xc2, 0xab, 0x59, 0x8f, 0xb6, 0x2c, 0x79, 0x04, 0x8e, 0xfa, 0x87, 0x61, 0x5d, 0x72, 0x07, 0x88, - 0x4f, 0xbd, 0x43, 0xcb, 0xa0, 0xcb, 0x86, 0xe1, 0x0e, 0x9c, 0x80, 0xb7, 0xbd, 0xc1, 0xdb, 0x7e, - 0x45, 0xb6, 0x9d, 0x74, 0x86, 0x38, 0x30, 0xa3, 0x16, 0x79, 0x0b, 0xe6, 0xe4, 0xb7, 0x1a, 0xf5, - 0x02, 0x70, 0xa4, 0x8b, 0xac, 0x23, 0x31, 0x45, 0xc3, 0x21, 0x6e, 0x62, 0xc2, 0x55, 0x7d, 0x10, - 0xb8, 0x3d, 0x06, 0x99, 0x14, 0xba, 0xed, 0x1e, 0x50, 0x67, 0xbe, 0x79, 0xbd, 0x70, 0xa3, 0xde, - 0xbe, 0x7e, 0x72, 0xdc, 0xba, 0xba, 0xfc, 0x04, 0x3e, 0x7c, 0x22, 0x0a, 0xb9, 0x0f, 0x0d, 0xd3, - 0xf1, 0xb7, 0x5c, 0xdb, 0x32, 0x8e, 0xe6, 0xa7, 0x78, 0x03, 0x5f, 0x95, 0x8f, 0xda, 0x58, 0xbd, - 0xd7, 0x11, 0x84, 0xc7, 0xc7, 0xad, 0xab, 0xc3, 0x53, 0xea, 0x42, 0x48, 0xc7, 0x08, 0x83, 0x6c, - 0x72, 0xc0, 0x15, 0xd7, 0xd9, 0xb3, 0xba, 0xf3, 0xd3, 0xfc, 0x6d, 0x5c, 0x1f, 0x31, 0xa0, 0x57, - 0xef, 0x75, 0x04, 0x5f, 0x7b, 0x5a, 0x8a, 0x13, 0x7f, 0x31, 0x42, 0x20, 0x26, 0xcc, 0xa8, 0xc9, - 0x78, 0xc5, 0xd6, 0xad, 0x9e, 0x3f, 0x3f, 0xc3, 0x07, 0xef, 0x4f, 0x8c, 0xc0, 0xc4, 0x38, 0x73, - 0xfb, 0x92, 0x7c, 0x94, 0x99, 0x44, 0xb1, 0x8f, 0x29, 0xcc, 0x2b, 0x6f, 0xc2, 0x8b, 0x43, 0x73, - 0x03, 0x99, 0x83, 0xd2, 0x01, 0x3d, 0xe2, 0x53, 0x5f, 0x03, 0xd9, 0x4f, 0x72, 0x11, 0x2a, 0x87, - 0xba, 0x3d, 0xa0, 0xf3, 0x45, 0x5e, 0x26, 0xfe, 0xfc, 0x5c, 0xf1, 0x8d, 0x82, 0xf6, 0xd7, 0x4b, - 0x30, 0xa5, 0x66, 0x9c, 0x8e, 0xe5, 0x1c, 0x90, 0xb7, 0xa1, 0x64, 0xbb, 0x5d, 0x39, 0x6f, 0xfe, - 0xc2, 0xd8, 0xb3, 0xd8, 0x86, 0xdb, 0x6d, 0xd7, 0x4e, 0x8e, 0x5b, 0xa5, 0x0d, 0xb7, 0x8b, 0x0c, - 0x91, 0x18, 0x50, 0x39, 0xd0, 0xf7, 0x0e, 0x74, 0xde, 0x86, 0xe6, 0x52, 0x7b, 0x6c, 0xe8, 0xbb, - 0x0c, 0x85, 0xb5, 0xb5, 0xdd, 0x38, 0x39, 0x6e, 0x55, 0xf8, 0x5f, 0x14, 0xd8, 0xc4, 0x85, 0xc6, - 0xae, 0xad, 0x1b, 0x07, 0xfb, 0xae, 0x4d, 0xe7, 0x4b, 0x39, 0x05, 0xb5, 0x15, 0x92, 0x78, 0xcd, - 0xe1, 0x5f, 0x8c, 0x64, 0x10, 
0x03, 0xaa, 0x03, 0xd3, 0xb7, 0x9c, 0x03, 0x39, 0x07, 0xbe, 0x39, - 0xb6, 0xb4, 0x9d, 0x55, 0xfe, 0x4c, 0x70, 0x72, 0xdc, 0xaa, 0x8a, 0xdf, 0x28, 0xa1, 0xb5, 0x3f, - 0x9c, 0x82, 0x19, 0xf5, 0x92, 0x1e, 0x50, 0x2f, 0xa0, 0x8f, 0xc8, 0x75, 0x28, 0x3b, 0xec, 0xd3, - 0xe4, 0x2f, 0xb9, 0x3d, 0x25, 0x87, 0x4b, 0x99, 0x7f, 0x92, 0x9c, 0xc2, 0x5a, 0x26, 0x86, 0x8a, - 0xec, 0xf0, 0xf1, 0x5b, 0xd6, 0xe1, 0x30, 0xa2, 0x65, 0xe2, 0x37, 0x4a, 0x68, 0xf2, 0x2e, 0x94, - 0xf9, 0xc3, 0x8b, 0xae, 0xfe, 0xea, 0xf8, 0x22, 0xd8, 0xa3, 0xd7, 0xd9, 0x13, 0xf0, 0x07, 0xe7, - 0xa0, 0x6c, 0x28, 0x0e, 0xcc, 0x3d, 0xd9, 0xb1, 0xbf, 0x90, 0xa3, 0x63, 0xd7, 0xc4, 0x50, 0xdc, - 0x59, 0x5d, 0x43, 0x86, 0x48, 0xfe, 0x62, 0x01, 0x5e, 0x34, 0x5c, 0x27, 0xd0, 0x99, 0x9e, 0xa1, - 0x16, 0xd9, 0xf9, 0x0a, 0x97, 0x73, 0x67, 0x6c, 0x39, 0x2b, 0x69, 0xc4, 0xf6, 0x4b, 0x6c, 0xcd, - 0x18, 0x2a, 0xc6, 0x61, 0xd9, 0xe4, 0x2f, 0x17, 0xe0, 0x25, 0x36, 0x97, 0x0f, 0x31, 0xf3, 0x15, - 0x68, 0xb2, 0xad, 0xba, 0x7c, 0x72, 0xdc, 0x7a, 0x69, 0x3d, 0x4b, 0x18, 0x66, 0xb7, 0x81, 0xb5, - 0xee, 0x82, 0x3e, 0xac, 0x96, 0xf0, 0xd5, 0xad, 0xb9, 0xb4, 0x31, 0x49, 0x55, 0xa7, 0xfd, 0x19, - 0x39, 0x94, 0xb3, 0x34, 0x3b, 0xcc, 0x6a, 0x05, 0xb9, 0x09, 0xb5, 0x43, 0xd7, 0x1e, 0xf4, 0xa8, - 0x3f, 0x5f, 0xe7, 0x53, 0xec, 0x95, 0xac, 0x29, 0xf6, 0x01, 0x67, 0x69, 0xcf, 0x4a, 0xf8, 0x9a, - 0xf8, 0xef, 0xa3, 0xaa, 0x4b, 0x2c, 0xa8, 0xda, 0x56, 0xcf, 0x0a, 0x7c, 0xbe, 0x70, 0x36, 0x97, - 0x6e, 0x8e, 0xfd, 0x58, 0xe2, 0x13, 0xdd, 0xe0, 0x60, 0xe2, 0xab, 0x11, 0xbf, 0x51, 0x0a, 0x60, - 0x53, 0xa1, 0x6f, 0xe8, 0xb6, 0x58, 0x58, 0x9b, 0x4b, 0x5f, 0x1b, 0xff, 0xb3, 0x61, 0x28, 0xed, - 0x69, 0xf9, 0x4c, 0x15, 0xfe, 0x17, 0x05, 0x36, 0xf9, 0x25, 0x98, 0x49, 0xbc, 0x4d, 0x7f, 0xbe, - 0xc9, 0x7b, 0xe7, 0x95, 0xac, 0xde, 0x09, 0xb9, 0xa2, 0x95, 0x27, 0x31, 0x42, 0x7c, 0x4c, 0x81, - 0x91, 0xbb, 0x50, 0xf7, 0x2d, 0x93, 0x1a, 0xba, 0xe7, 0xcf, 0x4f, 0x9d, 0x06, 0x78, 0x4e, 0x02, - 0xd7, 0x3b, 0xb2, 0x1a, 0x86, 0x00, 0x64, 0x01, 0xa0, 0xaf, 0x7b, 
0x81, 0x25, 0x14, 0xd5, 0x69, - 0xae, 0x34, 0xcd, 0x9c, 0x1c, 0xb7, 0x60, 0x2b, 0x2c, 0xc5, 0x18, 0x07, 0xe3, 0x67, 0x75, 0xd7, - 0x9d, 0xfe, 0x20, 0x10, 0x0b, 0x6b, 0x43, 0xf0, 0x77, 0xc2, 0x52, 0x8c, 0x71, 0x90, 0xdf, 0x29, - 0xc0, 0x67, 0xa2, 0xbf, 0xc3, 0x1f, 0xd9, 0xec, 0xc4, 0x3f, 0xb2, 0xd6, 0xc9, 0x71, 0xeb, 0x33, - 0x9d, 0xd1, 0x22, 0xf1, 0x49, 0xed, 0x21, 0x1f, 0x16, 0x60, 0x66, 0xd0, 0x37, 0xf5, 0x80, 0x76, - 0x02, 0xb6, 0xe3, 0xe9, 0x1e, 0xcd, 0xcf, 0xf1, 0x26, 0xde, 0x1a, 0x7f, 0x16, 0x4c, 0xc0, 0x45, - 0xaf, 0x39, 0x59, 0x8e, 0x29, 0xb1, 0xda, 0xdb, 0x30, 0xbd, 0x3c, 0x08, 0xf6, 0x5d, 0xcf, 0xfa, - 0x80, 0xab, 0xff, 0x64, 0x0d, 0x2a, 0x01, 0x57, 0xe3, 0x84, 0x86, 0xf0, 0xf9, 0xac, 0x97, 0x2e, - 0x54, 0xea, 0xbb, 0xf4, 0x48, 0xe9, 0x25, 0x62, 0xa5, 0x16, 0x6a, 0x9d, 0xa8, 0xae, 0xfd, 0xe9, - 0x02, 0xd4, 0xda, 0xba, 0x71, 0xe0, 0xee, 0xed, 0x91, 0x77, 0xa0, 0x6e, 0x39, 0x01, 0xf5, 0x0e, - 0x75, 0x5b, 0xc2, 0x2e, 0xc4, 0x60, 0xc3, 0x0d, 0x61, 0xf4, 0x78, 0x6c, 0xf7, 0xc5, 0x04, 0xad, - 0x0e, 0xe4, 0xae, 0x85, 0x6b, 0xc6, 0xeb, 0x12, 0x03, 0x43, 0x34, 0xd2, 0x82, 0x8a, 0x1f, 0xd0, - 0xbe, 0xcf, 0xd7, 0xc0, 0x69, 0xd1, 0x8c, 0x0e, 0x2b, 0x40, 0x51, 0xae, 0xfd, 0xb5, 0x02, 0x34, - 0xda, 0xba, 0x6f, 0x19, 0xec, 0x29, 0xc9, 0x0a, 0x94, 0x07, 0x3e, 0xf5, 0xce, 0xf6, 0x6c, 0x7c, - 0xd9, 0xda, 0xf1, 0xa9, 0x87, 0xbc, 0x32, 0xb9, 0x0f, 0xf5, 0xbe, 0xee, 0xfb, 0x0f, 0x5d, 0xcf, - 0x94, 0x4b, 0xef, 0x29, 0x81, 0xc4, 0x36, 0x41, 0x56, 0xc5, 0x10, 0x44, 0x6b, 0x42, 0xa4, 0x7b, - 0x68, 0xbf, 0x5f, 0x80, 0x0b, 0xed, 0xc1, 0xde, 0x1e, 0xf5, 0xa4, 0x56, 0x2c, 0xf5, 0x4d, 0x0a, - 0x15, 0x8f, 0x9a, 0x96, 0x2f, 0xdb, 0xbe, 0x3a, 0xf6, 0x40, 0x41, 0x86, 0x22, 0xd5, 0x5b, 0xde, - 0x5f, 0xbc, 0x00, 0x05, 0x3a, 0x19, 0x40, 0xe3, 0x7d, 0xca, 0x76, 0xe3, 0x54, 0xef, 0xc9, 0xa7, - 0xbb, 0x3d, 0xb6, 0xa8, 0x3b, 0x34, 0xe8, 0x70, 0xa4, 0xb8, 0x36, 0x1d, 0x16, 0x62, 0x24, 0x49, - 0xfb, 0xdd, 0x0a, 0x4c, 0xad, 0xb8, 0xbd, 0x5d, 0xcb, 0xa1, 0xe6, 0x4d, 0xb3, 0x4b, 0xc9, 0x7b, - 0x50, 
0xa6, 0x66, 0x97, 0xca, 0xa7, 0x1d, 0x5f, 0xf1, 0x60, 0x60, 0x91, 0xfa, 0xc4, 0xfe, 0x21, - 0x07, 0x26, 0x1b, 0x30, 0xb3, 0xe7, 0xb9, 0x3d, 0x31, 0x97, 0x6f, 0x1f, 0xf5, 0xa5, 0xee, 0xdc, - 0xfe, 0x09, 0xf5, 0xe1, 0xac, 0x25, 0xa8, 0x8f, 0x8f, 0x5b, 0x10, 0xfd, 0xc3, 0x54, 0x5d, 0xf2, - 0x0e, 0xcc, 0x47, 0x25, 0xe1, 0xa4, 0xb6, 0xc2, 0xb6, 0x33, 0x5c, 0x77, 0xaa, 0xb4, 0xaf, 0x9e, - 0x1c, 0xb7, 0xe6, 0xd7, 0x46, 0xf0, 0xe0, 0xc8, 0xda, 0x6c, 0xaa, 0x98, 0x8b, 0x88, 0x62, 0xa1, - 0x91, 0x2a, 0xd3, 0x84, 0x56, 0x30, 0xbe, 0xef, 0x5b, 0x4b, 0x89, 0xc0, 0x21, 0xa1, 0x64, 0x0d, - 0xa6, 0x02, 0x37, 0xd6, 0x5f, 0x15, 0xde, 0x5f, 0x9a, 0x32, 0x54, 0x6c, 0xbb, 0x23, 0x7b, 0x2b, - 0x51, 0x8f, 0x20, 0x5c, 0x52, 0xff, 0x53, 0x3d, 0x55, 0xe5, 0x3d, 0x75, 0xe5, 0xe4, 0xb8, 0x75, - 0x69, 0x3b, 0x93, 0x03, 0x47, 0xd4, 0x24, 0xbf, 0x56, 0x80, 0x19, 0x45, 0x92, 0x7d, 0x54, 0x9b, - 0x64, 0x1f, 0x11, 0x36, 0x22, 0xb6, 0x13, 0x02, 0x30, 0x25, 0x50, 0xfb, 0x7e, 0x0d, 0x1a, 0xe1, - 0x54, 0x4f, 0x3e, 0x07, 0x15, 0x6e, 0x82, 0x90, 0x1a, 0x7c, 0xb8, 0x86, 0x73, 0x4b, 0x05, 0x0a, - 0x1a, 0xf9, 0x3c, 0xd4, 0x0c, 0xb7, 0xd7, 0xd3, 0x1d, 0x93, 0x9b, 0x95, 0x1a, 0xed, 0x26, 0x53, - 0x5d, 0x56, 0x44, 0x11, 0x2a, 0x1a, 0xb9, 0x0a, 0x65, 0xdd, 0xeb, 0x0a, 0x0b, 0x4f, 0x43, 0xcc, - 0x47, 0xcb, 0x5e, 0xd7, 0x47, 0x5e, 0x4a, 0xbe, 0x02, 0x25, 0xea, 0x1c, 0xce, 0x97, 0x47, 0xeb, - 0x46, 0x37, 0x9d, 0xc3, 0x07, 0xba, 0xd7, 0x6e, 0xca, 0x36, 0x94, 0x6e, 0x3a, 0x87, 0xc8, 0xea, - 0x90, 0x0d, 0xa8, 0x51, 0xe7, 0x90, 0xbd, 0x7b, 0x69, 0x7a, 0xf9, 0xec, 0x88, 0xea, 0x8c, 0x45, - 0x6e, 0x13, 0x42, 0x0d, 0x4b, 0x16, 0xa3, 0x82, 0x20, 0xdf, 0x80, 0x29, 0xa1, 0x6c, 0x6d, 0xb2, - 0x77, 0xe2, 0xcf, 0x57, 0x39, 0x64, 0x6b, 0xb4, 0xb6, 0xc6, 0xf9, 0x22, 0x53, 0x57, 0xac, 0xd0, - 0xc7, 0x04, 0x14, 0xf9, 0x06, 0x34, 0xd4, 0xce, 0x58, 0xbd, 0xd9, 0x4c, 0x2b, 0x91, 0xda, 0x4e, - 0x23, 0xfd, 0xd6, 0xc0, 0xf2, 0x68, 0x8f, 0x3a, 0x81, 0xdf, 0x7e, 0x51, 0xd9, 0x0d, 0x14, 0xd5, - 0xc7, 0x08, 0x8d, 0xec, 0x0e, 0x9b, 0xbb, 
0x84, 0xad, 0xe6, 0x73, 0x23, 0x66, 0xf5, 0x31, 0x6c, - 0x5d, 0xdf, 0x84, 0xd9, 0xd0, 0x1e, 0x25, 0x4d, 0x1a, 0xc2, 0x7a, 0xf3, 0x25, 0x56, 0x7d, 0x3d, - 0x49, 0x7a, 0x7c, 0xdc, 0x7a, 0x25, 0xc3, 0xa8, 0x11, 0x31, 0x60, 0x1a, 0x8c, 0x7c, 0x00, 0x33, - 0x1e, 0xd5, 0x4d, 0xcb, 0xa1, 0xbe, 0xbf, 0xe5, 0xb9, 0xbb, 0xf9, 0x35, 0x4f, 0x8e, 0x22, 0x86, - 0x3d, 0x26, 0x90, 0x31, 0x25, 0x89, 0x3c, 0x84, 0x69, 0xdb, 0x3a, 0xa4, 0x91, 0xe8, 0xe6, 0x44, - 0x44, 0xbf, 0x78, 0x72, 0xdc, 0x9a, 0xde, 0x88, 0x03, 0x63, 0x52, 0x0e, 0xd3, 0x54, 0xfa, 0xae, - 0x17, 0x28, 0xf5, 0xf4, 0xb3, 0x4f, 0x54, 0x4f, 0xb7, 0x5c, 0x2f, 0x88, 0x3e, 0x42, 0xf6, 0xcf, - 0x47, 0x51, 0x5d, 0xfb, 0xdb, 0x15, 0x18, 0xde, 0xc4, 0x25, 0x47, 0x5c, 0x61, 0xd2, 0x23, 0x2e, - 0x3d, 0x1a, 0xc4, 0xda, 0xf3, 0x86, 0xac, 0x36, 0x81, 0x11, 0x91, 0x31, 0xaa, 0x4b, 0x93, 0x1e, - 0xd5, 0xcf, 0xcd, 0xc4, 0x33, 0x3c, 0xfc, 0xab, 0x1f, 0xdf, 0xf0, 0xaf, 0x3d, 0x9b, 0xe1, 0xaf, - 0x7d, 0xaf, 0x0c, 0x33, 0xab, 0x3a, 0xed, 0xb9, 0xce, 0x53, 0xf7, 0xf1, 0x85, 0xe7, 0x62, 0x1f, - 0x7f, 0x03, 0xea, 0x1e, 0xed, 0xdb, 0x96, 0xa1, 0x0b, 0x75, 0x5d, 0xda, 0xcd, 0x51, 0x96, 0x61, - 0x48, 0x1d, 0x61, 0xbf, 0x29, 0x3d, 0x97, 0xf6, 0x9b, 0xf2, 0xc7, 0x6f, 0xbf, 0xd1, 0x7e, 0xad, - 0x08, 0x5c, 0xb5, 0x25, 0xd7, 0xa1, 0xcc, 0xd4, 0xb6, 0xb4, 0xd5, 0x90, 0x7f, 0x2d, 0x9c, 0x42, - 0xae, 0x40, 0x31, 0x70, 0xe5, 0x74, 0x03, 0x92, 0x5e, 0xdc, 0x76, 0xb1, 0x18, 0xb8, 0xe4, 0x03, - 0x00, 0xc3, 0x75, 0x4c, 0x4b, 0xb9, 0x93, 0xf2, 0x3d, 0xd8, 0x9a, 0xeb, 0x3d, 0xd4, 0x3d, 0x73, - 0x25, 0x44, 0x14, 0x3b, 0xf8, 0xe8, 0x3f, 0xc6, 0xa4, 0x91, 0x37, 0xa1, 0xea, 0x3a, 0x6b, 0x03, - 0xdb, 0xe6, 0x1d, 0xda, 0x68, 0x7f, 0xf1, 0xe4, 0xb8, 0x55, 0xbd, 0xcf, 0x4b, 0x1e, 0x1f, 0xb7, - 0x2e, 0x8b, 0x1d, 0x11, 0xfb, 0xf7, 0xb6, 0x67, 0x05, 0x96, 0xd3, 0x0d, 0x37, 0xb4, 0xb2, 0x9a, - 0xf6, 0xeb, 0x05, 0x68, 0xae, 0x59, 0x8f, 0xa8, 0xf9, 0xb6, 0xe5, 0x98, 0xee, 0x43, 0x82, 0x50, - 0xb5, 0xa9, 0xd3, 0x0d, 0xf6, 0xc7, 0xdc, 0x71, 0x0a, 0xbb, 0x0e, 0x47, 0x40, 
0x89, 0x44, 0x16, - 0xa1, 0x21, 0xf6, 0x2b, 0x96, 0xd3, 0xe5, 0x7d, 0x58, 0x8f, 0x66, 0xfa, 0x8e, 0x22, 0x60, 0xc4, - 0xa3, 0x1d, 0xc1, 0x8b, 0x43, 0xdd, 0x40, 0x4c, 0x28, 0x07, 0x7a, 0x57, 0x2d, 0x2a, 0x6b, 0x63, - 0x77, 0xf0, 0xb6, 0xde, 0x8d, 0x75, 0x2e, 0xd7, 0x0a, 0xb7, 0x75, 0xa6, 0x15, 0x32, 0x74, 0xed, - 0x8f, 0x0a, 0x50, 0x5f, 0x1b, 0x38, 0x06, 0xdf, 0xd4, 0x3f, 0xdd, 0x9a, 0xac, 0x54, 0xcc, 0x62, - 0xa6, 0x8a, 0x39, 0x80, 0xea, 0xc1, 0xc3, 0x50, 0x05, 0x6d, 0x2e, 0x6d, 0x8e, 0x3f, 0x2a, 0x64, - 0x93, 0x16, 0xee, 0x72, 0x3c, 0xe1, 0xec, 0x9c, 0x91, 0x0d, 0xaa, 0xde, 0x7d, 0x9b, 0x0b, 0x95, - 0xc2, 0xae, 0x7c, 0x05, 0x9a, 0x31, 0xb6, 0x33, 0xf9, 0x3d, 0xfe, 0x4e, 0x19, 0xaa, 0xb7, 0x3a, - 0x9d, 0xe5, 0xad, 0x75, 0xf2, 0x1a, 0x34, 0xa5, 0x1f, 0xec, 0x5e, 0xd4, 0x07, 0xa1, 0x1b, 0xb4, - 0x13, 0x91, 0x30, 0xce, 0xc7, 0x14, 0x78, 0x8f, 0xea, 0x76, 0x4f, 0x7e, 0x2c, 0xa1, 0xee, 0x80, - 0xac, 0x10, 0x05, 0x8d, 0xe8, 0x30, 0x33, 0xf0, 0xa9, 0xc7, 0xba, 0x50, 0xec, 0xf7, 0xe5, 0x67, - 0x73, 0x4a, 0x8b, 0x00, 0x5f, 0x60, 0x76, 0x12, 0x00, 0x98, 0x02, 0x24, 0x6f, 0x40, 0x5d, 0x1f, - 0x04, 0xfb, 0x7c, 0xcb, 0x25, 0xbe, 0x8d, 0xab, 0xdc, 0x4d, 0x28, 0xcb, 0x1e, 0x1f, 0xb7, 0xa6, - 0xee, 0x62, 0xfb, 0x35, 0xf5, 0x1f, 0x43, 0x6e, 0xd6, 0x38, 0x65, 0x63, 0x90, 0x8d, 0xab, 0x9c, - 0xb9, 0x71, 0x5b, 0x09, 0x00, 0x4c, 0x01, 0x92, 0x77, 0x61, 0xea, 0x80, 0x1e, 0x05, 0xfa, 0xae, - 0x14, 0x50, 0x3d, 0x8b, 0x80, 0x39, 0xa6, 0xf4, 0xdf, 0x8d, 0x55, 0xc7, 0x04, 0x18, 0xf1, 0xe1, - 0xe2, 0x01, 0xf5, 0x76, 0xa9, 0xe7, 0x4a, 0x7b, 0x85, 0x14, 0x52, 0x3b, 0x8b, 0x90, 0xf9, 0x93, - 0xe3, 0xd6, 0xc5, 0xbb, 0x19, 0x30, 0x98, 0x09, 0xae, 0xfd, 0xef, 0x22, 0xcc, 0xde, 0x12, 0x81, - 0x08, 0xae, 0x27, 0x34, 0x0f, 0x72, 0x19, 0x4a, 0x5e, 0x7f, 0xc0, 0x47, 0x4e, 0x49, 0xb8, 0x1a, - 0x70, 0x6b, 0x07, 0x59, 0x19, 0x79, 0x07, 0xea, 0xa6, 0x9c, 0x32, 0xa4, 0xb9, 0x64, 0x2c, 0xd3, - 0x96, 0xfa, 0x87, 0x21, 0x1a, 0xdb, 0x1b, 0xf6, 0xfc, 0x6e, 0xc7, 0xfa, 0x80, 0x4a, 0x0b, 0x02, - 0xdf, 0x1b, 0x6e, 
0x8a, 0x22, 0x54, 0x34, 0xb6, 0xaa, 0x1e, 0xd0, 0x23, 0xb1, 0x7f, 0x2e, 0x47, - 0xab, 0xea, 0x5d, 0x59, 0x86, 0x21, 0x95, 0xb4, 0xd4, 0xc7, 0xc2, 0x46, 0x41, 0x59, 0xd8, 0x7e, - 0x1e, 0xb0, 0x02, 0xf9, 0xdd, 0xb0, 0x29, 0xf3, 0x7d, 0x2b, 0x08, 0xa8, 0x27, 0x5f, 0xe3, 0x58, - 0x53, 0xe6, 0x1d, 0x8e, 0x80, 0x12, 0x89, 0xfc, 0x14, 0x34, 0x38, 0x78, 0xdb, 0x76, 0x77, 0xf9, - 0x8b, 0x6b, 0x08, 0x2b, 0xd0, 0x03, 0x55, 0x88, 0x11, 0x5d, 0xfb, 0x71, 0x11, 0x2e, 0xdd, 0xa2, - 0x81, 0xd0, 0x6a, 0x56, 0x69, 0xdf, 0x76, 0x8f, 0x98, 0x3e, 0x8d, 0xf4, 0x5b, 0xe4, 0x2d, 0x00, - 0xcb, 0xdf, 0xed, 0x1c, 0x1a, 0xfc, 0x3b, 0x10, 0xdf, 0xf0, 0x75, 0xf9, 0x49, 0xc2, 0x7a, 0xa7, - 0x2d, 0x29, 0x8f, 0x13, 0xff, 0x30, 0x56, 0x27, 0xda, 0x90, 0x17, 0x9f, 0xb0, 0x21, 0xef, 0x00, - 0xf4, 0x23, 0xad, 0xbc, 0xc4, 0x39, 0x7f, 0x56, 0x89, 0x39, 0x8b, 0x42, 0x1e, 0x83, 0xc9, 0xa3, - 0x27, 0x3b, 0x30, 0x67, 0xd2, 0x3d, 0x7d, 0x60, 0x07, 0xe1, 0x4e, 0x42, 0x7e, 0xc4, 0xa7, 0xdf, - 0x8c, 0x84, 0x41, 0x12, 0xab, 0x29, 0x24, 0x1c, 0xc2, 0xd6, 0xfe, 0x6e, 0x09, 0xae, 0xdc, 0xa2, - 0x41, 0x68, 0xa3, 0x93, 0xb3, 0x63, 0xa7, 0x4f, 0x0d, 0xf6, 0x16, 0x3e, 0x2c, 0x40, 0xd5, 0xd6, - 0x77, 0xa9, 0xcd, 0x56, 0x2f, 0xf6, 0x34, 0xef, 0x8d, 0xbd, 0x10, 0x8c, 0x96, 0xb2, 0xb0, 0xc1, - 0x25, 0xa4, 0x96, 0x06, 0x51, 0x88, 0x52, 0x3c, 0x9b, 0xd4, 0x0d, 0x7b, 0xe0, 0x07, 0x62, 0x67, - 0x27, 0xf5, 0xc9, 0x70, 0x52, 0x5f, 0x89, 0x48, 0x18, 0xe7, 0x23, 0x4b, 0x00, 0x86, 0x6d, 0x51, - 0x27, 0xe0, 0xb5, 0xc4, 0x77, 0x45, 0xd4, 0xfb, 0x5d, 0x09, 0x29, 0x18, 0xe3, 0x62, 0xa2, 0x7a, - 0xae, 0x63, 0x05, 0xae, 0x10, 0x55, 0x4e, 0x8a, 0xda, 0x8c, 0x48, 0x18, 0xe7, 0xe3, 0xd5, 0x68, - 0xe0, 0x59, 0x86, 0xcf, 0xab, 0x55, 0x52, 0xd5, 0x22, 0x12, 0xc6, 0xf9, 0xd8, 0x9a, 0x17, 0x7b, - 0xfe, 0x33, 0xad, 0x79, 0xbf, 0xdd, 0x80, 0x6b, 0x89, 0x6e, 0x0d, 0xf4, 0x80, 0xee, 0x0d, 0xec, - 0x0e, 0x0d, 0xd4, 0x0b, 0x1c, 0x73, 0x2d, 0xfc, 0x73, 0xd1, 0x7b, 0x17, 0xe1, 0x4f, 0xc6, 0x64, - 0xde, 0xfb, 0x50, 0x03, 0x4f, 0xf5, 0xee, 0x17, 0xa1, 
0xe1, 0xe8, 0x81, 0xcf, 0x3f, 0x5c, 0xf9, - 0x8d, 0x86, 0x6a, 0xd8, 0x3d, 0x45, 0xc0, 0x88, 0x87, 0x6c, 0xc1, 0x45, 0xd9, 0xc5, 0x37, 0x1f, - 0xb1, 0x3d, 0x3f, 0xf5, 0x44, 0x5d, 0xb9, 0x9c, 0xca, 0xba, 0x17, 0x37, 0x33, 0x78, 0x30, 0xb3, - 0x26, 0xd9, 0x84, 0x0b, 0x86, 0x08, 0x09, 0xa1, 0xb6, 0xab, 0x9b, 0x0a, 0x50, 0x98, 0x44, 0xc3, - 0xad, 0xd1, 0xca, 0x30, 0x0b, 0x66, 0xd5, 0x4b, 0x8f, 0xe6, 0xea, 0x58, 0xa3, 0xb9, 0x36, 0xce, - 0x68, 0xae, 0x8f, 0x37, 0x9a, 0x1b, 0xa7, 0x1b, 0xcd, 0xac, 0xe7, 0xd9, 0x38, 0xa2, 0x1e, 0x53, - 0x4f, 0xc4, 0x0a, 0x1b, 0x8b, 0x38, 0x0a, 0x7b, 0xbe, 0x93, 0xc1, 0x83, 0x99, 0x35, 0xc9, 0x2e, - 0x5c, 0x11, 0xe5, 0x37, 0x1d, 0xc3, 0x3b, 0xea, 0xb3, 0x85, 0x27, 0x86, 0xdb, 0x4c, 0xd8, 0xa4, - 0xaf, 0x74, 0x46, 0x72, 0xe2, 0x13, 0x50, 0xc8, 0xcf, 0xc3, 0xb4, 0x78, 0x4b, 0x9b, 0x7a, 0x9f, - 0xc3, 0x8a, 0xf8, 0xa3, 0x97, 0x24, 0xec, 0xf4, 0x4a, 0x9c, 0x88, 0x49, 0x5e, 0xb2, 0x0c, 0xb3, - 0xfd, 0x43, 0x83, 0xfd, 0x5c, 0xdf, 0xbb, 0x47, 0xa9, 0x49, 0x4d, 0xee, 0xf0, 0x6c, 0xb4, 0x5f, - 0x56, 0xd6, 0x9d, 0xad, 0x24, 0x19, 0xd3, 0xfc, 0xe4, 0x0d, 0x98, 0xf2, 0x03, 0xdd, 0x0b, 0xa4, - 0x21, 0x78, 0x7e, 0x46, 0xc4, 0x67, 0x29, 0x3b, 0x69, 0x27, 0x46, 0xc3, 0x04, 0x67, 0xe6, 0x7a, - 0x31, 0x7b, 0x7e, 0xeb, 0x45, 0x9e, 0xd9, 0xea, 0x9f, 0x16, 0xe1, 0xfa, 0x2d, 0x1a, 0x6c, 0xba, - 0x8e, 0x34, 0xa3, 0x67, 0x2d, 0xfb, 0xa7, 0xb2, 0xa2, 0x27, 0x17, 0xed, 0xe2, 0x44, 0x17, 0xed, - 0xd2, 0x84, 0x16, 0xed, 0xf2, 0x39, 0x2e, 0xda, 0x7f, 0xbf, 0x08, 0x2f, 0x27, 0x7a, 0x72, 0xcb, - 0x35, 0xd5, 0x84, 0xff, 0x69, 0x07, 0x9e, 0xa2, 0x03, 0x1f, 0x0b, 0xbd, 0x93, 0x3b, 0x42, 0x53, - 0x1a, 0xcf, 0x77, 0xd3, 0x1a, 0xcf, 0xbb, 0x79, 0x56, 0xbe, 0x0c, 0x09, 0xa7, 0x5a, 0xf1, 0xee, - 0x00, 0xf1, 0xa4, 0xdb, 0x36, 0x32, 0x67, 0x4b, 0xa5, 0x27, 0x0c, 0x00, 0xc5, 0x21, 0x0e, 0xcc, - 0xa8, 0x45, 0x3a, 0xf0, 0x92, 0x4f, 0x9d, 0xc0, 0x72, 0xa8, 0x9d, 0x84, 0x13, 0xda, 0xd0, 0x2b, - 0x12, 0xee, 0xa5, 0x4e, 0x16, 0x13, 0x66, 0xd7, 0xcd, 0x33, 0x0f, 0xfc, 0x4b, 0xe0, 0x2a, 
0xa7, - 0xe8, 0x9a, 0x89, 0x69, 0x2c, 0x1f, 0xa6, 0x35, 0x96, 0xf7, 0xf2, 0xbf, 0xb7, 0xf1, 0xb4, 0x95, - 0x25, 0x00, 0xfe, 0x16, 0xe2, 0xea, 0x4a, 0xb8, 0x48, 0x63, 0x48, 0xc1, 0x18, 0x17, 0x5b, 0x80, - 0x54, 0x3f, 0xc7, 0x35, 0x95, 0x70, 0x01, 0xea, 0xc4, 0x89, 0x98, 0xe4, 0x1d, 0xa9, 0xed, 0x54, - 0xc6, 0xd6, 0x76, 0xee, 0x00, 0x49, 0x18, 0x1e, 0x05, 0x5e, 0x35, 0x19, 0x7f, 0xbc, 0x3e, 0xc4, - 0x81, 0x19, 0xb5, 0x46, 0x0c, 0xe5, 0xda, 0x64, 0x87, 0x72, 0x7d, 0xfc, 0xa1, 0x4c, 0xde, 0x83, - 0xcb, 0x5c, 0x94, 0xec, 0x9f, 0x24, 0xb0, 0xd0, 0x7b, 0x3e, 0x2b, 0x81, 0x2f, 0xe3, 0x28, 0x46, - 0x1c, 0x8d, 0xc1, 0xde, 0x8f, 0xe1, 0x51, 0x93, 0x09, 0xd7, 0xed, 0xd1, 0x3a, 0xd1, 0x4a, 0x06, - 0x0f, 0x66, 0xd6, 0x64, 0x43, 0x2c, 0x60, 0xc3, 0x50, 0xdf, 0xb5, 0xa9, 0x29, 0xe3, 0xaf, 0xc3, - 0x21, 0xb6, 0xbd, 0xd1, 0x91, 0x14, 0x8c, 0x71, 0x65, 0xa9, 0x29, 0x53, 0x67, 0x54, 0x53, 0x6e, - 0x71, 0x2b, 0xfd, 0x5e, 0x42, 0x1b, 0x92, 0xba, 0x4e, 0x18, 0x51, 0xbf, 0x92, 0x66, 0xc0, 0xe1, - 0x3a, 0x5c, 0x4b, 0x34, 0x3c, 0xab, 0x1f, 0xf8, 0x49, 0xac, 0x99, 0x94, 0x96, 0x98, 0xc1, 0x83, - 0x99, 0x35, 0x99, 0x7e, 0xbe, 0x4f, 0x75, 0x3b, 0xd8, 0x4f, 0x02, 0xce, 0x26, 0xf5, 0xf3, 0xdb, - 0xc3, 0x2c, 0x98, 0x55, 0x2f, 0x73, 0x41, 0x9a, 0x7b, 0x3e, 0xd5, 0xaa, 0xef, 0x94, 0xe0, 0xf2, - 0x2d, 0x1a, 0x84, 0xa1, 0x69, 0x9f, 0x9a, 0x51, 0x3e, 0x06, 0x33, 0xca, 0x6f, 0x55, 0xe0, 0xc2, - 0x2d, 0x1a, 0x0c, 0x69, 0x63, 0xff, 0x9f, 0x76, 0xff, 0x26, 0x5c, 0x88, 0xa2, 0x21, 0x3b, 0x81, - 0xeb, 0x89, 0xb5, 0x3c, 0xb5, 0x5b, 0xee, 0x0c, 0xb3, 0x60, 0x56, 0x3d, 0xf2, 0x0d, 0x78, 0x99, - 0x2f, 0xf5, 0x4e, 0x57, 0xd8, 0x67, 0x85, 0x31, 0x21, 0x76, 0x9e, 0xa7, 0x25, 0x21, 0x5f, 0xee, - 0x64, 0xb3, 0xe1, 0xa8, 0xfa, 0xe4, 0xdb, 0x30, 0xd5, 0xb7, 0xfa, 0xd4, 0xb6, 0x1c, 0xae, 0x9f, - 0xe5, 0x0e, 0x22, 0xda, 0x8a, 0x81, 0x45, 0x1b, 0xb8, 0x78, 0x29, 0x26, 0x04, 0x66, 0x8e, 0xd4, - 0xfa, 0x39, 0x8e, 0xd4, 0xff, 0x51, 0x84, 0xda, 0x2d, 0xcf, 0x1d, 0xf4, 0xdb, 0x47, 0xa4, 0x0b, - 0xd5, 0x87, 0xdc, 0x79, 0x26, 
0x5d, 0x53, 0xe3, 0x9f, 0x28, 0x10, 0x3e, 0xb8, 0x48, 0x25, 0x12, - 0xff, 0x51, 0xc2, 0xb3, 0x41, 0x7c, 0x40, 0x8f, 0xa8, 0x29, 0x7d, 0x68, 0xe1, 0x20, 0xbe, 0xcb, - 0x0a, 0x51, 0xd0, 0x48, 0x0f, 0x66, 0x75, 0xdb, 0x76, 0x1f, 0x52, 0x73, 0x43, 0x0f, 0xb8, 0xdf, - 0x5b, 0xfa, 0x56, 0xce, 0x6a, 0x96, 0xe6, 0xc1, 0x0c, 0xcb, 0x49, 0x28, 0x4c, 0x63, 0x93, 0xf7, - 0xa1, 0xe6, 0x07, 0xae, 0xa7, 0x94, 0xad, 0xe6, 0xd2, 0xca, 0xf8, 0x2f, 0xbd, 0xfd, 0xf5, 0x8e, - 0x80, 0x12, 0x36, 0x7b, 0xf9, 0x07, 0x95, 0x00, 0xed, 0x37, 0x0b, 0x00, 0xb7, 0xb7, 0xb7, 0xb7, - 0xa4, 0x7b, 0xc1, 0x84, 0xb2, 0x3e, 0x08, 0x1d, 0x95, 0xe3, 0x3b, 0x04, 0x13, 0x81, 0xbc, 0xd2, - 0x87, 0x37, 0x08, 0xf6, 0x91, 0xa3, 0x93, 0x9f, 0x84, 0x9a, 0x54, 0x90, 0x65, 0xb7, 0x87, 0xf1, - 0x14, 0x52, 0x89, 0x46, 0x45, 0xd7, 0xfe, 0x56, 0x11, 0x60, 0xdd, 0xb4, 0x69, 0x47, 0x1d, 0x02, - 0x69, 0x04, 0xfb, 0x1e, 0xf5, 0xf7, 0x5d, 0xdb, 0x1c, 0xd3, 0x9b, 0xca, 0x6d, 0xfe, 0xdb, 0x0a, - 0x04, 0x23, 0x3c, 0x62, 0xc2, 0x94, 0x1f, 0xd0, 0xbe, 0x8a, 0xed, 0x1d, 0xd3, 0x89, 0x32, 0x27, - 0xec, 0x22, 0x11, 0x0e, 0x26, 0x50, 0x89, 0x0e, 0x4d, 0xcb, 0x31, 0xc4, 0x07, 0xd2, 0x3e, 0x1a, - 0x73, 0x20, 0xcd, 0xb2, 0x1d, 0xc7, 0x7a, 0x04, 0x83, 0x71, 0x4c, 0xed, 0xf7, 0x8a, 0x70, 0x89, - 0xcb, 0x63, 0xcd, 0x48, 0x44, 0xf0, 0x92, 0x3f, 0x39, 0x74, 0x60, 0xf5, 0x8f, 0x9f, 0x4e, 0xb4, - 0x38, 0xef, 0xb8, 0x49, 0x03, 0x3d, 0xd2, 0xe7, 0xa2, 0xb2, 0xd8, 0x29, 0xd5, 0x01, 0x94, 0x7d, - 0x36, 0x5f, 0x89, 0xde, 0xeb, 0x8c, 0x3d, 0x84, 0xb2, 0x1f, 0x80, 0xcf, 0x5e, 0xa1, 0xd7, 0x98, - 0xcf, 0x5a, 0x5c, 0x1c, 0xf9, 0x15, 0xa8, 0xfa, 0x81, 0x1e, 0x0c, 0xd4, 0xa7, 0xb9, 0x33, 0x69, - 0xc1, 0x1c, 0x3c, 0x9a, 0x47, 0xc4, 0x7f, 0x94, 0x42, 0xb5, 0xdf, 0x2b, 0xc0, 0x95, 0xec, 0x8a, - 0x1b, 0x96, 0x1f, 0x90, 0x3f, 0x31, 0xd4, 0xed, 0xa7, 0x7c, 0xe3, 0xac, 0x36, 0xef, 0xf4, 0xf0, - 0x4c, 0x83, 0x2a, 0x89, 0x75, 0x79, 0x00, 0x15, 0x2b, 0xa0, 0x3d, 0xb5, 0xbf, 0xbc, 0x3f, 0xe1, - 0x47, 0x8f, 0x2d, 0xed, 0x4c, 0x0a, 0x0a, 0x61, 0xda, 0xf7, 0x8a, 
0xa3, 0x1e, 0x99, 0x2f, 0x1f, - 0x76, 0x32, 0x4a, 0xfc, 0x6e, 0xbe, 0x28, 0xf1, 0x64, 0x83, 0x86, 0x83, 0xc5, 0xff, 0xd4, 0x70, - 0xb0, 0xf8, 0xfd, 0xfc, 0xc1, 0xe2, 0xa9, 0x6e, 0x18, 0x19, 0x33, 0xfe, 0x51, 0x09, 0xae, 0x3e, - 0x69, 0xd8, 0xb0, 0xf5, 0x4c, 0x8e, 0xce, 0xbc, 0xeb, 0xd9, 0x93, 0xc7, 0x21, 0x59, 0x82, 0x4a, - 0x7f, 0x5f, 0xf7, 0x95, 0x52, 0x76, 0x35, 0x0c, 0x33, 0x64, 0x85, 0x8f, 0xd9, 0xa4, 0xc1, 0x95, - 0x39, 0xfe, 0x17, 0x05, 0x2b, 0x9b, 0x8e, 0x7b, 0xd4, 0xf7, 0x23, 0x9b, 0x40, 0x38, 0x1d, 0x6f, - 0x8a, 0x62, 0x54, 0x74, 0x12, 0x40, 0x55, 0x98, 0x98, 0xe5, 0xca, 0x34, 0x7e, 0x20, 0x57, 0xc6, - 0xc1, 0x82, 0xe8, 0xa1, 0xa4, 0xb7, 0x42, 0xca, 0x22, 0x0b, 0x50, 0x0e, 0xa2, 0x30, 0x6f, 0xb5, - 0x35, 0x2f, 0x67, 0xe8, 0xa7, 0x9c, 0x8f, 0x6d, 0xec, 0xdd, 0x5d, 0x6e, 0x54, 0x37, 0xa5, 0xff, - 0xdc, 0x72, 0x1d, 0xae, 0x90, 0x95, 0xa2, 0x8d, 0xfd, 0xfd, 0x21, 0x0e, 0xcc, 0xa8, 0xa5, 0xfd, - 0x9b, 0x3a, 0x5c, 0xca, 0x1e, 0x0f, 0xac, 0xdf, 0x0e, 0xa9, 0xe7, 0x33, 0xec, 0x42, 0xb2, 0xdf, - 0x1e, 0x88, 0x62, 0x54, 0xf4, 0x4f, 0x74, 0xc0, 0xd9, 0x6f, 0x15, 0xe0, 0xb2, 0x27, 0x7d, 0x44, - 0xcf, 0x22, 0xe8, 0xec, 0x15, 0x61, 0xce, 0x18, 0x21, 0x10, 0x47, 0xb7, 0x85, 0xfc, 0x8d, 0x02, - 0xcc, 0xf7, 0x52, 0x76, 0x8e, 0x73, 0x3c, 0x73, 0xc9, 0xcf, 0x51, 0x6c, 0x8e, 0x90, 0x87, 0x23, - 0x5b, 0x42, 0xbe, 0x0d, 0xcd, 0x3e, 0x1b, 0x17, 0x7e, 0x40, 0x1d, 0x43, 0x05, 0x88, 0x8e, 0xff, - 0x25, 0x6d, 0x45, 0x58, 0xe1, 0x99, 0x2b, 0xae, 0x1f, 0xc4, 0x08, 0x18, 0x97, 0xf8, 0x9c, 0x1f, - 0xb2, 0xbc, 0x01, 0x75, 0x9f, 0x06, 0x81, 0xe5, 0x74, 0xc5, 0x7e, 0xa3, 0x21, 0xbe, 0x95, 0x8e, - 0x2c, 0xc3, 0x90, 0x4a, 0x7e, 0x0a, 0x1a, 0xdc, 0xe5, 0xb4, 0xec, 0x75, 0xfd, 0xf9, 0x06, 0x0f, - 0x17, 0x9b, 0x16, 0x01, 0x70, 0xb2, 0x10, 0x23, 0x3a, 0xf9, 0x12, 0x4c, 0xed, 0xf2, 0xcf, 0x57, - 0x9e, 0xbb, 0x17, 0x36, 0x2e, 0xae, 0xad, 0xb5, 0x63, 0xe5, 0x98, 0xe0, 0x22, 0x4b, 0x00, 0x34, - 0xf4, 0xcb, 0xa5, 0xed, 0x59, 0x91, 0xc7, 0x0e, 0x63, 0x5c, 0xe4, 0x15, 0x28, 0x05, 0xb6, 0xcf, - 0x6d, 
0x58, 0xf5, 0x68, 0x0b, 0xba, 0xbd, 0xd1, 0x41, 0x56, 0xae, 0xfd, 0xb8, 0x00, 0xb3, 0xa9, - 0xe3, 0x48, 0xac, 0xca, 0xc0, 0xb3, 0xe5, 0x34, 0x12, 0x56, 0xd9, 0xc1, 0x0d, 0x64, 0xe5, 0xe4, - 0x3d, 0xa9, 0x96, 0x17, 0x73, 0xa6, 0x18, 0xb9, 0xa7, 0x07, 0x3e, 0xd3, 0xc3, 0x87, 0x34, 0x72, - 0xee, 0xe6, 0x8b, 0xda, 0x23, 0xd7, 0x81, 0x98, 0x9b, 0x2f, 0xa2, 0x61, 0x82, 0x33, 0x65, 0xf0, - 0x2b, 0x9f, 0xc6, 0xe0, 0xa7, 0xfd, 0x7a, 0x31, 0xd6, 0x03, 0x52, 0xb3, 0x7f, 0x4a, 0x0f, 0x7c, - 0x81, 0x2d, 0xa0, 0xe1, 0xe2, 0xde, 0x88, 0xaf, 0x7f, 0x7c, 0x31, 0x96, 0x54, 0xf2, 0xb6, 0xe8, - 0xfb, 0x52, 0xce, 0x83, 0xdc, 0xdb, 0x1b, 0x1d, 0x11, 0x5d, 0xa5, 0xde, 0x5a, 0xf8, 0x0a, 0xca, - 0xe7, 0xf4, 0x0a, 0xb4, 0x7f, 0x5e, 0x82, 0xe6, 0x1d, 0x77, 0xf7, 0x13, 0x12, 0x41, 0x9d, 0xbd, - 0x4c, 0x15, 0x3f, 0xc6, 0x65, 0x6a, 0x07, 0x5e, 0x0e, 0x02, 0xbb, 0x43, 0x0d, 0xd7, 0x31, 0xfd, - 0xe5, 0xbd, 0x80, 0x7a, 0x6b, 0x96, 0x63, 0xf9, 0xfb, 0xd4, 0x94, 0xee, 0xa4, 0xcf, 0x9c, 0x1c, - 0xb7, 0x5e, 0xde, 0xde, 0xde, 0xc8, 0x62, 0xc1, 0x51, 0x75, 0xf9, 0xb4, 0x21, 0xce, 0x8e, 0xf2, - 0xb3, 0x55, 0x32, 0xe6, 0x46, 0x4c, 0x1b, 0xb1, 0x72, 0x4c, 0x70, 0x69, 0xdf, 0x2f, 0x42, 0x23, - 0x4c, 0x1e, 0x41, 0x3e, 0x0f, 0xb5, 0x5d, 0xcf, 0x3d, 0xa0, 0x9e, 0xf0, 0xdc, 0xc9, 0xb3, 0x55, - 0x6d, 0x51, 0x84, 0x8a, 0x46, 0x3e, 0x07, 0x95, 0xc0, 0xed, 0x5b, 0x46, 0xda, 0xa0, 0xb6, 0xcd, - 0x0a, 0x51, 0xd0, 0xce, 0x6f, 0x80, 0x7f, 0x21, 0xa1, 0xda, 0x35, 0x46, 0x2a, 0x63, 0xef, 0x42, - 0xd9, 0xd7, 0x7d, 0x5b, 0xae, 0xa7, 0x39, 0xf2, 0x30, 0x2c, 0x77, 0x36, 0x64, 0x1e, 0x86, 0xe5, - 0xce, 0x06, 0x72, 0x50, 0xed, 0x0f, 0x8b, 0xd0, 0x14, 0xfd, 0x26, 0x66, 0x85, 0x49, 0xf6, 0xdc, - 0x9b, 0x3c, 0x94, 0xc2, 0x1f, 0xf4, 0xa8, 0xc7, 0xcd, 0x4c, 0x72, 0x92, 0x8b, 0xfb, 0x07, 0x22, - 0x62, 0x18, 0x4e, 0x11, 0x15, 0xa9, 0xae, 0x2f, 0x9f, 0x63, 0xd7, 0x57, 0x4e, 0xd5, 0xf5, 0xd5, - 0xf3, 0xe8, 0xfa, 0x0f, 0x8b, 0xd0, 0xd8, 0xb0, 0xf6, 0xa8, 0x71, 0x64, 0xd8, 0xfc, 0x14, 0xa9, - 0x49, 0x6d, 0x1a, 0xd0, 0x5b, 0x9e, 0x6e, 
0xd0, 0x2d, 0xea, 0x59, 0x3c, 0xb9, 0x12, 0xfb, 0x3e, - 0xf8, 0x0c, 0x24, 0x4f, 0x91, 0xae, 0x8e, 0xe0, 0xc1, 0x91, 0xb5, 0xc9, 0x3a, 0x4c, 0x99, 0xd4, - 0xb7, 0x3c, 0x6a, 0x6e, 0xc5, 0x36, 0x2a, 0x9f, 0x57, 0x4b, 0xcd, 0x6a, 0x8c, 0xf6, 0xf8, 0xb8, - 0x35, 0xad, 0x0c, 0x94, 0x62, 0xc7, 0x92, 0xa8, 0xca, 0x3e, 0xf9, 0xbe, 0x3e, 0xf0, 0xb3, 0xda, - 0x18, 0xfb, 0xe4, 0xb7, 0xb2, 0x59, 0x70, 0x54, 0x5d, 0xad, 0x02, 0xa5, 0x0d, 0xb7, 0xab, 0x7d, - 0xaf, 0x04, 0x61, 0x16, 0x2e, 0xf2, 0x67, 0x0b, 0xd0, 0xd4, 0x1d, 0xc7, 0x0d, 0x64, 0x86, 0x2b, - 0xe1, 0x81, 0xc7, 0xdc, 0xc9, 0xbe, 0x16, 0x96, 0x23, 0x50, 0xe1, 0xbc, 0x0d, 0x1d, 0xca, 0x31, - 0x0a, 0xc6, 0x65, 0x93, 0x41, 0xca, 0x9f, 0xbc, 0x99, 0xbf, 0x15, 0xa7, 0xf0, 0x1e, 0x5f, 0xf9, - 0x1a, 0xcc, 0xa5, 0x1b, 0x7b, 0x16, 0x77, 0x50, 0x2e, 0xc7, 0x7c, 0x11, 0x20, 0x8a, 0x29, 0x79, - 0x06, 0x46, 0x2c, 0x2b, 0x61, 0xc4, 0x1a, 0x3f, 0x15, 0x42, 0xd4, 0xe8, 0x91, 0x86, 0xab, 0x6f, - 0xa5, 0x0c, 0x57, 0xeb, 0x93, 0x10, 0xf6, 0x64, 0x63, 0xd5, 0x2e, 0x5c, 0x88, 0x78, 0xa3, 0x6f, - 0xfe, 0x6e, 0xea, 0xcb, 0x14, 0xba, 0xd8, 0x17, 0x47, 0x7c, 0x99, 0xb3, 0xb1, 0x20, 0x9f, 0xe1, - 0x6f, 0x53, 0xfb, 0x9b, 0x05, 0x98, 0x8b, 0x0b, 0xe1, 0xe7, 0xb6, 0xbf, 0x0c, 0xd3, 0x1e, 0xd5, - 0xcd, 0xb6, 0x1e, 0x18, 0xfb, 0x3c, 0x9c, 0xbc, 0xc0, 0xe3, 0xbf, 0xf9, 0x09, 0x33, 0x8c, 0x13, - 0x30, 0xc9, 0x47, 0x74, 0x68, 0xb2, 0x82, 0x6d, 0xab, 0x47, 0xdd, 0x41, 0x30, 0xa6, 0x65, 0x96, - 0x6f, 0x8a, 0x30, 0x82, 0xc1, 0x38, 0xa6, 0xf6, 0x51, 0x01, 0x66, 0xe2, 0x0d, 0x3e, 0x77, 0xab, - 0xdd, 0x7e, 0xd2, 0x6a, 0xb7, 0x32, 0x81, 0xf7, 0x3e, 0xc2, 0x52, 0xf7, 0x9d, 0x66, 0xfc, 0xd1, - 0xb8, 0x75, 0x2e, 0x6e, 0x90, 0x28, 0x3c, 0xd1, 0x20, 0xf1, 0xc9, 0x4f, 0xee, 0x34, 0x4a, 0x93, - 0x2e, 0x3f, 0xc7, 0x9a, 0xf4, 0xc7, 0x99, 0x21, 0x2a, 0x96, 0xe5, 0xa8, 0x9a, 0x23, 0xcb, 0x51, - 0x2f, 0xcc, 0x72, 0x54, 0x9b, 0xd8, 0xc4, 0x76, 0x9a, 0x4c, 0x47, 0xf5, 0x67, 0x9a, 0xe9, 0xa8, - 0x71, 0x5e, 0x99, 0x8e, 0x20, 0x6f, 0xa6, 0xa3, 0xef, 0x16, 0x60, 0xc6, 0x4c, 
0x9c, 0xca, 0x95, - 0xe7, 0xe1, 0xc7, 0x5f, 0xce, 0x92, 0x87, 0x7c, 0xc5, 0xb1, 0xac, 0x64, 0x19, 0xa6, 0x44, 0x66, - 0xe5, 0x17, 0x9a, 0xfa, 0x58, 0xf2, 0x0b, 0x91, 0x5f, 0x81, 0x86, 0xad, 0xd6, 0x3a, 0x99, 0x75, - 0x71, 0x63, 0x22, 0x43, 0x52, 0x62, 0x46, 0x91, 0xff, 0x61, 0x11, 0x46, 0x12, 0xb5, 0x3f, 0xa8, - 0xc5, 0x17, 0xc4, 0x67, 0xed, 0x17, 0x78, 0x3d, 0xe9, 0x17, 0xb8, 0x9e, 0xf6, 0x0b, 0x0c, 0xad, - 0xe6, 0xd2, 0x37, 0xf0, 0xd3, 0xb1, 0x75, 0xa2, 0xc4, 0x13, 0x1b, 0x85, 0x43, 0x2e, 0x63, 0xad, - 0x58, 0x86, 0x59, 0xa9, 0x04, 0x28, 0x22, 0x9f, 0x64, 0xa7, 0xa3, 0x48, 0xae, 0xd5, 0x24, 0x19, - 0xd3, 0xfc, 0x4c, 0xa0, 0xaf, 0xf2, 0xdb, 0x8a, 0xdd, 0x50, 0x34, 0xc6, 0x55, 0xee, 0xd9, 0x90, - 0x83, 0xed, 0x9c, 0x3c, 0xaa, 0xfb, 0xd2, 0xba, 0x1f, 0xdb, 0x39, 0x21, 0x2f, 0x45, 0x49, 0x8d, - 0xbb, 0x38, 0x6a, 0x4f, 0x71, 0x71, 0xe8, 0xd0, 0xb4, 0x75, 0x3f, 0x10, 0x83, 0xc9, 0x94, 0xb3, - 0xc9, 0x1f, 0x3b, 0xdd, 0xba, 0xcf, 0x74, 0x89, 0x48, 0x81, 0xdf, 0x88, 0x60, 0x30, 0x8e, 0x49, - 0x4c, 0x98, 0x62, 0x7f, 0xf9, 0xcc, 0x62, 0x2e, 0x07, 0x32, 0x0b, 0xdc, 0x59, 0x64, 0x84, 0x96, - 0xb9, 0x8d, 0x18, 0x0e, 0x26, 0x50, 0x47, 0x78, 0x41, 0x60, 0x1c, 0x2f, 0x08, 0xf9, 0x79, 0xa1, - 0xb8, 0x1d, 0x85, 0xaf, 0xb5, 0xc9, 0x5f, 0x6b, 0x18, 0x05, 0x8a, 0x71, 0x22, 0x26, 0x79, 0xd9, - 0xa8, 0x18, 0xc8, 0x6e, 0x50, 0xd5, 0xa7, 0x92, 0xa3, 0x62, 0x27, 0x49, 0xc6, 0x34, 0x3f, 0xd9, - 0x82, 0x8b, 0x61, 0x51, 0xbc, 0x19, 0xd3, 0x1c, 0x27, 0x0c, 0xcb, 0xdb, 0xc9, 0xe0, 0xc1, 0xcc, - 0x9a, 0xfc, 0x9c, 0xcb, 0xc0, 0xf3, 0xa8, 0x13, 0xdc, 0xd6, 0xfd, 0x7d, 0x19, 0xdf, 0x17, 0x9d, - 0x73, 0x89, 0x48, 0x18, 0xe7, 0x23, 0x4b, 0x00, 0x02, 0x8e, 0xd7, 0x9a, 0x4d, 0x86, 0xd0, 0xee, - 0x84, 0x14, 0x8c, 0x71, 0x69, 0xdf, 0x6d, 0x40, 0xf3, 0x9e, 0x1e, 0x58, 0x87, 0x94, 0xbb, 0x2c, - 0xcf, 0xc7, 0x6f, 0xf4, 0x57, 0x0a, 0x70, 0x29, 0x19, 0x97, 0x7a, 0x8e, 0xce, 0x23, 0x9e, 0x17, - 0x09, 0x33, 0xa5, 0xe1, 0x88, 0x56, 0x70, 0x37, 0xd2, 0x50, 0x98, 0xeb, 0x79, 0xbb, 0x91, 0x3a, - 0xa3, 0x04, 0xe2, 
0xe8, 0xb6, 0x7c, 0x52, 0xdc, 0x48, 0xcf, 0x77, 0x22, 0xcf, 0x94, 0x93, 0xab, - 0xf6, 0xdc, 0x38, 0xb9, 0xea, 0xcf, 0x85, 0xd6, 0xdf, 0x8f, 0x39, 0xb9, 0x1a, 0x39, 0x83, 0xad, - 0xe4, 0x51, 0x0e, 0x81, 0x36, 0xca, 0x59, 0xc6, 0xb3, 0x30, 0x28, 0xe7, 0x03, 0x53, 0x96, 0x77, - 0x75, 0xdf, 0x32, 0xa4, 0xda, 0x91, 0x23, 0x71, 0xb1, 0x4a, 0x68, 0x28, 0x62, 0x32, 0xf8, 0x5f, - 0x14, 0xd8, 0x51, 0xfe, 0xc6, 0x62, 0xae, 0xfc, 0x8d, 0x64, 0x05, 0xca, 0xce, 0x01, 0x3d, 0x3a, - 0x5b, 0x3e, 0x03, 0xbe, 0x09, 0xbc, 0x77, 0x97, 0x1e, 0x21, 0xaf, 0xac, 0x7d, 0xbf, 0x08, 0xc0, - 0x1e, 0xff, 0x74, 0xee, 0xa6, 0x9f, 0x84, 0x9a, 0x3f, 0xe0, 0x86, 0x21, 0xa9, 0x30, 0x45, 0x11, - 0x6a, 0xa2, 0x18, 0x15, 0x9d, 0x7c, 0x0e, 0x2a, 0xdf, 0x1a, 0xd0, 0x81, 0x8a, 0x9d, 0x08, 0xf7, - 0x0d, 0x5f, 0x67, 0x85, 0x28, 0x68, 0xe7, 0x67, 0x3a, 0x56, 0x6e, 0xa9, 0xca, 0x79, 0xb9, 0xa5, - 0x1a, 0x50, 0xbb, 0xe7, 0xf2, 0x80, 0x57, 0xed, 0xbf, 0x16, 0x01, 0xa2, 0x80, 0x42, 0xf2, 0x9b, - 0x05, 0x78, 0x29, 0xfc, 0xe0, 0x02, 0xb1, 0xfd, 0xe3, 0xb9, 0xc2, 0x73, 0xbb, 0xa8, 0xb2, 0x3e, - 0x76, 0x3e, 0x03, 0x6d, 0x65, 0x89, 0xc3, 0xec, 0x56, 0x10, 0x84, 0x3a, 0xed, 0xf5, 0x83, 0xa3, - 0x55, 0xcb, 0x93, 0x23, 0x30, 0x33, 0x6e, 0xf5, 0xa6, 0xe4, 0x11, 0x55, 0xa5, 0x8d, 0x82, 0x7f, - 0x44, 0x8a, 0x82, 0x21, 0x0e, 0xd9, 0x87, 0xba, 0xe3, 0xbe, 0xe7, 0xb3, 0xee, 0x90, 0xc3, 0xf1, - 0xad, 0xf1, 0xbb, 0x5c, 0x74, 0xab, 0x70, 0x69, 0xc8, 0x3f, 0x58, 0x73, 0x64, 0x67, 0xff, 0x46, - 0x11, 0x2e, 0x64, 0xf4, 0x03, 0x79, 0x0b, 0xe6, 0x64, 0xec, 0x66, 0x94, 0x34, 0xbf, 0x10, 0x25, - 0xcd, 0xef, 0xa4, 0x68, 0x38, 0xc4, 0x4d, 0xde, 0x03, 0xd0, 0x0d, 0x83, 0xfa, 0xfe, 0xa6, 0x6b, - 0xaa, 0xfd, 0xc0, 0x9b, 0x4c, 0x7d, 0x59, 0x0e, 0x4b, 0x1f, 0x1f, 0xb7, 0x7e, 0x26, 0x2b, 0x1c, - 0x3b, 0xd5, 0xcf, 0x51, 0x05, 0x8c, 0x41, 0x92, 0x6f, 0x02, 0x08, 0x1b, 0x40, 0x98, 0x31, 0xe2, - 0x29, 0x86, 0xb3, 0x05, 0x95, 0x90, 0x6c, 0xe1, 0xeb, 0x03, 0xdd, 0x09, 0xac, 0xe0, 0x48, 0x24, - 0xe8, 0x79, 0x10, 0xa2, 0x60, 0x0c, 0x51, 0xfb, 0x27, 
0x45, 0xa8, 0x2b, 0xb7, 0xc0, 0x33, 0xb0, - 0x05, 0x77, 0x13, 0xb6, 0xe0, 0x09, 0x05, 0x60, 0x67, 0x59, 0x82, 0xdd, 0x94, 0x25, 0xf8, 0x56, - 0x7e, 0x51, 0x4f, 0xb6, 0x03, 0xff, 0x4e, 0x11, 0x66, 0x14, 0x6b, 0x5e, 0x0b, 0xed, 0x57, 0x61, - 0x56, 0x04, 0x4e, 0x6c, 0xea, 0x8f, 0x44, 0xae, 0x22, 0xde, 0x61, 0x65, 0x11, 0xf3, 0xdc, 0x4e, - 0x92, 0x30, 0xcd, 0xcb, 0x86, 0xb5, 0x28, 0xda, 0x61, 0x9b, 0x30, 0xe1, 0x6a, 0x15, 0xfb, 0x4d, - 0x3e, 0xac, 0xdb, 0x29, 0x1a, 0x0e, 0x71, 0xa7, 0x4d, 0xc4, 0xe5, 0x73, 0x30, 0x11, 0xff, 0xbb, - 0x02, 0x4c, 0x45, 0xfd, 0x75, 0xee, 0x06, 0xe2, 0xbd, 0xa4, 0x81, 0x78, 0x39, 0xf7, 0x70, 0x18, - 0x61, 0x1e, 0xfe, 0x0b, 0x35, 0x48, 0x9c, 0x03, 0x20, 0xbb, 0x70, 0xc5, 0xca, 0x8c, 0x66, 0x8c, - 0xcd, 0x36, 0xe1, 0xc1, 0xf6, 0xf5, 0x91, 0x9c, 0xf8, 0x04, 0x14, 0x32, 0x80, 0xfa, 0x21, 0xf5, - 0x02, 0xcb, 0xa0, 0xea, 0xf9, 0x6e, 0xe5, 0x56, 0xc9, 0xa4, 0x11, 0x3c, 0xec, 0xd3, 0x07, 0x52, - 0x00, 0x86, 0xa2, 0xc8, 0x2e, 0x54, 0xa8, 0xd9, 0xa5, 0x2a, 0x7b, 0x54, 0xce, 0x6c, 0xbe, 0x61, - 0x7f, 0xb2, 0x7f, 0x3e, 0x0a, 0x68, 0xe2, 0xc7, 0x0d, 0x4d, 0xe5, 0x9c, 0x0a, 0xd6, 0x29, 0xcd, - 0x4b, 0xe4, 0x20, 0xb4, 0xb6, 0x56, 0x26, 0x34, 0x79, 0x3c, 0xc1, 0xd6, 0xea, 0x43, 0xe3, 0xa1, - 0x1e, 0x50, 0xaf, 0xa7, 0x7b, 0x07, 0x72, 0xb7, 0x31, 0xfe, 0x13, 0xbe, 0xad, 0x90, 0xa2, 0x27, - 0x0c, 0x8b, 0x30, 0x92, 0x43, 0x5c, 0x68, 0x04, 0x52, 0x7d, 0x56, 0x26, 0xe5, 0xf1, 0x85, 0x2a, - 0x45, 0xdc, 0x97, 0xe7, 0x01, 0xd4, 0x5f, 0x8c, 0x64, 0x90, 0xc3, 0x44, 0xea, 0x77, 0x91, 0xf0, - 0xbf, 0x9d, 0xc3, 0x35, 0x21, 0xa1, 0xa2, 0xe5, 0x26, 0x3b, 0x85, 0xbc, 0xf6, 0x3f, 0x2b, 0xd1, - 0xb4, 0xfc, 0xac, 0xed, 0x84, 0x5f, 0x4a, 0xda, 0x09, 0xaf, 0xa5, 0xed, 0x84, 0x29, 0x7f, 0xfc, - 0xd9, 0x23, 0x88, 0x53, 0xe6, 0xb5, 0xf2, 0x39, 0x98, 0xd7, 0x5e, 0x85, 0xe6, 0x21, 0x9f, 0x09, - 0x44, 0x2a, 0xaa, 0x0a, 0x5f, 0x46, 0xf8, 0xcc, 0xfe, 0x20, 0x2a, 0xc6, 0x38, 0x0f, 0xab, 0x22, - 0x2f, 0xbb, 0x09, 0xb3, 0x3f, 0xcb, 0x2a, 0x9d, 0xa8, 0x18, 0xe3, 0x3c, 0x3c, 0xf8, 0xd0, 
0x72, - 0x0e, 0x44, 0x85, 0x1a, 0xaf, 0x20, 0x82, 0x0f, 0x55, 0x21, 0x46, 0x74, 0x72, 0x03, 0xea, 0x03, - 0x73, 0x4f, 0xf0, 0xd6, 0x39, 0x2f, 0xd7, 0x30, 0x77, 0x56, 0xd7, 0x64, 0x6a, 0x2c, 0x45, 0x65, - 0x2d, 0xe9, 0xe9, 0x7d, 0x45, 0xe0, 0x7b, 0x43, 0xd9, 0x92, 0xcd, 0xa8, 0x18, 0xe3, 0x3c, 0xe4, - 0xe7, 0x60, 0xc6, 0xa3, 0xe6, 0xc0, 0xa0, 0x61, 0x2d, 0xe0, 0xb5, 0x64, 0xce, 0xd0, 0x38, 0x05, - 0x53, 0x9c, 0x23, 0x8c, 0x84, 0xcd, 0xb1, 0x8c, 0x84, 0x5f, 0x83, 0x19, 0xd3, 0xd3, 0x2d, 0x87, - 0x9a, 0xf7, 0x1d, 0x1e, 0x74, 0x21, 0x43, 0x20, 0x43, 0x03, 0xfd, 0x6a, 0x82, 0x8a, 0x29, 0x6e, - 0xed, 0x5f, 0x14, 0xa1, 0x22, 0x32, 0x99, 0xae, 0xc3, 0x05, 0xcb, 0xb1, 0x02, 0x4b, 0xb7, 0x57, - 0xa9, 0xad, 0x1f, 0x25, 0x03, 0x4f, 0x5e, 0x66, 0x1b, 0xed, 0xf5, 0x61, 0x32, 0x66, 0xd5, 0x61, - 0x9d, 0x13, 0x88, 0xe5, 0x5b, 0xa1, 0x08, 0x3b, 0x9a, 0x48, 0xa3, 0x9d, 0xa0, 0x60, 0x8a, 0x93, - 0x29, 0x43, 0xfd, 0x8c, 0xa8, 0x12, 0xae, 0x0c, 0x25, 0x63, 0x49, 0x92, 0x7c, 0x5c, 0x49, 0x1f, - 0x70, 0x85, 0x38, 0x3c, 0x68, 0x24, 0x03, 0xc7, 0x84, 0x92, 0x9e, 0xa2, 0xe1, 0x10, 0x37, 0x43, - 0xd8, 0xd3, 0x2d, 0x7b, 0xe0, 0xd1, 0x08, 0xa1, 0x12, 0x21, 0xac, 0xa5, 0x68, 0x38, 0xc4, 0xad, - 0xfd, 0xf7, 0x02, 0x90, 0xe1, 0xa3, 0x13, 0x64, 0x1f, 0xaa, 0x0e, 0xb7, 0x45, 0xe6, 0xce, 0xde, - 0x1f, 0x33, 0x69, 0x8a, 0x45, 0x42, 0x16, 0x48, 0x7c, 0xe2, 0x40, 0x9d, 0x3e, 0x0a, 0xa8, 0xe7, - 0x84, 0x47, 0xa9, 0x26, 0x73, 0x53, 0x80, 0xd8, 0x9b, 0x49, 0x64, 0x0c, 0x65, 0x68, 0xbf, 0x5f, - 0x84, 0x66, 0x8c, 0xef, 0x69, 0x5b, 0x7c, 0x9e, 0xcd, 0x41, 0x98, 0x00, 0x77, 0x3c, 0x5b, 0xce, - 0x77, 0xb1, 0x6c, 0x0e, 0x92, 0x84, 0x1b, 0x18, 0xe7, 0x23, 0x4b, 0x00, 0x3d, 0xdd, 0x0f, 0xa8, - 0xc7, 0x75, 0xa1, 0x54, 0x0e, 0x85, 0xcd, 0x90, 0x82, 0x31, 0x2e, 0x72, 0x5d, 0xde, 0xf5, 0x50, - 0x4e, 0xe6, 0xbc, 0x1c, 0x71, 0x91, 0x43, 0x65, 0x02, 0x17, 0x39, 0x90, 0x2e, 0xcc, 0xa9, 0x56, - 0x2b, 0xea, 0xd9, 0x32, 0x22, 0x8a, 0x81, 0x9a, 0x82, 0xc0, 0x21, 0x50, 0xed, 0xfb, 0x05, 0x98, - 0x4e, 0x18, 0xa0, 0x44, 0xb6, 
0x4a, 0x75, 0xf0, 0x27, 0x91, 0xad, 0x32, 0x76, 0x5e, 0xe7, 0x0b, - 0x50, 0x15, 0x1d, 0x94, 0x8e, 0xe7, 0x15, 0x5d, 0x88, 0x92, 0xca, 0x56, 0x16, 0x69, 0xe2, 0x4e, - 0xaf, 0x2c, 0xd2, 0x06, 0x8e, 0x8a, 0x2e, 0x3c, 0x47, 0xa2, 0x75, 0xb2, 0xa7, 0x63, 0x9e, 0x23, - 0x51, 0x8e, 0x21, 0x87, 0xf6, 0x0f, 0x78, 0xbb, 0x03, 0xef, 0x28, 0xdc, 0x59, 0x77, 0xa1, 0x26, - 0x63, 0x38, 0xe5, 0xa7, 0xf1, 0x56, 0x0e, 0xab, 0x18, 0xc7, 0x91, 0xd1, 0x8a, 0xba, 0x71, 0x70, - 0x7f, 0x6f, 0x0f, 0x15, 0x3a, 0xb9, 0x09, 0x0d, 0xd7, 0x91, 0x5f, 0xb0, 0x7c, 0xfc, 0x2f, 0xb2, - 0x95, 0xe3, 0xbe, 0x2a, 0x7c, 0x7c, 0xdc, 0xba, 0x14, 0xfe, 0x49, 0x34, 0x12, 0xa3, 0x9a, 0xda, - 0x9f, 0x29, 0xc0, 0x4b, 0xe8, 0xda, 0xb6, 0xe5, 0x74, 0x93, 0x9e, 0x4f, 0x62, 0xc3, 0x4c, 0x4f, - 0x7f, 0xb4, 0xe3, 0xe8, 0x87, 0xba, 0x65, 0xeb, 0xbb, 0x36, 0x7d, 0xea, 0xce, 0x78, 0x10, 0x58, - 0xf6, 0x82, 0xb8, 0xfb, 0x72, 0x61, 0xdd, 0x09, 0xee, 0x7b, 0x9d, 0xc0, 0xb3, 0x9c, 0xae, 0x98, - 0x25, 0x37, 0x13, 0x58, 0x98, 0xc2, 0xd6, 0xfe, 0xa0, 0x04, 0x3c, 0x8e, 0x90, 0x7c, 0x19, 0x1a, - 0x3d, 0x6a, 0xec, 0xeb, 0x8e, 0xe5, 0xab, 0xbc, 0xbf, 0x97, 0xd9, 0x73, 0x6d, 0xaa, 0xc2, 0xc7, - 0xec, 0x55, 0x2c, 0x77, 0x36, 0xf8, 0x51, 0x9d, 0x88, 0x97, 0x18, 0x50, 0xed, 0xfa, 0xbe, 0xde, - 0xb7, 0x72, 0x87, 0x98, 0x88, 0x3c, 0xab, 0x62, 0x3a, 0x12, 0xbf, 0x51, 0x42, 0x13, 0x03, 0x2a, - 0x7d, 0x5b, 0xb7, 0x9c, 0xdc, 0x77, 0xb5, 0xb1, 0x27, 0xd8, 0x62, 0x48, 0xc2, 0x54, 0xc9, 0x7f, - 0xa2, 0xc0, 0x26, 0x03, 0x68, 0xfa, 0x86, 0xa7, 0xf7, 0xfc, 0x7d, 0x7d, 0xe9, 0xb5, 0xd7, 0x73, - 0x2b, 0xff, 0x91, 0x28, 0xa1, 0x8b, 0xac, 0xe0, 0xf2, 0x66, 0xe7, 0xf6, 0xf2, 0xd2, 0x6b, 0xaf, - 0x63, 0x5c, 0x4e, 0x5c, 0xec, 0x6b, 0xaf, 0x2e, 0xc9, 0x19, 0x64, 0xe2, 0x62, 0x5f, 0x7b, 0x75, - 0x09, 0xe3, 0x72, 0xb4, 0xff, 0x55, 0x80, 0x46, 0xc8, 0x4b, 0x76, 0x00, 0xd8, 0x5c, 0x26, 0x33, - 0xa3, 0x9e, 0xe9, 0x5e, 0x1b, 0x6e, 0xed, 0xd9, 0x09, 0x2b, 0x63, 0x0c, 0x28, 0x23, 0x75, 0x6c, - 0x71, 0xd2, 0xa9, 0x63, 0x17, 0xa1, 0xb1, 0xaf, 0x3b, 0xa6, 0xbf, 
0xaf, 0x1f, 0x88, 0x29, 0x3d, - 0x96, 0x4c, 0xf9, 0xb6, 0x22, 0x60, 0xc4, 0xa3, 0xfd, 0xa3, 0x2a, 0x88, 0xb8, 0x10, 0x36, 0xe9, - 0x98, 0x96, 0x2f, 0x0e, 0x3f, 0x14, 0x78, 0xcd, 0x70, 0xd2, 0x59, 0x95, 0xe5, 0x18, 0x72, 0x90, - 0xcb, 0x50, 0xea, 0x59, 0x8e, 0xd4, 0x40, 0xb8, 0x21, 0x77, 0xd3, 0x72, 0x90, 0x95, 0x71, 0x92, - 0xfe, 0x48, 0x6a, 0x18, 0x82, 0xa4, 0x3f, 0x42, 0x56, 0x46, 0xbe, 0x0a, 0xb3, 0xb6, 0xeb, 0x1e, - 0xb0, 0xe9, 0x43, 0x29, 0x22, 0xc2, 0xab, 0xce, 0x4d, 0x2b, 0x1b, 0x49, 0x12, 0xa6, 0x79, 0xc9, - 0x0e, 0xbc, 0xfc, 0x01, 0xf5, 0x5c, 0x39, 0x5f, 0x76, 0x6c, 0x4a, 0xfb, 0x0a, 0x46, 0xa8, 0xc6, - 0x3c, 0x4a, 0xf6, 0x17, 0xb3, 0x59, 0x70, 0x54, 0x5d, 0x1e, 0x6f, 0xaf, 0x7b, 0x5d, 0x1a, 0x6c, - 0x79, 0x2e, 0xd3, 0x5d, 0x2c, 0xa7, 0xab, 0x60, 0xab, 0x11, 0xec, 0x76, 0x36, 0x0b, 0x8e, 0xaa, - 0x4b, 0xde, 0x81, 0x79, 0x41, 0x12, 0x6a, 0xcb, 0xb2, 0x98, 0x66, 0x2c, 0x5b, 0x5d, 0x71, 0x3a, - 0x2d, 0xfc, 0x65, 0xdb, 0x23, 0x78, 0x70, 0x64, 0x6d, 0x72, 0x07, 0xe6, 0x94, 0xb7, 0x74, 0x8b, - 0x7a, 0x9d, 0x30, 0x56, 0x68, 0xba, 0x7d, 0xed, 0xe4, 0xb8, 0x75, 0x65, 0x95, 0xf6, 0x3d, 0x6a, - 0xc4, 0xbd, 0xce, 0x8a, 0x0b, 0x87, 0xea, 0x11, 0x84, 0x4b, 0x3c, 0x20, 0x68, 0xa7, 0xbf, 0xe2, - 0xba, 0xb6, 0xe9, 0x3e, 0x74, 0xd4, 0xb3, 0x0b, 0x85, 0x9d, 0x3b, 0x48, 0x3b, 0x99, 0x1c, 0x38, - 0xa2, 0x26, 0x7b, 0x72, 0x4e, 0x59, 0x75, 0x1f, 0x3a, 0x69, 0x54, 0x88, 0x9e, 0xbc, 0x33, 0x82, - 0x07, 0x47, 0xd6, 0x26, 0x6b, 0x40, 0xd2, 0x4f, 0xb0, 0xd3, 0x97, 0x2e, 0xfc, 0x4b, 0x22, 0xc9, - 0x51, 0x9a, 0x8a, 0x19, 0x35, 0xc8, 0x06, 0x5c, 0x4c, 0x97, 0x32, 0x71, 0xd2, 0x9b, 0xcf, 0xd3, - 0x1b, 0x63, 0x06, 0x1d, 0x33, 0x6b, 0x69, 0xff, 0xb8, 0x08, 0xd3, 0x89, 0xac, 0x18, 0xcf, 0x5d, - 0xf6, 0x01, 0xb6, 0x79, 0xe8, 0xf9, 0xdd, 0xf5, 0xd5, 0xdb, 0x54, 0x37, 0xa9, 0x77, 0x97, 0xaa, - 0x0c, 0x26, 0x62, 0x59, 0x4c, 0x50, 0x30, 0xc5, 0x49, 0xf6, 0xa0, 0x22, 0xfc, 0x04, 0x79, 0x6f, - 0x48, 0x52, 0x7d, 0xc4, 0x9d, 0x05, 0xf2, 0x5a, 0x31, 0xd7, 0xa3, 0x28, 0xe0, 0xb5, 0x00, 0xa6, - 0xe2, 
0x1c, 0x6c, 0x22, 0x89, 0xd4, 0xde, 0x5a, 0x42, 0xe5, 0x5d, 0x87, 0x52, 0x10, 0x8c, 0x9b, - 0xd7, 0x40, 0xf8, 0x9d, 0xb6, 0x37, 0x90, 0x61, 0x68, 0x7b, 0xec, 0xdd, 0xf9, 0xbe, 0xe5, 0x3a, - 0x32, 0xc9, 0xfd, 0x0e, 0xd4, 0xe4, 0xee, 0x69, 0xcc, 0xbc, 0x0c, 0x5c, 0x57, 0x52, 0x66, 0x57, - 0x85, 0xa5, 0xfd, 0xfb, 0x22, 0x34, 0x42, 0x33, 0xc9, 0x29, 0x92, 0xc7, 0xbb, 0xd0, 0x08, 0x03, - 0x1a, 0x73, 0x5f, 0xff, 0x1a, 0xc5, 0xd9, 0xf1, 0x9d, 0x7d, 0xf8, 0x17, 0x23, 0x19, 0xf1, 0x60, - 0xc9, 0x52, 0x8e, 0x60, 0xc9, 0x3e, 0xd4, 0x02, 0xcf, 0xea, 0x76, 0xe5, 0x2e, 0x21, 0x4f, 0xb4, - 0x64, 0xd8, 0x5d, 0xdb, 0x02, 0x50, 0xf6, 0xac, 0xf8, 0x83, 0x4a, 0x8c, 0xf6, 0x3e, 0xcc, 0xa5, - 0x39, 0xb9, 0x0a, 0x6d, 0xec, 0x53, 0x73, 0x60, 0xab, 0x3e, 0x8e, 0x54, 0x68, 0x59, 0x8e, 0x21, - 0x07, 0xb9, 0x01, 0x75, 0xf6, 0x9a, 0x3e, 0x70, 0x1d, 0xa5, 0xc6, 0xf2, 0xdd, 0xc8, 0xb6, 0x2c, - 0xc3, 0x90, 0xaa, 0xfd, 0x97, 0x12, 0x5c, 0x8e, 0x8c, 0x5d, 0x9b, 0xba, 0xa3, 0x77, 0x4f, 0x71, - 0xe7, 0xe7, 0xa7, 0x27, 0xdd, 0xce, 0x7a, 0x03, 0x48, 0xe9, 0x39, 0xb8, 0x01, 0xe4, 0xff, 0x14, - 0x81, 0x07, 0x5f, 0x93, 0x6f, 0xc3, 0x94, 0x1e, 0xbb, 0xee, 0x59, 0xbe, 0xce, 0x9b, 0xb9, 0x5f, - 0x27, 0x8f, 0xf1, 0x0e, 0x03, 0xe0, 0xe2, 0xa5, 0x98, 0x10, 0x48, 0x5c, 0xa8, 0xef, 0xe9, 0xb6, - 0xcd, 0x74, 0xa1, 0xdc, 0xce, 0xbb, 0x84, 0x70, 0x3e, 0xcc, 0xd7, 0x24, 0x34, 0x86, 0x42, 0xc8, - 0x77, 0x0b, 0x30, 0xed, 0xc5, 0xb7, 0x6b, 0xf2, 0x85, 0xe4, 0x09, 0xed, 0x88, 0xa1, 0xc5, 0xc3, - 0xed, 0xe2, 0x7b, 0xc2, 0xa4, 0x4c, 0xed, 0x3f, 0x17, 0x60, 0xba, 0x63, 0x5b, 0xa6, 0xe5, 0x74, - 0xcf, 0xf1, 0x02, 0x92, 0xfb, 0x50, 0xf1, 0x6d, 0xcb, 0xa4, 0x63, 0xae, 0x26, 0x62, 0x1d, 0x63, - 0x00, 0x28, 0x70, 0x92, 0x37, 0x9a, 0x94, 0x4e, 0x71, 0xa3, 0xc9, 0x1f, 0x55, 0x41, 0x1e, 0x23, - 0x20, 0x03, 0x68, 0x74, 0xd5, 0x45, 0x09, 0xf2, 0x19, 0x6f, 0xe7, 0x48, 0xb2, 0x99, 0xb8, 0x72, - 0x41, 0xcc, 0xfd, 0x61, 0x21, 0x46, 0x92, 0x08, 0x4d, 0xde, 0x33, 0xbe, 0x9a, 0xf3, 0x9e, 0x71, - 0x21, 0x6e, 0xf8, 0xa6, 0x71, 0x1d, 0xca, 
0xfb, 0x41, 0xd0, 0x97, 0x83, 0x69, 0xfc, 0x73, 0x22, - 0x51, 0x9e, 0x27, 0xa1, 0x13, 0xb1, 0xff, 0xc8, 0xa1, 0x99, 0x08, 0x47, 0x0f, 0x6f, 0x73, 0x5c, - 0xc9, 0x15, 0x46, 0x12, 0x17, 0xc1, 0xfe, 0x23, 0x87, 0x26, 0xbf, 0x0c, 0xcd, 0xc0, 0xd3, 0x1d, - 0x7f, 0xcf, 0xf5, 0x7a, 0xd4, 0x93, 0x7b, 0xd4, 0xb5, 0x1c, 0x57, 0x6d, 0x6f, 0x47, 0x68, 0xc2, - 0x24, 0x9b, 0x28, 0xc2, 0xb8, 0x34, 0x72, 0x00, 0xf5, 0x81, 0x29, 0x1a, 0x26, 0xcd, 0x60, 0xcb, - 0x79, 0x6e, 0x4f, 0x8f, 0x05, 0x89, 0xa8, 0x7f, 0x18, 0x0a, 0x48, 0x5e, 0x5c, 0x5a, 0x9b, 0xd4, - 0xc5, 0xa5, 0xf1, 0xd1, 0x98, 0x95, 0x84, 0x86, 0xf4, 0xa4, 0x5e, 0xeb, 0x74, 0x65, 0x8c, 0xdb, - 0x5a, 0x6e, 0x95, 0x53, 0x88, 0x6c, 0x86, 0xba, 0xb1, 0xd3, 0x45, 0x25, 0x43, 0xeb, 0x81, 0xf4, - 0x1d, 0x11, 0x23, 0x71, 0x59, 0x93, 0x38, 0x19, 0xb9, 0x78, 0xba, 0xf9, 0x20, 0xbc, 0x35, 0x28, - 0x96, 0x2c, 0x3e, 0xf3, 0x56, 0x26, 0xed, 0x3f, 0x14, 0xa1, 0xb4, 0xbd, 0xd1, 0x11, 0x09, 0x60, - 0xf9, 0xf5, 0x6f, 0xb4, 0x73, 0x60, 0xf5, 0x1f, 0x50, 0xcf, 0xda, 0x3b, 0x92, 0x5b, 0xef, 0x58, - 0x02, 0xd8, 0x34, 0x07, 0x66, 0xd4, 0x22, 0xef, 0xc2, 0x94, 0xa1, 0xaf, 0x50, 0x2f, 0x18, 0xc7, - 0xb0, 0xc0, 0x8f, 0x80, 0xaf, 0x2c, 0x47, 0xd5, 0x31, 0x01, 0x46, 0x76, 0x00, 0x8c, 0x08, 0xba, - 0x74, 0x66, 0x73, 0x48, 0x0c, 0x38, 0x06, 0x44, 0x10, 0x1a, 0x07, 0x8c, 0x95, 0xa3, 0x96, 0xcf, - 0x82, 0xca, 0x47, 0xce, 0x5d, 0x55, 0x17, 0x23, 0x18, 0xcd, 0x81, 0xe9, 0xc4, 0x0d, 0x4e, 0xe4, - 0x2b, 0x50, 0x77, 0xfb, 0xb1, 0xe9, 0xb4, 0xc1, 0xa3, 0x69, 0xeb, 0xf7, 0x65, 0xd9, 0xe3, 0xe3, - 0xd6, 0xf4, 0x86, 0xdb, 0xb5, 0x0c, 0x55, 0x80, 0x21, 0x3b, 0xd1, 0xa0, 0xca, 0xcf, 0x6d, 0xaa, - 0xfb, 0x9b, 0xf8, 0xda, 0xc1, 0xaf, 0x58, 0xf1, 0x51, 0x52, 0xb4, 0x5f, 0x2d, 0x43, 0xe4, 0x71, - 0x25, 0x3e, 0x54, 0xc5, 0x99, 0x11, 0x39, 0x73, 0x9f, 0xeb, 0xf1, 0x14, 0x29, 0x8a, 0x74, 0xa1, - 0xf4, 0xbe, 0xbb, 0x9b, 0x7b, 0xe2, 0x8e, 0x25, 0x6c, 0x10, 0xb6, 0xb2, 0x58, 0x01, 0x32, 0x09, - 0xe4, 0xaf, 0x16, 0xe0, 0x45, 0x3f, 0xad, 0xfa, 0xca, 0xe1, 0x80, 0xf9, 0x75, 
0xfc, 0xb4, 0x32, - 0x2d, 0xc3, 0x9e, 0x47, 0x91, 0x71, 0xb8, 0x2d, 0xac, 0xff, 0x85, 0x2b, 0x54, 0x0e, 0xa7, 0x5b, - 0x39, 0xef, 0xa9, 0x4d, 0xf6, 0x7f, 0xb2, 0x0c, 0xa5, 0x28, 0xed, 0x3b, 0x45, 0x68, 0xc6, 0x66, - 0xeb, 0xdc, 0xd7, 0x82, 0x3d, 0x4a, 0x5d, 0x0b, 0xb6, 0x35, 0x7e, 0x64, 0x40, 0xd4, 0xaa, 0xf3, - 0xbe, 0x19, 0xec, 0x9f, 0x15, 0xa1, 0xb4, 0xb3, 0xba, 0x96, 0xdc, 0xb4, 0x16, 0x9e, 0xc1, 0xa6, - 0x75, 0x1f, 0x6a, 0xbb, 0x03, 0xcb, 0x0e, 0x2c, 0x27, 0x77, 0x4a, 0x19, 0x75, 0x8b, 0x9a, 0xf4, - 0x75, 0x08, 0x54, 0x54, 0xf0, 0xa4, 0x0b, 0xb5, 0xae, 0xc8, 0xe9, 0x99, 0x3b, 0x5e, 0x52, 0xe6, - 0x06, 0x15, 0x82, 0xe4, 0x1f, 0x54, 0xe8, 0xda, 0x11, 0x54, 0x77, 0x56, 0xa5, 0xda, 0xff, 0x6c, - 0x7b, 0x53, 0xfb, 0x65, 0x08, 0xb5, 0x80, 0x67, 0x2f, 0xfc, 0xbf, 0x15, 0x20, 0xa9, 0xf8, 0x3c, - 0xfb, 0xd1, 0x74, 0x90, 0x1e, 0x4d, 0xab, 0x93, 0xf8, 0xf8, 0xb2, 0x07, 0x94, 0xf6, 0x6f, 0x0b, - 0x90, 0x3a, 0xe8, 0x47, 0x5e, 0x97, 0xe9, 0xe1, 0x92, 0x81, 0x69, 0x2a, 0x3d, 0x1c, 0x49, 0x72, - 0xc7, 0xd2, 0xc4, 0x7d, 0xc8, 0xb6, 0x6b, 0x71, 0x07, 0x9a, 0x6c, 0xfe, 0xbd, 0xf1, 0xb7, 0x6b, - 0x59, 0xee, 0x38, 0x19, 0x3c, 0x19, 0x27, 0x61, 0x52, 0xae, 0xf6, 0x0f, 0x8b, 0x50, 0x7d, 0x66, - 0xb9, 0x0d, 0x68, 0x22, 0x9e, 0x75, 0x25, 0xe7, 0x6c, 0x3f, 0x32, 0x9a, 0xb5, 0x97, 0x8a, 0x66, - 0xcd, 0x7b, 0xfd, 0xf9, 0x53, 0x62, 0x59, 0xff, 0x75, 0x01, 0xe4, 0x5a, 0xb3, 0xee, 0xf8, 0x81, - 0xee, 0x18, 0x94, 0x18, 0xe1, 0xc2, 0x96, 0x37, 0x68, 0x4a, 0x06, 0x16, 0x0a, 0x5d, 0x86, 0xff, - 0x56, 0x0b, 0x19, 0xf9, 0x69, 0xa8, 0xef, 0xbb, 0x7e, 0xc0, 0x17, 0xaf, 0x62, 0xd2, 0x64, 0x76, - 0x5b, 0x96, 0x63, 0xc8, 0x91, 0x76, 0x67, 0x57, 0x46, 0xbb, 0xb3, 0xb5, 0xdf, 0x2e, 0xc2, 0xd4, - 0x27, 0x25, 0x79, 0x42, 0x56, 0xf4, 0x6f, 0x29, 0x67, 0xf4, 0x6f, 0xf9, 0x2c, 0xd1, 0xbf, 0xda, - 0x0f, 0x0b, 0x00, 0xcf, 0x2c, 0x73, 0x83, 0x99, 0x0c, 0xcc, 0xcd, 0x3d, 0xae, 0xb2, 0xc3, 0x72, - 0xff, 0x5e, 0x45, 0x3d, 0x12, 0x0f, 0xca, 0xfd, 0xb0, 0x00, 0x33, 0x7a, 0x22, 0xd0, 0x35, 0xb7, - 0xbe, 0x9c, 0x8a, 
0x9b, 0x0d, 0xe3, 0xb4, 0x92, 0xe5, 0x98, 0x12, 0x4b, 0xde, 0x88, 0x32, 0x93, - 0xdf, 0x8b, 0x86, 0xfd, 0x50, 0x4a, 0x71, 0xae, 0xbb, 0x25, 0x38, 0x9f, 0x12, 0x58, 0x5c, 0x9a, - 0x48, 0x60, 0x71, 0xfc, 0xc8, 0x64, 0xf9, 0x89, 0x47, 0x26, 0x0f, 0xa1, 0xb1, 0xe7, 0xb9, 0x3d, - 0x1e, 0xbb, 0x2b, 0xef, 0xfe, 0xbe, 0x99, 0x63, 0xa1, 0xec, 0xed, 0x5a, 0x0e, 0x35, 0x79, 0x5c, - 0x70, 0x68, 0xb8, 0x5a, 0x53, 0xf8, 0x18, 0x89, 0xe2, 0xb6, 0x7e, 0x57, 0x48, 0xad, 0x4e, 0x52, - 0x6a, 0x38, 0x97, 0x6c, 0x0b, 0x74, 0x54, 0x62, 0x92, 0xf1, 0xba, 0xb5, 0x67, 0x13, 0xaf, 0xab, - 0xfd, 0xf9, 0x9a, 0x9a, 0xc0, 0x9e, 0xbb, 0x24, 0xb8, 0x9f, 0x1e, 0x74, 0xef, 0xd2, 0xa1, 0x53, - 0xe8, 0xf5, 0x67, 0x78, 0x0a, 0xbd, 0x31, 0x99, 0x53, 0xe8, 0x90, 0xef, 0x14, 0x7a, 0x73, 0x42, - 0xa7, 0xd0, 0xa7, 0x26, 0x75, 0x0a, 0x7d, 0x7a, 0xac, 0x53, 0xe8, 0x33, 0xa7, 0x3a, 0x85, 0x7e, - 0x5c, 0x82, 0xd4, 0x66, 0xfc, 0x53, 0xc7, 0xdb, 0xff, 0x53, 0x8e, 0xb7, 0xef, 0x15, 0x21, 0x9a, - 0x88, 0xcf, 0x18, 0x98, 0xf4, 0x0e, 0xd4, 0x7b, 0xfa, 0x23, 0x1e, 0x38, 0x9d, 0xe7, 0xee, 0xe8, - 0x4d, 0x89, 0x81, 0x21, 0x1a, 0xf1, 0x01, 0xac, 0xf0, 0xfe, 0x86, 0xdc, 0x2e, 0x8c, 0xe8, 0x2a, - 0x08, 0x61, 0x24, 0x8d, 0xfe, 0x63, 0x4c, 0x8c, 0xf6, 0xaf, 0x8a, 0x20, 0x2f, 0xfa, 0x20, 0x14, - 0x2a, 0x7b, 0xd6, 0x23, 0x6a, 0xe6, 0x0e, 0x77, 0x8e, 0xdd, 0xe8, 0x2f, 0x7c, 0x34, 0xbc, 0x00, - 0x05, 0x3a, 0x37, 0xbe, 0x0b, 0x9f, 0x9b, 0xec, 0xbf, 0x1c, 0xc6, 0xf7, 0xb8, 0xef, 0x4e, 0x1a, - 0xdf, 0x45, 0x11, 0x2a, 0x19, 0xc2, 0xd6, 0xcf, 0xc3, 0x2f, 0x72, 0xbb, 0x18, 0x13, 0x61, 0x1c, - 0xca, 0xd6, 0xef, 0x8b, 0x34, 0x14, 0x52, 0x46, 0xfb, 0x97, 0x7e, 0xf0, 0xa3, 0x6b, 0x2f, 0xfc, - 0xf0, 0x47, 0xd7, 0x5e, 0xf8, 0xe8, 0x47, 0xd7, 0x5e, 0xf8, 0xd5, 0x93, 0x6b, 0x85, 0x1f, 0x9c, - 0x5c, 0x2b, 0xfc, 0xf0, 0xe4, 0x5a, 0xe1, 0xa3, 0x93, 0x6b, 0x85, 0xff, 0x78, 0x72, 0xad, 0xf0, - 0x97, 0xfe, 0xd3, 0xb5, 0x17, 0x7e, 0xf1, 0xcb, 0x51, 0x13, 0x16, 0x55, 0x13, 0x16, 0x95, 0xc0, - 0xc5, 0xfe, 0x41, 0x77, 0x91, 0x35, 0x21, 0x2a, 0x51, 
0x4d, 0xf8, 0xbf, 0x01, 0x00, 0x00, 0xff, - 0xff, 0xdf, 0xb8, 0xfb, 0xb9, 0x3d, 0x9f, 0x00, 0x00, + // 8114 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x5d, 0x6c, 0x1c, 0x59, + 0x76, 0xde, 0xf4, 0x7f, 0xf7, 0x69, 0xfe, 0xcd, 0x95, 0x46, 0x43, 0x69, 0x35, 0x6a, 0x6d, 0xad, + 0x77, 0x57, 0x8e, 0x6d, 0x32, 0x43, 0xef, 0xcc, 0xce, 0xda, 0xde, 0x9d, 0x61, 0x93, 0xa2, 0x44, + 0x89, 0x94, 0xb8, 0xa7, 0x49, 0xcd, 0xac, 0x27, 0xde, 0x49, 0xb1, 0xea, 0xb2, 0x59, 0xc3, 0xea, + 0xaa, 0xde, 0xaa, 0x6a, 0x4a, 0x1c, 0xc7, 0x58, 0x7b, 0x37, 0xc1, 0x6c, 0x90, 0x04, 0x09, 0xfc, + 0x64, 0x20, 0x70, 0x82, 0x04, 0x01, 0xfc, 0x60, 0x38, 0x0f, 0x41, 0x36, 0x0f, 0x01, 0xf2, 0xe3, + 0x20, 0x48, 0x36, 0xff, 0x8b, 0x20, 0x40, 0x26, 0x0f, 0x21, 0xb2, 0x0c, 0xf2, 0x90, 0x00, 0x09, + 0x8c, 0x18, 0x89, 0x1d, 0xc1, 0x88, 0x83, 0xfb, 0x57, 0x7f, 0x5d, 0x2d, 0x91, 0x5d, 0x4d, 0x8d, + 0xc6, 0x99, 0xb7, 0xaa, 0x7b, 0xcf, 0xfd, 0xce, 0xad, 0x5b, 0xf7, 0xe7, 0xdc, 0x73, 0xce, 0x3d, + 0x17, 0x6e, 0x75, 0xad, 0x60, 0x7f, 0xb0, 0xbb, 0x60, 0xb8, 0xbd, 0x45, 0x67, 0xd0, 0xd3, 0xfb, + 0x9e, 0xfb, 0x3e, 0x7f, 0xd8, 0xb3, 0xdd, 0x87, 0x8b, 0xfd, 0x83, 0xee, 0xa2, 0xde, 0xb7, 0xfc, + 0x28, 0xe5, 0xf0, 0x55, 0xdd, 0xee, 0xef, 0xeb, 0xaf, 0x2e, 0x76, 0xa9, 0x43, 0x3d, 0x3d, 0xa0, + 0xe6, 0x42, 0xdf, 0x73, 0x03, 0x97, 0x7c, 0x39, 0x02, 0x5a, 0x50, 0x40, 0x0b, 0xaa, 0xd8, 0x42, + 0xff, 0xa0, 0xbb, 0xc0, 0x80, 0xa2, 0x14, 0x05, 0x74, 0xe5, 0xa7, 0x62, 0x35, 0xe8, 0xba, 0x5d, + 0x77, 0x91, 0xe3, 0xed, 0x0e, 0xf6, 0xf8, 0x1b, 0x7f, 0xe1, 0x4f, 0x82, 0xcf, 0x15, 0xed, 0xe0, + 0x0d, 0x7f, 0xc1, 0x72, 0x59, 0xb5, 0x16, 0x0d, 0xd7, 0xa3, 0x8b, 0x87, 0x43, 0x75, 0xb9, 0xf2, + 0xa5, 0x88, 0xa6, 0xa7, 0x1b, 0xfb, 0x96, 0x43, 0xbd, 0x23, 0xf5, 0x2d, 0x8b, 0x1e, 0xf5, 0xdd, + 0x81, 0x67, 0xd0, 0x33, 0x95, 0xf2, 0x17, 0x7b, 0x34, 0xd0, 0xb3, 0x78, 0x2d, 0x8e, 0x2a, 0xe5, + 0x0d, 0x9c, 0xc0, 0xea, 0x0d, 0xb3, 0x79, 0xfd, 0x69, 0x05, 0x7c, 0x63, 0x9f, 0xf6, 
0xf4, 0xa1, + 0x72, 0x3f, 0x3d, 0xaa, 0xdc, 0x20, 0xb0, 0xec, 0x45, 0xcb, 0x09, 0xfc, 0xc0, 0x4b, 0x17, 0xd2, + 0x7e, 0x1b, 0xe0, 0xc2, 0xf2, 0xae, 0x1f, 0x78, 0xba, 0x11, 0x6c, 0xb9, 0xe6, 0x36, 0xed, 0xf5, + 0x6d, 0x3d, 0xa0, 0xe4, 0x00, 0xea, 0xec, 0x83, 0x4c, 0x3d, 0xd0, 0xe7, 0x0b, 0xd7, 0x0b, 0x37, + 0x9a, 0x4b, 0xcb, 0x0b, 0x63, 0xfe, 0xc0, 0x85, 0x4d, 0x09, 0xd4, 0x9e, 0x3a, 0x39, 0x6e, 0xd5, + 0xd5, 0x1b, 0x86, 0x0c, 0xc8, 0xaf, 0x15, 0x60, 0xca, 0x71, 0x4d, 0xda, 0xa1, 0x36, 0x35, 0x02, + 0xd7, 0x9b, 0x2f, 0x5e, 0x2f, 0xdd, 0x68, 0x2e, 0x7d, 0x73, 0x6c, 0x8e, 0x19, 0x5f, 0xb4, 0x70, + 0x2f, 0xc6, 0xe0, 0xa6, 0x13, 0x78, 0x47, 0xed, 0x8b, 0x3f, 0x38, 0x6e, 0xbd, 0x70, 0x72, 0xdc, + 0x9a, 0x8a, 0x67, 0x61, 0xa2, 0x26, 0x64, 0x07, 0x9a, 0x81, 0x6b, 0xb3, 0x26, 0xb3, 0x5c, 0xc7, + 0x9f, 0x2f, 0xf1, 0x8a, 0x5d, 0x5b, 0x10, 0x4d, 0xcd, 0xd8, 0x2f, 0xb0, 0x3e, 0xb6, 0x70, 0xf8, + 0xea, 0xc2, 0x76, 0x48, 0xd6, 0xbe, 0x20, 0x81, 0x9b, 0x51, 0x9a, 0x8f, 0x71, 0x1c, 0x42, 0x61, + 0xd6, 0xa7, 0xc6, 0xc0, 0xb3, 0x82, 0xa3, 0x15, 0xd7, 0x09, 0xe8, 0xa3, 0x60, 0xbe, 0xcc, 0x5b, + 0xf9, 0x0b, 0x59, 0xd0, 0x5b, 0xae, 0xd9, 0x49, 0x52, 0xb7, 0x2f, 0x9c, 0x1c, 0xb7, 0x66, 0x53, + 0x89, 0x98, 0xc6, 0x24, 0x0e, 0xcc, 0x59, 0x3d, 0xbd, 0x4b, 0xb7, 0x06, 0xb6, 0xdd, 0xa1, 0x86, + 0x47, 0x03, 0x7f, 0xbe, 0xc2, 0x3f, 0xe1, 0x46, 0x16, 0x9f, 0x0d, 0xd7, 0xd0, 0xed, 0xfb, 0xbb, + 0xef, 0x53, 0x23, 0x40, 0xba, 0x47, 0x3d, 0xea, 0x18, 0xb4, 0x3d, 0x2f, 0x3f, 0x66, 0x6e, 0x3d, + 0x85, 0x84, 0x43, 0xd8, 0xe4, 0x16, 0xbc, 0xd8, 0xf7, 0x2c, 0x97, 0x57, 0xc1, 0xd6, 0x7d, 0xff, + 0x9e, 0xde, 0xa3, 0xf3, 0xd5, 0xeb, 0x85, 0x1b, 0x8d, 0xf6, 0x65, 0x09, 0xf3, 0xe2, 0x56, 0x9a, + 0x00, 0x87, 0xcb, 0x90, 0x1b, 0x50, 0x57, 0x89, 0xf3, 0xb5, 0xeb, 0x85, 0x1b, 0x15, 0xd1, 0x77, + 0x54, 0x59, 0x0c, 0x73, 0xc9, 0x1a, 0xd4, 0xf5, 0xbd, 0x3d, 0xcb, 0x61, 0x94, 0x75, 0xde, 0x84, + 0x57, 0xb3, 0x3e, 0x6d, 0x59, 0xd2, 0x08, 0x1c, 0xf5, 0x86, 0x61, 0x59, 0x72, 0x07, 0x88, 0x4f, + 0xbd, 0x43, 0xcb, 0xa0, 
0xcb, 0x86, 0xe1, 0x0e, 0x9c, 0x80, 0xd7, 0xbd, 0xc1, 0xeb, 0x7e, 0x45, + 0xd6, 0x9d, 0x74, 0x86, 0x28, 0x30, 0xa3, 0x14, 0x79, 0x0b, 0xe6, 0xe4, 0x58, 0x8d, 0x5a, 0x01, + 0x38, 0xd2, 0x45, 0xd6, 0x90, 0x98, 0xca, 0xc3, 0x21, 0x6a, 0x62, 0xc2, 0x55, 0x7d, 0x10, 0xb8, + 0x3d, 0x06, 0x99, 0x64, 0xba, 0xed, 0x1e, 0x50, 0x67, 0xbe, 0x79, 0xbd, 0x70, 0xa3, 0xde, 0xbe, + 0x7e, 0x72, 0xdc, 0xba, 0xba, 0xfc, 0x04, 0x3a, 0x7c, 0x22, 0x0a, 0xb9, 0x0f, 0x0d, 0xd3, 0xf1, + 0xb7, 0x5c, 0xdb, 0x32, 0x8e, 0xe6, 0xa7, 0x78, 0x05, 0x5f, 0x95, 0x9f, 0xda, 0x58, 0xbd, 0xd7, + 0x11, 0x19, 0x8f, 0x8f, 0x5b, 0x57, 0x87, 0xa7, 0xd4, 0x85, 0x30, 0x1f, 0x23, 0x0c, 0xb2, 0xc9, + 0x01, 0x57, 0x5c, 0x67, 0xcf, 0xea, 0xce, 0x4f, 0xf3, 0xbf, 0x71, 0x7d, 0x44, 0x87, 0x5e, 0xbd, + 0xd7, 0x11, 0x74, 0xed, 0x69, 0xc9, 0x4e, 0xbc, 0x62, 0x84, 0x40, 0x4c, 0x98, 0x51, 0x93, 0xf1, + 0x8a, 0xad, 0x5b, 0x3d, 0x7f, 0x7e, 0x86, 0x77, 0xde, 0x1f, 0x1b, 0x81, 0x89, 0x71, 0xe2, 0xf6, + 0x25, 0xf9, 0x29, 0x33, 0x89, 0x64, 0x1f, 0x53, 0x98, 0x57, 0xde, 0x84, 0x17, 0x87, 0xe6, 0x06, + 0x32, 0x07, 0xa5, 0x03, 0x7a, 0xc4, 0xa7, 0xbe, 0x06, 0xb2, 0x47, 0x72, 0x11, 0x2a, 0x87, 0xba, + 0x3d, 0xa0, 0xf3, 0x45, 0x9e, 0x26, 0x5e, 0x7e, 0xa6, 0xf8, 0x46, 0x41, 0xfb, 0xeb, 0x25, 0x98, + 0x52, 0x33, 0x4e, 0xc7, 0x72, 0x0e, 0xc8, 0xdb, 0x50, 0xb2, 0xdd, 0xae, 0x9c, 0x37, 0x7f, 0x6e, + 0xec, 0x59, 0x6c, 0xc3, 0xed, 0xb6, 0x6b, 0x27, 0xc7, 0xad, 0xd2, 0x86, 0xdb, 0x45, 0x86, 0x48, + 0x0c, 0xa8, 0x1c, 0xe8, 0x7b, 0x07, 0x3a, 0xaf, 0x43, 0x73, 0xa9, 0x3d, 0x36, 0xf4, 0x5d, 0x86, + 0xc2, 0xea, 0xda, 0x6e, 0x9c, 0x1c, 0xb7, 0x2a, 0xfc, 0x15, 0x05, 0x36, 0x71, 0xa1, 0xb1, 0x6b, + 0xeb, 0xc6, 0xc1, 0xbe, 0x6b, 0xd3, 0xf9, 0x52, 0x4e, 0x46, 0x6d, 0x85, 0x24, 0x7e, 0x73, 0xf8, + 0x8a, 0x11, 0x0f, 0x62, 0x40, 0x75, 0x60, 0xfa, 0x96, 0x73, 0x20, 0xe7, 0xc0, 0x37, 0xc7, 0xe6, + 0xb6, 0xb3, 0xca, 0xbf, 0x09, 0x4e, 0x8e, 0x5b, 0x55, 0xf1, 0x8c, 0x12, 0x5a, 0xfb, 0xfd, 0x29, + 0x98, 0x51, 0x3f, 0xe9, 0x01, 0xf5, 0x02, 0xfa, 0x88, 0x5c, 
0x87, 0xb2, 0xc3, 0x86, 0x26, 0xff, + 0xc9, 0xed, 0x29, 0xd9, 0x5d, 0xca, 0x7c, 0x48, 0xf2, 0x1c, 0x56, 0x33, 0xd1, 0x55, 0x64, 0x83, + 0x8f, 0x5f, 0xb3, 0x0e, 0x87, 0x11, 0x35, 0x13, 0xcf, 0x28, 0xa1, 0xc9, 0xbb, 0x50, 0xe6, 0x1f, + 0x2f, 0x9a, 0xfa, 0xab, 0xe3, 0xb3, 0x60, 0x9f, 0x5e, 0x67, 0x5f, 0xc0, 0x3f, 0x9c, 0x83, 0xb2, + 0xae, 0x38, 0x30, 0xf7, 0x64, 0xc3, 0xfe, 0x5c, 0x8e, 0x86, 0x5d, 0x13, 0x5d, 0x71, 0x67, 0x75, + 0x0d, 0x19, 0x22, 0xf9, 0x8b, 0x05, 0x78, 0xd1, 0x70, 0x9d, 0x40, 0x67, 0x72, 0x86, 0x5a, 0x64, + 0xe7, 0x2b, 0x9c, 0xcf, 0x9d, 0xb1, 0xf9, 0xac, 0xa4, 0x11, 0xdb, 0x2f, 0xb1, 0x35, 0x63, 0x28, + 0x19, 0x87, 0x79, 0x93, 0xbf, 0x5c, 0x80, 0x97, 0xd8, 0x5c, 0x3e, 0x44, 0xcc, 0x57, 0xa0, 0xc9, + 0xd6, 0xea, 0xf2, 0xc9, 0x71, 0xeb, 0xa5, 0xf5, 0x2c, 0x66, 0x98, 0x5d, 0x07, 0x56, 0xbb, 0x0b, + 0xfa, 0xb0, 0x58, 0xc2, 0x57, 0xb7, 0xe6, 0xd2, 0xc6, 0x24, 0x45, 0x9d, 0xf6, 0x67, 0x64, 0x57, + 0xce, 0x92, 0xec, 0x30, 0xab, 0x16, 0xe4, 0x26, 0xd4, 0x0e, 0x5d, 0x7b, 0xd0, 0xa3, 0xfe, 0x7c, + 0x9d, 0x4f, 0xb1, 0x57, 0xb2, 0xa6, 0xd8, 0x07, 0x9c, 0xa4, 0x3d, 0x2b, 0xe1, 0x6b, 0xe2, 0xdd, + 0x47, 0x55, 0x96, 0x58, 0x50, 0xb5, 0xad, 0x9e, 0x15, 0xf8, 0x7c, 0xe1, 0x6c, 0x2e, 0xdd, 0x1c, + 0xfb, 0xb3, 0xc4, 0x10, 0xdd, 0xe0, 0x60, 0x62, 0xd4, 0x88, 0x67, 0x94, 0x0c, 0xd8, 0x54, 0xe8, + 0x1b, 0xba, 0x2d, 0x16, 0xd6, 0xe6, 0xd2, 0xd7, 0xc6, 0x1f, 0x36, 0x0c, 0xa5, 0x3d, 0x2d, 0xbf, + 0xa9, 0xc2, 0x5f, 0x51, 0x60, 0x93, 0x5f, 0x80, 0x99, 0xc4, 0xdf, 0xf4, 0xe7, 0x9b, 0xbc, 0x75, + 0x5e, 0xc9, 0x6a, 0x9d, 0x90, 0x2a, 0x5a, 0x79, 0x12, 0x3d, 0xc4, 0xc7, 0x14, 0x18, 0xb9, 0x0b, + 0x75, 0xdf, 0x32, 0xa9, 0xa1, 0x7b, 0xfe, 0xfc, 0xd4, 0x69, 0x80, 0xe7, 0x24, 0x70, 0xbd, 0x23, + 0x8b, 0x61, 0x08, 0x40, 0x16, 0x00, 0xfa, 0xba, 0x17, 0x58, 0x42, 0x50, 0x9d, 0xe6, 0x42, 0xd3, + 0xcc, 0xc9, 0x71, 0x0b, 0xb6, 0xc2, 0x54, 0x8c, 0x51, 0x30, 0x7a, 0x56, 0x76, 0xdd, 0xe9, 0x0f, + 0x02, 0xb1, 0xb0, 0x36, 0x04, 0x7d, 0x27, 0x4c, 0xc5, 0x18, 0x05, 0xf9, 0xad, 0x02, 0x7c, 0x26, + 
0x7a, 0x1d, 0x1e, 0x64, 0xb3, 0x13, 0x1f, 0x64, 0xad, 0x93, 0xe3, 0xd6, 0x67, 0x3a, 0xa3, 0x59, + 0xe2, 0x93, 0xea, 0x43, 0x3e, 0x2c, 0xc0, 0xcc, 0xa0, 0x6f, 0xea, 0x01, 0xed, 0x04, 0x6c, 0xc7, + 0xd3, 0x3d, 0x9a, 0x9f, 0xe3, 0x55, 0xbc, 0x35, 0xfe, 0x2c, 0x98, 0x80, 0x8b, 0x7e, 0x73, 0x32, + 0x1d, 0x53, 0x6c, 0xb5, 0xb7, 0x61, 0x7a, 0x79, 0x10, 0xec, 0xbb, 0x9e, 0xf5, 0x01, 0x17, 0xff, + 0xc9, 0x1a, 0x54, 0x02, 0x2e, 0xc6, 0x09, 0x09, 0xe1, 0xf3, 0x59, 0x3f, 0x5d, 0x88, 0xd4, 0x77, + 0xe9, 0x91, 0x92, 0x4b, 0xc4, 0x4a, 0x2d, 0xc4, 0x3a, 0x51, 0x5c, 0xfb, 0xd3, 0x05, 0xa8, 0xb5, + 0x75, 0xe3, 0xc0, 0xdd, 0xdb, 0x23, 0xef, 0x40, 0xdd, 0x72, 0x02, 0xea, 0x1d, 0xea, 0xb6, 0x84, + 0x5d, 0x88, 0xc1, 0x86, 0x1b, 0xc2, 0xe8, 0xf3, 0xd8, 0xee, 0x8b, 0x31, 0x5a, 0x1d, 0xc8, 0x5d, + 0x0b, 0x97, 0x8c, 0xd7, 0x25, 0x06, 0x86, 0x68, 0xa4, 0x05, 0x15, 0x3f, 0xa0, 0x7d, 0x9f, 0xaf, + 0x81, 0xd3, 0xa2, 0x1a, 0x1d, 0x96, 0x80, 0x22, 0x5d, 0xfb, 0x6b, 0x05, 0x68, 0xb4, 0x75, 0xdf, + 0x32, 0xd8, 0x57, 0x92, 0x15, 0x28, 0x0f, 0x7c, 0xea, 0x9d, 0xed, 0xdb, 0xf8, 0xb2, 0xb5, 0xe3, + 0x53, 0x0f, 0x79, 0x61, 0x72, 0x1f, 0xea, 0x7d, 0xdd, 0xf7, 0x1f, 0xba, 0x9e, 0x29, 0x97, 0xde, + 0x53, 0x02, 0x89, 0x6d, 0x82, 0x2c, 0x8a, 0x21, 0x88, 0xd6, 0x84, 0x48, 0xf6, 0xd0, 0x7e, 0xb7, + 0x00, 0x17, 0xda, 0x83, 0xbd, 0x3d, 0xea, 0x49, 0xa9, 0x58, 0xca, 0x9b, 0x14, 0x2a, 0x1e, 0x35, + 0x2d, 0x5f, 0xd6, 0x7d, 0x75, 0xec, 0x8e, 0x82, 0x0c, 0x45, 0x8a, 0xb7, 0xbc, 0xbd, 0x78, 0x02, + 0x0a, 0x74, 0x32, 0x80, 0xc6, 0xfb, 0x94, 0xed, 0xc6, 0xa9, 0xde, 0x93, 0x5f, 0x77, 0x7b, 0x6c, + 0x56, 0x77, 0x68, 0xd0, 0xe1, 0x48, 0x71, 0x69, 0x3a, 0x4c, 0xc4, 0x88, 0x93, 0xf6, 0xdb, 0x15, + 0x98, 0x5a, 0x71, 0x7b, 0xbb, 0x96, 0x43, 0xcd, 0x9b, 0x66, 0x97, 0x92, 0xf7, 0xa0, 0x4c, 0xcd, + 0x2e, 0x95, 0x5f, 0x3b, 0xbe, 0xe0, 0xc1, 0xc0, 0x22, 0xf1, 0x89, 0xbd, 0x21, 0x07, 0x26, 0x1b, + 0x30, 0xb3, 0xe7, 0xb9, 0x3d, 0x31, 0x97, 0x6f, 0x1f, 0xf5, 0xa5, 0xec, 0xdc, 0xfe, 0x31, 0x35, + 0x70, 0xd6, 0x12, 0xb9, 0x8f, 0x8f, 
0x5b, 0x10, 0xbd, 0x61, 0xaa, 0x2c, 0x79, 0x07, 0xe6, 0xa3, + 0x94, 0x70, 0x52, 0x5b, 0x61, 0xdb, 0x19, 0x2e, 0x3b, 0x55, 0xda, 0x57, 0x4f, 0x8e, 0x5b, 0xf3, + 0x6b, 0x23, 0x68, 0x70, 0x64, 0x69, 0x36, 0x55, 0xcc, 0x45, 0x99, 0x62, 0xa1, 0x91, 0x22, 0xd3, + 0x84, 0x56, 0x30, 0xbe, 0xef, 0x5b, 0x4b, 0xb1, 0xc0, 0x21, 0xa6, 0x64, 0x0d, 0xa6, 0x02, 0x37, + 0xd6, 0x5e, 0x15, 0xde, 0x5e, 0x9a, 0x52, 0x54, 0x6c, 0xbb, 0x23, 0x5b, 0x2b, 0x51, 0x8e, 0x20, + 0x5c, 0x52, 0xef, 0xa9, 0x96, 0xaa, 0xf2, 0x96, 0xba, 0x72, 0x72, 0xdc, 0xba, 0xb4, 0x9d, 0x49, + 0x81, 0x23, 0x4a, 0x92, 0x5f, 0x29, 0xc0, 0x8c, 0xca, 0x92, 0x6d, 0x54, 0x9b, 0x64, 0x1b, 0x11, + 0xd6, 0x23, 0xb6, 0x13, 0x0c, 0x30, 0xc5, 0x50, 0xfb, 0x7e, 0x0d, 0x1a, 0xe1, 0x54, 0x4f, 0x3e, + 0x07, 0x15, 0xae, 0x82, 0x90, 0x12, 0x7c, 0xb8, 0x86, 0x73, 0x4d, 0x05, 0x8a, 0x3c, 0xf2, 0x79, + 0xa8, 0x19, 0x6e, 0xaf, 0xa7, 0x3b, 0x26, 0x57, 0x2b, 0x35, 0xda, 0x4d, 0x26, 0xba, 0xac, 0x88, + 0x24, 0x54, 0x79, 0xe4, 0x2a, 0x94, 0x75, 0xaf, 0x2b, 0x34, 0x3c, 0x0d, 0x31, 0x1f, 0x2d, 0x7b, + 0x5d, 0x1f, 0x79, 0x2a, 0xf9, 0x0a, 0x94, 0xa8, 0x73, 0x38, 0x5f, 0x1e, 0x2d, 0x1b, 0xdd, 0x74, + 0x0e, 0x1f, 0xe8, 0x5e, 0xbb, 0x29, 0xeb, 0x50, 0xba, 0xe9, 0x1c, 0x22, 0x2b, 0x43, 0x36, 0xa0, + 0x46, 0x9d, 0x43, 0xf6, 0xef, 0xa5, 0xea, 0xe5, 0xb3, 0x23, 0x8a, 0x33, 0x12, 0xb9, 0x4d, 0x08, + 0x25, 0x2c, 0x99, 0x8c, 0x0a, 0x82, 0x7c, 0x03, 0xa6, 0x84, 0xb0, 0xb5, 0xc9, 0xfe, 0x89, 0x3f, + 0x5f, 0xe5, 0x90, 0xad, 0xd1, 0xd2, 0x1a, 0xa7, 0x8b, 0x54, 0x5d, 0xb1, 0x44, 0x1f, 0x13, 0x50, + 0xe4, 0x1b, 0xd0, 0x50, 0x3b, 0x63, 0xf5, 0x67, 0x33, 0xb5, 0x44, 0x6a, 0x3b, 0x8d, 0xf4, 0x5b, + 0x03, 0xcb, 0xa3, 0x3d, 0xea, 0x04, 0x7e, 0xfb, 0x45, 0xa5, 0x37, 0x50, 0xb9, 0x3e, 0x46, 0x68, + 0x64, 0x77, 0x58, 0xdd, 0x25, 0x74, 0x35, 0x9f, 0x1b, 0x31, 0xab, 0x8f, 0xa1, 0xeb, 0xfa, 0x26, + 0xcc, 0x86, 0xfa, 0x28, 0xa9, 0xd2, 0x10, 0xda, 0x9b, 0x2f, 0xb1, 0xe2, 0xeb, 0xc9, 0xac, 0xc7, + 0xc7, 0xad, 0x57, 0x32, 0x94, 0x1a, 0x11, 0x01, 0xa6, 0xc1, 0xc8, 0x07, 
0x30, 0xe3, 0x51, 0xdd, + 0xb4, 0x1c, 0xea, 0xfb, 0x5b, 0x9e, 0xbb, 0x9b, 0x5f, 0xf2, 0xe4, 0x28, 0xa2, 0xdb, 0x63, 0x02, + 0x19, 0x53, 0x9c, 0xc8, 0x43, 0x98, 0xb6, 0xad, 0x43, 0x1a, 0xb1, 0x6e, 0x4e, 0x84, 0xf5, 0x8b, + 0x27, 0xc7, 0xad, 0xe9, 0x8d, 0x38, 0x30, 0x26, 0xf9, 0x30, 0x49, 0xa5, 0xef, 0x7a, 0x81, 0x12, + 0x4f, 0x3f, 0xfb, 0x44, 0xf1, 0x74, 0xcb, 0xf5, 0x82, 0x68, 0x10, 0xb2, 0x37, 0x1f, 0x45, 0x71, + 0xed, 0x6f, 0x57, 0x60, 0x78, 0x13, 0x97, 0xec, 0x71, 0x85, 0x49, 0xf7, 0xb8, 0x74, 0x6f, 0x10, + 0x6b, 0xcf, 0x1b, 0xb2, 0xd8, 0x04, 0x7a, 0x44, 0x46, 0xaf, 0x2e, 0x4d, 0xba, 0x57, 0x3f, 0x37, + 0x13, 0xcf, 0x70, 0xf7, 0xaf, 0x7e, 0x7c, 0xdd, 0xbf, 0xf6, 0x6c, 0xba, 0xbf, 0xf6, 0xbd, 0x32, + 0xcc, 0xac, 0xea, 0xb4, 0xe7, 0x3a, 0x4f, 0xdd, 0xc7, 0x17, 0x9e, 0x8b, 0x7d, 0xfc, 0x0d, 0xa8, + 0x7b, 0xb4, 0x6f, 0x5b, 0x86, 0x2e, 0xc4, 0x75, 0xa9, 0x37, 0x47, 0x99, 0x86, 0x61, 0xee, 0x08, + 0xfd, 0x4d, 0xe9, 0xb9, 0xd4, 0xdf, 0x94, 0x3f, 0x7e, 0xfd, 0x8d, 0xf6, 0x2b, 0x45, 0xe0, 0xa2, + 0x2d, 0xb9, 0x0e, 0x65, 0x26, 0xb6, 0xa5, 0xb5, 0x86, 0x7c, 0xb4, 0xf0, 0x1c, 0x72, 0x05, 0x8a, + 0x81, 0x2b, 0xa7, 0x1b, 0x90, 0xf9, 0xc5, 0x6d, 0x17, 0x8b, 0x81, 0x4b, 0x3e, 0x00, 0x30, 0x5c, + 0xc7, 0xb4, 0x94, 0x39, 0x29, 0xdf, 0x87, 0xad, 0xb9, 0xde, 0x43, 0xdd, 0x33, 0x57, 0x42, 0x44, + 0xb1, 0x83, 0x8f, 0xde, 0x31, 0xc6, 0x8d, 0xbc, 0x09, 0x55, 0xd7, 0x59, 0x1b, 0xd8, 0x36, 0x6f, + 0xd0, 0x46, 0xfb, 0x8b, 0x27, 0xc7, 0xad, 0xea, 0x7d, 0x9e, 0xf2, 0xf8, 0xb8, 0x75, 0x59, 0xec, + 0x88, 0xd8, 0xdb, 0xdb, 0x9e, 0x15, 0x58, 0x4e, 0x37, 0xdc, 0xd0, 0xca, 0x62, 0xda, 0xaf, 0x16, + 0xa0, 0xb9, 0x66, 0x3d, 0xa2, 0xe6, 0xdb, 0x96, 0x63, 0xba, 0x0f, 0x09, 0x42, 0xd5, 0xa6, 0x4e, + 0x37, 0xd8, 0x1f, 0x73, 0xc7, 0x29, 0xf4, 0x3a, 0x1c, 0x01, 0x25, 0x12, 0x59, 0x84, 0x86, 0xd8, + 0xaf, 0x58, 0x4e, 0x97, 0xb7, 0x61, 0x3d, 0x9a, 0xe9, 0x3b, 0x2a, 0x03, 0x23, 0x1a, 0xed, 0x08, + 0x5e, 0x1c, 0x6a, 0x06, 0x62, 0x42, 0x39, 0xd0, 0xbb, 0x6a, 0x51, 0x59, 0x1b, 0xbb, 0x81, 0xb7, + 0xf5, 0x6e, 
0xac, 0x71, 0xb9, 0x54, 0xb8, 0xad, 0x33, 0xa9, 0x90, 0xa1, 0x6b, 0x7f, 0x50, 0x80, + 0xfa, 0xda, 0xc0, 0x31, 0xf8, 0xa6, 0xfe, 0xe9, 0xda, 0x64, 0x25, 0x62, 0x16, 0x33, 0x45, 0xcc, + 0x01, 0x54, 0x0f, 0x1e, 0x86, 0x22, 0x68, 0x73, 0x69, 0x73, 0xfc, 0x5e, 0x21, 0xab, 0xb4, 0x70, + 0x97, 0xe3, 0x09, 0x63, 0xe7, 0x8c, 0xac, 0x50, 0xf5, 0xee, 0xdb, 0x9c, 0xa9, 0x64, 0x76, 0xe5, + 0x2b, 0xd0, 0x8c, 0x91, 0x9d, 0xc9, 0xee, 0xf1, 0x77, 0xca, 0x50, 0xbd, 0xd5, 0xe9, 0x2c, 0x6f, + 0xad, 0x93, 0xd7, 0xa0, 0x29, 0xed, 0x60, 0xf7, 0xa2, 0x36, 0x08, 0xcd, 0xa0, 0x9d, 0x28, 0x0b, + 0xe3, 0x74, 0x4c, 0x80, 0xf7, 0xa8, 0x6e, 0xf7, 0xe4, 0x60, 0x09, 0x65, 0x07, 0x64, 0x89, 0x28, + 0xf2, 0x88, 0x0e, 0x33, 0x03, 0x9f, 0x7a, 0xac, 0x09, 0xc5, 0x7e, 0x5f, 0x0e, 0x9b, 0x53, 0x6a, + 0x04, 0xf8, 0x02, 0xb3, 0x93, 0x00, 0xc0, 0x14, 0x20, 0x79, 0x03, 0xea, 0xfa, 0x20, 0xd8, 0xe7, + 0x5b, 0x2e, 0x31, 0x36, 0xae, 0x72, 0x33, 0xa1, 0x4c, 0x7b, 0x7c, 0xdc, 0x9a, 0xba, 0x8b, 0xed, + 0xd7, 0xd4, 0x3b, 0x86, 0xd4, 0xac, 0x72, 0x4a, 0xc7, 0x20, 0x2b, 0x57, 0x39, 0x73, 0xe5, 0xb6, + 0x12, 0x00, 0x98, 0x02, 0x24, 0xef, 0xc2, 0xd4, 0x01, 0x3d, 0x0a, 0xf4, 0x5d, 0xc9, 0xa0, 0x7a, + 0x16, 0x06, 0x73, 0x4c, 0xe8, 0xbf, 0x1b, 0x2b, 0x8e, 0x09, 0x30, 0xe2, 0xc3, 0xc5, 0x03, 0xea, + 0xed, 0x52, 0xcf, 0x95, 0xfa, 0x0a, 0xc9, 0xa4, 0x76, 0x16, 0x26, 0xf3, 0x27, 0xc7, 0xad, 0x8b, + 0x77, 0x33, 0x60, 0x30, 0x13, 0x5c, 0xfb, 0x3f, 0x45, 0x98, 0xbd, 0x25, 0x1c, 0x11, 0x5c, 0x4f, + 0x48, 0x1e, 0xe4, 0x32, 0x94, 0xbc, 0xfe, 0x80, 0xf7, 0x9c, 0x92, 0x30, 0x35, 0xe0, 0xd6, 0x0e, + 0xb2, 0x34, 0xf2, 0x0e, 0xd4, 0x4d, 0x39, 0x65, 0x48, 0x75, 0xc9, 0x58, 0xaa, 0x2d, 0xf5, 0x86, + 0x21, 0x1a, 0xdb, 0x1b, 0xf6, 0xfc, 0x6e, 0xc7, 0xfa, 0x80, 0x4a, 0x0d, 0x02, 0xdf, 0x1b, 0x6e, + 0x8a, 0x24, 0x54, 0x79, 0x6c, 0x55, 0x3d, 0xa0, 0x47, 0x62, 0xff, 0x5c, 0x8e, 0x56, 0xd5, 0xbb, + 0x32, 0x0d, 0xc3, 0x5c, 0xd2, 0x52, 0x83, 0x85, 0xf5, 0x82, 0xb2, 0xd0, 0xfd, 0x3c, 0x60, 0x09, + 0x72, 0xdc, 0xb0, 0x29, 0xf3, 0x7d, 0x2b, 0x08, 
0xa8, 0x27, 0x7f, 0xe3, 0x58, 0x53, 0xe6, 0x1d, + 0x8e, 0x80, 0x12, 0x89, 0xfc, 0x04, 0x34, 0x38, 0x78, 0xdb, 0x76, 0x77, 0xf9, 0x8f, 0x6b, 0x08, + 0x2d, 0xd0, 0x03, 0x95, 0x88, 0x51, 0xbe, 0xf6, 0x87, 0x45, 0xb8, 0x74, 0x8b, 0x06, 0x42, 0xaa, + 0x59, 0xa5, 0x7d, 0xdb, 0x3d, 0x62, 0xf2, 0x34, 0xd2, 0x6f, 0x91, 0xb7, 0x00, 0x2c, 0x7f, 0xb7, + 0x73, 0x68, 0xf0, 0x71, 0x20, 0xc6, 0xf0, 0x75, 0x39, 0x24, 0x61, 0xbd, 0xd3, 0x96, 0x39, 0x8f, + 0x13, 0x6f, 0x18, 0x2b, 0x13, 0x6d, 0xc8, 0x8b, 0x4f, 0xd8, 0x90, 0x77, 0x00, 0xfa, 0x91, 0x54, + 0x5e, 0xe2, 0x94, 0x3f, 0xad, 0xd8, 0x9c, 0x45, 0x20, 0x8f, 0xc1, 0xe4, 0x91, 0x93, 0x1d, 0x98, + 0x33, 0xe9, 0x9e, 0x3e, 0xb0, 0x83, 0x70, 0x27, 0x21, 0x07, 0xf1, 0xe9, 0x37, 0x23, 0xa1, 0x93, + 0xc4, 0x6a, 0x0a, 0x09, 0x87, 0xb0, 0xb5, 0xbf, 0x5b, 0x82, 0x2b, 0xb7, 0x68, 0x10, 0xea, 0xe8, + 0xe4, 0xec, 0xd8, 0xe9, 0x53, 0x83, 0xfd, 0x85, 0x0f, 0x0b, 0x50, 0xb5, 0xf5, 0x5d, 0x6a, 0xb3, + 0xd5, 0x8b, 0x7d, 0xcd, 0x7b, 0x63, 0x2f, 0x04, 0xa3, 0xb9, 0x2c, 0x6c, 0x70, 0x0e, 0xa9, 0xa5, + 0x41, 0x24, 0xa2, 0x64, 0xcf, 0x26, 0x75, 0xc3, 0x1e, 0xf8, 0x81, 0xd8, 0xd9, 0x49, 0x79, 0x32, + 0x9c, 0xd4, 0x57, 0xa2, 0x2c, 0x8c, 0xd3, 0x91, 0x25, 0x00, 0xc3, 0xb6, 0xa8, 0x13, 0xf0, 0x52, + 0x62, 0x5c, 0x11, 0xf5, 0x7f, 0x57, 0xc2, 0x1c, 0x8c, 0x51, 0x31, 0x56, 0x3d, 0xd7, 0xb1, 0x02, + 0x57, 0xb0, 0x2a, 0x27, 0x59, 0x6d, 0x46, 0x59, 0x18, 0xa7, 0xe3, 0xc5, 0x68, 0xe0, 0x59, 0x86, + 0xcf, 0x8b, 0x55, 0x52, 0xc5, 0xa2, 0x2c, 0x8c, 0xd3, 0xb1, 0x35, 0x2f, 0xf6, 0xfd, 0x67, 0x5a, + 0xf3, 0x7e, 0xb3, 0x01, 0xd7, 0x12, 0xcd, 0x1a, 0xe8, 0x01, 0xdd, 0x1b, 0xd8, 0x1d, 0x1a, 0xa8, + 0x1f, 0x38, 0xe6, 0x5a, 0xf8, 0xe7, 0xa2, 0xff, 0x2e, 0xdc, 0x9f, 0x8c, 0xc9, 0xfc, 0xf7, 0xa1, + 0x0a, 0x9e, 0xea, 0xdf, 0x2f, 0x42, 0xc3, 0xd1, 0x03, 0x9f, 0x0f, 0x5c, 0x39, 0x46, 0x43, 0x31, + 0xec, 0x9e, 0xca, 0xc0, 0x88, 0x86, 0x6c, 0xc1, 0x45, 0xd9, 0xc4, 0x37, 0x1f, 0xb1, 0x3d, 0x3f, + 0xf5, 0x44, 0x59, 0xb9, 0x9c, 0xca, 0xb2, 0x17, 0x37, 0x33, 0x68, 0x30, 0xb3, 0x24, 
0xd9, 0x84, + 0x0b, 0x86, 0x70, 0x09, 0xa1, 0xb6, 0xab, 0x9b, 0x0a, 0x50, 0xa8, 0x44, 0xc3, 0xad, 0xd1, 0xca, + 0x30, 0x09, 0x66, 0x95, 0x4b, 0xf7, 0xe6, 0xea, 0x58, 0xbd, 0xb9, 0x36, 0x4e, 0x6f, 0xae, 0x8f, + 0xd7, 0x9b, 0x1b, 0xa7, 0xeb, 0xcd, 0xac, 0xe5, 0x59, 0x3f, 0xa2, 0x1e, 0x13, 0x4f, 0xc4, 0x0a, + 0x1b, 0xf3, 0x38, 0x0a, 0x5b, 0xbe, 0x93, 0x41, 0x83, 0x99, 0x25, 0xc9, 0x2e, 0x5c, 0x11, 0xe9, + 0x37, 0x1d, 0xc3, 0x3b, 0xea, 0xb3, 0x85, 0x27, 0x86, 0xdb, 0x4c, 0xe8, 0xa4, 0xaf, 0x74, 0x46, + 0x52, 0xe2, 0x13, 0x50, 0xc8, 0xcf, 0xc2, 0xb4, 0xf8, 0x4b, 0x9b, 0x7a, 0x9f, 0xc3, 0x0a, 0xff, + 0xa3, 0x97, 0x24, 0xec, 0xf4, 0x4a, 0x3c, 0x13, 0x93, 0xb4, 0x64, 0x19, 0x66, 0xfb, 0x87, 0x06, + 0x7b, 0x5c, 0xdf, 0xbb, 0x47, 0xa9, 0x49, 0x4d, 0x6e, 0xf0, 0x6c, 0xb4, 0x5f, 0x56, 0xda, 0x9d, + 0xad, 0x64, 0x36, 0xa6, 0xe9, 0xc9, 0x1b, 0x30, 0xe5, 0x07, 0xba, 0x17, 0x48, 0x45, 0xf0, 0xfc, + 0x8c, 0xf0, 0xcf, 0x52, 0x7a, 0xd2, 0x4e, 0x2c, 0x0f, 0x13, 0x94, 0x99, 0xeb, 0xc5, 0xec, 0xf9, + 0xad, 0x17, 0x79, 0x66, 0xab, 0x7f, 0x5a, 0x84, 0xeb, 0xb7, 0x68, 0xb0, 0xe9, 0x3a, 0x52, 0x8d, + 0x9e, 0xb5, 0xec, 0x9f, 0x4a, 0x8b, 0x9e, 0x5c, 0xb4, 0x8b, 0x13, 0x5d, 0xb4, 0x4b, 0x13, 0x5a, + 0xb4, 0xcb, 0xe7, 0xb8, 0x68, 0xff, 0xfd, 0x22, 0xbc, 0x9c, 0x68, 0xc9, 0x2d, 0xd7, 0x54, 0x13, + 0xfe, 0xa7, 0x0d, 0x78, 0x8a, 0x06, 0x7c, 0x2c, 0xe4, 0x4e, 0x6e, 0x08, 0x4d, 0x49, 0x3c, 0xdf, + 0x4d, 0x4b, 0x3c, 0xef, 0xe6, 0x59, 0xf9, 0x32, 0x38, 0x9c, 0x6a, 0xc5, 0xbb, 0x03, 0xc4, 0x93, + 0x66, 0xdb, 0x48, 0x9d, 0x2d, 0x85, 0x9e, 0xd0, 0x01, 0x14, 0x87, 0x28, 0x30, 0xa3, 0x14, 0xe9, + 0xc0, 0x4b, 0x3e, 0x75, 0x02, 0xcb, 0xa1, 0x76, 0x12, 0x4e, 0x48, 0x43, 0xaf, 0x48, 0xb8, 0x97, + 0x3a, 0x59, 0x44, 0x98, 0x5d, 0x36, 0xcf, 0x3c, 0xf0, 0x2f, 0x81, 0x8b, 0x9c, 0xa2, 0x69, 0x26, + 0x26, 0xb1, 0x7c, 0x98, 0x96, 0x58, 0xde, 0xcb, 0xff, 0xdf, 0xc6, 0x93, 0x56, 0x96, 0x00, 0xf8, + 0x5f, 0x88, 0x8b, 0x2b, 0xe1, 0x22, 0x8d, 0x61, 0x0e, 0xc6, 0xa8, 0xd8, 0x02, 0xa4, 0xda, 0x39, + 0x2e, 0xa9, 0x84, 0x0b, 
0x50, 0x27, 0x9e, 0x89, 0x49, 0xda, 0x91, 0xd2, 0x4e, 0x65, 0x6c, 0x69, + 0xe7, 0x0e, 0x90, 0x84, 0xe2, 0x51, 0xe0, 0x55, 0x93, 0xfe, 0xc7, 0xeb, 0x43, 0x14, 0x98, 0x51, + 0x6a, 0x44, 0x57, 0xae, 0x4d, 0xb6, 0x2b, 0xd7, 0xc7, 0xef, 0xca, 0xe4, 0x3d, 0xb8, 0xcc, 0x59, + 0xc9, 0xf6, 0x49, 0x02, 0x0b, 0xb9, 0xe7, 0xb3, 0x12, 0xf8, 0x32, 0x8e, 0x22, 0xc4, 0xd1, 0x18, + 0xec, 0xff, 0x18, 0x1e, 0x35, 0x19, 0x73, 0xdd, 0x1e, 0x2d, 0x13, 0xad, 0x64, 0xd0, 0x60, 0x66, + 0x49, 0xd6, 0xc5, 0x02, 0xd6, 0x0d, 0xf5, 0x5d, 0x9b, 0x9a, 0xd2, 0xff, 0x3a, 0xec, 0x62, 0xdb, + 0x1b, 0x1d, 0x99, 0x83, 0x31, 0xaa, 0x2c, 0x31, 0x65, 0xea, 0x8c, 0x62, 0xca, 0x2d, 0xae, 0xa5, + 0xdf, 0x4b, 0x48, 0x43, 0x52, 0xd6, 0x09, 0x3d, 0xea, 0x57, 0xd2, 0x04, 0x38, 0x5c, 0x86, 0x4b, + 0x89, 0x86, 0x67, 0xf5, 0x03, 0x3f, 0x89, 0x35, 0x93, 0x92, 0x12, 0x33, 0x68, 0x30, 0xb3, 0x24, + 0x93, 0xcf, 0xf7, 0xa9, 0x6e, 0x07, 0xfb, 0x49, 0xc0, 0xd9, 0xa4, 0x7c, 0x7e, 0x7b, 0x98, 0x04, + 0xb3, 0xca, 0x65, 0x2e, 0x48, 0x73, 0xcf, 0xa7, 0x58, 0xf5, 0x9d, 0x12, 0x5c, 0xbe, 0x45, 0x83, + 0xd0, 0x35, 0xed, 0x53, 0x35, 0xca, 0xc7, 0xa0, 0x46, 0xf9, 0x8d, 0x0a, 0x5c, 0xb8, 0x45, 0x83, + 0x21, 0x69, 0xec, 0xff, 0xd3, 0xe6, 0xdf, 0x84, 0x0b, 0x91, 0x37, 0x64, 0x27, 0x70, 0x3d, 0xb1, + 0x96, 0xa7, 0x76, 0xcb, 0x9d, 0x61, 0x12, 0xcc, 0x2a, 0x47, 0xbe, 0x01, 0x2f, 0xf3, 0xa5, 0xde, + 0xe9, 0x0a, 0xfd, 0xac, 0x50, 0x26, 0xc4, 0xce, 0xf3, 0xb4, 0x24, 0xe4, 0xcb, 0x9d, 0x6c, 0x32, + 0x1c, 0x55, 0x9e, 0x7c, 0x1b, 0xa6, 0xfa, 0x56, 0x9f, 0xda, 0x96, 0xc3, 0xe5, 0xb3, 0xdc, 0x4e, + 0x44, 0x5b, 0x31, 0xb0, 0x68, 0x03, 0x17, 0x4f, 0xc5, 0x04, 0xc3, 0xcc, 0x9e, 0x5a, 0x3f, 0xc7, + 0x9e, 0xfa, 0x3f, 0x8b, 0x50, 0xbb, 0xe5, 0xb9, 0x83, 0x7e, 0xfb, 0x88, 0x74, 0xa1, 0xfa, 0x90, + 0x1b, 0xcf, 0xa4, 0x69, 0x6a, 0xfc, 0x13, 0x05, 0xc2, 0x06, 0x17, 0x89, 0x44, 0xe2, 0x1d, 0x25, + 0x3c, 0xeb, 0xc4, 0x07, 0xf4, 0x88, 0x9a, 0xd2, 0x86, 0x16, 0x76, 0xe2, 0xbb, 0x2c, 0x11, 0x45, + 0x1e, 0xe9, 0xc1, 0xac, 0x6e, 0xdb, 0xee, 0x43, 0x6a, 0x6e, 
0xe8, 0x01, 0xb7, 0x7b, 0x4b, 0xdb, + 0xca, 0x59, 0xd5, 0xd2, 0xdc, 0x99, 0x61, 0x39, 0x09, 0x85, 0x69, 0x6c, 0xf2, 0x3e, 0xd4, 0xfc, + 0xc0, 0xf5, 0x94, 0xb0, 0xd5, 0x5c, 0x5a, 0x19, 0xff, 0xa7, 0xb7, 0xbf, 0xde, 0x11, 0x50, 0x42, + 0x67, 0x2f, 0x5f, 0x50, 0x31, 0xd0, 0x7e, 0xbd, 0x00, 0x70, 0x7b, 0x7b, 0x7b, 0x4b, 0x9a, 0x17, + 0x4c, 0x28, 0xeb, 0x83, 0xd0, 0x50, 0x39, 0xbe, 0x41, 0x30, 0xe1, 0xc8, 0x2b, 0x6d, 0x78, 0x83, + 0x60, 0x1f, 0x39, 0x3a, 0xf9, 0x71, 0xa8, 0x49, 0x01, 0x59, 0x36, 0x7b, 0xe8, 0x4f, 0x21, 0x85, + 0x68, 0x54, 0xf9, 0xda, 0xdf, 0x2a, 0x02, 0xac, 0x9b, 0x36, 0xed, 0xa8, 0x43, 0x20, 0x8d, 0x60, + 0xdf, 0xa3, 0xfe, 0xbe, 0x6b, 0x9b, 0x63, 0x5a, 0x53, 0xb9, 0xce, 0x7f, 0x5b, 0x81, 0x60, 0x84, + 0x47, 0x4c, 0x98, 0xf2, 0x03, 0xda, 0x57, 0xbe, 0xbd, 0x63, 0x1a, 0x51, 0xe6, 0x84, 0x5e, 0x24, + 0xc2, 0xc1, 0x04, 0x2a, 0xd1, 0xa1, 0x69, 0x39, 0x86, 0x18, 0x20, 0xed, 0xa3, 0x31, 0x3b, 0xd2, + 0x2c, 0xdb, 0x71, 0xac, 0x47, 0x30, 0x18, 0xc7, 0xd4, 0x7e, 0xa7, 0x08, 0x97, 0x38, 0x3f, 0x56, + 0x8d, 0x84, 0x07, 0x2f, 0xf9, 0x93, 0x43, 0x07, 0x56, 0xff, 0xf8, 0xe9, 0x58, 0x8b, 0xf3, 0x8e, + 0x9b, 0x34, 0xd0, 0x23, 0x79, 0x2e, 0x4a, 0x8b, 0x9d, 0x52, 0x1d, 0x40, 0xd9, 0x67, 0xf3, 0x95, + 0x68, 0xbd, 0xce, 0xd8, 0x5d, 0x28, 0xfb, 0x03, 0xf8, 0xec, 0x15, 0x5a, 0x8d, 0xf9, 0xac, 0xc5, + 0xd9, 0x91, 0x5f, 0x82, 0xaa, 0x1f, 0xe8, 0xc1, 0x40, 0x0d, 0xcd, 0x9d, 0x49, 0x33, 0xe6, 0xe0, + 0xd1, 0x3c, 0x22, 0xde, 0x51, 0x32, 0xd5, 0x7e, 0xa7, 0x00, 0x57, 0xb2, 0x0b, 0x6e, 0x58, 0x7e, + 0x40, 0xfe, 0xc4, 0x50, 0xb3, 0x9f, 0xf2, 0x8f, 0xb3, 0xd2, 0xbc, 0xd1, 0xc3, 0x33, 0x0d, 0x2a, + 0x25, 0xd6, 0xe4, 0x01, 0x54, 0xac, 0x80, 0xf6, 0xd4, 0xfe, 0xf2, 0xfe, 0x84, 0x3f, 0x3d, 0xb6, + 0xb4, 0x33, 0x2e, 0x28, 0x98, 0x69, 0xdf, 0x2b, 0x8e, 0xfa, 0x64, 0xbe, 0x7c, 0xd8, 0x49, 0x2f, + 0xf1, 0xbb, 0xf9, 0xbc, 0xc4, 0x93, 0x15, 0x1a, 0x76, 0x16, 0xff, 0x53, 0xc3, 0xce, 0xe2, 0xf7, + 0xf3, 0x3b, 0x8b, 0xa7, 0x9a, 0x61, 0xa4, 0xcf, 0xf8, 0x47, 0x25, 0xb8, 0xfa, 0xa4, 0x6e, 0xc3, + 
0xd6, 0x33, 0xd9, 0x3b, 0xf3, 0xae, 0x67, 0x4f, 0xee, 0x87, 0x64, 0x09, 0x2a, 0xfd, 0x7d, 0xdd, + 0x57, 0x42, 0xd9, 0xd5, 0xd0, 0xcd, 0x90, 0x25, 0x3e, 0x66, 0x93, 0x06, 0x17, 0xe6, 0xf8, 0x2b, + 0x0a, 0x52, 0x36, 0x1d, 0xf7, 0xa8, 0xef, 0x47, 0x3a, 0x81, 0x70, 0x3a, 0xde, 0x14, 0xc9, 0xa8, + 0xf2, 0x49, 0x00, 0x55, 0xa1, 0x62, 0x96, 0x2b, 0xd3, 0xf8, 0x8e, 0x5c, 0x19, 0x07, 0x0b, 0xa2, + 0x8f, 0x92, 0xd6, 0x0a, 0xc9, 0x8b, 0x2c, 0x40, 0x39, 0x88, 0xdc, 0xbc, 0xd5, 0xd6, 0xbc, 0x9c, + 0x21, 0x9f, 0x72, 0x3a, 0xb6, 0xb1, 0x77, 0x77, 0xb9, 0x52, 0xdd, 0x94, 0xf6, 0x73, 0xcb, 0x75, + 0xb8, 0x40, 0x56, 0x8a, 0x36, 0xf6, 0xf7, 0x87, 0x28, 0x30, 0xa3, 0x94, 0xf6, 0x6f, 0xea, 0x70, + 0x29, 0xbb, 0x3f, 0xb0, 0x76, 0x3b, 0xa4, 0x9e, 0xcf, 0xb0, 0x0b, 0xc9, 0x76, 0x7b, 0x20, 0x92, + 0x51, 0xe5, 0x7f, 0xa2, 0x1d, 0xce, 0x7e, 0xa3, 0x00, 0x97, 0x3d, 0x69, 0x23, 0x7a, 0x16, 0x4e, + 0x67, 0xaf, 0x08, 0x75, 0xc6, 0x08, 0x86, 0x38, 0xba, 0x2e, 0xe4, 0x6f, 0x14, 0x60, 0xbe, 0x97, + 0xd2, 0x73, 0x9c, 0xe3, 0x99, 0x4b, 0x7e, 0x8e, 0x62, 0x73, 0x04, 0x3f, 0x1c, 0x59, 0x13, 0xf2, + 0x6d, 0x68, 0xf6, 0x59, 0xbf, 0xf0, 0x03, 0xea, 0x18, 0xca, 0x41, 0x74, 0xfc, 0x91, 0xb4, 0x15, + 0x61, 0x85, 0x67, 0xae, 0xb8, 0x7c, 0x10, 0xcb, 0xc0, 0x38, 0xc7, 0xe7, 0xfc, 0x90, 0xe5, 0x0d, + 0xa8, 0xfb, 0x34, 0x08, 0x2c, 0xa7, 0x2b, 0xf6, 0x1b, 0x0d, 0x31, 0x56, 0x3a, 0x32, 0x0d, 0xc3, + 0x5c, 0xf2, 0x13, 0xd0, 0xe0, 0x26, 0xa7, 0x65, 0xaf, 0xeb, 0xcf, 0x37, 0xb8, 0xbb, 0xd8, 0xb4, + 0x70, 0x80, 0x93, 0x89, 0x18, 0xe5, 0x93, 0x2f, 0xc1, 0xd4, 0x2e, 0x1f, 0xbe, 0xf2, 0xdc, 0xbd, + 0xd0, 0x71, 0x71, 0x69, 0xad, 0x1d, 0x4b, 0xc7, 0x04, 0x15, 0x59, 0x02, 0xa0, 0xa1, 0x5d, 0x2e, + 0xad, 0xcf, 0x8a, 0x2c, 0x76, 0x18, 0xa3, 0x22, 0xaf, 0x40, 0x29, 0xb0, 0x7d, 0xae, 0xc3, 0xaa, + 0x47, 0x5b, 0xd0, 0xed, 0x8d, 0x0e, 0xb2, 0x74, 0xed, 0x0f, 0x0b, 0x30, 0x9b, 0x3a, 0x8e, 0xc4, + 0x8a, 0x0c, 0x3c, 0x5b, 0x4e, 0x23, 0x61, 0x91, 0x1d, 0xdc, 0x40, 0x96, 0x4e, 0xde, 0x93, 0x62, + 0x79, 0x31, 0x67, 0x88, 0x91, 0x7b, 
0x7a, 0xe0, 0x33, 0x39, 0x7c, 0x48, 0x22, 0xe7, 0x66, 0xbe, + 0xa8, 0x3e, 0x72, 0x1d, 0x88, 0x99, 0xf9, 0xa2, 0x3c, 0x4c, 0x50, 0xa6, 0x14, 0x7e, 0xe5, 0xd3, + 0x28, 0xfc, 0xb4, 0x5f, 0x2d, 0xc6, 0x5a, 0x40, 0x4a, 0xf6, 0x4f, 0x69, 0x81, 0x2f, 0xb0, 0x05, + 0x34, 0x5c, 0xdc, 0x1b, 0xf1, 0xf5, 0x8f, 0x2f, 0xc6, 0x32, 0x97, 0xbc, 0x2d, 0xda, 0xbe, 0x94, + 0xf3, 0x20, 0xf7, 0xf6, 0x46, 0x47, 0x78, 0x57, 0xa9, 0xbf, 0x16, 0xfe, 0x82, 0xf2, 0x39, 0xfd, + 0x02, 0xed, 0x9f, 0x97, 0xa0, 0x79, 0xc7, 0xdd, 0xfd, 0x84, 0x78, 0x50, 0x67, 0x2f, 0x53, 0xc5, + 0x8f, 0x71, 0x99, 0xda, 0x81, 0x97, 0x83, 0xc0, 0xee, 0x50, 0xc3, 0x75, 0x4c, 0x7f, 0x79, 0x2f, + 0xa0, 0xde, 0x9a, 0xe5, 0x58, 0xfe, 0x3e, 0x35, 0xa5, 0x39, 0xe9, 0x33, 0x27, 0xc7, 0xad, 0x97, + 0xb7, 0xb7, 0x37, 0xb2, 0x48, 0x70, 0x54, 0x59, 0x3e, 0x6d, 0x88, 0xb3, 0xa3, 0xfc, 0x6c, 0x95, + 0xf4, 0xb9, 0x11, 0xd3, 0x46, 0x2c, 0x1d, 0x13, 0x54, 0xda, 0x7f, 0x2c, 0x42, 0x23, 0x0c, 0x1e, + 0x41, 0x3e, 0x0f, 0xb5, 0x5d, 0xcf, 0x3d, 0xa0, 0x9e, 0xb0, 0xdc, 0xc9, 0xb3, 0x55, 0x6d, 0x91, + 0x84, 0x2a, 0x8f, 0x7c, 0x0e, 0x2a, 0x81, 0xdb, 0xb7, 0x8c, 0xb4, 0x42, 0x6d, 0x9b, 0x25, 0xa2, + 0xc8, 0xe3, 0x03, 0x81, 0xbb, 0x15, 0xf2, 0xaf, 0xaa, 0xc7, 0x06, 0x02, 0x4f, 0x45, 0x99, 0xab, + 0x06, 0x42, 0x79, 0xe2, 0x03, 0xe1, 0x0b, 0xa1, 0x08, 0x58, 0x49, 0x8e, 0xc4, 0x94, 0xd0, 0xf6, + 0x2e, 0x94, 0x7d, 0xdd, 0xb7, 0xe5, 0xf2, 0x96, 0x23, 0x5e, 0xc3, 0x72, 0x67, 0x43, 0xc6, 0x6b, + 0x58, 0xee, 0x6c, 0x20, 0x07, 0xd5, 0x7e, 0xbf, 0x08, 0x4d, 0xd1, 0xbe, 0x62, 0xf6, 0x98, 0x64, + 0x0b, 0xbf, 0xc9, 0x5d, 0x2e, 0xfc, 0x41, 0x8f, 0x7a, 0x5c, 0x1d, 0x25, 0x27, 0xc3, 0xb8, 0x1d, + 0x21, 0xca, 0x0c, 0xdd, 0x2e, 0xa2, 0xa4, 0x3f, 0xe2, 0x4d, 0xff, 0x61, 0x11, 0x1a, 0x1b, 0xd6, + 0x1e, 0x35, 0x8e, 0x0c, 0x9b, 0x9f, 0x36, 0x35, 0xa9, 0x4d, 0x03, 0x7a, 0xcb, 0xd3, 0x0d, 0xba, + 0x45, 0x3d, 0x8b, 0x07, 0x61, 0x62, 0xe3, 0x88, 0xcf, 0x54, 0xf2, 0xb4, 0xe9, 0xea, 0x08, 0x1a, + 0x1c, 0x59, 0x9a, 0xac, 0xc3, 0x94, 0x49, 0x7d, 0xcb, 0xa3, 0xe6, 0x56, 
0x6c, 0x43, 0xf3, 0x79, + 0xb5, 0x24, 0xad, 0xc6, 0xf2, 0x1e, 0x1f, 0xb7, 0xa6, 0x95, 0x22, 0x53, 0xec, 0x6c, 0x12, 0x45, + 0xd9, 0xd4, 0xd0, 0xd7, 0x07, 0x7e, 0x56, 0x1d, 0x63, 0x53, 0xc3, 0x56, 0x36, 0x09, 0x8e, 0x2a, + 0xab, 0x55, 0xa0, 0xb4, 0xe1, 0x76, 0xb5, 0xef, 0x95, 0x20, 0x8c, 0xd6, 0x45, 0xfe, 0x6c, 0x01, + 0x9a, 0xba, 0xe3, 0xb8, 0x81, 0x8c, 0x84, 0x25, 0x2c, 0xf5, 0x98, 0x3b, 0x28, 0xd8, 0xc2, 0x72, + 0x04, 0x2a, 0x8c, 0xbc, 0xa1, 0xe1, 0x39, 0x96, 0x83, 0x71, 0xde, 0x64, 0x90, 0xb2, 0x3b, 0x6f, + 0xe6, 0xaf, 0xc5, 0x29, 0xac, 0xcc, 0x57, 0xbe, 0x06, 0x73, 0xe9, 0xca, 0x9e, 0xc5, 0x6c, 0x94, + 0xcb, 0x80, 0x5f, 0x04, 0x88, 0x7c, 0x4f, 0x9e, 0x81, 0xb2, 0xcb, 0x4a, 0x28, 0xbb, 0xc6, 0x0f, + 0x99, 0x10, 0x55, 0x7a, 0xa4, 0x82, 0xeb, 0x5b, 0x29, 0x05, 0xd7, 0xfa, 0x24, 0x98, 0x3d, 0x59, + 0xa9, 0xb5, 0x0b, 0x17, 0x22, 0xda, 0x68, 0xcc, 0xdf, 0x4d, 0x8d, 0x4c, 0x21, 0xb3, 0x7d, 0x71, + 0xc4, 0xc8, 0x9c, 0x8d, 0x39, 0x03, 0x0d, 0x8f, 0x4d, 0xed, 0x6f, 0x16, 0x60, 0x2e, 0xce, 0x84, + 0x9f, 0xef, 0xfe, 0x32, 0x4c, 0x7b, 0x54, 0x37, 0xdb, 0x7a, 0x60, 0xec, 0x73, 0xb7, 0xf3, 0x02, + 0xf7, 0x13, 0xe7, 0x27, 0xd1, 0x30, 0x9e, 0x81, 0x49, 0x3a, 0xa2, 0x43, 0x93, 0x25, 0x6c, 0x5b, + 0x3d, 0xea, 0x0e, 0x82, 0x31, 0x35, 0xb8, 0x7c, 0xf3, 0x84, 0x11, 0x0c, 0xc6, 0x31, 0xb5, 0x8f, + 0x0a, 0x30, 0x13, 0xaf, 0xf0, 0xb9, 0x6b, 0xf7, 0xf6, 0x93, 0xda, 0xbd, 0x95, 0x09, 0xfc, 0xf7, + 0x11, 0x1a, 0xbd, 0xef, 0x34, 0xe3, 0x9f, 0xc6, 0xb5, 0x78, 0x71, 0xc5, 0x45, 0xe1, 0x89, 0x8a, + 0x8b, 0x4f, 0x7e, 0x10, 0xa8, 0x51, 0x12, 0x77, 0xf9, 0x39, 0x96, 0xb8, 0x3f, 0xce, 0x48, 0x52, + 0xb1, 0x68, 0x48, 0xd5, 0x1c, 0xd1, 0x90, 0x7a, 0x61, 0x34, 0xa4, 0xda, 0xc4, 0x26, 0xb6, 0xd3, + 0x44, 0x44, 0xaa, 0x3f, 0xd3, 0x88, 0x48, 0x8d, 0xf3, 0x8a, 0x88, 0x04, 0x79, 0x23, 0x22, 0x7d, + 0xb7, 0x00, 0x33, 0x66, 0xe2, 0xf4, 0xae, 0x3c, 0x37, 0x3f, 0xfe, 0x72, 0x96, 0x3c, 0x0c, 0x2c, + 0x8e, 0x6f, 0x25, 0xd3, 0x30, 0xc5, 0x32, 0x2b, 0x0e, 0xd1, 0xd4, 0xc7, 0x12, 0x87, 0x88, 0xfc, + 0x12, 0x34, 
0x6c, 0xb5, 0xd6, 0xc9, 0xe8, 0x8c, 0x1b, 0x13, 0xe9, 0x92, 0x12, 0x33, 0x3a, 0x21, + 0x10, 0x26, 0x61, 0xc4, 0x51, 0xfb, 0xbd, 0x5a, 0x7c, 0x41, 0x7c, 0xd6, 0xf6, 0x83, 0xd7, 0x93, + 0xf6, 0x83, 0xeb, 0x69, 0xfb, 0xc1, 0xd0, 0x6a, 0x2e, 0x6d, 0x08, 0x3f, 0x19, 0x5b, 0x27, 0x4a, + 0x3c, 0x00, 0x52, 0xd8, 0xe5, 0x32, 0xd6, 0x8a, 0x65, 0x98, 0x95, 0x42, 0x80, 0xca, 0xe4, 0x93, + 0xec, 0x74, 0xe4, 0xf1, 0xb5, 0x9a, 0xcc, 0xc6, 0x34, 0x3d, 0x63, 0xe8, 0xab, 0x38, 0xb8, 0x62, + 0x37, 0x14, 0xf5, 0x71, 0x15, 0xa3, 0x36, 0xa4, 0x60, 0x3b, 0x27, 0x8f, 0xea, 0xbe, 0xb4, 0x02, + 0xc4, 0x76, 0x4e, 0xc8, 0x53, 0x51, 0xe6, 0xc6, 0x4d, 0x21, 0xb5, 0xa7, 0x98, 0x42, 0x74, 0x68, + 0xda, 0xba, 0x1f, 0x88, 0xce, 0x64, 0xca, 0xd9, 0xe4, 0x8f, 0x9d, 0x6e, 0xdd, 0x67, 0xb2, 0x44, + 0x24, 0xc0, 0x6f, 0x44, 0x30, 0x18, 0xc7, 0x24, 0x26, 0x4c, 0xb1, 0x57, 0x3e, 0xb3, 0x98, 0xcb, + 0x81, 0x8c, 0x16, 0x77, 0x16, 0x1e, 0xa1, 0x06, 0x6f, 0x23, 0x86, 0x83, 0x09, 0xd4, 0x11, 0xd6, + 0x12, 0x18, 0xc7, 0x5a, 0x42, 0x7e, 0x56, 0x08, 0x6e, 0x47, 0xe1, 0x6f, 0x6d, 0xf2, 0xdf, 0x1a, + 0x7a, 0x8b, 0x62, 0x3c, 0x13, 0x93, 0xb4, 0xac, 0x57, 0x0c, 0x64, 0x33, 0xa8, 0xe2, 0x53, 0xc9, + 0x5e, 0xb1, 0x93, 0xcc, 0xc6, 0x34, 0x3d, 0xd9, 0x82, 0x8b, 0x61, 0x52, 0xbc, 0x1a, 0xd3, 0x1c, + 0x27, 0x74, 0xdf, 0xdb, 0xc9, 0xa0, 0xc1, 0xcc, 0x92, 0xfc, 0x3c, 0xcc, 0xc0, 0xf3, 0xa8, 0x13, + 0xdc, 0xd6, 0xfd, 0x7d, 0xe9, 0x07, 0x18, 0x9d, 0x87, 0x89, 0xb2, 0x30, 0x4e, 0x47, 0x96, 0x00, + 0x04, 0x1c, 0x2f, 0x35, 0x9b, 0x74, 0xb5, 0xdd, 0x09, 0x73, 0x30, 0x46, 0xa5, 0x7d, 0xb7, 0x01, + 0xcd, 0x7b, 0x7a, 0x60, 0x1d, 0x52, 0x6e, 0xda, 0x3c, 0x1f, 0xfb, 0xd2, 0x5f, 0x29, 0xc0, 0xa5, + 0xa4, 0xff, 0xea, 0x39, 0x1a, 0x99, 0x78, 0xfc, 0x24, 0xcc, 0xe4, 0x86, 0x23, 0x6a, 0xc1, 0xcd, + 0x4d, 0x43, 0xee, 0xb0, 0xe7, 0x6d, 0x6e, 0xea, 0x8c, 0x62, 0x88, 0xa3, 0xeb, 0xf2, 0x49, 0x31, + 0x37, 0x3d, 0xdf, 0x01, 0x3f, 0x53, 0xc6, 0xb0, 0xda, 0x73, 0x63, 0x0c, 0xab, 0x3f, 0x17, 0x52, + 0x7f, 0x3f, 0x66, 0x0c, 0x6b, 0xe4, 0x74, 0xca, 
0x92, 0x47, 0x3e, 0x04, 0xda, 0x28, 0xa3, 0x1a, + 0x8f, 0xd6, 0xa0, 0x8c, 0x14, 0x4c, 0x58, 0xde, 0xd5, 0x7d, 0xcb, 0x90, 0x62, 0x47, 0x8e, 0x00, + 0xc7, 0x2a, 0xf0, 0xa1, 0xf0, 0xdd, 0xe0, 0xaf, 0x28, 0xb0, 0xa3, 0x38, 0x8f, 0xc5, 0x5c, 0x71, + 0x1e, 0xc9, 0x0a, 0x94, 0x9d, 0x03, 0x7a, 0x74, 0xb6, 0xb8, 0x07, 0x7c, 0x13, 0x78, 0xef, 0x2e, + 0x3d, 0x42, 0x5e, 0x58, 0xfb, 0x7e, 0x11, 0x80, 0x7d, 0xfe, 0xe9, 0xcc, 0x52, 0x3f, 0x0e, 0x35, + 0x7f, 0xc0, 0x15, 0x43, 0x52, 0x60, 0x8a, 0x3c, 0xd9, 0x44, 0x32, 0xaa, 0x7c, 0xf2, 0x39, 0xa8, + 0x7c, 0x6b, 0x40, 0x07, 0xca, 0xc7, 0x22, 0xdc, 0x37, 0x7c, 0x9d, 0x25, 0xa2, 0xc8, 0x3b, 0x3f, + 0xd5, 0xb1, 0x32, 0x5f, 0x55, 0xce, 0xcb, 0x7c, 0xd5, 0x80, 0xda, 0x3d, 0x97, 0x3b, 0xc6, 0x6a, + 0xff, 0xad, 0x08, 0x10, 0x39, 0x1e, 0x92, 0x5f, 0x2f, 0xc0, 0x4b, 0xe1, 0x80, 0x0b, 0xc4, 0xf6, + 0x8f, 0xc7, 0x14, 0xcf, 0x6d, 0xca, 0xca, 0x1a, 0xec, 0x7c, 0x06, 0xda, 0xca, 0x62, 0x87, 0xd9, + 0xb5, 0x20, 0x08, 0x75, 0xda, 0xeb, 0x07, 0x47, 0xab, 0x96, 0x27, 0x7b, 0x60, 0xa6, 0x7f, 0xeb, + 0x4d, 0x49, 0x23, 0x8a, 0x4a, 0x1d, 0x05, 0x1f, 0x44, 0x2a, 0x07, 0x43, 0x1c, 0xb2, 0x0f, 0x75, + 0xc7, 0x7d, 0xcf, 0x67, 0xcd, 0x21, 0xbb, 0xe3, 0x5b, 0xe3, 0x37, 0xb9, 0x68, 0x56, 0x61, 0xd2, + 0x90, 0x2f, 0x58, 0x73, 0x64, 0x63, 0xff, 0x5a, 0x11, 0x2e, 0x64, 0xb4, 0x03, 0x79, 0x0b, 0xe6, + 0xa4, 0x8f, 0x67, 0x14, 0x5c, 0xbf, 0x10, 0x05, 0xd7, 0xef, 0xa4, 0xf2, 0x70, 0x88, 0x9a, 0xbc, + 0x07, 0xa0, 0x1b, 0x06, 0xf5, 0xfd, 0x4d, 0xd7, 0x54, 0xfb, 0x81, 0x37, 0x99, 0xf8, 0xb2, 0x1c, + 0xa6, 0x3e, 0x3e, 0x6e, 0xfd, 0x54, 0x96, 0xdb, 0x76, 0xaa, 0x9d, 0xa3, 0x02, 0x18, 0x83, 0x24, + 0xdf, 0x04, 0x10, 0x3a, 0x80, 0x30, 0xb2, 0xc4, 0x53, 0x14, 0x67, 0x0b, 0x2a, 0x70, 0xd9, 0xc2, + 0xd7, 0x07, 0xba, 0x13, 0x58, 0xc1, 0x91, 0x08, 0xe4, 0xf3, 0x20, 0x44, 0xc1, 0x18, 0xa2, 0xf6, + 0x4f, 0x8a, 0x50, 0x57, 0x66, 0x81, 0x67, 0xa0, 0x0b, 0xee, 0x26, 0x74, 0xc1, 0x13, 0x72, 0xd4, + 0xce, 0xd2, 0x04, 0xbb, 0x29, 0x4d, 0xf0, 0xad, 0xfc, 0xac, 0x9e, 0xac, 0x07, 0xfe, 
0xad, 0x22, + 0xcc, 0x28, 0xd2, 0xbc, 0x1a, 0xda, 0xaf, 0xc2, 0xac, 0x70, 0xb0, 0xd8, 0xd4, 0x1f, 0x89, 0x98, + 0x46, 0xbc, 0xc1, 0xca, 0xc2, 0x37, 0xba, 0x9d, 0xcc, 0xc2, 0x34, 0x2d, 0xeb, 0xd6, 0x22, 0x69, + 0x87, 0x6d, 0xc2, 0x84, 0x49, 0x56, 0xec, 0x37, 0x79, 0xb7, 0x6e, 0xa7, 0xf2, 0x70, 0x88, 0x3a, + 0xad, 0x22, 0x2e, 0x9f, 0x83, 0x8a, 0xf8, 0xdf, 0x15, 0x60, 0x2a, 0x6a, 0xaf, 0x73, 0x57, 0x10, + 0xef, 0x25, 0x15, 0xc4, 0xcb, 0xb9, 0xbb, 0xc3, 0x08, 0xf5, 0xf0, 0x5f, 0xa8, 0x41, 0xe2, 0xbc, + 0x00, 0xd9, 0x85, 0x2b, 0x56, 0xa6, 0xd7, 0x63, 0x6c, 0xb6, 0x09, 0x0f, 0xc0, 0xaf, 0x8f, 0xa4, + 0xc4, 0x27, 0xa0, 0x90, 0x01, 0xd4, 0x0f, 0xa9, 0x17, 0x58, 0x06, 0x55, 0xdf, 0x77, 0x2b, 0xb7, + 0x48, 0x26, 0x95, 0xe0, 0x61, 0x9b, 0x3e, 0x90, 0x0c, 0x30, 0x64, 0x45, 0x76, 0xa1, 0x42, 0xcd, + 0x2e, 0x55, 0x51, 0xa6, 0x72, 0x46, 0xfd, 0x0d, 0xdb, 0x93, 0xbd, 0xf9, 0x28, 0xa0, 0x89, 0x1f, + 0x57, 0x34, 0x95, 0x73, 0x0a, 0x58, 0xa7, 0x54, 0x2f, 0x91, 0x83, 0x50, 0xdb, 0x5a, 0x99, 0xd0, + 0xe4, 0xf1, 0x04, 0x5d, 0xab, 0x0f, 0x8d, 0x87, 0x7a, 0x40, 0xbd, 0x9e, 0xee, 0x1d, 0xc8, 0xdd, + 0xc6, 0xf8, 0x5f, 0xf8, 0xb6, 0x42, 0x8a, 0xbe, 0x30, 0x4c, 0xc2, 0x88, 0x0f, 0x71, 0xa1, 0x11, + 0x48, 0xf1, 0x59, 0xa9, 0x94, 0xc7, 0x67, 0xaa, 0x04, 0x71, 0x5f, 0x9e, 0x1b, 0x50, 0xaf, 0x18, + 0xf1, 0x20, 0x87, 0x89, 0x10, 0xf1, 0xe2, 0x62, 0x80, 0x76, 0x0e, 0xd3, 0x84, 0x84, 0x8a, 0x96, + 0x9b, 0xec, 0x50, 0xf3, 0xda, 0xff, 0xaa, 0x44, 0xd3, 0xf2, 0xb3, 0xd6, 0x13, 0x7e, 0x29, 0xa9, + 0x27, 0xbc, 0x96, 0xd6, 0x13, 0xa6, 0xec, 0xf1, 0x67, 0xf7, 0x34, 0x4e, 0xa9, 0xd7, 0xca, 0xe7, + 0xa0, 0x5e, 0x7b, 0x15, 0x9a, 0x87, 0x7c, 0x26, 0x10, 0x21, 0xab, 0x2a, 0x7c, 0x19, 0xe1, 0x33, + 0xfb, 0x83, 0x28, 0x19, 0xe3, 0x34, 0xac, 0x88, 0xbc, 0x14, 0x27, 0x8c, 0x12, 0x2d, 0x8b, 0x74, + 0xa2, 0x64, 0x8c, 0xd3, 0x70, 0x27, 0x45, 0xcb, 0x39, 0x10, 0x05, 0x6a, 0xbc, 0x80, 0x70, 0x52, + 0x54, 0x89, 0x18, 0xe5, 0x93, 0x1b, 0x50, 0x1f, 0x98, 0x7b, 0x82, 0xb6, 0xce, 0x69, 0xb9, 0x84, + 0xb9, 0xb3, 0xba, 0x26, 
0x43, 0x68, 0xa9, 0x5c, 0x56, 0x93, 0x9e, 0xde, 0x57, 0x19, 0x7c, 0x6f, + 0x28, 0x6b, 0xb2, 0x19, 0x25, 0x63, 0x9c, 0x86, 0xfc, 0x0c, 0xcc, 0x78, 0xd4, 0x1c, 0x18, 0x34, + 0x2c, 0x05, 0xbc, 0x94, 0x8c, 0x2d, 0x1a, 0xcf, 0xc1, 0x14, 0xe5, 0x08, 0x25, 0x61, 0x73, 0x2c, + 0x25, 0xe1, 0xd7, 0x60, 0xc6, 0xf4, 0x74, 0xcb, 0xa1, 0xe6, 0x7d, 0x87, 0x3b, 0x5d, 0x48, 0x57, + 0xc9, 0x50, 0x41, 0xbf, 0x9a, 0xc8, 0xc5, 0x14, 0xb5, 0xf6, 0x2f, 0x8a, 0x50, 0x11, 0x11, 0x4f, + 0xd7, 0xe1, 0x82, 0xe5, 0x58, 0x81, 0xa5, 0xdb, 0xab, 0xd4, 0xd6, 0x8f, 0x92, 0x8e, 0x27, 0x2f, + 0xb3, 0x8d, 0xf6, 0xfa, 0x70, 0x36, 0x66, 0x95, 0x61, 0x8d, 0x13, 0x88, 0xe5, 0x5b, 0xa1, 0x08, + 0x3d, 0x9a, 0x08, 0xb7, 0x9d, 0xc8, 0xc1, 0x14, 0x25, 0x13, 0x86, 0xfa, 0x19, 0x5e, 0x25, 0x5c, + 0x18, 0x4a, 0xfa, 0x92, 0x24, 0xe9, 0xb8, 0x90, 0x3e, 0xe0, 0x02, 0x71, 0x78, 0x20, 0x49, 0x3a, + 0x98, 0x09, 0x21, 0x3d, 0x95, 0x87, 0x43, 0xd4, 0x0c, 0x61, 0x4f, 0xb7, 0xec, 0x81, 0x47, 0x23, + 0x84, 0x4a, 0x84, 0xb0, 0x96, 0xca, 0xc3, 0x21, 0x6a, 0xed, 0x7f, 0x14, 0x80, 0x0c, 0x1f, 0xb1, + 0x20, 0xfb, 0x50, 0x75, 0xb8, 0x2e, 0x32, 0x77, 0x94, 0xff, 0x98, 0x4a, 0x53, 0x2c, 0x12, 0x32, + 0x41, 0xe2, 0x13, 0x07, 0xea, 0xf4, 0x51, 0x40, 0x3d, 0x27, 0x3c, 0x72, 0x35, 0x99, 0x1b, 0x05, + 0xc4, 0xde, 0x4c, 0x22, 0x63, 0xc8, 0x43, 0xfb, 0xdd, 0x22, 0x34, 0x63, 0x74, 0x4f, 0xdb, 0xe2, + 0xf3, 0xa8, 0x0f, 0x42, 0x05, 0xb8, 0xe3, 0xd9, 0x72, 0xbe, 0x8b, 0x45, 0x7d, 0x90, 0x59, 0xb8, + 0x81, 0x71, 0x3a, 0xb2, 0x04, 0xd0, 0xd3, 0xfd, 0x80, 0x7a, 0x5c, 0x16, 0x4a, 0xc5, 0x5a, 0xd8, + 0x0c, 0x73, 0x30, 0x46, 0x45, 0xae, 0xcb, 0x3b, 0x21, 0xca, 0xc9, 0xd8, 0x98, 0x23, 0x2e, 0x7c, + 0xa8, 0x4c, 0xe0, 0xc2, 0x07, 0xd2, 0x85, 0x39, 0x55, 0x6b, 0x95, 0x7b, 0xb6, 0xc8, 0x89, 0xa2, + 0xa3, 0xa6, 0x20, 0x70, 0x08, 0x54, 0xfb, 0x7e, 0x01, 0xa6, 0x13, 0x0a, 0x28, 0x11, 0xd5, 0x52, + 0x1d, 0x10, 0x4a, 0x44, 0xb5, 0x8c, 0x9d, 0xeb, 0xf9, 0x02, 0x54, 0x45, 0x03, 0xa5, 0xfd, 0x7e, + 0x45, 0x13, 0xa2, 0xcc, 0x65, 0x2b, 0x8b, 0x54, 0x71, 0xa7, 
0x57, 0x16, 0xa9, 0x03, 0x47, 0x95, + 0x2f, 0x2c, 0x47, 0xa2, 0x76, 0xb2, 0xa5, 0x63, 0x96, 0x23, 0x91, 0x8e, 0x21, 0x85, 0xf6, 0x0f, + 0x78, 0xbd, 0x03, 0xef, 0x28, 0xdc, 0x59, 0x77, 0xa1, 0x26, 0x7d, 0x3d, 0xe5, 0xd0, 0x78, 0x2b, + 0x87, 0x56, 0x8c, 0xe3, 0x48, 0x6f, 0x45, 0xdd, 0x38, 0xb8, 0xbf, 0xb7, 0x87, 0x0a, 0x9d, 0xdc, + 0x84, 0x86, 0xeb, 0xc8, 0x11, 0x2c, 0x3f, 0xff, 0x8b, 0x6c, 0xe5, 0xb8, 0xaf, 0x12, 0x1f, 0x1f, + 0xb7, 0x2e, 0x85, 0x2f, 0x89, 0x4a, 0x62, 0x54, 0x52, 0xfb, 0x33, 0x05, 0x78, 0x09, 0x5d, 0xdb, + 0xb6, 0x9c, 0x6e, 0xd2, 0xf2, 0x49, 0x6c, 0x98, 0xe9, 0xe9, 0x8f, 0x76, 0x1c, 0xfd, 0x50, 0xb7, + 0x6c, 0x7d, 0xd7, 0xa6, 0x4f, 0xdd, 0x19, 0x0f, 0x02, 0xcb, 0x5e, 0x10, 0x77, 0x64, 0x2e, 0xac, + 0x3b, 0xc1, 0x7d, 0xaf, 0x13, 0x78, 0x96, 0xd3, 0x15, 0xb3, 0xe4, 0x66, 0x02, 0x0b, 0x53, 0xd8, + 0xda, 0xef, 0x95, 0x80, 0xfb, 0x11, 0x92, 0x2f, 0x43, 0xa3, 0x47, 0x8d, 0x7d, 0xdd, 0xb1, 0x7c, + 0x15, 0x1f, 0xf8, 0x32, 0xfb, 0xae, 0x4d, 0x95, 0xf8, 0x98, 0xfd, 0x8a, 0xe5, 0xce, 0x06, 0x3f, + 0xd2, 0x13, 0xd1, 0x12, 0x03, 0xaa, 0x5d, 0xdf, 0xd7, 0xfb, 0x56, 0x6e, 0x17, 0x13, 0x11, 0x8f, + 0x55, 0x4c, 0x47, 0xe2, 0x19, 0x25, 0x34, 0x31, 0xa0, 0xd2, 0xb7, 0x75, 0xcb, 0xc9, 0x7d, 0xa7, + 0x1b, 0xfb, 0x82, 0x2d, 0x86, 0x24, 0x54, 0x95, 0xfc, 0x11, 0x05, 0x36, 0x19, 0x40, 0xd3, 0x37, + 0x3c, 0xbd, 0xe7, 0xef, 0xeb, 0x4b, 0xaf, 0xbd, 0x9e, 0x5b, 0xf8, 0x8f, 0x58, 0x09, 0x59, 0x64, + 0x05, 0x97, 0x37, 0x3b, 0xb7, 0x97, 0x97, 0x5e, 0x7b, 0x1d, 0xe3, 0x7c, 0xe2, 0x6c, 0x5f, 0x7b, + 0x75, 0x49, 0xce, 0x20, 0x13, 0x67, 0xfb, 0xda, 0xab, 0x4b, 0x18, 0xe7, 0xa3, 0xfd, 0xef, 0x02, + 0x34, 0x42, 0x5a, 0xb2, 0x03, 0xc0, 0xe6, 0x32, 0x19, 0x41, 0xf5, 0x4c, 0xf7, 0xdf, 0x70, 0x6d, + 0xcf, 0x4e, 0x58, 0x18, 0x63, 0x40, 0x19, 0x21, 0x66, 0x8b, 0x93, 0x0e, 0x31, 0xbb, 0x08, 0x8d, + 0x7d, 0xdd, 0x31, 0xfd, 0x7d, 0xfd, 0x80, 0x4a, 0xf7, 0xeb, 0x70, 0x2b, 0x72, 0x5b, 0x65, 0x60, + 0x44, 0xa3, 0xfd, 0xa3, 0x2a, 0x08, 0xbf, 0x10, 0x36, 0xe9, 0x98, 0x96, 0x2f, 0x0e, 0x49, 0x14, + 
0x78, 0xc9, 0x70, 0xd2, 0x59, 0x95, 0xe9, 0x18, 0x52, 0x90, 0xcb, 0x50, 0xea, 0x59, 0x8e, 0x94, + 0x40, 0xb8, 0x22, 0x77, 0xd3, 0x72, 0x90, 0xa5, 0xf1, 0x2c, 0xfd, 0x91, 0x94, 0x30, 0x44, 0x96, + 0xfe, 0x08, 0x59, 0x1a, 0xf9, 0x2a, 0xcc, 0xda, 0xae, 0x7b, 0xc0, 0xa6, 0x0f, 0x25, 0x88, 0x08, + 0xab, 0x3a, 0x57, 0xad, 0x6c, 0x24, 0xb3, 0x30, 0x4d, 0x4b, 0x76, 0xe0, 0xe5, 0x0f, 0xa8, 0xe7, + 0xca, 0xf9, 0xb2, 0x63, 0x53, 0xda, 0x57, 0x30, 0x42, 0x34, 0xe6, 0x5e, 0xb2, 0x3f, 0x9f, 0x4d, + 0x82, 0xa3, 0xca, 0x72, 0xbf, 0x7c, 0xdd, 0xeb, 0xd2, 0x60, 0xcb, 0x73, 0x99, 0xec, 0x62, 0x39, + 0x5d, 0x05, 0x5b, 0x8d, 0x60, 0xb7, 0xb3, 0x49, 0x70, 0x54, 0x59, 0xf2, 0x0e, 0xcc, 0x8b, 0x2c, + 0x21, 0xb6, 0x2c, 0x8b, 0x69, 0xc6, 0xb2, 0xd5, 0x55, 0xa8, 0xd3, 0xc2, 0x5e, 0xb6, 0x3d, 0x82, + 0x06, 0x47, 0x96, 0x26, 0x77, 0x60, 0x4e, 0x59, 0x4b, 0xb7, 0xa8, 0xd7, 0x09, 0x7d, 0x85, 0xa6, + 0xdb, 0xd7, 0x4e, 0x8e, 0x5b, 0x57, 0x56, 0x69, 0xdf, 0xa3, 0x46, 0xdc, 0xea, 0xac, 0xa8, 0x70, + 0xa8, 0x1c, 0x41, 0xb8, 0xc4, 0x1d, 0x82, 0x76, 0xfa, 0x2b, 0xae, 0x6b, 0x9b, 0xee, 0x43, 0x47, + 0x7d, 0xbb, 0x10, 0xd8, 0xb9, 0x81, 0xb4, 0x93, 0x49, 0x81, 0x23, 0x4a, 0xb2, 0x2f, 0xe7, 0x39, + 0xab, 0xee, 0x43, 0x27, 0x8d, 0x0a, 0xd1, 0x97, 0x77, 0x46, 0xd0, 0xe0, 0xc8, 0xd2, 0x64, 0x0d, + 0x48, 0xfa, 0x0b, 0x76, 0xfa, 0xd2, 0x84, 0x7f, 0x49, 0x04, 0x43, 0x4a, 0xe7, 0x62, 0x46, 0x09, + 0xb2, 0x01, 0x17, 0xd3, 0xa9, 0x8c, 0x9d, 0xb4, 0xe6, 0xf3, 0x30, 0xc8, 0x98, 0x91, 0x8f, 0x99, + 0xa5, 0xb4, 0x7f, 0x5c, 0x84, 0xe9, 0x44, 0xf4, 0x8c, 0xe7, 0x2e, 0x4a, 0x01, 0xdb, 0x3c, 0xf4, + 0xfc, 0xee, 0xfa, 0xea, 0x6d, 0xaa, 0x9b, 0xd4, 0x53, 0x87, 0x33, 0x1a, 0x72, 0x59, 0x4c, 0xe4, + 0x60, 0x8a, 0x92, 0xec, 0x41, 0x45, 0xd8, 0x09, 0xf2, 0xde, 0xa4, 0xa4, 0xda, 0x88, 0x1b, 0x0b, + 0xe4, 0xf5, 0x63, 0xae, 0x47, 0x51, 0xc0, 0x6b, 0x01, 0x4c, 0xc5, 0x29, 0xd8, 0x44, 0x12, 0x89, + 0xbd, 0xb5, 0x84, 0xc8, 0xbb, 0x0e, 0xa5, 0x20, 0x18, 0x37, 0xfe, 0x81, 0xb0, 0x3b, 0x6d, 0x6f, + 0x20, 0xc3, 0xd0, 0xf6, 0xd8, 0xbf, 
0xf3, 0x7d, 0xcb, 0x75, 0x64, 0x30, 0xfc, 0x1d, 0xa8, 0xc9, + 0xdd, 0xd3, 0x98, 0xf1, 0x1b, 0xb8, 0xac, 0xa4, 0xd4, 0xae, 0x0a, 0x4b, 0xfb, 0xf7, 0x45, 0x68, + 0x84, 0x6a, 0x92, 0x53, 0x04, 0x99, 0x77, 0xa1, 0x11, 0x3a, 0x34, 0xe6, 0xbe, 0x26, 0x36, 0xf2, + 0xb3, 0xe3, 0x3b, 0xfb, 0xf0, 0x15, 0x23, 0x1e, 0x71, 0x67, 0xc9, 0x52, 0x0e, 0x67, 0xc9, 0x3e, + 0xd4, 0x02, 0xcf, 0xea, 0x76, 0xe5, 0x2e, 0x21, 0x8f, 0xb7, 0x64, 0xd8, 0x5c, 0xdb, 0x02, 0x50, + 0xb6, 0xac, 0x78, 0x41, 0xc5, 0x46, 0x7b, 0x1f, 0xe6, 0xd2, 0x94, 0x5c, 0x84, 0x36, 0xf6, 0xa9, + 0x39, 0xb0, 0x55, 0x1b, 0x47, 0x22, 0xb4, 0x4c, 0xc7, 0x90, 0x82, 0xdc, 0x80, 0x3a, 0xfb, 0x4d, + 0x1f, 0xb8, 0x8e, 0x12, 0x63, 0xf9, 0x6e, 0x64, 0x5b, 0xa6, 0x61, 0x98, 0xab, 0xfd, 0xd7, 0x12, + 0x5c, 0x8e, 0x94, 0x5d, 0x9b, 0xba, 0xa3, 0x77, 0x4f, 0x71, 0x37, 0xe8, 0xa7, 0x27, 0xe2, 0xce, + 0x7a, 0x53, 0x48, 0xe9, 0x39, 0xb8, 0x29, 0xe4, 0xff, 0x16, 0x81, 0x3b, 0x5f, 0x93, 0x6f, 0xc3, + 0x94, 0x1e, 0xbb, 0x16, 0x5a, 0xfe, 0xce, 0x9b, 0xb9, 0x7f, 0x27, 0xf7, 0xf1, 0x0e, 0x1d, 0xe0, + 0xe2, 0xa9, 0x98, 0x60, 0x48, 0x5c, 0xa8, 0xef, 0xe9, 0xb6, 0xcd, 0x64, 0xa1, 0xdc, 0xc6, 0xbb, + 0x04, 0x73, 0xde, 0xcd, 0xd7, 0x24, 0x34, 0x86, 0x4c, 0xc8, 0x77, 0x0b, 0x30, 0xed, 0xc5, 0xb7, + 0x6b, 0xf2, 0x87, 0xe4, 0x71, 0xed, 0x88, 0xa1, 0xc5, 0xdd, 0xed, 0xe2, 0x7b, 0xc2, 0x24, 0x4f, + 0xed, 0xbf, 0x14, 0x60, 0xba, 0x63, 0x5b, 0xa6, 0xe5, 0x74, 0xcf, 0xf1, 0xa2, 0x92, 0xfb, 0x50, + 0xf1, 0x6d, 0xcb, 0xa4, 0x63, 0xae, 0x26, 0x62, 0x1d, 0x63, 0x00, 0x28, 0x70, 0x92, 0x37, 0x9f, + 0x94, 0x4e, 0x71, 0xf3, 0xc9, 0x1f, 0x54, 0x41, 0x1e, 0x23, 0x20, 0x03, 0x68, 0x74, 0xd5, 0x85, + 0x0a, 0xf2, 0x1b, 0x6f, 0xe7, 0x08, 0xc6, 0x99, 0xb8, 0x9a, 0x41, 0xcc, 0xfd, 0x61, 0x22, 0x46, + 0x9c, 0x08, 0x4d, 0xde, 0x47, 0xbe, 0x9a, 0xf3, 0x3e, 0x72, 0xc1, 0x6e, 0xf8, 0x46, 0x72, 0x1d, + 0xca, 0xfb, 0x41, 0xd0, 0x97, 0x9d, 0x69, 0xfc, 0x73, 0x22, 0x51, 0x3c, 0x28, 0x21, 0x13, 0xb1, + 0x77, 0xe4, 0xd0, 0x8c, 0x85, 0xa3, 0x87, 0xb7, 0x3e, 0xae, 0xe4, 0x72, 
0x23, 0x89, 0xb3, 0x60, + 0xef, 0xc8, 0xa1, 0xc9, 0x2f, 0x42, 0x33, 0xf0, 0x74, 0xc7, 0xdf, 0x73, 0xbd, 0x1e, 0xf5, 0xe4, + 0x1e, 0x75, 0x2d, 0xc7, 0x95, 0xdc, 0xdb, 0x11, 0x9a, 0x50, 0xc9, 0x26, 0x92, 0x30, 0xce, 0x8d, + 0x1c, 0x40, 0x7d, 0x60, 0x8a, 0x8a, 0x49, 0x35, 0xd8, 0x72, 0x9e, 0x5b, 0xd6, 0x63, 0x4e, 0x22, + 0xea, 0x0d, 0x43, 0x06, 0xc9, 0x0b, 0x4e, 0x6b, 0x93, 0xba, 0xe0, 0x34, 0xde, 0x1b, 0xb3, 0x82, + 0xd5, 0x90, 0x9e, 0x94, 0x6b, 0x9d, 0xae, 0xf4, 0x71, 0x5b, 0xcb, 0x2d, 0x72, 0x0a, 0x96, 0xcd, + 0x50, 0x36, 0x76, 0xba, 0xa8, 0x78, 0x68, 0x3d, 0x90, 0xb6, 0x23, 0x62, 0x24, 0x2e, 0x75, 0x12, + 0x27, 0x23, 0x17, 0x4f, 0x37, 0x1f, 0x84, 0xb7, 0x0b, 0xc5, 0x82, 0xca, 0x67, 0xde, 0xde, 0xa4, + 0xfd, 0x87, 0x22, 0x94, 0xb6, 0x37, 0x3a, 0x22, 0x50, 0x2c, 0xbf, 0x26, 0x8e, 0x76, 0x0e, 0xac, + 0xfe, 0x03, 0xea, 0x59, 0x7b, 0x47, 0x72, 0xeb, 0x1d, 0x0b, 0x14, 0x9b, 0xa6, 0xc0, 0x8c, 0x52, + 0xe4, 0x5d, 0x98, 0x32, 0xf4, 0x15, 0xea, 0x05, 0xe3, 0x28, 0x16, 0xf8, 0x51, 0xf1, 0x95, 0xe5, + 0xa8, 0x38, 0x26, 0xc0, 0xc8, 0x0e, 0x80, 0x11, 0x41, 0x97, 0xce, 0xac, 0x0e, 0x89, 0x01, 0xc7, + 0x80, 0x08, 0x42, 0xe3, 0x80, 0x91, 0x72, 0xd4, 0xf2, 0x59, 0x50, 0x79, 0xcf, 0xb9, 0xab, 0xca, + 0x62, 0x04, 0xa3, 0x39, 0x30, 0x9d, 0xb8, 0xe9, 0x89, 0x7c, 0x05, 0xea, 0x6e, 0x3f, 0x36, 0x9d, + 0x36, 0xb8, 0x37, 0x6d, 0xfd, 0xbe, 0x4c, 0x7b, 0x7c, 0xdc, 0x9a, 0xde, 0x70, 0xbb, 0x96, 0xa1, + 0x12, 0x30, 0x24, 0x27, 0x1a, 0x54, 0xf9, 0xb9, 0x4d, 0x75, 0xcf, 0x13, 0x5f, 0x3b, 0xf8, 0x55, + 0x2c, 0x3e, 0xca, 0x1c, 0xed, 0x97, 0xcb, 0x10, 0x59, 0x5c, 0x89, 0x0f, 0x55, 0x71, 0x66, 0x44, + 0xce, 0xdc, 0xe7, 0x7a, 0x3c, 0x45, 0xb2, 0x22, 0x5d, 0x28, 0xbd, 0xef, 0xee, 0xe6, 0x9e, 0xb8, + 0x63, 0x81, 0x1d, 0x84, 0xae, 0x2c, 0x96, 0x80, 0x8c, 0x03, 0xf9, 0xab, 0x05, 0x78, 0xd1, 0x4f, + 0x8b, 0xbe, 0xb2, 0x3b, 0x60, 0x7e, 0x19, 0x3f, 0x2d, 0x4c, 0x4b, 0xb7, 0xe7, 0x51, 0xd9, 0x38, + 0x5c, 0x17, 0xd6, 0xfe, 0xc2, 0x14, 0x2a, 0xbb, 0xd3, 0xad, 0x9c, 0xf7, 0xd9, 0x26, 0xdb, 0x3f, + 0x99, 0x86, 
0x92, 0x95, 0xf6, 0x9d, 0x22, 0x34, 0x63, 0xb3, 0x75, 0xee, 0xeb, 0xc3, 0x1e, 0xa5, + 0xae, 0x0f, 0xdb, 0x1a, 0xdf, 0x33, 0x20, 0xaa, 0xd5, 0x79, 0xdf, 0x20, 0xf6, 0xcf, 0x8a, 0x50, + 0xda, 0x59, 0x5d, 0x4b, 0x6e, 0x5a, 0x0b, 0xcf, 0x60, 0xd3, 0xba, 0x0f, 0xb5, 0xdd, 0x81, 0x65, + 0x07, 0x96, 0x93, 0x3b, 0xf4, 0x8c, 0xba, 0x6d, 0x4d, 0xda, 0x3a, 0x04, 0x2a, 0x2a, 0x78, 0xd2, + 0x85, 0x5a, 0x57, 0xc4, 0xfe, 0xcc, 0xed, 0x2f, 0x29, 0x63, 0x88, 0x0a, 0x46, 0xf2, 0x05, 0x15, + 0xba, 0x76, 0x04, 0xd5, 0x9d, 0x55, 0x29, 0xf6, 0x3f, 0xdb, 0xd6, 0xd4, 0x7e, 0x11, 0x42, 0x29, + 0xe0, 0xd9, 0x33, 0xff, 0xef, 0x05, 0x48, 0x0a, 0x3e, 0xcf, 0xbe, 0x37, 0x1d, 0xa4, 0x7b, 0xd3, + 0xea, 0x24, 0x06, 0x5f, 0x76, 0x87, 0xd2, 0xfe, 0x6d, 0x01, 0x52, 0x07, 0xfd, 0xc8, 0xeb, 0x32, + 0x8c, 0x5c, 0xd2, 0x31, 0x4d, 0x85, 0x91, 0x23, 0x49, 0xea, 0x58, 0x38, 0xb9, 0x0f, 0xd9, 0x76, + 0x2d, 0x6e, 0x40, 0x93, 0xd5, 0xbf, 0x37, 0xfe, 0x76, 0x2d, 0xcb, 0x1c, 0x27, 0x9d, 0x27, 0xe3, + 0x59, 0x98, 0xe4, 0xab, 0xfd, 0xc3, 0x22, 0x54, 0x9f, 0x59, 0x6c, 0x03, 0x9a, 0xf0, 0x67, 0x5d, + 0xc9, 0x39, 0xdb, 0x8f, 0xf4, 0x66, 0xed, 0xa5, 0xbc, 0x59, 0xf3, 0x5e, 0x93, 0xfe, 0x14, 0x5f, + 0xd6, 0x7f, 0x5d, 0x00, 0xb9, 0xd6, 0xac, 0x3b, 0x7e, 0xa0, 0x3b, 0x06, 0x25, 0x46, 0xb8, 0xb0, + 0xe5, 0x75, 0x9a, 0x92, 0x8e, 0x85, 0x42, 0x96, 0xe1, 0xcf, 0x6a, 0x21, 0x23, 0x3f, 0x09, 0xf5, + 0x7d, 0xd7, 0x0f, 0xf8, 0xe2, 0x55, 0x4c, 0xaa, 0xcc, 0x6e, 0xcb, 0x74, 0x0c, 0x29, 0xd2, 0xe6, + 0xec, 0xca, 0x68, 0x73, 0xb6, 0xf6, 0x9b, 0x45, 0x98, 0xfa, 0xa4, 0x04, 0x4f, 0xc8, 0xf2, 0xfe, + 0x2d, 0xe5, 0xf4, 0xfe, 0x2d, 0x9f, 0xc5, 0xfb, 0x57, 0xfb, 0x61, 0x01, 0xe0, 0x99, 0x45, 0x6e, + 0x30, 0x93, 0x8e, 0xb9, 0xb9, 0xfb, 0x55, 0xb6, 0x5b, 0xee, 0xdf, 0xab, 0xa8, 0x4f, 0xe2, 0x4e, + 0xb9, 0x1f, 0x16, 0x60, 0x46, 0x4f, 0x38, 0xba, 0xe6, 0x96, 0x97, 0x53, 0x7e, 0xb3, 0xa1, 0x9f, + 0x56, 0x32, 0x1d, 0x53, 0x6c, 0xc9, 0x1b, 0x51, 0x04, 0xf3, 0x7b, 0x51, 0xb7, 0x1f, 0x0a, 0x3d, + 0xce, 0x65, 0xb7, 0x04, 0xe5, 0x53, 0x1c, 0x8b, 
0x4b, 0x13, 0x71, 0x2c, 0x8e, 0x1f, 0x99, 0x2c, + 0x3f, 0xf1, 0xc8, 0xe4, 0x21, 0x34, 0xf6, 0x3c, 0xb7, 0xc7, 0x7d, 0x77, 0xe5, 0x1d, 0xe1, 0x37, + 0x73, 0x2c, 0x94, 0xbd, 0x5d, 0xcb, 0xa1, 0x26, 0xf7, 0x0b, 0x0e, 0x15, 0x57, 0x6b, 0x0a, 0x1f, + 0x23, 0x56, 0x5c, 0xd7, 0xef, 0x0a, 0xae, 0xd5, 0x49, 0x72, 0x0d, 0xe7, 0x92, 0x6d, 0x81, 0x8e, + 0x8a, 0x4d, 0xd2, 0x5f, 0xb7, 0xf6, 0x6c, 0xfc, 0x75, 0xb5, 0x3f, 0x5f, 0x53, 0x13, 0xd8, 0x73, + 0x17, 0x2c, 0xf7, 0xd3, 0x83, 0xee, 0x5d, 0x3a, 0x74, 0x0a, 0xbd, 0xfe, 0x0c, 0x4f, 0xa1, 0x37, + 0x26, 0x73, 0x0a, 0x1d, 0xf2, 0x9d, 0x42, 0x6f, 0x4e, 0xe8, 0x14, 0xfa, 0xd4, 0xa4, 0x4e, 0xa1, + 0x4f, 0x8f, 0x75, 0x0a, 0x7d, 0xe6, 0x54, 0xa7, 0xd0, 0x8f, 0x4b, 0x90, 0xda, 0x8c, 0x7f, 0x6a, + 0x78, 0xfb, 0x23, 0x65, 0x78, 0xfb, 0x5e, 0x11, 0xa2, 0x89, 0xf8, 0x8c, 0x8e, 0x49, 0xef, 0x40, + 0xbd, 0xa7, 0x3f, 0xe2, 0x8e, 0xd3, 0x79, 0xee, 0x98, 0xde, 0x94, 0x18, 0x18, 0xa2, 0x11, 0x1f, + 0xc0, 0x0a, 0xef, 0x79, 0xc8, 0x6d, 0xc2, 0x88, 0xae, 0x8c, 0x10, 0x4a, 0xd2, 0xe8, 0x1d, 0x63, + 0x6c, 0xb4, 0x7f, 0x55, 0x04, 0x79, 0x21, 0x08, 0xa1, 0x50, 0xd9, 0xb3, 0x1e, 0x51, 0x33, 0xb7, + 0xbb, 0x73, 0xec, 0xe6, 0x7f, 0x61, 0xa3, 0xe1, 0x09, 0x28, 0xd0, 0xb9, 0xf2, 0x5d, 0xd8, 0xdc, + 0x64, 0xfb, 0xe5, 0x50, 0xbe, 0xc7, 0x6d, 0x77, 0x52, 0xf9, 0x2e, 0x92, 0x50, 0xf1, 0x10, 0xba, + 0x7e, 0xee, 0x7e, 0x91, 0xdb, 0xc4, 0x98, 0x70, 0xe3, 0x50, 0xba, 0x7e, 0x5f, 0x84, 0xa1, 0x90, + 0x3c, 0xda, 0xbf, 0xf0, 0x83, 0x1f, 0x5d, 0x7b, 0xe1, 0x87, 0x3f, 0xba, 0xf6, 0xc2, 0x47, 0x3f, + 0xba, 0xf6, 0xc2, 0x2f, 0x9f, 0x5c, 0x2b, 0xfc, 0xe0, 0xe4, 0x5a, 0xe1, 0x87, 0x27, 0xd7, 0x0a, + 0x1f, 0x9d, 0x5c, 0x2b, 0xfc, 0xa7, 0x93, 0x6b, 0x85, 0xbf, 0xf4, 0x9f, 0xaf, 0xbd, 0xf0, 0xf3, + 0x5f, 0x8e, 0xaa, 0xb0, 0xa8, 0xaa, 0xb0, 0xa8, 0x18, 0x2e, 0xf6, 0x0f, 0xba, 0x8b, 0xac, 0x0a, + 0x51, 0x8a, 0xaa, 0xc2, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x3c, 0x40, 0x5a, 0x93, 0x65, 0x9f, + 0x00, 0x00, } func (m *AbstractPodTemplate) Marshal() (dAtA []byte, err error) { @@ 
-6148,13 +6149,13 @@ func (m *KafkaSink) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x2a + dAtA[i] = 0x32 } i -= len(m.Config) copy(dAtA[i:], m.Config) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Config))) i-- - dAtA[i] = 0x22 + dAtA[i] = 0x2a if m.TLS != nil { { size, err := m.TLS.MarshalToSizedBuffer(dAtA[:i]) @@ -6165,8 +6166,16 @@ func (m *KafkaSink) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a + dAtA[i] = 0x22 + } + i-- + if m.SetKey { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x18 i -= len(m.Topic) copy(dAtA[i:], m.Topic) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Topic))) @@ -10636,6 +10645,7 @@ func (m *KafkaSink) Size() (n int) { } l = len(m.Topic) n += 1 + l + sovGenerated(uint64(l)) + n += 2 if m.TLS != nil { l = m.TLS.Size() n += 1 + l + sovGenerated(uint64(l)) @@ -12623,6 +12633,7 @@ func (this *KafkaSink) String() string { s := strings.Join([]string{`&KafkaSink{`, `Brokers:` + fmt.Sprintf("%v", this.Brokers) + `,`, `Topic:` + fmt.Sprintf("%v", this.Topic) + `,`, + `SetKey:` + fmt.Sprintf("%v", this.SetKey) + `,`, `TLS:` + strings.Replace(this.TLS.String(), "TLS", "TLS", 1) + `,`, `Config:` + fmt.Sprintf("%v", this.Config) + `,`, `SASL:` + strings.Replace(this.SASL.String(), "SASL", "SASL", 1) + `,`, @@ -22503,6 +22514,26 @@ func (m *KafkaSink) Unmarshal(dAtA []byte) error { m.Topic = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SetKey", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SetKey = bool(v != 0) + case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d 
for field TLS", wireType) } @@ -22538,7 +22569,7 @@ func (m *KafkaSink) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 4: + case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) } @@ -22570,7 +22601,7 @@ func (m *KafkaSink) Unmarshal(dAtA []byte) error { } m.Config = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field SASL", wireType) } diff --git a/pkg/apis/numaflow/v1alpha1/generated.proto b/pkg/apis/numaflow/v1alpha1/generated.proto index f967281d56..1fdcdbcad5 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.proto +++ b/pkg/apis/numaflow/v1alpha1/generated.proto @@ -823,18 +823,25 @@ message KafkaSink { optional string topic = 2; + // SetKey sets the Kafka key to the keys passed in the Message. + // When the key is null (default), the record is sent randomly to one of the available partitions of the topic. + // If a key exists, Kafka hashes the key, and the result is used to map the message to a specific partition. This + // ensures that messages with the same key end up in the same partition. + // +optional + optional bool setKey = 3; + // TLS user to configure TLS connection for kafka broker // TLS.enable=true default for TLS. // +optional - optional TLS tls = 3; + optional TLS tls = 4; // +optional - optional string config = 4; + optional string config = 5; // SASL user to configure SASL connection for kafka broker // SASL.enable=true default for SASL. 
// +optional - optional SASL sasl = 5; + optional SASL sasl = 6; } message KafkaSource { diff --git a/pkg/apis/numaflow/v1alpha1/kafka_sink.go b/pkg/apis/numaflow/v1alpha1/kafka_sink.go index a2396f6981..8c139e07e3 100644 --- a/pkg/apis/numaflow/v1alpha1/kafka_sink.go +++ b/pkg/apis/numaflow/v1alpha1/kafka_sink.go @@ -19,14 +19,20 @@ package v1alpha1 type KafkaSink struct { Brokers []string `json:"brokers,omitempty" protobuf:"bytes,1,rep,name=brokers"` Topic string `json:"topic" protobuf:"bytes,2,opt,name=topic"` + // SetKey sets the Kafka key to the keys passed in the Message. + // When the key is null (default), the record is sent randomly to one of the available partitions of the topic. + // If a key exists, Kafka hashes the key, and the result is used to map the message to a specific partition. This + // ensures that messages with the same key end up in the same partition. + // +optional + SetKey bool `json:"setKey" protobuf:"varint,3,opt,name=setKey"` // TLS user to configure TLS connection for kafka broker // TLS.enable=true default for TLS. // +optional - TLS *TLS `json:"tls" protobuf:"bytes,3,opt,name=tls"` + TLS *TLS `json:"tls" protobuf:"bytes,4,opt,name=tls"` // +optional - Config string `json:"config,omitempty" protobuf:"bytes,4,opt,name=config"` + Config string `json:"config,omitempty" protobuf:"bytes,5,opt,name=config"` // SASL user to configure SASL connection for kafka broker // SASL.enable=true default for SASL. 
// +optional - SASL *SASL `json:"sasl" protobuf:"bytes,5,opt,name=sasl"` + SASL *SASL `json:"sasl" protobuf:"bytes,6,opt,name=sasl"` } diff --git a/pkg/apis/numaflow/v1alpha1/zz_generated.openapi.go b/pkg/apis/numaflow/v1alpha1/zz_generated.openapi.go index 3885f6e75f..5272ee72d4 100644 --- a/pkg/apis/numaflow/v1alpha1/zz_generated.openapi.go +++ b/pkg/apis/numaflow/v1alpha1/zz_generated.openapi.go @@ -2861,6 +2861,14 @@ func schema_pkg_apis_numaflow_v1alpha1_KafkaSink(ref common.ReferenceCallback) c Format: "", }, }, + "setKey": { + SchemaProps: spec.SchemaProps{ + Description: "SetKey sets the Kafka key to the keys passed in the Message. When the key is null (default), the record is sent randomly to one of the available partitions of the topic. If a key exists, Kafka hashes the key, and the result is used to map the message to a specific partition. This ensures that messages with the same key end up in the same partition.", + Default: false, + Type: []string{"boolean"}, + Format: "", + }, + }, "tls": { SchemaProps: spec.SchemaProps{ Description: "TLS user to configure TLS connection for kafka broker TLS.enable=true default for TLS.", diff --git a/pkg/sinks/kafka/kafka.go b/pkg/sinks/kafka/kafka.go index b1ba1f05f3..f9a7ef8bcc 100644 --- a/pkg/sinks/kafka/kafka.go +++ b/pkg/sinks/kafka/kafka.go @@ -19,6 +19,7 @@ package kafka import ( "context" "fmt" + "strings" "time" "github.com/IBM/sarama" @@ -38,6 +39,7 @@ type ToKafka struct { producer sarama.AsyncProducer connected bool topic string + setKey bool kafkaSink *dfv1.KafkaSink log *zap.SugaredLogger } @@ -51,6 +53,7 @@ func NewToKafka(ctx context.Context, vertexInstance *dfv1.VertexInstance) (*ToKa toKafka.name = vertexInstance.Vertex.Spec.Name toKafka.pipelineName = vertexInstance.Vertex.Spec.PipelineName toKafka.topic = kafkaSink.Topic + toKafka.setKey = kafkaSink.SetKey toKafka.kafkaSink = kafkaSink producer, err := connect(kafkaSink) @@ -162,8 +165,13 @@ func (tk *ToKafka) Write(_ context.Context, messages 
[]isb.Message) ([]isb.Offse } headers = append(headers, keyLen) + // keys is concatenated keys + var keys string // write keys into header if length > 0 if len(msg.Keys) > 0 { + // all keys concatenated together to set kafka key field if need be + keys = strings.Join(msg.Keys, ":") + for idx, key := range msg.Keys { headers = append(headers, sarama.RecordHeader{ Key: []byte(fmt.Sprintf("__key_%d", idx)), @@ -172,7 +180,14 @@ func (tk *ToKafka) Write(_ context.Context, messages []isb.Message) ([]isb.Offse } } + var kafkaKey sarama.StringEncoder + // set Kafka Key if SetKey is set. + if tk.setKey { + kafkaKey = sarama.StringEncoder(keys) + } + message := &sarama.ProducerMessage{ + Key: kafkaKey, Topic: tk.topic, Value: sarama.ByteEncoder(msg.Payload), Headers: headers, diff --git a/rust/numaflow-models/src/models/kafka_sink.rs b/rust/numaflow-models/src/models/kafka_sink.rs index c58d41025e..a2cc54e5b3 100644 --- a/rust/numaflow-models/src/models/kafka_sink.rs +++ b/rust/numaflow-models/src/models/kafka_sink.rs @@ -24,6 +24,9 @@ pub struct KafkaSink { pub config: Option, #[serde(rename = "sasl", skip_serializing_if = "Option::is_none")] pub sasl: Option>, + /// SetKey sets the Kafka key to the keys passed in the Message. When the key is null (default), the record is sent randomly to one of the available partitions of the topic. If a key exists, Kafka hashes the key, and the result is used to map the message to a specific partition. This ensures that messages with the same key end up in the same partition. 
+ #[serde(rename = "setKey", skip_serializing_if = "Option::is_none")] + pub set_key: Option, #[serde(rename = "tls", skip_serializing_if = "Option::is_none")] pub tls: Option>, #[serde(rename = "topic")] @@ -36,6 +39,7 @@ impl KafkaSink { brokers: None, config: None, sasl: None, + set_key: None, tls: None, topic, } From fb328854d8a49aa915aaf7d3843ebcfdfd6c81a9 Mon Sep 17 00:00:00 2001 From: Vigith Maurice Date: Sun, 13 Oct 2024 21:01:55 -0700 Subject: [PATCH 103/188] feat: actor pattern for forwarder + sink trait (#2141) Signed-off-by: Vigith Maurice Signed-off-by: Sreekanth Co-authored-by: Sreekanth --- rust/numaflow-core/src/message.rs | 63 +++++- rust/numaflow-core/src/monovertex.rs | 97 ++++----- .../numaflow-core/src/monovertex/forwarder.rs | 195 +++++++++--------- rust/numaflow-core/src/monovertex/metrics.rs | 33 +-- rust/numaflow-core/src/shared/utils.rs | 7 +- rust/numaflow-core/src/sink.rs | 87 +++++++- rust/numaflow-core/src/sink/user_defined.rs | 40 ++-- rust/numaflow-core/src/source.rs | 130 +++++++++++- rust/numaflow-core/src/source/generator.rs | 8 +- rust/numaflow-core/src/source/user_defined.rs | 20 +- .../src/transformer/user_defined.rs | 88 ++++++-- 11 files changed, 541 insertions(+), 227 deletions(-) diff --git a/rust/numaflow-core/src/message.rs b/rust/numaflow-core/src/message.rs index b3d1eab848..f0275df07e 100644 --- a/rust/numaflow-core/src/message.rs +++ b/rust/numaflow-core/src/message.rs @@ -1,3 +1,4 @@ +use std::cmp::PartialEq; use std::collections::HashMap; use base64::engine::general_purpose::STANDARD as BASE64_STANDARD; @@ -7,7 +8,8 @@ use chrono::{DateTime, Utc}; use crate::error::Error; use crate::shared::utils::{prost_timestamp_from_utc, utc_from_timestamp}; use numaflow_grpc::clients::sink::sink_request::Request; -use numaflow_grpc::clients::sink::SinkRequest; +use numaflow_grpc::clients::sink::Status::{Failure, Fallback, Success}; +use numaflow_grpc::clients::sink::{sink_response, SinkRequest, SinkResponse}; use 
numaflow_grpc::clients::source::{read_response, AckRequest}; use numaflow_grpc::clients::sourcetransformer::SourceTransformRequest; @@ -113,3 +115,62 @@ impl From for SinkRequest { } } } + +/// Sink's status for each [Message] written to Sink. +#[derive(PartialEq)] +pub(crate) enum ResponseStatusFromSink { + /// Successfully wrote to the Sink. + Success, + /// Failed with error message. + Failed(String), + /// Write to FallBack Sink. + Fallback, +} + +/// Sink will give a response per [Message]. +pub(crate) struct ResponseFromSink { + /// Unique id per [Message]. We need to track per [Message] status. + pub(crate) id: String, + /// Status of the "sink" operation per [Message]. + pub(crate) status: ResponseStatusFromSink, +} + +impl From for SinkResponse { + fn from(value: ResponseFromSink) -> Self { + let (status, err_msg) = match value.status { + ResponseStatusFromSink::Success => (Success, "".to_string()), + ResponseStatusFromSink::Failed(err) => (Failure, err.to_string()), + ResponseStatusFromSink::Fallback => (Fallback, "".to_string()), + }; + + Self { + result: Some(sink_response::Result { + id: value.id, + status: status as i32, + err_msg, + }), + handshake: None, + } + } +} + +impl TryFrom for ResponseFromSink { + type Error = crate::Error; + + fn try_from(value: SinkResponse) -> Result { + let value = value + .result + .ok_or(Error::SinkError("result is empty".to_string()))?; + + let status = match value.status() { + Success => ResponseStatusFromSink::Success, + Failure => ResponseStatusFromSink::Failed(value.err_msg), + Fallback => ResponseStatusFromSink::Fallback, + }; + + Ok(Self { + id: value.id, + status, + }) + } +} diff --git a/rust/numaflow-core/src/monovertex.rs b/rust/numaflow-core/src/monovertex.rs index 374a10e52d..eee02bd304 100644 --- a/rust/numaflow-core/src/monovertex.rs +++ b/rust/numaflow-core/src/monovertex.rs @@ -1,26 +1,28 @@ +use std::time::Duration; + +use tokio::signal; +use tokio::task::JoinHandle; +use 
tokio_util::sync::CancellationToken; +use tonic::transport::Channel; +use tracing::info; + +use numaflow_grpc::clients::sink::sink_client::SinkClient; +use numaflow_grpc::clients::source::source_client::SourceClient; +use numaflow_grpc::clients::sourcetransformer::source_transform_client::SourceTransformClient; + use crate::config::{config, Settings}; use crate::error; -use crate::reader::LagReader; use crate::shared::utils; use crate::shared::utils::create_rpc_channel; -use crate::sink::user_defined::SinkWriter; +use crate::sink::SinkHandle; use crate::source::generator::{new_generator, GeneratorAck, GeneratorLagReader, GeneratorRead}; use crate::source::user_defined::{ new_source, UserDefinedSourceAck, UserDefinedSourceLagReader, UserDefinedSourceRead, }; -use crate::source::{SourceAcker, SourceReader}; -use crate::transformer::user_defined::SourceTransformer; +use crate::source::SourceHandle; +use crate::transformer::user_defined::SourceTransformHandle; use forwarder::ForwarderBuilder; use metrics::UserDefinedContainerState; -use numaflow_grpc::clients::sink::sink_client::SinkClient; -use numaflow_grpc::clients::source::source_client::SourceClient; -use numaflow_grpc::clients::sourcetransformer::source_transform_client::SourceTransformClient; -use std::time::Duration; -use tokio::signal; -use tokio::task::JoinHandle; -use tokio_util::sync::CancellationToken; -use tonic::transport::Channel; -use tracing::info; /// [forwarder] orchestrates data movement from the Source to the Sink via the optional SourceTransformer. 
/// The forward-a-chunk executes the following in an infinite loop till a shutdown signal is received: @@ -78,8 +80,8 @@ async fn shutdown_signal() { } } -enum SourceType { - UdSource( +pub(crate) enum SourceType { + UserDefinedSource( UserDefinedSourceRead, UserDefinedSourceAck, UserDefinedSourceLagReader, @@ -155,7 +157,7 @@ async fn start_forwarder(cln_token: CancellationToken, config: &Settings) -> err ) .await?; - let source_type = fetch_source(&config, &mut source_grpc_client).await?; + let source_type = fetch_source(config, &mut source_grpc_client).await?; // Start the metrics server in a separate background async spawn, // This should be running throughout the lifetime of the application, hence the handle is not @@ -171,38 +173,21 @@ async fn start_forwarder(cln_token: CancellationToken, config: &Settings) -> err // FIXME: what to do with the handle utils::start_metrics_server(metrics_state).await; - match source_type { - SourceType::UdSource(udsource_reader, udsource_acker, udsource_lag_reader) => { - start_forwarder_with_source( - udsource_reader, - udsource_acker, - udsource_lag_reader, - sink_grpc_client, - transformer_grpc_client, - fb_sink_grpc_client, - cln_token, - ) - .await?; - } - SourceType::Generator(generator_reader, generator_acker, generator_lag_reader) => { - start_forwarder_with_source( - generator_reader, - generator_acker, - generator_lag_reader, - sink_grpc_client, - transformer_grpc_client, - fb_sink_grpc_client, - cln_token, - ) - .await?; - } - } + let source = SourceHandle::new(source_type); + start_forwarder_with_source( + source, + sink_grpc_client, + transformer_grpc_client, + fb_sink_grpc_client, + cln_token, + ) + .await?; info!("Forwarder stopped gracefully"); Ok(()) } -async fn fetch_source( +pub(crate) async fn fetch_source( config: &Settings, source_grpc_client: &mut Option>, ) -> crate::Result { @@ -213,7 +198,7 @@ async fn fetch_source( config.timeout_in_ms as u16, ) .await?; - SourceType::UdSource(source_read, 
source_ack, lag_reader) + SourceType::UserDefinedSource(source_read, source_ack, lag_reader) } else if let Some(generator_config) = &config.generator_config { let (source_read, source_ack, lag_reader) = new_generator( generator_config.content.clone(), @@ -230,39 +215,31 @@ async fn fetch_source( Ok(source_type) } -async fn start_forwarder_with_source( - source_reader: R, - source_acker: A, - source_lag_reader: L, +async fn start_forwarder_with_source( + source: SourceHandle, sink_grpc_client: SinkClient, transformer_client: Option>, fallback_sink_client: Option>, cln_token: CancellationToken, -) -> error::Result<()> -where - R: SourceReader, - A: SourceAcker, - L: LagReader + Clone + 'static, -{ +) -> error::Result<()> { // start the pending reader to publish pending metrics - let mut pending_reader = utils::create_pending_reader(source_lag_reader).await; + let mut pending_reader = utils::create_pending_reader(source.clone()).await; pending_reader.start().await; // build the forwarder - let sink_writer = SinkWriter::new(sink_grpc_client).await?; + let sink_writer = SinkHandle::new(sink_grpc_client).await?; - let mut forwarder_builder = - ForwarderBuilder::new(source_reader, source_acker, sink_writer, cln_token); + let mut forwarder_builder = ForwarderBuilder::new(source, sink_writer, cln_token); // add transformer if exists if let Some(transformer_client) = transformer_client { - let transformer = SourceTransformer::new(transformer_client).await?; + let transformer = SourceTransformHandle::new(transformer_client).await?; forwarder_builder = forwarder_builder.source_transformer(transformer); } // add fallback sink if exists if let Some(fallback_sink_client) = fallback_sink_client { - let fallback_writer = SinkWriter::new(fallback_sink_client).await?; + let fallback_writer = SinkHandle::new(fallback_sink_client).await?; forwarder_builder = forwarder_builder.fallback_sink_writer(fallback_writer); } // build the final forwarder diff --git 
a/rust/numaflow-core/src/monovertex/forwarder.rs b/rust/numaflow-core/src/monovertex/forwarder.rs index 164864d185..6ac44b7637 100644 --- a/rust/numaflow-core/src/monovertex/forwarder.rs +++ b/rust/numaflow-core/src/monovertex/forwarder.rs @@ -1,54 +1,50 @@ -use chrono::Utc; -use log::warn; use std::collections::HashMap; -use tokio::time::sleep; -use tokio_util::sync::CancellationToken; -use tracing::{debug, info}; use crate::config::{config, OnFailureStrategy}; +use crate::error; use crate::error::Error; -use crate::message::{Message, Offset}; +use crate::message::{Message, Offset, ResponseStatusFromSink}; use crate::monovertex::metrics; use crate::monovertex::metrics::forward_metrics; -use crate::sink::user_defined::SinkWriter; -use crate::transformer::user_defined::SourceTransformer; -use crate::{error, source}; -use numaflow_grpc::clients::sink::Status::{Failure, Fallback, Success}; +use crate::sink::SinkHandle; +use crate::{source::SourceHandle, transformer::user_defined::SourceTransformHandle}; + +use chrono::Utc; +use log::warn; +use tokio::time::sleep; +use tokio_util::sync::CancellationToken; +use tracing::{debug, info}; /// Forwarder is responsible for reading messages from the source, applying transformation if /// transformer is present, writing the messages to the sink, and then acknowledging the messages /// back to the source. -pub(crate) struct Forwarder { - source_read: R, - source_ack: A, - sink_writer: SinkWriter, - source_transformer: Option, - fb_sink_writer: Option, +pub(crate) struct Forwarder { + source_reader: SourceHandle, + sink_writer: SinkHandle, + source_transformer: Option, + fb_sink_writer: Option, cln_token: CancellationToken, common_labels: Vec<(String, String)>, } /// ForwarderBuilder is used to build a Forwarder instance with optional fields. 
-pub(crate) struct ForwarderBuilder { - source_read: R, - source_ack: A, - sink_writer: SinkWriter, +pub(crate) struct ForwarderBuilder { + source_reader: SourceHandle, + sink_writer: SinkHandle, cln_token: CancellationToken, - source_transformer: Option, - fb_sink_writer: Option, + source_transformer: Option, + fb_sink_writer: Option, } -impl ForwarderBuilder { +impl ForwarderBuilder { /// Create a new builder with mandatory fields pub(crate) fn new( - source_read: R, - source_ack: A, - sink_writer: SinkWriter, + source_reader: SourceHandle, + sink_writer: SinkHandle, cln_token: CancellationToken, ) -> Self { Self { - source_read, - source_ack, + source_reader, sink_writer, cln_token, source_transformer: None, @@ -57,24 +53,23 @@ impl ForwarderBuilder { } /// Set the optional transformer client - pub(crate) fn source_transformer(mut self, transformer_client: SourceTransformer) -> Self { + pub(crate) fn source_transformer(mut self, transformer_client: SourceTransformHandle) -> Self { self.source_transformer = Some(transformer_client); self } /// Set the optional fallback client - pub(crate) fn fallback_sink_writer(mut self, fallback_client: SinkWriter) -> Self { + pub(crate) fn fallback_sink_writer(mut self, fallback_client: SinkHandle) -> Self { self.fb_sink_writer = Some(fallback_client); self } /// Build the Forwarder instance #[must_use] - pub(crate) fn build(self) -> Forwarder { + pub(crate) fn build(self) -> Forwarder { let common_labels = metrics::forward_metrics_labels().clone(); Forwarder { - source_read: self.source_read, - source_ack: self.source_ack, + source_reader: self.source_reader, sink_writer: self.sink_writer, source_transformer: self.source_transformer, fb_sink_writer: self.fb_sink_writer, @@ -84,11 +79,7 @@ impl ForwarderBuilder { } } -impl Forwarder -where - A: source::SourceAcker, - R: source::SourceReader, -{ +impl Forwarder { /// start starts the forward-a-chunk loop and exits only after a chunk has been forwarded and ack'ed. 
/// this means that, in the happy path scenario a block is always completely processed. /// this function will return on any error and will cause end up in a non-0 exit code. @@ -129,7 +120,7 @@ where /// and then acknowledge the messages back to the source. async fn read_and_process_messages(&mut self) -> error::Result { let start_time = tokio::time::Instant::now(); - let messages = self.source_read.read().await.map_err(|e| { + let messages = self.source_reader.read().await.map_err(|e| { Error::ForwarderError(format!("Failed to read messages from source {:?}", e)) })?; @@ -198,13 +189,13 @@ where // Applies transformation to the messages if transformer is present // we concurrently apply transformation to all the messages. async fn apply_transformer(&mut self, messages: Vec) -> error::Result> { - let Some(transformer_client) = &mut self.source_transformer else { + let Some(client) = &mut self.source_transformer else { // return early if there is no transformer return Ok(messages); }; let start_time = tokio::time::Instant::now(); - let results = transformer_client.transform_fn(messages).await?; + let results = client.transform(messages).await?; debug!( "Transformer latency - {}ms", @@ -362,7 +353,7 @@ where messages_to_send: &mut Vec, ) -> error::Result { let start_time = tokio::time::Instant::now(); - match self.sink_writer.sink_fn(messages_to_send.clone()).await { + match self.sink_writer.sink(messages_to_send.clone()).await { Ok(response) => { debug!("Sink latency - {}ms", start_time.elapsed().as_millis()); @@ -370,13 +361,8 @@ where // for the udsink to return the results in the same order as the requests let result_map = response .into_iter() - .map(|resp| match resp.result { - Some(result) => Ok((result.id.clone(), result)), - None => Err(Error::SinkError( - "Response does not contain a result".to_string(), - )), - }) - .collect::>>()?; + .map(|resp| (resp.id, resp.status)) + .collect::>(); error_map.clear(); // drain all the messages that were successfully 
written @@ -384,14 +370,16 @@ where // construct the error map for the failed messages messages_to_send.retain(|msg| { if let Some(result) = result_map.get(&msg.id) { - return if result.status == Success as i32 { - false - } else if result.status == Fallback as i32 { - fallback_msgs.push(msg.clone()); // add to fallback messages - false - } else { - *error_map.entry(result.err_msg.clone()).or_insert(0) += 1; - true + return match result { + ResponseStatusFromSink::Success => false, + ResponseStatusFromSink::Failed(err_msg) => { + *error_map.entry(err_msg.clone()).or_insert(0) += 1; + true + } + ResponseStatusFromSink::Fallback => { + fallback_msgs.push(msg.clone()); + false + } }; } false @@ -441,7 +429,7 @@ where while attempts < max_attempts { let start_time = tokio::time::Instant::now(); - match fallback_client.sink_fn(messages_to_send.clone()).await { + match fallback_client.sink(messages_to_send.clone()).await { Ok(fb_response) => { debug!( "Fallback sink latency - {}ms", @@ -451,14 +439,9 @@ where // create a map of id to result, since there is no strict requirement // for the udsink to return the results in the same order as the requests let result_map = fb_response - .iter() - .map(|resp| match &resp.result { - Some(result) => Ok((result.id.clone(), result)), - None => Err(Error::SinkError( - "Response does not contain a result".to_string(), - )), - }) - .collect::>>()?; + .into_iter() + .map(|resp| (resp.id, resp.status)) + .collect::>(); let mut contains_fallback_status = false; @@ -468,17 +451,17 @@ where // construct the error map for the failed messages messages_to_send.retain(|msg| { if let Some(result) = result_map.get(&msg.id) { - if result.status == Failure as i32 { - *fallback_error_map - .entry(result.err_msg.clone()) - .or_insert(0) += 1; - true - } else if result.status == Fallback as i32 { - contains_fallback_status = true; - false - } else { - false - } + return match result { + ResponseStatusFromSink::Success => false, + 
ResponseStatusFromSink::Failed(err_msg) => { + *fallback_error_map.entry(err_msg.clone()).or_insert(0) += 1; + true + } + ResponseStatusFromSink::Fallback => { + contains_fallback_status = true; + false + } + }; } else { false } @@ -525,7 +508,7 @@ where let n = offsets.len(); let start_time = tokio::time::Instant::now(); - self.source_ack.ack(offsets).await?; + self.source_reader.ack(offsets).await?; debug!("Ack latency - {}ms", start_time.elapsed().as_millis()); @@ -548,10 +531,12 @@ mod tests { use crate::config::config; use crate::monovertex::forwarder::ForwarderBuilder; + use crate::monovertex::SourceType; use crate::shared::utils::create_rpc_channel; - use crate::sink::user_defined::SinkWriter; + use crate::sink::SinkHandle; use crate::source::user_defined::new_source; - use crate::transformer::user_defined::SourceTransformer; + use crate::source::SourceHandle; + use crate::transformer::user_defined::SourceTransformHandle; use chrono::Utc; use numaflow::source::{Message, Offset, SourceReadRequest}; use numaflow::{sink, source, sourcetransform}; @@ -739,7 +724,7 @@ mod tests { let cln_token = CancellationToken::new(); - let (source_read, source_ack, _) = new_source( + let (source_read, source_ack, source_lag_reader) = new_source( SourceClient::new(create_rpc_channel(source_sock_file.clone()).await.unwrap()), config().batch_size as usize, config().timeout_in_ms as u16, @@ -747,22 +732,27 @@ mod tests { .await .expect("failed to connect to source server"); - let sink_writer = SinkWriter::new(SinkClient::new( + let src_reader = SourceHandle::new(SourceType::UserDefinedSource( + source_read, + source_ack, + source_lag_reader, + )); + + let sink_writer = SinkHandle::new(SinkClient::new( create_rpc_channel(sink_sock_file).await.unwrap(), )) .await .expect("failed to connect to sink server"); - let transformer_client = SourceTransformer::new(SourceTransformClient::new( + let transformer_client = SourceTransformHandle::new(SourceTransformClient::new( 
create_rpc_channel(transformer_sock_file).await.unwrap(), )) .await .expect("failed to connect to transformer server"); - let mut forwarder = - ForwarderBuilder::new(source_read, source_ack, sink_writer, cln_token.clone()) - .source_transformer(transformer_client) - .build(); + let mut forwarder = ForwarderBuilder::new(src_reader, sink_writer, cln_token.clone()) + .source_transformer(transformer_client) + .build(); // Assert the received message in a different task let assert_handle = tokio::spawn(async move { @@ -864,7 +854,7 @@ mod tests { let cln_token = CancellationToken::new(); - let (source_read, source_ack, _) = new_source( + let (source_read, source_ack, lag_reader) = new_source( SourceClient::new(create_rpc_channel(source_sock_file.clone()).await.unwrap()), 500, 100, @@ -872,14 +862,20 @@ mod tests { .await .expect("failed to connect to source server"); - let sink_writer = SinkWriter::new(SinkClient::new( + let source_reader = SourceHandle::new(SourceType::UserDefinedSource( + source_read, + source_ack, + lag_reader, + )); + + let sink_writer = SinkHandle::new(SinkClient::new( create_rpc_channel(sink_sock_file).await.unwrap(), )) .await .expect("failed to connect to sink server"); let mut forwarder = - ForwarderBuilder::new(source_read, source_ack, sink_writer, cln_token.clone()).build(); + ForwarderBuilder::new(source_reader, sink_writer, cln_token.clone()).build(); let cancel_handle = tokio::spawn(async move { tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; @@ -981,7 +977,7 @@ mod tests { let cln_token = CancellationToken::new(); - let (source_read, source_ack, _) = new_source( + let (source_read, source_ack, source_lag_reader) = new_source( SourceClient::new(create_rpc_channel(source_sock_file.clone()).await.unwrap()), 500, 100, @@ -989,22 +985,27 @@ mod tests { .await .expect("failed to connect to source server"); - let sink_writer = SinkWriter::new(SinkClient::new( + let source = SourceHandle::new(SourceType::UserDefinedSource( + 
source_read, + source_ack, + source_lag_reader, + )); + + let sink_writer = SinkHandle::new(SinkClient::new( create_rpc_channel(sink_sock_file).await.unwrap(), )) .await .expect("failed to connect to sink server"); - let fb_sink_writer = SinkWriter::new(SinkClient::new( + let fb_sink_writer = SinkHandle::new(SinkClient::new( create_rpc_channel(fb_sink_sock_file).await.unwrap(), )) .await .expect("failed to connect to fb sink server"); - let mut forwarder = - ForwarderBuilder::new(source_read, source_ack, sink_writer, cln_token.clone()) - .fallback_sink_writer(fb_sink_writer) - .build(); + let mut forwarder = ForwarderBuilder::new(source, sink_writer, cln_token.clone()) + .fallback_sink_writer(fb_sink_writer) + .build(); let assert_handle = tokio::spawn(async move { let received_message = sink_rx.recv().await.unwrap(); diff --git a/rust/numaflow-core/src/monovertex/metrics.rs b/rust/numaflow-core/src/monovertex/metrics.rs index 9ee6f5c65e..f5d432c765 100644 --- a/rust/numaflow-core/src/monovertex/metrics.rs +++ b/rust/numaflow-core/src/monovertex/metrics.rs @@ -23,13 +23,14 @@ use tonic::transport::Channel; use tonic::Request; use tracing::{debug, error, info}; -use crate::config::config; -use crate::error::Error; -use crate::reader; use numaflow_grpc::clients::sink::sink_client::SinkClient; use numaflow_grpc::clients::source::source_client::SourceClient; use numaflow_grpc::clients::sourcetransformer::source_transform_client::SourceTransformClient; +use crate::config::config; +use crate::error::Error; +use crate::source::SourceHandle; + // Define the labels for the metrics // Note: Please keep consistent with the definitions in MonoVertex daemon const MVTX_NAME_LABEL: &str = "mvtx_name"; @@ -341,8 +342,8 @@ struct TimestampedPending { /// PendingReader is responsible for periodically checking the lag of the reader /// and exposing the metrics. It maintains a list of pending stats and ensures that /// only the most recent entries are kept. 
-pub(crate) struct PendingReader { - lag_reader: T, +pub(crate) struct PendingReader { + lag_reader: SourceHandle, lag_checking_interval: Duration, refresh_interval: Duration, buildup_handle: Option>, @@ -351,14 +352,14 @@ pub(crate) struct PendingReader { } /// PendingReaderBuilder is used to build a [LagReader] instance. -pub(crate) struct PendingReaderBuilder { - lag_reader: T, +pub(crate) struct PendingReaderBuilder { + lag_reader: SourceHandle, lag_checking_interval: Option, refresh_interval: Option, } -impl PendingReaderBuilder { - pub(crate) fn new(lag_reader: T) -> Self { +impl PendingReaderBuilder { + pub(crate) fn new(lag_reader: SourceHandle) -> Self { Self { lag_reader, lag_checking_interval: None, @@ -376,7 +377,7 @@ impl PendingReaderBuilder { self } - pub(crate) fn build(self) -> PendingReader { + pub(crate) fn build(self) -> PendingReader { PendingReader { lag_reader: self.lag_reader, lag_checking_interval: self @@ -392,7 +393,7 @@ impl PendingReaderBuilder { } } -impl PendingReader { +impl PendingReader { /// Starts the lag reader by spawning tasks to build up pending info and expose pending metrics. /// /// This method spawns two asynchronous tasks: @@ -416,7 +417,7 @@ impl PendingReader { } /// When the PendingReader is dropped, we need to clean up the pending exposer and the pending builder tasks. -impl Drop for PendingReader { +impl Drop for PendingReader { fn drop(&mut self) { if let Some(handle) = self.expose_handle.take() { handle.abort(); @@ -430,15 +431,15 @@ impl Drop for PendingReader { } /// Periodically checks the pending messages from the source client and build the pending stats. 
-async fn build_pending_info( - mut lag_reader: T, +async fn build_pending_info( + source: SourceHandle, lag_checking_interval: Duration, pending_stats: Arc>>, ) { let mut ticker = time::interval(lag_checking_interval); loop { ticker.tick().await; - match fetch_pending(&mut lag_reader).await { + match fetch_pending(&source).await { Ok(pending) => { if pending != -1 { let mut stats = pending_stats.lock().await; @@ -460,7 +461,7 @@ async fn build_pending_info( } } -async fn fetch_pending(lag_reader: &mut T) -> crate::error::Result { +async fn fetch_pending(lag_reader: &SourceHandle) -> crate::error::Result { let response: i64 = lag_reader.pending().await?.map_or(-1, |p| p as i64); // default to -1(unavailable) Ok(response) } diff --git a/rust/numaflow-core/src/shared/utils.rs b/rust/numaflow-core/src/shared/utils.rs index de4c79187b..7dd1d51ac3 100644 --- a/rust/numaflow-core/src/shared/utils.rs +++ b/rust/numaflow-core/src/shared/utils.rs @@ -3,12 +3,13 @@ use std::path::PathBuf; use std::time::Duration; use crate::config::config; +use crate::error; use crate::error::Error; use crate::monovertex::metrics::{ start_metrics_https_server, PendingReader, PendingReaderBuilder, UserDefinedContainerState, }; use crate::shared::server_info; -use crate::{error, reader}; +use crate::source::SourceHandle; use numaflow_grpc::clients::sink::sink_client::SinkClient; use numaflow_grpc::clients::source::source_client::SourceClient; use numaflow_grpc::clients::sourcetransformer::source_transform_client::SourceTransformClient; @@ -85,9 +86,7 @@ pub(crate) async fn start_metrics_server( }) } -pub(crate) async fn create_pending_reader( - lag_reader_grpc_client: T, -) -> PendingReader { +pub(crate) async fn create_pending_reader(lag_reader_grpc_client: SourceHandle) -> PendingReader { PendingReaderBuilder::new(lag_reader_grpc_client) .lag_checking_interval(Duration::from_secs( config().lag_check_interval_in_secs.into(), diff --git a/rust/numaflow-core/src/sink.rs 
b/rust/numaflow-core/src/sink.rs index ccd6fb8fbe..e39892ddd8 100644 --- a/rust/numaflow-core/src/sink.rs +++ b/rust/numaflow-core/src/sink.rs @@ -1,4 +1,89 @@ +use tokio::sync::{mpsc, oneshot}; +use tonic::transport::Channel; + +use crate::config::config; +use crate::message::{Message, ResponseFromSink}; +use numaflow_grpc::clients::sink::sink_client::SinkClient; +use user_defined::UserDefinedSink; + /// [User-Defined Sink] extends Numaflow to add custom sources supported outside the builtins. /// /// [User-Defined Sink]: https://numaflow.numaproj.io/user-guide/sinks/user-defined-sinks/ -pub(crate) mod user_defined; +mod user_defined; + +/// Set of items to be implemented be a Numaflow Sink. +/// +/// [Sink]: https://numaflow.numaproj.io/user-guide/sinks/overview/ +#[trait_variant::make(Sink: Send)] +#[allow(unused)] +pub(crate) trait LocalSink { + /// Write the messages to the Sink. + async fn sink(&mut self, messages: Vec) -> crate::Result>; +} + +enum ActorMessage { + Sink { + messages: Vec, + respond_to: oneshot::Sender>>, + }, +} + +struct SinkActor { + actor_messages: mpsc::Receiver, + sink: T, +} + +impl SinkActor +where + T: Sink, +{ + fn new(actor_messages: mpsc::Receiver, sink: T) -> Self { + Self { + actor_messages, + sink, + } + } + + async fn handle_message(&mut self, msg: ActorMessage) { + match msg { + ActorMessage::Sink { + messages, + respond_to, + } => { + let response = self.sink.sink(messages).await; + let _ = respond_to.send(response); + } + } + } +} + +pub(crate) struct SinkHandle { + sender: mpsc::Sender, +} + +impl SinkHandle { + pub(crate) async fn new(sink_client: SinkClient) -> crate::Result { + let (sender, receiver) = mpsc::channel(config().batch_size as usize); + let sink = UserDefinedSink::new(sink_client).await?; + tokio::spawn(async move { + let mut actor = SinkActor::new(receiver, sink); + while let Some(msg) = actor.actor_messages.recv().await { + actor.handle_message(msg).await; + } + }); + Ok(Self { sender }) + } + + pub(crate) 
async fn sink( + &self, + messages: Vec, + ) -> crate::Result> { + let (tx, rx) = oneshot::channel(); + let msg = ActorMessage::Sink { + messages, + respond_to: tx, + }; + let _ = self.sender.send(msg).await; + rx.await.unwrap() + } +} diff --git a/rust/numaflow-core/src/sink/user_defined.rs b/rust/numaflow-core/src/sink/user_defined.rs index 54ec4bc527..529abfed56 100644 --- a/rust/numaflow-core/src/sink/user_defined.rs +++ b/rust/numaflow-core/src/sink/user_defined.rs @@ -1,23 +1,26 @@ -use crate::error; -use crate::error::Error; -use crate::message::Message; -use numaflow_grpc::clients::sink::sink_client::SinkClient; -use numaflow_grpc::clients::sink::sink_request::Status; -use numaflow_grpc::clients::sink::{Handshake, SinkRequest, SinkResponse}; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; use tonic::transport::Channel; use tonic::{Request, Streaming}; +use numaflow_grpc::clients::sink::sink_client::SinkClient; +use numaflow_grpc::clients::sink::sink_request::Status; +use numaflow_grpc::clients::sink::{Handshake, SinkRequest, SinkResponse}; + +use crate::error; +use crate::error::Error; +use crate::message::{Message, ResponseFromSink}; +use crate::sink::Sink; + const DEFAULT_CHANNEL_SIZE: usize = 1000; -/// SinkWriter writes messages to a sink. -pub struct SinkWriter { +/// User-Defined Sink code writes messages to a custom [Sink]. +pub struct UserDefinedSink { sink_tx: mpsc::Sender, resp_stream: Streaming, } -impl SinkWriter { +impl UserDefinedSink { pub(crate) async fn new(mut client: SinkClient) -> error::Result { let (sink_tx, sink_rx) = mpsc::channel(DEFAULT_CHANNEL_SIZE); let sink_stream = ReceiverStream::new(sink_rx); @@ -54,12 +57,11 @@ impl SinkWriter { resp_stream, }) } +} +impl Sink for UserDefinedSink { /// writes a set of messages to the sink. 
- pub(crate) async fn sink_fn( - &mut self, - messages: Vec, - ) -> error::Result> { + async fn sink(&mut self, messages: Vec) -> error::Result> { let requests: Vec = messages.into_iter().map(|message| message.into()).collect(); let num_requests = requests.len(); @@ -93,7 +95,7 @@ impl SinkWriter { .message() .await? .ok_or(Error::SinkError("failed to receive response".to_string()))?; - responses.push(response); + responses.push(response.try_into()?); } Ok(responses) @@ -102,6 +104,8 @@ impl SinkWriter { #[cfg(test)] mod tests { + use super::*; + use chrono::offset::Utc; use numaflow::sink; use tokio::sync::mpsc; @@ -110,7 +114,7 @@ mod tests { use crate::error::Result; use crate::message::{Message, Offset}; use crate::shared::utils::create_rpc_channel; - use crate::sink::user_defined::SinkWriter; + use crate::sink::user_defined::UserDefinedSink; use numaflow_grpc::clients::sink::sink_client::SinkClient; struct Logger; @@ -157,7 +161,7 @@ mod tests { tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; let mut sink_client = - SinkWriter::new(SinkClient::new(create_rpc_channel(sock_file).await?)) + UserDefinedSink::new(SinkClient::new(create_rpc_channel(sock_file).await?)) .await .expect("failed to connect to sink server"); @@ -186,10 +190,10 @@ mod tests { }, ]; - let response = sink_client.sink_fn(messages.clone()).await?; + let response = sink_client.sink(messages.clone()).await?; assert_eq!(response.len(), 2); - let response = sink_client.sink_fn(messages.clone()).await?; + let response = sink_client.sink(messages.clone()).await?; assert_eq!(response.len(), 2); drop(sink_client); diff --git a/rust/numaflow-core/src/source.rs b/rust/numaflow-core/src/source.rs index 32fea1c0ad..5a825b0c78 100644 --- a/rust/numaflow-core/src/source.rs +++ b/rust/numaflow-core/src/source.rs @@ -1,10 +1,20 @@ -use crate::message::{Message, Offset}; +use tokio::sync::{mpsc, oneshot}; + +use crate::config::config; +use crate::{ + message::{Message, Offset}, + 
monovertex::SourceType, + reader::LagReader, +}; /// [User-Defined Source] extends Numaflow to add custom sources supported outside the builtins. /// /// [User-Defined Source]: https://numaflow.numaproj.io/user-guide/sources/user-defined-sources/ pub(crate) mod user_defined; +/// [Generator] is a builtin to generate data for load testing and other internal use-cases. +/// +/// [Generator]: https://numaflow.numaproj.io/user-guide/sources/generator/ pub(crate) mod generator; /// Set of Read related items that has to be implemented to become a Source. @@ -25,3 +35,121 @@ pub(crate) trait SourceAcker { /// acknowledge an offset. The implementor might choose to do it in an asynchronous way. async fn ack(&mut self, _: Vec) -> crate::Result<()>; } + +enum ActorMessage { + #[allow(dead_code)] + Name { + respond_to: oneshot::Sender<&'static str>, + }, + Read { + respond_to: oneshot::Sender>>, + }, + Ack { + respond_to: oneshot::Sender>, + offsets: Vec, + }, + Pending { + respond_to: oneshot::Sender>>, + }, +} + +struct SourceActor { + receiver: mpsc::Receiver, + reader: R, + acker: A, + lag_reader: L, +} + +impl SourceActor +where + R: SourceReader, + A: SourceAcker, + L: LagReader, +{ + fn new(receiver: mpsc::Receiver, reader: R, acker: A, lag_reader: L) -> Self { + Self { + receiver, + reader, + acker, + lag_reader, + } + } + + async fn handle_message(&mut self, msg: ActorMessage) { + match msg { + ActorMessage::Name { respond_to } => { + let name = self.reader.name(); + let _ = respond_to.send(name); + } + ActorMessage::Read { respond_to } => { + let msgs = self.reader.read().await; + let _ = respond_to.send(msgs); + } + ActorMessage::Ack { + respond_to, + offsets, + } => { + let ack = self.acker.ack(offsets).await; + let _ = respond_to.send(ack); + } + ActorMessage::Pending { respond_to } => { + let pending = self.lag_reader.pending().await; + let _ = respond_to.send(pending); + } + } + } +} + +#[derive(Clone)] +pub(crate) struct SourceHandle { + sender: mpsc::Sender, 
+} + +impl SourceHandle { + pub(crate) fn new(src_type: SourceType) -> Self { + let (sender, receiver) = mpsc::channel(config().batch_size as usize); + match src_type { + SourceType::UserDefinedSource(reader, acker, lag_reader) => { + tokio::spawn(async move { + let mut actor = SourceActor::new(receiver, reader, acker, lag_reader); + while let Some(msg) = actor.receiver.recv().await { + actor.handle_message(msg).await; + } + }); + } + SourceType::Generator(reader, acker, lag_reader) => { + tokio::spawn(async move { + let mut actor = SourceActor::new(receiver, reader, acker, lag_reader); + while let Some(msg) = actor.receiver.recv().await { + actor.handle_message(msg).await; + } + }); + } + }; + Self { sender } + } + + pub(crate) async fn read(&self) -> crate::Result> { + let (sender, receiver) = oneshot::channel(); + let msg = ActorMessage::Read { respond_to: sender }; + let _ = self.sender.send(msg).await; + receiver.await.unwrap() + } + + pub(crate) async fn ack(&self, offsets: Vec) -> crate::Result<()> { + let (sender, receiver) = oneshot::channel(); + let msg = ActorMessage::Ack { + respond_to: sender, + offsets, + }; + let _ = self.sender.send(msg).await; + receiver.await.unwrap() + } + + pub(crate) async fn pending(&self) -> crate::error::Result> { + let (sender, receiver) = oneshot::channel(); + let msg = ActorMessage::Pending { respond_to: sender }; + let _ = self.sender.send(msg).await; + receiver.await.unwrap() + } +} diff --git a/rust/numaflow-core/src/source/generator.rs b/rust/numaflow-core/src/source/generator.rs index 611969c740..08f20e25e0 100644 --- a/rust/numaflow-core/src/source/generator.rs +++ b/rust/numaflow-core/src/source/generator.rs @@ -1,9 +1,11 @@ +use std::time::Duration; + +use bytes::Bytes; +use futures::StreamExt; + use crate::message::{Message, Offset}; use crate::reader; use crate::source; -use bytes::Bytes; -use futures::StreamExt; -use std::time::Duration; /// Stream Generator returns a set of messages for every `.next` call. 
It will throttle itself if /// the call exceeds the RPU. It will return a max (batch size, RPU) till the quota for that unit of diff --git a/rust/numaflow-core/src/source/user_defined.rs b/rust/numaflow-core/src/source/user_defined.rs index 3eb0846667..4ced502036 100644 --- a/rust/numaflow-core/src/source/user_defined.rs +++ b/rust/numaflow-core/src/source/user_defined.rs @@ -1,18 +1,20 @@ +use tokio::sync::mpsc; +use tokio_stream::wrappers::ReceiverStream; +use tonic::transport::Channel; +use tonic::{Request, Streaming}; + +use numaflow_grpc::clients::source; +use numaflow_grpc::clients::source::source_client::SourceClient; +use numaflow_grpc::clients::source::{ + read_request, AckRequest, AckResponse, ReadRequest, ReadResponse, +}; + use crate::config::config; use crate::error; use crate::error::Error::SourceError; use crate::message::{Message, Offset}; use crate::reader::LagReader; use crate::source::{SourceAcker, SourceReader}; -use numaflow_grpc::clients::source; -use numaflow_grpc::clients::source::source_client::SourceClient; -use numaflow_grpc::clients::source::{ - read_request, AckRequest, AckResponse, ReadRequest, ReadResponse, -}; -use tokio::sync::mpsc; -use tokio_stream::wrappers::ReceiverStream; -use tonic::transport::Channel; -use tonic::{Request, Streaming}; /// User-Defined Source to operative on custom sources. 
#[derive(Debug)] diff --git a/rust/numaflow-core/src/transformer/user_defined.rs b/rust/numaflow-core/src/transformer/user_defined.rs index f06346053e..42131622ba 100644 --- a/rust/numaflow-core/src/transformer/user_defined.rs +++ b/rust/numaflow-core/src/transformer/user_defined.rs @@ -1,14 +1,6 @@ use std::collections::HashMap; -use crate::config::config; -use crate::error::{Error, Result}; -use crate::message::{Message, Offset}; -use crate::shared::utils::utc_from_timestamp; -use numaflow_grpc::clients::sourcetransformer::{ - self, source_transform_client::SourceTransformClient, SourceTransformRequest, - SourceTransformResponse, -}; -use tokio::sync::mpsc; +use tokio::sync::{mpsc, oneshot}; use tokio::task::JoinHandle; use tokio_stream::wrappers::ReceiverStream; use tokio_util::sync::CancellationToken; @@ -16,16 +8,30 @@ use tonic::transport::Channel; use tonic::{Request, Streaming}; use tracing::warn; +use numaflow_grpc::clients::sourcetransformer::{ + self, source_transform_client::SourceTransformClient, SourceTransformRequest, + SourceTransformResponse, +}; + +use crate::config::config; +use crate::error::{Error, Result}; +use crate::message::{Message, Offset}; +use crate::shared::utils::utc_from_timestamp; + const DROP: &str = "U+005C__DROP__"; /// TransformerClient is a client to interact with the transformer server. 
-pub struct SourceTransformer { +struct SourceTransformer { + actor_messages: mpsc::Receiver, read_tx: mpsc::Sender, resp_stream: Streaming, } impl SourceTransformer { - pub(crate) async fn new(mut client: SourceTransformClient) -> Result { + async fn new( + mut client: SourceTransformClient, + actor_messages: mpsc::Receiver, + ) -> Result { let (read_tx, read_rx) = mpsc::channel(config().batch_size as usize); let read_stream = ReceiverStream::new(read_rx); @@ -56,12 +62,25 @@ impl SourceTransformer { } Ok(Self { + actor_messages, read_tx, resp_stream, }) } - pub(crate) async fn transform_fn(&mut self, messages: Vec) -> Result> { + async fn handle_message(&mut self, message: ActorMessage) { + match message { + ActorMessage::Transform { + messages, + respond_to, + } => { + let result = self.transform_fn(messages).await; + let _ = respond_to.send(result); + } + } + } + + async fn transform_fn(&mut self, messages: Vec) -> Result> { // fields which will not be changed struct MessageInfo { offset: Offset, @@ -169,13 +188,48 @@ impl SourceTransformer { } } +enum ActorMessage { + Transform { + messages: Vec, + respond_to: oneshot::Sender>>, + }, +} + +#[derive(Clone)] +pub(crate) struct SourceTransformHandle { + sender: mpsc::Sender, +} + +impl SourceTransformHandle { + pub(crate) async fn new(client: SourceTransformClient) -> crate::Result { + let (sender, receiver) = mpsc::channel(config().batch_size as usize); + let mut client = SourceTransformer::new(client, receiver).await?; + tokio::spawn(async move { + while let Some(msg) = client.actor_messages.recv().await { + client.handle_message(msg).await; + } + }); + Ok(Self { sender }) + } + + pub(crate) async fn transform(&self, messages: Vec) -> Result> { + let (sender, receiver) = oneshot::channel(); + let msg = ActorMessage::Transform { + messages, + respond_to: sender, + }; + let _ = self.sender.send(msg).await; + receiver.await.unwrap() + } +} + #[cfg(test)] mod tests { use std::error::Error; use std::time::Duration; 
use crate::shared::utils::create_rpc_channel; - use crate::transformer::user_defined::SourceTransformer; + use crate::transformer::user_defined::SourceTransformHandle; use numaflow::sourcetransform; use numaflow_grpc::clients::sourcetransformer::source_transform_client::SourceTransformClient; use tempfile::TempDir; @@ -216,7 +270,7 @@ mod tests { // wait for the server to start tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; - let mut client = SourceTransformer::new(SourceTransformClient::new( + let client = SourceTransformHandle::new(SourceTransformClient::new( create_rpc_channel(sock_file).await?, )) .await?; @@ -235,7 +289,7 @@ mod tests { let resp = tokio::time::timeout( tokio::time::Duration::from_secs(2), - client.transform_fn(vec![message]), + client.transform(vec![message]), ) .await??; assert_eq!(resp.len(), 1); @@ -291,7 +345,7 @@ mod tests { // wait for the server to start tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; - let mut client = SourceTransformer::new(SourceTransformClient::new( + let client = SourceTransformHandle::new(SourceTransformClient::new( create_rpc_channel(sock_file).await?, )) .await?; @@ -308,7 +362,7 @@ mod tests { headers: Default::default(), }; - let resp = client.transform_fn(vec![message]).await?; + let resp = client.transform(vec![message]).await?; assert!(resp.is_empty()); // we need to drop the client, because if there are any in-flight requests From 6fb36acfc31f07bd53bfadf587fd2253dda9fe34 Mon Sep 17 00:00:00 2001 From: Yashash H L Date: Mon, 14 Oct 2024 22:29:57 +0530 Subject: [PATCH 104/188] feat: Unify MapStream and Unary Map Operations Using a Shared gRPC Protocol (#2149) Signed-off-by: Yashash H L Signed-off-by: Vigith Maurice Co-authored-by: Vigith Maurice --- go.mod | 2 +- go.sum | 4 +- pkg/apis/proto/map/v1/map.proto | 6 +- pkg/apis/proto/sink/v1/sink.proto | 13 +- pkg/sdkclient/mapper/client.go | 41 +++- pkg/sdkclient/mapper/interface.go | 1 + 
pkg/sdkclient/mapstreamer/client.go | 105 ---------- pkg/sdkclient/mapstreamer/client_test.go | 106 ---------- pkg/sdkclient/mapstreamer/interface.go | 31 --- pkg/sdkclient/sinker/client.go | 16 +- pkg/sdkclient/sinker/client_test.go | 5 + pkg/sinks/udsink/udsink_grpc_test.go | 3 + pkg/udf/forward/forward.go | 133 ++++++------ pkg/udf/map_udf.go | 7 +- pkg/udf/rpc/grpc_map.go | 68 +++++++ pkg/udf/rpc/grpc_mapstream.go | 135 ------------- pkg/udf/rpc/grpc_mapstream_test.go | 211 -------------------- rust/Cargo.lock | 26 ++- rust/numaflow-core/Cargo.toml | 2 +- rust/numaflow-core/src/message.rs | 1 + rust/numaflow-core/src/sink/user_defined.rs | 19 +- rust/numaflow-grpc/src/clients/map.v1.rs | 6 +- rust/numaflow-grpc/src/clients/sink.v1.rs | 16 +- 23 files changed, 267 insertions(+), 690 deletions(-) delete mode 100644 pkg/sdkclient/mapstreamer/client.go delete mode 100644 pkg/sdkclient/mapstreamer/client_test.go delete mode 100644 pkg/sdkclient/mapstreamer/interface.go delete mode 100644 pkg/udf/rpc/grpc_mapstream.go delete mode 100644 pkg/udf/rpc/grpc_mapstream_test.go diff --git a/go.mod b/go.mod index 07f6cc2d62..02e20f7a35 100644 --- a/go.mod +++ b/go.mod @@ -32,7 +32,7 @@ require ( github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe github.com/nats-io/nats-server/v2 v2.10.20 github.com/nats-io/nats.go v1.37.0 - github.com/numaproj/numaflow-go v0.8.2-0.20241013052921-0aa35d8766f1 + github.com/numaproj/numaflow-go v0.8.2-0.20241014112709-e12c1b5176bd github.com/prometheus/client_golang v1.19.1 github.com/prometheus/client_model v0.6.1 github.com/prometheus/common v0.55.0 diff --git a/go.sum b/go.sum index d481755086..8df905cd8e 100644 --- a/go.sum +++ b/go.sum @@ -483,8 +483,8 @@ github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDm github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/niemeyer/pretty 
v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/numaproj/numaflow-go v0.8.2-0.20241013052921-0aa35d8766f1 h1:4uHQqImTmgGkCFrgEhX7atxsAe/nRgjv/2Px0rwqw/I= -github.com/numaproj/numaflow-go v0.8.2-0.20241013052921-0aa35d8766f1/go.mod h1:FaCMeV0V9SiLcVf2fwT+GeTJHNaK2gdQsTAIqQ4x7oc= +github.com/numaproj/numaflow-go v0.8.2-0.20241014112709-e12c1b5176bd h1:yL7sbAaeCw2rWar1CF19N69KEHmcJpL1YjtqOWEG41c= +github.com/numaproj/numaflow-go v0.8.2-0.20241014112709-e12c1b5176bd/go.mod h1:FaCMeV0V9SiLcVf2fwT+GeTJHNaK2gdQsTAIqQ4x7oc= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= diff --git a/pkg/apis/proto/map/v1/map.proto b/pkg/apis/proto/map/v1/map.proto index 49e88740b3..260570a761 100644 --- a/pkg/apis/proto/map/v1/map.proto +++ b/pkg/apis/proto/map/v1/map.proto @@ -47,7 +47,7 @@ message MapRequest { // This ID is used to uniquely identify a map request string id = 2; optional Handshake handshake = 3; - optional Status status = 4; + optional TransmissionStatus status = 4; } /* @@ -61,7 +61,7 @@ message Handshake { /* * Status message to indicate the status of the message. */ -message Status { +message TransmissionStatus { bool eot = 1; } @@ -78,7 +78,7 @@ message MapResponse { // This ID is used to refer the responses to the request it corresponds to. string id = 2; optional Handshake handshake = 3; - optional Status status = 4; + optional TransmissionStatus status = 4; } /** diff --git a/pkg/apis/proto/sink/v1/sink.proto b/pkg/apis/proto/sink/v1/sink.proto index 255b9b6969..8f42720b5d 100644 --- a/pkg/apis/proto/sink/v1/sink.proto +++ b/pkg/apis/proto/sink/v1/sink.proto @@ -45,14 +45,11 @@ message SinkRequest { string id = 5; map headers = 6; } - message Status { - bool eot = 1; - } // Required field indicating the request. 
Request request = 1; // Required field indicating the status of the request. // If eot is set to true, it indicates the end of transmission. - Status status = 2; + TransmissionStatus status = 2; // optional field indicating the handshake message. optional Handshake handshake = 3; } @@ -72,6 +69,13 @@ message ReadyResponse { bool ready = 1; } +/** + * TransmissionStatus is the status of the transmission. + */ +message TransmissionStatus { + bool eot = 1; +} + /* * Status is the status of the response. */ @@ -95,4 +99,5 @@ message SinkResponse { } Result result = 1; optional Handshake handshake = 2; + optional TransmissionStatus status = 3; } \ No newline at end of file diff --git a/pkg/sdkclient/mapper/client.go b/pkg/sdkclient/mapper/client.go index 972e842d4e..6bb47a0568 100644 --- a/pkg/sdkclient/mapper/client.go +++ b/pkg/sdkclient/mapper/client.go @@ -19,6 +19,7 @@ package mapper import ( "context" "fmt" + "io" "time" "go.uber.org/zap" @@ -157,7 +158,7 @@ func (c *client) MapFn(ctx context.Context, requests []*mappb.MapRequest) ([]*ma if c.batchMapMode { // if it is a batch map, we need to send an end of transmission message to the server // to indicate that the batch is finished. - requests = append(requests, &mappb.MapRequest{Status: &mappb.Status{Eot: true}}) + requests = append(requests, &mappb.MapRequest{Status: &mappb.TransmissionStatus{Eot: true}}) } var eg errgroup.Group // send n requests @@ -192,13 +193,11 @@ func (c *client) MapFn(ctx context.Context, requests []*mappb.MapRequest) ([]*ma if resp.GetStatus() != nil && resp.GetStatus().GetEot() { // we might get an end of transmission message from the server before receiving all the responses. 
if i < len(requests)-1 { - c.log.Errorw("Received EOT message before all responses are received, we will wait indefinitely for the remaining responses", zap.Int("received_responses", i+1), zap.Int("total_requests", len(requests))) - } else { - break + c.log.Errorw("received EOT message before all responses are received, we will wait indefinitely for the remaining responses", zap.Int("received_responses", i+1), zap.Int("total_requests", len(requests))) } - } else { - responses = append(responses, resp) + continue } + responses = append(responses, resp) } return nil }) @@ -211,3 +210,33 @@ func (c *client) MapFn(ctx context.Context, requests []*mappb.MapRequest) ([]*ma return responses, nil } + +// MapStreamFn applies a function to each datum element and writes the response to the stream. +func (c *client) MapStreamFn(ctx context.Context, request *mappb.MapRequest, responseCh chan<- *mappb.MapResponse) error { + defer close(responseCh) + err := c.stream.Send(request) + if err != nil { + return fmt.Errorf("failed to execute c.grpcClt.MapStreamFn(): %w", err) + } + + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + var resp *mappb.MapResponse + resp, err = c.stream.Recv() + if err == io.EOF { + return nil + } + if resp.GetStatus() != nil && resp.GetStatus().GetEot() { + return nil + } + err = sdkerror.ToUDFErr("c.grpcClt.MapStreamFn", err) + if err != nil { + return err + } + responseCh <- resp + } + } +} diff --git a/pkg/sdkclient/mapper/interface.go b/pkg/sdkclient/mapper/interface.go index e079833767..9c345b4cf7 100644 --- a/pkg/sdkclient/mapper/interface.go +++ b/pkg/sdkclient/mapper/interface.go @@ -28,4 +28,5 @@ type Client interface { CloseConn() error IsReady(ctx context.Context, in *emptypb.Empty) (bool, error) MapFn(ctx context.Context, mapRequest []*mappb.MapRequest) ([]*mappb.MapResponse, error) + MapStreamFn(ctx context.Context, request *mappb.MapRequest, responseCh chan<- *mappb.MapResponse) error } diff --git 
a/pkg/sdkclient/mapstreamer/client.go b/pkg/sdkclient/mapstreamer/client.go deleted file mode 100644 index 405372d4af..0000000000 --- a/pkg/sdkclient/mapstreamer/client.go +++ /dev/null @@ -1,105 +0,0 @@ -/* -Copyright 2022 The Numaproj Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mapstreamer - -import ( - "context" - "fmt" - "io" - - mapstreampb "github.com/numaproj/numaflow-go/pkg/apis/proto/mapstream/v1" - "google.golang.org/grpc" - "google.golang.org/protobuf/types/known/emptypb" - - "github.com/numaproj/numaflow/pkg/sdkclient" - sdkerror "github.com/numaproj/numaflow/pkg/sdkclient/error" - grpcutil "github.com/numaproj/numaflow/pkg/sdkclient/grpc" - "github.com/numaproj/numaflow/pkg/sdkclient/serverinfo" -) - -// client contains the grpc connection and the grpc client. -type client struct { - conn *grpc.ClientConn - grpcClt mapstreampb.MapStreamClient -} - -// New creates a new client object. 
-func New(serverInfo *serverinfo.ServerInfo, inputOptions ...sdkclient.Option) (Client, error) { - var opts = sdkclient.DefaultOptions(sdkclient.MapStreamAddr) - - for _, inputOption := range inputOptions { - inputOption(opts) - } - - // Connect to the server - conn, err := grpcutil.ConnectToServer(opts.UdsSockAddr(), serverInfo, opts.MaxMessageSize()) - if err != nil { - return nil, err - } - - c := new(client) - c.conn = conn - c.grpcClt = mapstreampb.NewMapStreamClient(conn) - return c, nil -} - -func NewFromClient(c mapstreampb.MapStreamClient) (Client, error) { - return &client{ - grpcClt: c, - }, nil -} - -// CloseConn closes the grpc client connection. -func (c *client) CloseConn() error { - return c.conn.Close() -} - -// IsReady returns true if the grpc connection is ready to use. -func (c *client) IsReady(ctx context.Context, in *emptypb.Empty) (bool, error) { - resp, err := c.grpcClt.IsReady(ctx, in) - if err != nil { - return false, err - } - return resp.GetReady(), nil -} - -// MapStreamFn applies a function to each datum element and returns a stream. -func (c *client) MapStreamFn(ctx context.Context, request *mapstreampb.MapStreamRequest, responseCh chan<- *mapstreampb.MapStreamResponse) error { - defer close(responseCh) - stream, err := c.grpcClt.MapStreamFn(ctx, request) - if err != nil { - return fmt.Errorf("failed to execute c.grpcClt.MapStreamFn(): %w", err) - } - - for { - select { - case <-ctx.Done(): - return ctx.Err() - default: - var resp *mapstreampb.MapStreamResponse - resp, err = stream.Recv() - if err == io.EOF { - return nil - } - err = sdkerror.ToUDFErr("c.grpcClt.MapStreamFn", err) - if err != nil { - return err - } - responseCh <- resp - } - } -} diff --git a/pkg/sdkclient/mapstreamer/client_test.go b/pkg/sdkclient/mapstreamer/client_test.go deleted file mode 100644 index 31b3a87b76..0000000000 --- a/pkg/sdkclient/mapstreamer/client_test.go +++ /dev/null @@ -1,106 +0,0 @@ -/* -Copyright 2022 The Numaproj Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mapstreamer - -import ( - "context" - "fmt" - "io" - "reflect" - "testing" - - "github.com/golang/mock/gomock" - mapstreampb "github.com/numaproj/numaflow-go/pkg/apis/proto/mapstream/v1" - "github.com/numaproj/numaflow-go/pkg/apis/proto/mapstream/v1/mapstreammock" - "github.com/stretchr/testify/assert" - "google.golang.org/protobuf/types/known/emptypb" -) - -func TestClient_IsReady(t *testing.T) { - var ctx = context.Background() - - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockClient := mapstreammock.NewMockMapStreamClient(ctrl) - mockClient.EXPECT().IsReady(gomock.Any(), gomock.Any()).Return(&mapstreampb.ReadyResponse{Ready: true}, nil) - mockClient.EXPECT().IsReady(gomock.Any(), gomock.Any()).Return(&mapstreampb.ReadyResponse{Ready: false}, fmt.Errorf("mock connection refused")) - - testClient, err := NewFromClient(mockClient) - assert.NoError(t, err) - reflect.DeepEqual(testClient, &client{ - grpcClt: mockClient, - }) - - ready, err := testClient.IsReady(ctx, &emptypb.Empty{}) - assert.True(t, ready) - assert.NoError(t, err) - - ready, err = testClient.IsReady(ctx, &emptypb.Empty{}) - assert.False(t, ready) - assert.EqualError(t, err, "mock connection refused") -} - -func TestClient_MapStreamFn(t *testing.T) { - var ctx = context.Background() - - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockClient := mapstreammock.NewMockMapStreamClient(ctrl) - mockStreamClient := 
mapstreammock.NewMockMapStream_MapStreamFnClient(ctrl) - - mockStreamClient.EXPECT().Recv().Return(&mapstreampb.MapStreamResponse{Result: &mapstreampb.MapStreamResponse_Result{ - Keys: []string{"temp-key"}, - Value: []byte("mock result"), - Tags: nil, - }}, nil) - - mockStreamClient.EXPECT().Recv().Return(&mapstreampb.MapStreamResponse{Result: &mapstreampb.MapStreamResponse_Result{ - Keys: []string{"temp-key"}, - Value: []byte("mock result"), - Tags: nil, - }}, io.EOF) - - mockStreamClient.EXPECT().CloseSend().Return(nil).AnyTimes() - - mockClient.EXPECT().MapStreamFn(gomock.Any(), gomock.Any()).Return(mockStreamClient, nil) - - testClient, err := NewFromClient(mockClient) - assert.NoError(t, err) - reflect.DeepEqual(testClient, &client{ - grpcClt: mockClient, - }) - - responseCh := make(chan *mapstreampb.MapStreamResponse) - - go func() { - select { - case <-ctx.Done(): - return - case resp := <-responseCh: - assert.Equal(t, resp, &mapstreampb.MapStreamResponse{Result: &mapstreampb.MapStreamResponse_Result{ - Keys: []string{"temp-key"}, - Value: []byte("mock result"), - Tags: nil, - }}) - } - }() - - err = testClient.MapStreamFn(ctx, &mapstreampb.MapStreamRequest{}, responseCh) - assert.NoError(t, err) -} diff --git a/pkg/sdkclient/mapstreamer/interface.go b/pkg/sdkclient/mapstreamer/interface.go deleted file mode 100644 index f7543227f8..0000000000 --- a/pkg/sdkclient/mapstreamer/interface.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright 2022 The Numaproj Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mapstreamer - -import ( - "context" - - v1 "github.com/numaproj/numaflow-go/pkg/apis/proto/mapstream/v1" - "google.golang.org/protobuf/types/known/emptypb" -) - -// Client contains methods to call a gRPC client. -type Client interface { - CloseConn() error - IsReady(ctx context.Context, in *emptypb.Empty) (bool, error) - MapStreamFn(ctx context.Context, request *v1.MapStreamRequest, responseCh chan<- *v1.MapStreamResponse) error -} diff --git a/pkg/sdkclient/sinker/client.go b/pkg/sdkclient/sinker/client.go index 51c1273568..3c7bc4b23d 100644 --- a/pkg/sdkclient/sinker/client.go +++ b/pkg/sdkclient/sinker/client.go @@ -37,13 +37,13 @@ type client struct { conn *grpc.ClientConn grpcClt sinkpb.SinkClient sinkStream sinkpb.Sink_SinkFnClient + log *zap.SugaredLogger } var _ Client = (*client)(nil) func New(ctx context.Context, serverInfo *serverinfo.ServerInfo, inputOptions ...sdkclient.Option) (Client, error) { var opts = sdkclient.DefaultOptions(sdkclient.SinkAddr) - var logger = logging.FromContext(ctx) for _, inputOption := range inputOptions { inputOption(opts) @@ -58,7 +58,7 @@ func New(ctx context.Context, serverInfo *serverinfo.ServerInfo, inputOptions .. 
c.conn = conn c.grpcClt = sinkpb.NewSinkClient(conn) - + c.log = logging.FromContext(ctx) // Wait until the server is ready waitUntilReady: for { @@ -70,7 +70,7 @@ waitUntilReady: if ready { break waitUntilReady } else { - logger.Warnw("waiting for the server to be ready", zap.String("server", opts.UdsSockAddr())) + c.log.Warnw("waiting for the server to be ready", zap.String("server", opts.UdsSockAddr())) time.Sleep(100 * time.Millisecond) } } @@ -156,7 +156,7 @@ func (c *client) SinkFn(ctx context.Context, requests []*sinkpb.SinkRequest) ([] // send eot request eotRequest := &sinkpb.SinkRequest{ - Status: &sinkpb.SinkRequest_Status{ + Status: &sinkpb.TransmissionStatus{ Eot: true, }, } @@ -167,11 +167,17 @@ func (c *client) SinkFn(ctx context.Context, requests []*sinkpb.SinkRequest) ([] // Wait for the corresponding responses var responses []*sinkpb.SinkResponse - for i := 0; i < len(requests); i++ { + for i := 0; i < len(requests)+1; i++ { resp, err := c.sinkStream.Recv() if err != nil { return nil, fmt.Errorf("failed to receive sink response: %v", err) } + if resp.GetStatus() != nil && resp.GetStatus().GetEot() { + if i != len(requests) { + c.log.Errorw("Received EOT message before all responses are received, we will wait indefinitely for the remaining responses", zap.Int("received", i), zap.Int("expected", len(requests))) + } + continue + } responses = append(responses, resp) } diff --git a/pkg/sdkclient/sinker/client_test.go b/pkg/sdkclient/sinker/client_test.go index 762a989fed..e95b39dba4 100644 --- a/pkg/sdkclient/sinker/client_test.go +++ b/pkg/sdkclient/sinker/client_test.go @@ -70,6 +70,11 @@ func TestClient_SinkFn(t *testing.T) { Status: sinkpb.Status_SUCCESS, }, }, nil) + mockSinkClient.EXPECT().Recv().Return(&sinkpb.SinkResponse{ + Status: &sinkpb.TransmissionStatus{ + Eot: true, + }, + }, nil) mockClient := sinkmock.NewMockSinkClient(ctrl) mockClient.EXPECT().SinkFn(gomock.Any(), gomock.Any()).Return(mockSinkClient, nil) diff --git 
a/pkg/sinks/udsink/udsink_grpc_test.go b/pkg/sinks/udsink/udsink_grpc_test.go index 96968fd9eb..2af72530fc 100644 --- a/pkg/sinks/udsink/udsink_grpc_test.go +++ b/pkg/sinks/udsink/udsink_grpc_test.go @@ -104,6 +104,9 @@ func Test_gRPCBasedUDSink_ApplyWithMockClient(t *testing.T) { mockSinkClient.EXPECT().Recv().Return(&sinkpb.SinkResponse{ Result: testResponseList[1], }, nil) + mockSinkClient.EXPECT().Recv().Return(&sinkpb.SinkResponse{Status: &sinkpb.TransmissionStatus{ + Eot: true, + }}, nil).AnyTimes() mockClient := sinkmock.NewMockSinkClient(ctrl) mockClient.EXPECT().SinkFn(gomock.Any(), gomock.Any()).Return(mockSinkClient, nil) diff --git a/pkg/udf/forward/forward.go b/pkg/udf/forward/forward.go index 0f91c5e33a..e081359cf1 100644 --- a/pkg/udf/forward/forward.go +++ b/pkg/udf/forward/forward.go @@ -364,84 +364,99 @@ func (isdf *InterStepDataForward) forwardAChunk(ctx context.Context) { // streamMessage streams the data messages to the next step. func (isdf *InterStepDataForward) streamMessage(ctx context.Context, dataMessages []*isb.ReadMessage) (map[string][][]isb.Offset, error) { - // create space for writeMessages specific to each step as we could forward to all the steps too. 
- // these messages are for per partition (due to round-robin writes) for load balancing - var messageToStep = make(map[string][][]isb.Message) - var writeOffsets = make(map[string][][]isb.Offset) + // Initialize maps for messages and offsets + messageToStep := make(map[string][][]isb.Message) + writeOffsets := make(map[string][][]isb.Offset) for toVertex := range isdf.toBuffers { - // over allocating to have a predictable pattern messageToStep[toVertex] = make([][]isb.Message, len(isdf.toBuffers[toVertex])) writeOffsets[toVertex] = make([][]isb.Offset, len(isdf.toBuffers[toVertex])) } - if len(dataMessages) > 1 { + // Ensure dataMessages length is 1 for streaming + if len(dataMessages) != 1 { errMsg := "data message size is not 1 with map UDF streaming" isdf.opts.logger.Errorw(errMsg) return nil, errors.New(errMsg) - } else if len(dataMessages) == 1 { - // send to map UDF only the data messages - - // process the mapStreamUDF and get the result - start := time.Now() - metrics.UDFReadMessagesCount.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), metrics.LabelPartitionName: isdf.fromBufferPartition.GetName()}).Inc() - - writeMessageCh := make(chan isb.WriteMessage) - errs, ctx := errgroup.WithContext(ctx) - errs.Go(func() error { - return isdf.opts.streamMapUdfApplier.ApplyMapStream(ctx, dataMessages[0], writeMessageCh) - }) - - // Stream the message to the next vertex. First figure out which vertex - // to send the result to. Then update the toBuffer(s) with writeMessage. 
- msgIndex := 0 - for writeMessage := range writeMessageCh { - writeMessage.Headers = dataMessages[0].Headers - msgIndex += 1 - metrics.UDFWriteMessagesCount.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), metrics.LabelPartitionName: isdf.fromBufferPartition.GetName()}).Add(float64(1)) + } - // update toBuffers - if err := isdf.whereToStep(&writeMessage, messageToStep, dataMessages[0]); err != nil { - return nil, fmt.Errorf("failed at whereToStep, error: %w", err) - } + // Process the single data message + start := time.Now() + metrics.UDFReadMessagesCount.With(map[string]string{ + metrics.LabelVertex: isdf.vertexName, + metrics.LabelPipeline: isdf.pipelineName, + metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), + metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), + metrics.LabelPartitionName: isdf.fromBufferPartition.GetName(), + }).Inc() + + writeMessageCh := make(chan isb.WriteMessage) + errs, ctx := errgroup.WithContext(ctx) + errs.Go(func() error { + return isdf.opts.streamMapUdfApplier.ApplyMapStream(ctx, dataMessages[0], writeMessageCh) + }) - // Forward the message to the edge buffer (could be multiple edges) - curWriteOffsets, err := isdf.writeToBuffers(ctx, messageToStep) - if err != nil { - return nil, fmt.Errorf("failed to write to toBuffers, error: %w", err) - } - // Merge curWriteOffsets into writeOffsets - for vertexName, toVertexBufferOffsets := range curWriteOffsets { - for index, offsets := range toVertexBufferOffsets { - writeOffsets[vertexName][index] = append(writeOffsets[vertexName][index], offsets...) 
- } - } + // Stream the message to the next vertex + for writeMessage := range writeMessageCh { + writeMessage.Headers = dataMessages[0].Headers + metrics.UDFWriteMessagesCount.With(map[string]string{ + metrics.LabelVertex: isdf.vertexName, + metrics.LabelPipeline: isdf.pipelineName, + metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), + metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), + metrics.LabelPartitionName: isdf.fromBufferPartition.GetName(), + }).Add(1) + + // Determine where to step and write to buffers + if err := isdf.whereToStep(&writeMessage, messageToStep, dataMessages[0]); err != nil { + return nil, fmt.Errorf("failed at whereToStep, error: %w", err) } - // look for errors in udf processing, if we see even 1 error NoAck all messages - // then return. Handling partial retrying is not worth ATM. - if err := errs.Wait(); err != nil { - metrics.UDFError.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, - metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica))}).Inc() - // We do not retry as we are streaming - if ok, _ := isdf.IsShuttingDown(); ok { - isdf.opts.logger.Errorw("mapUDF.Apply, Stop called while stuck on an internal error", zap.Error(err)) - metrics.PlatformError.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica))}).Inc() + curWriteOffsets, err := isdf.writeToBuffers(ctx, messageToStep) + if err != nil { + return nil, fmt.Errorf("failed to write to toBuffers, error: %w", err) + } + + // Merge current write offsets into the main writeOffsets map + for vertexName, toVertexBufferOffsets := range curWriteOffsets { + for index, offsets := range toVertexBufferOffsets { + writeOffsets[vertexName][index] = 
append(writeOffsets[vertexName][index], offsets...) } - return nil, fmt.Errorf("failed to applyUDF, error: %w", err) } - metrics.UDFProcessingTime.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, - metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica))}).Observe(float64(time.Since(start).Microseconds())) - } else { - // Even not data messages, forward the message to the edge buffer (could be multiple edges) - var err error - writeOffsets, err = isdf.writeToBuffers(ctx, messageToStep) - if err != nil { - return nil, fmt.Errorf("failed to write to toBuffers, error: %w", err) + // Clear messageToStep, as we have written the messages to the buffers + for toVertex := range isdf.toBuffers { + messageToStep[toVertex] = make([][]isb.Message, len(isdf.toBuffers[toVertex])) } } + // Handle errors in UDF processing + if err := errs.Wait(); err != nil { + metrics.UDFError.With(map[string]string{ + metrics.LabelVertex: isdf.vertexName, + metrics.LabelPipeline: isdf.pipelineName, + metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), + metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), + }).Inc() + if ok, _ := isdf.IsShuttingDown(); ok { + isdf.opts.logger.Errorw("mapUDF.Apply, Stop called while stuck on an internal error", zap.Error(err)) + metrics.PlatformError.With(map[string]string{ + metrics.LabelVertex: isdf.vertexName, + metrics.LabelPipeline: isdf.pipelineName, + metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), + metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), + }).Inc() + } + return nil, fmt.Errorf("failed to applyUDF, error: %w", err) + } + + metrics.UDFProcessingTime.With(map[string]string{ + metrics.LabelVertex: isdf.vertexName, + metrics.LabelPipeline: isdf.pipelineName, + metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), + metrics.LabelVertexReplicaIndex: 
strconv.Itoa(int(isdf.vertexReplica)), + }).Observe(float64(time.Since(start).Microseconds())) + return writeOffsets, nil } diff --git a/pkg/udf/map_udf.go b/pkg/udf/map_udf.go index a8262a461e..4a913eca1a 100644 --- a/pkg/udf/map_udf.go +++ b/pkg/udf/map_udf.go @@ -32,7 +32,6 @@ import ( "github.com/numaproj/numaflow/pkg/metrics" "github.com/numaproj/numaflow/pkg/sdkclient" "github.com/numaproj/numaflow/pkg/sdkclient/mapper" - "github.com/numaproj/numaflow/pkg/sdkclient/mapstreamer" "github.com/numaproj/numaflow/pkg/sdkclient/serverinfo" "github.com/numaproj/numaflow/pkg/shared/callback" jsclient "github.com/numaproj/numaflow/pkg/shared/clients/nats" @@ -69,7 +68,7 @@ func (u *MapUDFProcessor) Start(ctx context.Context) error { fromVertexWmStores map[string]store.WatermarkStore toVertexWmStores map[string]store.WatermarkStore mapHandler *rpc.GRPCBasedMap - mapStreamHandler *rpc.GRPCBasedMapStream + mapStreamHandler *rpc.GRPCBasedMap idleManager wmb.IdleManager vertexName = u.VertexInstance.Vertex.Spec.Name pipelineName = u.VertexInstance.Vertex.Spec.PipelineName @@ -157,11 +156,11 @@ func (u *MapUDFProcessor) Start(ctx context.Context) error { // Map Stream mode enableMapUdfStream = true - mapStreamClient, err := mapstreamer.New(serverInfo, sdkclient.WithMaxMessageSize(maxMessageSize)) + mapStreamClient, err := mapper.New(ctx, serverInfo, sdkclient.WithMaxMessageSize(maxMessageSize), sdkclient.WithUdsSockAddr(sdkclient.MapStreamAddr)) if err != nil { return fmt.Errorf("failed to create map stream client, %w", err) } - mapStreamHandler = rpc.NewUDSgRPCBasedMapStream(vertexName, mapStreamClient) + mapStreamHandler = rpc.NewUDSgRPCBasedMap(ctx, mapStreamClient, vertexName) // Readiness check if err := mapStreamHandler.WaitUntilReady(ctx); err != nil { diff --git a/pkg/udf/rpc/grpc_map.go b/pkg/udf/rpc/grpc_map.go index 71fe0fe790..8ec05c39c9 100644 --- a/pkg/udf/rpc/grpc_map.go +++ b/pkg/udf/rpc/grpc_map.go @@ -23,6 +23,7 @@ import ( mappb 
"github.com/numaproj/numaflow-go/pkg/apis/proto/map/v1" "go.uber.org/zap" + "golang.org/x/sync/errgroup" "google.golang.org/protobuf/types/known/emptypb" "google.golang.org/protobuf/types/known/timestamppb" @@ -146,3 +147,70 @@ func (u *GRPCBasedMap) ApplyMap(ctx context.Context, readMessages []*isb.ReadMes } return results, nil } + +func (u *GRPCBasedMap) ApplyMapStream(ctx context.Context, message *isb.ReadMessage, writeMessageCh chan<- isb.WriteMessage) error { + defer close(writeMessageCh) + + keys := message.Keys + payload := message.Body.Payload + offset := message.ReadOffset + parentMessageInfo := message.MessageInfo + + var d = &mappb.MapRequest{ + Request: &mappb.MapRequest_Request{ + Keys: keys, + Value: payload, + EventTime: timestamppb.New(parentMessageInfo.EventTime), + Watermark: timestamppb.New(message.Watermark), + Headers: message.Headers, + }, + Id: offset.String(), + } + + responseCh := make(chan *mappb.MapResponse) + errs, ctx := errgroup.WithContext(ctx) + errs.Go(func() error { + err := u.client.MapStreamFn(ctx, d, responseCh) + if err != nil { + err = &ApplyUDFErr{ + UserUDFErr: false, + Message: fmt.Sprintf("gRPC client.MapStreamFn failed, %s", err), + InternalErr: InternalErr{ + Flag: true, + MainCarDown: false, + }, + } + return err + } + return nil + }) + + i := 0 + for response := range responseCh { + results := response.GetResults() + for _, result := range results { + i++ + keys := result.GetKeys() + taggedMessage := &isb.WriteMessage{ + Message: isb.Message{ + Header: isb.Header{ + MessageInfo: parentMessageInfo, + ID: isb.MessageID{ + VertexName: u.vertexName, + Offset: offset.String(), + Index: int32(i), + }, + Keys: keys, + }, + Body: isb.Body{ + Payload: result.GetValue(), + }, + }, + Tags: result.GetTags(), + } + writeMessageCh <- *taggedMessage + } + } + + return errs.Wait() +} diff --git a/pkg/udf/rpc/grpc_mapstream.go b/pkg/udf/rpc/grpc_mapstream.go deleted file mode 100644 index 8c5e610063..0000000000 --- 
a/pkg/udf/rpc/grpc_mapstream.go +++ /dev/null @@ -1,135 +0,0 @@ -/* -Copyright 2022 The Numaproj Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package rpc - -import ( - "context" - "fmt" - "time" - - mapstreampb "github.com/numaproj/numaflow-go/pkg/apis/proto/mapstream/v1" - "golang.org/x/sync/errgroup" - "google.golang.org/protobuf/types/known/emptypb" - "google.golang.org/protobuf/types/known/timestamppb" - - "github.com/numaproj/numaflow/pkg/isb" - "github.com/numaproj/numaflow/pkg/sdkclient/mapstreamer" - "github.com/numaproj/numaflow/pkg/shared/logging" -) - -// GRPCBasedMapStream is a map stream applier that uses gRPC client to invoke the map stream UDF. It implements the applier.MapStreamApplier interface. -type GRPCBasedMapStream struct { - vertexName string - client mapstreamer.Client -} - -func NewUDSgRPCBasedMapStream(vertexName string, client mapstreamer.Client) *GRPCBasedMapStream { - return &GRPCBasedMapStream{ - vertexName: vertexName, - client: client, - } -} - -// Close closes the gRPC client connection. -func (u *GRPCBasedMapStream) Close() error { - return u.client.CloseConn() -} - -// IsHealthy checks if the map stream udf is healthy. -func (u *GRPCBasedMapStream) IsHealthy(ctx context.Context) error { - return u.WaitUntilReady(ctx) -} - -// WaitUntilReady waits until the map stream udf is connected. 
-func (u *GRPCBasedMapStream) WaitUntilReady(ctx context.Context) error { - log := logging.FromContext(ctx) - for { - select { - case <-ctx.Done(): - return fmt.Errorf("failed on readiness check: %w", ctx.Err()) - default: - if _, err := u.client.IsReady(ctx, &emptypb.Empty{}); err == nil { - return nil - } else { - log.Infof("waiting for map stream udf to be ready: %v", err) - time.Sleep(1 * time.Second) - } - } - } -} - -func (u *GRPCBasedMapStream) ApplyMapStream(ctx context.Context, message *isb.ReadMessage, writeMessageCh chan<- isb.WriteMessage) error { - defer close(writeMessageCh) - - keys := message.Keys - payload := message.Body.Payload - offset := message.ReadOffset - parentMessageInfo := message.MessageInfo - - var d = &mapstreampb.MapStreamRequest{ - Keys: keys, - Value: payload, - EventTime: timestamppb.New(parentMessageInfo.EventTime), - Watermark: timestamppb.New(message.Watermark), - Headers: message.Headers, - } - - responseCh := make(chan *mapstreampb.MapStreamResponse) - errs, ctx := errgroup.WithContext(ctx) - errs.Go(func() error { - err := u.client.MapStreamFn(ctx, d, responseCh) - if err != nil { - err = &ApplyUDFErr{ - UserUDFErr: false, - Message: fmt.Sprintf("gRPC client.MapStreamFn failed, %s", err), - InternalErr: InternalErr{ - Flag: true, - MainCarDown: false, - }, - } - return err - } - return nil - }) - - i := 0 - for response := range responseCh { - result := response.Result - i++ - keys := result.GetKeys() - taggedMessage := &isb.WriteMessage{ - Message: isb.Message{ - Header: isb.Header{ - MessageInfo: parentMessageInfo, - ID: isb.MessageID{ - VertexName: u.vertexName, - Offset: offset.String(), - Index: int32(i), - }, - Keys: keys, - }, - Body: isb.Body{ - Payload: result.GetValue(), - }, - }, - Tags: result.GetTags(), - } - writeMessageCh <- *taggedMessage - } - - return errs.Wait() -} diff --git a/pkg/udf/rpc/grpc_mapstream_test.go b/pkg/udf/rpc/grpc_mapstream_test.go deleted file mode 100644 index 40385ed3f0..0000000000 --- 
a/pkg/udf/rpc/grpc_mapstream_test.go +++ /dev/null @@ -1,211 +0,0 @@ -/* -Copyright 2022 The Numaproj Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package rpc - -import ( - "context" - "errors" - "fmt" - "io" - "testing" - "time" - - "github.com/golang/mock/gomock" - mapstreampb "github.com/numaproj/numaflow-go/pkg/apis/proto/mapstream/v1" - "github.com/numaproj/numaflow-go/pkg/apis/proto/mapstream/v1/mapstreammock" - "github.com/stretchr/testify/assert" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/timestamppb" - - "github.com/numaproj/numaflow/pkg/isb" - "github.com/numaproj/numaflow/pkg/sdkclient/mapstreamer" -) - -func NewMockUDSGRPCBasedMapStream(mockClient *mapstreammock.MockMapStreamClient) *GRPCBasedMapStream { - c, _ := mapstreamer.NewFromClient(mockClient) - return &GRPCBasedMapStream{"test-vertex", c} -} - -func TestGRPCBasedMapStream_WaitUntilReadyWithMockClient(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockClient := mapstreammock.NewMockMapStreamClient(ctrl) - mockClient.EXPECT().IsReady(gomock.Any(), gomock.Any()).Return(&mapstreampb.ReadyResponse{Ready: true}, nil) - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - go func() { - <-ctx.Done() - if errors.Is(ctx.Err(), context.DeadlineExceeded) { - t.Log(t.Name(), "test timeout") - } - }() - - u := NewMockUDSGRPCBasedMapStream(mockClient) - err := u.WaitUntilReady(ctx) - assert.NoError(t, err) -} - 
-type rpcMsg struct { - msg proto.Message -} - -func (r *rpcMsg) Matches(msg interface{}) bool { - m, ok := msg.(proto.Message) - if !ok { - return false - } - return proto.Equal(m, r.msg) -} - -func (r *rpcMsg) String() string { - return fmt.Sprintf("is %s", r.msg) -} - -func TestGRPCBasedUDF_BasicApplyStreamWithMockClient(t *testing.T) { - t.Run("test success", func(t *testing.T) { - - ctrl := gomock.NewController(t) - defer ctrl.Finish() - mockClient := mapstreammock.NewMockMapStreamClient(ctrl) - mockMapStreamClient := mapstreammock.NewMockMapStream_MapStreamFnClient(ctrl) - - req := &mapstreampb.MapStreamRequest{ - Keys: []string{"test_success_key"}, - Value: []byte(`forward_message`), - EventTime: timestamppb.New(time.Unix(1661169600, 0)), - Watermark: timestamppb.New(time.Time{}), - } - expectedDatum := &mapstreampb.MapStreamResponse{ - Result: &mapstreampb.MapStreamResponse_Result{ - Keys: []string{"test_success_key"}, - Value: []byte(`forward_message`), - }, - } - mockMapStreamClient.EXPECT().Recv().Return(expectedDatum, nil).Times(1) - mockMapStreamClient.EXPECT().Recv().Return(nil, io.EOF).Times(1) - - mockClient.EXPECT().MapStreamFn(gomock.Any(), &rpcMsg{msg: req}).Return(mockMapStreamClient, nil) - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - go func() { - <-ctx.Done() - if errors.Is(ctx.Err(), context.DeadlineExceeded) { - t.Log(t.Name(), "test timeout") - } - }() - - writeMessageCh := make(chan isb.WriteMessage) - u := NewMockUDSGRPCBasedMapStream(mockClient) - - go func() { - err := u.ApplyMapStream(ctx, &isb.ReadMessage{ - Message: isb.Message{ - Header: isb.Header{ - MessageInfo: isb.MessageInfo{ - EventTime: time.Unix(1661169600, 0), - }, - ID: isb.MessageID{ - VertexName: "test-vertex", - Offset: "test-offset", - }, - Keys: []string{"test_success_key"}, - }, - Body: isb.Body{ - Payload: []byte(`forward_message`), - }, - }, - ReadOffset: isb.SimpleStringOffset(func() string { return "0" }), - 
Metadata: isb.MessageMetadata{ - NumDelivered: 1, - }, - }, writeMessageCh) - assert.NoError(t, err) - }() - - for msg := range writeMessageCh { - assert.Equal(t, req.Keys, msg.Keys) - assert.Equal(t, req.Value, msg.Payload) - } - }) - - t.Run("test error", func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockClient := mapstreammock.NewMockMapStreamClient(ctrl) - mockMapStreamClient := mapstreammock.NewMockMapStream_MapStreamFnClient(ctrl) - - req := &mapstreampb.MapStreamRequest{ - Keys: []string{"test_error_key"}, - Value: []byte(`forward_message`), - EventTime: timestamppb.New(time.Unix(1661169660, 0)), - Watermark: timestamppb.New(time.Time{}), - } - - mockMapStreamClient.EXPECT().Recv().Return( - &mapstreampb.MapStreamResponse{ - Result: &mapstreampb.MapStreamResponse_Result{ - Keys: []string{"test_error_key"}, - Value: []byte(`forward_message`), - }, - }, errors.New("mock error for map")).AnyTimes() - - mockClient.EXPECT().MapStreamFn(gomock.Any(), &rpcMsg{msg: req}).Return(mockMapStreamClient, nil) - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - go func() { - <-ctx.Done() - if errors.Is(ctx.Err(), context.DeadlineExceeded) { - t.Log(t.Name(), "test timeout") - } - }() - - u := NewMockUDSGRPCBasedMapStream(mockClient) - writeMessageCh := make(chan isb.WriteMessage) - - err := u.ApplyMapStream(ctx, &isb.ReadMessage{ - Message: isb.Message{ - Header: isb.Header{ - MessageInfo: isb.MessageInfo{ - EventTime: time.Unix(1661169660, 0), - }, - ID: isb.MessageID{ - VertexName: "test-vertex", - Offset: "test-offset", - }, - Keys: []string{"test_error_key"}, - }, - Body: isb.Body{ - Payload: []byte(`forward_message`), - }, - }, - ReadOffset: isb.SimpleStringOffset(func() string { return "0" }), - }, writeMessageCh) - assert.ErrorIs(t, err, &ApplyUDFErr{ - UserUDFErr: false, - Message: fmt.Sprintf("%s", err), - InternalErr: InternalErr{ - Flag: true, - MainCarDown: false, - }, - }) - }) -} diff 
--git a/rust/Cargo.lock b/rust/Cargo.lock index 8e6ff8c3d3..8afd5705bc 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -1577,6 +1577,28 @@ dependencies = [ "uuid", ] +[[package]] +name = "numaflow" +version = "0.1.1" +source = "git+https://github.com/numaproj/numaflow-rs.git?rev=9fb3c0ad0f5f43cc42b4919f849b7dcce9a91387#9fb3c0ad0f5f43cc42b4919f849b7dcce9a91387" +dependencies = [ + "chrono", + "futures-util", + "hyper-util", + "prost", + "prost-types", + "serde", + "serde_json", + "thiserror", + "tokio", + "tokio-stream", + "tokio-util", + "tonic", + "tonic-build", + "tracing", + "uuid", +] + [[package]] name = "numaflow-core" version = "0.1.0" @@ -1591,7 +1613,7 @@ dependencies = [ "hyper-util", "kube", "log", - "numaflow 0.1.1", + "numaflow 0.1.1 (git+https://github.com/numaproj/numaflow-rs.git?rev=9fb3c0ad0f5f43cc42b4919f849b7dcce9a91387)", "numaflow-grpc", "numaflow-models", "parking_lot", @@ -2597,7 +2619,7 @@ dependencies = [ name = "servesink" version = "0.1.0" dependencies = [ - "numaflow 0.1.1", + "numaflow 0.1.1 (git+https://github.com/numaproj/numaflow-rs.git?rev=30d8ce1972fd3f0c0b8059fee209516afeef0088)", "reqwest 0.12.8", "tokio", "tonic", diff --git a/rust/numaflow-core/Cargo.toml b/rust/numaflow-core/Cargo.toml index 28cc7007cf..163cd99bb9 100644 --- a/rust/numaflow-core/Cargo.toml +++ b/rust/numaflow-core/Cargo.toml @@ -40,7 +40,7 @@ pin-project = "1.1.5" [dev-dependencies] tempfile = "3.11.0" -numaflow = { git = "https://github.com/numaproj/numaflow-rs.git", rev = "30d8ce1972fd3f0c0b8059fee209516afeef0088" } +numaflow = { git = "https://github.com/numaproj/numaflow-rs.git", rev = "9fb3c0ad0f5f43cc42b4919f849b7dcce9a91387" } [build-dependencies] diff --git a/rust/numaflow-core/src/message.rs b/rust/numaflow-core/src/message.rs index f0275df07e..69d60d66e9 100644 --- a/rust/numaflow-core/src/message.rs +++ b/rust/numaflow-core/src/message.rs @@ -150,6 +150,7 @@ impl From for SinkResponse { err_msg, }), handshake: None, + status: None, } } } diff 
--git a/rust/numaflow-core/src/sink/user_defined.rs b/rust/numaflow-core/src/sink/user_defined.rs index 529abfed56..461542ba45 100644 --- a/rust/numaflow-core/src/sink/user_defined.rs +++ b/rust/numaflow-core/src/sink/user_defined.rs @@ -4,8 +4,7 @@ use tonic::transport::Channel; use tonic::{Request, Streaming}; use numaflow_grpc::clients::sink::sink_client::SinkClient; -use numaflow_grpc::clients::sink::sink_request::Status; -use numaflow_grpc::clients::sink::{Handshake, SinkRequest, SinkResponse}; +use numaflow_grpc::clients::sink::{Handshake, SinkRequest, SinkResponse, TransmissionStatus}; use crate::error; use crate::error::Error; @@ -77,7 +76,7 @@ impl Sink for UserDefinedSink { // send eot request to indicate the end of the stream let eot_request = SinkRequest { request: None, - status: Some(Status { eot: true }), + status: Some(TransmissionStatus { eot: true }), handshake: None, }; self.sink_tx @@ -85,16 +84,24 @@ impl Sink for UserDefinedSink { .await .map_err(|e| Error::SinkError(format!("failed to send eot request: {}", e)))?; - // now that we have sent, we wait for responses! - // NOTE: this works now because the results are not streamed, as of today it will give the + // Now that we have sent, we wait for responses! + // NOTE: This works now because the results are not streamed. As of today, it will give the // response only once it has read all the requests. + // We wait for num_requests + 1 responses because the last response will be the EOT response. let mut responses = Vec::new(); - for _ in 0..num_requests { + for i in 0..num_requests + 1 { let response = self .resp_stream .message() .await? 
.ok_or(Error::SinkError("failed to receive response".to_string()))?; + + if response.status.map_or(false, |s| s.eot) { + if i != num_requests { + log::error!("received EOT message before all responses are received, we will wait indefinitely for the remaining responses"); + } + continue; + } responses.push(response.try_into()?); } diff --git a/rust/numaflow-grpc/src/clients/map.v1.rs b/rust/numaflow-grpc/src/clients/map.v1.rs index 1f5de6a369..c2726bd6e6 100644 --- a/rust/numaflow-grpc/src/clients/map.v1.rs +++ b/rust/numaflow-grpc/src/clients/map.v1.rs @@ -11,7 +11,7 @@ pub struct MapRequest { #[prost(message, optional, tag = "3")] pub handshake: ::core::option::Option, #[prost(message, optional, tag = "4")] - pub status: ::core::option::Option, + pub status: ::core::option::Option, } /// Nested message and enum types in `MapRequest`. pub mod map_request { @@ -43,7 +43,7 @@ pub struct Handshake { /// /// Status message to indicate the status of the message. #[derive(Clone, Copy, PartialEq, ::prost::Message)] -pub struct Status { +pub struct TransmissionStatus { #[prost(bool, tag = "1")] pub eot: bool, } @@ -59,7 +59,7 @@ pub struct MapResponse { #[prost(message, optional, tag = "3")] pub handshake: ::core::option::Option, #[prost(message, optional, tag = "4")] - pub status: ::core::option::Option, + pub status: ::core::option::Option, } /// Nested message and enum types in `MapResponse`. pub mod map_response { diff --git a/rust/numaflow-grpc/src/clients/sink.v1.rs b/rust/numaflow-grpc/src/clients/sink.v1.rs index 2316cc0a9a..612e5693c3 100644 --- a/rust/numaflow-grpc/src/clients/sink.v1.rs +++ b/rust/numaflow-grpc/src/clients/sink.v1.rs @@ -9,7 +9,7 @@ pub struct SinkRequest { /// Required field indicating the status of the request. /// If eot is set to true, it indicates the end of transmission. #[prost(message, optional, tag = "2")] - pub status: ::core::option::Option, + pub status: ::core::option::Option, /// optional field indicating the handshake message. 
#[prost(message, optional, tag = "3")] pub handshake: ::core::option::Option, @@ -34,11 +34,6 @@ pub mod sink_request { ::prost::alloc::string::String, >, } - #[derive(Clone, Copy, PartialEq, ::prost::Message)] - pub struct Status { - #[prost(bool, tag = "1")] - pub eot: bool, - } } /// /// Handshake message between client and server to indicate the start of transmission. @@ -56,6 +51,13 @@ pub struct ReadyResponse { pub ready: bool, } /// * +/// TransmissionStatus is the status of the transmission. +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct TransmissionStatus { + #[prost(bool, tag = "1")] + pub eot: bool, +} +/// * /// SinkResponse is the individual response of each message written to the sink. #[derive(Clone, PartialEq, ::prost::Message)] pub struct SinkResponse { @@ -63,6 +65,8 @@ pub struct SinkResponse { pub result: ::core::option::Option, #[prost(message, optional, tag = "2")] pub handshake: ::core::option::Option, + #[prost(message, optional, tag = "3")] + pub status: ::core::option::Option, } /// Nested message and enum types in `SinkResponse`. 
pub mod sink_response { From 1ea4d2ea3f4a7b2ab939976eba5308d6cb0a9da0 Mon Sep 17 00:00:00 2001 From: Sreekanth Date: Mon, 14 Oct 2024 22:52:26 +0530 Subject: [PATCH 105/188] feat: Log sink implementation for Monovertex (#2150) Signed-off-by: Sreekanth Signed-off-by: Vigith Maurice Co-authored-by: Vigith Maurice --- rust/numaflow-core/src/config.rs | 8 +- rust/numaflow-core/src/monovertex.rs | 151 ++++++++++++------ .../numaflow-core/src/monovertex/forwarder.rs | 38 ++--- rust/numaflow-core/src/monovertex/metrics.rs | 54 ++++--- rust/numaflow-core/src/shared/utils.rs | 32 ++-- rust/numaflow-core/src/sink.rs | 33 +++- rust/numaflow-core/src/sink/log.rs | 34 ++++ 7 files changed, 229 insertions(+), 121 deletions(-) create mode 100644 rust/numaflow-core/src/sink/log.rs diff --git a/rust/numaflow-core/src/config.rs b/rust/numaflow-core/src/config.rs index 48c4e597b5..82d465ac73 100644 --- a/rust/numaflow-core/src/config.rs +++ b/rust/numaflow-core/src/config.rs @@ -90,7 +90,8 @@ pub struct Settings { pub sink_default_retry_strategy: RetryStrategy, pub transformer_config: Option, pub udsource_config: Option, - pub udsink_config: UDSinkConfig, + pub udsink_config: Option, + pub logsink_config: Option<()>, pub fallback_config: Option, pub generator_config: Option, } @@ -200,6 +201,7 @@ impl Default for Settings { transformer_config: None, udsource_config: None, udsink_config: Default::default(), + logsink_config: None, fallback_config: None, generator_config: None, } @@ -274,8 +276,8 @@ impl Settings { .ok_or(Error::ConfigError("Sink not found".to_string()))? 
.udsink { - Some(_) => UDSinkConfig::default(), - _ => UDSinkConfig::default(), + Some(_) => Some(UDSinkConfig::default()), + _ => None, }; settings.fallback_config = match mono_vertex_obj diff --git a/rust/numaflow-core/src/monovertex.rs b/rust/numaflow-core/src/monovertex.rs index eee02bd304..7bc12d8c18 100644 --- a/rust/numaflow-core/src/monovertex.rs +++ b/rust/numaflow-core/src/monovertex.rs @@ -11,10 +11,10 @@ use numaflow_grpc::clients::source::source_client::SourceClient; use numaflow_grpc::clients::sourcetransformer::source_transform_client::SourceTransformClient; use crate::config::{config, Settings}; -use crate::error; +use crate::error::{self, Error}; use crate::shared::utils; use crate::shared::utils::create_rpc_channel; -use crate::sink::SinkHandle; +use crate::sink::{SinkClientType, SinkHandle}; use crate::source::generator::{new_generator, GeneratorAck, GeneratorLagReader, GeneratorRead}; use crate::source::user_defined::{ new_source, UserDefinedSourceAck, UserDefinedSourceLagReader, UserDefinedSourceRead, @@ -46,7 +46,7 @@ pub async fn mono_vertex() -> error::Result<()> { // Run the forwarder with cancellation token. 
if let Err(e) = start_forwarder(cln_token, config()).await { - error!("Application error: {:?}", e); + tracing::error!("Application error: {:?}", e); // abort the signal handler task since we have an error and we are shutting down if !shutdown_handle.is_finished() { @@ -97,7 +97,10 @@ async fn start_forwarder(cln_token: CancellationToken, config: &Settings) -> err .udsource_config .as_ref() .map(|source_config| source_config.server_info_path.clone().into()), - config.udsink_config.server_info_path.clone().into(), + config + .udsink_config + .as_ref() + .map(|sink_config| sink_config.server_info_path.clone().into()), config .transformer_config .as_ref() @@ -119,19 +122,12 @@ async fn start_forwarder(cln_token: CancellationToken, config: &Settings) -> err None }; - let mut sink_grpc_client = - SinkClient::new(create_rpc_channel(config.udsink_config.socket_path.clone().into()).await?) - .max_encoding_message_size(config.udsink_config.grpc_max_message_size) - .max_encoding_message_size(config.udsink_config.grpc_max_message_size); - - let mut transformer_grpc_client = if let Some(transformer_config) = &config.transformer_config { - let transformer_grpc_client = SourceTransformClient::new( - create_rpc_channel(transformer_config.socket_path.clone().into()).await?, + let mut sink_grpc_client = if let Some(udsink_config) = &config.udsink_config { + Some( + SinkClient::new(create_rpc_channel(udsink_config.socket_path.clone().into()).await?) 
+ .max_encoding_message_size(udsink_config.grpc_max_message_size) + .max_encoding_message_size(udsink_config.grpc_max_message_size), ) - .max_encoding_message_size(transformer_config.grpc_max_message_size) - .max_encoding_message_size(transformer_config.grpc_max_message_size); - - Some(transformer_grpc_client.clone()) } else { None }; @@ -147,6 +143,18 @@ async fn start_forwarder(cln_token: CancellationToken, config: &Settings) -> err None }; + let mut transformer_grpc_client = if let Some(transformer_config) = &config.transformer_config { + let transformer_grpc_client = SourceTransformClient::new( + create_rpc_channel(transformer_config.socket_path.clone().into()).await?, + ) + .max_encoding_message_size(transformer_config.grpc_max_message_size) + .max_encoding_message_size(transformer_config.grpc_max_message_size); + + Some(transformer_grpc_client.clone()) + } else { + None + }; + // readiness check for all the ud containers utils::wait_until_ready( cln_token.clone(), @@ -158,6 +166,12 @@ async fn start_forwarder(cln_token: CancellationToken, config: &Settings) -> err .await?; let source_type = fetch_source(config, &mut source_grpc_client).await?; + let (sink, fb_sink) = fetch_sink( + config, + sink_grpc_client.clone(), + fb_sink_grpc_client.clone(), + ) + .await?; // Start the metrics server in a separate background async spawn, // This should be running throughout the lifetime of the application, hence the handle is not @@ -174,62 +188,89 @@ async fn start_forwarder(cln_token: CancellationToken, config: &Settings) -> err utils::start_metrics_server(metrics_state).await; let source = SourceHandle::new(source_type); - start_forwarder_with_source( - source, - sink_grpc_client, - transformer_grpc_client, - fb_sink_grpc_client, - cln_token, - ) - .await?; + start_forwarder_with_source(source, sink, transformer_grpc_client, fb_sink, cln_token).await?; info!("Forwarder stopped gracefully"); Ok(()) } -pub(crate) async fn fetch_source( +// fetch right the source. 
+// source_grpc_client can be optional because it is valid only for user-defined source. +async fn fetch_source( config: &Settings, source_grpc_client: &mut Option>, ) -> crate::Result { - let source_type = if let Some(source_grpc_client) = source_grpc_client.clone() { + // check whether the source grpc client is provided, this happens only of the source is a + // user defined source + if let Some(source_grpc_client) = source_grpc_client.clone() { let (source_read, source_ack, lag_reader) = new_source( source_grpc_client, config.batch_size as usize, config.timeout_in_ms as u16, ) .await?; - SourceType::UserDefinedSource(source_read, source_ack, lag_reader) - } else if let Some(generator_config) = &config.generator_config { + return Ok(SourceType::UserDefinedSource( + source_read, + source_ack, + lag_reader, + )); + } + + // now that we know it is not a user-defined source, it has to be a built-in + if let Some(generator_config) = &config.generator_config { let (source_read, source_ack, lag_reader) = new_generator( generator_config.content.clone(), generator_config.rpu, config.batch_size as usize, Duration::from_millis(generator_config.duration as u64), )?; - SourceType::Generator(source_read, source_ack, lag_reader) + Ok(SourceType::Generator(source_read, source_ack, lag_reader)) } else { - return Err(error::Error::ConfigError( + Err(Error::ConfigError( "No valid source configuration found".into(), - )); + )) + } +} + +// fetch the actor handle for the sink. +// sink_grpc_client can be optional because it is valid only for user-defined sink. +async fn fetch_sink( + settings: &Settings, + sink_grpc_client: Option>, + fallback_sink_grpc_client: Option>, +) -> crate::Result<(SinkHandle, Option)> { + let fb_sink = match fallback_sink_grpc_client { + Some(fallback_sink) => { + Some(SinkHandle::new(SinkClientType::UserDefined(fallback_sink)).await?) 
+ } + None => None, }; - Ok(source_type) + + if let Some(sink_client) = sink_grpc_client { + let sink = SinkHandle::new(SinkClientType::UserDefined(sink_client)).await?; + return Ok((sink, fb_sink)); + } + if settings.logsink_config.is_some() { + let log = SinkHandle::new(SinkClientType::Log).await?; + return Ok((log, fb_sink)); + } + Err(Error::ConfigError( + "No valid Sink configuration found".to_string(), + )) } async fn start_forwarder_with_source( source: SourceHandle, - sink_grpc_client: SinkClient, + sink: SinkHandle, transformer_client: Option>, - fallback_sink_client: Option>, + fallback_sink: Option, cln_token: CancellationToken, ) -> error::Result<()> { // start the pending reader to publish pending metrics - let mut pending_reader = utils::create_pending_reader(source.clone()).await; - pending_reader.start().await; - - // build the forwarder - let sink_writer = SinkHandle::new(sink_grpc_client).await?; + let pending_reader = utils::create_pending_reader(source.clone()).await; + let _pending_reader_handle = pending_reader.start().await; - let mut forwarder_builder = ForwarderBuilder::new(source, sink_writer, cln_token); + let mut forwarder_builder = ForwarderBuilder::new(source, sink, cln_token); // add transformer if exists if let Some(transformer_client) = transformer_client { @@ -238,9 +279,8 @@ async fn start_forwarder_with_source( } // add fallback sink if exists - if let Some(fallback_sink_client) = fallback_sink_client { - let fallback_writer = SinkHandle::new(fallback_sink_client).await?; - forwarder_builder = forwarder_builder.fallback_sink_writer(fallback_writer); + if let Some(fallback_sink) = fallback_sink { + forwarder_builder = forwarder_builder.fallback_sink_writer(fallback_sink); } // build the final forwarder let mut forwarder = forwarder_builder.build(); @@ -254,7 +294,7 @@ async fn start_forwarder_with_source( #[cfg(test)] mod tests { - use crate::config::{Settings, UDSourceConfig}; + use crate::config::{Settings, UDSinkConfig, 
UDSourceConfig}; use crate::error; use crate::monovertex::start_forwarder; use crate::shared::server_info::ServerInfo; @@ -363,17 +403,22 @@ mod tests { token_clone.cancel(); }); - let mut config = Settings::default(); - config.udsink_config.socket_path = sink_sock_file.to_str().unwrap().to_string(); - config.udsink_config.server_info_path = sink_server_info.to_str().unwrap().to_string(); - - config.udsource_config = Some(UDSourceConfig { - socket_path: src_sock_file.to_str().unwrap().to_string(), - server_info_path: src_info_file.to_str().unwrap().to_string(), - grpc_max_message_size: 1024, - }); + let config = Settings { + udsink_config: Some(UDSinkConfig { + socket_path: sink_sock_file.to_str().unwrap().to_string(), + server_info_path: sink_server_info.to_str().unwrap().to_string(), + grpc_max_message_size: 1024, + }), + udsource_config: Some(UDSourceConfig { + socket_path: src_sock_file.to_str().unwrap().to_string(), + server_info_path: src_info_file.to_str().unwrap().to_string(), + grpc_max_message_size: 1024, + }), + ..Default::default() + }; let result = start_forwarder(cln_token.clone(), &config).await; + dbg!(&result); assert!(result.is_ok()); // stop the source and sink servers diff --git a/rust/numaflow-core/src/monovertex/forwarder.rs b/rust/numaflow-core/src/monovertex/forwarder.rs index 6ac44b7637..a0be586dbf 100644 --- a/rust/numaflow-core/src/monovertex/forwarder.rs +++ b/rust/numaflow-core/src/monovertex/forwarder.rs @@ -533,7 +533,7 @@ mod tests { use crate::monovertex::forwarder::ForwarderBuilder; use crate::monovertex::SourceType; use crate::shared::utils::create_rpc_channel; - use crate::sink::SinkHandle; + use crate::sink::{SinkClientType, SinkHandle}; use crate::source::user_defined::new_source; use crate::source::SourceHandle; use crate::transformer::user_defined::SourceTransformHandle; @@ -738,11 +738,10 @@ mod tests { source_lag_reader, )); - let sink_writer = SinkHandle::new(SinkClient::new( - 
create_rpc_channel(sink_sock_file).await.unwrap(), - )) - .await - .expect("failed to connect to sink server"); + let sink_grpc_client = SinkClient::new(create_rpc_channel(sink_sock_file).await.unwrap()); + let sink_writer = SinkHandle::new(SinkClientType::UserDefined(sink_grpc_client)) + .await + .expect("failed to connect to sink server"); let transformer_client = SourceTransformHandle::new(SourceTransformClient::new( create_rpc_channel(transformer_sock_file).await.unwrap(), @@ -868,11 +867,10 @@ mod tests { lag_reader, )); - let sink_writer = SinkHandle::new(SinkClient::new( - create_rpc_channel(sink_sock_file).await.unwrap(), - )) - .await - .expect("failed to connect to sink server"); + let sink_client = SinkClient::new(create_rpc_channel(sink_sock_file).await.unwrap()); + let sink_writer = SinkHandle::new(SinkClientType::UserDefined(sink_client)) + .await + .expect("failed to connect to sink server"); let mut forwarder = ForwarderBuilder::new(source_reader, sink_writer, cln_token.clone()).build(); @@ -991,17 +989,15 @@ mod tests { source_lag_reader, )); - let sink_writer = SinkHandle::new(SinkClient::new( - create_rpc_channel(sink_sock_file).await.unwrap(), - )) - .await - .expect("failed to connect to sink server"); + let sink_client = SinkClient::new(create_rpc_channel(sink_sock_file).await.unwrap()); + let sink_writer = SinkHandle::new(SinkClientType::UserDefined(sink_client)) + .await + .expect("failed to connect to sink server"); - let fb_sink_writer = SinkHandle::new(SinkClient::new( - create_rpc_channel(fb_sink_sock_file).await.unwrap(), - )) - .await - .expect("failed to connect to fb sink server"); + let fb_sink_writer = SinkClient::new(create_rpc_channel(fb_sink_sock_file).await.unwrap()); + let fb_sink_writer = SinkHandle::new(SinkClientType::UserDefined(fb_sink_writer)) + .await + .expect("failed to connect to fb sink server"); let mut forwarder = ForwarderBuilder::new(source, sink_writer, cln_token.clone()) .fallback_sink_writer(fb_sink_writer) 
diff --git a/rust/numaflow-core/src/monovertex/metrics.rs b/rust/numaflow-core/src/monovertex/metrics.rs index f5d432c765..0c4ca65bc6 100644 --- a/rust/numaflow-core/src/monovertex/metrics.rs +++ b/rust/numaflow-core/src/monovertex/metrics.rs @@ -68,7 +68,7 @@ const SINK_TIME: &str = "monovtx_sink_time"; #[derive(Clone)] pub(crate) struct UserDefinedContainerState { pub source_client: Option>, - pub sink_client: SinkClient, + pub sink_client: Option>, pub transformer_client: Option>, pub fb_sink_client: Option>, } @@ -305,16 +305,18 @@ async fn livez() -> impl IntoResponse { StatusCode::NO_CONTENT } -async fn sidecar_livez(State(mut state): State) -> impl IntoResponse { +async fn sidecar_livez(State(state): State) -> impl IntoResponse { if let Some(mut source_client) = state.source_client { if source_client.is_ready(Request::new(())).await.is_err() { error!("Source client is not available"); return StatusCode::SERVICE_UNAVAILABLE; } } - if state.sink_client.is_ready(Request::new(())).await.is_err() { - error!("Sink client is not available"); - return StatusCode::SERVICE_UNAVAILABLE; + if let Some(mut sink_client) = state.sink_client { + if sink_client.is_ready(Request::new(())).await.is_err() { + error!("Sink client is not available"); + return StatusCode::SERVICE_UNAVAILABLE; + } } if let Some(mut transformer_client) = state.transformer_client { if transformer_client.is_ready(Request::new(())).await.is_err() { @@ -346,11 +348,14 @@ pub(crate) struct PendingReader { lag_reader: SourceHandle, lag_checking_interval: Duration, refresh_interval: Duration, - buildup_handle: Option>, - expose_handle: Option>, pending_stats: Arc>>, } +pub(crate) struct PendingReaderTasks { + buildup_handle: JoinHandle<()>, + expose_handle: JoinHandle<()>, +} + /// PendingReaderBuilder is used to build a [LagReader] instance. 
pub(crate) struct PendingReaderBuilder { lag_reader: SourceHandle, @@ -386,8 +391,6 @@ impl PendingReaderBuilder { refresh_interval: self .refresh_interval .unwrap_or_else(|| Duration::from_secs(5)), - buildup_handle: None, - expose_handle: None, pending_stats: Arc::new(Mutex::new(Vec::with_capacity(MAX_PENDING_STATS))), } } @@ -399,33 +402,34 @@ impl PendingReader { /// This method spawns two asynchronous tasks: /// - One to periodically check the lag and update the pending stats. /// - Another to periodically expose the pending metrics. - pub async fn start(&mut self) { + /// + /// Dropping the PendingReaderTasks will abort the background tasks. + pub async fn start(&self) -> PendingReaderTasks { let pending_reader = self.lag_reader.clone(); let lag_checking_interval = self.lag_checking_interval; let refresh_interval = self.refresh_interval; let pending_stats = self.pending_stats.clone(); - self.buildup_handle = Some(tokio::spawn(async move { + let buildup_handle = tokio::spawn(async move { build_pending_info(pending_reader, lag_checking_interval, pending_stats).await; - })); + }); let pending_stats = self.pending_stats.clone(); - self.expose_handle = Some(tokio::spawn(async move { + let expose_handle = tokio::spawn(async move { expose_pending_metrics(refresh_interval, pending_stats).await; - })); + }); + PendingReaderTasks { + buildup_handle, + expose_handle, + } } } -/// When the PendingReader is dropped, we need to clean up the pending exposer and the pending builder tasks. -impl Drop for PendingReader { +/// When the PendingReaderTasks is dropped, we need to clean up the pending exposer and the pending builder tasks. 
+impl Drop for PendingReaderTasks { fn drop(&mut self) { - if let Some(handle) = self.expose_handle.take() { - handle.abort(); - } - if let Some(handle) = self.buildup_handle.take() { - handle.abort(); - } - + self.expose_handle.abort(); + self.buildup_handle.abort(); info!("Stopped the Lag-Reader Expose and Builder tasks"); } } @@ -652,7 +656,9 @@ mod tests { source_client: Some(SourceClient::new( create_rpc_channel(src_sock_file).await.unwrap(), )), - sink_client: SinkClient::new(create_rpc_channel(sink_sock_file).await.unwrap()), + sink_client: Some(SinkClient::new( + create_rpc_channel(sink_sock_file).await.unwrap(), + )), transformer_client: Some(SourceTransformClient::new( create_rpc_channel(sock_file).await.unwrap(), )), diff --git a/rust/numaflow-core/src/shared/utils.rs b/rust/numaflow-core/src/shared/utils.rs index 7dd1d51ac3..264472534f 100644 --- a/rust/numaflow-core/src/shared/utils.rs +++ b/rust/numaflow-core/src/shared/utils.rs @@ -31,7 +31,7 @@ use tracing::{info, warn}; pub(crate) async fn check_compatibility( cln_token: &CancellationToken, source_file_path: Option, - sink_file_path: PathBuf, + sink_file_path: Option, transformer_file_path: Option, fb_sink_file_path: Option, ) -> error::Result<()> { @@ -44,12 +44,14 @@ pub(crate) async fn check_compatibility( })?; } - server_info::check_for_server_compatibility(sink_file_path, cln_token.clone()) - .await - .map_err(|e| { - error!("Error waiting for sink server info file: {:?}", e); - Error::ForwarderError("Error waiting for server info file".to_string()) - })?; + if let Some(sink_file_path) = sink_file_path { + server_info::check_for_server_compatibility(sink_file_path, cln_token.clone()) + .await + .map_err(|e| { + error!("Error waiting for sink server info file: {:?}", e); + Error::ForwarderError("Error waiting for server info file".to_string()) + })?; + } if let Some(transformer_path) = transformer_file_path { server_info::check_for_server_compatibility(transformer_path, cln_token.clone()) @@ 
-100,7 +102,7 @@ pub(crate) async fn create_pending_reader(lag_reader_grpc_client: SourceHandle) pub(crate) async fn wait_until_ready( cln_token: CancellationToken, source_client: &mut Option>, - sink_client: &mut SinkClient, + sink_client: &mut Option>, transformer_client: &mut Option>, fb_sink_client: &mut Option>, ) -> error::Result<()> { @@ -121,7 +123,11 @@ pub(crate) async fn wait_until_ready( true }; - let sink_ready = sink_client.is_ready(Request::new(())).await.is_ok(); + let sink_ready = if let Some(sink_client) = sink_client { + sink_client.is_ready(Request::new(())).await.is_ok() + } else { + true + }; if !sink_ready { info!("UDSink is not ready, waiting..."); } @@ -255,7 +261,7 @@ mod tests { let result = check_compatibility( &cln_token, Some(source_file_path), - sink_file_path, + Some(sink_file_path), None, None, ) @@ -283,7 +289,7 @@ mod tests { let result = check_compatibility( &cln_token, Some(source_file_path), - sink_file_path, + Some(sink_file_path), Some(transformer_file_path), Some(fb_sink_file_path), ) @@ -388,7 +394,7 @@ mod tests { let source_grpc_client = SourceClient::new(create_rpc_channel(source_sock_file.clone()).await.unwrap()); - let mut sink_grpc_client = + let sink_grpc_client = SinkClient::new(create_rpc_channel(sink_sock_file.clone()).await.unwrap()); let mut transformer_grpc_client = Some(SourceTransformClient::new( create_rpc_channel(transformer_sock_file.clone()) @@ -402,7 +408,7 @@ mod tests { let result = wait_until_ready( cln_token, &mut Some(source_grpc_client), - &mut sink_grpc_client, + &mut Some(sink_grpc_client), &mut transformer_grpc_client, &mut fb_sink_grpc_client, ) diff --git a/rust/numaflow-core/src/sink.rs b/rust/numaflow-core/src/sink.rs index e39892ddd8..8c29966ccb 100644 --- a/rust/numaflow-core/src/sink.rs +++ b/rust/numaflow-core/src/sink.rs @@ -6,6 +6,7 @@ use crate::message::{Message, ResponseFromSink}; use numaflow_grpc::clients::sink::sink_client::SinkClient; use user_defined::UserDefinedSink; +mod log; 
/// [User-Defined Sink] extends Numaflow to add custom sources supported outside the builtins. /// /// [User-Defined Sink]: https://numaflow.numaproj.io/user-guide/sinks/user-defined-sinks/ @@ -61,16 +62,34 @@ pub(crate) struct SinkHandle { sender: mpsc::Sender, } +pub(crate) enum SinkClientType { + Log, + UserDefined(SinkClient), +} + impl SinkHandle { - pub(crate) async fn new(sink_client: SinkClient) -> crate::Result { + pub(crate) async fn new(sink_client: SinkClientType) -> crate::Result { let (sender, receiver) = mpsc::channel(config().batch_size as usize); - let sink = UserDefinedSink::new(sink_client).await?; - tokio::spawn(async move { - let mut actor = SinkActor::new(receiver, sink); - while let Some(msg) = actor.actor_messages.recv().await { - actor.handle_message(msg).await; + match sink_client { + SinkClientType::Log => { + let log_sink = log::LogSink; + tokio::spawn(async { + let mut actor = SinkActor::new(receiver, log_sink); + while let Some(msg) = actor.actor_messages.recv().await { + actor.handle_message(msg).await; + } + }); } - }); + SinkClientType::UserDefined(sink_client) => { + let sink = UserDefinedSink::new(sink_client).await?; + tokio::spawn(async { + let mut actor = SinkActor::new(receiver, sink); + while let Some(msg) = actor.actor_messages.recv().await { + actor.handle_message(msg).await; + } + }); + } + }; Ok(Self { sender }) } diff --git a/rust/numaflow-core/src/sink/log.rs b/rust/numaflow-core/src/sink/log.rs new file mode 100644 index 0000000000..8db95b9072 --- /dev/null +++ b/rust/numaflow-core/src/sink/log.rs @@ -0,0 +1,34 @@ +use crate::{ + error, + message::{Message, ResponseFromSink, ResponseStatusFromSink}, + sink::Sink, +}; + +pub(crate) struct LogSink; + +impl Sink for LogSink { + async fn sink(&mut self, messages: Vec) -> error::Result> { + let mut result = Vec::with_capacity(messages.len()); + for msg in messages { + let mut headers = String::new(); + msg.headers.iter().for_each(|(k, v)| { + headers.push_str(&format!("{}: 
{}, ", k, v)); + }); + + let log_line = format!( + "Payload - {} Keys - {} EventTime - {} Headers - {} ID - {}", + &String::from_utf8_lossy(&msg.value), + msg.keys.join(","), + msg.event_time.timestamp_millis(), + headers, + msg.id, + ); + tracing::info!("{}", log_line); + result.push(ResponseFromSink { + id: msg.id, + status: ResponseStatusFromSink::Success, + }) + } + Ok(result) + } +} From c95d930830912ceef3516b46994508c56214d236 Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Mon, 14 Oct 2024 22:33:03 -0700 Subject: [PATCH 106/188] feat: support multiple controller with instance config (#2153) Signed-off-by: Derek Wang --- .../namespaced-controller-wo-crds.yaml | 2 ++ .../numaflow-controller-config.yaml | 2 ++ config/install.yaml | 2 ++ config/namespace-install.yaml | 2 ++ docs/operations/installation.md | 26 ++++++++++++++ .../numaflow-controller-config.yaml | 4 +++ go.mod | 4 +-- go.sum | 8 ++--- pkg/apis/numaflow/v1alpha1/const.go | 3 +- pkg/reconciler/config.go | 7 ++++ pkg/reconciler/config_test.go | 34 +++++++++++++++++++ pkg/reconciler/isbsvc/controller.go | 8 ++++- pkg/reconciler/monovertex/controller.go | 4 +++ pkg/reconciler/pipeline/controller.go | 12 +++++++ pkg/reconciler/vertex/controller.go | 8 +++++ 15 files changed, 118 insertions(+), 8 deletions(-) diff --git a/config/advanced-install/namespaced-controller-wo-crds.yaml b/config/advanced-install/namespaced-controller-wo-crds.yaml index 20ca3f2913..487bb74249 100644 --- a/config/advanced-install/namespaced-controller-wo-crds.yaml +++ b/config/advanced-install/namespaced-controller-wo-crds.yaml @@ -137,6 +137,8 @@ metadata: apiVersion: v1 data: controller-config.yaml: | + # "instance" configuration can be used to run multiple Numaflow controllers, check details at https://numaflow.numaproj.io/operations/installation/#multiple-controllers + instance: "" defaults: containerResources: | requests: diff --git a/config/base/controller-manager/numaflow-controller-config.yaml 
b/config/base/controller-manager/numaflow-controller-config.yaml index eaf49cd124..2164f116d8 100644 --- a/config/base/controller-manager/numaflow-controller-config.yaml +++ b/config/base/controller-manager/numaflow-controller-config.yaml @@ -4,6 +4,8 @@ metadata: name: numaflow-controller-config data: controller-config.yaml: |+ + # "instance" configuration can be used to run multiple Numaflow controllers, check details at https://numaflow.numaproj.io/operations/installation/#multiple-controllers + instance: "" defaults: containerResources: | requests: diff --git a/config/install.yaml b/config/install.yaml index a130e9c144..7acb3d54a0 100644 --- a/config/install.yaml +++ b/config/install.yaml @@ -28252,6 +28252,8 @@ metadata: apiVersion: v1 data: controller-config.yaml: | + # "instance" configuration can be used to run multiple Numaflow controllers, check details at https://numaflow.numaproj.io/operations/installation/#multiple-controllers + instance: "" defaults: containerResources: | requests: diff --git a/config/namespace-install.yaml b/config/namespace-install.yaml index 38053d2bfc..e97e13ba1d 100644 --- a/config/namespace-install.yaml +++ b/config/namespace-install.yaml @@ -28143,6 +28143,8 @@ metadata: apiVersion: v1 data: controller-config.yaml: | + # "instance" configuration can be used to run multiple Numaflow controllers, check details at https://numaflow.numaproj.io/operations/installation/#multiple-controllers + instance: "" defaults: containerResources: | requests: diff --git a/docs/operations/installation.md b/docs/operations/installation.md index d92d1cfa06..fcffdb6f55 100644 --- a/docs/operations/installation.md +++ b/docs/operations/installation.md @@ -138,3 +138,29 @@ data: ``` If HA is turned off, the controller deployment should not run with multiple replicas. 
+ +## Multiple Controllers + +With in one cluster, or even in one namespace, you can run multiple Numaflow controllers by leveraging the `instance` configuration in the `numaflow-controller-config` ConfigMap. + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: numaflow-controller-config +data: + controller-config.yaml: | + # Within a cluster, setting "instance" can be used to run N Numaflow controllers. + # If configured, the controller will only watch the objects having an annotation with the key "numaflow.numaproj.io/instance" and the corresponding value. + # If not configured (or empty string), the controller will watch all objects. + instance: "" + defaults: + containerResources: | + requests: + memory: "128Mi" + cpu: "100m" + isbsvc: + ... +``` + +When `instance` is configured (e.g. `my-instance`), the controller will only watch the objects (`InterStepBufferService`, `Pipeline` and `MonoVertex`) having the annotation `numaflow.numaproj.io/instance: my-instance`. Correspondingly, if a `Pipeline` object has an annotation `numaflow.numaproj.io/instance: my-instance`, it requires the referenced `InterStepBufferService` also has the same annotation, or it will fail to orchestrate the pipeline. diff --git a/docs/operations/numaflow-controller-config.yaml b/docs/operations/numaflow-controller-config.yaml index e8e6a8af19..734e6981e4 100644 --- a/docs/operations/numaflow-controller-config.yaml +++ b/docs/operations/numaflow-controller-config.yaml @@ -4,6 +4,10 @@ metadata: name: numaflow-controller-config data: controller-config.yaml: | + # Within a cluster, setting "instance" can be used to run N Numaflow controllers. + # If configured, the controller will only watch the objects having an annotation with the key "numaflow.numaproj.io/instance" and the corresponding value. + # If not configured (or empty string), the controller will watch all objects. 
+ instance: "" defaults: containerResources: | requests: diff --git a/go.mod b/go.mod index 02e20f7a35..a40001aeb8 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/numaproj/numaflow go 1.23.1 require ( - github.com/IBM/sarama v1.43.2 + github.com/IBM/sarama v1.43.3 github.com/Masterminds/semver/v3 v3.3.0 github.com/Masterminds/sprig/v3 v3.2.3 github.com/ahmetb/gen-crd-api-reference-docs v0.3.0 @@ -85,7 +85,7 @@ require ( github.com/chenzhuoyu/iasm v0.9.1 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect - github.com/eapache/go-resiliency v1.6.0 // indirect + github.com/eapache/go-resiliency v1.7.0 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect github.com/eapache/queue v1.1.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect diff --git a/go.sum b/go.sum index 8df905cd8e..c5f73afc67 100644 --- a/go.sum +++ b/go.sum @@ -46,8 +46,8 @@ github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBp github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= -github.com/IBM/sarama v1.43.2 h1:HABeEqRUh32z8yzY2hGB/j8mHSzC/HA9zlEjqFNCzSw= -github.com/IBM/sarama v1.43.2/go.mod h1:Kyo4WkF24Z+1nz7xeVUFWIuKVV8RS3wM8mkvPKMdXFQ= +github.com/IBM/sarama v1.43.3 h1:Yj6L2IaNvb2mRBop39N7mmJAHBVY3dTPncr3qGVkxPA= +github.com/IBM/sarama v1.43.3/go.mod h1:FVIRaLrhK3Cla/9FfRF5X9Zua2KpS3SYIXxhac1H+FQ= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible h1:1G1pk05UrOh0NlF1oeaaix1x8XzrfjIDK47TY0Zehcw= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod 
h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= @@ -123,8 +123,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8Yc github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/eapache/go-resiliency v1.6.0 h1:CqGDTLtpwuWKn6Nj3uNUdflaq+/kIPsg0gfNzHton30= -github.com/eapache/go-resiliency v1.6.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= +github.com/eapache/go-resiliency v1.7.0 h1:n3NRTnBn5N0Cbi/IeOHuQn9s2UwVUH7Ga0ZWcP+9JTA= +github.com/eapache/go-resiliency v1.7.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws= github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= diff --git a/pkg/apis/numaflow/v1alpha1/const.go b/pkg/apis/numaflow/v1alpha1/const.go index f65e2a5bd7..574752c304 100644 --- a/pkg/apis/numaflow/v1alpha1/const.go +++ b/pkg/apis/numaflow/v1alpha1/const.go @@ -26,7 +26,8 @@ const ( Project = "numaflow" // label/annotation keys. 
- KeyHash = "numaflow.numaproj.io/hash" // hash of the object + KeyInstance = "numaflow.numaproj.io/instance" // instance key of the object + KeyHash = "numaflow.numaproj.io/hash" // hash of the object KeyComponent = "app.kubernetes.io/component" KeyPartOf = "app.kubernetes.io/part-of" KeyManagedBy = "app.kubernetes.io/managed-by" diff --git a/pkg/reconciler/config.go b/pkg/reconciler/config.go index 16d368abf3..993c9bee4a 100644 --- a/pkg/reconciler/config.go +++ b/pkg/reconciler/config.go @@ -36,6 +36,7 @@ type GlobalConfig struct { } type config struct { + Instance string `json:"instance"` Defaults *DefaultConfig `json:"defaults"` ISBSvc *ISBSvcConfig `json:"isbsvc"` } @@ -83,6 +84,12 @@ type JetStreamVersion struct { StartCommand string `json:"startCommand"` } +func (g *GlobalConfig) GetInstance() string { + g.lock.RLock() + defer g.lock.RUnlock() + return g.conf.Instance +} + // Get controller scope default config func (g *GlobalConfig) GetDefaults() DefaultConfig { g.lock.RLock() diff --git a/pkg/reconciler/config_test.go b/pkg/reconciler/config_test.go index 5befdc0264..0896b57444 100644 --- a/pkg/reconciler/config_test.go +++ b/pkg/reconciler/config_test.go @@ -18,6 +18,7 @@ package reconciler import ( "reflect" + "sync" "testing" corev1 "k8s.io/api/core/v1" @@ -77,3 +78,36 @@ func TestGlobalConfig_GetDefaultContainerResources(t *testing.T) { } } } + +func TestGlobalConfig_GetInstance(t *testing.T) { + tests := []struct { + name string + instance string + }{ + { + name: "Empty instance", + instance: "", + }, + { + name: "Non-empty instance", + instance: "test-instance", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := &GlobalConfig{ + conf: &config{ + Instance: tt.instance, + }, + lock: &sync.RWMutex{}, + } + + got := g.GetInstance() + if got != tt.instance { + t.Errorf("GetInstance() = %v, want %v", got, tt.instance) + } + + }) + } +} diff --git a/pkg/reconciler/isbsvc/controller.go b/pkg/reconciler/isbsvc/controller.go 
index d94e14424d..987c9322f7 100644 --- a/pkg/reconciler/isbsvc/controller.go +++ b/pkg/reconciler/isbsvc/controller.go @@ -36,6 +36,7 @@ import ( dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" "github.com/numaproj/numaflow/pkg/reconciler" "github.com/numaproj/numaflow/pkg/reconciler/isbsvc/installer" + "github.com/numaproj/numaflow/pkg/shared/logging" ) const ( @@ -67,6 +68,11 @@ func (r *interStepBufferServiceReconciler) Reconcile(ctx context.Context, req ct return ctrl.Result{}, err } log := r.logger.With("namespace", isbSvc.Namespace).With("isbsvc", isbSvc.Name) + if instance := isbSvc.GetAnnotations()[dfv1.KeyInstance]; instance != r.config.GetInstance() { + log.Debugw("ISB Service not managed by this controller, skipping", zap.String("instance", instance)) + return ctrl.Result{}, nil + } + ctx = logging.WithLogger(ctx, log) isbSvcCopy := isbSvc.DeepCopy() reconcileErr := r.reconcile(ctx, isbSvcCopy) if reconcileErr != nil { @@ -87,7 +93,7 @@ func (r *interStepBufferServiceReconciler) Reconcile(ctx context.Context, req ct // reconcile does the real logic func (r *interStepBufferServiceReconciler) reconcile(ctx context.Context, isbSvc *dfv1.InterStepBufferService) error { - log := r.logger.With("namespace", isbSvc.Namespace).With("isbsvc", isbSvc.Name) + log := logging.FromContext(ctx) if !isbSvc.DeletionTimestamp.IsZero() { log.Info("Deleting ISB Service") if controllerutil.ContainsFinalizer(isbSvc, finalizerName) { diff --git a/pkg/reconciler/monovertex/controller.go b/pkg/reconciler/monovertex/controller.go index 5e31ed6f28..a40f620c69 100644 --- a/pkg/reconciler/monovertex/controller.go +++ b/pkg/reconciler/monovertex/controller.go @@ -72,6 +72,10 @@ func (mr *monoVertexReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{}, err } log := mr.logger.With("namespace", monoVtx.Namespace).With("monoVertex", monoVtx.Name) + if instance := monoVtx.GetAnnotations()[dfv1.KeyInstance]; instance != mr.config.GetInstance() { + 
log.Debugw("MonoVertex not managed by this controller, skipping", zap.String("instance", instance)) + return ctrl.Result{}, nil + } ctx = logging.WithLogger(ctx, log) monoVtxCopy := monoVtx.DeepCopy() result, err := mr.reconcile(ctx, monoVtxCopy) diff --git a/pkg/reconciler/pipeline/controller.go b/pkg/reconciler/pipeline/controller.go index 3ae7c49c00..0af2fb4788 100644 --- a/pkg/reconciler/pipeline/controller.go +++ b/pkg/reconciler/pipeline/controller.go @@ -81,6 +81,10 @@ func (r *pipelineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c return ctrl.Result{}, err } log := r.logger.With("namespace", pl.Namespace).With("pipeline", pl.Name) + if instance := pl.GetAnnotations()[dfv1.KeyInstance]; instance != r.config.GetInstance() { + log.Debugw("Pipeline not managed by this controller, skipping", zap.String("instance", instance)) + return ctrl.Result{}, nil + } plCopy := pl.DeepCopy() ctx = logging.WithLogger(ctx, log) result, reconcileErr := r.reconcile(ctx, plCopy) @@ -236,6 +240,10 @@ func (r *pipelineReconciler) reconcileFixedResources(ctx context.Context, pl *df log.Errorw("Failed to get ISB Service", zap.String("isbsvc", isbSvcName), zap.Error(err)) return err } + if isbSvc.GetAnnotations()[dfv1.KeyInstance] != pl.GetAnnotations()[dfv1.KeyInstance] { + log.Errorw("ISB Service is found but not managed by the same controller of this pipeline", zap.String("isbsvc", isbSvcName), zap.Error(err)) + return fmt.Errorf("isbsvc not managed by the same controller of this pipeline") + } if !isbSvc.Status.IsHealthy() { log.Errorw("ISB Service is not in healthy status", zap.String("isbsvc", isbSvcName), zap.Error(err)) return fmt.Errorf("isbsvc not healthy") @@ -652,6 +660,10 @@ func buildVertices(pl *dfv1.Pipeline) map[string]dfv1.Vertex { }, Spec: spec, } + // If corresponding pipline has instance annotation, we should copy it to the vertex + if x := pl.GetAnnotations()[dfv1.KeyInstance]; x != "" { + obj.Annotations[dfv1.KeyInstance] = x + } 
result[obj.Name] = obj } return result diff --git a/pkg/reconciler/vertex/controller.go b/pkg/reconciler/vertex/controller.go index 20945639ab..8d520609bf 100644 --- a/pkg/reconciler/vertex/controller.go +++ b/pkg/reconciler/vertex/controller.go @@ -72,6 +72,10 @@ func (r *vertexReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr } log := r.logger.With("namespace", vertex.Namespace).With("vertex", vertex.Name).With("pipeline", vertex.Spec.PipelineName) ctx = logging.WithLogger(ctx, log) + if instance := vertex.GetAnnotations()[dfv1.KeyInstance]; instance != r.config.GetInstance() { + log.Debugw("Vertex not managed by this controller, skipping", zap.String("instance", instance)) + return ctrl.Result{}, nil + } vertexCopy := vertex.DeepCopy() result, err := r.reconcile(ctx, vertexCopy) if err != nil { @@ -117,6 +121,10 @@ func (r *vertexReconciler) reconcile(ctx context.Context, vertex *dfv1.Vertex) ( vertex.Status.MarkPhaseFailed("FindISBSvcFailed", err.Error()) return ctrl.Result{}, err } + if isbSvc.GetAnnotations()[dfv1.KeyInstance] != vertex.GetAnnotations()[dfv1.KeyInstance] { + log.Errorw("ISB Service is found but not managed by the same controller of this vertex", zap.String("isbsvc", isbSvcName), zap.Error(err)) + return ctrl.Result{}, fmt.Errorf("isbsvc not managed by the same controller of this vertex") + } if !isbSvc.Status.IsHealthy() { log.Errorw("ISB Service is not in healthy status", zap.String("isbsvc", isbSvcName), zap.Error(err)) vertex.Status.MarkPhaseFailed("ISBSvcNotHealthy", "isbsvc not healthy") From b26863955335aa2685d52afcaffb905a7eee8a2f Mon Sep 17 00:00:00 2001 From: Sreekanth Date: Tue, 15 Oct 2024 12:19:39 +0530 Subject: [PATCH 107/188] chore: Refactor - Rename Error variants (#2154) Signed-off-by: Sreekanth --- rust/numaflow-core/src/config.rs | 36 +++++++------- rust/numaflow-core/src/error.rs | 20 ++++---- rust/numaflow-core/src/lib.rs | 3 +- rust/numaflow-core/src/message.rs | 6 +-- rust/numaflow-core/src/monovertex.rs 
| 6 +-- .../numaflow-core/src/monovertex/forwarder.rs | 20 ++++---- rust/numaflow-core/src/monovertex/metrics.rs | 8 ++-- rust/numaflow-core/src/shared/server_info.rs | 47 +++++++++---------- rust/numaflow-core/src/shared/utils.rs | 38 +++++++-------- rust/numaflow-core/src/sink/user_defined.rs | 14 +++--- rust/numaflow-core/src/source/user_defined.rs | 39 ++++++++------- .../src/transformer/user_defined.rs | 21 ++++----- 12 files changed, 125 insertions(+), 133 deletions(-) diff --git a/rust/numaflow-core/src/config.rs b/rust/numaflow-core/src/config.rs index 82d465ac73..facca96e9c 100644 --- a/rust/numaflow-core/src/config.rs +++ b/rust/numaflow-core/src/config.rs @@ -1,11 +1,13 @@ -use crate::error::Error; +use std::env; +use std::fmt::Display; +use std::sync::OnceLock; + use base64::prelude::BASE64_STANDARD; use base64::Engine; use bytes::Bytes; + +use crate::Error; use numaflow_models::models::{Backoff, MonoVertex, RetryStrategy}; -use std::env; -use std::fmt::Display; -use std::sync::OnceLock; const DEFAULT_SOURCE_SOCKET: &str = "/var/run/numaflow/source.sock"; const DEFAULT_SOURCE_SERVER_INFO_FILE: &str = "/var/run/numaflow/sourcer-server-info"; @@ -216,13 +218,11 @@ impl Settings { let mono_vertex_spec = BASE64_STANDARD .decode(mono_vertex_spec.as_bytes()) .map_err(|e| { - Error::ConfigError(format!("Failed to decode mono vertex spec: {:?}", e)) + Error::Config(format!("Failed to decode mono vertex spec: {:?}", e)) })?; - let mono_vertex_obj: MonoVertex = - serde_json::from_slice(&mono_vertex_spec).map_err(|e| { - Error::ConfigError(format!("Failed to parse mono vertex spec: {:?}", e)) - })?; + let mono_vertex_obj: MonoVertex = serde_json::from_slice(&mono_vertex_spec) + .map_err(|e| Error::Config(format!("Failed to parse mono vertex spec: {:?}", e)))?; settings.batch_size = mono_vertex_obj .spec @@ -245,13 +245,13 @@ impl Settings { settings.mono_vertex_name = mono_vertex_obj .metadata .and_then(|metadata| metadata.name) - .ok_or_else(|| 
Error::ConfigError("Mono vertex name not found".to_string()))?; + .ok_or_else(|| Error::Config("Mono vertex name not found".to_string()))?; settings.transformer_config = match mono_vertex_obj .spec .source .as_deref() - .ok_or(Error::ConfigError("Source not found".to_string()))? + .ok_or(Error::Config("Source not found".to_string()))? .transformer { Some(_) => Some(TransformerConfig::default()), @@ -262,7 +262,7 @@ impl Settings { .spec .source .as_deref() - .ok_or(Error::ConfigError("Source not found".to_string()))? + .ok_or(Error::Config("Source not found".to_string()))? .udsource { Some(_) => Some(UDSourceConfig::default()), @@ -273,7 +273,7 @@ impl Settings { .spec .sink .as_deref() - .ok_or(Error::ConfigError("Sink not found".to_string()))? + .ok_or(Error::Config("Sink not found".to_string()))? .udsink { Some(_) => Some(UDSinkConfig::default()), @@ -284,7 +284,7 @@ impl Settings { .spec .sink .as_deref() - .ok_or(Error::ConfigError("Sink not found".to_string()))? + .ok_or(Error::Config("Sink not found".to_string()))? .fallback { Some(_) => Some(UDSinkConfig::fallback_default()), @@ -295,7 +295,7 @@ impl Settings { .spec .source .as_deref() - .ok_or(Error::ConfigError("Source not found".to_string()))? + .ok_or(Error::Config("Source not found".to_string()))? 
.generator .as_deref() { @@ -341,7 +341,7 @@ impl Settings { // We do not allow 0 attempts to write to sink if settings.sink_max_retry_attempts == 0 { - return Err(Error::ConfigError( + return Err(Error::Config( "Retry Strategy given with 0 retry attempts".to_string(), )); } @@ -359,7 +359,7 @@ impl Settings { if settings.sink_retry_on_fail_strategy == OnFailureStrategy::Fallback && settings.fallback_config.is_none() { - return Err(Error::ConfigError( + return Err(Error::Config( "Retry Strategy given as fallback but Fallback sink not configured" .to_string(), )); @@ -370,7 +370,7 @@ impl Settings { settings.replica = env::var(ENV_POD_REPLICA) .unwrap_or_else(|_| "0".to_string()) .parse() - .map_err(|e| Error::ConfigError(format!("Failed to parse pod replica: {:?}", e)))?; + .map_err(|e| Error::Config(format!("Failed to parse pod replica: {:?}", e)))?; Ok(settings) } diff --git a/rust/numaflow-core/src/error.rs b/rust/numaflow-core/src/error.rs index 50c5f87e2e..5b5095b70c 100644 --- a/rust/numaflow-core/src/error.rs +++ b/rust/numaflow-core/src/error.rs @@ -5,35 +5,35 @@ pub type Result = std::result::Result; #[derive(Error, Debug, Clone)] pub enum Error { #[error("Metrics Error - {0}")] - MetricsError(String), + Metrics(String), #[error("Source Error - {0}")] - SourceError(String), + Source(String), #[error("Sink Error - {0}")] - SinkError(String), + Sink(String), #[error("Transformer Error - {0}")] - TransformerError(String), + Transformer(String), #[error("Forwarder Error - {0}")] - ForwarderError(String), + Forwarder(String), #[error("Connection Error - {0}")] - ConnectionError(String), + Connection(String), #[error("gRPC Error - {0}")] - GRPCError(String), + Grpc(String), #[error("Config Error - {0}")] - ConfigError(String), + Config(String), #[error("ServerInfoError Error - {0}")] - ServerInfoError(String), + ServerInfo(String), } impl From for Error { fn from(status: tonic::Status) -> Self { - Error::GRPCError(status.to_string()) + 
Error::Grpc(status.to_string()) } } diff --git a/rust/numaflow-core/src/lib.rs b/rust/numaflow-core/src/lib.rs index 4e410c9f90..5b7fae0b4c 100644 --- a/rust/numaflow-core/src/lib.rs +++ b/rust/numaflow-core/src/lib.rs @@ -2,8 +2,7 @@ use tracing::error; /// Custom Error handling. mod error; -pub(crate) use crate::error::Error; -pub(crate) use crate::error::Result; +pub(crate) use crate::error::{Error, Result}; /// MonoVertex is a simplified version of the [Pipeline] spec which is ideal for high TPS, low latency /// use-cases which do not require [ISB]. diff --git a/rust/numaflow-core/src/message.rs b/rust/numaflow-core/src/message.rs index 69d60d66e9..b2644e86f4 100644 --- a/rust/numaflow-core/src/message.rs +++ b/rust/numaflow-core/src/message.rs @@ -5,8 +5,8 @@ use base64::engine::general_purpose::STANDARD as BASE64_STANDARD; use base64::Engine; use chrono::{DateTime, Utc}; -use crate::error::Error; use crate::shared::utils::{prost_timestamp_from_utc, utc_from_timestamp}; +use crate::Error; use numaflow_grpc::clients::sink::sink_request::Request; use numaflow_grpc::clients::sink::Status::{Failure, Fallback, Success}; use numaflow_grpc::clients::sink::{sink_response, SinkRequest, SinkResponse}; @@ -84,7 +84,7 @@ impl TryFrom for Message { offset: BASE64_STANDARD.encode(o.offset), partition_id: o.partition_id, }, - None => return Err(Error::SourceError("Offset not found".to_string())), + None => return Err(Error::Source("Offset not found".to_string())), }; Ok(Message { @@ -161,7 +161,7 @@ impl TryFrom for ResponseFromSink { fn try_from(value: SinkResponse) -> Result { let value = value .result - .ok_or(Error::SinkError("result is empty".to_string()))?; + .ok_or(Error::Sink("result is empty".to_string()))?; let status = match value.status() { Success => ResponseStatusFromSink::Success, diff --git a/rust/numaflow-core/src/monovertex.rs b/rust/numaflow-core/src/monovertex.rs index 7bc12d8c18..c647d51d0a 100644 --- a/rust/numaflow-core/src/monovertex.rs +++ 
b/rust/numaflow-core/src/monovertex.rs @@ -226,9 +226,7 @@ async fn fetch_source( )?; Ok(SourceType::Generator(source_read, source_ack, lag_reader)) } else { - Err(Error::ConfigError( - "No valid source configuration found".into(), - )) + Err(Error::Config("No valid source configuration found".into())) } } @@ -254,7 +252,7 @@ async fn fetch_sink( let log = SinkHandle::new(SinkClientType::Log).await?; return Ok((log, fb_sink)); } - Err(Error::ConfigError( + Err(Error::Config( "No valid Sink configuration found".to_string(), )) } diff --git a/rust/numaflow-core/src/monovertex/forwarder.rs b/rust/numaflow-core/src/monovertex/forwarder.rs index a0be586dbf..2ce41a5d80 100644 --- a/rust/numaflow-core/src/monovertex/forwarder.rs +++ b/rust/numaflow-core/src/monovertex/forwarder.rs @@ -2,11 +2,11 @@ use std::collections::HashMap; use crate::config::{config, OnFailureStrategy}; use crate::error; -use crate::error::Error; use crate::message::{Message, Offset, ResponseStatusFromSink}; use crate::monovertex::metrics; use crate::monovertex::metrics::forward_metrics; use crate::sink::SinkHandle; +use crate::Error; use crate::{source::SourceHandle, transformer::user_defined::SourceTransformHandle}; use chrono::Utc; @@ -121,7 +121,7 @@ impl Forwarder { async fn read_and_process_messages(&mut self) -> error::Result { let start_time = tokio::time::Instant::now(); let messages = self.source_reader.read().await.map_err(|e| { - Error::ForwarderError(format!("Failed to read messages from source {:?}", e)) + Error::Forwarder(format!("Failed to read messages from source {:?}", e)) })?; debug!( @@ -162,7 +162,7 @@ impl Forwarder { // Apply transformation if transformer is present let transformed_messages = self.apply_transformer(messages).await.map_err(|e| { - Error::ForwarderError(format!( + Error::Forwarder(format!( "Failed to apply transformation to messages {:?}", e )) @@ -171,13 +171,11 @@ impl Forwarder { // Write the messages to the sink self.write_to_sink(transformed_messages) 
.await - .map_err(|e| { - Error::ForwarderError(format!("Failed to write messages to sink {:?}", e)) - })?; + .map_err(|e| Error::Forwarder(format!("Failed to write messages to sink {:?}", e)))?; // Acknowledge the messages back to the source self.acknowledge_messages(offsets).await.map_err(|e| { - Error::ForwarderError(format!( + Error::Forwarder(format!( "Failed to acknowledge messages back to source {:?}", e )) @@ -248,7 +246,7 @@ impl Forwarder { // if we are shutting down, stop the retry if self.cln_token.is_cancelled() { - return Err(Error::SinkError( + return Err(Error::Sink( "Cancellation token triggered during retry".to_string(), )); } @@ -405,7 +403,7 @@ impl Forwarder { // Writes the fallback messages to the fallback sink async fn handle_fallback_messages(&mut self, fallback_msgs: Vec) -> error::Result<()> { if self.fb_sink_writer.is_none() { - return Err(Error::SinkError( + return Err(Error::Sink( "Response contains fallback messages but no fallback sink is configured" .to_string(), )); @@ -469,7 +467,7 @@ impl Forwarder { // specifying fallback status in fallback response is not allowed if contains_fallback_status { - return Err(Error::SinkError( + return Err(Error::Sink( "Fallback response contains fallback status".to_string(), )); } @@ -490,7 +488,7 @@ impl Forwarder { } } if !messages_to_send.is_empty() { - return Err(Error::SinkError(format!( + return Err(Error::Sink(format!( "Failed to write messages to fallback sink after {} attempts. 
Errors: {:?}", attempts, fallback_error_map ))); diff --git a/rust/numaflow-core/src/monovertex/metrics.rs b/rust/numaflow-core/src/monovertex/metrics.rs index 0c4ca65bc6..79c0ce396a 100644 --- a/rust/numaflow-core/src/monovertex/metrics.rs +++ b/rust/numaflow-core/src/monovertex/metrics.rs @@ -28,8 +28,8 @@ use numaflow_grpc::clients::source::source_client::SourceClient; use numaflow_grpc::clients::sourcetransformer::source_transform_client::SourceTransformClient; use crate::config::config; -use crate::error::Error; use crate::source::SourceHandle; +use crate::Error; // Define the labels for the metrics // Note: Please keep consistent with the definitions in MonoVertex daemon @@ -275,18 +275,18 @@ pub(crate) async fn start_metrics_https_server( // Generate a self-signed certificate let CertifiedKey { cert, key_pair } = generate_simple_self_signed(vec!["localhost".into()]) - .map_err(|e| Error::MetricsError(format!("Generating self-signed certificate: {}", e)))?; + .map_err(|e| Error::Metrics(format!("Generating self-signed certificate: {}", e)))?; let tls_config = RustlsConfig::from_pem(cert.pem().into(), key_pair.serialize_pem().into()) .await - .map_err(|e| Error::MetricsError(format!("Creating tlsConfig from pem: {}", e)))?; + .map_err(|e| Error::Metrics(format!("Creating tlsConfig from pem: {}", e)))?; let metrics_app = metrics_router(metrics_state); axum_server::bind_rustls(addr, tls_config) .serve(metrics_app.into_make_service()) .await - .map_err(|e| Error::MetricsError(format!("Starting web server for metrics: {}", e)))?; + .map_err(|e| Error::Metrics(format!("Starting web server for metrics: {}", e)))?; Ok(()) } diff --git a/rust/numaflow-core/src/shared/server_info.rs b/rust/numaflow-core/src/shared/server_info.rs index b3f8b51847..ffc712ceb1 100644 --- a/rust/numaflow-core/src/shared/server_info.rs +++ b/rust/numaflow-core/src/shared/server_info.rs @@ -91,7 +91,7 @@ fn check_numaflow_compatibility( ) -> error::Result<()> { // Ensure that the minimum 
numaflow version is specified if min_numaflow_version.is_empty() { - return Err(Error::ServerInfoError("invalid version".to_string())); + return Err(Error::ServerInfo("invalid version".to_string())); } // Strip the 'v' prefix if present. @@ -99,12 +99,12 @@ fn check_numaflow_compatibility( // Parse the provided numaflow version as a semantic version let numaflow_version_semver = Version::parse(numaflow_version_stripped) - .map_err(|e| Error::ServerInfoError(format!("Error parsing Numaflow version: {}", e)))?; + .map_err(|e| Error::ServerInfo(format!("Error parsing Numaflow version: {}", e)))?; // Create a version constraint based on the minimum numaflow version let numaflow_constraint = format!(">={}", min_numaflow_version); check_constraint(&numaflow_version_semver, &numaflow_constraint).map_err(|e| { - Error::ServerInfoError(format!( + Error::ServerInfo(format!( "numaflow version {} must be upgraded to at least {}, in order to work with current SDK version {}", numaflow_version_semver, human_readable(min_numaflow_version), e )) @@ -120,7 +120,7 @@ fn check_sdk_compatibility( ) -> error::Result<()> { // Check if the SDK language is present in the minimum supported SDK versions if !min_supported_sdk_versions.contains_key(sdk_language) { - return Err(Error::ServerInfoError(format!( + return Err(Error::ServerInfo(format!( "SDK version constraint not found for language: {}, container type: {}", sdk_language, container_type ))); @@ -135,14 +135,13 @@ fn check_sdk_compatibility( // For Python, use Pep440 versioning if sdk_language.to_lowercase() == "python" { let sdk_version_pep440 = PepVersion::from_str(sdk_version) - .map_err(|e| Error::ServerInfoError(format!("Error parsing SDK version: {}", e)))?; + .map_err(|e| Error::ServerInfo(format!("Error parsing SDK version: {}", e)))?; - let specifiers = VersionSpecifier::from_str(&sdk_constraint).map_err(|e| { - Error::ServerInfoError(format!("Error parsing SDK constraint: {}", e)) - })?; + let specifiers = 
VersionSpecifier::from_str(&sdk_constraint) + .map_err(|e| Error::ServerInfo(format!("Error parsing SDK constraint: {}", e)))?; if !specifiers.contains(&sdk_version_pep440) { - return Err(Error::ServerInfoError(format!( + return Err(Error::ServerInfo(format!( "SDK version {} must be upgraded to at least {}, in order to work with the current numaflow version", sdk_version_pep440, human_readable(sdk_required_version) ))); @@ -153,11 +152,11 @@ fn check_sdk_compatibility( // Parse the SDK version using semver let sdk_version_semver = Version::parse(sdk_version_stripped) - .map_err(|e| Error::ServerInfoError(format!("Error parsing SDK version: {}", e)))?; + .map_err(|e| Error::ServerInfo(format!("Error parsing SDK version: {}", e)))?; // Check if the SDK version satisfies the constraint check_constraint(&sdk_version_semver, &sdk_constraint).map_err(|_| { - Error::ServerInfoError(format!( + Error::ServerInfo(format!( "SDK version {} must be upgraded to at least {}, in order to work with the current numaflow version", sdk_version_semver, human_readable(sdk_required_version) )) @@ -171,7 +170,7 @@ fn check_sdk_compatibility( ); // Return error indicating the language - return Err(Error::ServerInfoError(format!( + return Err(Error::ServerInfo(format!( "SDK version constraint not found for language: {}, container type: {}", sdk_language, container_type ))); @@ -206,7 +205,7 @@ fn check_constraint(version: &Version, constraint: &str) -> error::Result<()> { // extract the major.minor.patch version let mmp_version = Version::parse(binding.split('-').next().unwrap_or_default()).map_err(|e| { - Error::ServerInfoError(format!( + Error::ServerInfo(format!( "Error parsing version: {}, version string: {}", e, binding )) @@ -241,7 +240,7 @@ fn check_constraint(version: &Version, constraint: &str) -> error::Result<()> { // Parse the given constraint as a semantic version requirement let version_req = VersionReq::parse(constraint).map_err(|e| { - Error::ServerInfoError(format!( + 
Error::ServerInfo(format!( "Error parsing constraint: {}, constraint string: {}", e, constraint )) @@ -249,7 +248,7 @@ fn check_constraint(version: &Version, constraint: &str) -> error::Result<()> { // Check if the provided version satisfies the parsed constraint if !version_req.matches(version) { - return Err(Error::ServerInfoError("invalid version".to_string())); + return Err(Error::ServerInfo("invalid version".to_string())); } Ok(()) @@ -285,7 +284,7 @@ async fn read_server_info( // Infinite loop to keep checking until the file is ready loop { if cln_token.is_cancelled() { - return Err(Error::ServerInfoError("Operation cancelled".to_string())); + return Err(Error::ServerInfo("Operation cancelled".to_string())); } // Check if the file exists and has content @@ -324,7 +323,7 @@ async fn read_server_info( retry += 1; if retry >= 10 { // Return an error if the retry limit is reached - return Err(Error::ServerInfoError( + return Err(Error::ServerInfo( "server-info reading retry exceeded".to_string(), )); } @@ -334,7 +333,7 @@ async fn read_server_info( // Parse the JSON; if there is an error, return the error let server_info: ServerInfo = serde_json::from_str(&contents).map_err(|e| { - Error::ServerInfoError(format!( + Error::ServerInfo(format!( "Failed to parse server-info file: {}, contents: {}", e, contents )) @@ -482,7 +481,7 @@ mod tests { // Remove the existing file if it exists if let Err(e) = fs::remove_file(svr_info_file_path) { if e.kind() != std::io::ErrorKind::NotFound { - return Err(Error::ServerInfoError(format!( + return Err(Error::ServerInfo(format!( "Failed to remove server-info file: {}", e ))); @@ -496,7 +495,7 @@ mod tests { let mut file = match file { Ok(f) => f, Err(e) => { - return Err(Error::ServerInfoError(format!( + return Err(Error::ServerInfo(format!( "Failed to create server-info file: {}", e ))); @@ -506,13 +505,13 @@ mod tests { // Write the serialized data and the END marker to the file // Remove the existing file if it exists if let 
Err(e) = file.write_all(serialized.as_bytes()) { - return Err(Error::ServerInfoError(format!( + return Err(Error::ServerInfo(format!( "Failed to write server-info file: {}", e ))); } if let Err(e) = file.write_all(END.as_bytes()) { - return Err(Error::ServerInfoError(format!( + return Err(Error::ServerInfo(format!( "Failed to write server-info file: {}", e ))); @@ -1074,7 +1073,7 @@ mod tests { // Check that we received the correct error variant let error = result.unwrap_err(); assert!( - matches!(error, Error::ServerInfoError(_)), + matches!(error, Error::ServerInfo(_)), "Expected ServerInfoError, got {:?}", error ); @@ -1140,7 +1139,7 @@ mod tests { let error = result.unwrap_err(); assert!( - matches!(error, Error::ServerInfoError(_)), + matches!(error, Error::ServerInfo(_)), "Expected ServerInfoError, got {:?}", error ); diff --git a/rust/numaflow-core/src/shared/utils.rs b/rust/numaflow-core/src/shared/utils.rs index 264472534f..813153d07f 100644 --- a/rust/numaflow-core/src/shared/utils.rs +++ b/rust/numaflow-core/src/shared/utils.rs @@ -2,18 +2,6 @@ use std::net::SocketAddr; use std::path::PathBuf; use std::time::Duration; -use crate::config::config; -use crate::error; -use crate::error::Error; -use crate::monovertex::metrics::{ - start_metrics_https_server, PendingReader, PendingReaderBuilder, UserDefinedContainerState, -}; -use crate::shared::server_info; -use crate::source::SourceHandle; -use numaflow_grpc::clients::sink::sink_client::SinkClient; -use numaflow_grpc::clients::source::source_client::SourceClient; -use numaflow_grpc::clients::sourcetransformer::source_transform_client::SourceTransformClient; - use axum::http::Uri; use backoff::retry::Retry; use backoff::strategy::fixed; @@ -28,6 +16,18 @@ use tonic::Request; use tower::service_fn; use tracing::{info, warn}; +use crate::config::config; +use crate::error; +use crate::monovertex::metrics::{ + start_metrics_https_server, PendingReader, PendingReaderBuilder, UserDefinedContainerState, +}; +use 
crate::shared::server_info; +use crate::source::SourceHandle; +use crate::Error; +use numaflow_grpc::clients::sink::sink_client::SinkClient; +use numaflow_grpc::clients::source::source_client::SourceClient; +use numaflow_grpc::clients::sourcetransformer::source_transform_client::SourceTransformClient; + pub(crate) async fn check_compatibility( cln_token: &CancellationToken, source_file_path: Option, @@ -40,7 +40,7 @@ pub(crate) async fn check_compatibility( .await .map_err(|e| { warn!("Error waiting for source server info file: {:?}", e); - Error::ForwarderError("Error waiting for server info file".to_string()) + Error::Forwarder("Error waiting for server info file".to_string()) })?; } @@ -49,7 +49,7 @@ pub(crate) async fn check_compatibility( .await .map_err(|e| { error!("Error waiting for sink server info file: {:?}", e); - Error::ForwarderError("Error waiting for server info file".to_string()) + Error::Forwarder("Error waiting for server info file".to_string()) })?; } @@ -58,7 +58,7 @@ pub(crate) async fn check_compatibility( .await .map_err(|e| { error!("Error waiting for transformer server info file: {:?}", e); - Error::ForwarderError("Error waiting for server info file".to_string()) + Error::Forwarder("Error waiting for server info file".to_string()) })?; } @@ -67,7 +67,7 @@ pub(crate) async fn check_compatibility( .await .map_err(|e| { warn!("Error waiting for fallback sink server info file: {:?}", e); - Error::ForwarderError("Error waiting for server info file".to_string()) + Error::Forwarder("Error waiting for server info file".to_string()) })?; } Ok(()) @@ -108,7 +108,7 @@ pub(crate) async fn wait_until_ready( ) -> error::Result<()> { loop { if cln_token.is_cancelled() { - return Err(Error::ForwarderError( + return Err(Error::Forwarder( "Cancellation token is cancelled".to_string(), )); } @@ -192,7 +192,7 @@ pub(crate) async fn create_rpc_channel(socket_path: PathBuf) -> crate::error::Re pub(crate) async fn connect_with_uds(uds_path: PathBuf) -> Result { 
let channel = Endpoint::try_from("http://[::]:50051") - .map_err(|e| Error::ConnectionError(format!("Failed to create endpoint: {:?}", e)))? + .map_err(|e| Error::Connection(format!("Failed to create endpoint: {:?}", e)))? .connect_with_connector(service_fn(move |_: Uri| { let uds_socket = uds_path.clone(); async move { @@ -202,7 +202,7 @@ pub(crate) async fn connect_with_uds(uds_path: PathBuf) -> Result, num_records: usize, timeout_in_ms: u16, -) -> error::Result<( +) -> Result<( UserDefinedSourceRead, UserDefinedSourceAck, UserDefinedSourceLagReader, @@ -54,7 +53,7 @@ impl UserDefinedSourceRead { mut client: SourceClient, num_records: usize, timeout_in_ms: u16, - ) -> error::Result { + ) -> Result { let (read_tx, resp_stream) = Self::create_reader(&mut client).await?; Ok(Self { @@ -67,7 +66,7 @@ impl UserDefinedSourceRead { async fn create_reader( client: &mut SourceClient, - ) -> error::Result<(mpsc::Sender, Streaming)> { + ) -> Result<(mpsc::Sender, Streaming)> { let (read_tx, read_rx) = mpsc::channel(config().batch_size as usize); let read_stream = ReceiverStream::new(read_rx); @@ -79,7 +78,7 @@ impl UserDefinedSourceRead { read_tx .send(handshake_request) .await - .map_err(|e| SourceError(format!("failed to send handshake request: {}", e)))?; + .map_err(|e| Error::Source(format!("failed to send handshake request: {}", e)))?; let mut resp_stream = client .read_fn(Request::new(read_stream)) @@ -88,12 +87,12 @@ impl UserDefinedSourceRead { // first response from the server will be the handshake response. We need to check if the // server has accepted the handshake. - let handshake_response = resp_stream.message().await?.ok_or(SourceError( + let handshake_response = resp_stream.message().await?.ok_or(Error::Source( "failed to receive handshake response".to_string(), ))?; // handshake cannot to None during the initial phase and it has to set `sot` to true. 
if handshake_response.handshake.map_or(true, |h| !h.sot) { - return Err(SourceError("invalid handshake response".to_string())); + return Err(Error::Source("invalid handshake response".to_string())); } Ok((read_tx, resp_stream)) @@ -105,7 +104,7 @@ impl SourceReader for UserDefinedSourceRead { "user-defined-source" } - async fn read(&mut self) -> error::Result> { + async fn read(&mut self) -> Result> { let request = ReadRequest { request: Some(read_request::Request { num_records: self.num_records as u64, @@ -117,7 +116,7 @@ impl SourceReader for UserDefinedSourceRead { self.read_tx .send(request) .await - .map_err(|e| SourceError(e.to_string()))?; + .map_err(|e| Error::Source(e.to_string()))?; let mut messages = Vec::with_capacity(self.num_records); @@ -128,7 +127,7 @@ impl SourceReader for UserDefinedSourceRead { let result = response .result - .ok_or_else(|| SourceError("Empty message".to_string()))?; + .ok_or_else(|| Error::Source("Empty message".to_string()))?; messages.push(result.try_into()?); } @@ -141,7 +140,7 @@ impl SourceReader for UserDefinedSourceRead { } impl UserDefinedSourceAck { - async fn new(mut client: SourceClient) -> error::Result { + async fn new(mut client: SourceClient) -> Result { let (ack_tx, ack_resp_stream) = Self::create_acker(&mut client).await?; Ok(Self { @@ -152,7 +151,7 @@ impl UserDefinedSourceAck { async fn create_acker( client: &mut SourceClient, - ) -> error::Result<(mpsc::Sender, Streaming)> { + ) -> Result<(mpsc::Sender, Streaming)> { let (ack_tx, ack_rx) = mpsc::channel(config().batch_size as usize); let ack_stream = ReceiverStream::new(ack_rx); @@ -164,18 +163,18 @@ impl UserDefinedSourceAck { ack_tx .send(ack_handshake_request) .await - .map_err(|e| SourceError(format!("failed to send ack handshake request: {}", e)))?; + .map_err(|e| Error::Source(format!("failed to send ack handshake request: {}", e)))?; let mut ack_resp_stream = client.ack_fn(Request::new(ack_stream)).await?.into_inner(); // first response from the server 
will be the handshake response. We need to check if the // server has accepted the handshake. - let ack_handshake_response = ack_resp_stream.message().await?.ok_or(SourceError( + let ack_handshake_response = ack_resp_stream.message().await?.ok_or(Error::Source( "failed to receive ack handshake response".to_string(), ))?; // handshake cannot to None during the initial phase and it has to set `sot` to true. if ack_handshake_response.handshake.map_or(true, |h| !h.sot) { - return Err(SourceError("invalid ack handshake response".to_string())); + return Err(Error::Source("invalid ack handshake response".to_string())); } Ok((ack_tx, ack_resp_stream)) @@ -183,7 +182,7 @@ impl UserDefinedSourceAck { } impl SourceAcker for UserDefinedSourceAck { - async fn ack(&mut self, offsets: Vec) -> error::Result<()> { + async fn ack(&mut self, offsets: Vec) -> Result<()> { let n = offsets.len(); // send n ack requests @@ -192,7 +191,7 @@ impl SourceAcker for UserDefinedSourceAck { self.ack_tx .send(request) .await - .map_err(|e| SourceError(e.to_string()))?; + .map_err(|e| Error::Source(e.to_string()))?; } // make sure we get n responses for the n requests. @@ -201,7 +200,7 @@ impl SourceAcker for UserDefinedSourceAck { .ack_resp_stream .message() .await? 
- .ok_or(SourceError("failed to receive ack response".to_string()))?; + .ok_or(Error::Source("failed to receive ack response".to_string()))?; } Ok(()) @@ -220,7 +219,7 @@ impl UserDefinedSourceLagReader { } impl LagReader for UserDefinedSourceLagReader { - async fn pending(&mut self) -> error::Result> { + async fn pending(&mut self) -> Result> { Ok(self .source_client .pending_fn(Request::new(())) diff --git a/rust/numaflow-core/src/transformer/user_defined.rs b/rust/numaflow-core/src/transformer/user_defined.rs index 42131622ba..b542b423e5 100644 --- a/rust/numaflow-core/src/transformer/user_defined.rs +++ b/rust/numaflow-core/src/transformer/user_defined.rs @@ -40,9 +40,10 @@ impl SourceTransformer { request: None, handshake: Some(sourcetransformer::Handshake { sot: true }), }; - read_tx.send(handshake_request).await.map_err(|e| { - Error::TransformerError(format!("failed to send handshake request: {}", e)) - })?; + read_tx + .send(handshake_request) + .await + .map_err(|e| Error::Transformer(format!("failed to send handshake request: {}", e)))?; let mut resp_stream = client .source_transform_fn(Request::new(read_stream)) @@ -51,14 +52,12 @@ impl SourceTransformer { // first response from the server will be the handshake response. We need to check if the // server has accepted the handshake. - let handshake_response = resp_stream.message().await?.ok_or(Error::TransformerError( + let handshake_response = resp_stream.message().await?.ok_or(Error::Transformer( "failed to receive handshake response".to_string(), ))?; // handshake cannot to None during the initial phase and it has to set `sot` to true. 
if handshake_response.handshake.map_or(true, |h| !h.sot) { - return Err(Error::TransformerError( - "invalid handshake response".to_string(), - )); + return Err(Error::Transformer("invalid handshake response".to_string())); } Ok(Self { @@ -119,7 +118,7 @@ impl SourceTransformer { Ok(()) => continue, Err(e) => { token.cancel(); - return Err(Error::TransformerError(e.to_string())); + return Err(Error::Transformer(e.to_string())); } }; } @@ -147,7 +146,7 @@ impl SourceTransformer { } Err(e) => { token.cancel(); - return Err(Error::TransformerError(format!( + return Err(Error::Transformer(format!( "gRPC error while receiving messages from source transformer server: {e:?}" ))); } @@ -155,7 +154,7 @@ impl SourceTransformer { let Some((msg_id, msg_info)) = tracker.remove_entry(&resp.id) else { token.cancel(); - return Err(Error::TransformerError(format!( + return Err(Error::Transformer(format!( "Received message with unknown ID {}", resp.id ))); @@ -179,7 +178,7 @@ impl SourceTransformer { } sender_task.await.unwrap().map_err(|e| { - Error::TransformerError(format!( + Error::Transformer(format!( "Sending messages to gRPC transformer failed: {e:?}", )) })?; From 8bf96793aa477d85d31dac01edc36c9201f55fc2 Mon Sep 17 00:00:00 2001 From: Sidhant Kohli Date: Tue, 15 Oct 2024 09:18:03 -0700 Subject: [PATCH 108/188] fix: create histogram buckets in a range (#2144) Signed-off-by: Sidhant Kohli --- rust/numaflow-core/src/monovertex/metrics.rs | 84 ++++++++++++++++++-- 1 file changed, 78 insertions(+), 6 deletions(-) diff --git a/rust/numaflow-core/src/monovertex/metrics.rs b/rust/numaflow-core/src/monovertex/metrics.rs index 79c0ce396a..0f5cc88240 100644 --- a/rust/numaflow-core/src/monovertex/metrics.rs +++ b/rust/numaflow-core/src/monovertex/metrics.rs @@ -1,4 +1,5 @@ use std::collections::BTreeMap; +use std::iter; use std::net::SocketAddr; use std::sync::{Arc, OnceLock}; use std::time::Duration; @@ -13,7 +14,7 @@ use prometheus_client::encoding::text::encode; use 
prometheus_client::metrics::counter::Counter; use prometheus_client::metrics::family::Family; use prometheus_client::metrics::gauge::Gauge; -use prometheus_client::metrics::histogram::{exponential_buckets, Histogram}; +use prometheus_client::metrics::histogram::Histogram; use prometheus_client::registry::Registry; use rcgen::{generate_simple_self_signed, CertifiedKey}; use tokio::sync::Mutex; @@ -126,6 +127,27 @@ pub struct MonoVtxMetrics { pub sink_time: Family, Histogram>, } +/// Exponential bucket distribution with range. +/// Creates `length` buckets, where the lowest bucket is `min` and the highest bucket is `max`. +/// The final +Inf bucket is not counted and not included in the returned iterator. +/// The function panics if `length` is 0 or negative, or if `min` is 0 or negative. +fn exponential_buckets_range(min: f64, max: f64, length: u16) -> impl Iterator { + if length < 1 { + panic!("ExponentialBucketsRange length needs a positive length"); + } + if min <= 0.0 { + panic!("ExponentialBucketsRange min needs to be greater than 0"); + } + + // We know max/min and highest bucket. Solve for growth_factor. 
+ let growth_factor = (max / min).powf(1.0 / (length as f64 - 1.0)); + + iter::repeat(()) + .enumerate() + .map(move |(i, _)| min * growth_factor.powf(i as f64)) + .take(length.into()) +} + /// impl the MonoVtxMetrics struct and create a new object impl MonoVtxMetrics { fn new() -> Self { @@ -139,20 +161,21 @@ impl MonoVtxMetrics { // gauge source_pending: Family::, Gauge>::default(), // timers + // exponential buckets in the range 100 microseconds to 15 minutes e2e_time: Family::, Histogram>::new_with_constructor(|| { - Histogram::new(exponential_buckets(100.0, 60000000.0 * 15.0, 10)) + Histogram::new(exponential_buckets_range(100.0, 60000000.0 * 15.0, 10)) }), read_time: Family::, Histogram>::new_with_constructor(|| { - Histogram::new(exponential_buckets(100.0, 60000000.0 * 15.0, 10)) + Histogram::new(exponential_buckets_range(100.0, 60000000.0 * 15.0, 10)) }), transform_time: Family::, Histogram>::new_with_constructor( - || Histogram::new(exponential_buckets(100.0, 60000000.0 * 15.0, 10)), + || Histogram::new(exponential_buckets_range(100.0, 60000000.0 * 15.0, 10)), ), ack_time: Family::, Histogram>::new_with_constructor(|| { - Histogram::new(exponential_buckets(100.0, 60000000.0 * 15.0, 10)) + Histogram::new(exponential_buckets_range(100.0, 60000000.0 * 15.0, 10)) }), sink_time: Family::, Histogram>::new_with_constructor(|| { - Histogram::new(exponential_buckets(100.0, 60000000.0 * 15.0, 10)) + Histogram::new(exponential_buckets_range(100.0, 60000000.0 * 15.0, 10)) }), }; @@ -752,4 +775,53 @@ mod tests { } assert_eq!(stored_values, [15, 20, 18, 18]); } + #[test] + fn test_exponential_buckets_range_basic() { + let min = 1.0; + let max = 32.0; + let length = 6; + let buckets: Vec = exponential_buckets_range(min, max, length).collect(); + let expected = vec![1.0, 2.0, 4.0, 8.0, 16.0, 32.0]; + assert_eq!(buckets, expected); + } + + #[test] + fn test_exponential_buckets_range_mico_to_seconds_minutes() { + let min = 100.0; + let max = 60000000.0 * 15.0; + let length 
= 10; + let buckets: Vec = exponential_buckets_range(min, max, length).collect(); + let expected: Vec = vec![ + 100.0, + 592.5071727239734, + 3510.6474972935644, + 20800.838230519028, + 123246.45850253566, + 730244.1067557991, + 4.32674871092222e+06, + 2.5636296457956206e+07, + 1.5189689533417246e+08, + 8.999999999999983e+08, + ]; + for (i, bucket) in buckets.iter().enumerate() { + assert!((bucket - expected[i]).abs() < 1e-2); + } + } + #[test] + #[should_panic(expected = "ExponentialBucketsRange length needs a positive length")] + fn test_exponential_buckets_range_zero_length() { + let _ = exponential_buckets_range(1.0, 100.0, 0).collect::>(); + } + + #[test] + #[should_panic(expected = "ExponentialBucketsRange min needs to be greater than 0")] + fn test_exponential_buckets_range_zero_min() { + let _ = exponential_buckets_range(0.0, 100.0, 10).collect::>(); + } + + #[test] + #[should_panic(expected = "ExponentialBucketsRange min needs to be greater than 0")] + fn test_exponential_buckets_range_negative_min() { + let _ = exponential_buckets_range(-1.0, 100.0, 10).collect::>(); + } } From 56eb742fc31b31795e38c985590438a8742a6c82 Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Tue, 15 Oct 2024 11:38:47 -0700 Subject: [PATCH 109/188] chore: multi controller awareness on validating webhook (#2156) Signed-off-by: Derek Wang --- pkg/apis/numaflow/v1alpha1/isbsvc_types.go | 9 ++++ .../numaflow/v1alpha1/isbsvc_types_test.go | 41 +++++++++++++++++++ pkg/webhook/validator/isbsvc.go | 23 ++++++----- pkg/webhook/validator/isbsvc_test.go | 10 +++++ pkg/webhook/validator/pipeline.go | 16 ++++++-- pkg/webhook/validator/pipeline_test.go | 33 +++++++++++++-- pkg/webhook/validator/test_utils.go | 5 ++- 7 files changed, 117 insertions(+), 20 deletions(-) diff --git a/pkg/apis/numaflow/v1alpha1/isbsvc_types.go b/pkg/apis/numaflow/v1alpha1/isbsvc_types.go index 1c7424274a..56cbde600d 100644 --- a/pkg/apis/numaflow/v1alpha1/isbsvc_types.go +++ 
b/pkg/apis/numaflow/v1alpha1/isbsvc_types.go @@ -67,6 +67,15 @@ type InterStepBufferService struct { Status InterStepBufferServiceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } +func (isbs InterStepBufferService) GetType() ISBSvcType { + if isbs.Spec.Redis != nil { + return ISBSvcTypeRedis + } else if isbs.Spec.JetStream != nil { + return ISBSvcTypeJetStream + } + return ISBSvcTypeUnknown +} + // InterStepBufferServiceList is the list of InterStepBufferService resources // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type InterStepBufferServiceList struct { diff --git a/pkg/apis/numaflow/v1alpha1/isbsvc_types_test.go b/pkg/apis/numaflow/v1alpha1/isbsvc_types_test.go index 22f050ad33..5b3d2ddab9 100644 --- a/pkg/apis/numaflow/v1alpha1/isbsvc_types_test.go +++ b/pkg/apis/numaflow/v1alpha1/isbsvc_types_test.go @@ -163,3 +163,44 @@ func Test_ISBSvcIsHealthy(t *testing.T) { }) } } + +func TestInterStepBufferService_GetType(t *testing.T) { + tests := []struct { + name string + isbs InterStepBufferService + want ISBSvcType + }{ + { + name: "Redis type", + isbs: InterStepBufferService{ + Spec: InterStepBufferServiceSpec{ + Redis: &RedisBufferService{}, + }, + }, + want: ISBSvcTypeRedis, + }, + { + name: "JetStream type", + isbs: InterStepBufferService{ + Spec: InterStepBufferServiceSpec{ + JetStream: &JetStreamBufferService{}, + }, + }, + want: ISBSvcTypeJetStream, + }, + { + name: "Unknown type", + isbs: InterStepBufferService{ + Spec: InterStepBufferServiceSpec{}, + }, + want: ISBSvcTypeUnknown, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := tt.isbs.GetType() + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/pkg/webhook/validator/isbsvc.go b/pkg/webhook/validator/isbsvc.go index d02581ae7b..a790c427d7 100644 --- a/pkg/webhook/validator/isbsvc.go +++ b/pkg/webhook/validator/isbsvc.go @@ -47,21 +47,21 @@ func (v *isbsvcValidator) ValidateUpdate(_ context.Context) 
*admissionv1.Admissi if err := isbsvccontroller.ValidateInterStepBufferService(v.newISBService); err != nil { return DeniedResponse(err.Error()) } - switch { - case v.oldISBService.Spec.JetStream != nil: - // check the type of ISB Service is not changed - if v.newISBService.Spec.Redis != nil { - return DeniedResponse("can not change ISB Service type from Jetstream to Redis") - } + // chck if the instance annotation is changed + if v.oldISBService.GetAnnotations()[dfv1.KeyInstance] != v.newISBService.GetAnnotations()[dfv1.KeyInstance] { + return DeniedResponse("cannot update instance annotation " + dfv1.KeyInstance) + } + // check the type of ISB Service is not changed + if v.oldISBService.GetType() != v.newISBService.GetType() { + return DeniedResponse("can not change ISB Service type") + } + switch v.newISBService.GetType() { + case dfv1.ISBSvcTypeJetStream: // check the persistence of ISB Service is not changed if !equality.Semantic.DeepEqual(v.oldISBService.Spec.JetStream.Persistence, v.newISBService.Spec.JetStream.Persistence) { return DeniedResponse("can not change persistence of Jetstream ISB Service") } - case v.oldISBService.Spec.Redis != nil: - // check the type of ISB Service is not changed - if v.newISBService.Spec.JetStream != nil { - return DeniedResponse("can not change ISB Service type from Redis to Jetstream") - } + case dfv1.ISBSvcTypeRedis: // nil check for Redis Native, if one of them is nil and the other is not, it is NOT allowed if oldRedisNative, newRedisNative := v.oldISBService.Spec.Redis.Native, v.newISBService.Spec.Redis.Native; oldRedisNative != nil && newRedisNative == nil { return DeniedResponse("can not remove Redis Native from Redis ISB Service") @@ -72,6 +72,7 @@ func (v *isbsvcValidator) ValidateUpdate(_ context.Context) *admissionv1.Admissi if oldRedisNative, newRedisNative := v.oldISBService.Spec.Redis.Native, v.newISBService.Spec.Redis.Native; oldRedisNative != nil && newRedisNative != nil && 
!equality.Semantic.DeepEqual(oldRedisNative.Persistence, newRedisNative.Persistence) { return DeniedResponse("can not change persistence of Redis ISB Service") } + default: } return AllowedResponse() } diff --git a/pkg/webhook/validator/isbsvc_test.go b/pkg/webhook/validator/isbsvc_test.go index 5d285b20b6..39ff07610a 100644 --- a/pkg/webhook/validator/isbsvc_test.go +++ b/pkg/webhook/validator/isbsvc_test.go @@ -44,6 +44,16 @@ func TestValidateISBServiceUpdate(t *testing.T) { {name: "changing ISB Service type is not allowed - redis to jetstream", old: fakeRedisISBSvc(), new: fakeJetStreamISBSvc(), want: false}, {name: "changing ISB Service type is not allowed - jetstream to redis", old: fakeJetStreamISBSvc(), new: fakeRedisISBSvc(), want: false}, {name: "valid new ISBSvc spec", old: fakeRedisISBSvc(), new: fakeRedisISBSvc(), want: true}, + {name: "updating instance annotation is not allowed - jetstream", old: fakeJetStreamISBSvc(), + new: &dfv1.InterStepBufferService{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testNamespace, + Name: dfv1.DefaultISBSvcName, + Annotations: map[string]string{ + dfv1.KeyInstance: "test", + }, + }, + Spec: fakeJetStreamISBSvc().Spec}, want: false}, {name: "removing persistence is not allowed - jetstream", old: fakeJetStreamISBSvc(), new: &dfv1.InterStepBufferService{ ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/webhook/validator/pipeline.go b/pkg/webhook/validator/pipeline.go index 35af590d92..c911ff6114 100644 --- a/pkg/webhook/validator/pipeline.go +++ b/pkg/webhook/validator/pipeline.go @@ -55,7 +55,7 @@ func (v *pipelineValidator) ValidateCreate(ctx context.Context) *admissionv1.Adm } else { isbName = dfv1.DefaultISBSvcName } - if err := v.checkISBSVCExists(ctx, isbName); err != nil { + if err := v.validateISBSvc(ctx, isbName); err != nil { return DeniedResponse(err.Error()) } return AllowedResponse() @@ -76,12 +76,16 @@ func (v *pipelineValidator) ValidateUpdate(_ context.Context) *admissionv1.Admis return AllowedResponse() 
} -// checkISBSVCExists checks that the ISB service exists in the given namespace and is valid -func (v *pipelineValidator) checkISBSVCExists(ctx context.Context, isbSvcName string) error { +// validateISBSvc checks that the ISB service exists in the given namespace and is valid +func (v *pipelineValidator) validateISBSvc(ctx context.Context, isbSvcName string) error { isb, err := v.isbClient.Get(ctx, isbSvcName, metav1.GetOptions{}) if err != nil { return err } + // Check if they have the same instance annotation + if v.newPipeline.GetAnnotations()[dfv1.KeyInstance] != isb.GetAnnotations()[dfv1.KeyInstance] { + return fmt.Errorf("ISB service %q does not have the same annotation %q as the pipeline", isbSvcName, dfv1.KeyInstance) + } if !isb.Status.IsHealthy() { return fmt.Errorf("ISB service %q is not healthy", isbSvcName) } @@ -94,7 +98,11 @@ func validatePipelineUpdate(old, new *dfv1.Pipeline) error { if new.Spec.InterStepBufferServiceName != old.Spec.InterStepBufferServiceName { return fmt.Errorf("cannot update pipeline with different ISB service name") } - // rule 2: if a vertex is updated, the update must be valid + // rule 2: the instance annotation shall not change + if new.GetAnnotations()[dfv1.KeyInstance] != old.GetAnnotations()[dfv1.KeyInstance] { + return fmt.Errorf("cannot update pipeline with different annotation %q", dfv1.KeyInstance) + } + // rule 3: if a vertex is updated, the update must be valid // we consider that a vertex is updated if its name is the same but its spec is different nameMap := make(map[string]dfv1.AbstractVertex) for _, v := range old.Spec.Vertices { diff --git a/pkg/webhook/validator/pipeline_test.go b/pkg/webhook/validator/pipeline_test.go index 80fbc9f970..c40a40cf2d 100644 --- a/pkg/webhook/validator/pipeline_test.go +++ b/pkg/webhook/validator/pipeline_test.go @@ -27,9 +27,22 @@ import ( func TestValidatePipelineCreate(t *testing.T) { pipeline := fakePipeline() fk := MockInterStepBufferServices{} - v := 
NewPipelineValidator(&fk, nil, pipeline) - r := v.ValidateCreate(contextWithLogger(t)) - assert.True(t, r.Allowed) + + t.Run("test create ok", func(t *testing.T) { + v := NewPipelineValidator(&fk, nil, pipeline) + r := v.ValidateCreate(contextWithLogger(t)) + assert.True(t, r.Allowed) + }) + + t.Run("test create with pipeline and isbsvc instance annotation mismatch", func(t *testing.T) { + newPipeline := pipeline.DeepCopy() + newPipeline.Annotations[dfv1.KeyInstance] = "abc" + v := NewPipelineValidator(&fk, pipeline, newPipeline) + r := v.ValidateCreate(contextWithLogger(t)) + assert.False(t, r.Allowed) + assert.Contains(t, r.Result.Message, "does not have the same annotation") + }) + } func TestValidatePipelineUpdate(t *testing.T) { @@ -41,12 +54,14 @@ func TestValidatePipelineUpdate(t *testing.T) { assert.False(t, r.Allowed) assert.Contains(t, r.Result.Message, "old pipeline spec is nil") }) + t.Run("test invalid new pipeline spec", func(t *testing.T) { v := NewPipelineValidator(&fk, pipeline, nil) r := v.ValidateUpdate(contextWithLogger(t)) assert.False(t, r.Allowed) assert.Contains(t, r.Result.Message, "new pipeline spec is invalid") }) + t.Run("test pipeline interStepBufferServiceName change", func(t *testing.T) { newPipeline := pipeline.DeepCopy() newPipeline.Spec.InterStepBufferServiceName = "change-name" @@ -55,6 +70,16 @@ func TestValidatePipelineUpdate(t *testing.T) { assert.False(t, r.Allowed) assert.Contains(t, r.Result.Message, "different ISB service name") }) + + t.Run("test pipeline instance annotation change", func(t *testing.T) { + newPipeline := pipeline.DeepCopy() + newPipeline.Annotations[dfv1.KeyInstance] = "change-name" + v := NewPipelineValidator(&fk, pipeline, newPipeline) + r := v.ValidateUpdate(contextWithLogger(t)) + assert.False(t, r.Allowed) + assert.Contains(t, r.Result.Message, "cannot update pipeline with different annotation") + }) + t.Run("test should not change the type of a vertex", func(t *testing.T) { newPipeline := 
pipeline.DeepCopy() // in our test fake pipeline, the 3nd vertex is a reduce vertex @@ -67,6 +92,7 @@ func TestValidatePipelineUpdate(t *testing.T) { assert.False(t, r.Allowed) assert.Contains(t, r.Result.Message, "vertex type is immutable") }) + t.Run("test should not change the partition count of a reduce vertex", func(t *testing.T) { var oldPartitionCount, newPartitionCount int32 = 2, 3 newPipeline := pipeline.DeepCopy() @@ -78,6 +104,7 @@ func TestValidatePipelineUpdate(t *testing.T) { assert.False(t, r.Allowed) assert.Contains(t, r.Result.Message, "partition count is immutable for a reduce vertex") }) + t.Run("test should not change the persistent storage of a reduce vertex", func(t *testing.T) { newPipeline := pipeline.DeepCopy() newPipeline.Spec.Vertices[2].UDF.GroupBy.Storage = &dfv1.PBQStorage{ diff --git a/pkg/webhook/validator/test_utils.go b/pkg/webhook/validator/test_utils.go index 4e6a74b4b0..cf91de7b34 100644 --- a/pkg/webhook/validator/test_utils.go +++ b/pkg/webhook/validator/test_utils.go @@ -56,8 +56,9 @@ func fakeJetStreamISBSvc() *dfv1.InterStepBufferService { func fakePipeline() *dfv1.Pipeline { return &dfv1.Pipeline{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-pl", - Namespace: testNamespace, + Name: "test-pl", + Namespace: testNamespace, + Annotations: map[string]string{}, }, Spec: dfv1.PipelineSpec{ Vertices: []dfv1.AbstractVertex{ From 54d3ce92dbb66e3d422fa92d3e52b14c6ccda7bc Mon Sep 17 00:00:00 2001 From: Vigith Maurice Date: Tue, 15 Oct 2024 13:00:08 -0700 Subject: [PATCH 110/188] chore: do not unwrap (#2152) Signed-off-by: Vigith Maurice --- rust/numaflow-core/src/error.rs | 3 +++ rust/numaflow-core/src/source.rs | 18 +++++++++++++++--- 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/rust/numaflow-core/src/error.rs b/rust/numaflow-core/src/error.rs index 5b5095b70c..052be3c549 100644 --- a/rust/numaflow-core/src/error.rs +++ b/rust/numaflow-core/src/error.rs @@ -30,6 +30,9 @@ pub enum Error { #[error("ServerInfoError 
Error - {0}")] ServerInfo(String), + + #[error("OneShot Receiver Error - {0}")] + ActorPatternRecv(String), } impl From for Error { diff --git a/rust/numaflow-core/src/source.rs b/rust/numaflow-core/src/source.rs index 5a825b0c78..5e2eba5da5 100644 --- a/rust/numaflow-core/src/source.rs +++ b/rust/numaflow-core/src/source.rs @@ -132,8 +132,12 @@ impl SourceHandle { pub(crate) async fn read(&self) -> crate::Result> { let (sender, receiver) = oneshot::channel(); let msg = ActorMessage::Read { respond_to: sender }; + // Ignore send errors. If send fails, so does the recv.await below. There's no reason + // to check for the same failure twice. let _ = self.sender.send(msg).await; - receiver.await.unwrap() + receiver + .await + .map_err(|e| crate::error::Error::ActorPatternRecv(e.to_string()))? } pub(crate) async fn ack(&self, offsets: Vec) -> crate::Result<()> { @@ -142,14 +146,22 @@ impl SourceHandle { respond_to: sender, offsets, }; + // Ignore send errors. If send fails, so does the recv.await below. There's no reason + // to check for the same failure twice. let _ = self.sender.send(msg).await; - receiver.await.unwrap() + receiver + .await + .map_err(|e| crate::error::Error::ActorPatternRecv(e.to_string()))? } pub(crate) async fn pending(&self) -> crate::error::Result> { let (sender, receiver) = oneshot::channel(); let msg = ActorMessage::Pending { respond_to: sender }; + // Ignore send errors. If send fails, so does the recv.await below. There's no reason + // to check for the same failure twice. let _ = self.sender.send(msg).await; - receiver.await.unwrap() + receiver + .await + .map_err(|e| crate::error::Error::ActorPatternRecv(e.to_string()))? 
} } From 5e87391767089973f5efdefb561ef740d54af595 Mon Sep 17 00:00:00 2001 From: Vigith Maurice Date: Tue, 15 Oct 2024 19:22:43 -0700 Subject: [PATCH 111/188] chore: generate rust protobuf objects (#2157) Signed-off-by: Vigith Maurice --- rust/Cargo.lock | 1 + rust/numaflow-grpc/Cargo.toml | 1 + rust/numaflow-grpc/Makefile | 4 + rust/numaflow-grpc/src/lib.rs | 4 + rust/numaflow-grpc/src/main.rs | 18 ++++ rust/numaflow-grpc/src/objects.rs | 7 ++ rust/numaflow-grpc/src/objects/isb.rs | 123 ++++++++++++++++++++++++++ rust/numaflow-grpc/src/objects/wmb.rs | 23 +++++ 8 files changed, 181 insertions(+) create mode 100644 rust/numaflow-grpc/src/objects.rs create mode 100644 rust/numaflow-grpc/src/objects/isb.rs create mode 100644 rust/numaflow-grpc/src/objects/wmb.rs diff --git a/rust/Cargo.lock b/rust/Cargo.lock index 8afd5705bc..146e3d195a 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -1645,6 +1645,7 @@ name = "numaflow-grpc" version = "0.1.0" dependencies = [ "prost", + "prost-build", "prost-types", "tonic", "tonic-build", diff --git a/rust/numaflow-grpc/Cargo.toml b/rust/numaflow-grpc/Cargo.toml index 4b970d45c2..3eba422657 100644 --- a/rust/numaflow-grpc/Cargo.toml +++ b/rust/numaflow-grpc/Cargo.toml @@ -12,3 +12,4 @@ tonic-build = "0.12.3" prost = "0.13.2" prost-types = "0.13.1" tonic = "0.12.3" +prost-build = "0.13.3" diff --git a/rust/numaflow-grpc/Makefile b/rust/numaflow-grpc/Makefile index 369b0ff6f7..7d2d8a03c5 100644 --- a/rust/numaflow-grpc/Makefile +++ b/rust/numaflow-grpc/Makefile @@ -6,9 +6,13 @@ clean: .PHONY: generate generate: clean rm -rf src/clients/*.rs + rm -rf src/objects/*.rs cp -r ../../pkg/apis/proto proto mv src/clients.rs /tmp/clients.rs.bak + mv src/objects.rs /tmp/objects.rs.bak > src/clients.rs + > src/objects.rs -./codegen.sh mv /tmp/clients.rs.bak src/clients.rs + mv /tmp/objects.rs.bak src/objects.rs $(MAKE) clean diff --git a/rust/numaflow-grpc/src/lib.rs b/rust/numaflow-grpc/src/lib.rs index 705f46dba5..b16bd6c261 100644 --- 
a/rust/numaflow-grpc/src/lib.rs +++ b/rust/numaflow-grpc/src/lib.rs @@ -1 +1,5 @@ +/// gRPC clients and also protobuf objects for gRPC. pub mod clients; + +/// protobuf objects for concrete types +pub mod objects; diff --git a/rust/numaflow-grpc/src/main.rs b/rust/numaflow-grpc/src/main.rs index 6c95d2b793..80e2390893 100644 --- a/rust/numaflow-grpc/src/main.rs +++ b/rust/numaflow-grpc/src/main.rs @@ -1,4 +1,12 @@ fn main() { + // gRPC clients for UDF + build_client(); + + // protobuf objects for serde + build_objects(); +} + +fn build_client() { tonic_build::configure() .build_client(true) .build_server(false) @@ -18,3 +26,13 @@ fn main() { ) .expect("failed to compile protos"); } + +fn build_objects() { + prost_build::Config::new() + .out_dir("src/objects") + .compile_protos( + &["proto/isb/message.proto", "proto/wmb/wmb.proto"], + &["proto"], + ) + .expect("failed to compile protos"); +} diff --git a/rust/numaflow-grpc/src/objects.rs b/rust/numaflow-grpc/src/objects.rs new file mode 100644 index 0000000000..f30f255ca1 --- /dev/null +++ b/rust/numaflow-grpc/src/objects.rs @@ -0,0 +1,7 @@ +#[path = "objects/isb.rs"] +#[rustfmt::skip] +pub mod isb; + +#[path = "objects/wmb.rs"] +#[rustfmt::skip] +pub mod wmb; diff --git a/rust/numaflow-grpc/src/objects/isb.rs b/rust/numaflow-grpc/src/objects/isb.rs new file mode 100644 index 0000000000..b22605360d --- /dev/null +++ b/rust/numaflow-grpc/src/objects/isb.rs @@ -0,0 +1,123 @@ +// This file is @generated by prost-build. +/// MessageInfo is the message information window of the payload. 
+#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct MessageInfo { + /// EventTime represents the event time of the message + #[prost(message, optional, tag = "1")] + pub event_time: ::core::option::Option<::prost_types::Timestamp>, + /// IsLate is used to indicate if the message is a late data + #[prost(bool, tag = "2")] + pub is_late: bool, +} +/// MessageMetadata is the metadata of the message +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct MessageMetadata { + /// NumDelivered is the number of times the message has been delivered. + #[prost(uint64, tag = "1")] + pub num_delivered: u64, +} +/// Header is the header of the message +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Header { + /// MessageInfo contains the information window of the payload. + #[prost(message, optional, tag = "1")] + pub message_info: ::core::option::Option, + /// Kind indicates the kind of Message + #[prost(enumeration = "MessageKind", tag = "2")] + pub kind: i32, + /// ID is used for exactly-once-semantics. ID is a combination of vertex name, offset and index of the message. + #[prost(message, optional, tag = "3")] + pub id: ::core::option::Option, + /// Keys is (key,value) in the map-reduce paradigm will be used for reduce operation + #[prost(string, repeated, tag = "4")] + pub keys: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// Headers is the headers of the message which can be used to store and propagate source headers + #[prost(map = "string, string", tag = "5")] + pub headers: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, +} +/// MessageID is the message ID of the message which is used for exactly-once-semantics. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MessageId { + /// VertexName is the name of the vertex + #[prost(string, tag = "1")] + pub vertex_name: ::prost::alloc::string::String, + /// Offset is the offset of the message + #[prost(string, tag = "2")] + pub offset: ::prost::alloc::string::String, + /// Index is the index of a flatmap message. + #[prost(int32, tag = "3")] + pub index: i32, +} +/// Body is the body of the message +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Body { + /// Payload is the actual data of the message + #[prost(bytes = "vec", tag = "1")] + pub payload: ::prost::alloc::vec::Vec, +} +/// Message is inter step message +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Message { + #[prost(message, optional, tag = "1")] + pub header: ::core::option::Option
, + #[prost(message, optional, tag = "2")] + pub body: ::core::option::Option, +} +/// ReadMessage is the message read from the buffer. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReadMessage { + /// Message is the actual message read from the buffer + #[prost(message, optional, tag = "1")] + pub message: ::core::option::Option, + /// ReadOffset is the offset at which the message was read + #[prost(string, tag = "2")] + pub read_offset: ::prost::alloc::string::String, + /// Watermark is the watermark timestamp + #[prost(message, optional, tag = "3")] + pub watermark: ::core::option::Option<::prost_types::Timestamp>, + /// Metadata is the metadata of the message after a message is read from the buffer. + #[prost(message, optional, tag = "4")] + pub metadata: ::core::option::Option, +} +/// WriteMessage is a wrapper for an isb message with tag information which will be used for conditional forwarding. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct WriteMessage { + /// Message is the actual message to be written + #[prost(message, optional, tag = "1")] + pub message: ::core::option::Option, + /// Tags are the tags associated with the message + #[prost(string, repeated, tag = "2")] + pub tags: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +/// MessageKind represents the message type of the payload. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum MessageKind { + /// Data payload + Data = 0, + /// Watermark Barrier + Wmb = 1, +} +impl MessageKind { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + Self::Data => "DATA", + Self::Wmb => "WMB", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "DATA" => Some(Self::Data), + "WMB" => Some(Self::Wmb), + _ => None, + } + } +} diff --git a/rust/numaflow-grpc/src/objects/wmb.rs b/rust/numaflow-grpc/src/objects/wmb.rs new file mode 100644 index 0000000000..f60b4a0d07 --- /dev/null +++ b/rust/numaflow-grpc/src/objects/wmb.rs @@ -0,0 +1,23 @@ +// This file is @generated by prost-build. +/// WMB is used in the KV offset timeline bucket as the value for the given processor entity key. +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct Wmb { + /// Idle is set to true if the given processor entity hasn't published anything + /// to the offset timeline bucket in a batch processing cycle. + /// Idle is used to signal an idle watermark. + #[prost(bool, tag = "1")] + pub idle: bool, + /// Offset is the monotonically increasing index/offset of the buffer (buffer is the physical representation + /// of the partition of the edge). + #[prost(int64, tag = "2")] + pub offset: i64, + /// Watermark is tightly coupled with the offset and will be monotonically increasing for a given ProcessorEntity + /// as the offset increases. + /// When it is idling (Idle==true), for a given offset, the watermark can monotonically increase without offset + /// increasing. + #[prost(int64, tag = "3")] + pub watermark: i64, + /// Partition to identify the partition to which the watermark belongs. 
+ #[prost(int32, tag = "4")] + pub partition: i32, +} From f81062c91bd5f4eb92ba5a8329d14e70cd92634b Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Tue, 15 Oct 2024 22:39:37 -0700 Subject: [PATCH 112/188] chore: rename numaflow-grpc to numaflow-pb (#2159) Signed-off-by: Derek Wang --- .codecov.yml | 24 ++++++++--------- rust/Cargo.lock | 26 +++++++++---------- rust/Cargo.toml | 4 +-- rust/Makefile | 2 +- rust/numaflow-core/Cargo.toml | 2 +- rust/numaflow-core/src/message.rs | 16 ++++++------ rust/numaflow-core/src/monovertex.rs | 6 ++--- .../numaflow-core/src/monovertex/forwarder.rs | 6 ++--- rust/numaflow-core/src/monovertex/metrics.rs | 6 ++--- rust/numaflow-core/src/shared/utils.rs | 6 ++--- rust/numaflow-core/src/sink.rs | 2 +- rust/numaflow-core/src/sink/user_defined.rs | 6 ++--- rust/numaflow-core/src/source/user_defined.rs | 8 +++--- .../src/transformer/user_defined.rs | 4 +-- .../{numaflow-grpc => numaflow-pb}/Cargo.toml | 4 +-- rust/{numaflow-grpc => numaflow-pb}/Makefile | 0 .../{numaflow-grpc => numaflow-pb}/codegen.sh | 0 .../src/clients.rs | 0 .../src/clients/map.v1.rs | 0 .../src/clients/mapstream.v1.rs | 0 .../src/clients/reduce.v1.rs | 0 .../src/clients/sessionreduce.v1.rs | 0 .../src/clients/sideinput.v1.rs | 0 .../src/clients/sink.v1.rs | 0 .../src/clients/source.v1.rs | 0 .../src/clients/sourcetransformer.v1.rs | 0 .../{numaflow-grpc => numaflow-pb}/src/lib.rs | 0 .../src/main.rs | 0 .../src/objects.rs | 0 .../src/objects/isb.rs | 0 .../src/objects/wmb.rs | 0 31 files changed, 61 insertions(+), 61 deletions(-) rename rust/{numaflow-grpc => numaflow-pb}/Cargo.toml (80%) rename rust/{numaflow-grpc => numaflow-pb}/Makefile (100%) rename rust/{numaflow-grpc => numaflow-pb}/codegen.sh (100%) rename rust/{numaflow-grpc => numaflow-pb}/src/clients.rs (100%) rename rust/{numaflow-grpc => numaflow-pb}/src/clients/map.v1.rs (100%) rename rust/{numaflow-grpc => numaflow-pb}/src/clients/mapstream.v1.rs (100%) rename rust/{numaflow-grpc => 
numaflow-pb}/src/clients/reduce.v1.rs (100%) rename rust/{numaflow-grpc => numaflow-pb}/src/clients/sessionreduce.v1.rs (100%) rename rust/{numaflow-grpc => numaflow-pb}/src/clients/sideinput.v1.rs (100%) rename rust/{numaflow-grpc => numaflow-pb}/src/clients/sink.v1.rs (100%) rename rust/{numaflow-grpc => numaflow-pb}/src/clients/source.v1.rs (100%) rename rust/{numaflow-grpc => numaflow-pb}/src/clients/sourcetransformer.v1.rs (100%) rename rust/{numaflow-grpc => numaflow-pb}/src/lib.rs (100%) rename rust/{numaflow-grpc => numaflow-pb}/src/main.rs (100%) rename rust/{numaflow-grpc => numaflow-pb}/src/objects.rs (100%) rename rust/{numaflow-grpc => numaflow-pb}/src/objects/isb.rs (100%) rename rust/{numaflow-grpc => numaflow-pb}/src/objects/wmb.rs (100%) diff --git a/.codecov.yml b/.codecov.yml index 98604b458f..9939d697da 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -1,16 +1,16 @@ ignore: -- "**/*.pb.go" -- "**/*.pb.gw.go" -- "**/*generated.go" -- "**/*generated.deepcopy.go" -- "**/*generated.openapi.go" -- "**/*_test.go" -- "pkg/client/.*" -- "vendor/.*" -- "test/.*" -- "rust/**/error.rs" -- "rust/numaflow-models/**" # ignore generated files -- "rust/numaflow-grpc/**" # ignore generated files + - "**/*.pb.go" + - "**/*.pb.gw.go" + - "**/*generated.go" + - "**/*generated.deepcopy.go" + - "**/*generated.openapi.go" + - "**/*_test.go" + - "pkg/client/.*" + - "vendor/.*" + - "test/.*" + - "rust/**/error.rs" + - "rust/numaflow-models/**" # ignore generated files + - "rust/numaflow-pb/**" # ignore generated files coverage: status: patch: off diff --git a/rust/Cargo.lock b/rust/Cargo.lock index 146e3d195a..10539de448 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -1547,7 +1547,7 @@ version = "0.1.0" dependencies = [ "backoff", "numaflow-core", - "numaflow-grpc", + "numaflow-pb", "servesink", "serving", "tokio", @@ -1614,8 +1614,8 @@ dependencies = [ "kube", "log", "numaflow 0.1.1 
(git+https://github.com/numaproj/numaflow-rs.git?rev=9fb3c0ad0f5f43cc42b4919f849b7dcce9a91387)", - "numaflow-grpc", "numaflow-models", + "numaflow-pb", "parking_lot", "pep440_rs", "pin-project", @@ -1640,17 +1640,6 @@ dependencies = [ "uuid", ] -[[package]] -name = "numaflow-grpc" -version = "0.1.0" -dependencies = [ - "prost", - "prost-build", - "prost-types", - "tonic", - "tonic-build", -] - [[package]] name = "numaflow-models" version = "0.0.0-pre" @@ -1665,6 +1654,17 @@ dependencies = [ "uuid", ] +[[package]] +name = "numaflow-pb" +version = "0.1.0" +dependencies = [ + "prost", + "prost-build", + "prost-types", + "tonic", + "tonic-build", +] + [[package]] name = "object" version = "0.36.5" diff --git a/rust/Cargo.toml b/rust/Cargo.toml index f273627ae5..99df3be31d 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -4,7 +4,7 @@ workspace = { members = [ "servesink", "serving", "numaflow-core", - "numaflow-grpc", + "numaflow-pb", ] } [[bin]] @@ -23,6 +23,6 @@ backoff = { path = "backoff" } servesink = { path = "servesink" } serving = { path = "serving" } numaflow-core = { path = "numaflow-core" } -numaflow-grpc = { path = "numaflow-grpc" } +numaflow-pb = { path = "numaflow-pb" } tracing = "0.1.40" tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } diff --git a/rust/Makefile b/rust/Makefile index c2d29917b1..eb4f0e8b90 100644 --- a/rust/Makefile +++ b/rust/Makefile @@ -3,7 +3,7 @@ SHELL:=/bin/bash .PHONY: generate generate: $(MAKE) --directory numaflow-models generate - $(MAKE) --directory numaflow-grpc generate + $(MAKE) --directory numaflow-pb generate .PHONY: build build: diff --git a/rust/numaflow-core/Cargo.toml b/rust/numaflow-core/Cargo.toml index 163cd99bb9..59f380e8cb 100644 --- a/rust/numaflow-core/Cargo.toml +++ b/rust/numaflow-core/Cargo.toml @@ -23,7 +23,7 @@ tower = "0.4.13" uuid = { version = "1.10.0", features = ["v4"] } serde_json = "1.0.122" numaflow-models = { path = "../numaflow-models" } -numaflow-grpc = { path = 
"../numaflow-grpc" } +numaflow-pb = { path = "../numaflow-pb" } trait-variant = "0.1.2" rcgen = "0.13.1" rustls = { version = "0.23.12", features = ["aws_lc_rs"] } diff --git a/rust/numaflow-core/src/message.rs b/rust/numaflow-core/src/message.rs index b2644e86f4..34229ec078 100644 --- a/rust/numaflow-core/src/message.rs +++ b/rust/numaflow-core/src/message.rs @@ -7,11 +7,11 @@ use chrono::{DateTime, Utc}; use crate::shared::utils::{prost_timestamp_from_utc, utc_from_timestamp}; use crate::Error; -use numaflow_grpc::clients::sink::sink_request::Request; -use numaflow_grpc::clients::sink::Status::{Failure, Fallback, Success}; -use numaflow_grpc::clients::sink::{sink_response, SinkRequest, SinkResponse}; -use numaflow_grpc::clients::source::{read_response, AckRequest}; -use numaflow_grpc::clients::sourcetransformer::SourceTransformRequest; +use numaflow_pb::clients::sink::sink_request::Request; +use numaflow_pb::clients::sink::Status::{Failure, Fallback, Success}; +use numaflow_pb::clients::sink::{sink_response, SinkRequest, SinkResponse}; +use numaflow_pb::clients::source::{read_response, AckRequest}; +use numaflow_pb::clients::sourcetransformer::SourceTransformRequest; /// A message that is sent from the source to the sink. 
#[derive(Debug, Clone)] @@ -42,8 +42,8 @@ pub(crate) struct Offset { impl From for AckRequest { fn from(offset: Offset) -> Self { Self { - request: Some(numaflow_grpc::clients::source::ack_request::Request { - offset: Some(numaflow_grpc::clients::source::Offset { + request: Some(numaflow_pb::clients::source::ack_request::Request { + offset: Some(numaflow_pb::clients::source::Offset { offset: BASE64_STANDARD .decode(offset.offset) .expect("we control the encoding, so this should never fail"), @@ -60,7 +60,7 @@ impl From for SourceTransformRequest { fn from(message: Message) -> Self { Self { request: Some( - numaflow_grpc::clients::sourcetransformer::source_transform_request::Request { + numaflow_pb::clients::sourcetransformer::source_transform_request::Request { id: message.id, keys: message.keys, value: message.value, diff --git a/rust/numaflow-core/src/monovertex.rs b/rust/numaflow-core/src/monovertex.rs index c647d51d0a..fb434da8af 100644 --- a/rust/numaflow-core/src/monovertex.rs +++ b/rust/numaflow-core/src/monovertex.rs @@ -6,9 +6,9 @@ use tokio_util::sync::CancellationToken; use tonic::transport::Channel; use tracing::info; -use numaflow_grpc::clients::sink::sink_client::SinkClient; -use numaflow_grpc::clients::source::source_client::SourceClient; -use numaflow_grpc::clients::sourcetransformer::source_transform_client::SourceTransformClient; +use numaflow_pb::clients::sink::sink_client::SinkClient; +use numaflow_pb::clients::source::source_client::SourceClient; +use numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; use crate::config::{config, Settings}; use crate::error::{self, Error}; diff --git a/rust/numaflow-core/src/monovertex/forwarder.rs b/rust/numaflow-core/src/monovertex/forwarder.rs index 2ce41a5d80..268598450d 100644 --- a/rust/numaflow-core/src/monovertex/forwarder.rs +++ b/rust/numaflow-core/src/monovertex/forwarder.rs @@ -538,9 +538,9 @@ mod tests { use chrono::Utc; use numaflow::source::{Message, Offset, 
SourceReadRequest}; use numaflow::{sink, source, sourcetransform}; - use numaflow_grpc::clients::sink::sink_client::SinkClient; - use numaflow_grpc::clients::source::source_client::SourceClient; - use numaflow_grpc::clients::sourcetransformer::source_transform_client::SourceTransformClient; + use numaflow_pb::clients::sink::sink_client::SinkClient; + use numaflow_pb::clients::source::source_client::SourceClient; + use numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; use tokio::sync::mpsc; use tokio::sync::mpsc::Sender; use tokio_util::sync::CancellationToken; diff --git a/rust/numaflow-core/src/monovertex/metrics.rs b/rust/numaflow-core/src/monovertex/metrics.rs index 0f5cc88240..9c7b7aed0d 100644 --- a/rust/numaflow-core/src/monovertex/metrics.rs +++ b/rust/numaflow-core/src/monovertex/metrics.rs @@ -24,9 +24,9 @@ use tonic::transport::Channel; use tonic::Request; use tracing::{debug, error, info}; -use numaflow_grpc::clients::sink::sink_client::SinkClient; -use numaflow_grpc::clients::source::source_client::SourceClient; -use numaflow_grpc::clients::sourcetransformer::source_transform_client::SourceTransformClient; +use numaflow_pb::clients::sink::sink_client::SinkClient; +use numaflow_pb::clients::source::source_client::SourceClient; +use numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; use crate::config::config; use crate::source::SourceHandle; diff --git a/rust/numaflow-core/src/shared/utils.rs b/rust/numaflow-core/src/shared/utils.rs index 813153d07f..25708d68ae 100644 --- a/rust/numaflow-core/src/shared/utils.rs +++ b/rust/numaflow-core/src/shared/utils.rs @@ -24,9 +24,9 @@ use crate::monovertex::metrics::{ use crate::shared::server_info; use crate::source::SourceHandle; use crate::Error; -use numaflow_grpc::clients::sink::sink_client::SinkClient; -use numaflow_grpc::clients::source::source_client::SourceClient; -use 
numaflow_grpc::clients::sourcetransformer::source_transform_client::SourceTransformClient; +use numaflow_pb::clients::sink::sink_client::SinkClient; +use numaflow_pb::clients::source::source_client::SourceClient; +use numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; pub(crate) async fn check_compatibility( cln_token: &CancellationToken, diff --git a/rust/numaflow-core/src/sink.rs b/rust/numaflow-core/src/sink.rs index 8c29966ccb..59102213da 100644 --- a/rust/numaflow-core/src/sink.rs +++ b/rust/numaflow-core/src/sink.rs @@ -3,7 +3,7 @@ use tonic::transport::Channel; use crate::config::config; use crate::message::{Message, ResponseFromSink}; -use numaflow_grpc::clients::sink::sink_client::SinkClient; +use numaflow_pb::clients::sink::sink_client::SinkClient; use user_defined::UserDefinedSink; mod log; diff --git a/rust/numaflow-core/src/sink/user_defined.rs b/rust/numaflow-core/src/sink/user_defined.rs index ecc8dc150d..4bb74d6790 100644 --- a/rust/numaflow-core/src/sink/user_defined.rs +++ b/rust/numaflow-core/src/sink/user_defined.rs @@ -3,8 +3,8 @@ use tokio_stream::wrappers::ReceiverStream; use tonic::transport::Channel; use tonic::{Request, Streaming}; -use numaflow_grpc::clients::sink::sink_client::SinkClient; -use numaflow_grpc::clients::sink::{Handshake, SinkRequest, SinkResponse, TransmissionStatus}; +use numaflow_pb::clients::sink::sink_client::SinkClient; +use numaflow_pb::clients::sink::{Handshake, SinkRequest, SinkResponse, TransmissionStatus}; use crate::error; use crate::message::{Message, ResponseFromSink}; @@ -122,7 +122,7 @@ mod tests { use crate::message::{Message, Offset}; use crate::shared::utils::create_rpc_channel; use crate::sink::user_defined::UserDefinedSink; - use numaflow_grpc::clients::sink::sink_client::SinkClient; + use numaflow_pb::clients::sink::sink_client::SinkClient; struct Logger; #[tonic::async_trait] diff --git a/rust/numaflow-core/src/source/user_defined.rs 
b/rust/numaflow-core/src/source/user_defined.rs index ae8ff081ed..da71191fc3 100644 --- a/rust/numaflow-core/src/source/user_defined.rs +++ b/rust/numaflow-core/src/source/user_defined.rs @@ -3,9 +3,9 @@ use tokio_stream::wrappers::ReceiverStream; use tonic::transport::Channel; use tonic::{Request, Streaming}; -use numaflow_grpc::clients::source; -use numaflow_grpc::clients::source::source_client::SourceClient; -use numaflow_grpc::clients::source::{ +use numaflow_pb::clients::source; +use numaflow_pb::clients::source::source_client::SourceClient; +use numaflow_pb::clients::source::{ read_request, AckRequest, AckResponse, ReadRequest, ReadResponse, }; @@ -237,7 +237,7 @@ mod tests { use std::collections::HashSet; use crate::shared::utils::create_rpc_channel; - use numaflow_grpc::clients::source::source_client::SourceClient; + use numaflow_pb::clients::source::source_client::SourceClient; use chrono::Utc; use numaflow::source; diff --git a/rust/numaflow-core/src/transformer/user_defined.rs b/rust/numaflow-core/src/transformer/user_defined.rs index b542b423e5..653ee46293 100644 --- a/rust/numaflow-core/src/transformer/user_defined.rs +++ b/rust/numaflow-core/src/transformer/user_defined.rs @@ -8,7 +8,7 @@ use tonic::transport::Channel; use tonic::{Request, Streaming}; use tracing::warn; -use numaflow_grpc::clients::sourcetransformer::{ +use numaflow_pb::clients::sourcetransformer::{ self, source_transform_client::SourceTransformClient, SourceTransformRequest, SourceTransformResponse, }; @@ -230,7 +230,7 @@ mod tests { use crate::shared::utils::create_rpc_channel; use crate::transformer::user_defined::SourceTransformHandle; use numaflow::sourcetransform; - use numaflow_grpc::clients::sourcetransformer::source_transform_client::SourceTransformClient; + use numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; use tempfile::TempDir; struct NowCat; diff --git a/rust/numaflow-grpc/Cargo.toml b/rust/numaflow-pb/Cargo.toml similarity index 
80% rename from rust/numaflow-grpc/Cargo.toml rename to rust/numaflow-pb/Cargo.toml index 3eba422657..dc2639e6e5 100644 --- a/rust/numaflow-grpc/Cargo.toml +++ b/rust/numaflow-pb/Cargo.toml @@ -1,9 +1,9 @@ [[bin]] -name = "numaflow-grpc" +name = "numaflow-pb" path = "src/main.rs" [package] -name = "numaflow-grpc" +name = "numaflow-pb" version = "0.1.0" edition = "2021" diff --git a/rust/numaflow-grpc/Makefile b/rust/numaflow-pb/Makefile similarity index 100% rename from rust/numaflow-grpc/Makefile rename to rust/numaflow-pb/Makefile diff --git a/rust/numaflow-grpc/codegen.sh b/rust/numaflow-pb/codegen.sh similarity index 100% rename from rust/numaflow-grpc/codegen.sh rename to rust/numaflow-pb/codegen.sh diff --git a/rust/numaflow-grpc/src/clients.rs b/rust/numaflow-pb/src/clients.rs similarity index 100% rename from rust/numaflow-grpc/src/clients.rs rename to rust/numaflow-pb/src/clients.rs diff --git a/rust/numaflow-grpc/src/clients/map.v1.rs b/rust/numaflow-pb/src/clients/map.v1.rs similarity index 100% rename from rust/numaflow-grpc/src/clients/map.v1.rs rename to rust/numaflow-pb/src/clients/map.v1.rs diff --git a/rust/numaflow-grpc/src/clients/mapstream.v1.rs b/rust/numaflow-pb/src/clients/mapstream.v1.rs similarity index 100% rename from rust/numaflow-grpc/src/clients/mapstream.v1.rs rename to rust/numaflow-pb/src/clients/mapstream.v1.rs diff --git a/rust/numaflow-grpc/src/clients/reduce.v1.rs b/rust/numaflow-pb/src/clients/reduce.v1.rs similarity index 100% rename from rust/numaflow-grpc/src/clients/reduce.v1.rs rename to rust/numaflow-pb/src/clients/reduce.v1.rs diff --git a/rust/numaflow-grpc/src/clients/sessionreduce.v1.rs b/rust/numaflow-pb/src/clients/sessionreduce.v1.rs similarity index 100% rename from rust/numaflow-grpc/src/clients/sessionreduce.v1.rs rename to rust/numaflow-pb/src/clients/sessionreduce.v1.rs diff --git a/rust/numaflow-grpc/src/clients/sideinput.v1.rs b/rust/numaflow-pb/src/clients/sideinput.v1.rs similarity index 100% rename from 
rust/numaflow-grpc/src/clients/sideinput.v1.rs rename to rust/numaflow-pb/src/clients/sideinput.v1.rs diff --git a/rust/numaflow-grpc/src/clients/sink.v1.rs b/rust/numaflow-pb/src/clients/sink.v1.rs similarity index 100% rename from rust/numaflow-grpc/src/clients/sink.v1.rs rename to rust/numaflow-pb/src/clients/sink.v1.rs diff --git a/rust/numaflow-grpc/src/clients/source.v1.rs b/rust/numaflow-pb/src/clients/source.v1.rs similarity index 100% rename from rust/numaflow-grpc/src/clients/source.v1.rs rename to rust/numaflow-pb/src/clients/source.v1.rs diff --git a/rust/numaflow-grpc/src/clients/sourcetransformer.v1.rs b/rust/numaflow-pb/src/clients/sourcetransformer.v1.rs similarity index 100% rename from rust/numaflow-grpc/src/clients/sourcetransformer.v1.rs rename to rust/numaflow-pb/src/clients/sourcetransformer.v1.rs diff --git a/rust/numaflow-grpc/src/lib.rs b/rust/numaflow-pb/src/lib.rs similarity index 100% rename from rust/numaflow-grpc/src/lib.rs rename to rust/numaflow-pb/src/lib.rs diff --git a/rust/numaflow-grpc/src/main.rs b/rust/numaflow-pb/src/main.rs similarity index 100% rename from rust/numaflow-grpc/src/main.rs rename to rust/numaflow-pb/src/main.rs diff --git a/rust/numaflow-grpc/src/objects.rs b/rust/numaflow-pb/src/objects.rs similarity index 100% rename from rust/numaflow-grpc/src/objects.rs rename to rust/numaflow-pb/src/objects.rs diff --git a/rust/numaflow-grpc/src/objects/isb.rs b/rust/numaflow-pb/src/objects/isb.rs similarity index 100% rename from rust/numaflow-grpc/src/objects/isb.rs rename to rust/numaflow-pb/src/objects/isb.rs diff --git a/rust/numaflow-grpc/src/objects/wmb.rs b/rust/numaflow-pb/src/objects/wmb.rs similarity index 100% rename from rust/numaflow-grpc/src/objects/wmb.rs rename to rust/numaflow-pb/src/objects/wmb.rs From 3d6e47ffc119d8347a2087fb951f2061c516bc94 Mon Sep 17 00:00:00 2001 From: Vigith Maurice Date: Wed, 16 Oct 2024 11:31:45 -0700 Subject: [PATCH 113/188] feat: ISB(jetstream) writer framework (#2160) 
Signed-off-by: Vigith Maurice Signed-off-by: Yashash H L Co-authored-by: Yashash H L --- rust/Cargo.lock | 37 +- rust/numaflow-core/Cargo.toml | 5 + rust/numaflow-core/src/error.rs | 8 +- rust/numaflow-core/src/lib.rs | 5 + rust/numaflow-core/src/message.rs | 379 ++++++++++++++++- .../numaflow-core/src/monovertex/forwarder.rs | 4 +- rust/numaflow-core/src/monovertex/metrics.rs | 2 +- rust/numaflow-core/src/pipeline.rs | 2 + rust/numaflow-core/src/pipeline/forwarder.rs | 1 + rust/numaflow-core/src/pipeline/isb.rs | 1 + .../src/pipeline/isb/jetstream.rs | 344 ++++++++++++++++ .../src/pipeline/isb/jetstream/writer.rs | 383 ++++++++++++++++++ rust/numaflow-core/src/shared/server_info.rs | 4 +- rust/numaflow-core/src/sink/log.rs | 2 +- rust/numaflow-core/src/sink/user_defined.rs | 14 +- rust/numaflow-core/src/source/generator.rs | 8 +- rust/numaflow-core/src/source/user_defined.rs | 2 +- .../src/transformer/user_defined.rs | 26 +- 18 files changed, 1190 insertions(+), 37 deletions(-) create mode 100644 rust/numaflow-core/src/pipeline.rs create mode 100644 rust/numaflow-core/src/pipeline/forwarder.rs create mode 100644 rust/numaflow-core/src/pipeline/isb.rs create mode 100644 rust/numaflow-core/src/pipeline/isb/jetstream.rs create mode 100644 rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs diff --git a/rust/Cargo.lock b/rust/Cargo.lock index 10539de448..f1105db300 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -86,6 +86,40 @@ dependencies = [ "url", ] +[[package]] +name = "async-nats" +version = "0.37.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd3bdd6ea595b2ea504500a3566071beb81125fc15d40a6f6bffa43575f64152" +dependencies = [ + "base64 0.22.1", + "bytes", + "futures", + "memchr", + "nkeys", + "nuid", + "once_cell", + "portable-atomic", + "rand", + "regex", + "ring", + "rustls-native-certs 0.7.3", + "rustls-pemfile 2.2.0", + "rustls-webpki 0.102.8", + "serde", + "serde_json", + "serde_nanos", + "serde_repr", + 
"thiserror", + "time", + "tokio", + "tokio-rustls 0.26.0", + "tokio-util", + "tracing", + "tryhard", + "url", +] + [[package]] name = "async-stream" version = "0.3.6" @@ -1603,6 +1637,7 @@ dependencies = [ name = "numaflow-core" version = "0.1.0" dependencies = [ + "async-nats 0.37.0", "axum", "axum-server", "backoff", @@ -2632,7 +2667,7 @@ dependencies = [ name = "serving" version = "0.1.0" dependencies = [ - "async-nats", + "async-nats 0.35.1", "axum", "axum-macros", "axum-server", diff --git a/rust/numaflow-core/Cargo.toml b/rust/numaflow-core/Cargo.toml index 59f380e8cb..c17f8e83ad 100644 --- a/rust/numaflow-core/Cargo.toml +++ b/rust/numaflow-core/Cargo.toml @@ -3,6 +3,10 @@ name = "numaflow-core" version = "0.1.0" edition = "2021" +[features] +nats-tests = [] +all-tests = ["nats-tests"] + [dependencies] axum = "0.7.5" axum-server = { version = "0.7.1", features = ["tls-rustls"] } @@ -37,6 +41,7 @@ kube = "0.95.0" log = "0.4.22" futures = "0.3.30" pin-project = "1.1.5" +async-nats = "0.37.0" [dev-dependencies] tempfile = "3.11.0" diff --git a/rust/numaflow-core/src/error.rs b/rust/numaflow-core/src/error.rs index 052be3c549..56e470d5e2 100644 --- a/rust/numaflow-core/src/error.rs +++ b/rust/numaflow-core/src/error.rs @@ -28,9 +28,15 @@ pub enum Error { #[error("Config Error - {0}")] Config(String), - #[error("ServerInfoError Error - {0}")] + #[error("ServerInfo Error - {0}")] ServerInfo(String), + #[error("Proto Error - {0}")] + Proto(String), + + #[error("ISB Error - {0}")] + ISB(String), + #[error("OneShot Receiver Error - {0}")] ActorPatternRecv(String), } diff --git a/rust/numaflow-core/src/lib.rs b/rust/numaflow-core/src/lib.rs index 5b7fae0b4c..a9c38e00fb 100644 --- a/rust/numaflow-core/src/lib.rs +++ b/rust/numaflow-core/src/lib.rs @@ -36,3 +36,8 @@ mod transformer; /// Reads from a stream. 
mod reader; + +/// [Pipeline] +/// +/// [Pipeline]: https://numaflow.numaproj.io/core-concepts/pipeline/ +mod pipeline; diff --git a/rust/numaflow-core/src/message.rs b/rust/numaflow-core/src/message.rs index 34229ec078..db9c1ab681 100644 --- a/rust/numaflow-core/src/message.rs +++ b/rust/numaflow-core/src/message.rs @@ -1,20 +1,37 @@ -use std::cmp::PartialEq; -use std::collections::HashMap; - +use crate::shared::utils::{prost_timestamp_from_utc, utc_from_timestamp}; +use crate::Error; +use crate::Result; use base64::engine::general_purpose::STANDARD as BASE64_STANDARD; use base64::Engine; +use bytes::Bytes; use chrono::{DateTime, Utc}; - -use crate::shared::utils::{prost_timestamp_from_utc, utc_from_timestamp}; -use crate::Error; use numaflow_pb::clients::sink::sink_request::Request; use numaflow_pb::clients::sink::Status::{Failure, Fallback, Success}; use numaflow_pb::clients::sink::{sink_response, SinkRequest, SinkResponse}; use numaflow_pb::clients::source::{read_response, AckRequest}; use numaflow_pb::clients::sourcetransformer::SourceTransformRequest; +use prost::Message as ProtoMessage; +use serde::{Deserialize, Serialize}; +use std::cmp::PartialEq; +use std::collections::HashMap; +use std::sync::OnceLock; +use std::{env, fmt}; + +const NUMAFLOW_MONO_VERTEX_NAME: &str = "NUMAFLOW_MONO_VERTEX_NAME"; +const NUMAFLOW_VERTEX_NAME: &str = "NUMAFLOW_VERTEX_NAME"; + +static VERTEX_NAME: OnceLock = OnceLock::new(); + +pub(crate) fn get_vertex_name() -> &'static str { + VERTEX_NAME.get_or_init(|| { + env::var(NUMAFLOW_MONO_VERTEX_NAME) + .or_else(|_| env::var(NUMAFLOW_VERTEX_NAME)) + .unwrap_or_default() + }) +} /// A message that is sent from the source to the sink. 
-#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub(crate) struct Message { /// keys of the message pub(crate) keys: Vec, @@ -25,13 +42,13 @@ pub(crate) struct Message { /// event time of the message pub(crate) event_time: DateTime, /// id of the message - pub(crate) id: String, + pub(crate) id: MessageID, /// headers of the message pub(crate) headers: HashMap, } /// Offset of the message which will be used to acknowledge the message. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub(crate) struct Offset { /// unique identifier of the message pub(crate) offset: String, @@ -39,6 +56,35 @@ pub(crate) struct Offset { pub(crate) partition_id: i32, } +impl fmt::Display for Offset { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}-{}", self.offset, self.partition_id) + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub(crate) struct MessageID { + pub(crate) vertex_name: String, + pub(crate) offset: String, + pub(crate) index: i32, +} + +impl MessageID { + fn new(vertex_name: String, offset: String, index: i32) -> Self { + Self { + vertex_name, + offset, + index, + } + } +} + +impl fmt::Display for MessageID { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}-{}-{}", self.vertex_name, self.offset, self.index) + } +} + impl From for AckRequest { fn from(offset: Offset) -> Self { Self { @@ -55,13 +101,81 @@ impl From for AckRequest { } } +impl TryFrom for Vec { + type Error = Error; + + fn try_from(message: Message) -> std::result::Result { + let proto_message = numaflow_pb::objects::isb::Message { + header: Some(numaflow_pb::objects::isb::Header { + message_info: Some(numaflow_pb::objects::isb::MessageInfo { + event_time: prost_timestamp_from_utc(message.event_time), + is_late: false, // Set this according to your logic + }), + kind: numaflow_pb::objects::isb::MessageKind::Data as i32, + id: Some(numaflow_pb::objects::isb::MessageId { + vertex_name: 
get_vertex_name().to_string(), + offset: message.offset.to_string(), + index: 0, + }), + keys: message.keys.clone(), + headers: message.headers.clone(), + }), + body: Some(numaflow_pb::objects::isb::Body { + payload: message.value.clone(), + }), + }; + + let mut buf = Vec::new(); + proto_message + .encode(&mut buf) + .map_err(|e| Error::Proto(e.to_string()))?; + Ok(buf) + } +} + +impl TryFrom> for Message { + type Error = Error; + + fn try_from(bytes: Vec) -> std::result::Result { + let proto_message = numaflow_pb::objects::isb::Message::decode(Bytes::from(bytes)) + .map_err(|e| Error::Proto(e.to_string()))?; + + let header = proto_message + .header + .ok_or(Error::Proto("Missing header".to_string()))?; + let body = proto_message + .body + .ok_or(Error::Proto("Missing body".to_string()))?; + let message_info = header + .message_info + .ok_or(Error::Proto("Missing message_info".to_string()))?; + let id = header.id.ok_or(Error::Proto("Missing id".to_string()))?; + + Ok(Message { + keys: header.keys, + value: body.payload, + offset: Offset { + offset: id.offset.clone(), + partition_id: 0, // Set this according to your logic + }, + event_time: utc_from_timestamp(message_info.event_time), + id: MessageID { + vertex_name: id.vertex_name, + offset: id.offset, + index: id.index, + }, + headers: header.headers, + }) + } +} + /// Convert the [`Message`] to [`SourceTransformRequest`] impl From for SourceTransformRequest { fn from(message: Message) -> Self { Self { request: Some( numaflow_pb::clients::sourcetransformer::source_transform_request::Request { - id: message.id, + id: message.id.to_string(), keys: message.keys, value: message.value, event_time: prost_timestamp_from_utc(message.event_time), @@ -76,9 +190,9 @@ impl From for SourceTransformRequest { /// Convert [`read_response::Result`] to [`Message`] impl TryFrom for Message { - type Error = crate::Error; + type Error = Error; - fn try_from(result: read_response::Result) -> Result { + fn try_from(result: 
read_response::Result) -> Result { let source_offset = match result.offset { Some(o) => Offset { offset: BASE64_STANDARD.encode(o.offset), @@ -92,7 +206,11 @@ impl TryFrom for Message { value: result.payload, offset: source_offset.clone(), event_time: utc_from_timestamp(result.event_time), - id: format!("{}-{}", source_offset.partition_id, source_offset.offset), + id: MessageID { + vertex_name: get_vertex_name().to_string(), + offset: source_offset.offset, + index: 0, + }, headers: result.headers, }) } @@ -107,7 +225,7 @@ impl From for SinkRequest { value: message.value, event_time: prost_timestamp_from_utc(message.event_time), watermark: None, - id: message.id, + id: message.id.to_string(), headers: message.headers, }), status: None, @@ -117,7 +235,7 @@ impl From for SinkRequest { } /// Sink's status for each [Message] written to Sink. -#[derive(PartialEq)] +#[derive(PartialEq, Debug)] pub(crate) enum ResponseStatusFromSink { /// Successfully wrote to the Sink. Success, @@ -156,9 +274,9 @@ impl From for SinkResponse { } impl TryFrom for ResponseFromSink { - type Error = crate::Error; + type Error = Error; - fn try_from(value: SinkResponse) -> Result { + fn try_from(value: SinkResponse) -> Result { let value = value .result .ok_or(Error::Sink("result is empty".to_string()))?; @@ -175,3 +293,230 @@ impl TryFrom for ResponseFromSink { }) } } + +#[cfg(test)] +mod tests { + use super::*; + use chrono::TimeZone; + use numaflow_pb::clients::sink::sink_response::Result as SinkResult; + use numaflow_pb::clients::source::Offset as SourceOffset; + use numaflow_pb::objects::isb::{ + Body, Header, Message as ProtoMessage, MessageId, MessageInfo, + }; + use std::collections::HashMap; + + #[test] + fn test_offset_display() { + let offset = Offset { + offset: "123".to_string(), + partition_id: 1, + }; + assert_eq!(format!("{}", offset), "123-1"); + } + + #[test] + fn test_message_id_display() { + let message_id = MessageID { + vertex_name: "vertex".to_string(), + offset: 
"123".to_string(), + index: 0, + }; + assert_eq!(format!("{}", message_id), "vertex-123-0"); + } + + #[test] + fn test_offset_to_ack_request() { + let offset = Offset { + offset: BASE64_STANDARD.encode("123"), + partition_id: 1, + }; + let ack_request: AckRequest = offset.into(); + assert_eq!(ack_request.request.unwrap().offset.unwrap().partition_id, 1); + } + + #[test] + fn test_message_to_vec_u8() { + let message = Message { + keys: vec!["key1".to_string()], + value: vec![1, 2, 3], + offset: Offset { + offset: "123".to_string(), + partition_id: 0, + }, + event_time: Utc.timestamp_opt(1627846261, 0).unwrap(), + id: MessageID { + vertex_name: "vertex".to_string(), + offset: "123".to_string(), + index: 0, + }, + headers: HashMap::new(), + }; + + let result: Result> = message.clone().try_into(); + assert!(result.is_ok()); + + let proto_message = ProtoMessage { + header: Some(Header { + message_info: Some(MessageInfo { + event_time: prost_timestamp_from_utc(message.event_time), + is_late: false, + }), + kind: numaflow_pb::objects::isb::MessageKind::Data as i32, + id: Some(MessageId { + vertex_name: get_vertex_name().to_string(), + offset: message.offset.to_string(), + index: 0, + }), + keys: message.keys.clone(), + headers: message.headers.clone(), + }), + body: Some(Body { + payload: message.value.clone(), + }), + }; + + let mut buf = Vec::new(); + prost::Message::encode(&proto_message, &mut buf).unwrap(); + assert_eq!(result.unwrap(), buf); + } + + #[test] + fn test_vec_u8_to_message() { + let proto_message = ProtoMessage { + header: Some(Header { + message_info: Some(MessageInfo { + event_time: prost_timestamp_from_utc(Utc.timestamp_opt(1627846261, 0).unwrap()), + is_late: false, + }), + kind: numaflow_pb::objects::isb::MessageKind::Data as i32, + id: Some(MessageId { + vertex_name: "vertex".to_string(), + offset: "123".to_string(), + index: 0, + }), + keys: vec!["key1".to_string()], + headers: HashMap::new(), + }), + body: Some(Body { + payload: vec![1, 2, 3], + 
}), + }; + + let mut buf = Vec::new(); + prost::Message::encode(&proto_message, &mut buf).unwrap(); + + let result: Result = buf.try_into(); + assert!(result.is_ok()); + + let message = result.unwrap(); + assert_eq!(message.keys, vec!["key1".to_string()]); + assert_eq!(message.value, vec![1, 2, 3]); + assert_eq!(message.offset.offset, "123"); + assert_eq!( + message.event_time, + Utc.timestamp_opt(1627846261, 0).unwrap() + ); + } + + #[test] + fn test_message_to_source_transform_request() { + let message = Message { + keys: vec!["key1".to_string()], + value: vec![1, 2, 3], + offset: Offset { + offset: "123".to_string(), + partition_id: 0, + }, + event_time: Utc.timestamp_opt(1627846261, 0).unwrap(), + id: MessageID { + vertex_name: "vertex".to_string(), + offset: "123".to_string(), + index: 0, + }, + headers: HashMap::new(), + }; + + let request: SourceTransformRequest = message.into(); + assert!(request.request.is_some()); + } + + #[test] + fn test_read_response_result_to_message() { + let result = read_response::Result { + payload: vec![1, 2, 3], + offset: Some(SourceOffset { + offset: BASE64_STANDARD.encode("123").into_bytes(), + partition_id: 0, + }), + event_time: Some( + prost_timestamp_from_utc(Utc.timestamp_opt(1627846261, 0).unwrap()).unwrap(), + ), + keys: vec!["key1".to_string()], + headers: HashMap::new(), + }; + + let message: Result = result.try_into(); + assert!(message.is_ok()); + + let message = message.unwrap(); + assert_eq!(message.keys, vec!["key1".to_string()]); + assert_eq!(message.value, vec![1, 2, 3]); + assert_eq!( + message.event_time, + Utc.timestamp_opt(1627846261, 0).unwrap() + ); + } + + #[test] + fn test_message_to_sink_request() { + let message = Message { + keys: vec!["key1".to_string()], + value: vec![1, 2, 3], + offset: Offset { + offset: "123".to_string(), + partition_id: 0, + }, + event_time: Utc.timestamp_opt(1627846261, 0).unwrap(), + id: MessageID { + vertex_name: "vertex".to_string(), + offset: "123".to_string(), + index: 0, 
+ }, + headers: HashMap::new(), + }; + + let request: SinkRequest = message.into(); + assert!(request.request.is_some()); + } + + #[test] + fn test_response_from_sink_to_sink_response() { + let response = ResponseFromSink { + id: "123".to_string(), + status: ResponseStatusFromSink::Success, + }; + + let sink_response: SinkResponse = response.into(); + assert_eq!(sink_response.result.unwrap().status, Success as i32); + } + + #[test] + fn test_sink_response_to_response_from_sink() { + let sink_response = SinkResponse { + result: Some(SinkResult { + id: "123".to_string(), + status: Success as i32, + err_msg: "".to_string(), + }), + handshake: None, + status: None, + }; + + let response: Result = sink_response.try_into(); + assert!(response.is_ok()); + + let response = response.unwrap(); + assert_eq!(response.id, "123"); + assert_eq!(response.status, ResponseStatusFromSink::Success); + } +} diff --git a/rust/numaflow-core/src/monovertex/forwarder.rs b/rust/numaflow-core/src/monovertex/forwarder.rs index 268598450d..a1bc6112a8 100644 --- a/rust/numaflow-core/src/monovertex/forwarder.rs +++ b/rust/numaflow-core/src/monovertex/forwarder.rs @@ -367,7 +367,7 @@ impl Forwarder { // and keep only the failed messages to send again // construct the error map for the failed messages messages_to_send.retain(|msg| { - if let Some(result) = result_map.get(&msg.id) { + if let Some(result) = result_map.get(&msg.id.to_string()) { return match result { ResponseStatusFromSink::Success => false, ResponseStatusFromSink::Failed(err_msg) => { @@ -448,7 +448,7 @@ impl Forwarder { // and keep only the failed messages to send again // construct the error map for the failed messages messages_to_send.retain(|msg| { - if let Some(result) = result_map.get(&msg.id) { + if let Some(result) = result_map.get(&msg.offset.to_string()) { return match result { ResponseStatusFromSink::Success => false, ResponseStatusFromSink::Failed(err_msg) => { diff --git a/rust/numaflow-core/src/monovertex/metrics.rs 
b/rust/numaflow-core/src/monovertex/metrics.rs index 9c7b7aed0d..b7db9c8d47 100644 --- a/rust/numaflow-core/src/monovertex/metrics.rs +++ b/rust/numaflow-core/src/monovertex/metrics.rs @@ -759,7 +759,7 @@ mod tests { // The first tick happens immediately, so we don't need to wait for the refresh_interval for the first iteration to complete. tokio::time::sleep(Duration::from_millis(50)).await; - // Get the stored values for all time intevals + // Get the stored values for all time intervals // We will store the values corresponding to the labels (from LOOKBACK_SECONDS_MAP) "1m", "default", "5m", "15" in the same order in this array let mut stored_values: [i64; 4] = [0; 4]; { diff --git a/rust/numaflow-core/src/pipeline.rs b/rust/numaflow-core/src/pipeline.rs new file mode 100644 index 0000000000..0439eb1c7f --- /dev/null +++ b/rust/numaflow-core/src/pipeline.rs @@ -0,0 +1,2 @@ +mod forwarder; +mod isb; diff --git a/rust/numaflow-core/src/pipeline/forwarder.rs b/rust/numaflow-core/src/pipeline/forwarder.rs new file mode 100644 index 0000000000..70b786d12e --- /dev/null +++ b/rust/numaflow-core/src/pipeline/forwarder.rs @@ -0,0 +1 @@ +// TODO diff --git a/rust/numaflow-core/src/pipeline/isb.rs b/rust/numaflow-core/src/pipeline/isb.rs new file mode 100644 index 0000000000..53ab02707f --- /dev/null +++ b/rust/numaflow-core/src/pipeline/isb.rs @@ -0,0 +1 @@ +pub(crate) mod jetstream; diff --git a/rust/numaflow-core/src/pipeline/isb/jetstream.rs b/rust/numaflow-core/src/pipeline/isb/jetstream.rs new file mode 100644 index 0000000000..d4a263486f --- /dev/null +++ b/rust/numaflow-core/src/pipeline/isb/jetstream.rs @@ -0,0 +1,344 @@ +use crate::error::Error; +use crate::message::Message; +use crate::pipeline::isb::jetstream::writer::JetstreamWriter; +use crate::Result; +use async_nats::jetstream::Context; +use tokio::sync::mpsc::Receiver; +use tokio::sync::{mpsc, oneshot}; +use tokio_util::sync::CancellationToken; + +/// Jetstream Writer is responsible for writing messages 
to Jetstream ISB. +/// it exposes both sync and async methods to write messages. +pub(super) mod writer; + +/// ISB Writer accepts an Actor pattern based messages. +#[derive(Debug)] +struct ActorMessage { + /// Write the messages to ISB + stream: &'static str, + message: Message, + /// once the message has been successfully written, we can let the sender know. + /// This can be used to trigger Acknowledgement of the message from the Reader. + // FIXME: concrete type and better name + callee_tx: oneshot::Sender>, +} + +impl ActorMessage { + fn new( + stream: &'static str, + message: Message, + callee_tx: oneshot::Sender>, + ) -> Self { + Self { + stream, + message, + callee_tx, + } + } +} + +/// WriterActor will handle the messages and write them to the Jetstream ISB. +struct WriterActor { + js_writer: JetstreamWriter, + receiver: Receiver, + cancel_token: CancellationToken, +} + +impl WriterActor { + fn new( + js_writer: JetstreamWriter, + receiver: Receiver, + cancel_token: CancellationToken, + ) -> Self { + Self { + js_writer, + receiver, + cancel_token, + } + } + + async fn handle_message(&mut self, msg: ActorMessage) { + let payload: Vec = msg + .message + .try_into() + .expect("message serialization should not fail"); + self.js_writer + .write(msg.stream, payload, msg.callee_tx) + .await + } + + async fn run(&mut self) { + while let Some(msg) = self.receiver.recv().await { + self.handle_message(msg).await; + } + } +} + +/// WriterHandle is the handle to the WriterActor. It exposes a method to send messages to the Actor. 
+pub(crate) struct WriterHandle { + sender: mpsc::Sender, +} + +impl WriterHandle { + pub(super) fn new(js_ctx: Context, batch_size: usize, cancel_token: CancellationToken) -> Self { + let (sender, receiver) = mpsc::channel::(batch_size); + + let js_writer = JetstreamWriter::new(js_ctx, batch_size, cancel_token.clone()); + let mut actor = WriterActor::new(js_writer.clone(), receiver, cancel_token); + + tokio::spawn(async move { + actor.run().await; + }); + + Self { sender } + } + + pub(crate) async fn write( + &self, + stream: &'static str, + message: Message, + ) -> Result>> { + let (sender, receiver) = oneshot::channel(); + let msg = ActorMessage::new(stream, message, sender); + self.sender + .send(msg) + .await + .map_err(|e| Error::ISB(format!("Failed to write message to actor channel: {}", e)))?; + + Ok(receiver) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::message::{Message, MessageID, Offset}; + use async_nats::jetstream; + use async_nats::jetstream::stream; + use chrono::Utc; + use std::collections::HashMap; + use std::time::Duration; + use tokio::sync::oneshot; + use tokio::time::Instant; + + #[cfg(feature = "nats-tests")] + #[tokio::test] + async fn test_publish_messages() { + let cln_token = CancellationToken::new(); + let js_url = "localhost:4222"; + // Create JetStream context + let client = async_nats::connect(js_url).await.unwrap(); + let context = jetstream::new(client); + + let stream_name = "default"; + let _stream = context + .get_or_create_stream(stream::Config { + name: stream_name.into(), + subjects: vec![stream_name.into()], + ..Default::default() + }) + .await + .unwrap(); + + // Create ISBMessageHandler + let batch_size = 500; + let handler = WriterHandle::new(context.clone(), batch_size, cln_token.clone()); + + let mut result_receivers = Vec::new(); + // Publish 500 messages + for i in 0..500 { + let message = Message { + keys: vec![format!("key_{}", i)], + value: format!("message {}", i).as_bytes().to_vec(), + offset: 
Offset { + offset: format!("offset_{}", i), + partition_id: i, + }, + event_time: Utc::now(), + id: MessageID { + vertex_name: "vertex".to_string(), + offset: format!("offset_{}", i), + index: i, + }, + headers: HashMap::new(), + }; + let (sender, receiver) = oneshot::channel(); + let msg = ActorMessage { + stream: stream_name, + message, + callee_tx: sender, + }; + handler.sender.send(msg).await.unwrap(); + result_receivers.push(receiver); + } + + for receiver in result_receivers { + let result = receiver.await.unwrap(); + assert!(result.is_ok()); + } + + context.delete_stream(stream_name).await.unwrap(); + } + + #[cfg(feature = "nats-tests")] + #[tokio::test] + async fn test_publish_messages_with_cancellation() { + let js_url = "localhost:4222"; + // Create JetStream context + let client = async_nats::connect(js_url).await.unwrap(); + let context = jetstream::new(client); + + let stream_name = "test_publish_cancellation"; + let _stream = context + .get_or_create_stream(stream::Config { + name: stream_name.into(), + subjects: vec![stream_name.into()], + max_message_size: 1024, + ..Default::default() + }) + .await + .unwrap(); + + let cancel_token = CancellationToken::new(); + let handler = WriterHandle::new(context.clone(), 500, cancel_token.clone()); + + let mut receivers = Vec::new(); + // Publish 100 messages successfully + for i in 0..100 { + let message = Message { + keys: vec![format!("key_{}", i)], + value: format!("message {}", i).as_bytes().to_vec(), + offset: Offset { + offset: format!("offset_{}", i), + partition_id: 0, + }, + event_time: Utc::now(), + id: MessageID { + vertex_name: "vertex".to_string(), + offset: format!("offset_{}", i), + index: i, + }, + headers: HashMap::new(), + }; + receivers.push(handler.write(stream_name, message).await.unwrap()); + } + + // Attempt to publish the 101th message, which should get stuck in the retry loop + // because the max message size is set to 1024 + let message = Message { + keys: vec!["key_101".to_string()], 
+ value: vec![0; 1024], + offset: Offset { + offset: "offset_101".to_string(), + partition_id: 0, + }, + event_time: Utc::now(), + id: MessageID { + vertex_name: "vertex".to_string(), + offset: "offset_101".to_string(), + index: 101, + }, + headers: HashMap::new(), + }; + let receiver = handler.write(stream_name, message).await.unwrap(); + receivers.push(receiver); + + // Cancel the token to exit the retry loop + cancel_token.cancel(); + + // Check the results + for (i, receiver) in receivers.into_iter().enumerate() { + let result = receiver.await.unwrap(); + if i < 100 { + assert!(result.is_ok()); + } else { + assert!(result.is_err()); + } + } + + context.delete_stream(stream_name).await.unwrap(); + } + + #[cfg(feature = "nats-tests")] + #[ignore] + #[tokio::test] + async fn benchmark_publish_messages() { + let js_url = "localhost:4222"; + // Create JetStream context + let client = async_nats::connect(js_url).await.unwrap(); + let context = jetstream::new(client); + + let stream_name = "benchmark_stream"; + let _stream = context + .get_or_create_stream(stream::Config { + name: stream_name.into(), + subjects: vec![stream_name.into()], + ..Default::default() + }) + .await + .unwrap(); + + let cancel_token = CancellationToken::new(); + let handler = WriterHandle::new(context.clone(), 500, cancel_token.clone()); + + let (tx, mut rx) = mpsc::channel(100); + let test_start_time = Instant::now(); + let duration = Duration::from_secs(10); + + // Task to publish messages + let publish_task = tokio::spawn(async move { + let mut i = 0; + let mut sent_count = 0; + let mut start_time = Instant::now(); + while Instant::now().duration_since(test_start_time) < duration { + let message = Message { + keys: vec![format!("key_{}", i)], + value: format!("message {}", i).as_bytes().to_vec(), + offset: Offset { + offset: format!("offset_{}", i), + partition_id: i, + }, + event_time: Utc::now(), + id: MessageID { + vertex_name: "".to_string(), + offset: format!("offset_{}", i), + index: 
i, + }, + headers: HashMap::new(), + }; + tx.send(handler.write(stream_name, message).await.unwrap()) + .await + .unwrap(); + sent_count += 1; + i += 1; + + if start_time.elapsed().as_secs() >= 1 { + println!("Messages sent: {}", sent_count); + sent_count = 0; + start_time = Instant::now(); + } + } + }); + + // Task to await responses + let await_task = tokio::spawn(async move { + let mut start_time = Instant::now(); + let mut count = 0; + while let Some(receiver) = rx.recv().await { + if receiver.await.unwrap().is_ok() { + count += 1; + } + + if start_time.elapsed().as_secs() >= 1 { + println!("Messages received: {}", count); + count = 0; + start_time = Instant::now(); + } + } + }); + + let _ = tokio::join!(publish_task, await_task); + + context.delete_stream(stream_name).await.unwrap(); + } +} diff --git a/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs b/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs new file mode 100644 index 0000000000..baff3f5217 --- /dev/null +++ b/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs @@ -0,0 +1,383 @@ +use crate::error::Error; +use crate::Result; +use async_nats::jetstream::context::PublishAckFuture; +use async_nats::jetstream::publish::PublishAck; +use async_nats::jetstream::Context; +use bytes::Bytes; +use log::warn; +use std::time::Duration; +use tokio::sync::mpsc::Receiver; +use tokio::sync::{mpsc, oneshot}; +use tokio::time::sleep; +use tokio_util::sync::CancellationToken; +use tracing::error; + +#[derive(Clone, Debug)] +/// Writes to JetStream ISB. Exposes both write and blocking methods to write messages. +/// It accepts a cancellation token to stop infinite retries during shutdown. +pub(super) struct JetstreamWriter { + js_ctx: Context, + paf_resolver_tx: mpsc::Sender, + cancel_token: CancellationToken, +} + +impl JetstreamWriter { + /// Creates a JetStream Writer and a background task to make sure the Write futures (PAFs) are + /// successful. Batch Size determines the maximum pending futures. 
+ pub(super) fn new(js_ctx: Context, batch_size: usize, cancel_token: CancellationToken) -> Self { + let (paf_resolver_tx, paf_resolver_rx) = + mpsc::channel::(batch_size); + + let this = Self { + js_ctx, + paf_resolver_tx, + cancel_token, + }; + + let mut resolver_actor = PafResolverActor::new(this.clone(), paf_resolver_rx); + + tokio::spawn(async move { + resolver_actor.run().await; + }); + + this + } + + /// Writes the message to the JetStream ISB and returns a future which can be + /// awaited to get the PublishAck. It will do infinite retries until the message + /// gets published successfully. If it returns an error it means it is fatal error + pub(super) async fn write( + &self, + stream: &'static str, + payload: Vec, + callee_tx: oneshot::Sender>, + ) { + let js_ctx = self.js_ctx.clone(); + + // FIXME: add gate for buffer-full check. + + // loop till we get a PAF, there could be other reasons why PAFs cannot be created. + let paf = loop { + match js_ctx.publish(stream, Bytes::from(payload.clone())).await { + Ok(paf) => { + break paf; + } + Err(e) => { + error!(?e, "publishing failed, retrying"); + sleep(Duration::from_millis(10)).await; + } + } + if self.cancel_token.is_cancelled() { + error!("Shutdown signal received, exiting write loop"); + callee_tx + .send(Err(Error::ISB("Shutdown signal received".to_string()))) + .unwrap(); + return; + } + }; + + // send the paf and callee_tx over + self.paf_resolver_tx + .send(ResolveAndPublishResult { + paf, + stream, + payload, + callee_tx, + }) + .await + .expect("send should not fail"); + } + + /// Writes the message to the JetStream ISB and returns the PublishAck. It will do + /// infinite retries until the message gets published successfully. If it returns + /// an error it means it is fatal non-retryable error. 
+ pub(super) async fn blocking_write( + &self, + stream: &'static str, + payload: Vec, + ) -> Result { + let js_ctx = self.js_ctx.clone(); + + loop { + match js_ctx.publish(stream, Bytes::from(payload.clone())).await { + Ok(paf) => match paf.await { + Ok(ack) => { + if ack.duplicate { + // should we return an error here? Because duplicate messages are not fatal + // But it can mess up the watermark progression because the offset will be + // same as the previous message offset + warn!("Duplicate message detected, ignoring {:?}", ack); + } + return Ok(ack); + } + Err(e) => { + error!("awaiting publish ack failed, retrying: {}", e); + sleep(Duration::from_millis(10)).await; + } + }, + Err(e) => { + error!("publishing failed, retrying: {}", e); + sleep(Duration::from_millis(10)).await; + } + } + if self.cancel_token.is_cancelled() { + return Err(Error::ISB("Shutdown signal received".to_string())); + } + } + } +} + +/// ResolveAndPublishResult resolves the result of the write PAF operation. +/// It contains the PublishAckFuture which can be awaited to get the PublishAck. Once PAF has +/// resolved, the information is published to callee_tx. +#[derive(Debug)] +pub(super) struct ResolveAndPublishResult { + paf: PublishAckFuture, + stream: &'static str, + payload: Vec, + callee_tx: oneshot::Sender>, +} + +/// Resolves the PAF from the write call, if not successful it will do a blocking write so that +/// it is eventually successful. Once the PAF has been resolved (by either means) it will notify +/// the top-level callee via the oneshot rx. +struct PafResolverActor { + js_writer: JetstreamWriter, + receiver: Receiver, +} + +impl PafResolverActor { + fn new(js_writer: JetstreamWriter, receiver: Receiver) -> Self { + PafResolverActor { + js_writer, + receiver, + } + } + + /// Tries to the resolve the original PAF from the write call. If it is successful, will send + /// the successful result to the top-level callee's oneshot channel. 
If the original PAF does + /// not successfully resolve, it will do blocking write till write to JetStream succeeds. + async fn successfully_resolve_paf(&mut self, result: ResolveAndPublishResult) { + match result.paf.await { + Ok(ack) => result.callee_tx.send(Ok(ack.sequence)).unwrap(), + Err(e) => { + error!(?e, "Failed to resolve the future, trying blocking write"); + match self + .js_writer + .blocking_write(result.stream, result.payload.clone()) + .await + { + Ok(ack) => result.callee_tx.send(Ok(ack.sequence)).unwrap(), + Err(e) => result.callee_tx.send(Err(e)).unwrap(), + } + } + } + } + + async fn run(&mut self) { + while let Some(result) = self.receiver.recv().await { + self.successfully_resolve_paf(result).await; + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::message::{Message, MessageID, Offset}; + use async_nats::jetstream; + use async_nats::jetstream::stream; + use chrono::Utc; + use std::collections::HashMap; + + #[cfg(feature = "nats-tests")] + #[tokio::test] + async fn test_async_write() { + let cln_token = CancellationToken::new(); + let js_url = "localhost:4222"; + // Create JetStream context + let client = async_nats::connect(js_url).await.unwrap(); + let context = jetstream::new(client); + + let stream_name = "test_async"; + let _stream = context + .get_or_create_stream(stream::Config { + name: stream_name.into(), + subjects: vec![stream_name.into()], + ..Default::default() + }) + .await + .unwrap(); + + let writer = JetstreamWriter::new(context.clone(), 500, cln_token.clone()); + + let message = Message { + keys: vec!["key_0".to_string()], + value: "message 0".as_bytes().to_vec(), + offset: Offset { + offset: "offset_0".to_string(), + partition_id: 0, + }, + event_time: Utc::now(), + id: MessageID { + vertex_name: "vertex".to_string(), + offset: "offset_0".to_string(), + index: 0, + }, + headers: HashMap::new(), + }; + + let (success_tx, success_rx) = oneshot::channel::>(); + writer + .write(stream_name, 
message.try_into().unwrap(), success_tx) + .await; + assert!(success_rx.await.is_ok()); + + context.delete_stream(stream_name).await.unwrap(); + } + + #[cfg(feature = "nats-tests")] + #[tokio::test] + async fn test_sync_write() { + let cln_token = CancellationToken::new(); + let js_url = "localhost:4222"; + // Create JetStream context + let client = async_nats::connect(js_url).await.unwrap(); + let context = jetstream::new(client); + + let stream_name = "test_sync"; + let _stream = context + .get_or_create_stream(stream::Config { + name: stream_name.into(), + subjects: vec![stream_name.into()], + ..Default::default() + }) + .await + .unwrap(); + + let writer = JetstreamWriter::new(context.clone(), 500, cln_token.clone()); + + let message = Message { + keys: vec!["key_0".to_string()], + value: "message 0".as_bytes().to_vec(), + offset: Offset { + offset: "offset_0".to_string(), + partition_id: 1, + }, + event_time: Utc::now(), + id: MessageID { + vertex_name: "vertex".to_string(), + offset: "offset_0".to_string(), + index: 0, + }, + headers: HashMap::new(), + }; + + let result = writer + .blocking_write(stream_name, message.try_into().unwrap()) + .await; + assert!(result.is_ok()); + + let publish_ack = result.unwrap(); + assert_eq!(publish_ack.stream, stream_name); + + context.delete_stream(stream_name).await.unwrap(); + } + + #[cfg(feature = "nats-tests")] + #[tokio::test] + async fn test_write_with_cancellation() { + let js_url = "localhost:4222"; + // Create JetStream context + let client = async_nats::connect(js_url).await.unwrap(); + let context = jetstream::new(client); + + let stream_name = "test_cancellation"; + let _stream = context + .get_or_create_stream(stream::Config { + name: stream_name.into(), + subjects: vec![stream_name.into()], + max_message_size: 1024, + ..Default::default() + }) + .await + .unwrap(); + + let cancel_token = CancellationToken::new(); + let writer = JetstreamWriter::new(context.clone(), 500, cancel_token.clone()); + + let mut 
result_receivers = Vec::new(); + // Publish 10 messages successfully + for i in 0..10 { + let message = Message { + keys: vec![format!("key_{}", i)], + value: format!("message {}", i).as_bytes().to_vec(), + offset: Offset { + offset: format!("offset_{}", i), + partition_id: i, + }, + event_time: Utc::now(), + id: MessageID { + vertex_name: "vertex".to_string(), + offset: format!("offset_{}", i), + index: i, + }, + headers: HashMap::new(), + }; + let (success_tx, success_rx) = oneshot::channel::>(); + writer + .write(stream_name, message.try_into().unwrap(), success_tx) + .await; + result_receivers.push(success_rx); + } + + // Attempt to publish a message which has a payload size greater than the max_message_size + // so that it fails and sync write will be attempted and it will be blocked + let message = Message { + keys: vec!["key_11".to_string()], + value: vec![0; 1025], + offset: Offset { + offset: "offset_11".to_string(), + partition_id: 11, + }, + event_time: Utc::now(), + id: MessageID { + vertex_name: "vertex".to_string(), + offset: "offset_11".to_string(), + index: 11, + }, + headers: HashMap::new(), + }; + let (success_tx, success_rx) = oneshot::channel::>(); + writer + .write(stream_name, message.try_into().unwrap(), success_tx) + .await; + result_receivers.push(success_rx); + + // Cancel the token to exit the retry loop + cancel_token.cancel(); + + // Check the results + for (i, receiver) in result_receivers.into_iter().enumerate() { + let result = receiver.await.unwrap(); + if i < 10 { + assert!( + result.is_ok(), + "Message {} should be published successfully", + i + ); + } else { + assert!( + result.is_err(), + "Message 11 should fail with cancellation error" + ); + assert_eq!( + result.err().unwrap().to_string(), + "ISB Error - Shutdown signal received", + ); + } + } + + context.delete_stream(stream_name).await.unwrap(); + } +} diff --git a/rust/numaflow-core/src/shared/server_info.rs b/rust/numaflow-core/src/shared/server_info.rs index 
ffc712ceb1..b3535b2182 100644 --- a/rust/numaflow-core/src/shared/server_info.rs +++ b/rust/numaflow-core/src/shared/server_info.rs @@ -729,7 +729,7 @@ mod tests { assert!(result.is_err()); assert!( result.unwrap_err().to_string().contains( - "ServerInfoError Error - SDK version 0.0.9 must be upgraded to at least 0.1.0, in order to work with the current numaflow version")); + "ServerInfo Error - SDK version 0.0.9 must be upgraded to at least 0.1.0, in order to work with the current numaflow version")); } #[tokio::test] @@ -904,7 +904,7 @@ mod tests { assert!(result.is_err()); assert!( result.unwrap_err().to_string().contains( - "ServerInfoError Error - SDK version 0.0.9 must be upgraded to at least 0.1.0-rc3, in order to work with the current numaflow version")); + "ServerInfo Error - SDK version 0.0.9 must be upgraded to at least 0.1.0-rc3, in order to work with the current numaflow version")); } #[tokio::test] diff --git a/rust/numaflow-core/src/sink/log.rs b/rust/numaflow-core/src/sink/log.rs index 8db95b9072..454b43da66 100644 --- a/rust/numaflow-core/src/sink/log.rs +++ b/rust/numaflow-core/src/sink/log.rs @@ -25,7 +25,7 @@ impl Sink for LogSink { ); tracing::info!("{}", log_line); result.push(ResponseFromSink { - id: msg.id, + id: msg.id.to_string(), status: ResponseStatusFromSink::Success, }) } diff --git a/rust/numaflow-core/src/sink/user_defined.rs b/rust/numaflow-core/src/sink/user_defined.rs index 4bb74d6790..77d650a2cc 100644 --- a/rust/numaflow-core/src/sink/user_defined.rs +++ b/rust/numaflow-core/src/sink/user_defined.rs @@ -119,7 +119,7 @@ mod tests { use tracing::info; use crate::error::Result; - use crate::message::{Message, Offset}; + use crate::message::{Message, MessageID, Offset}; use crate::shared::utils::create_rpc_channel; use crate::sink::user_defined::UserDefinedSink; use numaflow_pb::clients::sink::sink_client::SinkClient; @@ -182,7 +182,11 @@ mod tests { }, event_time: Utc::now(), headers: Default::default(), - id: "one".to_string(), + 
id: MessageID { + vertex_name: "vertex".to_string(), + offset: "1".to_string(), + index: 0, + }, }, Message { keys: vec![], @@ -193,7 +197,11 @@ mod tests { }, event_time: Utc::now(), headers: Default::default(), - id: "two".to_string(), + id: MessageID { + vertex_name: "vertex".to_string(), + offset: "2".to_string(), + index: 1, + }, }, ]; diff --git a/rust/numaflow-core/src/source/generator.rs b/rust/numaflow-core/src/source/generator.rs index 08f20e25e0..b1ac56fe43 100644 --- a/rust/numaflow-core/src/source/generator.rs +++ b/rust/numaflow-core/src/source/generator.rs @@ -3,7 +3,7 @@ use std::time::Duration; use bytes::Bytes; use futures::StreamExt; -use crate::message::{Message, Offset}; +use crate::message::{Message, MessageID, Offset}; use crate::reader; use crate::source; @@ -215,7 +215,11 @@ impl source::SourceReader for GeneratorRead { partition_id: 0, }, event_time: Default::default(), - id, + id: MessageID { + vertex_name: Default::default(), + offset: id, + index: Default::default(), + }, headers: Default::default(), } }) diff --git a/rust/numaflow-core/src/source/user_defined.rs b/rust/numaflow-core/src/source/user_defined.rs index da71191fc3..2ff20dedb9 100644 --- a/rust/numaflow-core/src/source/user_defined.rs +++ b/rust/numaflow-core/src/source/user_defined.rs @@ -90,7 +90,7 @@ impl UserDefinedSourceRead { let handshake_response = resp_stream.message().await?.ok_or(Error::Source( "failed to receive handshake response".to_string(), ))?; - // handshake cannot to None during the initial phase and it has to set `sot` to true. + // handshake cannot to None during the initial phase, and it has to set `sot` to true. 
if handshake_response.handshake.map_or(true, |h| !h.sot) { return Err(Error::Source("invalid handshake response".to_string())); } diff --git a/rust/numaflow-core/src/transformer/user_defined.rs b/rust/numaflow-core/src/transformer/user_defined.rs index 653ee46293..c5995caa58 100644 --- a/rust/numaflow-core/src/transformer/user_defined.rs +++ b/rust/numaflow-core/src/transformer/user_defined.rs @@ -15,7 +15,7 @@ use numaflow_pb::clients::sourcetransformer::{ use crate::config::config; use crate::error::{Error, Result}; -use crate::message::{Message, Offset}; +use crate::message::{get_vertex_name, Message, MessageID, Offset}; use crate::shared::utils::utc_from_timestamp; const DROP: &str = "U+005C__DROP__"; @@ -55,6 +55,7 @@ impl SourceTransformer { let handshake_response = resp_stream.message().await?.ok_or(Error::Transformer( "failed to receive handshake response".to_string(), ))?; + // handshake cannot to None during the initial phase and it has to set `sot` to true. if handshake_response.handshake.map_or(true, |h| !h.sot) { return Err(Error::Transformer("invalid handshake response".to_string())); @@ -89,7 +90,7 @@ impl SourceTransformer { let mut tracker: HashMap = HashMap::with_capacity(messages.len()); for message in &messages { tracker.insert( - message.id.clone(), + message.id.to_string(), MessageInfo { offset: message.offset.clone(), headers: message.headers.clone(), @@ -152,7 +153,7 @@ impl SourceTransformer { } }; - let Some((msg_id, msg_info)) = tracker.remove_entry(&resp.id) else { + let Some((_, msg_info)) = tracker.remove_entry(&resp.id) else { token.cancel(); return Err(Error::Transformer(format!( "Received message with unknown ID {}", @@ -166,7 +167,11 @@ impl SourceTransformer { continue; } let message = Message { - id: format!("{}-{}", msg_id, i), + id: MessageID { + vertex_name: get_vertex_name().to_string(), + index: i as i32, + offset: msg_info.offset.to_string(), + }, keys: result.keys, value: result.value, offset: msg_info.offset.clone(), @@ 
-227,6 +232,7 @@ mod tests { use std::error::Error; use std::time::Duration; + use crate::message::MessageID; use crate::shared::utils::create_rpc_channel; use crate::transformer::user_defined::SourceTransformHandle; use numaflow::sourcetransform; @@ -282,7 +288,11 @@ mod tests { offset: "0".into(), }, event_time: chrono::Utc::now(), - id: "1".to_string(), + id: MessageID { + vertex_name: "vertex_name".to_string(), + offset: "0".to_string(), + index: 0, + }, headers: Default::default(), }; @@ -357,7 +367,11 @@ mod tests { offset: "0".into(), }, event_time: chrono::Utc::now(), - id: "".to_string(), + id: MessageID { + vertex_name: "vertex_name".to_string(), + offset: "0".to_string(), + index: 0, + }, headers: Default::default(), }; From 7672de71c645d07af68b284214e4d227ad6d4ac7 Mon Sep 17 00:00:00 2001 From: Yashash H L Date: Thu, 17 Oct 2024 07:46:07 +0530 Subject: [PATCH 114/188] chore: organize rust imports (#2164) Signed-off-by: Yashash H L --- rust/backoff/src/retry.rs | 3 +- rust/numaflow-core/src/config.rs | 2 +- rust/numaflow-core/src/message.rs | 22 ++++++++------ rust/numaflow-core/src/monovertex.rs | 25 ++++++++-------- .../numaflow-core/src/monovertex/forwarder.rs | 29 ++++++++++--------- rust/numaflow-core/src/monovertex/metrics.rs | 7 ++--- .../src/pipeline/isb/jetstream.rs | 19 +++++++----- .../src/pipeline/isb/jetstream/writer.rs | 16 ++++++---- rust/numaflow-core/src/shared/server_info.rs | 3 +- rust/numaflow-core/src/shared/utils.rs | 18 +++++++----- rust/numaflow-core/src/sink.rs | 4 +-- rust/numaflow-core/src/sink/user_defined.rs | 10 +++---- rust/numaflow-core/src/source/generator.rs | 13 +++++---- rust/numaflow-core/src/source/user_defined.rs | 18 +++++------- .../src/transformer/user_defined.rs | 16 +++++----- rust/servesink/src/lib.rs | 6 ++-- rust/serving/src/app.rs | 10 ++++--- rust/serving/src/app/callback.rs | 9 ++---- rust/serving/src/app/callback/state.rs | 6 ++-- .../src/app/callback/store/memstore.rs | 6 ++-- 
.../src/app/callback/store/redisstore.rs | 7 ++--- rust/serving/src/app/jetstream_proxy.rs | 6 ++-- rust/serving/src/app/message_path.rs | 6 ++-- rust/serving/src/app/tracker.rs | 3 +- rust/serving/src/config.rs | 12 ++++---- rust/serving/src/lib.rs | 8 +++-- rust/serving/src/metrics.rs | 15 ++++++---- rust/serving/src/pipeline.rs | 3 +- rust/src/bin/main.rs | 1 + 29 files changed, 158 insertions(+), 145 deletions(-) diff --git a/rust/backoff/src/retry.rs b/rust/backoff/src/retry.rs index 5747334e0c..5ead53915a 100644 --- a/rust/backoff/src/retry.rs +++ b/rust/backoff/src/retry.rs @@ -147,9 +147,8 @@ mod tests { use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; - use crate::strategy::fixed; - use super::*; + use crate::strategy::fixed; async fn always_successful() -> Result { Ok(42) diff --git a/rust/numaflow-core/src/config.rs b/rust/numaflow-core/src/config.rs index facca96e9c..f8b5f37c7d 100644 --- a/rust/numaflow-core/src/config.rs +++ b/rust/numaflow-core/src/config.rs @@ -5,9 +5,9 @@ use std::sync::OnceLock; use base64::prelude::BASE64_STANDARD; use base64::Engine; use bytes::Bytes; +use numaflow_models::models::{Backoff, MonoVertex, RetryStrategy}; use crate::Error; -use numaflow_models::models::{Backoff, MonoVertex, RetryStrategy}; const DEFAULT_SOURCE_SOCKET: &str = "/var/run/numaflow/source.sock"; const DEFAULT_SOURCE_SERVER_INFO_FILE: &str = "/var/run/numaflow/sourcer-server-info"; diff --git a/rust/numaflow-core/src/message.rs b/rust/numaflow-core/src/message.rs index db9c1ab681..f2af2e8739 100644 --- a/rust/numaflow-core/src/message.rs +++ b/rust/numaflow-core/src/message.rs @@ -1,6 +1,8 @@ -use crate::shared::utils::{prost_timestamp_from_utc, utc_from_timestamp}; -use crate::Error; -use crate::Result; +use std::cmp::PartialEq; +use std::collections::HashMap; +use std::sync::OnceLock; +use std::{env, fmt}; + use base64::engine::general_purpose::STANDARD as BASE64_STANDARD; use base64::Engine; use bytes::Bytes; @@ -12,10 +14,10 @@ use 
numaflow_pb::clients::source::{read_response, AckRequest}; use numaflow_pb::clients::sourcetransformer::SourceTransformRequest; use prost::Message as ProtoMessage; use serde::{Deserialize, Serialize}; -use std::cmp::PartialEq; -use std::collections::HashMap; -use std::sync::OnceLock; -use std::{env, fmt}; + +use crate::shared::utils::{prost_timestamp_from_utc, utc_from_timestamp}; +use crate::Error; +use crate::Result; const NUMAFLOW_MONO_VERTEX_NAME: &str = "NUMAFLOW_MONO_VERTEX_NAME"; const NUMAFLOW_VERTEX_NAME: &str = "NUMAFLOW_VERTEX_NAME"; @@ -296,14 +298,16 @@ impl TryFrom for ResponseFromSink { #[cfg(test)] mod tests { - use super::*; + use std::collections::HashMap; + use chrono::TimeZone; use numaflow_pb::clients::sink::sink_response::Result as SinkResult; use numaflow_pb::clients::source::Offset as SourceOffset; use numaflow_pb::objects::isb::{ Body, Header, Message as ProtoMessage, MessageId, MessageInfo, }; - use std::collections::HashMap; + + use super::*; #[test] fn test_offset_display() { diff --git a/rust/numaflow-core/src/monovertex.rs b/rust/numaflow-core/src/monovertex.rs index fb434da8af..0f0ec1a40a 100644 --- a/rust/numaflow-core/src/monovertex.rs +++ b/rust/numaflow-core/src/monovertex.rs @@ -1,15 +1,16 @@ use std::time::Duration; +use forwarder::ForwarderBuilder; +use metrics::UserDefinedContainerState; +use numaflow_pb::clients::sink::sink_client::SinkClient; +use numaflow_pb::clients::source::source_client::SourceClient; +use numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; use tokio::signal; use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; use tonic::transport::Channel; use tracing::info; -use numaflow_pb::clients::sink::sink_client::SinkClient; -use numaflow_pb::clients::source::source_client::SourceClient; -use numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; - use crate::config::{config, Settings}; use crate::error::{self, Error}; use 
crate::shared::utils; @@ -21,8 +22,6 @@ use crate::source::user_defined::{ }; use crate::source::SourceHandle; use crate::transformer::user_defined::SourceTransformHandle; -use forwarder::ForwarderBuilder; -use metrics::UserDefinedContainerState; /// [forwarder] orchestrates data movement from the Source to the Sink via the optional SourceTransformer. /// The forward-a-chunk executes the following in an infinite loop till a shutdown signal is received: @@ -292,17 +291,19 @@ async fn start_forwarder_with_source( #[cfg(test)] mod tests { - use crate::config::{Settings, UDSinkConfig, UDSourceConfig}; - use crate::error; - use crate::monovertex::start_forwarder; - use crate::shared::server_info::ServerInfo; - use numaflow::source::{Message, Offset, SourceReadRequest}; - use numaflow::{sink, source}; use std::fs::File; use std::io::Write; + + use numaflow::source::{Message, Offset, SourceReadRequest}; + use numaflow::{sink, source}; use tokio::sync::mpsc::Sender; use tokio_util::sync::CancellationToken; + use crate::config::{Settings, UDSinkConfig, UDSourceConfig}; + use crate::error; + use crate::monovertex::start_forwarder; + use crate::shared::server_info::ServerInfo; + struct SimpleSource; #[tonic::async_trait] impl source::Sourcer for SimpleSource { diff --git a/rust/numaflow-core/src/monovertex/forwarder.rs b/rust/numaflow-core/src/monovertex/forwarder.rs index a1bc6112a8..45326231b6 100644 --- a/rust/numaflow-core/src/monovertex/forwarder.rs +++ b/rust/numaflow-core/src/monovertex/forwarder.rs @@ -1,5 +1,11 @@ use std::collections::HashMap; +use chrono::Utc; +use log::warn; +use tokio::time::sleep; +use tokio_util::sync::CancellationToken; +use tracing::{debug, info}; + use crate::config::{config, OnFailureStrategy}; use crate::error; use crate::message::{Message, Offset, ResponseStatusFromSink}; @@ -9,12 +15,6 @@ use crate::sink::SinkHandle; use crate::Error; use crate::{source::SourceHandle, transformer::user_defined::SourceTransformHandle}; -use chrono::Utc; 
-use log::warn; -use tokio::time::sleep; -use tokio_util::sync::CancellationToken; -use tracing::{debug, info}; - /// Forwarder is responsible for reading messages from the source, applying transformation if /// transformer is present, writing the messages to the sink, and then acknowledging the messages /// back to the source. @@ -527,14 +527,6 @@ impl Forwarder { mod tests { use std::collections::HashSet; - use crate::config::config; - use crate::monovertex::forwarder::ForwarderBuilder; - use crate::monovertex::SourceType; - use crate::shared::utils::create_rpc_channel; - use crate::sink::{SinkClientType, SinkHandle}; - use crate::source::user_defined::new_source; - use crate::source::SourceHandle; - use crate::transformer::user_defined::SourceTransformHandle; use chrono::Utc; use numaflow::source::{Message, Offset, SourceReadRequest}; use numaflow::{sink, source, sourcetransform}; @@ -545,6 +537,15 @@ mod tests { use tokio::sync::mpsc::Sender; use tokio_util::sync::CancellationToken; + use crate::config::config; + use crate::monovertex::forwarder::ForwarderBuilder; + use crate::monovertex::SourceType; + use crate::shared::utils::create_rpc_channel; + use crate::sink::{SinkClientType, SinkHandle}; + use crate::source::user_defined::new_source; + use crate::source::SourceHandle; + use crate::transformer::user_defined::SourceTransformHandle; + struct SimpleSource { yet_to_be_acked: std::sync::RwLock>, } diff --git a/rust/numaflow-core/src/monovertex/metrics.rs b/rust/numaflow-core/src/monovertex/metrics.rs index b7db9c8d47..0818c26f96 100644 --- a/rust/numaflow-core/src/monovertex/metrics.rs +++ b/rust/numaflow-core/src/monovertex/metrics.rs @@ -10,6 +10,9 @@ use axum::http::{Response, StatusCode}; use axum::response::IntoResponse; use axum::{routing::get, Router}; use axum_server::tls_rustls::RustlsConfig; +use numaflow_pb::clients::sink::sink_client::SinkClient; +use numaflow_pb::clients::source::source_client::SourceClient; +use 
numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; use prometheus_client::encoding::text::encode; use prometheus_client::metrics::counter::Counter; use prometheus_client::metrics::family::Family; @@ -24,10 +27,6 @@ use tonic::transport::Channel; use tonic::Request; use tracing::{debug, error, info}; -use numaflow_pb::clients::sink::sink_client::SinkClient; -use numaflow_pb::clients::source::source_client::SourceClient; -use numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; - use crate::config::config; use crate::source::SourceHandle; use crate::Error; diff --git a/rust/numaflow-core/src/pipeline/isb/jetstream.rs b/rust/numaflow-core/src/pipeline/isb/jetstream.rs index d4a263486f..f0a312043b 100644 --- a/rust/numaflow-core/src/pipeline/isb/jetstream.rs +++ b/rust/numaflow-core/src/pipeline/isb/jetstream.rs @@ -1,12 +1,13 @@ -use crate::error::Error; -use crate::message::Message; -use crate::pipeline::isb::jetstream::writer::JetstreamWriter; -use crate::Result; use async_nats::jetstream::Context; use tokio::sync::mpsc::Receiver; use tokio::sync::{mpsc, oneshot}; use tokio_util::sync::CancellationToken; +use crate::error::Error; +use crate::message::Message; +use crate::pipeline::isb::jetstream::writer::JetstreamWriter; +use crate::Result; + /// Jetstream Writer is responsible for writing messages to Jetstream ISB. /// it exposes both sync and async methods to write messages. 
pub(super) mod writer; @@ -111,16 +112,18 @@ impl WriterHandle { #[cfg(test)] mod tests { - use super::*; - use crate::message::{Message, MessageID, Offset}; + use std::collections::HashMap; + use std::time::Duration; + use async_nats::jetstream; use async_nats::jetstream::stream; use chrono::Utc; - use std::collections::HashMap; - use std::time::Duration; use tokio::sync::oneshot; use tokio::time::Instant; + use super::*; + use crate::message::{Message, MessageID, Offset}; + #[cfg(feature = "nats-tests")] #[tokio::test] async fn test_publish_messages() { diff --git a/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs b/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs index baff3f5217..c77ae461a2 100644 --- a/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs +++ b/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs @@ -1,17 +1,19 @@ -use crate::error::Error; -use crate::Result; +use std::time::Duration; + use async_nats::jetstream::context::PublishAckFuture; use async_nats::jetstream::publish::PublishAck; use async_nats::jetstream::Context; use bytes::Bytes; use log::warn; -use std::time::Duration; use tokio::sync::mpsc::Receiver; use tokio::sync::{mpsc, oneshot}; use tokio::time::sleep; use tokio_util::sync::CancellationToken; use tracing::error; +use crate::error::Error; +use crate::Result; + #[derive(Clone, Debug)] /// Writes to JetStream ISB. Exposes both write and blocking methods to write messages. /// It accepts a cancellation token to stop infinite retries during shutdown. 
@@ -183,12 +185,14 @@ impl PafResolverActor { #[cfg(test)] mod tests { - use super::*; - use crate::message::{Message, MessageID, Offset}; + use std::collections::HashMap; + use async_nats::jetstream; use async_nats::jetstream::stream; use chrono::Utc; - use std::collections::HashMap; + + use super::*; + use crate::message::{Message, MessageID, Offset}; #[cfg(feature = "nats-tests")] #[tokio::test] diff --git a/rust/numaflow-core/src/shared/server_info.rs b/rust/numaflow-core/src/shared/server_info.rs index b3535b2182..7af7d378c2 100644 --- a/rust/numaflow-core/src/shared/server_info.rs +++ b/rust/numaflow-core/src/shared/server_info.rs @@ -458,9 +458,10 @@ mod version { #[cfg(test)] mod tests { - use serde_json::json; use std::io::{Read, Write}; use std::{collections::HashMap, fs::File}; + + use serde_json::json; use tempfile::tempdir; use super::*; diff --git a/rust/numaflow-core/src/shared/utils.rs b/rust/numaflow-core/src/shared/utils.rs index 25708d68ae..9ec87490cd 100644 --- a/rust/numaflow-core/src/shared/utils.rs +++ b/rust/numaflow-core/src/shared/utils.rs @@ -6,6 +6,9 @@ use axum::http::Uri; use backoff::retry::Retry; use backoff::strategy::fixed; use chrono::{DateTime, TimeZone, Timelike, Utc}; +use numaflow_pb::clients::sink::sink_client::SinkClient; +use numaflow_pb::clients::source::source_client::SourceClient; +use numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; use prost_types::Timestamp; use tokio::net::UnixStream; use tokio::task::JoinHandle; @@ -24,9 +27,6 @@ use crate::monovertex::metrics::{ use crate::shared::server_info; use crate::source::SourceHandle; use crate::Error; -use numaflow_pb::clients::sink::sink_client::SinkClient; -use numaflow_pb::clients::source::source_client::SourceClient; -use numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; pub(crate) async fn check_compatibility( cln_token: &CancellationToken, @@ -208,18 +208,20 @@ pub(crate) async fn 
connect_with_uds(uds_path: PathBuf) -> Result error::Result<()> { let serialized = serde_json::to_string(server_info).unwrap(); let mut file = File::create(file_path).unwrap(); diff --git a/rust/numaflow-core/src/sink.rs b/rust/numaflow-core/src/sink.rs index 59102213da..b641343c47 100644 --- a/rust/numaflow-core/src/sink.rs +++ b/rust/numaflow-core/src/sink.rs @@ -1,10 +1,10 @@ +use numaflow_pb::clients::sink::sink_client::SinkClient; use tokio::sync::{mpsc, oneshot}; use tonic::transport::Channel; +use user_defined::UserDefinedSink; use crate::config::config; use crate::message::{Message, ResponseFromSink}; -use numaflow_pb::clients::sink::sink_client::SinkClient; -use user_defined::UserDefinedSink; mod log; /// [User-Defined Sink] extends Numaflow to add custom sources supported outside the builtins. diff --git a/rust/numaflow-core/src/sink/user_defined.rs b/rust/numaflow-core/src/sink/user_defined.rs index 77d650a2cc..fae703257f 100644 --- a/rust/numaflow-core/src/sink/user_defined.rs +++ b/rust/numaflow-core/src/sink/user_defined.rs @@ -1,11 +1,10 @@ +use numaflow_pb::clients::sink::sink_client::SinkClient; +use numaflow_pb::clients::sink::{Handshake, SinkRequest, SinkResponse, TransmissionStatus}; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; use tonic::transport::Channel; use tonic::{Request, Streaming}; -use numaflow_pb::clients::sink::sink_client::SinkClient; -use numaflow_pb::clients::sink::{Handshake, SinkRequest, SinkResponse, TransmissionStatus}; - use crate::error; use crate::message::{Message, ResponseFromSink}; use crate::sink::Sink; @@ -111,18 +110,17 @@ impl Sink for UserDefinedSink { #[cfg(test)] mod tests { - use super::*; - use chrono::offset::Utc; use numaflow::sink; + use numaflow_pb::clients::sink::sink_client::SinkClient; use tokio::sync::mpsc; use tracing::info; + use super::*; use crate::error::Result; use crate::message::{Message, MessageID, Offset}; use crate::shared::utils::create_rpc_channel; use 
crate::sink::user_defined::UserDefinedSink; - use numaflow_pb::clients::sink::sink_client::SinkClient; struct Logger; #[tonic::async_trait] diff --git a/rust/numaflow-core/src/source/generator.rs b/rust/numaflow-core/src/source/generator.rs index b1ac56fe43..23a4f30926 100644 --- a/rust/numaflow-core/src/source/generator.rs +++ b/rust/numaflow-core/src/source/generator.rs @@ -23,12 +23,13 @@ use crate::source; /// ``` /// NOTE: The minimum granularity of duration is 10ms. mod stream_generator { - use bytes::Bytes; - use futures::Stream; - use pin_project::pin_project; use std::pin::Pin; use std::task::{Context, Poll}; use std::time::Duration; + + use bytes::Bytes; + use futures::Stream; + use pin_project::pin_project; use tokio::time::MissedTickBehavior; #[pin_project] @@ -109,9 +110,10 @@ mod stream_generator { #[cfg(test)] mod tests { - use super::*; use futures::StreamExt; + use super::*; + #[tokio::test] async fn test_stream_generator() { // Define the content to be generated @@ -264,10 +266,11 @@ impl reader::LagReader for GeneratorLagReader { #[cfg(test)] mod tests { + use tokio::time::Duration; + use super::*; use crate::reader::LagReader; use crate::source::{SourceAcker, SourceReader}; - use tokio::time::Duration; #[tokio::test] async fn test_generator_read() { diff --git a/rust/numaflow-core/src/source/user_defined.rs b/rust/numaflow-core/src/source/user_defined.rs index 2ff20dedb9..df66cf4d8e 100644 --- a/rust/numaflow-core/src/source/user_defined.rs +++ b/rust/numaflow-core/src/source/user_defined.rs @@ -1,13 +1,12 @@ -use tokio::sync::mpsc; -use tokio_stream::wrappers::ReceiverStream; -use tonic::transport::Channel; -use tonic::{Request, Streaming}; - use numaflow_pb::clients::source; use numaflow_pb::clients::source::source_client::SourceClient; use numaflow_pb::clients::source::{ read_request, AckRequest, AckResponse, ReadRequest, ReadResponse, }; +use tokio::sync::mpsc; +use tokio_stream::wrappers::ReceiverStream; +use tonic::transport::Channel; +use 
tonic::{Request, Streaming}; use crate::config::config; use crate::message::{Message, Offset}; @@ -232,18 +231,17 @@ impl LagReader for UserDefinedSourceLagReader { #[cfg(test)] mod tests { - use super::*; - use std::collections::HashSet; - use crate::shared::utils::create_rpc_channel; - use numaflow_pb::clients::source::source_client::SourceClient; - use chrono::Utc; use numaflow::source; use numaflow::source::{Message, Offset, SourceReadRequest}; + use numaflow_pb::clients::source::source_client::SourceClient; use tokio::sync::mpsc::Sender; + use super::*; + use crate::shared::utils::create_rpc_channel; + struct SimpleSource { num: usize, yet_to_ack: std::sync::RwLock>, diff --git a/rust/numaflow-core/src/transformer/user_defined.rs b/rust/numaflow-core/src/transformer/user_defined.rs index c5995caa58..bb57115076 100644 --- a/rust/numaflow-core/src/transformer/user_defined.rs +++ b/rust/numaflow-core/src/transformer/user_defined.rs @@ -1,5 +1,9 @@ use std::collections::HashMap; +use numaflow_pb::clients::sourcetransformer::{ + self, source_transform_client::SourceTransformClient, SourceTransformRequest, + SourceTransformResponse, +}; use tokio::sync::{mpsc, oneshot}; use tokio::task::JoinHandle; use tokio_stream::wrappers::ReceiverStream; @@ -8,11 +12,6 @@ use tonic::transport::Channel; use tonic::{Request, Streaming}; use tracing::warn; -use numaflow_pb::clients::sourcetransformer::{ - self, source_transform_client::SourceTransformClient, SourceTransformRequest, - SourceTransformResponse, -}; - use crate::config::config; use crate::error::{Error, Result}; use crate::message::{get_vertex_name, Message, MessageID, Offset}; @@ -232,13 +231,14 @@ mod tests { use std::error::Error; use std::time::Duration; - use crate::message::MessageID; - use crate::shared::utils::create_rpc_channel; - use crate::transformer::user_defined::SourceTransformHandle; use numaflow::sourcetransform; use 
numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; use tempfile::TempDir; + use crate::message::MessageID; + use crate::shared::utils::create_rpc_channel; + use crate::transformer::user_defined::SourceTransformHandle; + struct NowCat; #[tonic::async_trait] diff --git a/rust/servesink/src/lib.rs b/rust/servesink/src/lib.rs index 3c384b657f..371638360f 100644 --- a/rust/servesink/src/lib.rs +++ b/rust/servesink/src/lib.rs @@ -81,13 +81,15 @@ impl sink::Sinker for ServeSink { #[cfg(test)] mod tests { - use super::*; - use numaflow::sink::{SinkRequest, Sinker}; use std::collections::HashMap; + + use numaflow::sink::{SinkRequest, Sinker}; use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tokio::net::TcpListener; use tokio::sync::mpsc; + use super::*; + #[tokio::test] async fn test_serve_sink_without_url_header() { let serve_sink = ServeSink::new(); diff --git a/rust/serving/src/app.rs b/rust/serving/src/app.rs index ad8403e022..c2ba4c708d 100644 --- a/rust/serving/src/app.rs +++ b/rust/serving/src/app.rs @@ -280,15 +280,17 @@ async fn routes( #[cfg(test)] mod tests { - use super::*; - use crate::app::callback::store::memstore::InMemoryStore; - use crate::config::cert_key_pair; + use std::net::SocketAddr; + use async_nats::jetstream::stream; use axum::http::StatusCode; - use std::net::SocketAddr; use tokio::time::{sleep, Duration}; use tower::ServiceExt; + use super::*; + use crate::app::callback::store::memstore::InMemoryStore; + use crate::config::cert_key_pair; + #[tokio::test] async fn test_start_main_server() { let (cert, key) = cert_key_pair(); diff --git a/rust/serving/src/app/callback.rs b/rust/serving/src/app/callback.rs index 6ecbc87ccc..2fe7a2f6fe 100644 --- a/rust/serving/src/app/callback.rs +++ b/rust/serving/src/app/callback.rs @@ -1,14 +1,12 @@ use axum::{body::Bytes, extract::State, http::HeaderMap, routing, Json, Router}; use serde::{Deserialize, Serialize}; -use tracing::error; - use state::State as CallbackState; 
+use tracing::error; +use self::store::Store; use crate::app::response::ApiError; use crate::config; -use self::store::Store; - /// in-memory state store including connection tracking pub(crate) mod state; /// store for storing the state @@ -73,12 +71,11 @@ mod tests { use axum::http::StatusCode; use tower::ServiceExt; + use super::*; use crate::app::callback::store::memstore::InMemoryStore; use crate::app::tracker::MessageGraph; use crate::pipeline::min_pipeline_spec; - use super::*; - #[tokio::test] async fn test_callback_failure() { let store = InMemoryStore::new(); diff --git a/rust/serving/src/app/callback/state.rs b/rust/serving/src/app/callback/state.rs index aebf68d3a5..db145f5beb 100644 --- a/rust/serving/src/app/callback/state.rs +++ b/rust/serving/src/app/callback/state.rs @@ -5,12 +5,11 @@ use std::{ use tokio::sync::oneshot; +use super::store::Store; use crate::app::callback::{store::PayloadToSave, CallbackRequest}; use crate::app::tracker::MessageGraph; use crate::Error; -use super::store::Store; - struct RequestState { // Channel to notify when all callbacks for a message is received tx: oneshot::Sender>, @@ -235,11 +234,10 @@ where mod tests { use axum::body::Bytes; + use super::*; use crate::app::callback::store::memstore::InMemoryStore; use crate::pipeline::min_pipeline_spec; - use super::*; - #[tokio::test] async fn test_state() { let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec()).unwrap(); diff --git a/rust/serving/src/app/callback/store/memstore.rs b/rust/serving/src/app/callback/store/memstore.rs index b9ec5ffaf4..9ec9cfac9b 100644 --- a/rust/serving/src/app/callback/store/memstore.rs +++ b/rust/serving/src/app/callback/store/memstore.rs @@ -1,12 +1,11 @@ use std::collections::HashMap; use std::sync::Arc; +use super::PayloadToSave; use crate::app::callback::CallbackRequest; use crate::consts::SAVED; use crate::Error; -use super::PayloadToSave; - /// `InMemoryStore` is an in-memory implementation of the `Store` trait. 
/// It uses a `HashMap` to store data in memory. #[derive(Clone)] @@ -97,11 +96,10 @@ impl super::Store for InMemoryStore { mod tests { use std::sync::Arc; + use super::*; use crate::app::callback::store::{PayloadToSave, Store}; use crate::app::callback::CallbackRequest; - use super::*; - #[tokio::test] async fn test_save_and_retrieve_callbacks() { let mut store = InMemoryStore::new(); diff --git a/rust/serving/src/app/callback/store/redisstore.rs b/rust/serving/src/app/callback/store/redisstore.rs index 002f68b6c1..6e5decf880 100644 --- a/rust/serving/src/app/callback/store/redisstore.rs +++ b/rust/serving/src/app/callback/store/redisstore.rs @@ -1,12 +1,11 @@ use std::sync::Arc; +use backoff::retry::Retry; +use backoff::strategy::fixed; use redis::aio::ConnectionManager; use redis::RedisError; use tokio::sync::Semaphore; -use backoff::retry::Retry; -use backoff::strategy::fixed; - use super::PayloadToSave; use crate::app::callback::CallbackRequest; use crate::consts::SAVED; @@ -198,11 +197,11 @@ impl super::Store for RedisConnection { #[cfg(feature = "redis-tests")] #[cfg(test)] mod tests { - use crate::app::callback::store::LocalStore; use axum::body::Bytes; use redis::AsyncCommands; use super::*; + use crate::app::callback::store::LocalStore; #[tokio::test] async fn test_redis_store() { diff --git a/rust/serving/src/app/jetstream_proxy.rs b/rust/serving/src/app/jetstream_proxy.rs index dd80f40eda..8123197857 100644 --- a/rust/serving/src/app/jetstream_proxy.rs +++ b/rust/serving/src/app/jetstream_proxy.rs @@ -12,12 +12,11 @@ use axum::{ use tracing::error; use uuid::Uuid; +use super::callback::{state::State as CallbackState, store::Store}; use crate::app::callback::state; use crate::app::response::{ApiError, ServeResponse}; use crate::config; -use super::callback::{state::State as CallbackState, store::Store}; - // TODO: // - [ ] better health check // - [ ] jetstream connection pooling @@ -273,6 +272,7 @@ mod tests { use serde_json::{json, Value}; use 
tower::ServiceExt; + use super::*; use crate::app::callback::store::memstore::InMemoryStore; use crate::app::callback::store::PayloadToSave; use crate::app::callback::CallbackRequest; @@ -280,8 +280,6 @@ mod tests { use crate::pipeline::min_pipeline_spec; use crate::Error; - use super::*; - #[derive(Clone)] struct MockStore; diff --git a/rust/serving/src/app/message_path.rs b/rust/serving/src/app/message_path.rs index 54139566f8..933c58a815 100644 --- a/rust/serving/src/app/message_path.rs +++ b/rust/serving/src/app/message_path.rs @@ -3,10 +3,9 @@ use axum::{ routing, Router, }; -use crate::app::response::ApiError; - use super::callback::state::State as CallbackState; use super::callback::store::Store; +use crate::app::response::ApiError; pub fn get_message_path( callback_store: CallbackState, @@ -44,12 +43,11 @@ mod tests { use axum::http::StatusCode; use tower::ServiceExt; + use super::*; use crate::app::callback::store::memstore::InMemoryStore; use crate::app::tracker::MessageGraph; use crate::pipeline::min_pipeline_spec; - use super::*; - #[tokio::test] async fn test_message_path_not_present() { let store = InMemoryStore::new(); diff --git a/rust/serving/src/app/tracker.rs b/rust/serving/src/app/tracker.rs index 12420f948c..4714e2171b 100644 --- a/rust/serving/src/app/tracker.rs +++ b/rust/serving/src/app/tracker.rs @@ -237,9 +237,8 @@ impl MessageGraph { #[cfg(test)] mod tests { - use crate::pipeline::{Conditions, Tag, Vertex}; - use super::*; + use crate::pipeline::{Conditions, Tag, Vertex}; #[test] fn test_no_subgraph() { diff --git a/rust/serving/src/config.rs b/rust/serving/src/config.rs index eb3b9ac361..d3e3268f32 100644 --- a/rust/serving/src/config.rs +++ b/rust/serving/src/config.rs @@ -1,16 +1,18 @@ -use crate::Error::ParseConfig; -use crate::{Error, Result}; +use std::fmt::Debug; +use std::path::Path; +use std::{env, sync::OnceLock}; + use async_nats::rustls; use base64::prelude::BASE64_STANDARD; use base64::Engine; use config::Config; use 
rcgen::{generate_simple_self_signed, Certificate, CertifiedKey, KeyPair}; use serde::{Deserialize, Serialize}; -use std::fmt::Debug; -use std::path::Path; -use std::{env, sync::OnceLock}; use tracing::info; +use crate::Error::ParseConfig; +use crate::{Error, Result}; + const ENV_PREFIX: &str = "NUMAFLOW_SERVING"; const ENV_NUMAFLOW_SERVING_SOURCE_OBJECT: &str = "NUMAFLOW_SERVING_SOURCE_OBJECT"; const ENV_NUMAFLOW_SERVING_JETSTREAM_URL: &str = "NUMAFLOW_ISBSVC_JETSTREAM_URL"; diff --git a/rust/serving/src/lib.rs b/rust/serving/src/lib.rs index 1838fdb77c..6e27c53e34 100644 --- a/rust/serving/src/lib.rs +++ b/rust/serving/src/lib.rs @@ -1,11 +1,13 @@ +use std::net::SocketAddr; + +use axum_server::tls_rustls::RustlsConfig; +use tracing::info; + pub use self::error::{Error, Result}; use crate::app::start_main_server; use crate::config::{cert_key_pair, config}; use crate::metrics::start_https_metrics_server; use crate::pipeline::min_pipeline_spec; -use axum_server::tls_rustls::RustlsConfig; -use std::net::SocketAddr; -use tracing::info; mod app; mod config; diff --git a/rust/serving/src/metrics.rs b/rust/serving/src/metrics.rs index cd4277efa3..a9cd7a3492 100644 --- a/rust/serving/src/metrics.rs +++ b/rust/serving/src/metrics.rs @@ -1,4 +1,7 @@ -use crate::Error::MetricsServer; +use std::net::SocketAddr; +use std::sync::OnceLock; +use std::time::Instant; + use axum::body::Body; use axum::http::StatusCode; use axum::response::IntoResponse; @@ -15,11 +18,10 @@ use prometheus_client::metrics::counter::Counter; use prometheus_client::metrics::family::Family; use prometheus_client::metrics::histogram::{exponential_buckets, Histogram}; use prometheus_client::registry::Registry; -use std::net::SocketAddr; -use std::sync::OnceLock; -use std::time::Instant; use tracing::debug; +use crate::Error::MetricsServer; + // Define the labels for the metrics pub const SERVING_METHOD_LABEL: &str = "method"; pub const SERVING_PATH_LABEL: &str = "path"; @@ -158,14 +160,15 @@ mod tests { use 
std::net::SocketAddr; use std::time::Duration; - use super::*; - use crate::config::cert_key_pair; use axum::body::Body; use axum::http::{HeaderMap, StatusCode}; use axum::middleware; use tokio::time::sleep; use tower::ServiceExt; + use super::*; + use crate::config::cert_key_pair; + #[tokio::test] async fn test_start_metrics_server() { let (cert, key) = cert_key_pair(); diff --git a/rust/serving/src/pipeline.rs b/rust/serving/src/pipeline.rs index 429ff8849f..042e5923b4 100644 --- a/rust/serving/src/pipeline.rs +++ b/rust/serving/src/pipeline.rs @@ -1,12 +1,13 @@ use std::env; use std::sync::OnceLock; -use crate::Error::ParseConfig; use base64::prelude::BASE64_STANDARD; use base64::Engine; use numaflow_models::models::PipelineSpec; use serde::{Deserialize, Serialize}; +use crate::Error::ParseConfig; + const ENV_MIN_PIPELINE_SPEC: &str = "NUMAFLOW_SERVING_MIN_PIPELINE_SPEC"; pub fn min_pipeline_spec() -> &'static PipelineDCG { diff --git a/rust/src/bin/main.rs b/rust/src/bin/main.rs index 46a811e814..792e4f205f 100644 --- a/rust/src/bin/main.rs +++ b/rust/src/bin/main.rs @@ -1,4 +1,5 @@ use std::env; + use tracing::{error, info}; use tracing_subscriber::layer::SubscriberExt; use tracing_subscriber::util::SubscriberInitExt; From 9bd7e1b2925ad8714d86114618fe93f967d2b7fe Mon Sep 17 00:00:00 2001 From: Yashash H L Date: Thu, 17 Oct 2024 20:00:13 +0530 Subject: [PATCH 115/188] feat: check if the buffer is full before writing to ISB (#2166) Signed-off-by: Vigith Maurice Signed-off-by: Yashash H L Co-authored-by: Vigith Maurice --- rust/numaflow-core/src/config.rs | 55 +++ .../src/pipeline/isb/jetstream.rs | 70 ++-- .../src/pipeline/isb/jetstream/writer.rs | 339 +++++++++++++++--- 3 files changed, 384 insertions(+), 80 deletions(-) diff --git a/rust/numaflow-core/src/config.rs b/rust/numaflow-core/src/config.rs index f8b5f37c7d..15cf371c66 100644 --- a/rust/numaflow-core/src/config.rs +++ b/rust/numaflow-core/src/config.rs @@ -9,6 +9,7 @@ use 
numaflow_models::models::{Backoff, MonoVertex, RetryStrategy}; use crate::Error; +// TODO move constants to a separate module, separate consts for different components const DEFAULT_SOURCE_SOCKET: &str = "/var/run/numaflow/source.sock"; const DEFAULT_SOURCE_SERVER_INFO_FILE: &str = "/var/run/numaflow/sourcer-server-info"; const DEFAULT_SINK_SOCKET: &str = "/var/run/numaflow/sink.sock"; @@ -31,6 +32,60 @@ const DEFAULT_MAX_SINK_RETRY_ATTEMPTS: u16 = u16::MAX; const DEFAULT_SINK_RETRY_INTERVAL_IN_MS: u32 = 1; const DEFAULT_SINK_RETRY_ON_FAIL_STRATEGY: OnFailureStrategy = OnFailureStrategy::Retry; +/// Jetstream ISB related configurations. +pub mod jetstream { + use std::fmt; + use std::time::Duration; + + // jetstream related constants + const DEFAULT_PARTITION_IDX: u16 = 0; + const DEFAULT_MAX_LENGTH: usize = 30000; + const DEFAULT_USAGE_LIMIT: f64 = 0.8; + const DEFAULT_REFRESH_INTERVAL_SECS: u64 = 1; + const DEFAULT_BUFFER_FULL_STRATEGY: BufferFullStrategy = BufferFullStrategy::RetryUntilSuccess; + const DEFAULT_RETRY_TIMEOUT_MILLIS: u64 = 10; + + #[derive(Debug, Clone)] + pub(crate) struct StreamWriterConfig { + pub name: String, + pub partition_idx: u16, + pub max_length: usize, + pub refresh_interval: Duration, + pub usage_limit: f64, + pub buffer_full_strategy: BufferFullStrategy, + pub retry_timeout: Duration, + } + + impl Default for StreamWriterConfig { + fn default() -> Self { + StreamWriterConfig { + name: "default".to_string(), + partition_idx: DEFAULT_PARTITION_IDX, + max_length: DEFAULT_MAX_LENGTH, + usage_limit: DEFAULT_USAGE_LIMIT, + refresh_interval: Duration::from_secs(DEFAULT_REFRESH_INTERVAL_SECS), + buffer_full_strategy: DEFAULT_BUFFER_FULL_STRATEGY, + retry_timeout: Duration::from_millis(DEFAULT_RETRY_TIMEOUT_MILLIS), + } + } + } + + #[derive(Debug, Clone, Eq, PartialEq)] + pub(crate) enum BufferFullStrategy { + RetryUntilSuccess, + DiscardLatest, + } + + impl fmt::Display for BufferFullStrategy { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> 
fmt::Result { + match self { + BufferFullStrategy::RetryUntilSuccess => write!(f, "retryUntilSuccess"), + BufferFullStrategy::DiscardLatest => write!(f, "discardLatest"), + } + } + } +} + #[derive(Debug, PartialEq, Clone)] pub enum OnFailureStrategy { Retry, diff --git a/rust/numaflow-core/src/pipeline/isb/jetstream.rs b/rust/numaflow-core/src/pipeline/isb/jetstream.rs index f0a312043b..84092ad10a 100644 --- a/rust/numaflow-core/src/pipeline/isb/jetstream.rs +++ b/rust/numaflow-core/src/pipeline/isb/jetstream.rs @@ -3,20 +3,23 @@ use tokio::sync::mpsc::Receiver; use tokio::sync::{mpsc, oneshot}; use tokio_util::sync::CancellationToken; +use crate::config::jetstream::StreamWriterConfig; use crate::error::Error; use crate::message::Message; use crate::pipeline::isb::jetstream::writer::JetstreamWriter; use crate::Result; -/// Jetstream Writer is responsible for writing messages to Jetstream ISB. -/// it exposes both sync and async methods to write messages. +/// JetStream Writer is responsible for writing messages to JetStream ISB. +/// it exposes both sync and async methods to write messages. It has gates +/// to prevent writing into the buffer if the buffer is full. After successful +/// writes, it will let the callee know the status (or return a non-retryable +/// exception). pub(super) mod writer; /// ISB Writer accepts an Actor pattern based messages. #[derive(Debug)] struct ActorMessage { /// Write the messages to ISB - stream: &'static str, message: Message, /// once the message has been successfully written, we can let the sender know. /// This can be used to trigger Acknowledgement of the message from the Reader. 
@@ -25,16 +28,8 @@ struct ActorMessage { } impl ActorMessage { - fn new( - stream: &'static str, - message: Message, - callee_tx: oneshot::Sender>, - ) -> Self { - Self { - stream, - message, - callee_tx, - } + fn new(message: Message, callee_tx: oneshot::Sender>) -> Self { + Self { message, callee_tx } } } @@ -63,9 +58,7 @@ impl WriterActor { .message .try_into() .expect("message serialization should not fail"); - self.js_writer - .write(msg.stream, payload, msg.callee_tx) - .await + self.js_writer.write(payload, msg.callee_tx).await } async fn run(&mut self) { @@ -81,10 +74,15 @@ pub(crate) struct WriterHandle { } impl WriterHandle { - pub(super) fn new(js_ctx: Context, batch_size: usize, cancel_token: CancellationToken) -> Self { + pub(super) fn new( + config: StreamWriterConfig, + js_ctx: Context, + batch_size: usize, + cancel_token: CancellationToken, + ) -> Self { let (sender, receiver) = mpsc::channel::(batch_size); - let js_writer = JetstreamWriter::new(js_ctx, batch_size, cancel_token.clone()); + let js_writer = JetstreamWriter::new(config, js_ctx, batch_size, cancel_token.clone()); let mut actor = WriterActor::new(js_writer.clone(), receiver, cancel_token); tokio::spawn(async move { @@ -94,13 +92,9 @@ impl WriterHandle { Self { sender } } - pub(crate) async fn write( - &self, - stream: &'static str, - message: Message, - ) -> Result>> { + pub(crate) async fn write(&self, message: Message) -> Result>> { let (sender, receiver) = oneshot::channel(); - let msg = ActorMessage::new(stream, message, sender); + let msg = ActorMessage::new(message, sender); self.sender .send(msg) .await @@ -143,9 +137,14 @@ mod tests { .await .unwrap(); + let config = StreamWriterConfig { + name: stream_name.into(), + ..Default::default() + }; + // Create ISBMessageHandler let batch_size = 500; - let handler = WriterHandle::new(context.clone(), batch_size, cln_token.clone()); + let handler = WriterHandle::new(config, context.clone(), batch_size, cln_token.clone()); let mut 
result_receivers = Vec::new(); // Publish 500 messages @@ -167,7 +166,6 @@ mod tests { }; let (sender, receiver) = oneshot::channel(); let msg = ActorMessage { - stream: stream_name, message, callee_tx: sender, }; @@ -202,8 +200,13 @@ mod tests { .await .unwrap(); + let config = StreamWriterConfig { + name: stream_name.into(), + ..Default::default() + }; + let cancel_token = CancellationToken::new(); - let handler = WriterHandle::new(context.clone(), 500, cancel_token.clone()); + let handler = WriterHandle::new(config, context.clone(), 500, cancel_token.clone()); let mut receivers = Vec::new(); // Publish 100 messages successfully @@ -223,7 +226,7 @@ mod tests { }, headers: HashMap::new(), }; - receivers.push(handler.write(stream_name, message).await.unwrap()); + receivers.push(handler.write(message).await.unwrap()); } // Attempt to publish the 101th message, which should get stuck in the retry loop @@ -243,7 +246,7 @@ mod tests { }, headers: HashMap::new(), }; - let receiver = handler.write(stream_name, message).await.unwrap(); + let receiver = handler.write(message).await.unwrap(); receivers.push(receiver); // Cancel the token to exit the retry loop @@ -281,8 +284,13 @@ mod tests { .await .unwrap(); + let config = StreamWriterConfig { + name: stream_name.into(), + ..Default::default() + }; + let cancel_token = CancellationToken::new(); - let handler = WriterHandle::new(context.clone(), 500, cancel_token.clone()); + let handler = WriterHandle::new(config, context.clone(), 500, cancel_token.clone()); let (tx, mut rx) = mpsc::channel(100); let test_start_time = Instant::now(); @@ -309,7 +317,7 @@ mod tests { }, headers: HashMap::new(), }; - tx.send(handler.write(stream_name, message).await.unwrap()) + tx.send(handler.write(message).await.unwrap()) .await .unwrap(); sent_count += 1; diff --git a/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs b/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs index c77ae461a2..178c88708f 100644 --- 
a/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs +++ b/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs @@ -1,16 +1,21 @@ +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Arc; use std::time::Duration; +use async_nats::jetstream::consumer::PullConsumer; use async_nats::jetstream::context::PublishAckFuture; use async_nats::jetstream::publish::PublishAck; +use async_nats::jetstream::stream::RetentionPolicy::Limits; use async_nats::jetstream::Context; use bytes::Bytes; -use log::warn; use tokio::sync::mpsc::Receiver; use tokio::sync::{mpsc, oneshot}; use tokio::time::sleep; use tokio_util::sync::CancellationToken; use tracing::error; +use tracing::{debug, warn}; +use crate::config::jetstream::StreamWriterConfig; use crate::error::Error; use crate::Result; @@ -18,7 +23,9 @@ use crate::Result; /// Writes to JetStream ISB. Exposes both write and blocking methods to write messages. /// It accepts a cancellation token to stop infinite retries during shutdown. pub(super) struct JetstreamWriter { + config: StreamWriterConfig, js_ctx: Context, + is_full: Arc, paf_resolver_tx: mpsc::Sender, cancel_token: CancellationToken, } @@ -26,18 +33,33 @@ pub(super) struct JetstreamWriter { impl JetstreamWriter { /// Creates a JetStream Writer and a background task to make sure the Write futures (PAFs) are /// successful. Batch Size determines the maximum pending futures. 
- pub(super) fn new(js_ctx: Context, batch_size: usize, cancel_token: CancellationToken) -> Self { + pub(super) fn new( + config: StreamWriterConfig, + js_ctx: Context, + batch_size: usize, + cancel_token: CancellationToken, + ) -> Self { let (paf_resolver_tx, paf_resolver_rx) = mpsc::channel::(batch_size); let this = Self { + config, js_ctx, + is_full: Arc::new(AtomicBool::new(false)), paf_resolver_tx, cancel_token, }; - let mut resolver_actor = PafResolverActor::new(this.clone(), paf_resolver_rx); + // spawn a task for checking whether buffer is_full + tokio::task::spawn({ + let mut this = this.clone(); + async move { + this.check_stream_status().await; + } + }); + // spawn a task for resolving PAFs + let mut resolver_actor = PafResolverActor::new(this.clone(), paf_resolver_rx); tokio::spawn(async move { resolver_actor.run().await; }); @@ -45,30 +67,111 @@ impl JetstreamWriter { this } + /// Checks the buffer usage metrics (soft and solid usage) for a given stream. + /// If the usage is greater than the bufferUsageLimit, it sets the is_full flag to true. + async fn check_stream_status(&mut self) { + let mut interval = tokio::time::interval(self.config.refresh_interval); + loop { + tokio::select! { + _ = interval.tick() => { + match Self::fetch_buffer_usage(self.js_ctx.clone(), self.config.name.as_str(), self.config.max_length).await { + Ok((soft_usage, solid_usage)) => { + if solid_usage >= self.config.usage_limit && soft_usage >= self.config.usage_limit { + self.is_full.store(true, Ordering::Relaxed); + } else { + self.is_full.store(false, Ordering::Relaxed); + } + } + Err(e) => { + error!(?e, "Failed to fetch buffer usage, updating isFull to true"); + self.is_full.store(true, Ordering::Relaxed); + } + } + } + _ = self.cancel_token.cancelled() => { + return; + } + } + } + } + + /// Fetches the buffer usage metrics (soft and solid usage) for a given stream. 
+ /// + /// Soft Usage: + /// Formula: (NumPending + NumAckPending) / maxLength + /// - NumPending: The number of pending messages. + /// - NumAckPending: The number of messages that are in processing state(yet to be acked). + /// - maxLength: The maximum length of the buffer. + /// + /// Solid Usage: + /// Formula: + /// - If the stream's retention policy is LimitsPolicy: solidUsage = softUsage + /// - Otherwise: solidUsage = State.Msgs / maxLength + /// - State.Msgs: The total number of messages in the stream. + /// - maxLength: The maximum length of the buffer. + async fn fetch_buffer_usage( + js_ctx: Context, + stream_name: &str, + max_length: usize, + ) -> Result<(f64, f64)> { + let mut stream = js_ctx + .get_stream(stream_name) + .await + .map_err(|_| Error::ISB("Failed to get stream".to_string()))?; + + let stream_info = stream + .info() + .await + .map_err(|e| Error::ISB(format!("Failed to get the stream info {:?}", e)))?; + + let mut consumer: PullConsumer = js_ctx + .get_consumer_from_stream(stream_name, stream_name) + .await + .map_err(|e| Error::ISB(format!("Failed to get the consumer {:?}", e)))?; + + let consumer_info = consumer + .info() + .await + .map_err(|e| Error::ISB(format!("Failed to get the consumer info {:?}", e)))?; + + let soft_usage = (consumer_info.num_pending as f64 + consumer_info.num_ack_pending as f64) + / max_length as f64; + let solid_usage = if stream_info.config.retention == Limits { + soft_usage + } else { + stream_info.state.messages as f64 / max_length as f64 + }; + + Ok((soft_usage, solid_usage)) + } + /// Writes the message to the JetStream ISB and returns a future which can be /// awaited to get the PublishAck. It will do infinite retries until the message /// gets published successfully. 
If it returns an error it means it is fatal error - pub(super) async fn write( - &self, - stream: &'static str, - payload: Vec, - callee_tx: oneshot::Sender>, - ) { + pub(super) async fn write(&self, payload: Vec, callee_tx: oneshot::Sender>) { let js_ctx = self.js_ctx.clone(); - // FIXME: add gate for buffer-full check. - // loop till we get a PAF, there could be other reasons why PAFs cannot be created. let paf = loop { - match js_ctx.publish(stream, Bytes::from(payload.clone())).await { - Ok(paf) => { - break paf; - } - Err(e) => { - error!(?e, "publishing failed, retrying"); - sleep(Duration::from_millis(10)).await; + // let's write only if the buffer is not full + match self.is_full.load(Ordering::Relaxed) { + true => { + // FIXME: add metrics + debug!(%self.config.name, "buffer is full"); } + false => match js_ctx + .publish(self.config.name.clone(), Bytes::from(payload.clone())) + .await + { + Ok(paf) => { + break paf; + } + Err(e) => { + error!(?e, "publishing failed, retrying"); + } + }, } + // short-circuit out in failure mode if shutdown has been initiated if self.cancel_token.is_cancelled() { error!("Shutdown signal received, exiting write loop"); callee_tx @@ -76,13 +179,16 @@ impl JetstreamWriter { .unwrap(); return; } + + // FIXME: make it configurable + // sleep to avoid busy looping + sleep(Duration::from_millis(10)).await; }; // send the paf and callee_tx over self.paf_resolver_tx .send(ResolveAndPublishResult { paf, - stream, payload, callee_tx, }) @@ -93,15 +199,14 @@ impl JetstreamWriter { /// Writes the message to the JetStream ISB and returns the PublishAck. It will do /// infinite retries until the message gets published successfully. If it returns /// an error it means it is fatal non-retryable error. 
- pub(super) async fn blocking_write( - &self, - stream: &'static str, - payload: Vec, - ) -> Result { + pub(super) async fn blocking_write(&self, payload: Vec) -> Result { let js_ctx = self.js_ctx.clone(); loop { - match js_ctx.publish(stream, Bytes::from(payload.clone())).await { + match js_ctx + .publish(self.config.name.clone(), Bytes::from(payload.clone())) + .await + { Ok(paf) => match paf.await { Ok(ack) => { if ack.duplicate { @@ -113,12 +218,12 @@ impl JetstreamWriter { return Ok(ack); } Err(e) => { - error!("awaiting publish ack failed, retrying: {}", e); + error!(?e, "awaiting publish ack failed, retrying"); sleep(Duration::from_millis(10)).await; } }, Err(e) => { - error!("publishing failed, retrying: {}", e); + error!(?e, "publishing failed, retrying"); sleep(Duration::from_millis(10)).await; } } @@ -135,7 +240,6 @@ impl JetstreamWriter { #[derive(Debug)] pub(super) struct ResolveAndPublishResult { paf: PublishAckFuture, - stream: &'static str, payload: Vec, callee_tx: oneshot::Sender>, } @@ -164,11 +268,7 @@ impl PafResolverActor { Ok(ack) => result.callee_tx.send(Ok(ack.sequence)).unwrap(), Err(e) => { error!(?e, "Failed to resolve the future, trying blocking write"); - match self - .js_writer - .blocking_write(result.stream, result.payload.clone()) - .await - { + match self.js_writer.blocking_write(result.payload.clone()).await { Ok(ack) => result.callee_tx.send(Ok(ack.sequence)).unwrap(), Err(e) => result.callee_tx.send(Err(e)).unwrap(), } @@ -186,9 +286,10 @@ impl PafResolverActor { #[cfg(test)] mod tests { use std::collections::HashMap; + use std::time::Instant; use async_nats::jetstream; - use async_nats::jetstream::stream; + use async_nats::jetstream::{consumer, stream}; use chrono::Utc; use super::*; @@ -213,7 +314,12 @@ mod tests { .await .unwrap(); - let writer = JetstreamWriter::new(context.clone(), 500, cln_token.clone()); + let config = StreamWriterConfig { + name: stream_name.into(), + ..Default::default() + }; + + let writer = 
JetstreamWriter::new(config, context.clone(), 500, cln_token.clone()); let message = Message { keys: vec!["key_0".to_string()], @@ -232,9 +338,7 @@ mod tests { }; let (success_tx, success_rx) = oneshot::channel::>(); - writer - .write(stream_name, message.try_into().unwrap(), success_tx) - .await; + writer.write(message.try_into().unwrap(), success_tx).await; assert!(success_rx.await.is_ok()); context.delete_stream(stream_name).await.unwrap(); @@ -259,7 +363,12 @@ mod tests { .await .unwrap(); - let writer = JetstreamWriter::new(context.clone(), 500, cln_token.clone()); + let config = StreamWriterConfig { + name: stream_name.into(), + ..Default::default() + }; + + let writer = JetstreamWriter::new(config, context.clone(), 500, cln_token.clone()); let message = Message { keys: vec!["key_0".to_string()], @@ -277,9 +386,7 @@ mod tests { headers: HashMap::new(), }; - let result = writer - .blocking_write(stream_name, message.try_into().unwrap()) - .await; + let result = writer.blocking_write(message.try_into().unwrap()).await; assert!(result.is_ok()); let publish_ack = result.unwrap(); @@ -307,8 +414,13 @@ mod tests { .await .unwrap(); + let config = StreamWriterConfig { + name: stream_name.into(), + ..Default::default() + }; + let cancel_token = CancellationToken::new(); - let writer = JetstreamWriter::new(context.clone(), 500, cancel_token.clone()); + let writer = JetstreamWriter::new(config, context.clone(), 500, cancel_token.clone()); let mut result_receivers = Vec::new(); // Publish 10 messages successfully @@ -329,9 +441,7 @@ mod tests { headers: HashMap::new(), }; let (success_tx, success_rx) = oneshot::channel::>(); - writer - .write(stream_name, message.try_into().unwrap(), success_tx) - .await; + writer.write(message.try_into().unwrap(), success_tx).await; result_receivers.push(success_rx); } @@ -353,9 +463,7 @@ mod tests { headers: HashMap::new(), }; let (success_tx, success_rx) = oneshot::channel::>(); - writer - .write(stream_name, 
message.try_into().unwrap(), success_tx) - .await; + writer.write(message.try_into().unwrap(), success_tx).await; result_receivers.push(success_rx); // Cancel the token to exit the retry loop @@ -384,4 +492,137 @@ mod tests { context.delete_stream(stream_name).await.unwrap(); } + + #[cfg(feature = "nats-tests")] + #[tokio::test] + async fn test_fetch_buffer_usage() { + let js_url = "localhost:4222"; + // Create JetStream context + let client = async_nats::connect(js_url).await.unwrap(); + let context = jetstream::new(client); + + let stream_name = "test_fetch_buffer_usage"; + let _stream = context + .get_or_create_stream(stream::Config { + name: stream_name.into(), + subjects: vec![stream_name.into()], + max_messages: 1000, + max_message_size: 1024, + max_messages_per_subject: 1000, + retention: Limits, // Set retention policy to Limits for solid usage + ..Default::default() + }) + .await + .unwrap(); + + let _consumer = context + .create_consumer_strict_on_stream( + consumer::Config { + name: Some(stream_name.to_string()), + ack_policy: consumer::AckPolicy::Explicit, + ..Default::default() + }, + stream_name, + ) + .await + .unwrap(); + + let max_length = 100; + + // Publish messages to fill the buffer + for _ in 0..80 { + context + .publish(stream_name, Bytes::from("test message")) + .await + .unwrap(); + } + + // Fetch buffer usage + let (soft_usage, solid_usage) = + JetstreamWriter::fetch_buffer_usage(context.clone(), stream_name, max_length) + .await + .unwrap(); + + // Verify the buffer usage metrics + assert_eq!(soft_usage, 0.8); + assert_eq!(soft_usage, 0.8); + + // Clean up + context + .delete_consumer_from_stream(stream_name, stream_name) + .await + .unwrap(); + context.delete_stream(stream_name).await.unwrap(); + } + + #[cfg(feature = "nats-tests")] + #[tokio::test] + async fn test_check_stream_status() { + let js_url = "localhost:4222"; + // Create JetStream context + let client = async_nats::connect(js_url).await.unwrap(); + let context = 
jetstream::new(client); + + let config = StreamWriterConfig { + name: "test_check_stream_status".into(), + max_length: 100, + ..Default::default() + }; + let stream_name = "test_check_stream_status"; + let _stream = context + .get_or_create_stream(stream::Config { + name: stream_name.into(), + subjects: vec![stream_name.into()], + max_messages: 1000, + max_message_size: 1024, + max_messages_per_subject: 1000, + retention: Limits, // Set retention policy to Limits for solid usage + ..Default::default() + }) + .await + .unwrap(); + + let _consumer = context + .create_consumer_strict_on_stream( + consumer::Config { + name: Some(stream_name.to_string()), + ack_policy: consumer::AckPolicy::Explicit, + ..Default::default() + }, + stream_name, + ) + .await + .unwrap(); + + let cancel_token = CancellationToken::new(); + let writer = JetstreamWriter::new(config, context.clone(), 500, cancel_token.clone()); + + let mut js_writer = writer.clone(); + // Simulate the stream status check + tokio::spawn(async move { + js_writer.check_stream_status().await; + }); + + // Publish messages to fill the buffer, since max_length is 100, we need to publish 80 messages + for _ in 0..80 { + context + .publish(stream_name, Bytes::from("test message")) + .await + .unwrap(); + } + + let start_time = Instant::now(); + while !writer.is_full.load(Ordering::Relaxed) && start_time.elapsed().as_millis() < 1000 { + sleep(Duration::from_millis(5)).await; + } + + // Verify the is_full flag + assert!( + writer.is_full.load(Ordering::Relaxed), + "Buffer should be full after publishing messages" + ); + + // Clean up + context.delete_stream(stream_name).await.unwrap(); + } } From e8017cda1023740d3e75fe0286f76a31c3c2ce04 Mon Sep 17 00:00:00 2001 From: Yashash H L Date: Thu, 17 Oct 2024 21:36:31 +0530 Subject: [PATCH 116/188] chore: use enums for offset (#2169) Signed-off-by: Yashash H L Signed-off-by: Vigith Maurice Co-authored-by: Vigith Maurice --- rust/numaflow-core/src/config.rs | 6 +- 
rust/numaflow-core/src/message.rs | 229 +++++++++++++----- .../numaflow-core/src/monovertex/forwarder.rs | 16 +- .../src/pipeline/isb/jetstream.rs | 33 +-- .../src/pipeline/isb/jetstream/writer.rs | 53 ++-- rust/numaflow-core/src/sink/user_defined.rs | 10 +- rust/numaflow-core/src/source/generator.rs | 26 +- rust/numaflow-core/src/source/user_defined.rs | 4 +- .../src/transformer/user_defined.rs | 25 +- 9 files changed, 251 insertions(+), 151 deletions(-) diff --git a/rust/numaflow-core/src/config.rs b/rust/numaflow-core/src/config.rs index 15cf371c66..feaaa4146f 100644 --- a/rust/numaflow-core/src/config.rs +++ b/rust/numaflow-core/src/config.rs @@ -43,7 +43,7 @@ pub mod jetstream { const DEFAULT_USAGE_LIMIT: f64 = 0.8; const DEFAULT_REFRESH_INTERVAL_SECS: u64 = 1; const DEFAULT_BUFFER_FULL_STRATEGY: BufferFullStrategy = BufferFullStrategy::RetryUntilSuccess; - const DEFAULT_RETRY_TIMEOUT_MILLIS: u64 = 10; + const DEFAULT_RETRY_INTERVAL_MILLIS: u64 = 10; #[derive(Debug, Clone)] pub(crate) struct StreamWriterConfig { @@ -53,7 +53,7 @@ pub mod jetstream { pub refresh_interval: Duration, pub usage_limit: f64, pub buffer_full_strategy: BufferFullStrategy, - pub retry_timeout: Duration, + pub retry_interval: Duration, } impl Default for StreamWriterConfig { @@ -65,7 +65,7 @@ pub mod jetstream { usage_limit: DEFAULT_USAGE_LIMIT, refresh_interval: Duration::from_secs(DEFAULT_REFRESH_INTERVAL_SECS), buffer_full_strategy: DEFAULT_BUFFER_FULL_STRATEGY, - retry_timeout: Duration::from_millis(DEFAULT_RETRY_TIMEOUT_MILLIS), + retry_interval: Duration::from_millis(DEFAULT_RETRY_INTERVAL_MILLIS), } } } diff --git a/rust/numaflow-core/src/message.rs b/rust/numaflow-core/src/message.rs index f2af2e8739..a4cab12edd 100644 --- a/rust/numaflow-core/src/message.rs +++ b/rust/numaflow-core/src/message.rs @@ -21,6 +21,7 @@ use crate::Result; const NUMAFLOW_MONO_VERTEX_NAME: &str = "NUMAFLOW_MONO_VERTEX_NAME"; const NUMAFLOW_VERTEX_NAME: &str = "NUMAFLOW_VERTEX_NAME"; +const 
NUMAFLOW_REPLICA: &str = "NUMAFLOW_REPLICA"; static VERTEX_NAME: OnceLock = OnceLock::new(); @@ -32,6 +33,18 @@ pub(crate) fn get_vertex_name() -> &'static str { }) } +static VERTEX_REPLICA: OnceLock = OnceLock::new(); + +// fetch the vertex replica information from the environment variable +pub(crate) fn get_vertex_replica() -> &'static u16 { + VERTEX_REPLICA.get_or_init(|| { + env::var(NUMAFLOW_REPLICA) + .unwrap_or_default() + .parse() + .unwrap_or_default() + }) +} + /// A message that is sent from the source to the sink. #[derive(Debug, Clone, Serialize, Deserialize)] pub(crate) struct Message { @@ -39,8 +52,10 @@ pub(crate) struct Message { pub(crate) keys: Vec, /// actual payload of the message pub(crate) value: Vec, - /// offset of the message - pub(crate) offset: Offset, + /// offset of the message, it is optional because offset is only + /// available when we read the message, and we don't persist the + /// offset in the ISB. + pub(crate) offset: Option, /// event time of the message pub(crate) event_time: DateTime, /// id of the message @@ -51,16 +66,81 @@ pub(crate) struct Message { /// Offset of the message which will be used to acknowledge the message. #[derive(Debug, Clone, Serialize, Deserialize)] -pub(crate) struct Offset { - /// unique identifier of the message - pub(crate) offset: String, - /// partition id of the message - pub(crate) partition_id: i32, +pub(crate) enum Offset { + Int(IntOffset), + String(StringOffset), } impl fmt::Display for Offset { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}-{}", self.offset, self.partition_id) + match self { + Offset::Int(offset) => write!(f, "{}", offset), + Offset::String(offset) => write!(f, "{}", offset), + } + } +} + +/// IntOffset is integer based offset enum type. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct IntOffset { + offset: u64, + partition_idx: u16, +} + +impl IntOffset { + pub fn new(seq: u64, partition_idx: u16) -> Self { + Self { + offset: seq, + partition_idx, + } + } +} + +impl IntOffset { + fn sequence(&self) -> Result { + Ok(self.offset) + } + + fn partition_idx(&self) -> u16 { + self.partition_idx + } +} + +impl fmt::Display for IntOffset { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}-{}", self.offset, self.partition_idx) + } +} + +/// StringOffset is string based offset enum type. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct StringOffset { + offset: String, + partition_idx: u16, +} + +impl StringOffset { + pub fn new(seq: String, partition_idx: u16) -> Self { + Self { + offset: seq, + partition_idx, + } + } +} + +impl StringOffset { + fn sequence(&self) -> Result { + Ok(self.offset.parse().unwrap()) + } + + fn partition_idx(&self) -> u16 { + self.partition_idx + } +} + +impl fmt::Display for StringOffset { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}-{}", self.offset, self.partition_idx) } } @@ -81,24 +161,49 @@ impl MessageID { } } +impl From for MessageID { + fn from(id: numaflow_pb::objects::isb::MessageId) -> Self { + Self { + vertex_name: id.vertex_name, + offset: id.offset, + index: id.index, + } + } +} + +impl From for numaflow_pb::objects::isb::MessageId { + fn from(id: MessageID) -> Self { + Self { + vertex_name: id.vertex_name, + offset: id.offset, + index: id.index, + } + } +} + impl fmt::Display for MessageID { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}-{}-{}", self.vertex_name, self.offset, self.index) } } -impl From for AckRequest { - fn from(offset: Offset) -> Self { - Self { - request: Some(numaflow_pb::clients::source::ack_request::Request { - offset: Some(numaflow_pb::clients::source::Offset { - offset: BASE64_STANDARD - .decode(offset.offset) - .expect("we control 
the encoding, so this should never fail"), - partition_id: offset.partition_id, +impl TryFrom for AckRequest { + type Error = Error; + + fn try_from(value: Offset) -> std::result::Result { + match value { + Offset::Int(_) => Err(Error::Source("IntOffset not supported".to_string())), + Offset::String(o) => Ok(Self { + request: Some(numaflow_pb::clients::source::ack_request::Request { + offset: Some(numaflow_pb::clients::source::Offset { + offset: BASE64_STANDARD + .decode(o.offset) + .expect("we control the encoding, so this should never fail"), + partition_id: o.partition_idx as i32, + }), }), + handshake: None, }), - handshake: None, } } } @@ -114,11 +219,7 @@ impl TryFrom for Vec { is_late: false, // Set this according to your logic }), kind: numaflow_pb::objects::isb::MessageKind::Data as i32, - id: Some(numaflow_pb::objects::isb::MessageId { - vertex_name: get_vertex_name().to_string(), - offset: message.offset.to_string(), - index: 0, - }), + id: Some(message.id.into()), keys: message.keys.clone(), headers: message.headers.clone(), }), @@ -156,16 +257,9 @@ impl TryFrom> for Message { Ok(Message { keys: header.keys, value: body.payload, - offset: Offset { - offset: id.offset.clone(), - partition_id: 0, // Set this according to your logic - }, + offset: None, event_time: utc_from_timestamp(message_info.event_time), - id: MessageID { - vertex_name: id.vertex_name, - offset: id.offset, - index: id.index, - }, + id: id.into(), headers: header.headers, }) } @@ -196,21 +290,21 @@ impl TryFrom for Message { fn try_from(result: read_response::Result) -> Result { let source_offset = match result.offset { - Some(o) => Offset { + Some(o) => Offset::String(StringOffset { offset: BASE64_STANDARD.encode(o.offset), - partition_id: o.partition_id, - }, + partition_idx: o.partition_id as u16, + }), None => return Err(Error::Source("Offset not found".to_string())), }; Ok(Message { keys: result.keys, value: result.payload, - offset: source_offset.clone(), + offset: 
Some(source_offset.clone()), event_time: utc_from_timestamp(result.event_time), id: MessageID { vertex_name: get_vertex_name().to_string(), - offset: source_offset.offset, + offset: source_offset.to_string(), index: 0, }, headers: result.headers, @@ -311,10 +405,10 @@ mod tests { #[test] fn test_offset_display() { - let offset = Offset { + let offset = Offset::String(StringOffset { offset: "123".to_string(), - partition_id: 1, - }; + partition_idx: 1, + }); assert_eq!(format!("{}", offset), "123-1"); } @@ -330,11 +424,11 @@ mod tests { #[test] fn test_offset_to_ack_request() { - let offset = Offset { + let offset = Offset::String(StringOffset { offset: BASE64_STANDARD.encode("123"), - partition_id: 1, - }; - let ack_request: AckRequest = offset.into(); + partition_idx: 1, + }); + let ack_request: AckRequest = offset.try_into().unwrap(); assert_eq!(ack_request.request.unwrap().offset.unwrap().partition_id, 1); } @@ -343,10 +437,10 @@ mod tests { let message = Message { keys: vec!["key1".to_string()], value: vec![1, 2, 3], - offset: Offset { + offset: Some(Offset::String(StringOffset { offset: "123".to_string(), - partition_id: 0, - }, + partition_idx: 0, + })), event_time: Utc.timestamp_opt(1627846261, 0).unwrap(), id: MessageID { vertex_name: "vertex".to_string(), @@ -366,11 +460,7 @@ mod tests { is_late: false, }), kind: numaflow_pb::objects::isb::MessageKind::Data as i32, - id: Some(MessageId { - vertex_name: get_vertex_name().to_string(), - offset: message.offset.to_string(), - index: 0, - }), + id: Some(message.id.into()), keys: message.keys.clone(), headers: message.headers.clone(), }), @@ -415,7 +505,6 @@ mod tests { let message = result.unwrap(); assert_eq!(message.keys, vec!["key1".to_string()]); assert_eq!(message.value, vec![1, 2, 3]); - assert_eq!(message.offset.offset, "123"); assert_eq!( message.event_time, Utc.timestamp_opt(1627846261, 0).unwrap() @@ -427,10 +516,10 @@ mod tests { let message = Message { keys: vec!["key1".to_string()], value: vec![1, 
2, 3], - offset: Offset { + offset: Some(Offset::String(StringOffset { offset: "123".to_string(), - partition_id: 0, - }, + partition_idx: 0, + })), event_time: Utc.timestamp_opt(1627846261, 0).unwrap(), id: MessageID { vertex_name: "vertex".to_string(), @@ -476,10 +565,10 @@ mod tests { let message = Message { keys: vec!["key1".to_string()], value: vec![1, 2, 3], - offset: Offset { + offset: Some(Offset::String(StringOffset { offset: "123".to_string(), - partition_id: 0, - }, + partition_idx: 0, + })), event_time: Utc.timestamp_opt(1627846261, 0).unwrap(), id: MessageID { vertex_name: "vertex".to_string(), @@ -523,4 +612,26 @@ mod tests { assert_eq!(response.id, "123"); assert_eq!(response.status, ResponseStatusFromSink::Success); } + + #[test] + fn test_message_id_from_proto() { + let proto_id = MessageId { + vertex_name: "vertex".to_string(), + offset: "123".to_string(), + index: 0, + }; + let message_id: MessageID = proto_id.into(); + assert_eq!(message_id.vertex_name, "vertex"); + assert_eq!(message_id.offset, "123"); + assert_eq!(message_id.index, 0); + } + + #[test] + fn test_message_id_to_proto() { + let message_id = MessageID::new("vertex".to_string(), "123".to_string(), 0); + let proto_id: MessageId = message_id.into(); + assert_eq!(proto_id.vertex_name, "vertex"); + assert_eq!(proto_id.offset, "123"); + assert_eq!(proto_id.index, 0); + } } diff --git a/rust/numaflow-core/src/monovertex/forwarder.rs b/rust/numaflow-core/src/monovertex/forwarder.rs index 45326231b6..bd3253fea5 100644 --- a/rust/numaflow-core/src/monovertex/forwarder.rs +++ b/rust/numaflow-core/src/monovertex/forwarder.rs @@ -146,14 +146,18 @@ impl Forwarder { .get_or_create(&self.common_labels) .inc_by(msg_count); - let (offsets, bytes_count): (Vec, u64) = messages.iter().fold( + let (offsets, bytes_count): (Vec, u64) = messages.iter().try_fold( (Vec::with_capacity(messages.len()), 0), |(mut offsets, mut bytes_count), msg| { - offsets.push(msg.offset.clone()); - bytes_count += 
msg.value.len() as u64; - (offsets, bytes_count) + if let Some(offset) = &msg.offset { + offsets.push(offset.clone()); + bytes_count += msg.value.len() as u64; + Ok((offsets, bytes_count)) + } else { + Err(Error::Forwarder("Message offset is missing".to_string())) + } }, - ); + )?; forward_metrics() .read_bytes_total @@ -448,7 +452,7 @@ impl Forwarder { // and keep only the failed messages to send again // construct the error map for the failed messages messages_to_send.retain(|msg| { - if let Some(result) = result_map.get(&msg.offset.to_string()) { + if let Some(result) = result_map.get(&msg.id.to_string()) { return match result { ResponseStatusFromSink::Success => false, ResponseStatusFromSink::Failed(err_msg) => { diff --git a/rust/numaflow-core/src/pipeline/isb/jetstream.rs b/rust/numaflow-core/src/pipeline/isb/jetstream.rs index 84092ad10a..5e7890a97a 100644 --- a/rust/numaflow-core/src/pipeline/isb/jetstream.rs +++ b/rust/numaflow-core/src/pipeline/isb/jetstream.rs @@ -5,7 +5,7 @@ use tokio_util::sync::CancellationToken; use crate::config::jetstream::StreamWriterConfig; use crate::error::Error; -use crate::message::Message; +use crate::message::{Message, Offset}; use crate::pipeline::isb::jetstream::writer::JetstreamWriter; use crate::Result; @@ -24,11 +24,11 @@ struct ActorMessage { /// once the message has been successfully written, we can let the sender know. /// This can be used to trigger Acknowledgement of the message from the Reader. 
// FIXME: concrete type and better name - callee_tx: oneshot::Sender>, + callee_tx: oneshot::Sender>, } impl ActorMessage { - fn new(message: Message, callee_tx: oneshot::Sender>) -> Self { + fn new(message: Message, callee_tx: oneshot::Sender>) -> Self { Self { message, callee_tx } } } @@ -92,7 +92,10 @@ impl WriterHandle { Self { sender } } - pub(crate) async fn write(&self, message: Message) -> Result>> { + pub(crate) async fn write( + &self, + message: Message, + ) -> Result>> { let (sender, receiver) = oneshot::channel(); let msg = ActorMessage::new(message, sender); self.sender @@ -116,7 +119,7 @@ mod tests { use tokio::time::Instant; use super::*; - use crate::message::{Message, MessageID, Offset}; + use crate::message::{Message, MessageID}; #[cfg(feature = "nats-tests")] #[tokio::test] @@ -152,10 +155,7 @@ mod tests { let message = Message { keys: vec![format!("key_{}", i)], value: format!("message {}", i).as_bytes().to_vec(), - offset: Offset { - offset: format!("offset_{}", i), - partition_id: i, - }, + offset: None, event_time: Utc::now(), id: MessageID { vertex_name: "vertex".to_string(), @@ -214,10 +214,7 @@ mod tests { let message = Message { keys: vec![format!("key_{}", i)], value: format!("message {}", i).as_bytes().to_vec(), - offset: Offset { - offset: format!("offset_{}", i), - partition_id: 0, - }, + offset: None, event_time: Utc::now(), id: MessageID { vertex_name: "vertex".to_string(), @@ -234,10 +231,7 @@ mod tests { let message = Message { keys: vec!["key_101".to_string()], value: vec![0; 1024], - offset: Offset { - offset: "offset_101".to_string(), - partition_id: 0, - }, + offset: None, event_time: Utc::now(), id: MessageID { vertex_name: "vertex".to_string(), @@ -305,10 +299,7 @@ mod tests { let message = Message { keys: vec![format!("key_{}", i)], value: format!("message {}", i).as_bytes().to_vec(), - offset: Offset { - offset: format!("offset_{}", i), - partition_id: i, - }, + offset: None, event_time: Utc::now(), id: MessageID { 
vertex_name: "".to_string(), diff --git a/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs b/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs index 178c88708f..0e3324ac4e 100644 --- a/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs +++ b/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs @@ -17,6 +17,7 @@ use tracing::{debug, warn}; use crate::config::jetstream::StreamWriterConfig; use crate::error::Error; +use crate::message::{IntOffset, Offset}; use crate::Result; #[derive(Clone, Debug)] @@ -148,7 +149,7 @@ impl JetstreamWriter { /// Writes the message to the JetStream ISB and returns a future which can be /// awaited to get the PublishAck. It will do infinite retries until the message /// gets published successfully. If it returns an error it means it is fatal error - pub(super) async fn write(&self, payload: Vec, callee_tx: oneshot::Sender>) { + pub(super) async fn write(&self, payload: Vec, callee_tx: oneshot::Sender>) { let js_ctx = self.js_ctx.clone(); // loop till we get a PAF, there could be other reasons why PAFs cannot be created. 
@@ -158,6 +159,7 @@ impl JetstreamWriter { true => { // FIXME: add metrics debug!(%self.config.name, "buffer is full"); + // FIXME: consider buffer-full strategy } false => match js_ctx .publish(self.config.name.clone(), Bytes::from(payload.clone())) @@ -180,9 +182,8 @@ impl JetstreamWriter { return; } - // FIXME: make it configurable // sleep to avoid busy looping - sleep(Duration::from_millis(10)).await; + sleep(self.config.retry_interval).await; }; // send the paf and callee_tx over @@ -224,7 +225,7 @@ impl JetstreamWriter { }, Err(e) => { error!(?e, "publishing failed, retrying"); - sleep(Duration::from_millis(10)).await; + sleep(self.config.retry_interval).await; } } if self.cancel_token.is_cancelled() { @@ -241,7 +242,7 @@ impl JetstreamWriter { pub(super) struct ResolveAndPublishResult { paf: PublishAckFuture, payload: Vec, - callee_tx: oneshot::Sender>, + callee_tx: oneshot::Sender>, } /// Resolves the PAF from the write call, if not successful it will do a blocking write so that @@ -265,11 +266,23 @@ impl PafResolverActor { /// not successfully resolve, it will do blocking write till write to JetStream succeeds. 
async fn successfully_resolve_paf(&mut self, result: ResolveAndPublishResult) { match result.paf.await { - Ok(ack) => result.callee_tx.send(Ok(ack.sequence)).unwrap(), + Ok(ack) => result + .callee_tx + .send(Ok(Offset::Int(IntOffset::new( + ack.sequence, + self.js_writer.config.partition_idx, + )))) + .unwrap(), Err(e) => { error!(?e, "Failed to resolve the future, trying blocking write"); match self.js_writer.blocking_write(result.payload.clone()).await { - Ok(ack) => result.callee_tx.send(Ok(ack.sequence)).unwrap(), + Ok(ack) => result + .callee_tx + .send(Ok(Offset::Int(IntOffset::new( + ack.sequence, + self.js_writer.config.partition_idx, + )))) + .unwrap(), Err(e) => result.callee_tx.send(Err(e)).unwrap(), } } @@ -324,10 +337,7 @@ mod tests { let message = Message { keys: vec!["key_0".to_string()], value: "message 0".as_bytes().to_vec(), - offset: Offset { - offset: "offset_0".to_string(), - partition_id: 0, - }, + offset: None, event_time: Utc::now(), id: MessageID { vertex_name: "vertex".to_string(), @@ -337,7 +347,7 @@ mod tests { headers: HashMap::new(), }; - let (success_tx, success_rx) = oneshot::channel::>(); + let (success_tx, success_rx) = oneshot::channel::>(); writer.write(message.try_into().unwrap(), success_tx).await; assert!(success_rx.await.is_ok()); @@ -373,10 +383,7 @@ mod tests { let message = Message { keys: vec!["key_0".to_string()], value: "message 0".as_bytes().to_vec(), - offset: Offset { - offset: "offset_0".to_string(), - partition_id: 1, - }, + offset: None, event_time: Utc::now(), id: MessageID { vertex_name: "vertex".to_string(), @@ -428,10 +435,7 @@ mod tests { let message = Message { keys: vec![format!("key_{}", i)], value: format!("message {}", i).as_bytes().to_vec(), - offset: Offset { - offset: format!("offset_{}", i), - partition_id: i, - }, + offset: None, event_time: Utc::now(), id: MessageID { vertex_name: "vertex".to_string(), @@ -440,7 +444,7 @@ mod tests { }, headers: HashMap::new(), }; - let (success_tx, success_rx) = 
oneshot::channel::>(); + let (success_tx, success_rx) = oneshot::channel::>(); writer.write(message.try_into().unwrap(), success_tx).await; result_receivers.push(success_rx); } @@ -450,10 +454,7 @@ mod tests { let message = Message { keys: vec!["key_11".to_string()], value: vec![0; 1025], - offset: Offset { - offset: "offset_11".to_string(), - partition_id: 11, - }, + offset: None, event_time: Utc::now(), id: MessageID { vertex_name: "vertex".to_string(), @@ -462,7 +463,7 @@ mod tests { }, headers: HashMap::new(), }; - let (success_tx, success_rx) = oneshot::channel::>(); + let (success_tx, success_rx) = oneshot::channel::>(); writer.write(message.try_into().unwrap(), success_tx).await; result_receivers.push(success_rx); diff --git a/rust/numaflow-core/src/sink/user_defined.rs b/rust/numaflow-core/src/sink/user_defined.rs index fae703257f..8d2d227800 100644 --- a/rust/numaflow-core/src/sink/user_defined.rs +++ b/rust/numaflow-core/src/sink/user_defined.rs @@ -174,10 +174,7 @@ mod tests { Message { keys: vec![], value: b"Hello, World!".to_vec(), - offset: Offset { - offset: "1".to_string(), - partition_id: 0, - }, + offset: None, event_time: Utc::now(), headers: Default::default(), id: MessageID { @@ -189,10 +186,7 @@ mod tests { Message { keys: vec![], value: b"Hello, World!".to_vec(), - offset: Offset { - offset: "2".to_string(), - partition_id: 0, - }, + offset: None, event_time: Utc::now(), headers: Default::default(), id: MessageID { diff --git a/rust/numaflow-core/src/source/generator.rs b/rust/numaflow-core/src/source/generator.rs index 23a4f30926..61ee3d346e 100644 --- a/rust/numaflow-core/src/source/generator.rs +++ b/rust/numaflow-core/src/source/generator.rs @@ -3,7 +3,9 @@ use std::time::Duration; use bytes::Bytes; use futures::StreamExt; -use crate::message::{Message, MessageID, Offset}; +use crate::message::{ + get_vertex_name, get_vertex_replica, Message, MessageID, Offset, StringOffset, +}; use crate::reader; use crate::source; @@ -208,18 +210,18 @@ 
impl source::SourceReader for GeneratorRead { .unwrap_or_default() .to_string(); + let offset = + Offset::String(StringOffset::new(id.clone(), *get_vertex_replica())); + Message { keys: vec![], value: msg.clone().to_vec(), // FIXME: better offset? - offset: Offset { - offset: id.clone(), - partition_id: 0, - }, + offset: Some(offset.clone()), event_time: Default::default(), id: MessageID { - vertex_name: Default::default(), - offset: id, + vertex_name: get_vertex_name().to_string(), + offset: offset.to_string(), index: Default::default(), }, headers: Default::default(), @@ -315,14 +317,8 @@ mod tests { // Create a vector of offsets to acknowledge let offsets = vec![ - Offset { - offset: "offset1".to_string(), - partition_id: 0, - }, - Offset { - offset: "offset2".to_string(), - partition_id: 1, - }, + Offset::String(StringOffset::new("offset1".to_string(), 0)), + Offset::String(StringOffset::new("offset2".to_string(), 0)), ]; // Call the ack method and check the result diff --git a/rust/numaflow-core/src/source/user_defined.rs b/rust/numaflow-core/src/source/user_defined.rs index df66cf4d8e..69be3d9a3b 100644 --- a/rust/numaflow-core/src/source/user_defined.rs +++ b/rust/numaflow-core/src/source/user_defined.rs @@ -186,7 +186,7 @@ impl SourceAcker for UserDefinedSourceAck { // send n ack requests for offset in offsets { - let request = offset.into(); + let request = offset.try_into()?; self.ack_tx .send(request) .await @@ -331,7 +331,7 @@ mod tests { assert_eq!(messages.len(), 5); let response = src_ack - .ack(messages.iter().map(|m| m.offset.clone()).collect()) + .ack(messages.iter().map(|m| m.offset.clone().unwrap()).collect()) .await; assert!(response.is_ok()); diff --git a/rust/numaflow-core/src/transformer/user_defined.rs b/rust/numaflow-core/src/transformer/user_defined.rs index bb57115076..035addcbb5 100644 --- a/rust/numaflow-core/src/transformer/user_defined.rs +++ b/rust/numaflow-core/src/transformer/user_defined.rs @@ -91,7 +91,10 @@ impl 
SourceTransformer { tracker.insert( message.id.to_string(), MessageInfo { - offset: message.offset.clone(), + offset: message + .offset + .clone() + .ok_or(Error::Transformer("Message offset is missing".to_string()))?, headers: message.headers.clone(), }, ); @@ -173,7 +176,7 @@ impl SourceTransformer { }, keys: result.keys, value: result.value, - offset: msg_info.offset.clone(), + offset: None, event_time: utc_from_timestamp(result.event_time), headers: msg_info.headers.clone(), }; @@ -235,7 +238,7 @@ mod tests { use numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; use tempfile::TempDir; - use crate::message::MessageID; + use crate::message::{MessageID, StringOffset}; use crate::shared::utils::create_rpc_channel; use crate::transformer::user_defined::SourceTransformHandle; @@ -283,10 +286,10 @@ mod tests { let message = crate::message::Message { keys: vec!["first".into()], value: "hello".into(), - offset: crate::message::Offset { - partition_id: 0, - offset: "0".into(), - }, + offset: Some(crate::message::Offset::String(StringOffset::new( + "0".to_string(), + 0, + ))), event_time: chrono::Utc::now(), id: MessageID { vertex_name: "vertex_name".to_string(), @@ -362,10 +365,10 @@ mod tests { let message = crate::message::Message { keys: vec!["second".into()], value: "hello".into(), - offset: crate::message::Offset { - partition_id: 0, - offset: "0".into(), - }, + offset: Some(crate::message::Offset::String(StringOffset::new( + "0".to_string(), + 0, + ))), event_time: chrono::Utc::now(), id: MessageID { vertex_name: "vertex_name".to_string(), From 252cf4319aa8f42f9f91e47368d3e08651bb0893 Mon Sep 17 00:00:00 2001 From: Yashash H L Date: Thu, 17 Oct 2024 22:41:33 +0530 Subject: [PATCH 117/188] chore: adding unit tests for offset enum (#2170) Signed-off-by: Yashash H L --- rust/numaflow-core/src/message.rs | 41 +++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/rust/numaflow-core/src/message.rs 
b/rust/numaflow-core/src/message.rs index a4cab12edd..88483e8908 100644 --- a/rust/numaflow-core/src/message.rs +++ b/rust/numaflow-core/src/message.rs @@ -430,6 +430,16 @@ mod tests { }); let ack_request: AckRequest = offset.try_into().unwrap(); assert_eq!(ack_request.request.unwrap().offset.unwrap().partition_id, 1); + + let offset = Offset::Int(IntOffset::new(42, 1)); + let result: Result = offset.try_into(); + + // Assert that the conversion results in an error + assert!(result.is_err()); + + if let Err(e) = result { + assert_eq!(e.to_string(), "Source Error - IntOffset not supported"); + } } #[test] @@ -634,4 +644,35 @@ mod tests { assert_eq!(proto_id.offset, "123"); assert_eq!(proto_id.index, 0); } + + #[test] + fn test_offset_cases() { + let int_offset = IntOffset::new(42, 1); + assert_eq!(int_offset.offset, 42); + assert_eq!(int_offset.partition_idx, 1); + assert_eq!(format!("{}", int_offset), "42-1"); + + let string_offset = StringOffset::new("42".to_string(), 1); + assert_eq!(string_offset.offset, "42"); + assert_eq!(string_offset.partition_idx, 1); + assert_eq!(format!("{}", string_offset), "42-1"); + + let offset_int = Offset::Int(int_offset); + assert_eq!(format!("{}", offset_int), "42-1"); + + let offset_string = Offset::String(string_offset); + assert_eq!(format!("{}", offset_string), "42-1"); + + // Test conversion from Offset to AckRequest for StringOffset + let offset = Offset::String(StringOffset::new(BASE64_STANDARD.encode("42"), 1)); + let result: Result = offset.try_into(); + assert!(result.is_ok()); + let ack_request = result.unwrap(); + assert_eq!(ack_request.request.unwrap().offset.unwrap().partition_id, 1); + + // Test conversion from Offset to AckRequest for IntOffset (should fail) + let offset = Offset::Int(IntOffset::new(42, 1)); + let result: Result = offset.try_into(); + assert!(result.is_err()); + } } From dc137c24b3cc842c8a3e048fa928bc2a54f4d759 Mon Sep 17 00:00:00 2001 From: Sreekanth Date: Thu, 17 Oct 2024 22:48:37 +0530 Subject: 
[PATCH 118/188] feat: blackhole sink for Monovertex (#2167) Signed-off-by: Sreekanth --- rust/numaflow-core/src/config.rs | 15 +++++ rust/numaflow-core/src/message.rs | 1 + rust/numaflow-core/src/monovertex.rs | 4 ++ rust/numaflow-core/src/sink.rs | 11 ++++ rust/numaflow-core/src/sink/blackhole.rs | 77 ++++++++++++++++++++++++ rust/numaflow-core/src/sink/log.rs | 59 ++++++++++++++++++ 6 files changed, 167 insertions(+) create mode 100644 rust/numaflow-core/src/sink/blackhole.rs diff --git a/rust/numaflow-core/src/config.rs b/rust/numaflow-core/src/config.rs index feaaa4146f..dcf563dc8b 100644 --- a/rust/numaflow-core/src/config.rs +++ b/rust/numaflow-core/src/config.rs @@ -149,6 +149,7 @@ pub struct Settings { pub udsource_config: Option, pub udsink_config: Option, pub logsink_config: Option<()>, + pub blackhole_config: Option, pub fallback_config: Option, pub generator_config: Option, } @@ -231,6 +232,10 @@ impl Default for GeneratorConfig { } } +/// Configuration for the [BlackholeSink](crate::sink::blackhole::BlackholeSink) +#[derive(Default, Debug, Clone)] +pub struct BlackholeConfig {} + impl Default for Settings { fn default() -> Self { // Create a default retry strategy from defined constants @@ -259,6 +264,7 @@ impl Default for Settings { udsource_config: None, udsink_config: Default::default(), logsink_config: None, + blackhole_config: None, fallback_config: None, generator_config: None, } @@ -346,6 +352,15 @@ impl Settings { _ => None, }; + settings.blackhole_config = mono_vertex_obj + .spec + .sink + .as_deref() + .ok_or(Error::Config("Sink not found".to_string()))? 
+ .blackhole + .as_deref() + .map(|_| BlackholeConfig::default()); + settings.generator_config = match mono_vertex_obj .spec .source diff --git a/rust/numaflow-core/src/message.rs b/rust/numaflow-core/src/message.rs index 88483e8908..6208b43f94 100644 --- a/rust/numaflow-core/src/message.rs +++ b/rust/numaflow-core/src/message.rs @@ -342,6 +342,7 @@ pub(crate) enum ResponseStatusFromSink { } /// Sink will give a response per [Message]. +#[derive(Debug, PartialEq)] pub(crate) struct ResponseFromSink { /// Unique id per [Message]. We need to track per [Message] status. pub(crate) id: String, diff --git a/rust/numaflow-core/src/monovertex.rs b/rust/numaflow-core/src/monovertex.rs index 0f0ec1a40a..621778da39 100644 --- a/rust/numaflow-core/src/monovertex.rs +++ b/rust/numaflow-core/src/monovertex.rs @@ -251,6 +251,10 @@ async fn fetch_sink( let log = SinkHandle::new(SinkClientType::Log).await?; return Ok((log, fb_sink)); } + if settings.blackhole_config.is_some() { + let blackhole = SinkHandle::new(SinkClientType::Blackhole).await?; + return Ok((blackhole, fb_sink)); + } Err(Error::Config( "No valid Sink configuration found".to_string(), )) diff --git a/rust/numaflow-core/src/sink.rs b/rust/numaflow-core/src/sink.rs index b641343c47..1c109a36f5 100644 --- a/rust/numaflow-core/src/sink.rs +++ b/rust/numaflow-core/src/sink.rs @@ -6,6 +6,7 @@ use user_defined::UserDefinedSink; use crate::config::config; use crate::message::{Message, ResponseFromSink}; +mod blackhole; mod log; /// [User-Defined Sink] extends Numaflow to add custom sources supported outside the builtins. 
/// @@ -64,6 +65,7 @@ pub(crate) struct SinkHandle { pub(crate) enum SinkClientType { Log, + Blackhole, UserDefined(SinkClient), } @@ -80,6 +82,15 @@ impl SinkHandle { } }); } + SinkClientType::Blackhole => { + let blackhole_sink = blackhole::BlackholeSink; + tokio::spawn(async { + let mut actor = SinkActor::new(receiver, blackhole_sink); + while let Some(msg) = actor.actor_messages.recv().await { + actor.handle_message(msg).await; + } + }); + } SinkClientType::UserDefined(sink_client) => { let sink = UserDefinedSink::new(sink_client).await?; tokio::spawn(async { diff --git a/rust/numaflow-core/src/sink/blackhole.rs b/rust/numaflow-core/src/sink/blackhole.rs new file mode 100644 index 0000000000..1f9fad639f --- /dev/null +++ b/rust/numaflow-core/src/sink/blackhole.rs @@ -0,0 +1,77 @@ +use super::Sink; +use crate::message::{Message, ResponseFromSink, ResponseStatusFromSink}; + +/// Blackhole is a sink to emulate /dev/null +pub struct BlackholeSink; + +impl Sink for BlackholeSink { + async fn sink(&mut self, messages: Vec) -> crate::Result> { + let output = messages + .into_iter() + .map(|msg| ResponseFromSink { + status: ResponseStatusFromSink::Success, + id: msg.id.to_string(), + }) + .collect(); + Ok(output) + } +} + +#[cfg(test)] +mod tests { + use chrono::Utc; + + use super::BlackholeSink; + use crate::{ + message::{Message, MessageID, Offset, ResponseFromSink, ResponseStatusFromSink}, + sink::Sink, + }; + + #[tokio::test] + async fn test_black_hole() { + let mut sink = BlackholeSink; + let messages = vec![ + Message { + keys: vec![], + value: b"Hello, World!".to_vec(), + offset: Offset { + offset: "1".to_string(), + partition_id: 0, + }, + event_time: Utc::now(), + headers: Default::default(), + id: MessageID { + vertex_name: "vertex".to_string(), + offset: "1".to_string(), + index: 0, + }, + }, + Message { + keys: vec![], + value: b"Hello, World!".to_vec(), + offset: Offset { + offset: "2".to_string(), + partition_id: 0, + }, + event_time: Utc::now(), + 
headers: Default::default(), + id: MessageID { + vertex_name: "vertex".to_string(), + offset: "2".to_string(), + index: 1, + }, + }, + ]; + + let expected_responses = messages + .iter() + .map(|msg| ResponseFromSink { + status: ResponseStatusFromSink::Success, + id: msg.id.to_string(), + }) + .collect::>(); + + let responses = sink.sink(messages).await.unwrap(); + assert_eq!(responses, expected_responses); + } +} diff --git a/rust/numaflow-core/src/sink/log.rs b/rust/numaflow-core/src/sink/log.rs index 454b43da66..3a978b1d29 100644 --- a/rust/numaflow-core/src/sink/log.rs +++ b/rust/numaflow-core/src/sink/log.rs @@ -32,3 +32,62 @@ impl Sink for LogSink { Ok(result) } } + +#[cfg(test)] +mod tests { + use chrono::Utc; + + use super::LogSink; + use crate::{ + message::{Message, MessageID, Offset, ResponseFromSink, ResponseStatusFromSink}, + sink::Sink, + }; + + #[tokio::test] + async fn test_log_sink() { + let mut sink = LogSink; + let messages = vec![ + Message { + keys: vec![], + value: b"Hello, World!".to_vec(), + offset: Offset { + offset: "1".to_string(), + partition_id: 0, + }, + event_time: Utc::now(), + headers: Default::default(), + id: MessageID { + vertex_name: "vertex".to_string(), + offset: "1".to_string(), + index: 0, + }, + }, + Message { + keys: vec![], + value: b"Hello, World!".to_vec(), + offset: Offset { + offset: "2".to_string(), + partition_id: 0, + }, + event_time: Utc::now(), + headers: Default::default(), + id: MessageID { + vertex_name: "vertex".to_string(), + offset: "2".to_string(), + index: 1, + }, + }, + ]; + + let expected_responses = messages + .iter() + .map(|msg| ResponseFromSink { + status: ResponseStatusFromSink::Success, + id: msg.id.to_string(), + }) + .collect::>(); + + let responses = sink.sink(messages).await.unwrap(); + assert_eq!(responses, expected_responses); + } +} From 187398ccd1569316ad7303cdc86f7faed98e1eb1 Mon Sep 17 00:00:00 2001 From: Vigith Maurice Date: Thu, 17 Oct 2024 10:50:00 -0700 Subject: [PATCH 119/188] fix: 
main branch, offset type got updated (#2171) Signed-off-by: Vigith Maurice --- rust/numaflow-core/src/sink/blackhole.rs | 11 +++-------- rust/numaflow-core/src/sink/log.rs | 11 +++-------- 2 files changed, 6 insertions(+), 16 deletions(-) diff --git a/rust/numaflow-core/src/sink/blackhole.rs b/rust/numaflow-core/src/sink/blackhole.rs index 1f9fad639f..d4828ca1f3 100644 --- a/rust/numaflow-core/src/sink/blackhole.rs +++ b/rust/numaflow-core/src/sink/blackhole.rs @@ -22,6 +22,7 @@ mod tests { use chrono::Utc; use super::BlackholeSink; + use crate::message::IntOffset; use crate::{ message::{Message, MessageID, Offset, ResponseFromSink, ResponseStatusFromSink}, sink::Sink, @@ -34,10 +35,7 @@ mod tests { Message { keys: vec![], value: b"Hello, World!".to_vec(), - offset: Offset { - offset: "1".to_string(), - partition_id: 0, - }, + offset: Some(Offset::Int(IntOffset::new(1, 0))), event_time: Utc::now(), headers: Default::default(), id: MessageID { @@ -49,10 +47,7 @@ mod tests { Message { keys: vec![], value: b"Hello, World!".to_vec(), - offset: Offset { - offset: "2".to_string(), - partition_id: 0, - }, + offset: Some(Offset::Int(IntOffset::new(1, 0))), event_time: Utc::now(), headers: Default::default(), id: MessageID { diff --git a/rust/numaflow-core/src/sink/log.rs b/rust/numaflow-core/src/sink/log.rs index 3a978b1d29..be6a89755d 100644 --- a/rust/numaflow-core/src/sink/log.rs +++ b/rust/numaflow-core/src/sink/log.rs @@ -38,6 +38,7 @@ mod tests { use chrono::Utc; use super::LogSink; + use crate::message::IntOffset; use crate::{ message::{Message, MessageID, Offset, ResponseFromSink, ResponseStatusFromSink}, sink::Sink, @@ -50,10 +51,7 @@ mod tests { Message { keys: vec![], value: b"Hello, World!".to_vec(), - offset: Offset { - offset: "1".to_string(), - partition_id: 0, - }, + offset: Some(Offset::Int(IntOffset::new(1, 0))), event_time: Utc::now(), headers: Default::default(), id: MessageID { @@ -65,10 +63,7 @@ mod tests { Message { keys: vec![], value: b"Hello, 
World!".to_vec(), - offset: Offset { - offset: "2".to_string(), - partition_id: 0, - }, + offset: Some(Offset::Int(IntOffset::new(1, 0))), event_time: Utc::now(), headers: Default::default(), id: MessageID { From 0cc495a7ead63a73bc099be608912f215a7164c8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martin=20Th=C3=B8gersen?= Date: Fri, 18 Oct 2024 08:01:22 +0200 Subject: [PATCH 120/188] chore: add beumer group to USERS (#2168) --- USERS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/USERS.md b/USERS.md index 002bcc4440..174024b561 100644 --- a/USERS.md +++ b/USERS.md @@ -8,3 +8,4 @@ Please add your company name and initial use case (optional) below. 4. [Valegachain Analytics](https://www.valegachain.com/) Numaflow is used to extract, transform, and load cryptocurrency blocks and mempool transactions in data lakes, as well as for activity alerts. 5. [Lockheed Martin](https://lockheedmartin.com/) Perform ELT processing on high and low volume data streams of sensor data as recieved from IOT type systems. 6. [Seekr](https://www.seekr.com/) Numaflow coordinates multiple ML pipelines to rate and extract information from the pipeline input. +7. [BEUMER Group](https://www.beumergroup.com/) - Signal processing for IoT-sensors, providing predictive maintenance solutions for the Intra-logistics sector. 
From 5fdb7fb4b7570edde75729d92ea459d3934739ec Mon Sep 17 00:00:00 2001 From: Sreekanth Date: Fri, 18 Oct 2024 22:05:21 +0530 Subject: [PATCH 121/188] chore: Make TickGen compatible with current builtin (#2162) Signed-off-by: Sreekanth Signed-off-by: Vigith Maurice Co-authored-by: Vigith Maurice --- rust/Cargo.lock | 1 + rust/numaflow-core/Cargo.toml | 1 + rust/numaflow-core/src/config.rs | 58 +++- rust/numaflow-core/src/monovertex.rs | 15 +- rust/numaflow-core/src/source/generator.rs | 302 ++++++++++++++++----- 5 files changed, 299 insertions(+), 78 deletions(-) diff --git a/rust/Cargo.lock b/rust/Cargo.lock index f1105db300..c069cd81f0 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -1657,6 +1657,7 @@ dependencies = [ "prometheus-client", "prost", "prost-types", + "rand", "rcgen", "rustls 0.23.14", "semver", diff --git a/rust/numaflow-core/Cargo.toml b/rust/numaflow-core/Cargo.toml index c17f8e83ad..9a7678ff67 100644 --- a/rust/numaflow-core/Cargo.toml +++ b/rust/numaflow-core/Cargo.toml @@ -41,6 +41,7 @@ kube = "0.95.0" log = "0.4.22" futures = "0.3.30" pin-project = "1.1.5" +rand = "0.8.5" async-nats = "0.37.0" [dev-dependencies] diff --git a/rust/numaflow-core/src/config.rs b/rust/numaflow-core/src/config.rs index dcf563dc8b..d7b4973719 100644 --- a/rust/numaflow-core/src/config.rs +++ b/rust/numaflow-core/src/config.rs @@ -1,10 +1,12 @@ -use std::env; use std::fmt::Display; use std::sync::OnceLock; +use std::{env, time::Duration}; use base64::prelude::BASE64_STANDARD; use base64::Engine; use bytes::Bytes; +use tracing::warn; + use numaflow_models::models::{Backoff, MonoVertex, RetryStrategy}; use crate::Error; @@ -148,7 +150,7 @@ pub struct Settings { pub transformer_config: Option, pub udsource_config: Option, pub udsink_config: Option, - pub logsink_config: Option<()>, + pub logsink_config: Option, pub blackhole_config: Option, pub fallback_config: Option, pub generator_config: Option, @@ -188,6 +190,9 @@ impl Default for UDSourceConfig { } } 
+#[derive(Debug, Clone, Default)] +pub struct LogSinkConfig; + #[derive(Debug, Clone)] pub struct UDSinkConfig { pub grpc_max_message_size: usize, @@ -220,14 +225,22 @@ pub struct GeneratorConfig { pub rpu: usize, pub content: Bytes, pub duration: usize, + pub value: Option, + pub key_count: u8, + pub msg_size_bytes: u32, + pub jitter: Duration, } impl Default for GeneratorConfig { fn default() -> Self { Self { rpu: 1, - content: bytes::Bytes::from("5"), + content: Bytes::new(), duration: 1000, + value: None, + key_count: 0, + msg_size_bytes: 8, + jitter: Duration::from_secs(0), } } } @@ -375,6 +388,22 @@ impl Settings { if let Some(value_blob) = &generator_source.value_blob { config.content = Bytes::from(value_blob.clone()); } + match &generator_source.value_blob { + Some(value) => { + config.content = Bytes::from(value.clone()); + } + None => { + if let Some(msg_size) = generator_source.msg_size { + if msg_size < 0 { + warn!("'msgSize' can not be negative, using default value (8 bytes)"); + } else { + config.msg_size_bytes = msg_size as u32; + } + } + + config.value = generator_source.value; + } + } if let Some(rpu) = generator_source.rpu { config.rpu = rpu as usize; @@ -384,11 +413,34 @@ impl Settings { config.duration = std::time::Duration::from(d).as_millis() as usize; } + if let Some(key_count) = generator_source.key_count { + if key_count > u8::MAX as i32 { + warn!( + "Capping the key count to {}, provided value is {key_count}", + u8::MAX + ); + } + config.key_count = std::cmp::min(key_count, u8::MAX as i32) as u8; + } + + if let Some(jitter) = generator_source.jitter { + config.jitter = std::time::Duration::from(jitter); + } + Some(config) } None => None, }; + settings.logsink_config = mono_vertex_obj + .spec + .sink + .as_deref() + .ok_or(Error::Config("Sink not found".to_string()))? 
+ .log + .as_deref() + .map(|_| LogSinkConfig); + if let Some(retry_strategy) = mono_vertex_obj .spec .sink diff --git a/rust/numaflow-core/src/monovertex.rs b/rust/numaflow-core/src/monovertex.rs index 621778da39..e78b816087 100644 --- a/rust/numaflow-core/src/monovertex.rs +++ b/rust/numaflow-core/src/monovertex.rs @@ -1,15 +1,14 @@ -use std::time::Duration; - use forwarder::ForwarderBuilder; use metrics::UserDefinedContainerState; use numaflow_pb::clients::sink::sink_client::SinkClient; use numaflow_pb::clients::source::source_client::SourceClient; use numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; + use tokio::signal; use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; use tonic::transport::Channel; -use tracing::info; +use tracing::{error, info}; use crate::config::{config, Settings}; use crate::error::{self, Error}; @@ -45,7 +44,7 @@ pub async fn mono_vertex() -> error::Result<()> { // Run the forwarder with cancellation token. 
if let Err(e) = start_forwarder(cln_token, config()).await { - tracing::error!("Application error: {:?}", e); + error!("Application error: {:?}", e); // abort the signal handler task since we have an error and we are shutting down if !shutdown_handle.is_finished() { @@ -217,12 +216,8 @@ async fn fetch_source( // now that we know it is not a user-defined source, it has to be a built-in if let Some(generator_config) = &config.generator_config { - let (source_read, source_ack, lag_reader) = new_generator( - generator_config.content.clone(), - generator_config.rpu, - config.batch_size as usize, - Duration::from_millis(generator_config.duration as u64), - )?; + let (source_read, source_ack, lag_reader) = + new_generator(generator_config.clone(), config.batch_size as usize)?; Ok(SourceType::Generator(source_read, source_ack, lag_reader)) } else { Err(Error::Config("No valid source configuration found".into())) diff --git a/rust/numaflow-core/src/source/generator.rs b/rust/numaflow-core/src/source/generator.rs index 61ee3d346e..e8d80c4318 100644 --- a/rust/numaflow-core/src/source/generator.rs +++ b/rust/numaflow-core/src/source/generator.rs @@ -1,11 +1,7 @@ -use std::time::Duration; - -use bytes::Bytes; use futures::StreamExt; -use crate::message::{ - get_vertex_name, get_vertex_replica, Message, MessageID, Offset, StringOffset, -}; +use crate::config; +use crate::message::{Message, Offset}; use crate::reader; use crate::source; @@ -32,7 +28,14 @@ mod stream_generator { use bytes::Bytes; use futures::Stream; use pin_project::pin_project; + use rand::Rng; use tokio::time::MissedTickBehavior; + use tracing::warn; + + use crate::config; + use crate::message::{ + get_vertex_name, get_vertex_replica, Message, MessageID, Offset, StringOffset, + }; #[pin_project] pub(super) struct StreamGenerator { @@ -45,44 +48,167 @@ mod stream_generator { /// the amount of credits used for the current time-period. 
/// remaining = (rpu - used) for that time-period used: usize, + /// const int data to be send in the payload if provided by the user. + /// If `content` is present, this will be ignored. + /// This is a simple way used by users to test Reduce feature. + value: Option, + /// total message size to be created, will be padded with random u8. Size is + /// only an approximation. + msg_size_bytes: u32, + /// Vary the event-time of the messages to produce some out-of-orderliness. It is in + /// seconds granularity. + jitter: Duration, + /// keys to be used for the messages and the current index in the list + /// All possible keys are generated in the constructor. + /// The index is incremented (treating key list as cyclic) when a message is generated. + keys: (Vec, usize), #[pin] tick: tokio::time::Interval, } impl StreamGenerator { - pub(super) fn new(content: Bytes, rpu: usize, batch: usize, unit: Duration) -> Self { - let mut tick = tokio::time::interval(unit); + pub(super) fn new(cfg: config::GeneratorConfig, batch_size: usize) -> Self { + let mut tick = tokio::time::interval(Duration::from_millis(cfg.duration as u64)); tick.set_missed_tick_behavior(MissedTickBehavior::Skip); + let mut rpu = cfg.rpu; + // Key count cannot be more than RPU. + // If rpu is not a multiple of the key_count, we floor the rpu to the nearest multiple of key_count + // We cap the key_count to u8::MAX in config.rs + let key_count = std::cmp::min(cfg.key_count as usize, cfg.rpu) as u8; + if key_count != cfg.key_count { + warn!( + "Specified KeyCount({}) is higher than RPU ({}). KeyCount is changed to {}", + cfg.key_count, cfg.rpu, key_count + ); + } + if key_count > 0 && rpu % key_count as usize != 0 { + let new_rpu = rpu - (rpu % key_count as usize); + warn!(rpu, key_count, "Specified RPU is not a multiple of the KeyCount. This may lead to uneven distribution of messages across keys. 
RPUs will be adjusted to {}", new_rpu); + rpu = new_rpu; + } + + // Generate all possible keys + let keys = (0..key_count) + .map(|i| format!("key-{}-{}", config::config().replica, i)) + .collect(); + Self { - content, + content: cfg.content, rpu, // batch cannot > rpu - batch: if batch > rpu { rpu } else { batch }, + batch: std::cmp::min(cfg.rpu, batch_size), used: 0, tick, + value: cfg.value, + msg_size_bytes: cfg.msg_size_bytes, + keys: (keys, 0), + jitter: cfg.jitter, + } + } + + /// Generates a similar payload as the Go implementation. + /// This is only needed if the user has not specified `valueBlob` in the generator source configuration in the pipeline + fn generate_payload(&self, value: i64) -> Vec { + #[derive(serde::Serialize)] + struct Data { + value: i64, + // only to ensure a desired message size + #[serde(skip_serializing_if = "Vec::is_empty")] + padding: Vec, + } + + let padding: Vec = (self.msg_size_bytes > 8) + .then(|| { + let size = self.msg_size_bytes - 8; + let mut bytes = vec![0; size as usize]; + rand::thread_rng().fill(&mut bytes[..]); + bytes + }) + .unwrap_or_default(); + + let data = Data { value, padding }; + serde_json::to_vec(&data).unwrap() + } + + /// we have a global array of prepopulated keys, we just have to fetch the next in line. + /// to fetch the next one, we idx++ whenever we fetch. + /// This will be a single element vector at the most. + fn next_key_to_be_fetched(&mut self) -> Vec { + let idx = self.keys.1; + // fetches the next key from the predefined set of keys. + match self.keys.0.get(idx) { + Some(key) => { + self.keys.1 = (idx + 1) % self.keys.0.len(); + vec![key.clone()] + } + None => vec![], + } + } + + /// creates a single message that can be returned by the generator. 
+ fn create_message(&mut self) -> Message { + let id = chrono::Utc::now() + .timestamp_nanos_opt() + .unwrap_or_default() + .to_string(); + + let offset = Offset::String(StringOffset::new(id.clone(), *get_vertex_replica())); + + // rng.gen_range(0..0) panics with "cannot sample empty range" + // rng.gen_range(0..1) will always produce 0 + let jitter = self.jitter.as_secs().max(1); + let event_time = + chrono::Utc::now() - Duration::from_secs(rand::thread_rng().gen_range(0..jitter)); + let mut data = self.content.to_vec(); + if data.is_empty() { + let value = match self.value { + Some(v) => v, + None => event_time.timestamp_nanos_opt().unwrap_or_default(), + }; + data = self.generate_payload(value); + } + + Message { + keys: self.next_key_to_be_fetched(), + value: data, + offset: Some(offset.clone()), + event_time, + id: MessageID { + vertex_name: get_vertex_name().to_string(), + offset: offset.to_string(), + index: Default::default(), + }, + headers: Default::default(), } } + + /// generates a set of messages to be returned. + fn generate_messages(&mut self, count: usize) -> Vec { + let mut data = Vec::with_capacity(count); + for _ in 0..count { + data.push(self.create_message()); + } + data + } } impl Stream for StreamGenerator { - type Item = Vec; + type Item = Vec; fn poll_next( mut self: Pin<&mut StreamGenerator>, cx: &mut Context<'_>, ) -> Poll> { let mut this = self.as_mut().project(); - match this.tick.poll_tick(cx) { // Poll::Ready means we are ready to send data the whole batch since enough time // has passed. 
Poll::Ready(_) => { - // generate data that equals to batch data - let data = vec![this.content.clone(); *this.batch]; - // reset used quota *this.used = *this.batch; - + let count = self.batch; + let data = self.generate_messages(count); + // reset used quota Poll::Ready(Some(data)) } Poll::Pending => { @@ -94,8 +220,8 @@ mod stream_generator { // update the counters *this.used += to_send; - - Poll::Ready(Some(vec![this.content.clone(); to_send])) + let data = self.generate_messages(to_send); + Poll::Ready(Some(data)) } else { Poll::Pending } @@ -121,25 +247,31 @@ mod stream_generator { // Define the content to be generated let content = Bytes::from("test_data"); // Define requests per unit (rpu), batch size, and time unit - let rpu = 10; let batch = 6; - let unit = Duration::from_millis(100); + let rpu = 10; + let cfg = config::GeneratorConfig { + content: content.clone(), + rpu, + jitter: Duration::from_millis(0), + duration: 100, + ..Default::default() + }; // Create a new StreamGenerator - let mut stream_generator = StreamGenerator::new(content.clone(), rpu, batch, unit); + let mut stream_generator = StreamGenerator::new(cfg, batch); // Collect the first batch of data let first_batch = stream_generator.next().await.unwrap(); assert_eq!(first_batch.len(), batch); for item in first_batch { - assert_eq!(item, content); + assert_eq!(item.value, content); } // Collect the second batch of data let second_batch = stream_generator.next().await.unwrap(); assert_eq!(second_batch.len(), rpu - batch); for item in second_batch { - assert_eq!(item, content); + assert_eq!(item.value, content); } // no there is no more data left in the quota @@ -150,7 +282,7 @@ mod stream_generator { let third_batch = stream_generator.next().await.unwrap(); assert_eq!(third_batch.len(), 6); for item in third_batch { - assert_eq!(item, content); + assert_eq!(item.value, content); } // we should now have data @@ -158,6 +290,26 @@ mod stream_generator { assert_eq!(size.0, 4); assert_eq!(size.1, 
Some(rpu)); } + + #[tokio::test] + async fn test_stream_generator_config() { + let cfg = config::GeneratorConfig { + rpu: 33, + key_count: 7, + ..Default::default() + }; + + let stream_generator = StreamGenerator::new(cfg, 50); + assert_eq!(stream_generator.rpu, 28); + + let cfg = config::GeneratorConfig { + rpu: 3, + key_count: 7, + ..Default::default() + }; + let stream_generator = StreamGenerator::new(cfg, 30); + assert_eq!(stream_generator.keys.0.len(), 3); + } } } @@ -166,12 +318,10 @@ mod stream_generator { /// source to generate some messages. We mainly use generator for load testing and integration /// testing of Numaflow. The load generated is per replica. pub(crate) fn new_generator( - content: Bytes, - rpu: usize, - batch: usize, - unit: Duration, + cfg: config::GeneratorConfig, + batch_size: usize, ) -> crate::Result<(GeneratorRead, GeneratorAck, GeneratorLagReader)> { - let gen_read = GeneratorRead::new(content, rpu, batch, unit); + let gen_read = GeneratorRead::new(cfg, batch_size); let gen_ack = GeneratorAck::new(); let gen_lag_reader = GeneratorLagReader::new(); @@ -185,8 +335,8 @@ pub(crate) struct GeneratorRead { impl GeneratorRead { /// A new [GeneratorRead] is returned. It takes a static content, requests per unit-time, batch size /// to return per [source::SourceReader::read], and the unit-time as duration. 
- fn new(content: Bytes, rpu: usize, batch: usize, unit: Duration) -> Self { - let stream_generator = stream_generator::StreamGenerator::new(content, rpu, batch, unit); + fn new(cfg: config::GeneratorConfig, batch_size: usize) -> Self { + let stream_generator = stream_generator::StreamGenerator::new(cfg.clone(), batch_size); Self { stream_generator } } } @@ -197,38 +347,10 @@ impl source::SourceReader for GeneratorRead { } async fn read(&mut self) -> crate::error::Result> { - match self.stream_generator.next().await { - None => { - panic!("Stream generator has stopped"); - } - Some(data) => Ok(data - .iter() - .map(|msg| { - // FIXME: better id? - let id = chrono::Utc::now() - .timestamp_nanos_opt() - .unwrap_or_default() - .to_string(); - - let offset = - Offset::String(StringOffset::new(id.clone(), *get_vertex_replica())); - - Message { - keys: vec![], - value: msg.clone().to_vec(), - // FIXME: better offset? - offset: Some(offset.clone()), - event_time: Default::default(), - id: MessageID { - vertex_name: get_vertex_name().to_string(), - offset: offset.to_string(), - index: Default::default(), - }, - headers: Default::default(), - } - }) - .collect::>()), - } + let Some(messages) = self.stream_generator.next().await else { + panic!("Stream generator has stopped"); + }; + Ok(messages) } fn partitions(&self) -> Vec { @@ -268,9 +390,11 @@ impl reader::LagReader for GeneratorLagReader { #[cfg(test)] mod tests { + use bytes::Bytes; use tokio::time::Duration; use super::*; + use crate::message::StringOffset; use crate::reader::LagReader; use crate::source::{SourceAcker, SourceReader}; @@ -281,15 +405,23 @@ mod tests { // Define requests per unit (rpu), batch size, and time unit let rpu = 10; let batch = 5; - let unit = Duration::from_millis(100); + let cfg = config::GeneratorConfig { + content: content.clone(), + rpu, + jitter: Duration::from_millis(0), + duration: 100, + ..Default::default() + }; // Create a new Generator - let mut generator = 
GeneratorRead::new(content.clone(), rpu, batch, unit); + let mut generator = GeneratorRead::new(cfg, batch); // Read the first batch of messages let messages = generator.read().await.unwrap(); assert_eq!(messages.len(), batch); + assert!(messages.first().unwrap().value.eq(&content)); + // Verify that each message has the expected structure // Read the second batch of messages @@ -297,6 +429,46 @@ mod tests { assert_eq!(messages.len(), rpu - batch); } + #[tokio::test] + async fn test_generator_read_with_random_data() { + // Here we do not provide any content, so the generator will generate random data + // Define requests per unit (rpu), batch size, and time unit + let rpu = 10; + let batch = 5; + let cfg = config::GeneratorConfig { + content: Bytes::new(), + rpu, + jitter: Duration::from_millis(0), + duration: 100, + key_count: 3, + msg_size_bytes: 100, + ..Default::default() + }; + + // Create a new Generator + let mut generator = GeneratorRead::new(cfg, batch); + + // Read the first batch of messages + let messages = generator.read().await.unwrap(); + let keys = messages + .iter() + .map(|m| m.keys[0].clone()) + .collect::>(); + + let expected_keys = vec![ + "key-0-0".to_string(), + "key-0-1".to_string(), + "key-0-2".to_string(), + "key-0-0".to_string(), + "key-0-1".to_string(), + ]; + + assert_eq!(keys, expected_keys); + assert!(messages.first().unwrap().value.len() >= 100); + + assert_eq!(messages.len(), batch); + } + #[tokio::test] async fn test_generator_lag_pending() { // Create a new GeneratorLagReader From 7b02290d3c8ee665916625fb119490192b4560bd Mon Sep 17 00:00:00 2001 From: Vigith Maurice Date: Sat, 19 Oct 2024 09:06:04 -0700 Subject: [PATCH 122/188] feat: config management for numaflow rust (#2172) Signed-off-by: Yashash H L Signed-off-by: Vigith Maurice Co-authored-by: Yashash H L --- rust/numaflow-core/src/config.rs | 757 +++--------------- rust/numaflow-core/src/config/components.rs | 452 +++++++++++ rust/numaflow-core/src/config/monovertex.rs | 376 
+++++++++ rust/numaflow-core/src/config/pipeline.rs | 52 ++ rust/numaflow-core/src/config/pipeline/isb.rs | 53 ++ rust/numaflow-core/src/error.rs | 2 +- rust/numaflow-core/src/monovertex.rs | 223 ++++-- .../numaflow-core/src/monovertex/forwarder.rs | 138 +++- rust/numaflow-core/src/monovertex/metrics.rs | 38 +- .../src/pipeline/isb/jetstream.rs | 2 +- .../src/pipeline/isb/jetstream/writer.rs | 2 +- rust/numaflow-core/src/shared/utils.rs | 54 +- rust/numaflow-core/src/sink.rs | 5 +- rust/numaflow-core/src/source.rs | 4 +- rust/numaflow-core/src/source/generator.rs | 33 +- rust/numaflow-core/src/source/user_defined.rs | 19 +- .../src/transformer/user_defined.rs | 22 +- 17 files changed, 1394 insertions(+), 838 deletions(-) create mode 100644 rust/numaflow-core/src/config/components.rs create mode 100644 rust/numaflow-core/src/config/monovertex.rs create mode 100644 rust/numaflow-core/src/config/pipeline.rs create mode 100644 rust/numaflow-core/src/config/pipeline/isb.rs diff --git a/rust/numaflow-core/src/config.rs b/rust/numaflow-core/src/config.rs index d7b4973719..ae9071d8ed 100644 --- a/rust/numaflow-core/src/config.rs +++ b/rust/numaflow-core/src/config.rs @@ -1,130 +1,23 @@ -use std::fmt::Display; +use std::env; use std::sync::OnceLock; -use std::{env, time::Duration}; - -use base64::prelude::BASE64_STANDARD; -use base64::Engine; -use bytes::Bytes; -use tracing::warn; - -use numaflow_models::models::{Backoff, MonoVertex, RetryStrategy}; +use crate::config::pipeline::PipelineConfig; use crate::Error; +use crate::Result; +use monovertex::MonovertexConfig; -// TODO move constants to a separate module, separate consts for different components -const DEFAULT_SOURCE_SOCKET: &str = "/var/run/numaflow/source.sock"; -const DEFAULT_SOURCE_SERVER_INFO_FILE: &str = "/var/run/numaflow/sourcer-server-info"; -const DEFAULT_SINK_SOCKET: &str = "/var/run/numaflow/sink.sock"; -const DEFAULT_FB_SINK_SOCKET: &str = "/var/run/numaflow/fb-sink.sock"; - -const 
DEFAULT_SINK_SERVER_INFO_FILE: &str = "/var/run/numaflow/sinker-server-info"; -const DEFAULT_FB_SINK_SERVER_INFO_FILE: &str = "/var/run/numaflow/fb-sinker-server-info"; -const DEFAULT_TRANSFORMER_SOCKET: &str = "/var/run/numaflow/sourcetransform.sock"; -const DEFAULT_TRANSFORMER_SERVER_INFO_FILE: &str = - "/var/run/numaflow/sourcetransformer-server-info"; const ENV_MONO_VERTEX_OBJ: &str = "NUMAFLOW_MONO_VERTEX_OBJECT"; -const ENV_POD_REPLICA: &str = "NUMAFLOW_REPLICA"; -const DEFAULT_GRPC_MAX_MESSAGE_SIZE: usize = 64 * 1024 * 1024; // 64 MB -const DEFAULT_METRICS_PORT: u16 = 2469; -const DEFAULT_LAG_CHECK_INTERVAL_IN_SECS: u16 = 5; -const DEFAULT_LAG_REFRESH_INTERVAL_IN_SECS: u16 = 3; -const DEFAULT_BATCH_SIZE: u64 = 500; -const DEFAULT_TIMEOUT_IN_MS: u32 = 1000; -const DEFAULT_MAX_SINK_RETRY_ATTEMPTS: u16 = u16::MAX; -const DEFAULT_SINK_RETRY_INTERVAL_IN_MS: u32 = 1; -const DEFAULT_SINK_RETRY_ON_FAIL_STRATEGY: OnFailureStrategy = OnFailureStrategy::Retry; - -/// Jetstream ISB related configurations. 
-pub mod jetstream { - use std::fmt; - use std::time::Duration; - - // jetstream related constants - const DEFAULT_PARTITION_IDX: u16 = 0; - const DEFAULT_MAX_LENGTH: usize = 30000; - const DEFAULT_USAGE_LIMIT: f64 = 0.8; - const DEFAULT_REFRESH_INTERVAL_SECS: u64 = 1; - const DEFAULT_BUFFER_FULL_STRATEGY: BufferFullStrategy = BufferFullStrategy::RetryUntilSuccess; - const DEFAULT_RETRY_INTERVAL_MILLIS: u64 = 10; - - #[derive(Debug, Clone)] - pub(crate) struct StreamWriterConfig { - pub name: String, - pub partition_idx: u16, - pub max_length: usize, - pub refresh_interval: Duration, - pub usage_limit: f64, - pub buffer_full_strategy: BufferFullStrategy, - pub retry_interval: Duration, - } - - impl Default for StreamWriterConfig { - fn default() -> Self { - StreamWriterConfig { - name: "default".to_string(), - partition_idx: DEFAULT_PARTITION_IDX, - max_length: DEFAULT_MAX_LENGTH, - usage_limit: DEFAULT_USAGE_LIMIT, - refresh_interval: Duration::from_secs(DEFAULT_REFRESH_INTERVAL_SECS), - buffer_full_strategy: DEFAULT_BUFFER_FULL_STRATEGY, - retry_interval: Duration::from_millis(DEFAULT_RETRY_INTERVAL_MILLIS), - } - } - } - - #[derive(Debug, Clone, Eq, PartialEq)] - pub(crate) enum BufferFullStrategy { - RetryUntilSuccess, - DiscardLatest, - } - - impl fmt::Display for BufferFullStrategy { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - BufferFullStrategy::RetryUntilSuccess => write!(f, "retryUntilSuccess"), - BufferFullStrategy::DiscardLatest => write!(f, "discardLatest"), - } - } - } -} - -#[derive(Debug, PartialEq, Clone)] -pub enum OnFailureStrategy { - Retry, - Fallback, - Drop, -} +const ENV_VERTEX_OBJ: &str = "NUMAFLOW_VERTEX_OBJECT"; -impl OnFailureStrategy { - /// Converts a string slice to an `OnFailureStrategy` enum variant. - /// Case insensitivity is considered to enhance usability. - /// - /// # Arguments - /// * `s` - A string slice representing the retry strategy. 
- /// - /// # Returns - /// An option containing the corresponding enum variant if successful, - /// or DefaultStrategy if the input does not match known variants. - fn from_str(s: &str) -> Option { - match s.to_lowercase().as_str() { - "retry" => Some(OnFailureStrategy::Retry), - "fallback" => Some(OnFailureStrategy::Fallback), - "drop" => Some(OnFailureStrategy::Drop), - _ => Some(DEFAULT_SINK_RETRY_ON_FAIL_STRATEGY), - } - } -} - -impl Display for OnFailureStrategy { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match *self { - OnFailureStrategy::Retry => write!(f, "retry"), - OnFailureStrategy::Fallback => write!(f, "fallback"), - OnFailureStrategy::Drop => write!(f, "drop"), - } - } -} +/// Building blocks (Source, Sink, Transformer, FallBack, Metrics, etc.) to build a Pipeline or a +/// MonoVertex. +pub(crate) mod components; +/// MonoVertex specific configs. +pub(crate) mod monovertex; +/// Pipeline specific configs. +pub(crate) mod pipeline; +/// Exposes the [Settings] via lazy loading. 
pub fn config() -> &'static Settings { static CONF: OnceLock = OnceLock::new(); CONF.get_or_init(|| match Settings::load() { @@ -135,376 +28,48 @@ pub fn config() -> &'static Settings { }) } -pub struct Settings { - pub mono_vertex_name: String, - pub replica: u32, - pub batch_size: u64, - pub timeout_in_ms: u32, - pub metrics_server_listen_port: u16, - pub lag_check_interval_in_secs: u16, - pub lag_refresh_interval_in_secs: u16, - pub sink_max_retry_attempts: u16, - pub sink_retry_interval_in_ms: u32, - pub sink_retry_on_fail_strategy: OnFailureStrategy, - pub sink_default_retry_strategy: RetryStrategy, - pub transformer_config: Option, - pub udsource_config: Option, - pub udsink_config: Option, - pub logsink_config: Option, - pub blackhole_config: Option, - pub fallback_config: Option, - pub generator_config: Option, -} - #[derive(Debug, Clone)] -pub struct TransformerConfig { - pub grpc_max_message_size: usize, - pub socket_path: String, - pub server_info_path: String, -} - -impl Default for TransformerConfig { - fn default() -> Self { - Self { - grpc_max_message_size: DEFAULT_GRPC_MAX_MESSAGE_SIZE, - socket_path: DEFAULT_TRANSFORMER_SOCKET.to_string(), - server_info_path: DEFAULT_TRANSFORMER_SERVER_INFO_FILE.to_string(), - } - } +pub(crate) enum CustomResourceType { + MonoVertex(MonovertexConfig), + Pipeline(PipelineConfig), } +/// The CRD and other necessary setting to get the Numaflow pipeline/monovertex running. 
#[derive(Debug, Clone)] -pub struct UDSourceConfig { - pub grpc_max_message_size: usize, - pub socket_path: String, - pub server_info_path: String, -} - -impl Default for UDSourceConfig { - fn default() -> Self { - Self { - grpc_max_message_size: DEFAULT_GRPC_MAX_MESSAGE_SIZE, - socket_path: DEFAULT_SOURCE_SOCKET.to_string(), - server_info_path: DEFAULT_SOURCE_SERVER_INFO_FILE.to_string(), - } - } -} - -#[derive(Debug, Clone, Default)] -pub struct LogSinkConfig; - -#[derive(Debug, Clone)] -pub struct UDSinkConfig { - pub grpc_max_message_size: usize, - pub socket_path: String, - pub server_info_path: String, -} - -impl Default for UDSinkConfig { - fn default() -> Self { - Self { - grpc_max_message_size: DEFAULT_GRPC_MAX_MESSAGE_SIZE, - socket_path: DEFAULT_SINK_SOCKET.to_string(), - server_info_path: DEFAULT_SINK_SERVER_INFO_FILE.to_string(), - } - } -} - -impl UDSinkConfig { - fn fallback_default() -> Self { - Self { - grpc_max_message_size: DEFAULT_GRPC_MAX_MESSAGE_SIZE, - socket_path: DEFAULT_FB_SINK_SOCKET.to_string(), - server_info_path: DEFAULT_FB_SINK_SERVER_INFO_FILE.to_string(), - } - } -} - -#[derive(Debug, Clone)] -pub struct GeneratorConfig { - pub rpu: usize, - pub content: Bytes, - pub duration: usize, - pub value: Option, - pub key_count: u8, - pub msg_size_bytes: u32, - pub jitter: Duration, -} - -impl Default for GeneratorConfig { - fn default() -> Self { - Self { - rpu: 1, - content: Bytes::new(), - duration: 1000, - value: None, - key_count: 0, - msg_size_bytes: 8, - jitter: Duration::from_secs(0), - } - } -} - -/// Configuration for the [BlackholeSink](crate::sink::blackhole::BlackholeSink) -#[derive(Default, Debug, Clone)] -pub struct BlackholeConfig {} - -impl Default for Settings { - fn default() -> Self { - // Create a default retry strategy from defined constants - let default_retry_strategy = RetryStrategy { - backoff: Option::from(Box::from(Backoff { - interval: Option::from(kube::core::Duration::from( - 
std::time::Duration::from_millis(DEFAULT_SINK_RETRY_INTERVAL_IN_MS as u64), - )), - steps: Option::from(DEFAULT_MAX_SINK_RETRY_ATTEMPTS as i64), - })), - on_failure: Option::from(DEFAULT_SINK_RETRY_ON_FAIL_STRATEGY.to_string()), - }; - Self { - mono_vertex_name: "default".to_string(), - replica: 0, - batch_size: DEFAULT_BATCH_SIZE, - timeout_in_ms: DEFAULT_TIMEOUT_IN_MS, - metrics_server_listen_port: DEFAULT_METRICS_PORT, - lag_check_interval_in_secs: DEFAULT_LAG_CHECK_INTERVAL_IN_SECS, - lag_refresh_interval_in_secs: DEFAULT_LAG_REFRESH_INTERVAL_IN_SECS, - sink_max_retry_attempts: DEFAULT_MAX_SINK_RETRY_ATTEMPTS, - sink_retry_interval_in_ms: DEFAULT_SINK_RETRY_INTERVAL_IN_MS, - sink_retry_on_fail_strategy: DEFAULT_SINK_RETRY_ON_FAIL_STRATEGY, - sink_default_retry_strategy: default_retry_strategy, - transformer_config: None, - udsource_config: None, - udsink_config: Default::default(), - logsink_config: None, - blackhole_config: None, - fallback_config: None, - generator_config: None, - } - } +pub(crate) struct Settings { + pub(crate) custom_resource_type: CustomResourceType, } impl Settings { - fn load() -> Result { - let mut settings = Settings::default(); - if let Ok(mono_vertex_spec) = env::var(ENV_MONO_VERTEX_OBJ) { - // decode the spec it will be base64 encoded - let mono_vertex_spec = BASE64_STANDARD - .decode(mono_vertex_spec.as_bytes()) - .map_err(|e| { - Error::Config(format!("Failed to decode mono vertex spec: {:?}", e)) - })?; - - let mono_vertex_obj: MonoVertex = serde_json::from_slice(&mono_vertex_spec) - .map_err(|e| Error::Config(format!("Failed to parse mono vertex spec: {:?}", e)))?; - - settings.batch_size = mono_vertex_obj - .spec - .limits - .clone() - .unwrap() - .read_batch_size - .map(|x| x as u64) - .unwrap_or(DEFAULT_BATCH_SIZE); - - settings.timeout_in_ms = mono_vertex_obj - .spec - .limits - .clone() - .unwrap() - .read_timeout - .map(|x| std::time::Duration::from(x).as_millis() as u32) - .unwrap_or(DEFAULT_TIMEOUT_IN_MS); - - 
settings.mono_vertex_name = mono_vertex_obj - .metadata - .and_then(|metadata| metadata.name) - .ok_or_else(|| Error::Config("Mono vertex name not found".to_string()))?; - - settings.transformer_config = match mono_vertex_obj - .spec - .source - .as_deref() - .ok_or(Error::Config("Source not found".to_string()))? - .transformer - { - Some(_) => Some(TransformerConfig::default()), - _ => None, - }; - - settings.udsource_config = match mono_vertex_obj - .spec - .source - .as_deref() - .ok_or(Error::Config("Source not found".to_string()))? - .udsource - { - Some(_) => Some(UDSourceConfig::default()), - _ => None, - }; - - settings.udsink_config = match mono_vertex_obj - .spec - .sink - .as_deref() - .ok_or(Error::Config("Sink not found".to_string()))? - .udsink - { - Some(_) => Some(UDSinkConfig::default()), - _ => None, - }; - - settings.fallback_config = match mono_vertex_obj - .spec - .sink - .as_deref() - .ok_or(Error::Config("Sink not found".to_string()))? - .fallback - { - Some(_) => Some(UDSinkConfig::fallback_default()), - _ => None, - }; - - settings.blackhole_config = mono_vertex_obj - .spec - .sink - .as_deref() - .ok_or(Error::Config("Sink not found".to_string()))? - .blackhole - .as_deref() - .map(|_| BlackholeConfig::default()); - - settings.generator_config = match mono_vertex_obj - .spec - .source - .as_deref() - .ok_or(Error::Config("Source not found".to_string()))? 
- .generator - .as_deref() - { - Some(generator_source) => { - let mut config = GeneratorConfig::default(); - - if let Some(value_blob) = &generator_source.value_blob { - config.content = Bytes::from(value_blob.clone()); - } - match &generator_source.value_blob { - Some(value) => { - config.content = Bytes::from(value.clone()); - } - None => { - if let Some(msg_size) = generator_source.msg_size { - if msg_size < 0 { - warn!("'msgSize' can not be negative, using default value (8 bytes)"); - } else { - config.msg_size_bytes = msg_size as u32; - } - } - - config.value = generator_source.value; - } - } - - if let Some(rpu) = generator_source.rpu { - config.rpu = rpu as usize; - } - - if let Some(d) = generator_source.duration { - config.duration = std::time::Duration::from(d).as_millis() as usize; - } - - if let Some(key_count) = generator_source.key_count { - if key_count > u8::MAX as i32 { - warn!( - "Capping the key count to {}, provided value is {key_count}", - u8::MAX - ); - } - config.key_count = std::cmp::min(key_count, u8::MAX as i32) as u8; - } - - if let Some(jitter) = generator_source.jitter { - config.jitter = std::time::Duration::from(jitter); - } - - Some(config) - } - None => None, - }; - - settings.logsink_config = mono_vertex_obj - .spec - .sink - .as_deref() - .ok_or(Error::Config("Sink not found".to_string()))? 
- .log - .as_deref() - .map(|_| LogSinkConfig); - - if let Some(retry_strategy) = mono_vertex_obj - .spec - .sink - .expect("sink should not be empty") - .retry_strategy - { - if let Some(sink_backoff) = retry_strategy.clone().backoff { - // Set the max retry attempts and retry interval using direct reference - settings.sink_retry_interval_in_ms = sink_backoff - .clone() - .interval - .map(|x| std::time::Duration::from(x).as_millis() as u32) - .unwrap_or(DEFAULT_SINK_RETRY_INTERVAL_IN_MS); - - settings.sink_max_retry_attempts = sink_backoff - .clone() - .steps - .map(|x| x as u16) - .unwrap_or(DEFAULT_MAX_SINK_RETRY_ATTEMPTS); - - // We do not allow 0 attempts to write to sink - if settings.sink_max_retry_attempts == 0 { - return Err(Error::Config( - "Retry Strategy given with 0 retry attempts".to_string(), - )); - } - } - - // Set the retry strategy from the spec or use the default - settings.sink_retry_on_fail_strategy = retry_strategy - .on_failure - .clone() - .and_then(|s| OnFailureStrategy::from_str(&s)) - .unwrap_or(DEFAULT_SINK_RETRY_ON_FAIL_STRATEGY); - - // check if the sink retry strategy is set to fallback and there is no fallback sink configured - // then we should return an error - if settings.sink_retry_on_fail_strategy == OnFailureStrategy::Fallback - && settings.fallback_config.is_none() - { - return Err(Error::Config( - "Retry Strategy given as fallback but Fallback sink not configured" - .to_string(), - )); - } - } + /// load based on the CRD type, either a pipeline or a monovertex. + /// Settings are populated through reading the env vars set via the controller. The main + /// CRD is the base64 spec of the CR. 
+ fn load() -> Result { + if let Ok(obj) = env::var(ENV_MONO_VERTEX_OBJ) { + let cfg = MonovertexConfig::load(obj)?; + return Ok(Settings { + custom_resource_type: CustomResourceType::MonoVertex(cfg), + }); } - settings.replica = env::var(ENV_POD_REPLICA) - .unwrap_or_else(|_| "0".to_string()) - .parse() - .map_err(|e| Error::Config(format!("Failed to parse pod replica: {:?}", e)))?; - - Ok(settings) + if let Ok(obj) = env::var(ENV_VERTEX_OBJ) { + let cfg = PipelineConfig::load(obj)?; + return Ok(Settings { + custom_resource_type: CustomResourceType::Pipeline(cfg), + }); + } + Err(Error::Config("No configuration found".to_string())) } } #[cfg(test)] mod tests { - use std::env; - + use crate::config::components::sink::OnFailureStrategy; + use crate::config::{CustomResourceType, Settings, ENV_MONO_VERTEX_OBJ}; + use base64::prelude::BASE64_STANDARD; + use base64::Engine; use serde_json::json; - - use super::*; + use std::env; #[test] fn test_settings_load_combined() { @@ -559,7 +124,10 @@ mod tests { // Execute and verify let settings = Settings::load().unwrap(); - assert_eq!(settings.mono_vertex_name, "simple-mono-vertex"); + assert!(matches!( + settings.custom_resource_type, + CustomResourceType::MonoVertex(_) + )); env::remove_var(ENV_MONO_VERTEX_OBJ); } @@ -607,12 +175,28 @@ mod tests { // Execute and verify let settings = Settings::load().unwrap(); + let mvtx_cfg = match settings.custom_resource_type { + CustomResourceType::MonoVertex(cfg) => cfg, + _ => panic!("Invalid configuration type"), + }; + + assert_eq!( + mvtx_cfg + .sink_config + .retry_config + .clone() + .unwrap() + .sink_max_retry_attempts, + 5 + ); assert_eq!( - settings.sink_retry_on_fail_strategy, - DEFAULT_SINK_RETRY_ON_FAIL_STRATEGY + mvtx_cfg + .sink_config + .retry_config + .unwrap() + .sink_retry_interval_in_ms, + 1000 ); - assert_eq!(settings.sink_max_retry_attempts, 5); - assert_eq!(settings.sink_retry_interval_in_ms, 1000); env::remove_var(ENV_MONO_VERTEX_OBJ); } @@ -661,15 +245,40 @@ mod 
tests { // Execute and verify let settings = Settings::load().unwrap(); + let mvtx_cfg = match settings.custom_resource_type { + CustomResourceType::MonoVertex(cfg) => cfg, + _ => panic!("Invalid configuration type"), + }; + assert_eq!( - settings.sink_retry_on_fail_strategy, + mvtx_cfg + .sink_config + .retry_config + .clone() + .unwrap() + .sink_retry_on_fail_strategy, OnFailureStrategy::Drop ); - assert_eq!(settings.sink_max_retry_attempts, 5); - assert_eq!(settings.sink_retry_interval_in_ms, 1000); + assert_eq!( + mvtx_cfg + .sink_config + .retry_config + .clone() + .unwrap() + .sink_max_retry_attempts, + 5 + ); + assert_eq!( + mvtx_cfg + .sink_config + .retry_config + .clone() + .unwrap() + .sink_retry_interval_in_ms, + 1000 + ); env::remove_var(ENV_MONO_VERTEX_OBJ); } - { // Test Invalid on failure strategy to use default let json_data = json!({ @@ -715,159 +324,39 @@ mod tests { // Execute and verify let settings = Settings::load().unwrap(); + let mvtx_config = match settings.custom_resource_type { + CustomResourceType::MonoVertex(cfg) => cfg, + _ => panic!("Invalid configuration type"), + }; + assert_eq!( - settings.sink_retry_on_fail_strategy, - DEFAULT_SINK_RETRY_ON_FAIL_STRATEGY + mvtx_config + .sink_config + .retry_config + .clone() + .unwrap() + .sink_retry_on_fail_strategy, + OnFailureStrategy::Retry + ); + assert_eq!( + mvtx_config + .sink_config + .retry_config + .clone() + .unwrap() + .sink_max_retry_attempts, + 5 + ); + assert_eq!( + mvtx_config + .sink_config + .retry_config + .clone() + .unwrap() + .sink_retry_interval_in_ms, + 1000 ); - assert_eq!(settings.sink_max_retry_attempts, 5); - assert_eq!(settings.sink_retry_interval_in_ms, 1000); - env::remove_var(ENV_MONO_VERTEX_OBJ); - } - - { - // Test Error Case: Retry Strategy Fallback without Fallback Sink - let json_data = json!({ - "metadata": { - "name": "simple-mono-vertex", - "namespace": "default", - "creationTimestamp": null - }, - "spec": { - "replicas": 0, - "source": { - "udsource": { 
- "container": { - "image": "xxxxxxx", - "resources": {} - } - } - }, - "sink": { - "udsink": { - "container": { - "image": "xxxxxx", - "resources": {} - } - }, - "retryStrategy": { - "backoff": { - "interval": "1s", - "steps": 5 - }, - "onFailure": "fallback" - }, - }, - "limits": { - "readBatchSize": 500, - "readTimeout": "1s" - }, - } - }); - let json_str = json_data.to_string(); - let encoded_json = BASE64_STANDARD.encode(json_str); - env::set_var(ENV_MONO_VERTEX_OBJ, encoded_json); - - // Execute and verify - assert!(Settings::load().is_err()); - env::remove_var(ENV_MONO_VERTEX_OBJ); - } - - { - // Test Error Case: Retry Strategy with 0 Retry Attempts - let json_data = json!({ - "metadata": { - "name": "simple-mono-vertex", - "namespace": "default", - "creationTimestamp": null - }, - "spec": { - "replicas": 0, - "source": { - "udsource": { - "container": { - "image": "xxxxxxx", - "resources": {} - } - } - }, - "sink": { - "udsink": { - "container": { - "image": "xxxxxx", - "resources": {} - } - }, - "retryStrategy": { - "backoff": { - "interval": "1s", - "steps": 0 - }, - "onFailure": "retry" - }, - }, - "limits": { - "readBatchSize": 500, - "readTimeout": "1s" - }, - } - }); - let json_str = json_data.to_string(); - let encoded_json = BASE64_STANDARD.encode(json_str); - env::set_var(ENV_MONO_VERTEX_OBJ, encoded_json); - - // Execute and verify - assert!(Settings::load().is_err()); env::remove_var(ENV_MONO_VERTEX_OBJ); } } - - #[test] - fn test_on_failure_enum_from_str_valid_inputs() { - assert_eq!( - OnFailureStrategy::from_str("retry"), - Some(OnFailureStrategy::Retry) - ); - assert_eq!( - OnFailureStrategy::from_str("fallback"), - Some(OnFailureStrategy::Fallback) - ); - assert_eq!( - OnFailureStrategy::from_str("drop"), - Some(OnFailureStrategy::Drop) - ); - - // Testing case insensitivity - assert_eq!( - OnFailureStrategy::from_str("ReTry"), - Some(OnFailureStrategy::Retry) - ); - assert_eq!( - OnFailureStrategy::from_str("FALLBACK"), - 
Some(OnFailureStrategy::Fallback) - ); - assert_eq!( - OnFailureStrategy::from_str("Drop"), - Some(OnFailureStrategy::Drop) - ); - } - - #[test] - fn test_on_failure_enum_from_str_invalid_input() { - assert_eq!( - OnFailureStrategy::from_str("unknown"), - Some(DEFAULT_SINK_RETRY_ON_FAIL_STRATEGY) - ); // should return None for undefined inputs - } - - #[test] - fn test_on_failure_enum_to_string() { - let retry = OnFailureStrategy::Retry; - assert_eq!(retry.to_string(), "retry"); - - let fallback = OnFailureStrategy::Fallback; - assert_eq!(fallback.to_string(), "fallback"); - - let drop = OnFailureStrategy::Drop; - assert_eq!(drop.to_string(), "drop"); - } } diff --git a/rust/numaflow-core/src/config/components.rs b/rust/numaflow-core/src/config/components.rs new file mode 100644 index 0000000000..87ca81adc0 --- /dev/null +++ b/rust/numaflow-core/src/config/components.rs @@ -0,0 +1,452 @@ +pub(crate) mod source { + const DEFAULT_GRPC_MAX_MESSAGE_SIZE: usize = 64 * 1024 * 1024; // 64 MB + const DEFAULT_SOURCE_SOCKET: &str = "/var/run/numaflow/source.sock"; + const DEFAULT_SOURCE_SERVER_INFO_FILE: &str = "/var/run/numaflow/sourcer-server-info"; + + use bytes::Bytes; + use std::time::Duration; + + #[derive(Debug, Clone, PartialEq)] + pub(crate) struct SourceConfig { + pub(crate) source_type: SourceType, + } + + #[derive(Debug, Clone, PartialEq)] + pub(crate) enum SourceType { + Generator(GeneratorConfig), + UserDefined(UserDefinedConfig), + } + + #[derive(Debug, Clone, PartialEq)] + pub(crate) struct GeneratorConfig { + pub rpu: usize, + pub content: Bytes, + pub duration: usize, + pub value: Option, + pub key_count: u8, + pub msg_size_bytes: u32, + pub jitter: Duration, + } + + impl Default for GeneratorConfig { + fn default() -> Self { + Self { + rpu: 1, + content: Bytes::new(), + duration: 1000, + value: None, + key_count: 0, + msg_size_bytes: 8, + jitter: Duration::from_secs(0), + } + } + } + + #[derive(Debug, Clone, PartialEq)] + pub(crate) struct 
UserDefinedConfig { + pub grpc_max_message_size: usize, + pub socket_path: String, + pub server_info_path: String, + } + + impl Default for UserDefinedConfig { + fn default() -> Self { + Self { + grpc_max_message_size: DEFAULT_GRPC_MAX_MESSAGE_SIZE, + socket_path: DEFAULT_SOURCE_SOCKET.to_string(), + server_info_path: DEFAULT_SOURCE_SERVER_INFO_FILE.to_string(), + } + } + } +} + +pub(crate) mod sink { + const DEFAULT_GRPC_MAX_MESSAGE_SIZE: usize = 64 * 1024 * 1024; // 64 MB + const DEFAULT_SINK_SOCKET: &str = "/var/run/numaflow/sink.sock"; + const DEFAULT_SINK_SERVER_INFO_FILE: &str = "/var/run/numaflow/sinker-server-info"; + const DEFAULT_FB_SINK_SOCKET: &str = "/var/run/numaflow/fb-sink.sock"; + const DEFAULT_FB_SINK_SERVER_INFO_FILE: &str = "/var/run/numaflow/fb-sinker-server-info"; + const DEFAULT_SINK_RETRY_ON_FAIL_STRATEGY: OnFailureStrategy = OnFailureStrategy::Retry; + const DEFAULT_MAX_SINK_RETRY_ATTEMPTS: u16 = u16::MAX; + const DEFAULT_SINK_RETRY_INTERVAL_IN_MS: u32 = 1; + + use crate::error::Error; + use crate::Result; + use numaflow_models::models::{Backoff, RetryStrategy}; + use std::fmt::Display; + + #[derive(Debug, Clone, PartialEq)] + pub(crate) struct SinkConfig { + pub(crate) sink_type: SinkType, + pub(crate) retry_config: Option, + } + + #[derive(Debug, Clone, PartialEq)] + pub(crate) enum SinkType { + Log(LogConfig), + Blackhole(BlackholeConfig), + UserDefined(UserDefinedConfig), + } + + #[derive(Debug, Clone, PartialEq, Default)] + pub(crate) struct LogConfig {} + + #[derive(Debug, Clone, PartialEq, Default)] + pub(crate) struct BlackholeConfig {} + + #[derive(Debug, Clone, PartialEq)] + pub(crate) struct UserDefinedConfig { + pub grpc_max_message_size: usize, + pub socket_path: String, + pub server_info_path: String, + } + + #[derive(Debug, Clone, PartialEq)] + pub(crate) enum OnFailureStrategy { + Retry, + Fallback, + Drop, + } + + impl OnFailureStrategy { + /// Converts a string slice to an `OnFailureStrategy` enum variant. 
+ /// Case insensitivity is considered to enhance usability. + /// + /// # Arguments + /// * `s` - A string slice representing the retry strategy. + /// + /// # Returns + /// An option containing the corresponding enum variant if successful, + /// or DefaultStrategy if the input does not match known variants. + pub(crate) fn from_str(s: &str) -> Self { + match s.to_lowercase().as_str() { + "retry" => OnFailureStrategy::Retry, + "fallback" => OnFailureStrategy::Fallback, + "drop" => OnFailureStrategy::Drop, + _ => DEFAULT_SINK_RETRY_ON_FAIL_STRATEGY, + } + } + } + + impl Display for OnFailureStrategy { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match *self { + OnFailureStrategy::Retry => write!(f, "retry"), + OnFailureStrategy::Fallback => write!(f, "fallback"), + OnFailureStrategy::Drop => write!(f, "drop"), + } + } + } + + #[derive(Debug, Clone, PartialEq)] + pub(crate) struct RetryConfig { + pub sink_max_retry_attempts: u16, + pub sink_retry_interval_in_ms: u32, + pub sink_retry_on_fail_strategy: OnFailureStrategy, + pub sink_default_retry_strategy: RetryStrategy, + } + + impl Default for RetryConfig { + fn default() -> Self { + let default_retry_strategy = RetryStrategy { + backoff: Option::from(Box::from(Backoff { + interval: Option::from(kube::core::Duration::from( + std::time::Duration::from_millis(DEFAULT_SINK_RETRY_INTERVAL_IN_MS as u64), + )), + steps: Option::from(DEFAULT_MAX_SINK_RETRY_ATTEMPTS as i64), + })), + on_failure: Option::from(DEFAULT_SINK_RETRY_ON_FAIL_STRATEGY.to_string()), + }; + Self { + sink_max_retry_attempts: DEFAULT_MAX_SINK_RETRY_ATTEMPTS, + sink_retry_interval_in_ms: DEFAULT_SINK_RETRY_INTERVAL_IN_MS, + sink_retry_on_fail_strategy: DEFAULT_SINK_RETRY_ON_FAIL_STRATEGY, + sink_default_retry_strategy: default_retry_strategy, + } + } + } + + impl Default for UserDefinedConfig { + fn default() -> Self { + Self { + grpc_max_message_size: DEFAULT_GRPC_MAX_MESSAGE_SIZE, + socket_path: 
DEFAULT_SINK_SOCKET.to_string(), + server_info_path: DEFAULT_SINK_SERVER_INFO_FILE.to_string(), + } + } + } + + impl UserDefinedConfig { + pub(crate) fn fallback_default() -> Self { + Self { + grpc_max_message_size: DEFAULT_GRPC_MAX_MESSAGE_SIZE, + socket_path: DEFAULT_FB_SINK_SOCKET.to_string(), + server_info_path: DEFAULT_FB_SINK_SERVER_INFO_FILE.to_string(), + } + } + } +} + +pub(crate) mod transformer { + const DEFAULT_GRPC_MAX_MESSAGE_SIZE: usize = 64 * 1024 * 1024; // 64 MB + const DEFAULT_TRANSFORMER_SOCKET: &str = "/var/run/numaflow/sourcetransform.sock"; + const DEFAULT_TRANSFORMER_SERVER_INFO_FILE: &str = + "/var/run/numaflow/sourcetransformer-server-info"; + + #[derive(Debug, Clone, PartialEq)] + pub(crate) struct TransformerConfig { + pub(crate) transformer_type: TransformerType, + } + + #[derive(Debug, Clone, PartialEq)] + pub(crate) enum TransformerType { + Noop(NoopConfig), // will add built-in transformers + UserDefined(UserDefinedConfig), + } + + #[derive(Debug, Clone, PartialEq)] + pub(crate) struct NoopConfig {} + + #[derive(Debug, Clone, PartialEq)] + pub(crate) struct UserDefinedConfig { + pub grpc_max_message_size: usize, + pub socket_path: String, + pub server_info_path: String, + } + + impl Default for UserDefinedConfig { + fn default() -> Self { + Self { + grpc_max_message_size: DEFAULT_GRPC_MAX_MESSAGE_SIZE, + socket_path: DEFAULT_TRANSFORMER_SOCKET.to_string(), + server_info_path: DEFAULT_TRANSFORMER_SERVER_INFO_FILE.to_string(), + } + } + } +} + +pub(crate) mod metrics { + const DEFAULT_METRICS_PORT: u16 = 2469; + const DEFAULT_LAG_CHECK_INTERVAL_IN_SECS: u16 = 5; + const DEFAULT_LAG_REFRESH_INTERVAL_IN_SECS: u16 = 3; + + #[derive(Debug, Clone, PartialEq)] + pub(crate) struct MetricsConfig { + pub metrics_server_listen_port: u16, + pub lag_check_interval_in_secs: u16, + pub lag_refresh_interval_in_secs: u16, + } + + impl Default for MetricsConfig { + fn default() -> Self { + Self { + metrics_server_listen_port: DEFAULT_METRICS_PORT, + 
lag_check_interval_in_secs: DEFAULT_LAG_CHECK_INTERVAL_IN_SECS, + lag_refresh_interval_in_secs: DEFAULT_LAG_REFRESH_INTERVAL_IN_SECS, + } + } + } +} + +#[cfg(test)] +mod source_tests { + use super::source::{GeneratorConfig, SourceConfig, SourceType, UserDefinedConfig}; + use bytes::Bytes; + use std::time::Duration; + + #[test] + fn test_default_generator_config() { + let default_config = GeneratorConfig::default(); + assert_eq!(default_config.rpu, 1); + assert_eq!(default_config.content, Bytes::new()); + assert_eq!(default_config.duration, 1000); + assert_eq!(default_config.value, None); + assert_eq!(default_config.key_count, 0); + assert_eq!(default_config.msg_size_bytes, 8); + assert_eq!(default_config.jitter, Duration::from_secs(0)); + } + + #[test] + fn test_default_user_defined_config() { + let default_config = UserDefinedConfig::default(); + assert_eq!(default_config.grpc_max_message_size, 64 * 1024 * 1024); + assert_eq!(default_config.socket_path, "/var/run/numaflow/source.sock"); + assert_eq!( + default_config.server_info_path, + "/var/run/numaflow/sourcer-server-info" + ); + } + + #[test] + fn test_source_config_generator() { + let generator_config = GeneratorConfig::default(); + let source_config = SourceConfig { + source_type: SourceType::Generator(generator_config.clone()), + }; + if let SourceType::Generator(config) = source_config.source_type { + assert_eq!(config, generator_config); + } else { + panic!("Expected SourceType::Generator"); + } + } + + #[test] + fn test_source_config_user_defined() { + let user_defined_config = UserDefinedConfig::default(); + let source_config = SourceConfig { + source_type: SourceType::UserDefined(user_defined_config.clone()), + }; + if let SourceType::UserDefined(config) = source_config.source_type { + assert_eq!(config, user_defined_config); + } else { + panic!("Expected SourceType::UserDefined"); + } + } +} + +#[cfg(test)] +mod sink_tests { + use super::sink::{ + BlackholeConfig, LogConfig, OnFailureStrategy, 
RetryConfig, SinkConfig, SinkType, + UserDefinedConfig, + }; + use numaflow_models::models::{Backoff, RetryStrategy}; + + #[test] + fn test_default_log_config() { + let default_config = LogConfig::default(); + assert_eq!(default_config, LogConfig {}); + } + + #[test] + fn test_default_blackhole_config() { + let default_config = BlackholeConfig::default(); + assert_eq!(default_config, BlackholeConfig {}); + } + + #[test] + fn test_default_user_defined_config() { + let default_config = UserDefinedConfig::default(); + assert_eq!(default_config.grpc_max_message_size, 64 * 1024 * 1024); + assert_eq!(default_config.socket_path, "/var/run/numaflow/sink.sock"); + assert_eq!( + default_config.server_info_path, + "/var/run/numaflow/sinker-server-info" + ); + } + + #[test] + fn test_default_retry_config() { + let default_retry_strategy = RetryStrategy { + backoff: Option::from(Box::from(Backoff { + interval: Option::from(kube::core::Duration::from( + std::time::Duration::from_millis(1u64), + )), + steps: Option::from(u16::MAX as i64), + })), + on_failure: Option::from(OnFailureStrategy::Retry.to_string()), + }; + let default_config = RetryConfig::default(); + assert_eq!(default_config.sink_max_retry_attempts, u16::MAX); + assert_eq!(default_config.sink_retry_interval_in_ms, 1); + assert_eq!( + default_config.sink_retry_on_fail_strategy, + OnFailureStrategy::Retry + ); + assert_eq!( + default_config.sink_default_retry_strategy, + default_retry_strategy + ); + } + + #[test] + fn test_on_failure_strategy_from_str() { + assert_eq!( + OnFailureStrategy::from_str("retry"), + OnFailureStrategy::Retry + ); + assert_eq!( + OnFailureStrategy::from_str("fallback"), + OnFailureStrategy::Fallback + ); + assert_eq!(OnFailureStrategy::from_str("drop"), OnFailureStrategy::Drop); + assert_eq!( + OnFailureStrategy::from_str("unknown"), + OnFailureStrategy::Retry + ); + } + + #[test] + fn test_sink_config_log() { + let log_config = LogConfig::default(); + let sink_config = SinkConfig { + 
sink_type: SinkType::Log(log_config.clone()), + retry_config: None, + }; + if let SinkType::Log(config) = sink_config.sink_type { + assert_eq!(config, log_config); + } else { + panic!("Expected SinkType::Log"); + } + } + + #[test] + fn test_sink_config_blackhole() { + let blackhole_config = BlackholeConfig::default(); + let sink_config = SinkConfig { + sink_type: SinkType::Blackhole(blackhole_config.clone()), + retry_config: None, + }; + if let SinkType::Blackhole(config) = sink_config.sink_type { + assert_eq!(config, blackhole_config); + } else { + panic!("Expected SinkType::Blackhole"); + } + } + + #[test] + fn test_sink_config_user_defined() { + let user_defined_config = UserDefinedConfig::default(); + let sink_config = SinkConfig { + sink_type: SinkType::UserDefined(user_defined_config.clone()), + retry_config: None, + }; + if let SinkType::UserDefined(config) = sink_config.sink_type { + assert_eq!(config, user_defined_config); + } else { + panic!("Expected SinkType::UserDefined"); + } + } +} + +#[cfg(test)] +mod transformer_tests { + use super::transformer::{TransformerConfig, TransformerType, UserDefinedConfig}; + + #[test] + fn test_default_user_defined_config() { + let default_config = UserDefinedConfig::default(); + assert_eq!(default_config.grpc_max_message_size, 64 * 1024 * 1024); + assert_eq!( + default_config.socket_path, + "/var/run/numaflow/sourcetransform.sock" + ); + assert_eq!( + default_config.server_info_path, + "/var/run/numaflow/sourcetransformer-server-info" + ); + } + + #[test] + fn test_transformer_config_user_defined() { + let user_defined_config = UserDefinedConfig::default(); + let transformer_config = TransformerConfig { + transformer_type: TransformerType::UserDefined(user_defined_config.clone()), + }; + if let TransformerType::UserDefined(config) = transformer_config.transformer_type { + assert_eq!(config, user_defined_config); + } else { + panic!("Expected TransformerType::UserDefined"); + } + } +} diff --git 
a/rust/numaflow-core/src/config/monovertex.rs b/rust/numaflow-core/src/config/monovertex.rs new file mode 100644 index 0000000000..a9d5685530 --- /dev/null +++ b/rust/numaflow-core/src/config/monovertex.rs @@ -0,0 +1,376 @@ +use crate::config::components::metrics::MetricsConfig; +use crate::config::components::sink::{OnFailureStrategy, RetryConfig, SinkConfig}; +use crate::config::components::source::{GeneratorConfig, SourceConfig}; +use crate::config::components::transformer::{ + TransformerConfig, TransformerType, UserDefinedConfig, +}; +use crate::config::components::{sink, source}; +use crate::error::Error; +use crate::message::get_vertex_replica; +use crate::Result; +use base64::prelude::BASE64_STANDARD; +use base64::Engine; +use bytes::Bytes; +use numaflow_models::models::MonoVertex; +use serde_json::from_slice; +use std::time::Duration; +use tracing::warn; + +const DEFAULT_BATCH_SIZE: u64 = 500; +const DEFAULT_TIMEOUT_IN_MS: u32 = 1000; + +#[derive(Debug, Clone, PartialEq)] +pub(crate) struct MonovertexConfig { + pub(crate) name: String, + pub(crate) batch_size: usize, + pub(crate) timeout_in_ms: u64, + pub(crate) replica: u16, + pub(crate) source_config: SourceConfig, + pub(crate) sink_config: SinkConfig, + pub(crate) transformer_config: Option, + pub(crate) fb_sink_config: Option, + pub(crate) metrics_config: MetricsConfig, +} + +impl Default for MonovertexConfig { + fn default() -> Self { + MonovertexConfig { + name: "".to_string(), + batch_size: DEFAULT_BATCH_SIZE as usize, + timeout_in_ms: DEFAULT_TIMEOUT_IN_MS as u64, + replica: 0, + source_config: SourceConfig { + source_type: source::SourceType::Generator(GeneratorConfig::default()), + }, + sink_config: SinkConfig { + sink_type: sink::SinkType::Log(sink::LogConfig::default()), + retry_config: None, + }, + transformer_config: None, + fb_sink_config: None, + metrics_config: MetricsConfig::default(), + } + } +} + +impl MonovertexConfig { + /// Load the MonoVertex Settings. 
+ pub(crate) fn load(mono_vertex_spec: String) -> Result { + // controller sets this env var. + let decoded_spec = BASE64_STANDARD + .decode(mono_vertex_spec.as_bytes()) + .map_err(|e| Error::Config(format!("Failed to decode mono vertex spec: {:?}", e)))?; + + let mono_vertex_obj: MonoVertex = from_slice(&decoded_spec) + .map_err(|e| Error::Config(format!("Failed to parse mono vertex spec: {:?}", e)))?; + + let batch_size = mono_vertex_obj + .spec + .limits + .as_ref() + .and_then(|limits| limits.read_batch_size.map(|x| x as u64)) + .unwrap_or(DEFAULT_BATCH_SIZE); + + let timeout_in_ms = mono_vertex_obj + .spec + .limits + .as_ref() + .and_then(|limits| { + limits + .read_timeout + .map(|x| Duration::from(x).as_millis() as u32) + }) + .unwrap_or(DEFAULT_TIMEOUT_IN_MS); + + let mono_vertex_name = mono_vertex_obj + .metadata + .as_ref() + .and_then(|metadata| metadata.name.clone()) + .ok_or_else(|| Error::Config("Mono vertex name not found".to_string()))?; + + let transformer_config = mono_vertex_obj + .spec + .source + .as_ref() + .and_then(|source| source.transformer.as_ref()) + .map(|_| TransformerConfig { + transformer_type: TransformerType::UserDefined(UserDefinedConfig::default()), + }); + + let source_config = mono_vertex_obj + .spec + .source + .as_ref() + .ok_or_else(|| Error::Config("Source not found".to_string())) + .and_then(|source| { + source.udsource.as_ref().map(|_| SourceConfig { + source_type: source::SourceType::UserDefined(source::UserDefinedConfig::default()), + }).or_else(|| { + source.generator.as_ref().map(|generator| { + let mut generator_config = GeneratorConfig::default(); + + if let Some(value_blob) = &generator.value_blob { + generator_config.content = Bytes::from(value_blob.clone()); + } + + if let Some(msg_size) = generator.msg_size { + if msg_size >= 0 { + generator_config.msg_size_bytes = msg_size as u32; + } else { + warn!("'msgSize' cannot be negative, using default value (8 bytes)"); + } + } + + generator_config.value = 
generator.value; + generator_config.rpu = generator.rpu.unwrap_or(1) as usize; + generator_config.duration = generator.duration.map_or(1000, |d| std::time::Duration::from(d).as_millis() as usize); + generator_config.key_count = generator.key_count.map_or(0, |kc| std::cmp::min(kc, u8::MAX as i32) as u8); + generator_config.jitter = generator.jitter.map_or(Duration::from_secs(0), std::time::Duration::from); + + SourceConfig { + source_type: source::SourceType::Generator(generator_config), + } + }) + }).ok_or_else(|| Error::Config("Source type not found".to_string())) + })?; + + let sink_config = mono_vertex_obj + .spec + .sink + .as_ref() + .ok_or_else(|| Error::Config("Sink not found".to_string())) + .and_then(|sink| { + let retry_config = sink.retry_strategy.as_ref().map(|retry| { + let mut retry_config = RetryConfig::default(); + + if let Some(backoff) = &retry.backoff { + if let Some(interval) = backoff.interval { + retry_config.sink_retry_interval_in_ms = + std::time::Duration::from(interval).as_millis() as u32; + } + + if let Some(steps) = backoff.steps { + retry_config.sink_max_retry_attempts = steps as u16; + } + } + + if let Some(strategy) = &retry.on_failure { + retry_config.sink_retry_on_fail_strategy = + OnFailureStrategy::from_str(strategy); + } + + retry_config + }); + + sink.udsink + .as_ref() + .map(|_| SinkConfig { + sink_type: sink::SinkType::UserDefined(sink::UserDefinedConfig::default()), + retry_config: retry_config.clone(), + }) + .or_else(|| { + sink.log.as_ref().map(|_| SinkConfig { + sink_type: sink::SinkType::Log(sink::LogConfig::default()), + retry_config: retry_config.clone(), + }) + }) + .or_else(|| { + sink.blackhole.as_ref().map(|_| SinkConfig { + sink_type: sink::SinkType::Blackhole(sink::BlackholeConfig::default()), + retry_config: retry_config.clone(), + }) + }) + .ok_or_else(|| Error::Config("Sink type not found".to_string())) + })?; + + let fb_sink_config = mono_vertex_obj + .spec + .sink + .as_ref() + .and_then(|sink| 
sink.fallback.as_ref()) + .map(|fallback| { + fallback + .udsink + .as_ref() + .map(|_| SinkConfig { + sink_type: sink::SinkType::UserDefined( + sink::UserDefinedConfig::fallback_default(), + ), + retry_config: None, + }) + .or_else(|| { + fallback.log.as_ref().map(|_| SinkConfig { + sink_type: sink::SinkType::Log(sink::LogConfig::default()), + retry_config: None, + }) + }) + .or_else(|| { + fallback.blackhole.as_ref().map(|_| SinkConfig { + sink_type: sink::SinkType::Blackhole(sink::BlackholeConfig::default()), + retry_config: None, + }) + }) + .ok_or_else(|| Error::Config("Fallback sink type not found".to_string())) + }) + .transpose()?; + + Ok(MonovertexConfig { + name: mono_vertex_name, + replica: *get_vertex_replica(), + batch_size: batch_size as usize, + timeout_in_ms: timeout_in_ms as u64, + metrics_config: MetricsConfig::default(), + source_config, + sink_config, + transformer_config, + fb_sink_config, + }) + } +} + +#[cfg(test)] +mod tests { + use crate::config::components::sink::SinkType; + use crate::config::components::source::SourceType; + use crate::config::components::transformer::TransformerType; + use crate::config::monovertex::MonovertexConfig; + use crate::error::Error; + use base64::prelude::BASE64_STANDARD; + use base64::Engine; + #[test] + fn test_load_valid_config() { + let valid_config = r#" + { + "metadata": { + "name": "test_vertex" + }, + "spec": { + "limits": { + "readBatchSize": 1000, + "readTimeout": "2s" + }, + "source": { + "udsource": { + "container": { + "image": "xxxxxxx", + "resources": {} + } + } + }, + "sink": { + "log": {} + } + } + } + "#; + + let encoded_valid_config = BASE64_STANDARD.encode(valid_config); + let spec = encoded_valid_config.as_str(); + + let config = MonovertexConfig::load(spec.to_string()).unwrap(); + + assert_eq!(config.name, "test_vertex"); + assert_eq!(config.batch_size, 1000); + assert_eq!(config.timeout_in_ms, 2000); + assert!(matches!( + config.source_config.source_type, + SourceType::UserDefined(_) + 
)); + assert!(matches!(config.sink_config.sink_type, SinkType::Log(_))); + } + + #[test] + fn test_load_missing_source() { + let invalid_config = r#" + { + "metadata": { + "name": "test_vertex" + }, + "spec": { + "limits": { + "readBatchSize": 1000, + "readTimeout": "2s" + }, + "sink": { + "log": {} + } + } + } + "#; + let encoded_invalid_config = BASE64_STANDARD.encode(invalid_config); + let spec = encoded_invalid_config.as_str(); + + let result = MonovertexConfig::load(spec.to_string()); + assert!(matches!(result, Err(Error::Config(_)))); + } + + #[test] + fn test_load_missing_sink() { + let invalid_config = r#" + { + "metadata": { + "name": "test_vertex" + }, + "spec": { + "limits": { + "readBatchSize": 1000, + "readTimeout": "2s" + }, + "source": { + "udsource": { + "container": { + "image": "xxxxxxx", + "resources": {} + } + } + } + } + } + "#; + let encoded_invalid_config = BASE64_STANDARD.encode(invalid_config); + let spec = encoded_invalid_config.as_str(); + + let result = MonovertexConfig::load(spec.to_string()); + assert!(matches!(result, Err(Error::Config(_)))); + } + + #[test] + fn test_load_with_transformer() { + let valid_config = r#" + { + "metadata": { + "name": "test_vertex" + }, + "spec": { + "limits": { + "readBatchSize": 1000, + "readTimeout": "2s" + }, + "source": { + "udsource": { + "container": { + "image": "xxxxxxx", + "resources": {} + } + }, + "transformer": {} + }, + "sink": { + "log": {} + } + } + } + "#; + let encoded_invalid_config = BASE64_STANDARD.encode(valid_config); + let spec = encoded_invalid_config.as_str(); + + let config = MonovertexConfig::load(spec.to_string()).unwrap(); + + assert_eq!(config.name, "test_vertex"); + assert!(config.transformer_config.is_some()); + assert!(matches!( + config.transformer_config.unwrap().transformer_type, + TransformerType::UserDefined(_) + )); + } +} diff --git a/rust/numaflow-core/src/config/pipeline.rs b/rust/numaflow-core/src/config/pipeline.rs new file mode 100644 index 
0000000000..03753bcfd5 --- /dev/null +++ b/rust/numaflow-core/src/config/pipeline.rs @@ -0,0 +1,52 @@ +use crate::config::components::sink::SinkConfig; +use crate::config::components::source::{SourceConfig, SourceType}; +use crate::config::components::transformer::TransformerConfig; +use crate::Result; + +pub(crate) mod isb; + +#[derive(Debug, Clone, PartialEq)] +pub(crate) struct PipelineConfig { + pub(crate) buffer_reader_config: BufferReaderConfig, + pub(crate) buffer_writer_config: BufferWriterConfig, + pub(crate) vertex_config: VertexConfig, +} + +#[derive(Debug, Clone, PartialEq)] +pub(crate) struct SourceVtxConfig { + pub(crate) source_config: SourceConfig, + pub(crate) transformer_config: Option, +} + +#[derive(Debug, Clone, PartialEq)] +pub(crate) struct SinkVtxConfig { + pub(crate) sink_config: SinkConfig, + pub(crate) fb_sink_config: Option, +} + +#[derive(Debug, Clone, PartialEq)] +pub(crate) enum VertexConfig { + Source(SourceVtxConfig), + Sink(SinkVtxConfig), +} + +#[derive(Debug, Clone, PartialEq)] +pub(crate) struct BufferReaderConfig {} + +#[derive(Debug, Clone, PartialEq)] +pub(crate) struct BufferWriterConfig {} + +impl PipelineConfig { + pub fn load(_pipeline_spec_obj: String) -> Result { + Ok(PipelineConfig { + buffer_reader_config: BufferReaderConfig {}, + buffer_writer_config: BufferWriterConfig {}, + vertex_config: VertexConfig::Source(SourceVtxConfig { + source_config: SourceConfig { + source_type: SourceType::Generator(Default::default()), + }, + transformer_config: None, + }), + }) + } +} diff --git a/rust/numaflow-core/src/config/pipeline/isb.rs b/rust/numaflow-core/src/config/pipeline/isb.rs new file mode 100644 index 0000000000..89723cc9be --- /dev/null +++ b/rust/numaflow-core/src/config/pipeline/isb.rs @@ -0,0 +1,53 @@ +/// Jetstream ISB related configurations. 
+pub mod jetstream { + use std::fmt; + use std::time::Duration; + + // jetstream related constants + const DEFAULT_PARTITION_IDX: u16 = 0; + const DEFAULT_MAX_LENGTH: usize = 30000; + const DEFAULT_USAGE_LIMIT: f64 = 0.8; + const DEFAULT_REFRESH_INTERVAL_SECS: u64 = 1; + const DEFAULT_BUFFER_FULL_STRATEGY: BufferFullStrategy = BufferFullStrategy::RetryUntilSuccess; + const DEFAULT_RETRY_INTERVAL_MILLIS: u64 = 10; + + #[derive(Debug, Clone)] + pub(crate) struct StreamWriterConfig { + pub name: String, + pub partition_idx: u16, + pub max_length: usize, + pub refresh_interval: Duration, + pub usage_limit: f64, + pub buffer_full_strategy: BufferFullStrategy, + pub retry_interval: Duration, + } + + impl Default for StreamWriterConfig { + fn default() -> Self { + StreamWriterConfig { + name: "default".to_string(), + partition_idx: DEFAULT_PARTITION_IDX, + max_length: DEFAULT_MAX_LENGTH, + usage_limit: DEFAULT_USAGE_LIMIT, + refresh_interval: Duration::from_secs(DEFAULT_REFRESH_INTERVAL_SECS), + buffer_full_strategy: DEFAULT_BUFFER_FULL_STRATEGY, + retry_interval: Duration::from_millis(DEFAULT_RETRY_INTERVAL_MILLIS), + } + } + } + + #[derive(Debug, Clone, Eq, PartialEq)] + pub(crate) enum BufferFullStrategy { + RetryUntilSuccess, + DiscardLatest, + } + + impl fmt::Display for BufferFullStrategy { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + BufferFullStrategy::RetryUntilSuccess => write!(f, "retryUntilSuccess"), + BufferFullStrategy::DiscardLatest => write!(f, "discardLatest"), + } + } + } +} diff --git a/rust/numaflow-core/src/error.rs b/rust/numaflow-core/src/error.rs index 56e470d5e2..4fd78d8a1a 100644 --- a/rust/numaflow-core/src/error.rs +++ b/rust/numaflow-core/src/error.rs @@ -4,7 +4,7 @@ pub type Result = std::result::Result; #[derive(Error, Debug, Clone)] pub enum Error { - #[error("Metrics Error - {0}")] + #[error("metrics Error - {0}")] Metrics(String), #[error("Source Error - {0}")] diff --git 
a/rust/numaflow-core/src/monovertex.rs b/rust/numaflow-core/src/monovertex.rs index e78b816087..64fe47c676 100644 --- a/rust/numaflow-core/src/monovertex.rs +++ b/rust/numaflow-core/src/monovertex.rs @@ -10,8 +10,11 @@ use tokio_util::sync::CancellationToken; use tonic::transport::Channel; use tracing::{error, info}; -use crate::config::{config, Settings}; +use crate::config::components::{sink, source, transformer}; +use crate::config::monovertex::MonovertexConfig; +use crate::config::{config, CustomResourceType, Settings}; use crate::error::{self, Error}; +use crate::shared::server_info::check_for_server_compatibility; use crate::shared::utils; use crate::shared::utils::create_rpc_channel; use crate::sink::{SinkClientType, SinkHandle}; @@ -42,13 +45,21 @@ pub async fn mono_vertex() -> error::Result<()> { Ok(()) }); - // Run the forwarder with cancellation token. - if let Err(e) = start_forwarder(cln_token, config()).await { - error!("Application error: {:?}", e); - - // abort the signal handler task since we have an error and we are shutting down - if !shutdown_handle.is_finished() { - shutdown_handle.abort(); + let crd_type = config().custom_resource_type.clone(); + match crd_type { + CustomResourceType::MonoVertex(config) => { + // Run the forwarder with cancellation token. 
+ if let Err(e) = start_forwarder(cln_token, &config).await { + error!("Application error: {:?}", e); + + // abort the signal handler task since we have an error and we are shutting down + if !shutdown_handle.is_finished() { + shutdown_handle.abort(); + } + } + } + CustomResourceType::Pipeline(_) => { + panic!("Pipeline not supported") } } @@ -87,30 +98,20 @@ pub(crate) enum SourceType { Generator(GeneratorRead, GeneratorAck, GeneratorLagReader), } -async fn start_forwarder(cln_token: CancellationToken, config: &Settings) -> error::Result<()> { - // make sure that we have compatibility with the server - utils::check_compatibility( - &cln_token, - config - .udsource_config - .as_ref() - .map(|source_config| source_config.server_info_path.clone().into()), - config - .udsink_config - .as_ref() - .map(|sink_config| sink_config.server_info_path.clone().into()), - config - .transformer_config - .as_ref() - .map(|transformer_config| transformer_config.server_info_path.clone().into()), - config - .fallback_config - .as_ref() - .map(|fallback_config| fallback_config.server_info_path.clone().into()), - ) - .await?; +async fn start_forwarder( + cln_token: CancellationToken, + config: &MonovertexConfig, +) -> error::Result<()> { + let mut source_grpc_client = if let source::SourceType::UserDefined(source_config) = + &config.source_config.source_type + { + // do server compatibility check + check_for_server_compatibility( + source_config.server_info_path.clone().into(), + cln_token.clone(), + ) + .await?; - let mut source_grpc_client = if let Some(source_config) = &config.udsource_config { Some( SourceClient::new(create_rpc_channel(source_config.socket_path.clone().into()).await?) 
.max_encoding_message_size(source_config.grpc_max_message_size) @@ -120,7 +121,16 @@ async fn start_forwarder(cln_token: CancellationToken, config: &Settings) -> err None }; - let mut sink_grpc_client = if let Some(udsink_config) = &config.udsink_config { + let mut sink_grpc_client = if let sink::SinkType::UserDefined(udsink_config) = + &config.sink_config.sink_type + { + // do server compatibility check + check_for_server_compatibility( + udsink_config.server_info_path.clone().into(), + cln_token.clone(), + ) + .await?; + Some( SinkClient::new(create_rpc_channel(udsink_config.socket_path.clone().into()).await?) .max_encoding_message_size(udsink_config.grpc_max_message_size) @@ -130,25 +140,50 @@ async fn start_forwarder(cln_token: CancellationToken, config: &Settings) -> err None }; - let mut fb_sink_grpc_client = if let Some(fb_sink_config) = &config.fallback_config { - let fb_sink_grpc_client = - SinkClient::new(create_rpc_channel(fb_sink_config.socket_path.clone().into()).await?) + let mut fb_sink_grpc_client = if let Some(fb_sink) = &config.fb_sink_config { + if let sink::SinkType::UserDefined(fb_sink_config) = &fb_sink.sink_type { + // do server compatibility check + check_for_server_compatibility( + fb_sink_config.server_info_path.clone().into(), + cln_token.clone(), + ) + .await?; + + Some( + SinkClient::new( + create_rpc_channel(fb_sink_config.socket_path.clone().into()).await?, + ) .max_encoding_message_size(fb_sink_config.grpc_max_message_size) - .max_encoding_message_size(fb_sink_config.grpc_max_message_size); - - Some(fb_sink_grpc_client.clone()) + .max_encoding_message_size(fb_sink_config.grpc_max_message_size), + ) + } else { + None + } } else { None }; - let mut transformer_grpc_client = if let Some(transformer_config) = &config.transformer_config { - let transformer_grpc_client = SourceTransformClient::new( - create_rpc_channel(transformer_config.socket_path.clone().into()).await?, - ) - 
.max_encoding_message_size(transformer_config.grpc_max_message_size) - .max_encoding_message_size(transformer_config.grpc_max_message_size); - - Some(transformer_grpc_client.clone()) + let mut transformer_grpc_client = if let Some(transformer) = &config.transformer_config { + if let transformer::TransformerType::UserDefined(transformer_config) = + &transformer.transformer_type + { + // do server compatibility check + check_for_server_compatibility( + transformer_config.server_info_path.clone().into(), + cln_token.clone(), + ) + .await?; + + let transformer_grpc_client = SourceTransformClient::new( + create_rpc_channel(transformer_config.socket_path.clone().into()).await?, + ) + .max_encoding_message_size(transformer_config.grpc_max_message_size) + .max_encoding_message_size(transformer_config.grpc_max_message_size); + + Some(transformer_grpc_client.clone()) + } else { + None + } } else { None }; @@ -183,10 +218,18 @@ async fn start_forwarder(cln_token: CancellationToken, config: &Settings) -> err // start the metrics server // FIXME: what to do with the handle - utils::start_metrics_server(metrics_state).await; - - let source = SourceHandle::new(source_type); - start_forwarder_with_source(source, sink, transformer_grpc_client, fb_sink, cln_token).await?; + utils::start_metrics_server(config.metrics_config.clone(), metrics_state).await; + + let source = SourceHandle::new(source_type, config.batch_size); + start_forwarder_with_source( + config.clone(), + source, + sink, + transformer_grpc_client, + fb_sink, + cln_token, + ) + .await?; info!("Forwarder stopped gracefully"); Ok(()) @@ -195,7 +238,7 @@ async fn start_forwarder(cln_token: CancellationToken, config: &Settings) -> err // fetch right the source. // source_grpc_client can be optional because it is valid only for user-defined source. 
async fn fetch_source( - config: &Settings, + config: &MonovertexConfig, source_grpc_client: &mut Option>, ) -> crate::Result { // check whether the source grpc client is provided, this happens only of the source is a @@ -203,7 +246,7 @@ async fn fetch_source( if let Some(source_grpc_client) = source_grpc_client.clone() { let (source_read, source_ack, lag_reader) = new_source( source_grpc_client, - config.batch_size as usize, + config.batch_size, config.timeout_in_ms as u16, ) .await?; @@ -215,7 +258,7 @@ async fn fetch_source( } // now that we know it is not a user-defined source, it has to be a built-in - if let Some(generator_config) = &config.generator_config { + if let source::SourceType::Generator(generator_config) = &config.source_config.source_type { let (source_read, source_ack, lag_reader) = new_generator(generator_config.clone(), config.batch_size as usize)?; Ok(SourceType::Generator(source_read, source_ack, lag_reader)) @@ -227,27 +270,35 @@ async fn fetch_source( // fetch the actor handle for the sink. // sink_grpc_client can be optional because it is valid only for user-defined sink. async fn fetch_sink( - settings: &Settings, + settings: &MonovertexConfig, sink_grpc_client: Option>, fallback_sink_grpc_client: Option>, ) -> crate::Result<(SinkHandle, Option)> { let fb_sink = match fallback_sink_grpc_client { - Some(fallback_sink) => { - Some(SinkHandle::new(SinkClientType::UserDefined(fallback_sink)).await?) 
- } + Some(fallback_sink) => Some( + SinkHandle::new( + SinkClientType::UserDefined(fallback_sink), + settings.batch_size, + ) + .await?, + ), None => None, }; if let Some(sink_client) = sink_grpc_client { - let sink = SinkHandle::new(SinkClientType::UserDefined(sink_client)).await?; + let sink = SinkHandle::new( + SinkClientType::UserDefined(sink_client), + settings.batch_size, + ) + .await?; return Ok((sink, fb_sink)); } - if settings.logsink_config.is_some() { - let log = SinkHandle::new(SinkClientType::Log).await?; + if let sink::SinkType::Log(_) = &settings.sink_config.sink_type { + let log = SinkHandle::new(SinkClientType::Log, settings.batch_size).await?; return Ok((log, fb_sink)); } - if settings.blackhole_config.is_some() { - let blackhole = SinkHandle::new(SinkClientType::Blackhole).await?; + if let sink::SinkType::Blackhole(_) = &settings.sink_config.sink_type { + let blackhole = SinkHandle::new(SinkClientType::Blackhole, settings.batch_size).await?; return Ok((blackhole, fb_sink)); } Err(Error::Config( @@ -256,17 +307,18 @@ async fn fetch_sink( } async fn start_forwarder_with_source( + mvtx_config: MonovertexConfig, source: SourceHandle, sink: SinkHandle, - transformer_client: Option>, + transformer_client: Option>, fallback_sink: Option, cln_token: CancellationToken, ) -> error::Result<()> { // start the pending reader to publish pending metrics - let pending_reader = utils::create_pending_reader(source.clone()).await; + let pending_reader = utils::create_pending_reader(&mvtx_config, source.clone()).await; let _pending_reader_handle = pending_reader.start().await; - let mut forwarder_builder = ForwarderBuilder::new(source, sink, cln_token); + let mut forwarder_builder = ForwarderBuilder::new(source, sink, mvtx_config, cln_token); // add transformer if exists if let Some(transformer_client) = transformer_client { @@ -293,16 +345,16 @@ mod tests { use std::fs::File; use std::io::Write; + use crate::config::monovertex::MonovertexConfig; + use 
crate::config::{components, Settings}; + use crate::error; + use crate::monovertex::start_forwarder; + use crate::shared::server_info::ServerInfo; use numaflow::source::{Message, Offset, SourceReadRequest}; use numaflow::{sink, source}; use tokio::sync::mpsc::Sender; use tokio_util::sync::CancellationToken; - use crate::config::{Settings, UDSinkConfig, UDSourceConfig}; - use crate::error; - use crate::monovertex::start_forwarder; - use crate::shared::server_info::ServerInfo; - struct SimpleSource; #[tonic::async_trait] impl source::Sourcer for SimpleSource { @@ -401,17 +453,26 @@ mod tests { token_clone.cancel(); }); - let config = Settings { - udsink_config: Some(UDSinkConfig { - socket_path: sink_sock_file.to_str().unwrap().to_string(), - server_info_path: sink_server_info.to_str().unwrap().to_string(), - grpc_max_message_size: 1024, - }), - udsource_config: Some(UDSourceConfig { - socket_path: src_sock_file.to_str().unwrap().to_string(), - server_info_path: src_info_file.to_str().unwrap().to_string(), - grpc_max_message_size: 1024, - }), + let config = MonovertexConfig { + source_config: components::source::SourceConfig { + source_type: components::source::SourceType::UserDefined( + components::source::UserDefinedConfig { + socket_path: src_sock_file.to_str().unwrap().to_string(), + grpc_max_message_size: 1024, + server_info_path: src_info_file.to_str().unwrap().to_string(), + }, + ), + }, + sink_config: components::sink::SinkConfig { + sink_type: components::sink::SinkType::UserDefined( + components::sink::UserDefinedConfig { + socket_path: sink_sock_file.to_str().unwrap().to_string(), + grpc_max_message_size: 1024, + server_info_path: sink_server_info.to_str().unwrap().to_string(), + }, + ), + retry_config: Default::default(), + }, ..Default::default() }; diff --git a/rust/numaflow-core/src/monovertex/forwarder.rs b/rust/numaflow-core/src/monovertex/forwarder.rs index bd3253fea5..0b83a0dfe6 100644 --- a/rust/numaflow-core/src/monovertex/forwarder.rs +++ 
b/rust/numaflow-core/src/monovertex/forwarder.rs @@ -6,7 +6,8 @@ use tokio::time::sleep; use tokio_util::sync::CancellationToken; use tracing::{debug, info}; -use crate::config::{config, OnFailureStrategy}; +use crate::config::components::sink::{OnFailureStrategy, RetryConfig}; +use crate::config::monovertex::MonovertexConfig; use crate::error; use crate::message::{Message, Offset, ResponseStatusFromSink}; use crate::monovertex::metrics; @@ -25,6 +26,7 @@ pub(crate) struct Forwarder { fb_sink_writer: Option, cln_token: CancellationToken, common_labels: Vec<(String, String)>, + mvtx_config: MonovertexConfig, } /// ForwarderBuilder is used to build a Forwarder instance with optional fields. @@ -34,6 +36,7 @@ pub(crate) struct ForwarderBuilder { cln_token: CancellationToken, source_transformer: Option, fb_sink_writer: Option, + mvtx_config: MonovertexConfig, } impl ForwarderBuilder { @@ -41,6 +44,7 @@ impl ForwarderBuilder { pub(crate) fn new( source_reader: SourceHandle, sink_writer: SinkHandle, + mvtx_config: MonovertexConfig, cln_token: CancellationToken, ) -> Self { Self { @@ -49,6 +53,7 @@ impl ForwarderBuilder { cln_token, source_transformer: None, fb_sink_writer: None, + mvtx_config, } } @@ -67,13 +72,18 @@ impl ForwarderBuilder { /// Build the Forwarder instance #[must_use] pub(crate) fn build(self) -> Forwarder { - let common_labels = metrics::forward_metrics_labels().clone(); + let common_labels = metrics::mvtx_forward_metric_labels( + self.mvtx_config.name.clone(), + self.mvtx_config.replica, + ) + .clone(); Forwarder { source_reader: self.source_reader, sink_writer: self.sink_writer, source_transformer: self.source_transformer, fb_sink_writer: self.fb_sink_writer, cln_token: self.cln_token, + mvtx_config: self.mvtx_config, common_labels, } } @@ -231,10 +241,22 @@ impl Forwarder { // only breaks out of this loop based on the retry strategy unless all the messages have been written to sink // successfully. 
+ let retry_config = &self + .mvtx_config + .sink_config + .retry_config + .clone() + .unwrap_or_default(); + loop { - while attempts < config().sink_max_retry_attempts { + while attempts < retry_config.sink_max_retry_attempts { let status = self - .write_to_sink_once(&mut error_map, &mut fallback_msgs, &mut messages_to_send) + .write_to_sink_once( + &mut error_map, + &mut fallback_msgs, + &mut messages_to_send, + &retry_config, + ) .await; match status { Ok(true) => break, @@ -262,7 +284,9 @@ impl Forwarder { &mut error_map, &mut fallback_msgs, &mut messages_to_send, + &retry_config, ); + match need_retry { // if we are done with the messages, break the loop Ok(false) => break, @@ -277,7 +301,8 @@ impl Forwarder { // If there are fallback messages, write them to the fallback sink if !fallback_msgs.is_empty() { - self.handle_fallback_messages(fallback_msgs).await?; + self.handle_fallback_messages(fallback_msgs, &retry_config) + .await?; } forward_metrics() @@ -302,13 +327,14 @@ impl Forwarder { error_map: &mut HashMap, fallback_msgs: &mut Vec, messages_to_send: &mut Vec, + retry_config: &RetryConfig, ) -> error::Result { // if we are done with the messages, break the loop if messages_to_send.is_empty() { return Ok(false); } // check what is the failure strategy in the config - let strategy = config().sink_retry_on_fail_strategy.clone(); + let strategy = retry_config.sink_retry_on_fail_strategy.clone(); match strategy { // if we need to retry, return true OnFailureStrategy::Retry => { @@ -353,6 +379,7 @@ impl Forwarder { error_map: &mut HashMap, fallback_msgs: &mut Vec, messages_to_send: &mut Vec, + retry_config: &RetryConfig, ) -> error::Result { let start_time = tokio::time::Instant::now(); match self.sink_writer.sink(messages_to_send.clone()).await { @@ -393,7 +420,7 @@ impl Forwarder { } sleep(tokio::time::Duration::from_millis( - config().sink_retry_interval_in_ms as u64, + retry_config.sink_retry_interval_in_ms as u64, )) .await; @@ -405,7 +432,11 @@ impl 
Forwarder { } // Writes the fallback messages to the fallback sink - async fn handle_fallback_messages(&mut self, fallback_msgs: Vec) -> error::Result<()> { + async fn handle_fallback_messages( + &mut self, + fallback_msgs: Vec, + retry_config: &RetryConfig, + ) -> error::Result<()> { if self.fb_sink_writer.is_none() { return Err(Error::Sink( "Response contains fallback messages but no fallback sink is configured" @@ -421,7 +452,7 @@ impl Forwarder { let mut messages_to_send = fallback_msgs; let fb_msg_count = messages_to_send.len() as u64; - let default_retry = config() + let default_retry = retry_config .sink_default_retry_strategy .clone() .backoff @@ -669,6 +700,9 @@ mod tests { #[tokio::test] async fn test_forwarder_source_sink() { + let batch_size = 100; + let timeout_in_ms = 1000; + let (sink_tx, mut sink_rx) = mpsc::channel(10); // Start the source server @@ -729,22 +763,22 @@ mod tests { let (source_read, source_ack, source_lag_reader) = new_source( SourceClient::new(create_rpc_channel(source_sock_file.clone()).await.unwrap()), - config().batch_size as usize, - config().timeout_in_ms as u16, + batch_size, + timeout_in_ms, ) .await .expect("failed to connect to source server"); - let src_reader = SourceHandle::new(SourceType::UserDefinedSource( - source_read, - source_ack, - source_lag_reader, - )); + let src_reader = SourceHandle::new( + SourceType::UserDefinedSource(source_read, source_ack, source_lag_reader), + batch_size, + ); let sink_grpc_client = SinkClient::new(create_rpc_channel(sink_sock_file).await.unwrap()); - let sink_writer = SinkHandle::new(SinkClientType::UserDefined(sink_grpc_client)) - .await - .expect("failed to connect to sink server"); + let sink_writer = + SinkHandle::new(SinkClientType::UserDefined(sink_grpc_client), batch_size) + .await + .expect("failed to connect to sink server"); let transformer_client = SourceTransformHandle::new(SourceTransformClient::new( create_rpc_channel(transformer_sock_file).await.unwrap(), @@ -752,9 
+786,14 @@ mod tests { .await .expect("failed to connect to transformer server"); - let mut forwarder = ForwarderBuilder::new(src_reader, sink_writer, cln_token.clone()) - .source_transformer(transformer_client) - .build(); + let mut forwarder = ForwarderBuilder::new( + src_reader, + sink_writer, + Default::default(), + cln_token.clone(), + ) + .source_transformer(transformer_client) + .build(); // Assert the received message in a different task let assert_handle = tokio::spawn(async move { @@ -817,6 +856,9 @@ mod tests { #[tokio::test] async fn test_forwarder_sink_error() { + let batch_size = 100; + let timeout_in_ms = 1000; + // Start the source server let (source_shutdown_tx, source_shutdown_rx) = tokio::sync::oneshot::channel(); let tmp_dir = tempfile::TempDir::new().unwrap(); @@ -858,25 +900,29 @@ mod tests { let (source_read, source_ack, lag_reader) = new_source( SourceClient::new(create_rpc_channel(source_sock_file.clone()).await.unwrap()), - 500, - 100, + batch_size, + timeout_in_ms, ) .await .expect("failed to connect to source server"); - let source_reader = SourceHandle::new(SourceType::UserDefinedSource( - source_read, - source_ack, - lag_reader, - )); + let source_reader = SourceHandle::new( + SourceType::UserDefinedSource(source_read, source_ack, lag_reader), + batch_size, + ); let sink_client = SinkClient::new(create_rpc_channel(sink_sock_file).await.unwrap()); - let sink_writer = SinkHandle::new(SinkClientType::UserDefined(sink_client)) + let sink_writer = SinkHandle::new(SinkClientType::UserDefined(sink_client), batch_size) .await .expect("failed to connect to sink server"); - let mut forwarder = - ForwarderBuilder::new(source_reader, sink_writer, cln_token.clone()).build(); + let mut forwarder = ForwarderBuilder::new( + source_reader, + sink_writer, + Default::default(), + cln_token.clone(), + ) + .build(); let cancel_handle = tokio::spawn(async move { tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; @@ -920,6 +966,9 @@ mod tests 
{ #[tokio::test] async fn test_fb_sink() { + let batch_size = 100; + let timeout_in_ms = 1000; + let (sink_tx, mut sink_rx) = mpsc::channel(10); // Start the source server @@ -986,25 +1035,26 @@ mod tests { .await .expect("failed to connect to source server"); - let source = SourceHandle::new(SourceType::UserDefinedSource( - source_read, - source_ack, - source_lag_reader, - )); + let source = SourceHandle::new( + SourceType::UserDefinedSource(source_read, source_ack, source_lag_reader), + batch_size, + ); let sink_client = SinkClient::new(create_rpc_channel(sink_sock_file).await.unwrap()); - let sink_writer = SinkHandle::new(SinkClientType::UserDefined(sink_client)) + let sink_writer = SinkHandle::new(SinkClientType::UserDefined(sink_client), batch_size) .await .expect("failed to connect to sink server"); let fb_sink_writer = SinkClient::new(create_rpc_channel(fb_sink_sock_file).await.unwrap()); - let fb_sink_writer = SinkHandle::new(SinkClientType::UserDefined(fb_sink_writer)) - .await - .expect("failed to connect to fb sink server"); + let fb_sink_writer = + SinkHandle::new(SinkClientType::UserDefined(fb_sink_writer), batch_size) + .await + .expect("failed to connect to fb sink server"); - let mut forwarder = ForwarderBuilder::new(source, sink_writer, cln_token.clone()) - .fallback_sink_writer(fb_sink_writer) - .build(); + let mut forwarder = + ForwarderBuilder::new(source, sink_writer, Default::default(), cln_token.clone()) + .fallback_sink_writer(fb_sink_writer) + .build(); let assert_handle = tokio::spawn(async move { let received_message = sink_rx.recv().await.unwrap(); diff --git a/rust/numaflow-core/src/monovertex/metrics.rs b/rust/numaflow-core/src/monovertex/metrics.rs index 0818c26f96..2fe3336b26 100644 --- a/rust/numaflow-core/src/monovertex/metrics.rs +++ b/rust/numaflow-core/src/monovertex/metrics.rs @@ -263,14 +263,14 @@ static MONOVTX_METRICS_LABELS: OnceLock> = OnceLock::new() // forward_metrics_labels is a helper function used to fetch the // 
MONOVTX_METRICS_LABELS object -pub(crate) fn forward_metrics_labels() -> &'static Vec<(String, String)> { +pub(crate) fn mvtx_forward_metric_labels( + mvtx_name: String, + replica: u16, +) -> &'static Vec<(String, String)> { MONOVTX_METRICS_LABELS.get_or_init(|| { let common_labels = vec![ - ( - MVTX_NAME_LABEL.to_string(), - config().mono_vertex_name.clone(), - ), - (REPLICA_LABEL.to_string(), config().replica.to_string()), + (MVTX_NAME_LABEL.to_string(), mvtx_name), + (REPLICA_LABEL.to_string(), replica.to_string()), ]; common_labels }) @@ -282,7 +282,7 @@ pub async fn metrics_handler() -> impl IntoResponse { let state = global_registry().registry.lock(); let mut buffer = String::new(); encode(&mut buffer, &state).unwrap(); - debug!("Exposing Metrics: {:?}", buffer); + debug!("Exposing metrics: {:?}", buffer); Response::builder() .status(StatusCode::OK) .body(Body::from(buffer)) @@ -367,6 +367,8 @@ struct TimestampedPending { /// and exposing the metrics. It maintains a list of pending stats and ensures that /// only the most recent entries are kept. pub(crate) struct PendingReader { + mvtx_name: String, + replica: u16, lag_reader: SourceHandle, lag_checking_interval: Duration, refresh_interval: Duration, @@ -380,14 +382,18 @@ pub(crate) struct PendingReaderTasks { /// PendingReaderBuilder is used to build a [LagReader] instance. 
pub(crate) struct PendingReaderBuilder { + mvtx_name: String, + replica: u16, lag_reader: SourceHandle, lag_checking_interval: Option, refresh_interval: Option, } impl PendingReaderBuilder { - pub(crate) fn new(lag_reader: SourceHandle) -> Self { + pub(crate) fn new(mvtx_name: String, replica: u16, lag_reader: SourceHandle) -> Self { Self { + mvtx_name, + replica, lag_reader, lag_checking_interval: None, refresh_interval: None, @@ -406,6 +412,8 @@ impl PendingReaderBuilder { pub(crate) fn build(self) -> PendingReader { PendingReader { + mvtx_name: self.mvtx_name, + replica: self.replica, lag_reader: self.lag_reader, lag_checking_interval: self .lag_checking_interval @@ -437,8 +445,10 @@ impl PendingReader { }); let pending_stats = self.pending_stats.clone(); + let mvtx_name = self.mvtx_name.clone(); + let replica = self.replica; let expose_handle = tokio::spawn(async move { - expose_pending_metrics(refresh_interval, pending_stats).await; + expose_pending_metrics(mvtx_name, replica, refresh_interval, pending_stats).await; }); PendingReaderTasks { buildup_handle, @@ -497,6 +507,8 @@ const LOOKBACK_SECONDS_MAP: [(&str, i64); 4] = // Periodically exposes the pending metrics by calculating the average pending messages over different intervals. 
async fn expose_pending_metrics( + mvtx_name: String, + replica: u16, refresh_interval: Duration, pending_stats: Arc>>, ) { @@ -511,7 +523,8 @@ async fn expose_pending_metrics( for (label, seconds) in LOOKBACK_SECONDS_MAP { let pending = calculate_pending(seconds, &pending_stats).await; if pending != -1 { - let mut metric_labels = forward_metrics_labels().clone(); + let mut metric_labels = + mvtx_forward_metric_labels(mvtx_name.clone(), replica).clone(); metric_labels.push((PENDING_PERIOD_LABEL.to_string(), label.to_string())); pending_info.insert(label, pending); forward_metrics() @@ -751,7 +764,8 @@ mod tests { tokio::spawn({ let pending_stats = pending_stats.clone(); async move { - expose_pending_metrics(refresh_interval, pending_stats).await; + expose_pending_metrics("test".to_string(), 0, refresh_interval, pending_stats) + .await; } }); // We use tokio::time::interval() as the ticker in the expose_pending_metrics() function. @@ -763,7 +777,7 @@ mod tests { let mut stored_values: [i64; 4] = [0; 4]; { for (i, (label, _)) in LOOKBACK_SECONDS_MAP.iter().enumerate() { - let mut metric_labels = forward_metrics_labels().clone(); + let mut metric_labels = mvtx_forward_metric_labels("test".to_string(), 0).clone(); metric_labels.push((PENDING_PERIOD_LABEL.to_string(), label.to_string())); let guage = forward_metrics() .source_pending diff --git a/rust/numaflow-core/src/pipeline/isb/jetstream.rs b/rust/numaflow-core/src/pipeline/isb/jetstream.rs index 5e7890a97a..7e3e7e378c 100644 --- a/rust/numaflow-core/src/pipeline/isb/jetstream.rs +++ b/rust/numaflow-core/src/pipeline/isb/jetstream.rs @@ -3,7 +3,7 @@ use tokio::sync::mpsc::Receiver; use tokio::sync::{mpsc, oneshot}; use tokio_util::sync::CancellationToken; -use crate::config::jetstream::StreamWriterConfig; +use crate::config::pipeline::isb::jetstream::StreamWriterConfig; use crate::error::Error; use crate::message::{Message, Offset}; use crate::pipeline::isb::jetstream::writer::JetstreamWriter; diff --git 
a/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs b/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs index 0e3324ac4e..431958b12e 100644 --- a/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs +++ b/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs @@ -15,7 +15,7 @@ use tokio_util::sync::CancellationToken; use tracing::error; use tracing::{debug, warn}; -use crate::config::jetstream::StreamWriterConfig; +use crate::config::pipeline::isb::jetstream::StreamWriterConfig; use crate::error::Error; use crate::message::{IntOffset, Offset}; use crate::Result; diff --git a/rust/numaflow-core/src/shared/utils.rs b/rust/numaflow-core/src/shared/utils.rs index 9ec87490cd..f55e6b87a5 100644 --- a/rust/numaflow-core/src/shared/utils.rs +++ b/rust/numaflow-core/src/shared/utils.rs @@ -2,6 +2,15 @@ use std::net::SocketAddr; use std::path::PathBuf; use std::time::Duration; +use crate::config::components::metrics::MetricsConfig; +use crate::config::monovertex::MonovertexConfig; +use crate::error; +use crate::monovertex::metrics::{ + start_metrics_https_server, PendingReader, PendingReaderBuilder, UserDefinedContainerState, +}; +use crate::shared::server_info; +use crate::source::SourceHandle; +use crate::Error; use axum::http::Uri; use backoff::retry::Retry; use backoff::strategy::fixed; @@ -19,15 +28,6 @@ use tonic::Request; use tower::service_fn; use tracing::{info, warn}; -use crate::config::config; -use crate::error; -use crate::monovertex::metrics::{ - start_metrics_https_server, PendingReader, PendingReaderBuilder, UserDefinedContainerState, -}; -use crate::shared::server_info; -use crate::source::SourceHandle; -use crate::Error; - pub(crate) async fn check_compatibility( cln_token: &CancellationToken, source_file_path: Option, @@ -74,29 +74,41 @@ pub(crate) async fn check_compatibility( } pub(crate) async fn start_metrics_server( + metrics_config: MetricsConfig, metrics_state: UserDefinedContainerState, ) -> JoinHandle<()> { - tokio::spawn(async { + 
let metrics_port = metrics_config.metrics_server_listen_port.clone(); + tokio::spawn(async move { // Start the metrics server, which server the prometheus metrics. - let metrics_addr: SocketAddr = format!("0.0.0.0:{}", &config().metrics_server_listen_port) + let metrics_addr: SocketAddr = format!("0.0.0.0:{}", metrics_port) .parse() .expect("Invalid address"); if let Err(e) = start_metrics_https_server(metrics_addr, metrics_state).await { - error!("Metrics server error: {:?}", e); + error!("metrics server error: {:?}", e); } }) } -pub(crate) async fn create_pending_reader(lag_reader_grpc_client: SourceHandle) -> PendingReader { - PendingReaderBuilder::new(lag_reader_grpc_client) - .lag_checking_interval(Duration::from_secs( - config().lag_check_interval_in_secs.into(), - )) - .refresh_interval(Duration::from_secs( - config().lag_refresh_interval_in_secs.into(), - )) - .build() +pub(crate) async fn create_pending_reader( + mvtx_config: &MonovertexConfig, + lag_reader_grpc_client: SourceHandle, +) -> PendingReader { + PendingReaderBuilder::new( + mvtx_config.name.clone(), + mvtx_config.replica, + lag_reader_grpc_client, + ) + .lag_checking_interval(Duration::from_secs( + mvtx_config.metrics_config.lag_check_interval_in_secs.into(), + )) + .refresh_interval(Duration::from_secs( + mvtx_config + .metrics_config + .lag_refresh_interval_in_secs + .into(), + )) + .build() } pub(crate) async fn wait_until_ready( diff --git a/rust/numaflow-core/src/sink.rs b/rust/numaflow-core/src/sink.rs index 1c109a36f5..88e41dd107 100644 --- a/rust/numaflow-core/src/sink.rs +++ b/rust/numaflow-core/src/sink.rs @@ -3,7 +3,6 @@ use tokio::sync::{mpsc, oneshot}; use tonic::transport::Channel; use user_defined::UserDefinedSink; -use crate::config::config; use crate::message::{Message, ResponseFromSink}; mod blackhole; @@ -70,8 +69,8 @@ pub(crate) enum SinkClientType { } impl SinkHandle { - pub(crate) async fn new(sink_client: SinkClientType) -> crate::Result { - let (sender, receiver) = 
mpsc::channel(config().batch_size as usize); + pub(crate) async fn new(sink_client: SinkClientType, batch_size: usize) -> crate::Result { + let (sender, receiver) = mpsc::channel(batch_size); match sink_client { SinkClientType::Log => { let log_sink = log::LogSink; diff --git a/rust/numaflow-core/src/source.rs b/rust/numaflow-core/src/source.rs index 5e2eba5da5..1816e9fa04 100644 --- a/rust/numaflow-core/src/source.rs +++ b/rust/numaflow-core/src/source.rs @@ -106,8 +106,8 @@ pub(crate) struct SourceHandle { } impl SourceHandle { - pub(crate) fn new(src_type: SourceType) -> Self { - let (sender, receiver) = mpsc::channel(config().batch_size as usize); + pub(crate) fn new(src_type: SourceType, batch_size: usize) -> Self { + let (sender, receiver) = mpsc::channel(batch_size); match src_type { SourceType::UserDefinedSource(reader, acker, lag_reader) => { tokio::spawn(async move { diff --git a/rust/numaflow-core/src/source/generator.rs b/rust/numaflow-core/src/source/generator.rs index e8d80c4318..7d42a62a8e 100644 --- a/rust/numaflow-core/src/source/generator.rs +++ b/rust/numaflow-core/src/source/generator.rs @@ -1,6 +1,6 @@ use futures::StreamExt; -use crate::config; +use crate::config::components::source::GeneratorConfig; use crate::message::{Message, Offset}; use crate::reader; use crate::source; @@ -33,6 +33,7 @@ mod stream_generator { use tracing::warn; use crate::config; + use crate::config::components::source::GeneratorConfig; use crate::message::{ get_vertex_name, get_vertex_replica, Message, MessageID, Offset, StringOffset, }; @@ -67,7 +68,7 @@ mod stream_generator { } impl StreamGenerator { - pub(super) fn new(cfg: config::GeneratorConfig, batch_size: usize) -> Self { + pub(super) fn new(cfg: GeneratorConfig, batch_size: usize) -> Self { let mut tick = tokio::time::interval(Duration::from_millis(cfg.duration as u64)); tick.set_missed_tick_behavior(MissedTickBehavior::Skip); @@ -89,9 +90,7 @@ mod stream_generator { } // Generate all possible keys - let keys 
= (0..key_count) - .map(|i| format!("key-{}-{}", config::config().replica, i)) - .collect(); + let keys = (0..key_count).map(|i| format!("key-{}", i)).collect(); Self { content: cfg.content, @@ -249,7 +248,7 @@ mod stream_generator { // Define requests per unit (rpu), batch size, and time unit let batch = 6; let rpu = 10; - let cfg = config::GeneratorConfig { + let cfg = GeneratorConfig { content: content.clone(), rpu, jitter: Duration::from_millis(0), @@ -293,7 +292,7 @@ mod stream_generator { #[tokio::test] async fn test_stream_generator_config() { - let cfg = config::GeneratorConfig { + let cfg = GeneratorConfig { rpu: 33, key_count: 7, ..Default::default() @@ -302,7 +301,7 @@ mod stream_generator { let stream_generator = StreamGenerator::new(cfg, 50); assert_eq!(stream_generator.rpu, 28); - let cfg = config::GeneratorConfig { + let cfg = GeneratorConfig { rpu: 3, key_count: 7, ..Default::default() @@ -318,7 +317,7 @@ mod stream_generator { /// source to generate some messages. We mainly use generator for load testing and integration /// testing of Numaflow. The load generated is per replica. pub(crate) fn new_generator( - cfg: config::GeneratorConfig, + cfg: GeneratorConfig, batch_size: usize, ) -> crate::Result<(GeneratorRead, GeneratorAck, GeneratorLagReader)> { let gen_read = GeneratorRead::new(cfg, batch_size); @@ -335,7 +334,7 @@ pub(crate) struct GeneratorRead { impl GeneratorRead { /// A new [GeneratorRead] is returned. It takes a static content, requests per unit-time, batch size /// to return per [source::SourceReader::read], and the unit-time as duration. 
- fn new(cfg: config::GeneratorConfig, batch_size: usize) -> Self { + fn new(cfg: GeneratorConfig, batch_size: usize) -> Self { let stream_generator = stream_generator::StreamGenerator::new(cfg.clone(), batch_size); Self { stream_generator } } @@ -405,7 +404,7 @@ mod tests { // Define requests per unit (rpu), batch size, and time unit let rpu = 10; let batch = 5; - let cfg = config::GeneratorConfig { + let cfg = GeneratorConfig { content: content.clone(), rpu, jitter: Duration::from_millis(0), @@ -435,7 +434,7 @@ mod tests { // Define requests per unit (rpu), batch size, and time unit let rpu = 10; let batch = 5; - let cfg = config::GeneratorConfig { + let cfg = GeneratorConfig { content: Bytes::new(), rpu, jitter: Duration::from_millis(0), @@ -456,11 +455,11 @@ mod tests { .collect::>(); let expected_keys = vec![ - "key-0-0".to_string(), - "key-0-1".to_string(), - "key-0-2".to_string(), - "key-0-0".to_string(), - "key-0-1".to_string(), + "key-0".to_string(), + "key-1".to_string(), + "key-2".to_string(), + "key-0".to_string(), + "key-1".to_string(), ]; assert_eq!(keys, expected_keys); diff --git a/rust/numaflow-core/src/source/user_defined.rs b/rust/numaflow-core/src/source/user_defined.rs index 69be3d9a3b..89a4551899 100644 --- a/rust/numaflow-core/src/source/user_defined.rs +++ b/rust/numaflow-core/src/source/user_defined.rs @@ -8,7 +8,6 @@ use tokio_stream::wrappers::ReceiverStream; use tonic::transport::Channel; use tonic::{Request, Streaming}; -use crate::config::config; use crate::message::{Message, Offset}; use crate::reader::LagReader; use crate::source::{SourceAcker, SourceReader}; @@ -41,7 +40,7 @@ pub(crate) async fn new_source( UserDefinedSourceLagReader, )> { let src_read = UserDefinedSourceRead::new(client.clone(), num_records, timeout_in_ms).await?; - let src_ack = UserDefinedSourceAck::new(client.clone()).await?; + let src_ack = UserDefinedSourceAck::new(client.clone(), num_records).await?; let lag_reader = UserDefinedSourceLagReader::new(client); 
Ok((src_read, src_ack, lag_reader)) @@ -50,23 +49,24 @@ pub(crate) async fn new_source( impl UserDefinedSourceRead { async fn new( mut client: SourceClient, - num_records: usize, + batch_size: usize, timeout_in_ms: u16, ) -> Result { - let (read_tx, resp_stream) = Self::create_reader(&mut client).await?; + let (read_tx, resp_stream) = Self::create_reader(batch_size, &mut client).await?; Ok(Self { read_tx, resp_stream, - num_records, + num_records: batch_size, timeout_in_ms, }) } async fn create_reader( + batch_size: usize, client: &mut SourceClient, ) -> Result<(mpsc::Sender, Streaming)> { - let (read_tx, read_rx) = mpsc::channel(config().batch_size as usize); + let (read_tx, read_rx) = mpsc::channel(batch_size); let read_stream = ReceiverStream::new(read_rx); // do a handshake for read with the server before we start sending read requests @@ -139,8 +139,8 @@ impl SourceReader for UserDefinedSourceRead { } impl UserDefinedSourceAck { - async fn new(mut client: SourceClient) -> Result { - let (ack_tx, ack_resp_stream) = Self::create_acker(&mut client).await?; + async fn new(mut client: SourceClient, batch_size: usize) -> Result { + let (ack_tx, ack_resp_stream) = Self::create_acker(batch_size, &mut client).await?; Ok(Self { ack_tx, @@ -149,9 +149,10 @@ impl UserDefinedSourceAck { } async fn create_acker( + batch_size: usize, client: &mut SourceClient, ) -> Result<(mpsc::Sender, Streaming)> { - let (ack_tx, ack_rx) = mpsc::channel(config().batch_size as usize); + let (ack_tx, ack_rx) = mpsc::channel(batch_size); let ack_stream = ReceiverStream::new(ack_rx); // do a handshake for ack with the server before we start sending ack requests diff --git a/rust/numaflow-core/src/transformer/user_defined.rs b/rust/numaflow-core/src/transformer/user_defined.rs index 035addcbb5..5b6c478f42 100644 --- a/rust/numaflow-core/src/transformer/user_defined.rs +++ b/rust/numaflow-core/src/transformer/user_defined.rs @@ -12,7 +12,6 @@ use tonic::transport::Channel; use tonic::{Request, 
Streaming}; use tracing::warn; -use crate::config::config; use crate::error::{Error, Result}; use crate::message::{get_vertex_name, Message, MessageID, Offset}; use crate::shared::utils::utc_from_timestamp; @@ -28,10 +27,11 @@ struct SourceTransformer { impl SourceTransformer { async fn new( + batch_size: usize, mut client: SourceTransformClient, actor_messages: mpsc::Receiver, ) -> Result { - let (read_tx, read_rx) = mpsc::channel(config().batch_size as usize); + let (read_tx, read_rx) = mpsc::channel(batch_size); let read_stream = ReceiverStream::new(read_rx); // do a handshake for read with the server before we start sending read requests @@ -207,9 +207,10 @@ pub(crate) struct SourceTransformHandle { } impl SourceTransformHandle { - pub(crate) async fn new(client: SourceTransformClient) -> crate::Result { - let (sender, receiver) = mpsc::channel(config().batch_size as usize); - let mut client = SourceTransformer::new(client, receiver).await?; + pub(crate) async fn new(client: SourceTransformClient) -> Result { + let batch_size = 500; + let (sender, receiver) = mpsc::channel(batch_size); + let mut client = SourceTransformer::new(batch_size, client, receiver).await?; tokio::spawn(async move { while let Some(msg) = client.actor_messages.recv().await { client.handle_message(msg).await; @@ -276,7 +277,7 @@ mod tests { }); // wait for the server to start - tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + tokio::time::sleep(Duration::from_millis(100)).await; let client = SourceTransformHandle::new(SourceTransformClient::new( create_rpc_channel(sock_file).await?, @@ -299,11 +300,8 @@ mod tests { headers: Default::default(), }; - let resp = tokio::time::timeout( - tokio::time::Duration::from_secs(2), - client.transform(vec![message]), - ) - .await??; + let resp = + tokio::time::timeout(Duration::from_secs(2), client.transform(vec![message])).await??; assert_eq!(resp.len(), 1); // we need to drop the client, because if there are any in-flight requests 
@@ -355,7 +353,7 @@ mod tests { }); // wait for the server to start - tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + tokio::time::sleep(Duration::from_millis(100)).await; let client = SourceTransformHandle::new(SourceTransformClient::new( create_rpc_channel(sock_file).await?, From a5071b3177615793d4c8f14cbdcd59611c243c10 Mon Sep 17 00:00:00 2001 From: Sreekanth Date: Tue, 22 Oct 2024 05:29:09 +0530 Subject: [PATCH 123/188] chore: Use sub-registries for transformer/sink/fbsink metrics. (#2173) Signed-off-by: Sreekanth Signed-off-by: Vigith Maurice Co-authored-by: Vigith Maurice --- rust/numaflow-core/src/config/components.rs | 2 - rust/numaflow-core/src/error.rs | 1 + rust/numaflow-core/src/monovertex.rs | 6 +- .../numaflow-core/src/monovertex/forwarder.rs | 19 +- rust/numaflow-core/src/monovertex/metrics.rs | 323 ++++++++++++++---- rust/numaflow-core/src/shared/utils.rs | 8 +- rust/numaflow-core/src/sink/user_defined.rs | 2 +- rust/numaflow-core/src/source.rs | 1 - rust/numaflow-core/src/source/generator.rs | 1 - 9 files changed, 280 insertions(+), 83 deletions(-) diff --git a/rust/numaflow-core/src/config/components.rs b/rust/numaflow-core/src/config/components.rs index 87ca81adc0..d4f319924a 100644 --- a/rust/numaflow-core/src/config/components.rs +++ b/rust/numaflow-core/src/config/components.rs @@ -70,8 +70,6 @@ pub(crate) mod sink { const DEFAULT_MAX_SINK_RETRY_ATTEMPTS: u16 = u16::MAX; const DEFAULT_SINK_RETRY_INTERVAL_IN_MS: u32 = 1; - use crate::error::Error; - use crate::Result; use numaflow_models::models::{Backoff, RetryStrategy}; use std::fmt::Display; diff --git a/rust/numaflow-core/src/error.rs b/rust/numaflow-core/src/error.rs index 4fd78d8a1a..b64896f26b 100644 --- a/rust/numaflow-core/src/error.rs +++ b/rust/numaflow-core/src/error.rs @@ -34,6 +34,7 @@ pub enum Error { #[error("Proto Error - {0}")] Proto(String), + #[allow(clippy::upper_case_acronyms)] #[error("ISB Error - {0}")] ISB(String), diff --git 
a/rust/numaflow-core/src/monovertex.rs b/rust/numaflow-core/src/monovertex.rs index 64fe47c676..2ae0e63447 100644 --- a/rust/numaflow-core/src/monovertex.rs +++ b/rust/numaflow-core/src/monovertex.rs @@ -12,7 +12,7 @@ use tracing::{error, info}; use crate::config::components::{sink, source, transformer}; use crate::config::monovertex::MonovertexConfig; -use crate::config::{config, CustomResourceType, Settings}; +use crate::config::{config, CustomResourceType}; use crate::error::{self, Error}; use crate::shared::server_info::check_for_server_compatibility; use crate::shared::utils; @@ -260,7 +260,7 @@ async fn fetch_source( // now that we know it is not a user-defined source, it has to be a built-in if let source::SourceType::Generator(generator_config) = &config.source_config.source_type { let (source_read, source_ack, lag_reader) = - new_generator(generator_config.clone(), config.batch_size as usize)?; + new_generator(generator_config.clone(), config.batch_size)?; Ok(SourceType::Generator(source_read, source_ack, lag_reader)) } else { Err(Error::Config("No valid source configuration found".into())) @@ -345,8 +345,8 @@ mod tests { use std::fs::File; use std::io::Write; + use crate::config::components; use crate::config::monovertex::MonovertexConfig; - use crate::config::{components, Settings}; use crate::error; use crate::monovertex::start_forwarder; use crate::shared::server_info::ServerInfo; diff --git a/rust/numaflow-core/src/monovertex/forwarder.rs b/rust/numaflow-core/src/monovertex/forwarder.rs index 0b83a0dfe6..ac9d8605f0 100644 --- a/rust/numaflow-core/src/monovertex/forwarder.rs +++ b/rust/numaflow-core/src/monovertex/forwarder.rs @@ -214,7 +214,8 @@ impl Forwarder { start_time.elapsed().as_millis() ); forward_metrics() - .transform_time + .transformer + .time .get_or_create(&self.common_labels) .observe(start_time.elapsed().as_micros() as f64); @@ -255,7 +256,7 @@ impl Forwarder { &mut error_map, &mut fallback_msgs, &mut messages_to_send, - &retry_config, 
+ retry_config, ) .await; match status { @@ -284,7 +285,7 @@ impl Forwarder { &mut error_map, &mut fallback_msgs, &mut messages_to_send, - &retry_config, + retry_config, ); match need_retry { @@ -301,19 +302,21 @@ impl Forwarder { // If there are fallback messages, write them to the fallback sink if !fallback_msgs.is_empty() { - self.handle_fallback_messages(fallback_msgs, &retry_config) + self.handle_fallback_messages(fallback_msgs, retry_config) .await?; } forward_metrics() - .sink_time + .sink + .time .get_or_create(&self.common_labels) .observe(start_time_e2e.elapsed().as_micros() as f64); // update the metric for number of messages written to the sink // this included primary and fallback sink forward_metrics() - .sink_write_total + .sink + .write_total .get_or_create(&self.common_labels) .inc_by(msg_count); Ok(()) @@ -530,7 +533,8 @@ impl Forwarder { } // increment the metric for the fallback sink write forward_metrics() - .fbsink_write_total + .fb_sink + .write_total .get_or_create(&self.common_labels) .inc_by(fb_msg_count); Ok(()) @@ -572,7 +576,6 @@ mod tests { use tokio::sync::mpsc::Sender; use tokio_util::sync::CancellationToken; - use crate::config::config; use crate::monovertex::forwarder::ForwarderBuilder; use crate::monovertex::SourceType; use crate::shared::utils::create_rpc_channel; diff --git a/rust/numaflow-core/src/monovertex/metrics.rs b/rust/numaflow-core/src/monovertex/metrics.rs index 2fe3336b26..266b1a9abf 100644 --- a/rust/numaflow-core/src/monovertex/metrics.rs +++ b/rust/numaflow-core/src/monovertex/metrics.rs @@ -27,7 +27,6 @@ use tonic::transport::Channel; use tonic::Request; use tracing::{debug, error, info}; -use crate::config::config; use crate::source::SourceHandle; use crate::Error; @@ -37,6 +36,13 @@ const MVTX_NAME_LABEL: &str = "mvtx_name"; const REPLICA_LABEL: &str = "mvtx_replica"; const PENDING_PERIOD_LABEL: &str = "period"; +// The top-level metric registry is created with the GLOBAL_PREFIX +const GLOBAL_PREFIX: &str = 
"monovtx"; +// Prefixes for the sub-registries +const SINK_REGISTRY_PREFIX: &str = "sink"; +const FALLBACK_SINK_REGISTRY_PREFIX: &str = "fallback_sink"; +const TRANSFORMER_REGISTRY_PREFIX: &str = "transformer"; + // Define the metrics // Note: We do not add a suffix to the metric name, as the suffix is inferred through the metric type // by the prometheus client library @@ -44,24 +50,24 @@ const PENDING_PERIOD_LABEL: &str = "period"; // Note: Please keep consistent with the definitions in MonoVertex daemon // counters (please note the prefix _total, and read above link) -const READ_TOTAL: &str = "monovtx_read"; -const READ_BYTES_TOTAL: &str = "monovtx_read_bytes"; -const ACK_TOTAL: &str = "monovtx_ack"; -const SINK_WRITE_TOTAL: &str = "monovtx_sink_write"; -const DROPPED_TOTAL: &str = "monovtx_dropped"; -const FALLBACK_SINK_WRITE_TOTAL: &str = "monovtx_fallback_sink_write"; +const READ_TOTAL: &str = "read"; +const READ_BYTES_TOTAL: &str = "read_bytes"; +const ACK_TOTAL: &str = "ack"; +const SINK_WRITE_TOTAL: &str = "write"; +const DROPPED_TOTAL: &str = "dropped"; +const FALLBACK_SINK_WRITE_TOTAL: &str = "write"; // pending as gauge -const SOURCE_PENDING: &str = "monovtx_pending"; +const SOURCE_PENDING: &str = "pending"; // processing times as timers -const E2E_TIME: &str = "monovtx_processing_time"; -const READ_TIME: &str = "monovtx_read_time"; -const TRANSFORM_TIME: &str = "monovtx_transformer_time"; -const ACK_TIME: &str = "monovtx_ack_time"; -const SINK_TIME: &str = "monovtx_sink_time"; +const E2E_TIME: &str = "processing_time"; +const READ_TIME: &str = "read_time"; +const TRANSFORM_TIME: &str = "time"; +const ACK_TIME: &str = "ack_time"; +const SINK_TIME: &str = "time"; -/// Only used defined functions will have containers since rest +/// Only user defined functions will have containers since rest /// are builtins. We save the gRPC clients to retrieve metrics and also /// to do liveness checks. 
This means, these will be optionals since /// we do not require these for builtins. @@ -75,55 +81,70 @@ pub(crate) struct UserDefinedContainerState { /// The global register of all metrics. #[derive(Default)] -pub struct GlobalRegistry { +struct GlobalRegistry { // It is okay to use std mutex because we register each metric only one time. - pub registry: parking_lot::Mutex, + registry: parking_lot::Mutex, } impl GlobalRegistry { fn new() -> Self { GlobalRegistry { // Create a new registry for the metrics - registry: parking_lot::Mutex::new(Registry::default()), + registry: parking_lot::Mutex::new(Registry::with_prefix(GLOBAL_PREFIX)), } } } -/// GLOBAL_REGISTER is the static global registry which is initialized -// only once. -static GLOBAL_REGISTER: OnceLock = OnceLock::new(); +/// GLOBAL_REGISTRY is the static global registry which is initialized only once. +static GLOBAL_REGISTRY: OnceLock = OnceLock::new(); -/// global_registry is a helper function to get the GLOBAL_REGISTER +/// global_registry is a helper function to get the GLOBAL_REGISTRY fn global_registry() -> &'static GlobalRegistry { - GLOBAL_REGISTER.get_or_init(GlobalRegistry::new) + GLOBAL_REGISTRY.get_or_init(GlobalRegistry::new) } -// TODO: let's do sub-registry for forwarder so tomorrow we can add sink and source metrics. /// MonoVtxMetrics is a struct which is used for storing the metrics related to MonoVertex // These fields are exposed as pub to be used by other modules for // changing the value of the metrics // Each metric is defined as family of metrics, which means that they can be // differentiated by their label values assigned. -// The labels are provided in the form of Vec<(String, String) +// The labels are provided in the form of Vec<(String, String)> // The second argument is the metric kind. 
-pub struct MonoVtxMetrics { +pub(crate) struct MonoVtxMetrics { // counters - pub read_total: Family, Counter>, - pub read_bytes_total: Family, Counter>, - pub ack_total: Family, Counter>, - pub sink_write_total: Family, Counter>, - pub dropped_total: Family, Counter>, - pub fbsink_write_total: Family, Counter>, + pub(crate) read_total: Family, Counter>, + pub(crate) read_bytes_total: Family, Counter>, + pub(crate) ack_total: Family, Counter>, + pub(crate) dropped_total: Family, Counter>, // gauge - pub source_pending: Family, Gauge>, + pub(crate) source_pending: Family, Gauge>, // timers - pub e2e_time: Family, Histogram>, - pub read_time: Family, Histogram>, - pub transform_time: Family, Histogram>, - pub ack_time: Family, Histogram>, - pub sink_time: Family, Histogram>, + pub(crate) e2e_time: Family, Histogram>, + pub(crate) read_time: Family, Histogram>, + pub(crate) ack_time: Family, Histogram>, + + pub(crate) transformer: TransformerMetrics, + pub(crate) sink: SinkMetrics, + pub(crate) fb_sink: FallbackSinkMetrics, +} + +/// Family of metrics for the sink +pub(crate) struct SinkMetrics { + pub(crate) write_total: Family, Counter>, + pub(crate) time: Family, Histogram>, +} + +/// Family of metrics for the Fallback Sink +pub(crate) struct FallbackSinkMetrics { + pub(crate) write_total: Family, Counter>, +} + +/// Family of metrics for the Transformer +pub(crate) struct TransformerMetrics { + /// Transformer latency + pub(crate) time: Family, Histogram>, } /// Exponential bucket distribution with range. 
@@ -154,9 +175,7 @@ impl MonoVtxMetrics { read_total: Family::, Counter>::default(), read_bytes_total: Family::, Counter>::default(), ack_total: Family::, Counter>::default(), - sink_write_total: Family::, Counter>::default(), dropped_total: Family::, Counter>::default(), - fbsink_write_total: Family::, Counter>::default(), // gauge source_pending: Family::, Gauge>::default(), // timers @@ -167,15 +186,26 @@ impl MonoVtxMetrics { read_time: Family::, Histogram>::new_with_constructor(|| { Histogram::new(exponential_buckets_range(100.0, 60000000.0 * 15.0, 10)) }), - transform_time: Family::, Histogram>::new_with_constructor( - || Histogram::new(exponential_buckets_range(100.0, 60000000.0 * 15.0, 10)), - ), ack_time: Family::, Histogram>::new_with_constructor(|| { Histogram::new(exponential_buckets_range(100.0, 60000000.0 * 15.0, 10)) }), - sink_time: Family::, Histogram>::new_with_constructor(|| { - Histogram::new(exponential_buckets_range(100.0, 60000000.0 * 15.0, 10)) - }), + + transformer: TransformerMetrics { + time: Family::, Histogram>::new_with_constructor(|| { + Histogram::new(exponential_buckets_range(100.0, 60000000.0 * 15.0, 10)) + }), + }, + + sink: SinkMetrics { + write_total: Family::, Counter>::default(), + time: Family::, Histogram>::new_with_constructor(|| { + Histogram::new(exponential_buckets_range(100.0, 60000000.0 * 15.0, 10)) + }), + }, + + fb_sink: FallbackSinkMetrics { + write_total: Family::, Counter>::default(), + }, }; let mut registry = global_registry().registry.lock(); @@ -185,11 +215,6 @@ impl MonoVtxMetrics { "A Counter to keep track of the total number of messages read from the source", metrics.read_total.clone(), ); - registry.register( - SINK_WRITE_TOTAL, - "A Counter to keep track of the total number of messages written to the sink", - metrics.sink_write_total.clone(), - ); registry.register( ACK_TOTAL, "A Counter to keep track of the total number of messages acknowledged by the sink", @@ -207,12 +232,6 @@ impl MonoVtxMetrics { 
metrics.dropped_total.clone(), ); - registry.register( - FALLBACK_SINK_WRITE_TOTAL, - "A Counter to keep track of the total number of messages written to the fallback sink", - metrics.fbsink_write_total.clone(), - ); - // gauges registry.register( SOURCE_PENDING, @@ -230,20 +249,40 @@ impl MonoVtxMetrics { "A Histogram to keep track of the total time taken to Read from the Source, in microseconds", metrics.read_time.clone(), ); - registry.register( - TRANSFORM_TIME, - "A Histogram to keep track of the total time taken to Transform, in microseconds", - metrics.transform_time.clone(), - ); registry.register( ACK_TIME, "A Histogram to keep track of the total time taken to Ack to the Source, in microseconds", metrics.ack_time.clone(), ); - registry.register( + + // Transformer metrics + let transformer_registry = registry.sub_registry_with_prefix(TRANSFORMER_REGISTRY_PREFIX); + transformer_registry.register( + TRANSFORM_TIME, + "A Histogram to keep track of the total time taken to Transform, in microseconds", + metrics.transformer.time.clone(), + ); + + // Sink metrics + let sink_registry = registry.sub_registry_with_prefix(SINK_REGISTRY_PREFIX); + sink_registry.register( + SINK_WRITE_TOTAL, + "A Counter to keep track of the total number of messages written to the sink", + metrics.sink.write_total.clone(), + ); + sink_registry.register( SINK_TIME, "A Histogram to keep track of the total time taken to Write to the Sink, in microseconds", - metrics.sink_time.clone(), + metrics.sink.time.clone(), + ); + + // Fallback Sink metrics + let fb_sink_registry = registry.sub_registry_with_prefix(FALLBACK_SINK_REGISTRY_PREFIX); + + fb_sink_registry.register( + FALLBACK_SINK_WRITE_TOTAL, + "A Counter to keep track of the total number of messages written to the fallback sink", + metrics.fb_sink.write_total.clone(), ); metrics } @@ -837,4 +876,162 @@ mod tests { fn test_exponential_buckets_range_negative_min() { let _ = exponential_buckets_range(-1.0, 100.0, 10).collect::>(); } + + 
#[test] + fn test_metric_names() { + let metrics = forward_metrics(); + // Use a fixed set of labels instead of the ones from mvtx_forward_metric_labels() since other test functions may also set it. + let common_labels = vec![ + ( + MVTX_NAME_LABEL.to_string(), + "test-monovertex-metric-names".to_string(), + ), + (REPLICA_LABEL.to_string(), "3".to_string()), + ]; + // Populate all metrics + metrics.read_total.get_or_create(&common_labels).inc(); + metrics.read_bytes_total.get_or_create(&common_labels).inc(); + metrics.ack_total.get_or_create(&common_labels).inc(); + metrics.dropped_total.get_or_create(&common_labels).inc(); + metrics.source_pending.get_or_create(&common_labels).set(10); + metrics.e2e_time.get_or_create(&common_labels).observe(10.0); + metrics.read_time.get_or_create(&common_labels).observe(3.0); + metrics.ack_time.get_or_create(&common_labels).observe(2.0); + + metrics + .transformer + .time + .get_or_create(&common_labels) + .observe(5.0); + + metrics.sink.write_total.get_or_create(&common_labels).inc(); + metrics.sink.time.get_or_create(&common_labels).observe(4.0); + + metrics + .fb_sink + .write_total + .get_or_create(&common_labels) + .inc(); + + // Validate the metric names + let state = global_registry().registry.lock(); + let mut buffer = String::new(); + encode(&mut buffer, &state).unwrap(); + + let expected = r#" +# HELP monovtx_read A Counter to keep track of the total number of messages read from the source. +# TYPE monovtx_read counter +monovtx_read_total{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +# HELP monovtx_ack A Counter to keep track of the total number of messages acknowledged by the sink. +# TYPE monovtx_ack counter +monovtx_ack_total{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +# HELP monovtx_read_bytes A Counter to keep track of the total number of bytes read from the source. 
+# TYPE monovtx_read_bytes counter +monovtx_read_bytes_total{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +# HELP monovtx_dropped A Counter to keep track of the total number of messages dropped by the monovtx. +# TYPE monovtx_dropped counter +monovtx_dropped_total{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +# HELP monovtx_pending A Gauge to keep track of the total number of pending messages for the monovtx. +# TYPE monovtx_pending gauge +monovtx_pending{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 10 +# HELP monovtx_processing_time A Histogram to keep track of the total time taken to forward a chunk, in microseconds. +# TYPE monovtx_processing_time histogram +monovtx_processing_time_sum{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 10.0 +monovtx_processing_time_count{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_processing_time_bucket{le="100.0",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_processing_time_bucket{le="592.5071727239734",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_processing_time_bucket{le="3510.6474972935645",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_processing_time_bucket{le="20800.83823051903",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_processing_time_bucket{le="123246.4585025357",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_processing_time_bucket{le="730244.1067557994",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_processing_time_bucket{le="4326748.710922221",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_processing_time_bucket{le="25636296.457956219",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_processing_time_bucket{le="151896895.33417253",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 
+monovtx_processing_time_bucket{le="899999999.9999987",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_processing_time_bucket{le="+Inf",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +# HELP monovtx_read_time A Histogram to keep track of the total time taken to Read from the Source, in microseconds. +# TYPE monovtx_read_time histogram +monovtx_read_time_sum{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 3.0 +monovtx_read_time_count{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_read_time_bucket{le="100.0",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_read_time_bucket{le="592.5071727239734",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_read_time_bucket{le="3510.6474972935645",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_read_time_bucket{le="20800.83823051903",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_read_time_bucket{le="123246.4585025357",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_read_time_bucket{le="730244.1067557994",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_read_time_bucket{le="4326748.710922221",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_read_time_bucket{le="25636296.457956219",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_read_time_bucket{le="151896895.33417253",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_read_time_bucket{le="899999999.9999987",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_read_time_bucket{le="+Inf",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +# HELP monovtx_ack_time A Histogram to keep track of the total time taken to Ack to the Source, in microseconds. 
+# TYPE monovtx_ack_time histogram +monovtx_ack_time_sum{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 2.0 +monovtx_ack_time_count{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_ack_time_bucket{le="100.0",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_ack_time_bucket{le="592.5071727239734",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_ack_time_bucket{le="3510.6474972935645",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_ack_time_bucket{le="20800.83823051903",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_ack_time_bucket{le="123246.4585025357",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_ack_time_bucket{le="730244.1067557994",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_ack_time_bucket{le="4326748.710922221",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_ack_time_bucket{le="25636296.457956219",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_ack_time_bucket{le="151896895.33417253",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_ack_time_bucket{le="899999999.9999987",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_ack_time_bucket{le="+Inf",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +# HELP monovtx_transformer_time A Histogram to keep track of the total time taken to Transform, in microseconds. 
+# TYPE monovtx_transformer_time histogram +monovtx_transformer_time_sum{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 5.0 +monovtx_transformer_time_count{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_transformer_time_bucket{le="100.0",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_transformer_time_bucket{le="592.5071727239734",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_transformer_time_bucket{le="3510.6474972935645",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_transformer_time_bucket{le="20800.83823051903",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_transformer_time_bucket{le="123246.4585025357",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_transformer_time_bucket{le="730244.1067557994",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_transformer_time_bucket{le="4326748.710922221",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_transformer_time_bucket{le="25636296.457956219",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_transformer_time_bucket{le="151896895.33417253",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_transformer_time_bucket{le="899999999.9999987",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +monovtx_transformer_time_bucket{le="+Inf",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +# HELP monovtx_sink_write A Counter to keep track of the total number of messages written to the sink. +# TYPE monovtx_sink_write counter +monovtx_sink_write_total{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 +# HELP monovtx_sink_time A Histogram to keep track of the total time taken to Write to the Sink, in microseconds. 
+# TYPE monovtx_sink_time histogram
+monovtx_sink_time_sum{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 4.0
+monovtx_sink_time_count{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1
+monovtx_sink_time_bucket{le="100.0",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1
+monovtx_sink_time_bucket{le="592.5071727239734",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1
+monovtx_sink_time_bucket{le="3510.6474972935645",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1
+monovtx_sink_time_bucket{le="20800.83823051903",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1
+monovtx_sink_time_bucket{le="123246.4585025357",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1
+monovtx_sink_time_bucket{le="730244.1067557994",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1
+monovtx_sink_time_bucket{le="4326748.710922221",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1
+monovtx_sink_time_bucket{le="25636296.457956219",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1
+monovtx_sink_time_bucket{le="151896895.33417253",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1
+monovtx_sink_time_bucket{le="899999999.9999987",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1
+monovtx_sink_time_bucket{le="+Inf",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1
+# HELP monovtx_fallback_sink_write A Counter to keep track of the total number of messages written to the fallback sink.
+# TYPE monovtx_fallback_sink_write counter
+monovtx_fallback_sink_write_total{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1
+# EOF
+ "#;
+
+ // The registry may contain metrics from other tests also. Extract the ones created from this test using the unique labels we specify. 
+ let labels = common_labels + .iter() + .map(|(k, v)| format!("{}=\"{}\"", k, v)) + .collect::>() + .join(","); + + let got = buffer + .trim() + .lines() + .filter(|line| line.starts_with('#') || line.contains(&labels)) + .collect::>() + .join("\n"); + + assert_eq!(got.trim(), expected.trim()); + } } diff --git a/rust/numaflow-core/src/shared/utils.rs b/rust/numaflow-core/src/shared/utils.rs index f55e6b87a5..aa2d802e6d 100644 --- a/rust/numaflow-core/src/shared/utils.rs +++ b/rust/numaflow-core/src/shared/utils.rs @@ -77,12 +77,12 @@ pub(crate) async fn start_metrics_server( metrics_config: MetricsConfig, metrics_state: UserDefinedContainerState, ) -> JoinHandle<()> { - let metrics_port = metrics_config.metrics_server_listen_port.clone(); tokio::spawn(async move { // Start the metrics server, which server the prometheus metrics. - let metrics_addr: SocketAddr = format!("0.0.0.0:{}", metrics_port) - .parse() - .expect("Invalid address"); + let metrics_addr: SocketAddr = + format!("0.0.0.0:{}", metrics_config.metrics_server_listen_port) + .parse() + .expect("Invalid address"); if let Err(e) = start_metrics_https_server(metrics_addr, metrics_state).await { error!("metrics server error: {:?}", e); diff --git a/rust/numaflow-core/src/sink/user_defined.rs b/rust/numaflow-core/src/sink/user_defined.rs index 8d2d227800..ba20bdbef1 100644 --- a/rust/numaflow-core/src/sink/user_defined.rs +++ b/rust/numaflow-core/src/sink/user_defined.rs @@ -118,7 +118,7 @@ mod tests { use super::*; use crate::error::Result; - use crate::message::{Message, MessageID, Offset}; + use crate::message::{Message, MessageID}; use crate::shared::utils::create_rpc_channel; use crate::sink::user_defined::UserDefinedSink; diff --git a/rust/numaflow-core/src/source.rs b/rust/numaflow-core/src/source.rs index 1816e9fa04..3af6dc8190 100644 --- a/rust/numaflow-core/src/source.rs +++ b/rust/numaflow-core/src/source.rs @@ -1,6 +1,5 @@ use tokio::sync::{mpsc, oneshot}; -use crate::config::config; use 
crate::{ message::{Message, Offset}, monovertex::SourceType, diff --git a/rust/numaflow-core/src/source/generator.rs b/rust/numaflow-core/src/source/generator.rs index 7d42a62a8e..cf0ffb327e 100644 --- a/rust/numaflow-core/src/source/generator.rs +++ b/rust/numaflow-core/src/source/generator.rs @@ -32,7 +32,6 @@ mod stream_generator { use tokio::time::MissedTickBehavior; use tracing::warn; - use crate::config; use crate::config::components::source::GeneratorConfig; use crate::message::{ get_vertex_name, get_vertex_replica, Message, MessageID, Offset, StringOffset, From f21e75bcf1e133d26eed83cec9983501f3648ae3 Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Tue, 22 Oct 2024 20:04:31 -0700 Subject: [PATCH 124/188] fix(controller): incorporate instance into lease lock name (#2177) Signed-off-by: Derek Wang --- pkg/reconciler/cmd/start.go | 8 ++++- pkg/shared/util/string.go | 6 ++++ pkg/shared/util/string_test.go | 61 ++++++++++++++++++++++++++++++++++ 3 files changed, 74 insertions(+), 1 deletion(-) diff --git a/pkg/reconciler/cmd/start.go b/pkg/reconciler/cmd/start.go index 0b0df1847d..f0477e7038 100644 --- a/pkg/reconciler/cmd/start.go +++ b/pkg/reconciler/cmd/start.go @@ -62,13 +62,19 @@ func Start(namespaced bool, managedNamespace string) { logger.Fatalf("ENV %s not found", dfv1.EnvImage) } + leaderElectionID := "numaflow-controller-lock" + nomalizedInstance := sharedutil.DNS1035(config.GetInstance()) + if len(nomalizedInstance) > 0 { + leaderElectionID = leaderElectionID + "-" + nomalizedInstance + } + opts := ctrl.Options{ Metrics: metricsserver.Options{ BindAddress: ":9090", }, HealthProbeBindAddress: ":8081", LeaderElection: true, - LeaderElectionID: "numaflow-controller-lock", + LeaderElectionID: leaderElectionID, } if sharedutil.LookupEnvStringOr(dfv1.EnvLeaderElectionDisabled, "false") == "true" { diff --git a/pkg/shared/util/string.go b/pkg/shared/util/string.go index 23e50e05d6..3ab9a3df5d 100644 --- a/pkg/shared/util/string.go +++ 
b/pkg/shared/util/string.go @@ -19,6 +19,7 @@ package util import ( "crypto/rand" "math/big" + "regexp" "strings" "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" @@ -86,3 +87,8 @@ func CompareSlice(operator v1alpha1.LogicOperator, a []string, b []string) bool } return false } + +func DNS1035(str string) string { + re := regexp.MustCompile(`[^a-z0-9-]+`) + return re.ReplaceAllString(strings.ToLower(str), "-") +} diff --git a/pkg/shared/util/string_test.go b/pkg/shared/util/string_test.go index 071effc5c0..5e358318a8 100644 --- a/pkg/shared/util/string_test.go +++ b/pkg/shared/util/string_test.go @@ -149,3 +149,64 @@ func TestCompareSlice(t *testing.T) { }) } } + +func TestDNS1035(t *testing.T) { + tests := []struct { + name string + input string + expected string + }{ + { + name: "simple lowercase conversion", + input: "HELLO", + expected: "hello", + }, + { + name: "replace special characters", + input: "hello@world!123", + expected: "hello-world-123", + }, + { + name: "multiple consecutive special chars", + input: "hello!!!world###123", + expected: "hello-world-123", + }, + { + name: "spaces and underscores", + input: "hello_world space test", + expected: "hello-world-space-test", + }, + { + name: "empty string", + input: "", + expected: "", + }, + { + name: "only special characters", + input: "@#$%^&*", + expected: "-", + }, + { + name: "mixed case with numbers and hyphens", + input: "My-Cool-Service123", + expected: "my-cool-service123", + }, + { + name: "unicode characters", + input: "héllo→wörld", + expected: "h-llo-w-rld", + }, + { + name: "empty", + input: "", + expected: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := DNS1035(tt.input) + assert.Equal(t, tt.expected, result) + }) + } +} From ee27af35aa7920d26068e4c03cb6efdf874f08fc Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Wed, 23 Oct 2024 09:35:15 -0700 Subject: [PATCH 125/188] fix(metrics): fix incorrect metric label and add docs (#2180) 
Signed-off-by: Derek Wang --- docs/operations/metrics/metrics.md | 35 ++++++++++++++++-------------- pkg/reconciler/metrics.go | 2 +- 2 files changed, 20 insertions(+), 17 deletions(-) diff --git a/docs/operations/metrics/metrics.md b/docs/operations/metrics/metrics.md index 049bcc98ab..3b64f46924 100644 --- a/docs/operations/metrics/metrics.md +++ b/docs/operations/metrics/metrics.md @@ -69,22 +69,25 @@ These metrics can be used to determine the latency of your pipeline. ### Errors -These metrics can be used to determine if there are any errors in the pipeline - -| Metric name | Metric type | Labels | Description | -| --------------------------------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------ | -| `pipeline_data_processing_health` | Gauge | `pipeline=` | Pipeline data processing health status. 1: Healthy, 0: Unknown, -1: Warning, -2: Critical | -| `forwarder_platform_error_total` | Counter | `pipeline=`
`vertex=`
`vertex_type=`
`replica=` | Indicates any internal errors which could stop pipeline processing | -| `forwarder_read_error_total` | Counter | `pipeline=`
`vertex=`
`vertex_type=`
`replica=`
`partition_name=` | Indicates any errors while reading messages by the forwarder | -| `forwarder_write_error_total` | Counter | `pipeline=`
`vertex=` `vertex_type=`

`replica=`
`partition_name=` | Indicates any errors while writing messages by the forwarder | -| `forwarder_ack_error_total` | Counter | `pipeline=`
`vertex=`
`vertex_type=`
`replica=`
`partition_name=` | Indicates any errors while acknowledging messages by the forwarder | -| `kafka_source_offset_ack_errors` | Counter | `pipeline=`
`vertex=` | Indicates any kafka acknowledgement errors | -| `kafka_sink_write_error_total` | Counter | `pipeline=`
`vertex=` | Provides the number of errors while writing to the Kafka sink | -| `kafka_sink_write_timeout_total` | Counter | `pipeline=`
`vertex=` | Provides the write timeouts while writing to the Kafka sink | -| `isb_jetstream_read_error_total` | Counter | `partition_name=` | Indicates any read errors with NATS Jetstream ISB | -| `isb_jetstream_write_error_total` | Counter | `partition_name=` | Indicates any write errors with NATS Jetstream ISB | -| `isb_redis_read_error_total` | Counter | `partition_name=` | Indicates any read errors with Redis ISB | -| `isb_redis_write_error_total` | Counter | `partition_name=` | Indicates any write errors with Redis ISB | +These metrics can be used to determine if there are any errors in the pipeline. + +| Metric name | Metric type | Labels | Description | +| --------------------------------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- | +| `pipeline_data_processing_health` | Gauge | `pipeline=` | Pipeline data processing health status. 1: Healthy, 0: Unknown, -1: Warning, -2: Critical | +| `controller_isbsvc_health` | Gauge | `ns=`
`isbsvc=` | A metric to indicate whether the ISB Service is healthy. '1' means healthy, '0' means unhealthy | +| `controller_pipeline_health` | Gauge | `ns=`
`pipeline=` | A metric to indicate whether the Pipeline is healthy. '1' means healthy, '0' means unhealthy | +| `controller_monovtx_health` | Gauge | `ns=`
`mvtx_name=` | A metric to indicate whether the MonoVertex is healthy. '1' means healthy, '0' means unhealthy | +| `forwarder_platform_error_total` | Counter | `pipeline=`
`vertex=`
`vertex_type=`
`replica=` | Indicates any internal errors which could stop pipeline processing | +| `forwarder_read_error_total` | Counter | `pipeline=`
`vertex=`
`vertex_type=`
`replica=`
`partition_name=` | Indicates any errors while reading messages by the forwarder | +| `forwarder_write_error_total` | Counter | `pipeline=`
`vertex=` `vertex_type=`

`replica=`
`partition_name=` | Indicates any errors while writing messages by the forwarder | +| `forwarder_ack_error_total` | Counter | `pipeline=`
`vertex=`
`vertex_type=`
`replica=`
`partition_name=` | Indicates any errors while acknowledging messages by the forwarder | +| `kafka_source_offset_ack_errors` | Counter | `pipeline=`
`vertex=` | Indicates any kafka acknowledgement errors | +| `kafka_sink_write_error_total` | Counter | `pipeline=`
`vertex=` | Provides the number of errors while writing to the Kafka sink | +| `kafka_sink_write_timeout_total` | Counter | `pipeline=`
`vertex=` | Provides the write timeouts while writing to the Kafka sink | +| `isb_jetstream_read_error_total` | Counter | `partition_name=` | Indicates any read errors with NATS Jetstream ISB | +| `isb_jetstream_write_error_total` | Counter | `partition_name=` | Indicates any write errors with NATS Jetstream ISB | +| `isb_redis_read_error_total` | Counter | `partition_name=` | Indicates any read errors with Redis ISB | +| `isb_redis_write_error_total` | Counter | `partition_name=` | Indicates any write errors with Redis ISB | ### Saturation diff --git a/pkg/reconciler/metrics.go b/pkg/reconciler/metrics.go index ce96436556..ec117a9e52 100644 --- a/pkg/reconciler/metrics.go +++ b/pkg/reconciler/metrics.go @@ -43,7 +43,7 @@ var ( Subsystem: "controller", Name: "pipeline_health", Help: "A metric to indicate whether the Pipeline is healthy. '1' means healthy, '0' means unhealthy", - }, []string{metrics.LabelNamespace, metrics.LabelISBService}) + }, []string{metrics.LabelNamespace, metrics.LabelPipeline}) MonoVertexHealth = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Subsystem: "controller", From 51b9347c56b7a6a568061b3ef46da14172ec5eef Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Wed, 23 Oct 2024 12:50:16 -0700 Subject: [PATCH 126/188] chore: mono vertex validation (#2182) Signed-off-by: Derek Wang --- docs/specifications/side-inputs.md | 4 +- pkg/reconciler/cmd/start.go | 6 +- pkg/reconciler/isbsvc/controller.go | 3 +- pkg/reconciler/isbsvc/controller_test.go | 2 +- pkg/reconciler/monovertex/controller.go | 7 + pkg/reconciler/pipeline/controller.go | 3 +- pkg/reconciler/pipeline/controller_test.go | 159 ++++++++++++ .../isbsvc_validate.go} | 2 +- .../isbsvc_validate_test.go} | 2 +- pkg/reconciler/validator/mvtx_validate.go | 64 +++++ .../validator/mvtx_validate_test.go | 164 +++++++++++++ .../pipeline_validate.go} | 226 +++++++++--------- .../pipeline_validate_test.go} | 4 +- pkg/webhook/validator/isbsvc.go | 6 +- pkg/webhook/validator/pipeline.go | 6 +- 15 files 
changed, 531 insertions(+), 127 deletions(-) rename pkg/reconciler/{isbsvc/validate.go => validator/isbsvc_validate.go} (99%) rename pkg/reconciler/{isbsvc/validate_test.go => validator/isbsvc_validate_test.go} (99%) create mode 100644 pkg/reconciler/validator/mvtx_validate.go create mode 100644 pkg/reconciler/validator/mvtx_validate_test.go rename pkg/reconciler/{pipeline/validate.go => validator/pipeline_validate.go} (79%) rename pkg/reconciler/{pipeline/validate_test.go => validator/pipeline_validate_test.go} (99%) diff --git a/docs/specifications/side-inputs.md b/docs/specifications/side-inputs.md index de1e1ba67b..8ef6871c53 100644 --- a/docs/specifications/side-inputs.md +++ b/docs/specifications/side-inputs.md @@ -72,12 +72,12 @@ Using K8s CronJob/Job will be a [challenge](https://github.com/istio/istio/issue When Side Inputs is enabled for a pipeline, each of its vertex pods will have a second init container added, the init container will have a shared volume (emptyDir) mounted, -and the same volume will be mounted to the User-defined Function/Sink/Transformer container. +and the same volume will be mounted to the User-defined Function/Source/Sink/Transformer container. The init container reads from the data store, and saves to the shared volume. A sidecar container will also be injected by the controller, and it mounts the same volume as above. The sidecar runs a service provided by numaflow, watching the Side Inputs data from the data store, if there’s any update, reads the data and updates the shared volume. -In the User-defined Function/Sink/Sink container, a helper function will be provided by Numaflow SDK, to return the Side Input data. The helper function caches the Side Inputs data in the memory, but performs thread safe updates if it watches the changes in the shared volume. +In the User-defined Function/Source/Sink/Transformer container, a helper function will be provided by Numaflow SDK, to return the Side Input data. 
The helper function caches the Side Inputs data in the memory, but performs thread safe updates if it watches the changes in the shared volume. ### Numaflow SDK diff --git a/pkg/reconciler/cmd/start.go b/pkg/reconciler/cmd/start.go index f0477e7038..c4da13a525 100644 --- a/pkg/reconciler/cmd/start.go +++ b/pkg/reconciler/cmd/start.go @@ -63,9 +63,9 @@ func Start(namespaced bool, managedNamespace string) { } leaderElectionID := "numaflow-controller-lock" - nomalizedInstance := sharedutil.DNS1035(config.GetInstance()) - if len(nomalizedInstance) > 0 { - leaderElectionID = leaderElectionID + "-" + nomalizedInstance + normalizedInstance := sharedutil.DNS1035(config.GetInstance()) + if len(normalizedInstance) > 0 { + leaderElectionID = leaderElectionID + "-" + normalizedInstance } opts := ctrl.Options{ diff --git a/pkg/reconciler/isbsvc/controller.go b/pkg/reconciler/isbsvc/controller.go index 987c9322f7..1cddae5d4b 100644 --- a/pkg/reconciler/isbsvc/controller.go +++ b/pkg/reconciler/isbsvc/controller.go @@ -36,6 +36,7 @@ import ( dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" "github.com/numaproj/numaflow/pkg/reconciler" "github.com/numaproj/numaflow/pkg/reconciler/isbsvc/installer" + "github.com/numaproj/numaflow/pkg/reconciler/validator" "github.com/numaproj/numaflow/pkg/shared/logging" ) @@ -122,7 +123,7 @@ func (r *interStepBufferServiceReconciler) reconcile(ctx context.Context, isbSvc isbSvc.Status.InitConditions() isbSvc.Status.SetObservedGeneration(isbSvc.Generation) - if err := ValidateInterStepBufferService(isbSvc); err != nil { + if err := validator.ValidateInterStepBufferService(isbSvc); err != nil { log.Errorw("Validation failed", zap.Error(err)) isbSvc.Status.MarkNotConfigured("InvalidSpec", err.Error()) return err diff --git a/pkg/reconciler/isbsvc/controller_test.go b/pkg/reconciler/isbsvc/controller_test.go index 2a24ec7c69..97189ff9e2 100644 --- a/pkg/reconciler/isbsvc/controller_test.go +++ b/pkg/reconciler/isbsvc/controller_test.go 
@@ -229,7 +229,7 @@ func TestNeedsFinalizer(t *testing.T) { t.Run("needs finalizer jetstream", func(t *testing.T) { testStorageClass := "test" - testIsbs := testJetStreamIsbs.DeepCopy() + testIsbs := jetStreamIsbs.DeepCopy() testIsbs.Spec.JetStream.Persistence = &dfv1.PersistenceStrategy{ StorageClassName: &testStorageClass, } diff --git a/pkg/reconciler/monovertex/controller.go b/pkg/reconciler/monovertex/controller.go index a40f620c69..8747e6e7c9 100644 --- a/pkg/reconciler/monovertex/controller.go +++ b/pkg/reconciler/monovertex/controller.go @@ -41,6 +41,7 @@ import ( dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" "github.com/numaproj/numaflow/pkg/reconciler" mvtxscaling "github.com/numaproj/numaflow/pkg/reconciler/monovertex/scaling" + "github.com/numaproj/numaflow/pkg/reconciler/validator" "github.com/numaproj/numaflow/pkg/shared/logging" sharedutil "github.com/numaproj/numaflow/pkg/shared/util" ) @@ -120,6 +121,12 @@ func (mr *monoVertexReconciler) reconcile(ctx context.Context, monoVtx *dfv1.Mon mr.scaler.StartWatching(mVtxKey) } + if err := validator.ValidateMonoVertex(monoVtx); err != nil { + mr.recorder.Eventf(monoVtx, corev1.EventTypeWarning, "ValidateMonoVertexFailed", "Invalid mvtx: %s", err.Error()) + monoVtx.Status.MarkDeployFailed("InvalidSpec", err.Error()) + return ctrl.Result{}, err + } + if err := mr.orchestrateFixedResources(ctx, monoVtx); err != nil { monoVtx.Status.MarkDeployFailed("OrchestrateFixedResourcesFailed", err.Error()) mr.recorder.Eventf(monoVtx, corev1.EventTypeWarning, "OrchestrateFixedResourcesFailed", "OrchestrateFixedResourcesFailed: %s", err.Error()) diff --git a/pkg/reconciler/pipeline/controller.go b/pkg/reconciler/pipeline/controller.go index 0af2fb4788..05183a27c1 100644 --- a/pkg/reconciler/pipeline/controller.go +++ b/pkg/reconciler/pipeline/controller.go @@ -46,6 +46,7 @@ import ( daemonclient "github.com/numaproj/numaflow/pkg/daemon/client" "github.com/numaproj/numaflow/pkg/metrics" 
"github.com/numaproj/numaflow/pkg/reconciler" + "github.com/numaproj/numaflow/pkg/reconciler/validator" "github.com/numaproj/numaflow/pkg/shared/logging" sharedutil "github.com/numaproj/numaflow/pkg/shared/util" ) @@ -157,7 +158,7 @@ func (r *pipelineReconciler) reconcile(ctx context.Context, pl *dfv1.Pipeline) ( if !controllerutil.ContainsFinalizer(pl, finalizerName) { controllerutil.AddFinalizer(pl, finalizerName) } - if err := ValidatePipeline(pl); err != nil { + if err := validator.ValidatePipeline(pl); err != nil { r.recorder.Eventf(pl, corev1.EventTypeWarning, "ValidatePipelineFailed", "Invalid pipeline: %s", err.Error()) pl.Status.MarkNotConfigured("InvalidSpec", err.Error()) return ctrl.Result{}, err diff --git a/pkg/reconciler/pipeline/controller_test.go b/pkg/reconciler/pipeline/controller_test.go index aafff27cc3..c94555cf7f 100644 --- a/pkg/reconciler/pipeline/controller_test.go +++ b/pkg/reconciler/pipeline/controller_test.go @@ -99,6 +99,42 @@ var ( }, } + testPipeline = &dfv1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pl", + Namespace: "test-ns", + }, + Spec: dfv1.PipelineSpec{ + Vertices: []dfv1.AbstractVertex{ + { + Name: "input", + Source: &dfv1.Source{ + UDTransformer: &dfv1.UDTransformer{ + Builtin: &dfv1.Transformer{Name: "filter"}, + }}, + }, + { + Name: "p1", + UDF: &dfv1.UDF{ + Builtin: &dfv1.Function{Name: "cat"}, + }, + }, + { + Name: "output", + Sink: &dfv1.Sink{}, + }, + }, + Edges: []dfv1.Edge{ + {From: "input", To: "p1"}, + {From: "p1", To: "output"}, + }, + Watermark: dfv1.Watermark{ + Disabled: false, + MaxDelay: &metav1.Duration{Duration: 5 * time.Second}, + }, + }, + } + testPipelineWithSideinput = &dfv1.Pipeline{ ObjectMeta: metav1.ObjectMeta{ Name: "test-pl", @@ -146,6 +182,129 @@ var ( }, }, } + + testReducePipeline = &dfv1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pl", + Namespace: "test-ns", + }, + Spec: dfv1.PipelineSpec{ + Vertices: []dfv1.AbstractVertex{ + { + Name: "input", + Source: 
&dfv1.Source{}, + }, + { + Name: "p1", + UDF: &dfv1.UDF{ + Container: &dfv1.Container{ + Image: "my-image", + }, + GroupBy: &dfv1.GroupBy{ + Window: dfv1.Window{ + Fixed: &dfv1.FixedWindow{ + Length: &metav1.Duration{ + Duration: 60 * time.Second, + }, + }, + }, + Storage: &dfv1.PBQStorage{ + PersistentVolumeClaim: &dfv1.PersistenceStrategy{ + StorageClassName: nil, + AccessMode: &dfv1.DefaultAccessMode, + VolumeSize: &dfv1.DefaultVolumeSize, + }, + }, + }, + }, + }, + { + Name: "p2", + Partitions: ptr.To[int32](2), + UDF: &dfv1.UDF{ + Container: &dfv1.Container{ + Image: "my-image", + }, + GroupBy: &dfv1.GroupBy{ + Window: dfv1.Window{ + Fixed: &dfv1.FixedWindow{ + Length: &metav1.Duration{ + Duration: 60 * time.Second, + }, + }, + }, + Keyed: true, + Storage: &dfv1.PBQStorage{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + }, + }, + { + Name: "p3", + UDF: &dfv1.UDF{ + Container: &dfv1.Container{ + Image: "my-image", + }, + GroupBy: &dfv1.GroupBy{ + Window: dfv1.Window{ + Sliding: &dfv1.SlidingWindow{ + Length: &metav1.Duration{ + Duration: time.Duration(60 * time.Second), + }, + Slide: &metav1.Duration{ + Duration: time.Duration(30 * time.Second), + }, + }, + }, + Storage: &dfv1.PBQStorage{ + PersistentVolumeClaim: &dfv1.PersistenceStrategy{ + StorageClassName: nil, + AccessMode: &dfv1.DefaultAccessMode, + VolumeSize: &dfv1.DefaultVolumeSize, + }, + }, + }, + }, + }, + { + Name: "p4", + UDF: &dfv1.UDF{ + Container: &dfv1.Container{ + Image: "my-image", + }, + GroupBy: &dfv1.GroupBy{ + Window: dfv1.Window{ + Session: &dfv1.SessionWindow{ + Timeout: &metav1.Duration{ + Duration: time.Duration(10 * time.Second), + }, + }, + }, + Storage: &dfv1.PBQStorage{ + PersistentVolumeClaim: &dfv1.PersistenceStrategy{ + StorageClassName: nil, + AccessMode: &dfv1.DefaultAccessMode, + VolumeSize: &dfv1.DefaultVolumeSize, + }, + }, + }, + }, + }, + { + Name: "output", + Sink: &dfv1.Sink{}, + }, + }, + Edges: []dfv1.Edge{ + {From: "input", To: "p1"}, + {From: "p1", To: 
"p2"}, + {From: "p2", To: "p3"}, + {From: "p3", To: "p4"}, + {From: "p4", To: "output"}, + }, + }, + } ) func init() { diff --git a/pkg/reconciler/isbsvc/validate.go b/pkg/reconciler/validator/isbsvc_validate.go similarity index 99% rename from pkg/reconciler/isbsvc/validate.go rename to pkg/reconciler/validator/isbsvc_validate.go index b9d4fa58dd..9ef5a88948 100644 --- a/pkg/reconciler/isbsvc/validate.go +++ b/pkg/reconciler/validator/isbsvc_validate.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package isbsvc +package validator import ( "fmt" diff --git a/pkg/reconciler/isbsvc/validate_test.go b/pkg/reconciler/validator/isbsvc_validate_test.go similarity index 99% rename from pkg/reconciler/isbsvc/validate_test.go rename to pkg/reconciler/validator/isbsvc_validate_test.go index b3a8269c75..eb3d7ac9a8 100644 --- a/pkg/reconciler/isbsvc/validate_test.go +++ b/pkg/reconciler/validator/isbsvc_validate_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package isbsvc +package validator import ( "testing" diff --git a/pkg/reconciler/validator/mvtx_validate.go b/pkg/reconciler/validator/mvtx_validate.go new file mode 100644 index 0000000000..02ce6b9630 --- /dev/null +++ b/pkg/reconciler/validator/mvtx_validate.go @@ -0,0 +1,64 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package validator + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/util/intstr" + k8svalidation "k8s.io/apimachinery/pkg/util/validation" + + dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" +) + +func ValidateMonoVertex(mvtx *dfv1.MonoVertex) error { + if mvtx == nil { + return fmt.Errorf("nil MonoVertex") + } + if errs := k8svalidation.IsDNS1035Label(mvtx.Name); len(errs) > 0 { + return fmt.Errorf("invalid mvtx name %q, %v", mvtx.Name, errs) + } + if mvtx.Spec.Source == nil { + return fmt.Errorf("source is not defined") + } + if err := validateSource(*mvtx.Spec.Source); err != nil { + return fmt.Errorf("invalid source: %w", err) + } + if mvtx.Spec.Sink == nil { + return fmt.Errorf("sink is not defined") + } + if err := validateSink(*mvtx.Spec.Sink); err != nil { + return fmt.Errorf("invalid sink: %w", err) + } + for _, ic := range mvtx.Spec.InitContainers { + if isReservedContainerName(ic.Name) { + return fmt.Errorf("invalid init container name: %q is reserved for containers created by numaflow", ic.Name) + } + } + for _, sc := range mvtx.Spec.Sidecars { + if isReservedContainerName(sc.Name) { + return fmt.Errorf("invalid sidecar container name: %q is reserved for containers created by numaflow", sc.Name) + } + } + // Validate the update strategy. + maxUvail := mvtx.Spec.UpdateStrategy.GetRollingUpdateStrategy().GetMaxUnavailable() + _, err := intstr.GetScaledValueFromIntOrPercent(&maxUvail, 1, true) // maxUnavailable should be an interger or a percentage in string + if err != nil { + return fmt.Errorf("invalid maxUnavailable: %w", err) + } + return nil +} diff --git a/pkg/reconciler/validator/mvtx_validate_test.go b/pkg/reconciler/validator/mvtx_validate_test.go new file mode 100644 index 0000000000..85f0387833 --- /dev/null +++ b/pkg/reconciler/validator/mvtx_validate_test.go @@ -0,0 +1,164 @@ +/* +Copyright 2022 The Numaproj Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validator + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/ptr" + + dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" + "github.com/stretchr/testify/assert" +) + +var ( + testMvtx = &dfv1.MonoVertex{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pl", + Namespace: "test-ns", + }, + Spec: dfv1.MonoVertexSpec{ + InitContainers: []corev1.Container{ + { + Name: "init-container", + Image: "my-image:latest", + }, + }, + Sidecars: []corev1.Container{ + { + Name: "sidecar-container", + Image: "my-image:latest", + }, + }, + Source: &dfv1.Source{ + UDTransformer: &dfv1.UDTransformer{ + Builtin: &dfv1.Transformer{Name: "filter"}, + }, + UDSource: &dfv1.UDSource{ + Container: &dfv1.Container{ + Image: "my-image:latest", + }, + }, + }, + Sink: &dfv1.Sink{ + AbstractSink: dfv1.AbstractSink{ + UDSink: &dfv1.UDSink{ + Container: &dfv1.Container{ + Image: "my-image:latest", + }, + }, + }, + Fallback: &dfv1.AbstractSink{ + UDSink: &dfv1.UDSink{ + Container: &dfv1.Container{ + Image: "my-fb-image:latest", + }, + }, + }, + }, + }, + } +) + +func TestValidateMonoVertex(t *testing.T) { + t.Run("test good mvtx", func(t *testing.T) { + err := ValidateMonoVertex(testMvtx) + assert.NoError(t, err) + }) + + t.Run("test nil", func(t *testing.T) { + err := ValidateMonoVertex(nil) + assert.Error(t, err) + }) + + t.Run("test invalid 
name", func(t *testing.T) { + testObj := testMvtx.DeepCopy() + testObj.Name = "test-pl-iNvalid+name" + err := ValidateMonoVertex(testObj) + assert.Error(t, err) + assert.Contains(t, err.Error(), "invalid mvtx name") + }) + + t.Run("test no source", func(t *testing.T) { + testObj := testMvtx.DeepCopy() + testObj.Spec.Source = nil + err := ValidateMonoVertex(testObj) + assert.Error(t, err) + assert.Contains(t, err.Error(), "source is not defined") + }) + + t.Run("test invalid source", func(t *testing.T) { + testObj := testMvtx.DeepCopy() + testObj.Spec.Source.Kafka = &dfv1.KafkaSource{} + err := ValidateMonoVertex(testObj) + assert.Error(t, err) + assert.Contains(t, err.Error(), "invalid user-defined source spec") + }) + + t.Run("test no sink", func(t *testing.T) { + testObj := testMvtx.DeepCopy() + testObj.Spec.Sink = nil + err := ValidateMonoVertex(testObj) + assert.Error(t, err) + assert.Contains(t, err.Error(), "sink is not defined") + }) + + t.Run("test invalid sink", func(t *testing.T) { + testObj := testMvtx.DeepCopy() + testObj.Spec.Sink.Fallback = nil + testObj.Spec.Sink.RetryStrategy = dfv1.RetryStrategy{ + OnFailure: ptr.To[dfv1.OnFailureRetryStrategy](dfv1.OnFailureFallback), + } + err := ValidateMonoVertex(testObj) + assert.Error(t, err) + assert.Contains(t, err.Error(), "given OnFailure strategy is fallback but fallback sink is not provided") + }) + + t.Run("test invalid init container name", func(t *testing.T) { + testObj := testMvtx.DeepCopy() + testObj.Spec.InitContainers[0].Name = dfv1.CtrInitSideInputs + err := ValidateMonoVertex(testObj) + assert.Error(t, err) + assert.Contains(t, err.Error(), "invalid init container name") + }) + + t.Run("test invalid sidecar container name", func(t *testing.T) { + testObj := testMvtx.DeepCopy() + testObj.Spec.Sidecars[0].Name = dfv1.CtrInitSideInputs + err := ValidateMonoVertex(testObj) + assert.Error(t, err) + assert.Contains(t, err.Error(), "invalid sidecar container name") + }) + + t.Run("test invalid 
maxUnavailable", func(t *testing.T) { + testObj := testMvtx.DeepCopy() + testObj.Spec.UpdateStrategy = dfv1.UpdateStrategy{ + RollingUpdate: &dfv1.RollingUpdateStrategy{ + MaxUnavailable: &intstr.IntOrString{ + Type: intstr.String, + StrVal: "invalid", + }, + }, + } + err := ValidateMonoVertex(testObj) + assert.Error(t, err) + assert.Contains(t, err.Error(), "invalid maxUnavailable") + }) +} diff --git a/pkg/reconciler/pipeline/validate.go b/pkg/reconciler/validator/pipeline_validate.go similarity index 79% rename from pkg/reconciler/pipeline/validate.go rename to pkg/reconciler/validator/pipeline_validate.go index 7304147c16..5ba41626ed 100644 --- a/pkg/reconciler/pipeline/validate.go +++ b/pkg/reconciler/validator/pipeline_validate.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package pipeline +package validator import ( "fmt" @@ -42,10 +42,7 @@ func ValidatePipeline(pl *dfv1.Pipeline) error { } names := make(map[string]bool) sources := make(map[string]dfv1.AbstractVertex) - udTransformers := make(map[string]dfv1.AbstractVertex) sinks := make(map[string]dfv1.AbstractVertex) - mapUdfs := make(map[string]dfv1.AbstractVertex) - reduceUdfs := make(map[string]dfv1.AbstractVertex) for _, v := range pl.Spec.Vertices { if names[v.Name] { return fmt.Errorf("duplicate vertex name %q", v.Name) @@ -63,9 +60,6 @@ func ValidatePipeline(pl *dfv1.Pipeline) error { return fmt.Errorf("invalid vertex %q, source must have 0 from edges and at least 1 to edge", v.Name) } sources[v.Name] = v - if v.Source.UDTransformer != nil { - udTransformers[v.Name] = v - } } if v.Sink != nil { if v.Source != nil || v.UDF != nil { @@ -83,11 +77,6 @@ func ValidatePipeline(pl *dfv1.Pipeline) error { if len(pl.GetToEdges(v.Name)) == 0 || len(pl.GetFromEdges(v.Name)) == 0 { return fmt.Errorf("invalid vertex %q, UDF must have to and from edges", v.Name) } - if v.UDF.GroupBy != nil { - reduceUdfs[v.Name] = v - } else { - 
mapUdfs[v.Name] = v - } } } @@ -99,56 +88,6 @@ func ValidatePipeline(pl *dfv1.Pipeline) error { return fmt.Errorf("pipeline has no sink, at least one vertex with 'sink' defined is required") } - for k, s := range sources { - if s.IsUDSource() { - if s.Source.UDSource.Container == nil || s.Source.UDSource.Container.Image == "" { - return fmt.Errorf("invalid user-defined source vertex %q, a customized image is required", k) - } - if s.Source.HTTP != nil || s.Source.Kafka != nil || s.Source.Nats != nil || s.Source.Generator != nil { - return fmt.Errorf("invalid user-defined source vertex %q, only one of 'http', 'kafka', 'nats', 'generator' and 'udSource' can be specified", k) - } - } - } - - for k, t := range udTransformers { - transformer := t.Source.UDTransformer - if transformer.Container != nil { - if transformer.Container.Image == "" && transformer.Builtin == nil { - return fmt.Errorf("invalid source vertex %q, either specify a builtin transformer, or a customized image", k) - } - if transformer.Container.Image != "" && transformer.Builtin != nil { - return fmt.Errorf("invalid source vertex %q, can not specify both builtin transformer, and a customized image", k) - } - } else if transformer.Builtin == nil { - return fmt.Errorf("invalid source vertex %q, either specify a builtin transformer, or a customized image", k) - } - } - - for k, u := range mapUdfs { - if u.UDF.Container != nil { - if u.UDF.Container.Image == "" && u.UDF.Builtin == nil { - return fmt.Errorf("invalid vertex %q, either specify a builtin function, or a customized image", k) - } - if u.UDF.Container.Image != "" && u.UDF.Builtin != nil { - return fmt.Errorf("invalid vertex %q, can not specify both builtin function, and a customized image", k) - } - } else if u.UDF.Builtin == nil { - return fmt.Errorf("invalid vertex %q, either specify a builtin function, or a customized image", k) - } - } - - for k, u := range reduceUdfs { - if u.UDF.Builtin != nil { - // No builtin function supported for reduce 
vertices. - return fmt.Errorf("invalid vertex %q, there's no buildin function support in reduce vertices", k) - } - if u.UDF.Container != nil { - if u.UDF.Container.Image == "" { - return fmt.Errorf("invalid vertex %q, a customized image is required", k) - } - } - } - namesInEdges := make(map[string]bool) toFromEdge := make(map[string]bool) for _, e := range pl.Spec.Edges { @@ -261,7 +200,7 @@ func validateVertex(v dfv1.AbstractVertex) error { maxUvail := v.UpdateStrategy.GetRollingUpdateStrategy().GetMaxUnavailable() _, err := intstr.GetScaledValueFromIntOrPercent(&maxUvail, 1, true) // maxUnavailable should be an interger or a percentage in string if err != nil { - return fmt.Errorf("vertex %q: invalid maxUnavailable: %v", v.Name, err) + return fmt.Errorf("vertex %q: invalid maxUnavailable: %w", v.Name, err) } for _, ic := range v.InitContainers { @@ -277,62 +216,106 @@ func validateVertex(v dfv1.AbstractVertex) error { return fmt.Errorf("vertex %q: sidecar container name %q is reserved for containers created by numaflow", v.Name, sc.Name) } } + if v.Source != nil { + if err := validateSource(*v.Source); err != nil { + return fmt.Errorf("invalid vertex %q: %w", v.Name, err) + } + return nil + } + if v.UDF != nil { - return validateUDF(*v.UDF) + if err := validateUDF(*v.UDF); err != nil { + return fmt.Errorf("invalid vertex %q: %w", v.Name, err) + } + return nil } if v.Sink != nil { - return validateSink(*v.Sink) + if err := validateSink(*v.Sink); err != nil { + return fmt.Errorf("invalid vertex %q: %w", v.Name, err) + } + return nil } return nil } func validateUDF(udf dfv1.UDF) error { if udf.GroupBy != nil { - f := udf.GroupBy.Window.Fixed - s := udf.GroupBy.Window.Sliding - ss := udf.GroupBy.Window.Session - storage := udf.GroupBy.Storage - if f == nil && s == nil && ss == nil { - return fmt.Errorf(`invalid "groupBy.window", no windowing strategy specified`) - } - if f != nil && s != nil { - return fmt.Errorf(`invalid "groupBy.window", either fixed or sliding 
is allowed, not both`) - } - if f != nil && ss != nil { - return fmt.Errorf(`invalid "groupBy.window", either fixed or session is allowed, not both`) - } - if s != nil && ss != nil { - return fmt.Errorf(`invalid "groupBy.window", either sliding or session is allowed, not both`) - } - if f != nil && f.Length == nil { - return fmt.Errorf(`invalid "groupBy.window.fixed", "length" is missing`) - } - if s != nil && (s.Length == nil) { - return fmt.Errorf(`invalid "groupBy.window.sliding", "length" is missing`) - } - if s != nil && (s.Slide == nil) { - return fmt.Errorf(`invalid "groupBy.window.sliding", "slide" is missing`) - } - if ss != nil && ss.Timeout == nil { - return fmt.Errorf(`invalid "groupBy.window.session", "timeout" is missing`) - } - if storage == nil { - return fmt.Errorf(`invalid "groupBy", "storage" is missing`) - } - if storage.PersistentVolumeClaim == nil && storage.EmptyDir == nil && storage.NoStore == nil { - return fmt.Errorf(`invalid "groupBy.storage", type of storage to use is missing`) - } - if storage.PersistentVolumeClaim != nil && storage.EmptyDir != nil { - return fmt.Errorf(`invalid "groupBy.storage", either emptyDir or persistentVolumeClaim is allowed, not both`) + return validateReduceUDF(udf) + } else { + return validateMapUDF(udf) + } +} + +func validateMapUDF(udf dfv1.UDF) error { + if udf.Container != nil { + if udf.Container.Image == "" && udf.Builtin == nil { + return fmt.Errorf("invalid udf spec, either specify a builtin function, or a customized image") } - if storage.PersistentVolumeClaim != nil && storage.NoStore != nil { - return fmt.Errorf(`invalid "groupBy.storage", either none or persistentVolumeClaim is allowed, not both`) + if udf.Container.Image != "" && udf.Builtin != nil { + return fmt.Errorf("invalid udf, can not specify both builtin function, and a customized image") } - if storage.EmptyDir != nil && storage.NoStore != nil { - return fmt.Errorf(`invalid "groupBy.storage", either none or emptyDir is allowed, not both`) 
+ } else if udf.Builtin == nil { + return fmt.Errorf("invalid udf, either specify a builtin function, or a customized image") + } + return nil +} + +func validateReduceUDF(udf dfv1.UDF) error { + if udf.Builtin != nil { + // No builtin function supported for reduce vertices. + return fmt.Errorf("invalid udf, there's no buildin function support in reduce vertices") + } + if udf.Container != nil { + if udf.Container.Image == "" { + return fmt.Errorf("invalid udf spec, a customized image is required") } } + + f := udf.GroupBy.Window.Fixed + s := udf.GroupBy.Window.Sliding + ss := udf.GroupBy.Window.Session + storage := udf.GroupBy.Storage + if f == nil && s == nil && ss == nil { + return fmt.Errorf(`invalid "groupBy.window", no windowing strategy specified`) + } + if f != nil && s != nil { + return fmt.Errorf(`invalid "groupBy.window", either fixed or sliding is allowed, not both`) + } + if f != nil && ss != nil { + return fmt.Errorf(`invalid "groupBy.window", either fixed or session is allowed, not both`) + } + if s != nil && ss != nil { + return fmt.Errorf(`invalid "groupBy.window", either sliding or session is allowed, not both`) + } + if f != nil && f.Length == nil { + return fmt.Errorf(`invalid "groupBy.window.fixed", "length" is missing`) + } + if s != nil && (s.Length == nil) { + return fmt.Errorf(`invalid "groupBy.window.sliding", "length" is missing`) + } + if s != nil && (s.Slide == nil) { + return fmt.Errorf(`invalid "groupBy.window.sliding", "slide" is missing`) + } + if ss != nil && ss.Timeout == nil { + return fmt.Errorf(`invalid "groupBy.window.session", "timeout" is missing`) + } + if storage == nil { + return fmt.Errorf(`invalid "groupBy", "storage" is missing`) + } + if storage.PersistentVolumeClaim == nil && storage.EmptyDir == nil && storage.NoStore == nil { + return fmt.Errorf(`invalid "groupBy.storage", type of storage to use is missing`) + } + if storage.PersistentVolumeClaim != nil && storage.EmptyDir != nil { + return fmt.Errorf(`invalid 
"groupBy.storage", either emptyDir or persistentVolumeClaim is allowed, not both`) + } + if storage.PersistentVolumeClaim != nil && storage.NoStore != nil { + return fmt.Errorf(`invalid "groupBy.storage", either none or persistentVolumeClaim is allowed, not both`) + } + if storage.EmptyDir != nil && storage.NoStore != nil { + return fmt.Errorf(`invalid "groupBy.storage", either none or emptyDir is allowed, not both`) + } + return nil } @@ -543,7 +526,6 @@ func isAForest(pl *dfv1.Pipeline) bool { // buildVisitedMap is a helper function that traverses the pipeline using DFS // This is a recursive function. Each iteration we are building our visited map to check in the parent function. func buildVisitedMap(vtxName string, visited map[string]struct{}, pl *dfv1.Pipeline) { - visited[vtxName] = struct{}{} // construct list of all to and from vertices @@ -566,18 +548,44 @@ func buildVisitedMap(vtxName string, visited map[string]struct{}, pl *dfv1.Pipel } -// validateSink initiates the validation of the sink spec for a pipeline +func validateSource(source dfv1.Source) error { + if transformer := source.UDTransformer; transformer != nil { + if transformer.Container != nil { + if transformer.Container.Image == "" && transformer.Builtin == nil { + return fmt.Errorf("invalid source transformer, either specify a builtin transformer, or a customized image") + } + if transformer.Container.Image != "" && transformer.Builtin != nil { + return fmt.Errorf("invalid source transformer, can not specify both builtin transformer, and a customized image") + } + } else if transformer.Builtin == nil { + return fmt.Errorf("invalid source transformer, either specify a builtin transformer, or a customized image") + } + } + // TODO: add more validations for each source type + if source.UDSource != nil { + if source.UDSource.Container == nil || source.UDSource.Container.Image == "" { + return fmt.Errorf("invalid user-defined source spec, a customized image is required") + } + if source.HTTP != 
nil || source.Kafka != nil || source.Nats != nil || source.Generator != nil { + return fmt.Errorf("invalid user-defined source spec, only one of 'http', 'kafka', 'nats', 'generator' and 'udSource' can be specified") + } + } + return nil +} + +// validateSink initiates the validation of the sink spec func validateSink(sink dfv1.Sink) error { // check the sinks retry strategy validity. - if ok := HasValidSinkRetryStrategy(sink); !ok { + if ok := hasValidSinkRetryStrategy(sink); !ok { return fmt.Errorf("given OnFailure strategy is fallback but fallback sink is not provided") } + // TODO: add more validations for each sink type return nil } // HasValidSinkRetryStrategy checks if the provided RetryStrategy is valid based on the sink's configuration. // This validation ensures that the retry strategy is compatible with the sink's current setup -func HasValidSinkRetryStrategy(s dfv1.Sink) bool { +func hasValidSinkRetryStrategy(s dfv1.Sink) bool { // If the OnFailure strategy is set to fallback, but no fallback sink is provided in the Sink struct, // we return an error if s.RetryStrategy.OnFailure != nil && *s.RetryStrategy.OnFailure == dfv1.OnFailureFallback && !hasValidFallbackSink(&s) { diff --git a/pkg/reconciler/pipeline/validate_test.go b/pkg/reconciler/validator/pipeline_validate_test.go similarity index 99% rename from pkg/reconciler/pipeline/validate_test.go rename to pkg/reconciler/validator/pipeline_validate_test.go index 8f7a272d89..e42af00654 100644 --- a/pkg/reconciler/pipeline/validate_test.go +++ b/pkg/reconciler/validator/pipeline_validate_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package pipeline +package validator import ( "testing" @@ -1212,7 +1212,7 @@ func TestIsValidSinkRetryStrategy(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { tt.sink.RetryStrategy = tt.strategy - ok := HasValidSinkRetryStrategy(tt.sink) + ok := hasValidSinkRetryStrategy(tt.sink) if (!ok) != tt.wantErr { t.Errorf("isValidSinkRetryStrategy() got = %v, want %v", ok, tt.wantErr) } diff --git a/pkg/webhook/validator/isbsvc.go b/pkg/webhook/validator/isbsvc.go index a790c427d7..3c1271c899 100644 --- a/pkg/webhook/validator/isbsvc.go +++ b/pkg/webhook/validator/isbsvc.go @@ -23,7 +23,7 @@ import ( "k8s.io/apimachinery/pkg/api/equality" dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" - isbsvccontroller "github.com/numaproj/numaflow/pkg/reconciler/isbsvc" + "github.com/numaproj/numaflow/pkg/reconciler/validator" ) type isbsvcValidator struct { @@ -36,7 +36,7 @@ func NewISBServiceValidator(old, new *dfv1.InterStepBufferService) Validator { } func (v *isbsvcValidator) ValidateCreate(_ context.Context) *admissionv1.AdmissionResponse { - if err := isbsvccontroller.ValidateInterStepBufferService(v.newISBService); err != nil { + if err := validator.ValidateInterStepBufferService(v.newISBService); err != nil { return DeniedResponse(err.Error()) } return AllowedResponse() @@ -44,7 +44,7 @@ func (v *isbsvcValidator) ValidateCreate(_ context.Context) *admissionv1.Admissi func (v *isbsvcValidator) ValidateUpdate(_ context.Context) *admissionv1.AdmissionResponse { // check the new ISB Service is valid - if err := isbsvccontroller.ValidateInterStepBufferService(v.newISBService); err != nil { + if err := validator.ValidateInterStepBufferService(v.newISBService); err != nil { return DeniedResponse(err.Error()) } // chck if the instance annotation is changed diff --git a/pkg/webhook/validator/pipeline.go b/pkg/webhook/validator/pipeline.go index c911ff6114..516b7262fe 100644 --- a/pkg/webhook/validator/pipeline.go +++ 
b/pkg/webhook/validator/pipeline.go @@ -26,7 +26,7 @@ import ( dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" "github.com/numaproj/numaflow/pkg/client/clientset/versioned/typed/numaflow/v1alpha1" - pipelinecontroller "github.com/numaproj/numaflow/pkg/reconciler/pipeline" + "github.com/numaproj/numaflow/pkg/reconciler/validator" ) type pipelineValidator struct { @@ -45,7 +45,7 @@ func NewPipelineValidator(isbClient v1alpha1.InterStepBufferServiceInterface, ol } func (v *pipelineValidator) ValidateCreate(ctx context.Context) *admissionv1.AdmissionResponse { - if err := pipelinecontroller.ValidatePipeline(v.newPipeline); err != nil { + if err := validator.ValidatePipeline(v.newPipeline); err != nil { return DeniedResponse(err.Error()) } // check that the ISB service exists @@ -66,7 +66,7 @@ func (v *pipelineValidator) ValidateUpdate(_ context.Context) *admissionv1.Admis return DeniedResponse("old pipeline spec is nil") } // check that the new pipeline spec is valid - if err := pipelinecontroller.ValidatePipeline(v.newPipeline); err != nil { + if err := validator.ValidatePipeline(v.newPipeline); err != nil { return DeniedResponse(fmt.Sprintf("new pipeline spec is invalid: %s", err.Error())) } // check that the update is valid From 8e98c0854bc3c17626238b2c58326cac5a602a05 Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Thu, 24 Oct 2024 08:09:37 -0700 Subject: [PATCH 127/188] fix: refine vertex/mvtx pod clean up logic (#2185) Signed-off-by: Derek Wang --- pkg/reconciler/monovertex/controller.go | 9 +++++---- pkg/reconciler/vertex/controller.go | 9 +++++---- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/pkg/reconciler/monovertex/controller.go b/pkg/reconciler/monovertex/controller.go index 8747e6e7c9..dbe63c6ca8 100644 --- a/pkg/reconciler/monovertex/controller.go +++ b/pkg/reconciler/monovertex/controller.go @@ -19,6 +19,7 @@ package monovertex import ( "context" "fmt" + "math" "strconv" "strings" "time" @@ -201,11 +202,11 @@ func (mr 
*monoVertexReconciler) orchestratePods(ctx context.Context, monoVtx *df monoVtx.Status.UpdatedReadyReplicas = 0 } - // Manually or automatically scaled down + // Manually or automatically scaled down, in this case, we need to clean up extra pods if there's any + if err := mr.cleanUpPodsFromTo(ctx, monoVtx, desiredReplicas, math.MaxInt); err != nil { + return fmt.Errorf("failed to clean up mono vertex pods [%v, ∞): %w", desiredReplicas, err) + } if currentReplicas := int(monoVtx.Status.Replicas); currentReplicas > desiredReplicas { - if err := mr.cleanUpPodsFromTo(ctx, monoVtx, desiredReplicas, currentReplicas); err != nil { - return fmt.Errorf("failed to clean up mono vertex pods [%v, %v): %w", desiredReplicas, currentReplicas, err) - } monoVtx.Status.Replicas = uint32(desiredReplicas) } updatedReplicas := int(monoVtx.Status.UpdatedReplicas) diff --git a/pkg/reconciler/vertex/controller.go b/pkg/reconciler/vertex/controller.go index 8d520609bf..c5e7fdcfeb 100644 --- a/pkg/reconciler/vertex/controller.go +++ b/pkg/reconciler/vertex/controller.go @@ -19,6 +19,7 @@ package vertex import ( "context" "fmt" + "math" "strconv" "strings" "time" @@ -215,11 +216,11 @@ func (r *vertexReconciler) orchestratePods(ctx context.Context, vertex *dfv1.Ver vertex.Status.UpdatedReadyReplicas = 0 } - // Manually or automatically scaled down + // Manually or automatically scaled down, in this case, we need to clean up extra pods if there's any + if err := r.cleanUpPodsFromTo(ctx, vertex, desiredReplicas, math.MaxInt); err != nil { + return fmt.Errorf("failed to clean up vertex pods [%v, ∞): %w", desiredReplicas, err) + } if currentReplicas := int(vertex.Status.Replicas); currentReplicas > desiredReplicas { - if err := r.cleanUpPodsFromTo(ctx, vertex, desiredReplicas, currentReplicas); err != nil { - return fmt.Errorf("failed to clean up vertex pods [%v, %v): %w", desiredReplicas, currentReplicas, err) - } vertex.Status.Replicas = uint32(desiredReplicas) } updatedReplicas := 
int(vertex.Status.UpdatedReplicas) From e98ff980577ee8c161d41fb3b00fcda6db20c9e7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 25 Oct 2024 09:45:30 -0700 Subject: [PATCH 128/188] chore(deps): bump http-proxy-middleware from 2.0.6 to 2.0.7 in /ui (#2188) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- ui/yarn.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ui/yarn.lock b/ui/yarn.lock index f44d029a2d..41a958626a 100644 --- a/ui/yarn.lock +++ b/ui/yarn.lock @@ -6532,9 +6532,9 @@ http-proxy-agent@^4.0.1: debug "4" http-proxy-middleware@^2.0.3: - version "2.0.6" - resolved "https://registry.yarnpkg.com/http-proxy-middleware/-/http-proxy-middleware-2.0.6.tgz#e1a4dd6979572c7ab5a4e4b55095d1f32a74963f" - integrity sha512-ya/UeJ6HVBYxrgYotAZo1KvPWlgB48kUJLDePFeneHsVujFaW5WNj2NgWCAE//B1Dl02BIfYlpNgBy8Kf8Rjmw== + version "2.0.7" + resolved "https://registry.yarnpkg.com/http-proxy-middleware/-/http-proxy-middleware-2.0.7.tgz#915f236d92ae98ef48278a95dedf17e991936ec6" + integrity sha512-fgVY8AV7qU7z/MmXJ/rxwbrtQH4jBQ9m7kp3llF0liB7glmFeVZFBepQb32T3y8n8k2+AEYuMPCpinYW+/CuRA== dependencies: "@types/http-proxy" "^1.17.8" http-proxy "^1.18.1" From 5b7778260c85c74fc73bf098d5d4609d2f8e2a42 Mon Sep 17 00:00:00 2001 From: Vigith Maurice Date: Sun, 27 Oct 2024 14:03:53 -0700 Subject: [PATCH 129/188] feat: source and sink implementation in Rust (blocking implementation) (#2190) Signed-off-by: Yashash H L Signed-off-by: Sreekanth Signed-off-by: Vigith Maurice Co-authored-by: Yashash H L Co-authored-by: Sreekanth --- Dockerfile | 2 +- Makefile | 10 + api/json-schema/schema.json | 6 +- api/openapi-spec/swagger.json | 6 +- examples/1-simple-pipeline.yaml | 2 +- pkg/apis/numaflow/v1alpha1/const.go | 20 +- .../numaflow/v1alpha1/container_supplier.go | 13 +- .../numaflow/v1alpha1/mono_vertex_types.go | 2 +- 
pkg/apis/numaflow/v1alpha1/sink.go | 3 + pkg/apis/numaflow/v1alpha1/source.go | 3 + pkg/apis/numaflow/v1alpha1/vertex_types.go | 17 +- .../numaflow/v1alpha1/zz_generated.openapi.go | 9 +- pkg/daemon/server/service/rater/rater.go | 8 +- pkg/mvtxdaemon/server/service/rater/rater.go | 1 + rust/build.sh | 38 ++ rust/numaflow-core/src/config.rs | 14 +- rust/numaflow-core/src/config/components.rs | 150 ++++- rust/numaflow-core/src/config/monovertex.rs | 160 ++--- rust/numaflow-core/src/config/pipeline.rs | 407 +++++++++++- rust/numaflow-core/src/config/pipeline/isb.rs | 186 ++++-- rust/numaflow-core/src/lib.rs | 73 +- rust/numaflow-core/src/message.rs | 124 ++-- .../src/{monovertex => }/metrics.rs | 198 +++++- rust/numaflow-core/src/monovertex.rs | 204 ++---- .../numaflow-core/src/monovertex/forwarder.rs | 38 +- rust/numaflow-core/src/pipeline.rs | 621 ++++++++++++++++++ rust/numaflow-core/src/pipeline/forwarder.rs | 18 +- .../src/pipeline/forwarder/sink_forwarder.rs | 55 ++ .../pipeline/forwarder/source_forwarder.rs | 191 ++++++ .../src/pipeline/isb/jetstream.rs | 121 ++-- .../src/pipeline/isb/jetstream/reader.rs | 361 ++++++++++ .../src/pipeline/isb/jetstream/writer.rs | 202 ++++-- rust/numaflow-core/src/shared/utils.rs | 316 ++++----- rust/numaflow-core/src/sink.rs | 396 ++++++++++- rust/numaflow-core/src/sink/blackhole.rs | 4 +- rust/numaflow-core/src/sink/log.rs | 4 +- rust/numaflow-core/src/sink/user_defined.rs | 4 +- rust/numaflow-core/src/source.rs | 14 +- rust/numaflow-core/src/source/generator.rs | 10 +- rust/numaflow-core/src/source/user_defined.rs | 25 +- .../src/transformer/user_defined.rs | 2 +- .../src/models/get_container_req.rs | 4 + rust/rust-toolchain.toml | 2 +- rust/src/bin/main.rs | 10 +- 44 files changed, 3245 insertions(+), 809 deletions(-) create mode 100644 rust/build.sh rename rust/numaflow-core/src/{monovertex => }/metrics.rs (86%) create mode 100644 rust/numaflow-core/src/pipeline/forwarder/sink_forwarder.rs create mode 100644 
rust/numaflow-core/src/pipeline/forwarder/source_forwarder.rs create mode 100644 rust/numaflow-core/src/pipeline/isb/jetstream/reader.rs diff --git a/Dockerfile b/Dockerfile index 027dfa0376..c234eb30ed 100644 --- a/Dockerfile +++ b/Dockerfile @@ -18,7 +18,7 @@ RUN chmod +x /bin/numaflow-rs #################################################################################################### # Rust binary #################################################################################################### -FROM lukemathwalker/cargo-chef:latest-rust-1.80 AS chef +FROM lukemathwalker/cargo-chef:latest-rust-1.81 AS chef ARG TARGETPLATFORM WORKDIR /numaflow RUN apt-get update && apt-get install -y protobuf-compiler diff --git a/Makefile b/Makefile index 038e6d8e11..b5cec67822 100644 --- a/Makefile +++ b/Makefile @@ -197,7 +197,17 @@ build-rust-in-docker: DOCKER_BUILDKIT=1 $(DOCKER) build --build-arg "BASE_IMAGE=$(DEV_BASE_IMAGE)" $(DOCKER_BUILD_ARGS) -t $(IMAGE_NAMESPACE)/$(BINARY_NAME)-rust-builder:$(VERSION) --target rust-builder -f $(DOCKERFILE) . 
export CTR=$$($(DOCKER) create $(IMAGE_NAMESPACE)/$(BINARY_NAME)-rust-builder:$(VERSION)) && $(DOCKER) cp $$CTR:/root/numaflow dist/numaflow-rs-linux-$(HOST_ARCH) && $(DOCKER) rm $$CTR && $(DOCKER) image rm $(IMAGE_NAMESPACE)/$(BINARY_NAME)-rust-builder:$(VERSION) +.PHONY: build-rust-in-docker-multi +build-rust-in-docker-multi: + mkdir -p dist + docker run -v ./dist/cargo:/root/.cargo -v ./rust/:/app/ -w /app --rm ubuntu:24.04 bash build.sh + cp -pv rust/target/aarch64-unknown-linux-gnu/release/numaflow dist/numaflow-rs-linux-arm64 + cp -pv rust/target/x86_64-unknown-linux-gnu/release/numaflow dist/numaflow-rs-linux-amd64 + image-multi: ui-build set-qemu dist/$(BINARY_NAME)-linux-arm64.gz dist/$(BINARY_NAME)-linux-amd64.gz +ifndef GITHUB_ACTIONS + $(MAKE) build-rust-in-docker-multi +endif $(DOCKER) buildx build --sbom=false --provenance=false --build-arg "BASE_IMAGE=$(RELEASE_BASE_IMAGE)" $(DOCKER_BUILD_ARGS) -t $(IMAGE_NAMESPACE)/$(BINARY_NAME):$(VERSION) --target $(BINARY_NAME) --platform linux/amd64,linux/arm64 --file $(DOCKERFILE) ${PUSH_OPTION} . 
set-qemu: diff --git a/api/json-schema/schema.json b/api/json-schema/schema.json index b840921f2e..d881d39f52 100644 --- a/api/json-schema/schema.json +++ b/api/json-schema/schema.json @@ -22890,6 +22890,9 @@ }, "type": "array" }, + "executeRustBinary": { + "type": "boolean" + }, "image": { "type": "string" }, @@ -22915,7 +22918,8 @@ "imagePullPolicy", "image", "volumeMounts", - "resources" + "resources", + "executeRustBinary" ], "type": "object" } diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 89ac681330..82730f4440 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -22867,7 +22867,8 @@ "imagePullPolicy", "image", "volumeMounts", - "resources" + "resources", + "executeRustBinary" ], "properties": { "env": { @@ -22876,6 +22877,9 @@ "$ref": "#/definitions/io.k8s.api.core.v1.EnvVar" } }, + "executeRustBinary": { + "type": "boolean" + }, "image": { "type": "string" }, diff --git a/examples/1-simple-pipeline.yaml b/examples/1-simple-pipeline.yaml index e790fa3150..42e9d9e095 100644 --- a/examples/1-simple-pipeline.yaml +++ b/examples/1-simple-pipeline.yaml @@ -27,4 +27,4 @@ spec: - from: in to: cat - from: cat - to: out + to: out \ No newline at end of file diff --git a/pkg/apis/numaflow/v1alpha1/const.go b/pkg/apis/numaflow/v1alpha1/const.go index 574752c304..2deddd477c 100644 --- a/pkg/apis/numaflow/v1alpha1/const.go +++ b/pkg/apis/numaflow/v1alpha1/const.go @@ -151,15 +151,17 @@ const ( EnvServingMinPipelineSpec = "NUMAFLOW_SERVING_MIN_PIPELINE_SPEC" EnvServingHostIP = "NUMAFLOW_SERVING_HOST_IP" EnvServingStoreTTL = "NUMAFLOW_SERVING_STORE_TTL" - PathVarRun = "/var/run/numaflow" - VertexMetricsPort = 2469 - VertexMetricsPortName = "metrics" - VertexHTTPSPort = 8443 - VertexHTTPSPortName = "https" - DaemonServicePort = 4327 - MonoVertexMetricsPort = 2469 - MonoVertexMetricsPortName = "metrics" - MonoVertexDaemonServicePort = 4327 + EnvExecuteRustBinary = "NUMAFLOW_EXECUTE_RUST_BINARY" + + PathVarRun = 
"/var/run/numaflow" + VertexMetricsPort = 2469 + VertexMetricsPortName = "metrics" + VertexHTTPSPort = 8443 + VertexHTTPSPortName = "https" + DaemonServicePort = 4327 + MonoVertexMetricsPort = 2469 + MonoVertexMetricsPortName = "metrics" + MonoVertexDaemonServicePort = 4327 DefaultRequeueAfter = 10 * time.Second diff --git a/pkg/apis/numaflow/v1alpha1/container_supplier.go b/pkg/apis/numaflow/v1alpha1/container_supplier.go index 153a07aac7..3d090c62fd 100644 --- a/pkg/apis/numaflow/v1alpha1/container_supplier.go +++ b/pkg/apis/numaflow/v1alpha1/container_supplier.go @@ -19,12 +19,13 @@ package v1alpha1 import corev1 "k8s.io/api/core/v1" type getContainerReq struct { - env []corev1.EnvVar - isbSvcType ISBSvcType - imagePullPolicy corev1.PullPolicy - image string - volumeMounts []corev1.VolumeMount - resources corev1.ResourceRequirements + env []corev1.EnvVar + isbSvcType ISBSvcType + imagePullPolicy corev1.PullPolicy + image string + volumeMounts []corev1.VolumeMount + resources corev1.ResourceRequirements + executeRustBinary bool } type containerSupplier interface { diff --git a/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go index d271c34144..0ed0730506 100644 --- a/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go +++ b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go @@ -460,7 +460,7 @@ func (mvspec MonoVertexSpec) DeepCopyWithoutReplicas() MonoVertexSpec { func (mvspec MonoVertexSpec) buildContainers(req getContainerReq) []corev1.Container { mainContainer := containerBuilder{}. - init(req).command(NumaflowRustBinary).args("--monovertex").build() + init(req).command(NumaflowRustBinary).args("--rust").build() containers := []corev1.Container{mainContainer} if mvspec.Source.UDSource != nil { // Only support UDSource for now. 
diff --git a/pkg/apis/numaflow/v1alpha1/sink.go b/pkg/apis/numaflow/v1alpha1/sink.go index b0f38aa67a..1b37ef5dcf 100644 --- a/pkg/apis/numaflow/v1alpha1/sink.go +++ b/pkg/apis/numaflow/v1alpha1/sink.go @@ -62,6 +62,9 @@ func (s Sink) getContainers(req getContainerReq) ([]corev1.Container, error) { } func (s Sink) getMainContainer(req getContainerReq) corev1.Container { + if req.executeRustBinary { + return containerBuilder{}.init(req).command(NumaflowRustBinary).args("processor", "--type="+string(VertexTypeSink), "--isbsvc-type="+string(req.isbSvcType), "--rust").build() + } return containerBuilder{}.init(req).args("processor", "--type="+string(VertexTypeSink), "--isbsvc-type="+string(req.isbSvcType)).build() } diff --git a/pkg/apis/numaflow/v1alpha1/source.go b/pkg/apis/numaflow/v1alpha1/source.go index deece42e72..c59d4c3c21 100644 --- a/pkg/apis/numaflow/v1alpha1/source.go +++ b/pkg/apis/numaflow/v1alpha1/source.go @@ -59,6 +59,9 @@ func (s Source) getContainers(req getContainerReq) ([]corev1.Container, error) { } func (s Source) getMainContainer(req getContainerReq) corev1.Container { + if req.executeRustBinary { + return containerBuilder{}.init(req).command(NumaflowRustBinary).args("processor", "--type="+string(VertexTypeSink), "--isbsvc-type="+string(req.isbSvcType), "--rust").build() + } return containerBuilder{}.init(req).args("processor", "--type="+string(VertexTypeSource), "--isbsvc-type="+string(req.isbSvcType)).build() } diff --git a/pkg/apis/numaflow/v1alpha1/vertex_types.go b/pkg/apis/numaflow/v1alpha1/vertex_types.go index f403d074c3..e45c168bdf 100644 --- a/pkg/apis/numaflow/v1alpha1/vertex_types.go +++ b/pkg/apis/numaflow/v1alpha1/vertex_types.go @@ -26,6 +26,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/env" "k8s.io/utils/ptr" ) @@ -240,15 +241,17 @@ func (v Vertex) GetPodSpec(req GetVertexPodSpecReq) (*corev1.PodSpec, error) { }, } volumeMounts := 
[]corev1.VolumeMount{{Name: varVolumeName, MountPath: PathVarRun}} - + executeRustBinary, _ := env.GetBool(EnvExecuteRustBinary, false) containers, err := v.Spec.getType().getContainers(getContainerReq{ - isbSvcType: req.ISBSvcType, - env: envVars, - image: req.Image, - imagePullPolicy: req.PullPolicy, - resources: req.DefaultResources, - volumeMounts: volumeMounts, + isbSvcType: req.ISBSvcType, + env: envVars, + image: req.Image, + imagePullPolicy: req.PullPolicy, + resources: req.DefaultResources, + volumeMounts: volumeMounts, + executeRustBinary: executeRustBinary, }) + if err != nil { return nil, err } diff --git a/pkg/apis/numaflow/v1alpha1/zz_generated.openapi.go b/pkg/apis/numaflow/v1alpha1/zz_generated.openapi.go index 5272ee72d4..2c42771989 100644 --- a/pkg/apis/numaflow/v1alpha1/zz_generated.openapi.go +++ b/pkg/apis/numaflow/v1alpha1/zz_generated.openapi.go @@ -6638,8 +6638,15 @@ func schema_pkg_apis_numaflow_v1alpha1_getContainerReq(ref common.ReferenceCallb Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), }, }, + "executeRustBinary": { + SchemaProps: spec.SchemaProps{ + Default: false, + Type: []string{"boolean"}, + Format: "", + }, + }, }, - Required: []string{"env", "isbSvcType", "imagePullPolicy", "image", "volumeMounts", "resources"}, + Required: []string{"env", "isbSvcType", "imagePullPolicy", "image", "volumeMounts", "resources", "executeRustBinary"}, }, }, Dependencies: []string{ diff --git a/pkg/daemon/server/service/rater/rater.go b/pkg/daemon/server/service/rater/rater.go index 86b2ad10de..94e7bc9875 100644 --- a/pkg/daemon/server/service/rater/rater.go +++ b/pkg/daemon/server/service/rater/rater.go @@ -248,7 +248,13 @@ func (r *Rater) getPodReadCounts(vertexName, podName string) *PodReadCount { if partitionName == "" { r.log.Warnf("[vertex name %s, pod name %s]: Partition name is not found for metric %s", vertexName, podName, readTotalMetricName) } else { - partitionReadCount[partitionName] = ele.Counter.GetValue() + // 
https://github.com/prometheus/client_rust/issues/194 + counterVal := ele.Counter.GetValue() + untypedVal := ele.Untyped.GetValue() + if counterVal == 0 && untypedVal != 0 { + counterVal = untypedVal + } + partitionReadCount[partitionName] = counterVal } } podReadCount := &PodReadCount{podName, partitionReadCount} diff --git a/pkg/mvtxdaemon/server/service/rater/rater.go b/pkg/mvtxdaemon/server/service/rater/rater.go index 19a3ee87dc..ac7422952e 100644 --- a/pkg/mvtxdaemon/server/service/rater/rater.go +++ b/pkg/mvtxdaemon/server/service/rater/rater.go @@ -179,6 +179,7 @@ func (r *Rater) getPodReadCounts(podName string) *PodReadCount { // from the results safely. // We use Untyped here as the counter metric family shows up as untyped from the rust client // TODO(MonoVertex): Check further on this to understand why not type is counter + // https://github.com/prometheus/client_rust/issues/194 podReadCount := &PodReadCount{podName, metricsList[0].Untyped.GetValue()} return podReadCount } else { diff --git a/rust/build.sh b/rust/build.sh new file mode 100644 index 0000000000..a0f93ee61f --- /dev/null +++ b/rust/build.sh @@ -0,0 +1,38 @@ +#!/bin/bash + +set -xeuo pipefail + +# Builds static rust binaries for both aarch64 and amd64 +# Intended for building Linux binries from Mac OS host in a docker container. +# Usage: (from root directory of numaflow repo) +# docker run -v ./rust/:/app/ -w /app --rm ubuntu:24.04 bash build.sh + +# Detect the host machine architecture +ARCH=$(uname -m) + +apt update && apt install -y curl protobuf-compiler build-essential + +if [ ! -f "$HOME/.cargo/env" ]; then + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y +fi +. 
"$HOME/.cargo/env" + +# Function to build for aarch64 +build_aarch64() { + apt install -y gcc-aarch64-linux-gnu + sed -i "/^targets = \[/d" rust-toolchain.toml + RUSTFLAGS='-C target-feature=+crt-static -C linker=aarch64-linux-gnu-gcc' cargo build --release --target aarch64-unknown-linux-gnu + sed -i "/targets = \['aarch64-unknown-linux-gnu'\]/d" rust-toolchain.toml +} + +# Function to build for x86_64 +build_x86_64() { + apt install -y gcc-x86-64-linux-gnu + sed -i "/^targets = \[/d" rust-toolchain.toml + echo "targets = ['x86_64-unknown-linux-gnu']" >> rust-toolchain.toml + RUSTFLAGS='-C target-feature=+crt-static -C linker=x86_64-linux-gnu-gcc' cargo build --release --target x86_64-unknown-linux-gnu + sed -i "/targets = \['x86_64-unknown-linux-gnu'\]/d" rust-toolchain.toml +} + +build_aarch64 +build_x86_64 diff --git a/rust/numaflow-core/src/config.rs b/rust/numaflow-core/src/config.rs index ae9071d8ed..e36ab4dc21 100644 --- a/rust/numaflow-core/src/config.rs +++ b/rust/numaflow-core/src/config.rs @@ -1,10 +1,11 @@ use std::env; use std::sync::OnceLock; +use monovertex::MonovertexConfig; + use crate::config::pipeline::PipelineConfig; use crate::Error; use crate::Result; -use monovertex::MonovertexConfig; const ENV_MONO_VERTEX_OBJ: &str = "NUMAFLOW_MONO_VERTEX_OBJECT"; const ENV_VERTEX_OBJ: &str = "NUMAFLOW_VERTEX_OBJECT"; @@ -28,6 +29,7 @@ pub fn config() -> &'static Settings { }) } +/// CustomResources supported by Numaflow. 
#[derive(Debug, Clone)] pub(crate) enum CustomResourceType { MonoVertex(MonovertexConfig), @@ -53,7 +55,7 @@ impl Settings { } if let Ok(obj) = env::var(ENV_VERTEX_OBJ) { - let cfg = PipelineConfig::load(obj)?; + let cfg = PipelineConfig::load(obj, env::vars())?; return Ok(Settings { custom_resource_type: CustomResourceType::Pipeline(cfg), }); @@ -64,12 +66,14 @@ impl Settings { #[cfg(test)] mod tests { - use crate::config::components::sink::OnFailureStrategy; - use crate::config::{CustomResourceType, Settings, ENV_MONO_VERTEX_OBJ}; + use std::env; + use base64::prelude::BASE64_STANDARD; use base64::Engine; use serde_json::json; - use std::env; + + use crate::config::components::sink::OnFailureStrategy; + use crate::config::{CustomResourceType, Settings, ENV_MONO_VERTEX_OBJ}; #[test] fn test_settings_load_combined() { diff --git a/rust/numaflow-core/src/config/components.rs b/rust/numaflow-core/src/config/components.rs index d4f319924a..ce62407f11 100644 --- a/rust/numaflow-core/src/config/components.rs +++ b/rust/numaflow-core/src/config/components.rs @@ -3,25 +3,85 @@ pub(crate) mod source { const DEFAULT_SOURCE_SOCKET: &str = "/var/run/numaflow/source.sock"; const DEFAULT_SOURCE_SERVER_INFO_FILE: &str = "/var/run/numaflow/sourcer-server-info"; - use bytes::Bytes; use std::time::Duration; + use bytes::Bytes; + use numaflow_models::models::Source; + use tracing::warn; + + use crate::error::Error; + use crate::Result; + #[derive(Debug, Clone, PartialEq)] pub(crate) struct SourceConfig { pub(crate) source_type: SourceType, } + impl Default for SourceConfig { + fn default() -> Self { + Self { + source_type: SourceType::Generator(GeneratorConfig::default()), + } + } + } + #[derive(Debug, Clone, PartialEq)] pub(crate) enum SourceType { Generator(GeneratorConfig), UserDefined(UserDefinedConfig), } + impl TryFrom> for SourceType { + type Error = Error; + + fn try_from(source: Box) -> Result { + source + .udsource + .as_ref() + .map(|_| 
Ok(SourceType::UserDefined(UserDefinedConfig::default()))) + .or_else(|| { + source.generator.as_ref().map(|generator| { + let mut generator_config = GeneratorConfig::default(); + + if let Some(value_blob) = &generator.value_blob { + generator_config.content = Bytes::from(value_blob.clone()); + } + + if let Some(msg_size) = generator.msg_size { + if msg_size >= 0 { + generator_config.msg_size_bytes = msg_size as u32; + } else { + warn!( + "'msgSize' cannot be negative, using default value (8 bytes)" + ); + } + } + + generator_config.value = generator.value; + generator_config.rpu = generator.rpu.unwrap_or(1) as usize; + generator_config.duration = + generator.duration.map_or(Duration::from_millis(1000), |d| { + std::time::Duration::from(d) + }); + generator_config.key_count = generator + .key_count + .map_or(0, |kc| std::cmp::min(kc, u8::MAX as i32) as u8); + generator_config.jitter = generator + .jitter + .map_or(Duration::from_secs(0), std::time::Duration::from); + + Ok(SourceType::Generator(generator_config)) + }) + }) + .ok_or_else(|| Error::Config("Source type not found".to_string()))? 
+ } + } + #[derive(Debug, Clone, PartialEq)] pub(crate) struct GeneratorConfig { pub rpu: usize, pub content: Bytes, - pub duration: usize, + pub duration: Duration, pub value: Option, pub key_count: u8, pub msg_size_bytes: u32, @@ -33,7 +93,7 @@ pub(crate) mod source { Self { rpu: 1, content: Bytes::new(), - duration: 1000, + duration: Duration::from_millis(1000), value: None, key_count: 0, msg_size_bytes: 8, @@ -70,9 +130,13 @@ pub(crate) mod sink { const DEFAULT_MAX_SINK_RETRY_ATTEMPTS: u16 = u16::MAX; const DEFAULT_SINK_RETRY_INTERVAL_IN_MS: u32 = 1; - use numaflow_models::models::{Backoff, RetryStrategy}; use std::fmt::Display; + use numaflow_models::models::{Backoff, RetryStrategy, Sink}; + + use crate::error::Error; + use crate::Result; + #[derive(Debug, Clone, PartialEq)] pub(crate) struct SinkConfig { pub(crate) sink_type: SinkType, @@ -86,6 +150,51 @@ pub(crate) mod sink { UserDefined(UserDefinedConfig), } + impl TryFrom> for SinkType { + type Error = Error; + + // FIXME(cr): why is sink.fallback Box vs. sink Box. This is coming from + // numaflow-models. Problem is, golang has embedded structures and rust does not. We might + // have to AbstractSink for sink-configs while Sink for real sink types. + // NOTE: I do not see this problem with Source? + fn try_from(sink: Box) -> Result { + if let Some(fallback) = sink.fallback { + fallback + .udsink + .as_ref() + .map(|_| Ok(SinkType::UserDefined(UserDefinedConfig::fallback_default()))) + .or_else(|| { + fallback + .log + .as_ref() + .map(|_| Ok(SinkType::Log(LogConfig::default()))) + }) + .or_else(|| { + fallback + .blackhole + .as_ref() + .map(|_| Ok(SinkType::Blackhole(BlackholeConfig::default()))) + }) + .ok_or_else(|| Error::Config("Sink type not found".to_string()))? 
+ } else { + sink.udsink + .as_ref() + .map(|_| Ok(SinkType::UserDefined(UserDefinedConfig::default()))) + .or_else(|| { + sink.log + .as_ref() + .map(|_| Ok(SinkType::Log(LogConfig::default()))) + }) + .or_else(|| { + sink.blackhole + .as_ref() + .map(|_| Ok(SinkType::Blackhole(BlackholeConfig::default()))) + }) + .ok_or_else(|| Error::Config("Sink type not found".to_string()))? + } + } + } + #[derive(Debug, Clone, PartialEq, Default)] pub(crate) struct LogConfig {} @@ -164,6 +273,27 @@ pub(crate) mod sink { } } + impl From> for RetryConfig { + fn from(retry: Box) -> Self { + let mut retry_config = RetryConfig::default(); + if let Some(backoff) = &retry.backoff { + if let Some(interval) = backoff.interval { + retry_config.sink_retry_interval_in_ms = + std::time::Duration::from(interval).as_millis() as u32; + } + + if let Some(steps) = backoff.steps { + retry_config.sink_max_retry_attempts = steps as u16; + } + } + + if let Some(strategy) = &retry.on_failure { + retry_config.sink_retry_on_fail_strategy = OnFailureStrategy::from_str(strategy); + } + retry_config + } + } + impl Default for UserDefinedConfig { fn default() -> Self { Self { @@ -198,6 +328,7 @@ pub(crate) mod transformer { #[derive(Debug, Clone, PartialEq)] pub(crate) enum TransformerType { + #[allow(dead_code)] Noop(NoopConfig), // will add built-in transformers UserDefined(UserDefinedConfig), } @@ -248,16 +379,18 @@ pub(crate) mod metrics { #[cfg(test)] mod source_tests { - use super::source::{GeneratorConfig, SourceConfig, SourceType, UserDefinedConfig}; - use bytes::Bytes; use std::time::Duration; + use bytes::Bytes; + + use super::source::{GeneratorConfig, SourceConfig, SourceType, UserDefinedConfig}; + #[test] fn test_default_generator_config() { let default_config = GeneratorConfig::default(); assert_eq!(default_config.rpu, 1); assert_eq!(default_config.content, Bytes::new()); - assert_eq!(default_config.duration, 1000); + assert_eq!(default_config.duration.as_millis(), 1000); 
assert_eq!(default_config.value, None); assert_eq!(default_config.key_count, 0); assert_eq!(default_config.msg_size_bytes, 8); @@ -304,11 +437,12 @@ mod source_tests { #[cfg(test)] mod sink_tests { + use numaflow_models::models::{Backoff, RetryStrategy}; + use super::sink::{ BlackholeConfig, LogConfig, OnFailureStrategy, RetryConfig, SinkConfig, SinkType, UserDefinedConfig, }; - use numaflow_models::models::{Backoff, RetryStrategy}; #[test] fn test_default_log_config() { diff --git a/rust/numaflow-core/src/config/monovertex.rs b/rust/numaflow-core/src/config/monovertex.rs index a9d5685530..0d1b0c1a9f 100644 --- a/rust/numaflow-core/src/config/monovertex.rs +++ b/rust/numaflow-core/src/config/monovertex.rs @@ -1,5 +1,12 @@ +use std::time::Duration; + +use base64::prelude::BASE64_STANDARD; +use base64::Engine; +use numaflow_models::models::MonoVertex; +use serde_json::from_slice; + use crate::config::components::metrics::MetricsConfig; -use crate::config::components::sink::{OnFailureStrategy, RetryConfig, SinkConfig}; +use crate::config::components::sink::SinkConfig; use crate::config::components::source::{GeneratorConfig, SourceConfig}; use crate::config::components::transformer::{ TransformerConfig, TransformerType, UserDefinedConfig, @@ -8,13 +15,6 @@ use crate::config::components::{sink, source}; use crate::error::Error; use crate::message::get_vertex_replica; use crate::Result; -use base64::prelude::BASE64_STANDARD; -use base64::Engine; -use bytes::Bytes; -use numaflow_models::models::MonoVertex; -use serde_json::from_slice; -use std::time::Duration; -use tracing::warn; const DEFAULT_BATCH_SIZE: u64 = 500; const DEFAULT_TIMEOUT_IN_MS: u32 = 1000; @@ -23,7 +23,7 @@ const DEFAULT_TIMEOUT_IN_MS: u32 = 1000; pub(crate) struct MonovertexConfig { pub(crate) name: String, pub(crate) batch_size: usize, - pub(crate) timeout_in_ms: u64, + pub(crate) read_timeout: Duration, pub(crate) replica: u16, pub(crate) source_config: SourceConfig, pub(crate) sink_config: SinkConfig, 
@@ -37,7 +37,7 @@ impl Default for MonovertexConfig { MonovertexConfig { name: "".to_string(), batch_size: DEFAULT_BATCH_SIZE as usize, - timeout_in_ms: DEFAULT_TIMEOUT_IN_MS as u64, + read_timeout: Duration::from_millis(DEFAULT_TIMEOUT_IN_MS as u64), replica: 0, source_config: SourceConfig { source_type: source::SourceType::Generator(GeneratorConfig::default()), @@ -97,128 +97,41 @@ impl MonovertexConfig { transformer_type: TransformerType::UserDefined(UserDefinedConfig::default()), }); - let source_config = mono_vertex_obj - .spec - .source - .as_ref() - .ok_or_else(|| Error::Config("Source not found".to_string())) - .and_then(|source| { - source.udsource.as_ref().map(|_| SourceConfig { - source_type: source::SourceType::UserDefined(source::UserDefinedConfig::default()), - }).or_else(|| { - source.generator.as_ref().map(|generator| { - let mut generator_config = GeneratorConfig::default(); - - if let Some(value_blob) = &generator.value_blob { - generator_config.content = Bytes::from(value_blob.clone()); - } - - if let Some(msg_size) = generator.msg_size { - if msg_size >= 0 { - generator_config.msg_size_bytes = msg_size as u32; - } else { - warn!("'msgSize' cannot be negative, using default value (8 bytes)"); - } - } - - generator_config.value = generator.value; - generator_config.rpu = generator.rpu.unwrap_or(1) as usize; - generator_config.duration = generator.duration.map_or(1000, |d| std::time::Duration::from(d).as_millis() as usize); - generator_config.key_count = generator.key_count.map_or(0, |kc| std::cmp::min(kc, u8::MAX as i32) as u8); - generator_config.jitter = generator.jitter.map_or(Duration::from_secs(0), std::time::Duration::from); + let source = mono_vertex_obj + .spec + .source + .clone() + .ok_or_else(|| Error::Config("Source not found".to_string()))?; - SourceConfig { - source_type: source::SourceType::Generator(generator_config), - } - }) - }).ok_or_else(|| Error::Config("Source type not found".to_string())) - })?; + let source_config = 
SourceConfig { + source_type: source.try_into()?, + }; - let sink_config = mono_vertex_obj + let sink = mono_vertex_obj .spec .sink - .as_ref() - .ok_or_else(|| Error::Config("Sink not found".to_string())) - .and_then(|sink| { - let retry_config = sink.retry_strategy.as_ref().map(|retry| { - let mut retry_config = RetryConfig::default(); + .clone() + .ok_or_else(|| Error::Config("Sink not found".to_string()))?; - if let Some(backoff) = &retry.backoff { - if let Some(interval) = backoff.interval { - retry_config.sink_retry_interval_in_ms = - std::time::Duration::from(interval).as_millis() as u32; - } + let sink_config = SinkConfig { + sink_type: sink.clone().try_into()?, + retry_config: sink.retry_strategy.clone().map(|retry| retry.into()), + }; - if let Some(steps) = backoff.steps { - retry_config.sink_max_retry_attempts = steps as u16; - } - } - - if let Some(strategy) = &retry.on_failure { - retry_config.sink_retry_on_fail_strategy = - OnFailureStrategy::from_str(strategy); - } - - retry_config - }); - - sink.udsink - .as_ref() - .map(|_| SinkConfig { - sink_type: sink::SinkType::UserDefined(sink::UserDefinedConfig::default()), - retry_config: retry_config.clone(), - }) - .or_else(|| { - sink.log.as_ref().map(|_| SinkConfig { - sink_type: sink::SinkType::Log(sink::LogConfig::default()), - retry_config: retry_config.clone(), - }) - }) - .or_else(|| { - sink.blackhole.as_ref().map(|_| SinkConfig { - sink_type: sink::SinkType::Blackhole(sink::BlackholeConfig::default()), - retry_config: retry_config.clone(), - }) - }) - .ok_or_else(|| Error::Config("Sink type not found".to_string())) - })?; - - let fb_sink_config = mono_vertex_obj - .spec - .sink - .as_ref() - .and_then(|sink| sink.fallback.as_ref()) - .map(|fallback| { - fallback - .udsink - .as_ref() - .map(|_| SinkConfig { - sink_type: sink::SinkType::UserDefined( - sink::UserDefinedConfig::fallback_default(), - ), - retry_config: None, - }) - .or_else(|| { - fallback.log.as_ref().map(|_| SinkConfig { - 
sink_type: sink::SinkType::Log(sink::LogConfig::default()), - retry_config: None, - }) - }) - .or_else(|| { - fallback.blackhole.as_ref().map(|_| SinkConfig { - sink_type: sink::SinkType::Blackhole(sink::BlackholeConfig::default()), - retry_config: None, - }) - }) - .ok_or_else(|| Error::Config("Fallback sink type not found".to_string())) + let fb_sink_config = if sink.fallback.is_some() { + Some(SinkConfig { + sink_type: sink.try_into()?, + retry_config: None, }) - .transpose()?; + } else { + None + }; Ok(MonovertexConfig { name: mono_vertex_name, replica: *get_vertex_replica(), batch_size: batch_size as usize, - timeout_in_ms: timeout_in_ms as u64, + read_timeout: Duration::from_millis(timeout_in_ms as u64), metrics_config: MetricsConfig::default(), source_config, sink_config, @@ -230,13 +143,14 @@ impl MonovertexConfig { #[cfg(test)] mod tests { + use base64::prelude::BASE64_STANDARD; + use base64::Engine; + use crate::config::components::sink::SinkType; use crate::config::components::source::SourceType; use crate::config::components::transformer::TransformerType; use crate::config::monovertex::MonovertexConfig; use crate::error::Error; - use base64::prelude::BASE64_STANDARD; - use base64::Engine; #[test] fn test_load_valid_config() { let valid_config = r#" @@ -271,7 +185,7 @@ mod tests { assert_eq!(config.name, "test_vertex"); assert_eq!(config.batch_size, 1000); - assert_eq!(config.timeout_in_ms, 2000); + assert_eq!(config.read_timeout.as_millis(), 2000); assert!(matches!( config.source_config.source_type, SourceType::UserDefined(_) diff --git a/rust/numaflow-core/src/config/pipeline.rs b/rust/numaflow-core/src/config/pipeline.rs index 03753bcfd5..0a5ba67508 100644 --- a/rust/numaflow-core/src/config/pipeline.rs +++ b/rust/numaflow-core/src/config/pipeline.rs @@ -1,15 +1,64 @@ +use std::collections::HashMap; +use std::env; +use std::time::Duration; + +use base64::prelude::BASE64_STANDARD; +use base64::Engine; +use numaflow_models::models::{ForwardConditions, 
Vertex}; +use serde_json::from_slice; + +use crate::config::components::metrics::MetricsConfig; use crate::config::components::sink::SinkConfig; -use crate::config::components::source::{SourceConfig, SourceType}; -use crate::config::components::transformer::TransformerConfig; +use crate::config::components::source::SourceConfig; +use crate::config::components::transformer::{TransformerConfig, TransformerType}; +use crate::config::pipeline::isb::{BufferReaderConfig, BufferWriterConfig}; +use crate::error::Error; +use crate::message::get_vertex_replica; use crate::Result; +const DEFAULT_BATCH_SIZE: u64 = 500; +const DEFAULT_TIMEOUT_IN_MS: u32 = 1000; +const ENV_NUMAFLOW_SERVING_JETSTREAM_URL: &str = "NUMAFLOW_ISBSVC_JETSTREAM_URL"; +const ENV_NUMAFLOW_SERVING_JETSTREAM_USER: &str = "NUMAFLOW_ISBSVC_JETSTREAM_USER"; +const ENV_NUMAFLOW_SERVING_JETSTREAM_PASSWORD: &str = "NUMAFLOW_ISBSVC_JETSTREAM_PASSWORD"; + pub(crate) mod isb; #[derive(Debug, Clone, PartialEq)] pub(crate) struct PipelineConfig { - pub(crate) buffer_reader_config: BufferReaderConfig, - pub(crate) buffer_writer_config: BufferWriterConfig, - pub(crate) vertex_config: VertexConfig, + pub(crate) pipeline_name: String, + pub(crate) vertex_name: String, + pub(crate) replica: u16, + pub(crate) batch_size: usize, + // FIXME(cr): we cannot leak this as a paf, we need to use a different terminology. 
+ pub(crate) paf_batch_size: usize, + pub(crate) read_timeout: Duration, + pub(crate) js_client_config: isb::jetstream::ClientConfig, // TODO: make it enum, since we can have different ISB implementations + pub(crate) from_vertex_config: Vec, + pub(crate) to_vertex_config: Vec, + pub(crate) vertex_config: VertexType, + pub(crate) metrics_config: MetricsConfig, +} + +impl Default for PipelineConfig { + fn default() -> Self { + PipelineConfig { + pipeline_name: "default-pl".to_string(), + vertex_name: "default-vtx".to_string(), + replica: 0, + batch_size: DEFAULT_BATCH_SIZE as usize, + paf_batch_size: (DEFAULT_BATCH_SIZE * 2) as usize, + read_timeout: Duration::from_secs(DEFAULT_TIMEOUT_IN_MS as u64), + js_client_config: isb::jetstream::ClientConfig::default(), + from_vertex_config: vec![], + to_vertex_config: vec![], + vertex_config: VertexType::Source(SourceVtxConfig { + source_config: Default::default(), + transformer_config: None, + }), + metrics_config: Default::default(), + } + } } #[derive(Debug, Clone, PartialEq)] @@ -25,28 +74,358 @@ pub(crate) struct SinkVtxConfig { } #[derive(Debug, Clone, PartialEq)] -pub(crate) enum VertexConfig { +pub(crate) enum VertexType { Source(SourceVtxConfig), Sink(SinkVtxConfig), } +impl std::fmt::Display for VertexType { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::result::Result<(), std::fmt::Error> { + match self { + VertexType::Source(_) => write!(f, "Source"), + VertexType::Sink(_) => write!(f, "Sink"), + } + } +} + #[derive(Debug, Clone, PartialEq)] -pub(crate) struct BufferReaderConfig {} +pub(crate) struct FromVertexConfig { + pub(crate) name: String, + pub(crate) reader_config: BufferReaderConfig, + pub(crate) partitions: u16, +} #[derive(Debug, Clone, PartialEq)] -pub(crate) struct BufferWriterConfig {} +pub(crate) struct ToVertexConfig { + pub(crate) name: String, + pub(crate) writer_config: BufferWriterConfig, + pub(crate) partitions: u16, + pub(crate) conditions: Option, +} impl PipelineConfig { - pub fn 
load(_pipeline_spec_obj: String) -> Result { + pub(crate) fn load( + pipeline_spec_obj: String, + env_vars: impl IntoIterator, impl Into)>, + ) -> Result { + // controller sets this env var. + let decoded_spec = BASE64_STANDARD + .decode(pipeline_spec_obj.as_bytes()) + .map_err(|e| Error::Config(format!("Failed to decode pipeline spec: {:?}", e)))?; + + let vertex_obj: Vertex = from_slice(&decoded_spec) + .map_err(|e| Error::Config(format!("Failed to parse pipeline spec: {:?}", e)))?; + + let pipeline_name = vertex_obj.spec.pipeline_name; + let vertex_name = vertex_obj.spec.name; + let replica = get_vertex_replica(); + + let namespace = vertex_obj + .metadata + .ok_or_else(|| Error::Config("Missing metadata in vertex spec".to_string()))? + .namespace + .ok_or_else(|| Error::Config("Missing namespace in vertex spec".to_string()))?; + + let batch_size = vertex_obj + .spec + .limits + .as_ref() + .and_then(|limits| limits.read_batch_size.map(|x| x as u64)) + .unwrap_or(DEFAULT_BATCH_SIZE); + + let timeout_in_ms = vertex_obj + .spec + .limits + .as_ref() + .and_then(|limits| { + limits + .read_timeout + .map(|x| Duration::from(x).as_millis() as u32) + }) + .unwrap_or(DEFAULT_TIMEOUT_IN_MS); + + let from_edges = vertex_obj.spec.from_edges.unwrap_or_default(); + + let to_edges = vertex_obj.spec.to_edges.unwrap_or_default(); + + let vertex: VertexType = if let Some(source) = vertex_obj.spec.source { + let transformer_config = source.transformer.as_ref().map(|_| TransformerConfig { + transformer_type: TransformerType::UserDefined(Default::default()), + }); + + VertexType::Source(SourceVtxConfig { + source_config: SourceConfig { + source_type: source.try_into()?, + }, + transformer_config, + }) + } else if let Some(sink) = vertex_obj.spec.sink { + let fb_sink_config = if sink.fallback.as_ref().is_some() { + Some(SinkConfig { + sink_type: sink.clone().try_into()?, + retry_config: None, + }) + } else { + None + }; + + VertexType::Sink(SinkVtxConfig { + sink_config: SinkConfig 
{ + sink_type: sink.try_into()?, + retry_config: None, + }, + fb_sink_config, + }) + } else { + return Err(Error::Config( + "Only source and sink are supported ATM".to_string(), + )); + }; + + let env_vars: HashMap = env_vars + .into_iter() + .map(|(key, val)| (key.into(), val.into())) + .filter(|(key, _val)| { + // FIXME(cr): this filter is non-exhaustive, should we invert? + key == ENV_NUMAFLOW_SERVING_JETSTREAM_URL + || key == ENV_NUMAFLOW_SERVING_JETSTREAM_USER + || key == ENV_NUMAFLOW_SERVING_JETSTREAM_PASSWORD + }) + .collect(); + + let get_var = |var: &str| -> Result { + Ok(env_vars + .get(var) + .ok_or_else(|| Error::Config(format!("Environment variable {var} is not set")))? + .to_string()) + }; + + let js_client_config = isb::jetstream::ClientConfig { + url: get_var(ENV_NUMAFLOW_SERVING_JETSTREAM_URL)?, + user: get_var(ENV_NUMAFLOW_SERVING_JETSTREAM_USER).ok(), + password: get_var(ENV_NUMAFLOW_SERVING_JETSTREAM_PASSWORD).ok(), + }; + + let mut from_vertex_config = vec![]; + for edge in from_edges { + let partition_count = edge.to_vertex_partition_count.unwrap_or_default() as u16; + let buffer_name = format!("{}-{}-{}", namespace, pipeline_name, edge.to); + + let streams: Vec<(String, u16)> = (0..partition_count) + .map(|i| (format!("{}-{}", buffer_name, i), i)) + .collect(); + + from_vertex_config.push(FromVertexConfig { + name: edge.from, + reader_config: BufferReaderConfig { + partitions: partition_count, + streams, + ..Default::default() + }, + partitions: 0, + }); + } + + let mut to_vertex_config = vec![]; + for edge in to_edges { + let partition_count = edge.to_vertex_partition_count.unwrap_or_default() as u16; + let buffer_name = format!("{}-{}-{}", namespace, pipeline_name, edge.to); + + let streams: Vec<(String, u16)> = (0..partition_count) + .map(|i| (format!("{}-{}", buffer_name, i), i)) + .collect(); + + let default_writer_config = BufferWriterConfig::default(); + to_vertex_config.push(ToVertexConfig { + name: edge.to, + writer_config: 
BufferWriterConfig { + streams, + partitions: partition_count, + max_length: vertex_obj + .spec + .limits + .as_ref() + .and_then(|l| l.buffer_max_length) + .unwrap_or(default_writer_config.max_length as i64) + as usize, + usage_limit: vertex_obj + .spec + .limits + .as_ref() + .and_then(|l| l.buffer_usage_limit) + .unwrap_or(default_writer_config.usage_limit as i64) + as f64 + / 100.0, + ..default_writer_config + }, + partitions: edge.to_vertex_partition_count.unwrap_or_default() as u16, + conditions: None, + }); + } + Ok(PipelineConfig { - buffer_reader_config: BufferReaderConfig {}, - buffer_writer_config: BufferWriterConfig {}, - vertex_config: VertexConfig::Source(SourceVtxConfig { + batch_size: batch_size as usize, + paf_batch_size: env::var("PAF_BATCH_SIZE") + .unwrap_or("30000".to_string()) + .parse() + .unwrap(), + read_timeout: Duration::from_millis(timeout_in_ms as u64), + pipeline_name, + vertex_name, + replica: *replica, + js_client_config, + from_vertex_config, + to_vertex_config, + vertex_config: vertex, + metrics_config: Default::default(), + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::config::components::sink::{BlackholeConfig, LogConfig, SinkType}; + use crate::config::components::source::{GeneratorConfig, SourceType}; + + #[test] + fn test_default_pipeline_config() { + let expected = PipelineConfig { + pipeline_name: "default-pl".to_string(), + vertex_name: "default-vtx".to_string(), + replica: 0, + batch_size: DEFAULT_BATCH_SIZE as usize, + paf_batch_size: (DEFAULT_BATCH_SIZE * 2) as usize, + read_timeout: Duration::from_secs(DEFAULT_TIMEOUT_IN_MS as u64), + js_client_config: isb::jetstream::ClientConfig::default(), + from_vertex_config: vec![], + to_vertex_config: vec![], + vertex_config: VertexType::Source(SourceVtxConfig { + source_config: Default::default(), + transformer_config: None, + }), + metrics_config: Default::default(), + }; + + let config = PipelineConfig::default(); + assert_eq!(config, expected); + } + + 
#[test] + fn test_vertex_type_display() { + let src_type = VertexType::Source(SourceVtxConfig { + source_config: SourceConfig::default(), + transformer_config: None, + }); + assert_eq!(src_type.to_string(), "Source"); + + let sink_type = VertexType::Sink(SinkVtxConfig { + sink_config: SinkConfig { + sink_type: SinkType::Log(LogConfig {}), + retry_config: None, + }, + fb_sink_config: None, + }); + assert_eq!(sink_type.to_string(), "Sink"); + } + + #[test] + fn test_pipeline_config_load_sink_vertex() { + let pipeline_cfg_base64 = "eyJtZXRhZGF0YSI6eyJuYW1lIjoic2ltcGxlLXBpcGVsaW5lLW91dCIsIm5hbWVzcGFjZSI6ImRlZmF1bHQiLCJjcmVhdGlvblRpbWVzdGFtcCI6bnVsbH0sInNwZWMiOnsibmFtZSI6Im91dCIsInNpbmsiOnsiYmxhY2tob2xlIjp7fSwicmV0cnlTdHJhdGVneSI6eyJvbkZhaWx1cmUiOiJyZXRyeSJ9fSwibGltaXRzIjp7InJlYWRCYXRjaFNpemUiOjUwMCwicmVhZFRpbWVvdXQiOiIxcyIsImJ1ZmZlck1heExlbmd0aCI6MzAwMDAsImJ1ZmZlclVzYWdlTGltaXQiOjgwfSwic2NhbGUiOnsibWluIjoxfSwidXBkYXRlU3RyYXRlZ3kiOnsidHlwZSI6IlJvbGxpbmdVcGRhdGUiLCJyb2xsaW5nVXBkYXRlIjp7Im1heFVuYXZhaWxhYmxlIjoiMjUlIn19LCJwaXBlbGluZU5hbWUiOiJzaW1wbGUtcGlwZWxpbmUiLCJpbnRlclN0ZXBCdWZmZXJTZXJ2aWNlTmFtZSI6IiIsInJlcGxpY2FzIjowLCJmcm9tRWRnZXMiOlt7ImZyb20iOiJpbiIsInRvIjoib3V0IiwiY29uZGl0aW9ucyI6bnVsbCwiZnJvbVZlcnRleFR5cGUiOiJTb3VyY2UiLCJmcm9tVmVydGV4UGFydGl0aW9uQ291bnQiOjEsImZyb21WZXJ0ZXhMaW1pdHMiOnsicmVhZEJhdGNoU2l6ZSI6NTAwLCJyZWFkVGltZW91dCI6IjFzIiwiYnVmZmVyTWF4TGVuZ3RoIjozMDAwMCwiYnVmZmVyVXNhZ2VMaW1pdCI6ODB9LCJ0b1ZlcnRleFR5cGUiOiJTaW5rIiwidG9WZXJ0ZXhQYXJ0aXRpb25Db3VudCI6MSwidG9WZXJ0ZXhMaW1pdHMiOnsicmVhZEJhdGNoU2l6ZSI6NTAwLCJyZWFkVGltZW91dCI6IjFzIiwiYnVmZmVyTWF4TGVuZ3RoIjozMDAwMCwiYnVmZmVyVXNhZ2VMaW1pdCI6ODB9fV0sIndhdGVybWFyayI6eyJtYXhEZWxheSI6IjBzIn19LCJzdGF0dXMiOnsicGhhc2UiOiIiLCJyZXBsaWNhcyI6MCwiZGVzaXJlZFJlcGxpY2FzIjowLCJsYXN0U2NhbGVkQXQiOm51bGx9fQ==".to_string(); + + let env_vars = [("NUMAFLOW_ISBSVC_JETSTREAM_URL", "localhost:4222")]; + let pipeline_config = PipelineConfig::load(pipeline_cfg_base64, env_vars).unwrap(); + + let expected = PipelineConfig { + pipeline_name: 
"simple-pipeline".to_string(), + vertex_name: "out".to_string(), + replica: 0, + batch_size: 500, + paf_batch_size: 30000, + read_timeout: Duration::from_secs(1), + js_client_config: isb::jetstream::ClientConfig { + url: "localhost:4222".to_string(), + user: None, + password: None, + }, + from_vertex_config: vec![FromVertexConfig { + name: "in".to_string(), + reader_config: BufferReaderConfig { + partitions: 1, + streams: vec![("default-simple-pipeline-out-0".into(), 0)], + batch_size: 500, + read_timeout: Duration::from_secs(1), + wip_ack_interval: Duration::from_secs(1), + }, + partitions: 0, + }], + to_vertex_config: vec![], + vertex_config: VertexType::Sink(SinkVtxConfig { + sink_config: SinkConfig { + sink_type: SinkType::Blackhole(BlackholeConfig {}), + retry_config: None, + }, + fb_sink_config: None, + }), + metrics_config: MetricsConfig { + metrics_server_listen_port: 2469, + lag_check_interval_in_secs: 5, + lag_refresh_interval_in_secs: 3, + }, + }; + assert_eq!(pipeline_config, expected); + } + + #[test] + fn test_pipeline_config_load_all() { + let pipeline_cfg_base64 = 
"eyJtZXRhZGF0YSI6eyJuYW1lIjoic2ltcGxlLXBpcGVsaW5lLWluIiwibmFtZXNwYWNlIjoiZGVmYXVsdCIsImNyZWF0aW9uVGltZXN0YW1wIjpudWxsfSwic3BlYyI6eyJuYW1lIjoiaW4iLCJzb3VyY2UiOnsiZ2VuZXJhdG9yIjp7InJwdSI6MTAwMDAwLCJkdXJhdGlvbiI6IjFzIiwibXNnU2l6ZSI6OCwiaml0dGVyIjoiMHMifX0sImNvbnRhaW5lclRlbXBsYXRlIjp7InJlc291cmNlcyI6e30sImVudiI6W3sibmFtZSI6IlBBRl9CQVRDSF9TSVpFIiwidmFsdWUiOiIxMDAwMDAifV19LCJsaW1pdHMiOnsicmVhZEJhdGNoU2l6ZSI6MTAwMCwicmVhZFRpbWVvdXQiOiIxcyIsImJ1ZmZlck1heExlbmd0aCI6MTUwMDAwLCJidWZmZXJVc2FnZUxpbWl0Ijo4NX0sInNjYWxlIjp7Im1pbiI6MX0sInVwZGF0ZVN0cmF0ZWd5Ijp7InR5cGUiOiJSb2xsaW5nVXBkYXRlIiwicm9sbGluZ1VwZGF0ZSI6eyJtYXhVbmF2YWlsYWJsZSI6IjI1JSJ9fSwicGlwZWxpbmVOYW1lIjoic2ltcGxlLXBpcGVsaW5lIiwiaW50ZXJTdGVwQnVmZmVyU2VydmljZU5hbWUiOiIiLCJyZXBsaWNhcyI6MCwidG9FZGdlcyI6W3siZnJvbSI6ImluIiwidG8iOiJvdXQiLCJjb25kaXRpb25zIjpudWxsLCJmcm9tVmVydGV4VHlwZSI6IlNvdXJjZSIsImZyb21WZXJ0ZXhQYXJ0aXRpb25Db3VudCI6MSwiZnJvbVZlcnRleExpbWl0cyI6eyJyZWFkQmF0Y2hTaXplIjoxMDAwLCJyZWFkVGltZW91dCI6IjFzIiwiYnVmZmVyTWF4TGVuZ3RoIjoxNTAwMDAsImJ1ZmZlclVzYWdlTGltaXQiOjg1fSwidG9WZXJ0ZXhUeXBlIjoiU2luayIsInRvVmVydGV4UGFydGl0aW9uQ291bnQiOjEsInRvVmVydGV4TGltaXRzIjp7InJlYWRCYXRjaFNpemUiOjEwMDAsInJlYWRUaW1lb3V0IjoiMXMiLCJidWZmZXJNYXhMZW5ndGgiOjE1MDAwMCwiYnVmZmVyVXNhZ2VMaW1pdCI6ODV9fV0sIndhdGVybWFyayI6eyJkaXNhYmxlZCI6dHJ1ZSwibWF4RGVsYXkiOiIwcyJ9fSwic3RhdHVzIjp7InBoYXNlIjoiIiwicmVwbGljYXMiOjAsImRlc2lyZWRSZXBsaWNhcyI6MCwibGFzdFNjYWxlZEF0IjpudWxsfX0="; + + let env_vars = [("NUMAFLOW_ISBSVC_JETSTREAM_URL", "localhost:4222")]; + let pipeline_config = + PipelineConfig::load(pipeline_cfg_base64.to_string(), env_vars).unwrap(); + + let expected = PipelineConfig { + pipeline_name: "simple-pipeline".to_string(), + vertex_name: "in".to_string(), + replica: 0, + batch_size: 1000, + paf_batch_size: 30000, + read_timeout: Duration::from_secs(1), + js_client_config: isb::jetstream::ClientConfig { + url: "localhost:4222".to_string(), + user: None, + password: None, + }, + from_vertex_config: vec![], + to_vertex_config: vec![ToVertexConfig { + name: 
"out".to_string(), + writer_config: BufferWriterConfig { + streams: vec![("default-simple-pipeline-out-0".to_string(), 0)], + partitions: 1, + max_length: 150000, + usage_limit: 0.85, + ..Default::default() + }, + partitions: 1, + conditions: None, + }], + vertex_config: VertexType::Source(SourceVtxConfig { source_config: SourceConfig { - source_type: SourceType::Generator(Default::default()), + source_type: SourceType::Generator(GeneratorConfig { + rpu: 100000, + content: Default::default(), + duration: Duration::from_millis(1000), + value: None, + key_count: 0, + msg_size_bytes: 8, + jitter: Duration::from_secs(0), + }), }, transformer_config: None, }), - }) + metrics_config: Default::default(), + }; + + assert_eq!(pipeline_config, expected); } } diff --git a/rust/numaflow-core/src/config/pipeline/isb.rs b/rust/numaflow-core/src/config/pipeline/isb.rs index 89723cc9be..704d19de08 100644 --- a/rust/numaflow-core/src/config/pipeline/isb.rs +++ b/rust/numaflow-core/src/config/pipeline/isb.rs @@ -1,53 +1,155 @@ /// Jetstream ISB related configurations. 
-pub mod jetstream { - use std::fmt; - use std::time::Duration; - - // jetstream related constants - const DEFAULT_PARTITION_IDX: u16 = 0; - const DEFAULT_MAX_LENGTH: usize = 30000; - const DEFAULT_USAGE_LIMIT: f64 = 0.8; - const DEFAULT_REFRESH_INTERVAL_SECS: u64 = 1; - const DEFAULT_BUFFER_FULL_STRATEGY: BufferFullStrategy = BufferFullStrategy::RetryUntilSuccess; - const DEFAULT_RETRY_INTERVAL_MILLIS: u64 = 10; - - #[derive(Debug, Clone)] - pub(crate) struct StreamWriterConfig { - pub name: String, - pub partition_idx: u16, - pub max_length: usize, - pub refresh_interval: Duration, - pub usage_limit: f64, - pub buffer_full_strategy: BufferFullStrategy, - pub retry_interval: Duration, - } - - impl Default for StreamWriterConfig { +use std::fmt; +use std::time::Duration; + +const DEFAULT_PARTITION_IDX: u16 = 0; +const DEFAULT_BATCH_SIZE: usize = 500; +const DEFAULT_PARTITIONS: u16 = 1; +const DEFAULT_MAX_LENGTH: usize = 30000; +const DEFAULT_USAGE_LIMIT: f64 = 0.8; +const DEFAULT_REFRESH_INTERVAL_SECS: u64 = 1; +const DEFAULT_BUFFER_FULL_STRATEGY: BufferFullStrategy = BufferFullStrategy::RetryUntilSuccess; +const DEFAULT_RETRY_INTERVAL_MILLIS: u64 = 10; +const DEFAULT_WIP_ACK_INTERVAL_MILLIS: u64 = 1000; +const DEFAULT_READ_TIMEOUT_MILLIS: u64 = 1000; + +pub(crate) mod jetstream { + const DEFAULT_URL: &str = "localhost:4222"; + #[derive(Debug, Clone, PartialEq)] + pub(crate) struct ClientConfig { + pub url: String, + pub user: Option, + pub password: Option, + } + + impl Default for ClientConfig { fn default() -> Self { - StreamWriterConfig { - name: "default".to_string(), - partition_idx: DEFAULT_PARTITION_IDX, - max_length: DEFAULT_MAX_LENGTH, - usage_limit: DEFAULT_USAGE_LIMIT, - refresh_interval: Duration::from_secs(DEFAULT_REFRESH_INTERVAL_SECS), - buffer_full_strategy: DEFAULT_BUFFER_FULL_STRATEGY, - retry_interval: Duration::from_millis(DEFAULT_RETRY_INTERVAL_MILLIS), + ClientConfig { + url: DEFAULT_URL.to_string(), + user: None, + password: None, } } } +} + 
+#[derive(Debug, Clone, PartialEq)] +pub(crate) struct BufferWriterConfig { + pub streams: Vec<(String, u16)>, + pub partitions: u16, + pub max_length: usize, + pub refresh_interval: Duration, + pub usage_limit: f64, + pub buffer_full_strategy: BufferFullStrategy, + pub retry_interval: Duration, +} + +impl Default for BufferWriterConfig { + fn default() -> Self { + BufferWriterConfig { + streams: vec![("default-0".to_string(), DEFAULT_PARTITION_IDX)], + partitions: DEFAULT_PARTITIONS, + max_length: DEFAULT_MAX_LENGTH, + usage_limit: DEFAULT_USAGE_LIMIT, + refresh_interval: Duration::from_secs(DEFAULT_REFRESH_INTERVAL_SECS), + buffer_full_strategy: DEFAULT_BUFFER_FULL_STRATEGY, + retry_interval: Duration::from_millis(DEFAULT_RETRY_INTERVAL_MILLIS), + } + } +} + +#[derive(Debug, Clone, Eq, PartialEq)] +pub(crate) enum BufferFullStrategy { + RetryUntilSuccess, + #[allow(dead_code)] + DiscardLatest, +} - #[derive(Debug, Clone, Eq, PartialEq)] - pub(crate) enum BufferFullStrategy { - RetryUntilSuccess, - DiscardLatest, +impl fmt::Display for BufferFullStrategy { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + BufferFullStrategy::RetryUntilSuccess => write!(f, "retryUntilSuccess"), + BufferFullStrategy::DiscardLatest => write!(f, "discardLatest"), + } } +} - impl fmt::Display for BufferFullStrategy { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - BufferFullStrategy::RetryUntilSuccess => write!(f, "retryUntilSuccess"), - BufferFullStrategy::DiscardLatest => write!(f, "discardLatest"), - } +#[derive(Debug, Clone, PartialEq)] +pub(crate) struct BufferReaderConfig { + pub(crate) partitions: u16, + pub(crate) streams: Vec<(String, u16)>, + pub(crate) batch_size: usize, + pub(crate) read_timeout: Duration, + pub(crate) wip_ack_interval: Duration, +} + +impl Default for BufferReaderConfig { + fn default() -> Self { + BufferReaderConfig { + partitions: DEFAULT_PARTITIONS, + streams: vec![("default-0".to_string(), 
DEFAULT_PARTITION_IDX)], + batch_size: DEFAULT_BATCH_SIZE, + wip_ack_interval: Duration::from_millis(DEFAULT_WIP_ACK_INTERVAL_MILLIS), + read_timeout: Duration::from_millis(DEFAULT_READ_TIMEOUT_MILLIS), } } } + +#[cfg(test)] +mod jetstream_client_config { + use super::jetstream::*; + + #[test] + fn test_default_client_config() { + let expected_config = ClientConfig { + url: "localhost:4222".to_string(), + user: None, + password: None, + }; + let config = ClientConfig::default(); + assert_eq!(config, expected_config); + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_default_buffer_writer_config() { + let expected = BufferWriterConfig { + streams: vec![("default-0".to_string(), DEFAULT_PARTITION_IDX)], + partitions: DEFAULT_PARTITIONS, + max_length: DEFAULT_MAX_LENGTH, + usage_limit: DEFAULT_USAGE_LIMIT, + refresh_interval: Duration::from_secs(DEFAULT_REFRESH_INTERVAL_SECS), + buffer_full_strategy: DEFAULT_BUFFER_FULL_STRATEGY, + retry_interval: Duration::from_millis(DEFAULT_RETRY_INTERVAL_MILLIS), + }; + let config = BufferWriterConfig::default(); + + assert_eq!(config, expected); + } + + #[test] + fn test_buffer_full_strategy_display() { + let val = BufferFullStrategy::RetryUntilSuccess; + assert_eq!(val.to_string(), "retryUntilSuccess"); + + let val = BufferFullStrategy::DiscardLatest; + assert_eq!(val.to_string(), "discardLatest"); + } + + #[test] + fn test_default_buffer_reader_config() { + let expected = BufferReaderConfig { + partitions: DEFAULT_PARTITIONS, + streams: vec![("default-0".to_string(), DEFAULT_PARTITION_IDX)], + batch_size: DEFAULT_BATCH_SIZE, + wip_ack_interval: Duration::from_millis(DEFAULT_WIP_ACK_INTERVAL_MILLIS), + read_timeout: Duration::from_millis(DEFAULT_READ_TIMEOUT_MILLIS), + }; + let config = BufferReaderConfig::default(); + assert_eq!(config, expected); + } +} diff --git a/rust/numaflow-core/src/lib.rs b/rust/numaflow-core/src/lib.rs index a9c38e00fb..e324c0ff33 100644 --- a/rust/numaflow-core/src/lib.rs +++ 
b/rust/numaflow-core/src/lib.rs @@ -1,4 +1,9 @@ -use tracing::error; +use tokio::signal; +use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; +use tracing::{error, info}; + +use crate::config::{config, CustomResourceType}; /// Custom Error handling. mod error; @@ -10,7 +15,6 @@ pub(crate) use crate::error::{Error, Result}; /// [Pipeline]: https://numaflow.numaproj.io/core-concepts/pipeline/ /// [ISB]: https://numaflow.numaproj.io/core-concepts/inter-step-buffer/ pub mod monovertex; -pub use crate::monovertex::mono_vertex; /// Parse configs, including Numaflow specifications. mod config; @@ -37,7 +41,72 @@ mod transformer; /// Reads from a stream. mod reader; +pub(crate) mod metrics; /// [Pipeline] /// /// [Pipeline]: https://numaflow.numaproj.io/core-concepts/pipeline/ mod pipeline; + +pub async fn run() -> Result<()> { + let cln_token = CancellationToken::new(); + let shutdown_cln_token = cln_token.clone(); + + // wait for SIG{INT,TERM} and invoke cancellation token. + let shutdown_handle: JoinHandle> = tokio::spawn(async move { + shutdown_signal().await; + shutdown_cln_token.cancel(); + Ok(()) + }); + + let crd_type = config().custom_resource_type.clone(); + match crd_type { + CustomResourceType::MonoVertex(config) => { + info!("Starting monovertex forwarder with config: {:?}", config); + // Run the forwarder with cancellation token. 
+ if let Err(e) = monovertex::start_forwarder(cln_token, &config).await { + error!("Application error running monovertex: {:?}", e); + + // abort the signal handler task since we have an error and we are shutting down + if !shutdown_handle.is_finished() { + shutdown_handle.abort(); + } + } + } + CustomResourceType::Pipeline(config) => { + info!("Starting pipeline forwarder with config: {:?}", config); + if let Err(e) = pipeline::start_forwarder(cln_token, config).await { + error!("Application error running pipeline: {:?}", e); + + // abort the signal handler task since we have an error and we are shutting down + if !shutdown_handle.is_finished() { + shutdown_handle.abort(); + } + } + } + } + + info!("Gracefully Exiting..."); + Ok(()) +} + +async fn shutdown_signal() { + let ctrl_c = async { + signal::ctrl_c() + .await + .expect("failed to install Ctrl+C handler"); + info!("Received Ctrl+C signal"); + }; + + let terminate = async { + signal::unix::signal(signal::unix::SignalKind::terminate()) + .expect("failed to install signal handler") + .recv() + .await; + info!("Received terminate signal"); + }; + + tokio::select! 
{ + _ = ctrl_c => {}, + _ = terminate => {}, + } +} diff --git a/rust/numaflow-core/src/message.rs b/rust/numaflow-core/src/message.rs index 6208b43f94..ef650a0df1 100644 --- a/rust/numaflow-core/src/message.rs +++ b/rust/numaflow-core/src/message.rs @@ -3,9 +3,10 @@ use std::collections::HashMap; use std::sync::OnceLock; use std::{env, fmt}; +use async_nats::HeaderValue; use base64::engine::general_purpose::STANDARD as BASE64_STANDARD; use base64::Engine; -use bytes::Bytes; +use bytes::{Bytes, BytesMut}; use chrono::{DateTime, Utc}; use numaflow_pb::clients::sink::sink_request::Request; use numaflow_pb::clients::sink::Status::{Failure, Fallback, Success}; @@ -14,6 +15,7 @@ use numaflow_pb::clients::source::{read_response, AckRequest}; use numaflow_pb::clients::sourcetransformer::SourceTransformRequest; use prost::Message as ProtoMessage; use serde::{Deserialize, Serialize}; +use tokio::sync::oneshot; use crate::shared::utils::{prost_timestamp_from_utc, utc_from_timestamp}; use crate::Error; @@ -51,7 +53,7 @@ pub(crate) struct Message { /// keys of the message pub(crate) keys: Vec, /// actual payload of the message - pub(crate) value: Vec, + pub(crate) value: Bytes, /// offset of the message, it is optional because offset is only /// available when we read the message, and we don't persist the /// offset in the ISB. @@ -80,6 +82,43 @@ impl fmt::Display for Offset { } } +impl TryFrom for Message { + type Error = Error; + + fn try_from(message: async_nats::Message) -> std::result::Result { + let payload = message.payload; + let headers: HashMap = message + .headers + .unwrap_or_default() + .iter() + .map(|(key, value)| { + ( + key.to_string(), + value.first().unwrap_or(&HeaderValue::from("")).to_string(), + ) + }) + .collect(); + // FIXME(cr): we should not be using subject. 
keys are in the payload + let keys = message.subject.split('.').map(|s| s.to_string()).collect(); + let event_time = Utc::now(); + let offset = None; + let id = MessageID { + vertex_name: get_vertex_name().to_string(), + offset: "0".to_string(), + index: 0, + }; + + Ok(Self { + keys, + value: payload, // FIXME: use Bytes + offset, + event_time, + id, + headers, + }) + } +} + /// IntOffset is integer based offset enum type. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct IntOffset { @@ -96,16 +135,6 @@ impl IntOffset { } } -impl IntOffset { - fn sequence(&self) -> Result { - Ok(self.offset) - } - - fn partition_idx(&self) -> u16 { - self.partition_idx - } -} - impl fmt::Display for IntOffset { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}-{}", self.offset, self.partition_idx) @@ -128,22 +157,24 @@ impl StringOffset { } } -impl StringOffset { - fn sequence(&self) -> Result { - Ok(self.offset.parse().unwrap()) - } - - fn partition_idx(&self) -> u16 { - self.partition_idx - } -} - impl fmt::Display for StringOffset { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}-{}", self.offset, self.partition_idx) } } +pub(crate) enum ReadAck { + /// Message was successfully processed. + Ack, + /// Message will not be processed now and processing can move onto the next message, NAK’d message will be retried. 
+ Nak, +} + +pub(crate) struct ReadMessage { + pub(crate) message: Message, + pub(crate) ack: oneshot::Sender, +} + #[derive(Debug, Clone, Serialize, Deserialize)] pub(crate) struct MessageID { pub(crate) vertex_name: String, @@ -151,16 +182,6 @@ pub(crate) struct MessageID { pub(crate) index: i32, } -impl MessageID { - fn new(vertex_name: String, offset: String, index: i32) -> Self { - Self { - vertex_name, - offset, - index, - } - } -} - impl From for MessageID { fn from(id: numaflow_pb::objects::isb::MessageId) -> Self { Self { @@ -208,7 +229,7 @@ impl TryFrom for AckRequest { } } -impl TryFrom for Vec { +impl TryFrom for BytesMut { type Error = Error; fn try_from(message: Message) -> std::result::Result { @@ -224,11 +245,11 @@ impl TryFrom for Vec { headers: message.headers.clone(), }), body: Some(numaflow_pb::objects::isb::Body { - payload: message.value.clone(), + payload: message.value.to_vec(), }), }; - let mut buf = Vec::new(); + let mut buf = BytesMut::new(); proto_message .encode(&mut buf) .map_err(|e| Error::Proto(e.to_string()))?; @@ -236,11 +257,11 @@ impl TryFrom for Vec { } } -impl TryFrom> for Message { +impl TryFrom for Message { type Error = Error; - fn try_from(bytes: Vec) -> std::result::Result { - let proto_message = numaflow_pb::objects::isb::Message::decode(Bytes::from(bytes)) + fn try_from(bytes: Bytes) -> std::result::Result { + let proto_message = numaflow_pb::objects::isb::Message::decode(bytes) .map_err(|e| Error::Proto(e.to_string()))?; let header = proto_message @@ -256,7 +277,7 @@ impl TryFrom> for Message { Ok(Message { keys: header.keys, - value: body.payload, + value: body.payload.into(), offset: None, event_time: utc_from_timestamp(message_info.event_time), id: id.into(), @@ -273,7 +294,7 @@ impl From for SourceTransformRequest { numaflow_pb::clients::sourcetransformer::source_transform_request::Request { id: message.id.to_string(), keys: message.keys, - value: message.value, + value: message.value.to_vec(), event_time: 
prost_timestamp_from_utc(message.event_time), watermark: None, headers: message.headers, @@ -299,7 +320,7 @@ impl TryFrom for Message { Ok(Message { keys: result.keys, - value: result.payload, + value: result.payload.into(), offset: Some(source_offset.clone()), event_time: utc_from_timestamp(result.event_time), id: MessageID { @@ -318,7 +339,7 @@ impl From for SinkRequest { Self { request: Some(Request { keys: message.keys, - value: message.value, + value: message.value.to_vec(), event_time: prost_timestamp_from_utc(message.event_time), watermark: None, id: message.id.to_string(), @@ -447,7 +468,7 @@ mod tests { fn test_message_to_vec_u8() { let message = Message { keys: vec!["key1".to_string()], - value: vec![1, 2, 3], + value: vec![1, 2, 3].into(), offset: Some(Offset::String(StringOffset { offset: "123".to_string(), partition_idx: 0, @@ -461,7 +482,7 @@ mod tests { headers: HashMap::new(), }; - let result: Result> = message.clone().try_into(); + let result: Result = message.clone().try_into(); assert!(result.is_ok()); let proto_message = ProtoMessage { @@ -476,7 +497,7 @@ mod tests { headers: message.headers.clone(), }), body: Some(Body { - payload: message.value.clone(), + payload: message.value.clone().into(), }), }; @@ -507,8 +528,9 @@ mod tests { }), }; - let mut buf = Vec::new(); + let mut buf = BytesMut::new(); prost::Message::encode(&proto_message, &mut buf).unwrap(); + let buf = buf.freeze(); let result: Result = buf.try_into(); assert!(result.is_ok()); @@ -526,7 +548,7 @@ mod tests { fn test_message_to_source_transform_request() { let message = Message { keys: vec!["key1".to_string()], - value: vec![1, 2, 3], + value: vec![1, 2, 3].into(), offset: Some(Offset::String(StringOffset { offset: "123".to_string(), partition_idx: 0, @@ -575,7 +597,7 @@ mod tests { fn test_message_to_sink_request() { let message = Message { keys: vec!["key1".to_string()], - value: vec![1, 2, 3], + value: vec![1, 2, 3].into(), offset: Some(Offset::String(StringOffset { offset: 
"123".to_string(), partition_idx: 0, @@ -639,7 +661,11 @@ mod tests { #[test] fn test_message_id_to_proto() { - let message_id = MessageID::new("vertex".to_string(), "123".to_string(), 0); + let message_id = MessageID { + vertex_name: "vertex".to_string(), + offset: "123".to_string(), + index: 0, + }; let proto_id: MessageId = message_id.into(); assert_eq!(proto_id.vertex_name, "vertex"); assert_eq!(proto_id.offset, "123"); diff --git a/rust/numaflow-core/src/monovertex/metrics.rs b/rust/numaflow-core/src/metrics.rs similarity index 86% rename from rust/numaflow-core/src/monovertex/metrics.rs rename to rust/numaflow-core/src/metrics.rs index 266b1a9abf..c81c16231c 100644 --- a/rust/numaflow-core/src/monovertex/metrics.rs +++ b/rust/numaflow-core/src/metrics.rs @@ -36,8 +36,14 @@ const MVTX_NAME_LABEL: &str = "mvtx_name"; const REPLICA_LABEL: &str = "mvtx_replica"; const PENDING_PERIOD_LABEL: &str = "period"; +const PIPELINE_NAME_LABEL: &str = "pipeline"; +const PIPELINE_REPLICA_LABEL: &str = "replica"; +const PIPELINE_PARTITION_NAME_LABEL: &str = "partition_name"; +const PIPELINE_VERTEX_LABEL: &str = "vertex"; +const PIPELINE_VERTEX_TYPE_LABEL: &str = "vertex_type"; + // The top-level metric registry is created with the GLOBAL_PREFIX -const GLOBAL_PREFIX: &str = "monovtx"; +const MVTX_REGISTRY_GLOBAL_PREFIX: &str = "monovtx"; // Prefixes for the sub-registries const SINK_REGISTRY_PREFIX: &str = "sink"; const FALLBACK_SINK_REGISTRY_PREFIX: &str = "fallback_sink"; @@ -67,18 +73,41 @@ const TRANSFORM_TIME: &str = "time"; const ACK_TIME: &str = "ack_time"; const SINK_TIME: &str = "time"; +const PIPELINE_FORWARDER_READ_TOTAL: &str = "data_read"; + /// Only user defined functions will have containers since rest /// are builtins. We save the gRPC clients to retrieve metrics and also -/// to do liveness checks. This means, these will be optionals since +/// to do liveness checks. 
+#[derive(Clone)] +pub(crate) enum UserDefinedContainerState { + Monovertex(MonovertexContainerState), + Pipeline(PipelineContainerState), +} + +/// MonovertexContainerState is used to store the gRPC clients for the +/// monovtx. These will be optionals since /// we do not require these for builtins. #[derive(Clone)] -pub(crate) struct UserDefinedContainerState { +pub(crate) struct MonovertexContainerState { pub source_client: Option>, pub sink_client: Option>, pub transformer_client: Option>, pub fb_sink_client: Option>, } +/// PipelineContainerState is used to store the gRPC clients for the +/// pipeline. +#[derive(Clone)] +pub(crate) enum PipelineContainerState { + Source( + ( + Option>, + Option>, + ), + ), + Sink((Option>, Option>)), +} + /// The global register of all metrics. #[derive(Default)] struct GlobalRegistry { @@ -90,7 +119,7 @@ impl GlobalRegistry { fn new() -> Self { GlobalRegistry { // Create a new registry for the metrics - registry: parking_lot::Mutex::new(Registry::with_prefix(GLOBAL_PREFIX)), + registry: parking_lot::Mutex::new(Registry::default()), } } } @@ -130,6 +159,12 @@ pub(crate) struct MonoVtxMetrics { pub(crate) fb_sink: FallbackSinkMetrics, } +/// PipelineMetrics is a struct which is used for storing the metrics related to the Pipeline +// TODO: Add the metrics for the pipeline +pub(crate) struct PipelineMetrics { + pub(crate) forwarder: PipelineForwarderMetrics, +} + /// Family of metrics for the sink pub(crate) struct SinkMetrics { pub(crate) write_total: Family, Counter>, @@ -147,6 +182,10 @@ pub(crate) struct TransformerMetrics { pub(crate) time: Family, Histogram>, } +pub(crate) struct PipelineForwarderMetrics { + pub(crate) data_read: Family, Counter>, +} + /// Exponential bucket distribution with range. /// Creates `length` buckets, where the lowest bucket is `min` and the highest bucket is `max`. /// The final +Inf bucket is not counted and not included in the returned iterator. 
@@ -209,6 +248,7 @@ impl MonoVtxMetrics { }; let mut registry = global_registry().registry.lock(); + let registry = registry.sub_registry_with_prefix(MVTX_REGISTRY_GLOBAL_PREFIX); // Register all the metrics to the global registry registry.register( READ_TOTAL, @@ -288,15 +328,44 @@ impl MonoVtxMetrics { } } +impl PipelineMetrics { + fn new() -> Self { + let metrics = Self { + forwarder: PipelineForwarderMetrics { + data_read: Default::default(), + }, + }; + let mut registry = global_registry().registry.lock(); + + // Pipeline forwarder sub-registry + let forwarder_registry = registry.sub_registry_with_prefix("forwarder"); + forwarder_registry.register( + PIPELINE_FORWARDER_READ_TOTAL, + "Total number of Data Messages Read", + metrics.forwarder.data_read.clone(), + ); + metrics + } +} + /// MONOVTX_METRICS is the MonoVtxMetrics object which stores the metrics static MONOVTX_METRICS: OnceLock = OnceLock::new(); // forward_metrics is a helper function used to fetch the // MonoVtxMetrics object -pub(crate) fn forward_metrics() -> &'static MonoVtxMetrics { +pub(crate) fn forward_mvtx_metrics() -> &'static MonoVtxMetrics { MONOVTX_METRICS.get_or_init(MonoVtxMetrics::new) } +/// PIPELINE_METRICS is the PipelineMetrics object which stores the metrics +static PIPELINE_METRICS: OnceLock = OnceLock::new(); + +// forward_pipeline_metrics is a helper function used to fetch the +// PipelineMetrics object +pub(crate) fn forward_pipeline_metrics() -> &'static PipelineMetrics { + PIPELINE_METRICS.get_or_init(PipelineMetrics::new) +} + /// MONOVTX_METRICS_LABELS are used to store the common labels used in the metrics static MONOVTX_METRICS_LABELS: OnceLock> = OnceLock::new(); @@ -315,6 +384,32 @@ pub(crate) fn mvtx_forward_metric_labels( }) } +static PIPELINE_READ_METRICS_LABELS: OnceLock> = OnceLock::new(); + +pub(crate) fn pipeline_forward_read_metric_labels( + pipeline_name: &str, + partition_name: &str, + vertex_name: &str, + vertex_type: &str, + replica: u16, +) -> &'static 
Vec<(String, String)> { + PIPELINE_READ_METRICS_LABELS.get_or_init(|| { + vec![ + (PIPELINE_NAME_LABEL.to_string(), pipeline_name.to_string()), + (PIPELINE_REPLICA_LABEL.to_string(), replica.to_string()), + ( + PIPELINE_PARTITION_NAME_LABEL.to_string(), + partition_name.to_string(), + ), + ( + PIPELINE_VERTEX_TYPE_LABEL.to_string(), + vertex_type.to_string(), + ), + (PIPELINE_VERTEX_LABEL.to_string(), vertex_name.to_string()), + ] + }) +} + // metrics_handler is used to generate and return a snapshot of the // current state of the metrics in the global registry pub async fn metrics_handler() -> impl IntoResponse { @@ -324,6 +419,10 @@ pub async fn metrics_handler() -> impl IntoResponse { debug!("Exposing metrics: {:?}", buffer); Response::builder() .status(StatusCode::OK) + .header( + axum::http::header::CONTENT_TYPE, + "application/openmetrics-text; version=1.0.0; charset=utf-8", + ) .body(Body::from(buffer)) .unwrap() } @@ -367,29 +466,63 @@ async fn livez() -> impl IntoResponse { } async fn sidecar_livez(State(state): State) -> impl IntoResponse { - if let Some(mut source_client) = state.source_client { - if source_client.is_ready(Request::new(())).await.is_err() { - error!("Source client is not available"); - return StatusCode::SERVICE_UNAVAILABLE; - } - } - if let Some(mut sink_client) = state.sink_client { - if sink_client.is_ready(Request::new(())).await.is_err() { - error!("Sink client is not available"); - return StatusCode::SERVICE_UNAVAILABLE; - } - } - if let Some(mut transformer_client) = state.transformer_client { - if transformer_client.is_ready(Request::new(())).await.is_err() { - error!("Transformer client is not available"); - return StatusCode::SERVICE_UNAVAILABLE; - } - } - if let Some(mut fb_sink_client) = state.fb_sink_client { - if fb_sink_client.is_ready(Request::new(())).await.is_err() { - error!("Fallback sink client is not available"); - return StatusCode::SERVICE_UNAVAILABLE; + match state { + 
UserDefinedContainerState::Monovertex(monovertex_state) => { + if let Some(mut source_client) = monovertex_state.source_client { + if source_client.is_ready(Request::new(())).await.is_err() { + error!("Monovertex source client is not ready"); + return StatusCode::INTERNAL_SERVER_ERROR; + } + } + if let Some(mut sink_client) = monovertex_state.sink_client { + if sink_client.is_ready(Request::new(())).await.is_err() { + error!("Monovertex sink client is not ready"); + return StatusCode::INTERNAL_SERVER_ERROR; + } + } + if let Some(mut transformer_client) = monovertex_state.transformer_client { + if transformer_client.is_ready(Request::new(())).await.is_err() { + error!("Monovertex transformer client is not ready"); + return StatusCode::INTERNAL_SERVER_ERROR; + } + } + if let Some(mut fb_sink_client) = monovertex_state.fb_sink_client { + if fb_sink_client.is_ready(Request::new(())).await.is_err() { + error!("Monovertex fallback sink client is not ready"); + return StatusCode::INTERNAL_SERVER_ERROR; + } + } } + UserDefinedContainerState::Pipeline(pipeline_state) => match pipeline_state { + PipelineContainerState::Source((source_client, transformer_client)) => { + if let Some(mut source_client) = source_client { + if source_client.is_ready(Request::new(())).await.is_err() { + error!("Pipeline source client is not ready"); + return StatusCode::INTERNAL_SERVER_ERROR; + } + } + if let Some(mut transformer_client) = transformer_client { + if transformer_client.is_ready(Request::new(())).await.is_err() { + error!("Pipeline transformer client is not ready"); + return StatusCode::INTERNAL_SERVER_ERROR; + } + } + } + PipelineContainerState::Sink((sink_client, fb_sink_client)) => { + if let Some(mut sink_client) = sink_client { + if sink_client.is_ready(Request::new(())).await.is_err() { + error!("Pipeline sink client is not ready"); + return StatusCode::INTERNAL_SERVER_ERROR; + } + } + if let Some(mut fb_sink_client) = fb_sink_client { + if 
fb_sink_client.is_ready(Request::new(())).await.is_err() { + error!("Pipeline fallback sink client is not ready"); + return StatusCode::INTERNAL_SERVER_ERROR; + } + } + } + }, } StatusCode::NO_CONTENT } @@ -566,7 +699,7 @@ async fn expose_pending_metrics( mvtx_forward_metric_labels(mvtx_name.clone(), replica).clone(); metric_labels.push((PENDING_PERIOD_LABEL.to_string(), label.to_string())); pending_info.insert(label, pending); - forward_metrics() + forward_mvtx_metrics() .source_pending .get_or_create(&metric_labels) .set(pending); @@ -617,7 +750,6 @@ mod tests { use tokio::sync::mpsc::Sender; use super::*; - use crate::monovertex::metrics::UserDefinedContainerState; use crate::shared::utils::create_rpc_channel; struct SimpleSource; @@ -726,7 +858,7 @@ mod tests { // wait for the servers to start // FIXME: we need to have a better way, this is flaky tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; - let metrics_state = UserDefinedContainerState { + let metrics_state = UserDefinedContainerState::Monovertex(MonovertexContainerState { source_client: Some(SourceClient::new( create_rpc_channel(src_sock_file).await.unwrap(), )), @@ -739,7 +871,7 @@ mod tests { fb_sink_client: Some(SinkClient::new( create_rpc_channel(fb_sink_sock_file).await.unwrap(), )), - }; + }); let addr: SocketAddr = "127.0.0.1:9091".parse().unwrap(); let metrics_state_clone = metrics_state.clone(); @@ -818,7 +950,7 @@ mod tests { for (i, (label, _)) in LOOKBACK_SECONDS_MAP.iter().enumerate() { let mut metric_labels = mvtx_forward_metric_labels("test".to_string(), 0).clone(); metric_labels.push((PENDING_PERIOD_LABEL.to_string(), label.to_string())); - let guage = forward_metrics() + let guage = forward_mvtx_metrics() .source_pending .get_or_create(&metric_labels) .get(); @@ -879,7 +1011,7 @@ mod tests { #[test] fn test_metric_names() { - let metrics = forward_metrics(); + let metrics = forward_mvtx_metrics(); // Use a fixed set of labels instead of the ones from 
mvtx_forward_metric_labels() since other test functions may also set it. let common_labels = vec![ ( diff --git a/rust/numaflow-core/src/monovertex.rs b/rust/numaflow-core/src/monovertex.rs index 2ae0e63447..598a3d9e83 100644 --- a/rust/numaflow-core/src/monovertex.rs +++ b/rust/numaflow-core/src/monovertex.rs @@ -1,28 +1,25 @@ use forwarder::ForwarderBuilder; -use metrics::UserDefinedContainerState; use numaflow_pb::clients::sink::sink_client::SinkClient; use numaflow_pb::clients::source::source_client::SourceClient; use numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; - -use tokio::signal; -use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; use tonic::transport::Channel; -use tracing::{error, info}; +use tracing::info; use crate::config::components::{sink, source, transformer}; use crate::config::monovertex::MonovertexConfig; -use crate::config::{config, CustomResourceType}; use crate::error::{self, Error}; +use crate::metrics; use crate::shared::server_info::check_for_server_compatibility; use crate::shared::utils; -use crate::shared::utils::create_rpc_channel; -use crate::sink::{SinkClientType, SinkHandle}; -use crate::source::generator::{new_generator, GeneratorAck, GeneratorLagReader, GeneratorRead}; -use crate::source::user_defined::{ - new_source, UserDefinedSourceAck, UserDefinedSourceLagReader, UserDefinedSourceRead, +use crate::shared::utils::{ + create_rpc_channel, wait_until_sink_ready, wait_until_source_ready, + wait_until_transformer_ready, }; -use crate::source::SourceHandle; +use crate::sink::{SinkClientType, SinkHandle}; +use crate::source::generator::new_generator; +use crate::source::user_defined::new_source; +use crate::source::{SourceHandle, SourceType}; use crate::transformer::user_defined::SourceTransformHandle; /// [forwarder] orchestrates data movement from the Source to the Sink via the optional SourceTransformer. 
@@ -32,73 +29,8 @@ use crate::transformer::user_defined::SourceTransformHandle; /// - Calls the Sinker to write the batch to the Sink /// - Send Acknowledgement back to the Source mod forwarder; -pub(crate) mod metrics; - -pub async fn mono_vertex() -> error::Result<()> { - let cln_token = CancellationToken::new(); - let shutdown_cln_token = cln_token.clone(); - - // wait for SIG{INT,TERM} and invoke cancellation token. - let shutdown_handle: JoinHandle> = tokio::spawn(async move { - shutdown_signal().await; - shutdown_cln_token.cancel(); - Ok(()) - }); - - let crd_type = config().custom_resource_type.clone(); - match crd_type { - CustomResourceType::MonoVertex(config) => { - // Run the forwarder with cancellation token. - if let Err(e) = start_forwarder(cln_token, &config).await { - error!("Application error: {:?}", e); - - // abort the signal handler task since we have an error and we are shutting down - if !shutdown_handle.is_finished() { - shutdown_handle.abort(); - } - } - } - CustomResourceType::Pipeline(_) => { - panic!("Pipeline not supported") - } - } - - info!("Gracefully Exiting..."); - Ok(()) -} -async fn shutdown_signal() { - let ctrl_c = async { - signal::ctrl_c() - .await - .expect("failed to install Ctrl+C handler"); - info!("Received Ctrl+C signal"); - }; - - let terminate = async { - signal::unix::signal(signal::unix::SignalKind::terminate()) - .expect("failed to install signal handler") - .recv() - .await; - info!("Received terminate signal"); - }; - - tokio::select! 
{ - _ = ctrl_c => {}, - _ = terminate => {}, - } -} - -pub(crate) enum SourceType { - UserDefinedSource( - UserDefinedSourceRead, - UserDefinedSourceAck, - UserDefinedSourceLagReader, - ), - Generator(GeneratorRead, GeneratorAck, GeneratorLagReader), -} - -async fn start_forwarder( +pub(crate) async fn start_forwarder( cln_token: CancellationToken, config: &MonovertexConfig, ) -> error::Result<()> { @@ -112,16 +44,18 @@ async fn start_forwarder( ) .await?; - Some( + let mut source_grpc_client = SourceClient::new(create_rpc_channel(source_config.socket_path.clone().into()).await?) .max_encoding_message_size(source_config.grpc_max_message_size) - .max_encoding_message_size(source_config.grpc_max_message_size), - ) + .max_encoding_message_size(source_config.grpc_max_message_size); + + wait_until_source_ready(&cln_token, &mut source_grpc_client).await?; + Some(source_grpc_client) } else { None }; - let mut sink_grpc_client = if let sink::SinkType::UserDefined(udsink_config) = + let sink_grpc_client = if let sink::SinkType::UserDefined(udsink_config) = &config.sink_config.sink_type { // do server compatibility check @@ -131,16 +65,18 @@ async fn start_forwarder( ) .await?; - Some( + let mut sink_grpc_client = SinkClient::new(create_rpc_channel(udsink_config.socket_path.clone().into()).await?) 
.max_encoding_message_size(udsink_config.grpc_max_message_size) - .max_encoding_message_size(udsink_config.grpc_max_message_size), - ) + .max_encoding_message_size(udsink_config.grpc_max_message_size); + + wait_until_sink_ready(&cln_token, &mut sink_grpc_client).await?; + Some(sink_grpc_client) } else { None }; - let mut fb_sink_grpc_client = if let Some(fb_sink) = &config.fb_sink_config { + let fb_sink_grpc_client = if let Some(fb_sink) = &config.fb_sink_config { if let sink::SinkType::UserDefined(fb_sink_config) = &fb_sink.sink_type { // do server compatibility check check_for_server_compatibility( @@ -149,13 +85,14 @@ async fn start_forwarder( ) .await?; - Some( - SinkClient::new( - create_rpc_channel(fb_sink_config.socket_path.clone().into()).await?, - ) - .max_encoding_message_size(fb_sink_config.grpc_max_message_size) - .max_encoding_message_size(fb_sink_config.grpc_max_message_size), + let mut fb_sink_grpc_client = SinkClient::new( + create_rpc_channel(fb_sink_config.socket_path.clone().into()).await?, ) + .max_encoding_message_size(fb_sink_config.grpc_max_message_size) + .max_encoding_message_size(fb_sink_config.grpc_max_message_size); + + wait_until_sink_ready(&cln_token, &mut fb_sink_grpc_client).await?; + Some(fb_sink_grpc_client) } else { None } @@ -163,7 +100,7 @@ async fn start_forwarder( None }; - let mut transformer_grpc_client = if let Some(transformer) = &config.transformer_config { + let transformer_grpc_client = if let Some(transformer) = &config.transformer_config { if let transformer::TransformerType::UserDefined(transformer_config) = &transformer.transformer_type { @@ -174,12 +111,13 @@ async fn start_forwarder( ) .await?; - let transformer_grpc_client = SourceTransformClient::new( + let mut transformer_grpc_client = SourceTransformClient::new( create_rpc_channel(transformer_config.socket_path.clone().into()).await?, ) .max_encoding_message_size(transformer_config.grpc_max_message_size) 
.max_encoding_message_size(transformer_config.grpc_max_message_size); + wait_until_transformer_ready(&cln_token, &mut transformer_grpc_client).await?; Some(transformer_grpc_client.clone()) } else { None @@ -188,16 +126,6 @@ async fn start_forwarder( None }; - // readiness check for all the ud containers - utils::wait_until_ready( - cln_token.clone(), - &mut source_grpc_client, - &mut sink_grpc_client, - &mut transformer_grpc_client, - &mut fb_sink_grpc_client, - ) - .await?; - let source_type = fetch_source(config, &mut source_grpc_client).await?; let (sink, fb_sink) = fetch_sink( config, @@ -209,12 +137,13 @@ async fn start_forwarder( // Start the metrics server in a separate background async spawn, // This should be running throughout the lifetime of the application, hence the handle is not // joined. - let metrics_state = UserDefinedContainerState { - source_client: source_grpc_client.clone(), - sink_client: sink_grpc_client.clone(), - transformer_client: transformer_grpc_client.clone(), - fb_sink_client: fb_sink_grpc_client.clone(), - }; + let metrics_state = + metrics::UserDefinedContainerState::Monovertex(metrics::MonovertexContainerState { + source_client: source_grpc_client.clone(), + sink_client: sink_grpc_client.clone(), + transformer_client: transformer_grpc_client.clone(), + fb_sink_client: fb_sink_grpc_client.clone(), + }); // start the metrics server // FIXME: what to do with the handle @@ -244,12 +173,8 @@ async fn fetch_source( // check whether the source grpc client is provided, this happens only of the source is a // user defined source if let Some(source_grpc_client) = source_grpc_client.clone() { - let (source_read, source_ack, lag_reader) = new_source( - source_grpc_client, - config.batch_size, - config.timeout_in_ms as u16, - ) - .await?; + let (source_read, source_ack, lag_reader) = + new_source(source_grpc_client, config.batch_size, config.read_timeout).await?; return Ok(SourceType::UserDefinedSource( source_read, source_ack, @@ -270,7 
+195,7 @@ async fn fetch_source( // fetch the actor handle for the sink. // sink_grpc_client can be optional because it is valid only for user-defined sink. async fn fetch_sink( - settings: &MonovertexConfig, + config: &MonovertexConfig, sink_grpc_client: Option>, fallback_sink_grpc_client: Option>, ) -> crate::Result<(SinkHandle, Option)> { @@ -278,27 +203,41 @@ async fn fetch_sink( Some(fallback_sink) => Some( SinkHandle::new( SinkClientType::UserDefined(fallback_sink), - settings.batch_size, + config.batch_size, ) .await?, ), - None => None, + None => { + if let Some(fb_sink_config) = &config.fb_sink_config { + if let sink::SinkType::Log(_) = &fb_sink_config.sink_type { + let log = SinkHandle::new(SinkClientType::Log, config.batch_size).await?; + return Ok((log, None)); + } + if let sink::SinkType::Blackhole(_) = &fb_sink_config.sink_type { + let blackhole = + SinkHandle::new(SinkClientType::Blackhole, config.batch_size).await?; + return Ok((blackhole, None)); + } + return Err(Error::Config( + "No valid Fallback Sink configuration found".to_string(), + )); + } + + None + } }; if let Some(sink_client) = sink_grpc_client { - let sink = SinkHandle::new( - SinkClientType::UserDefined(sink_client), - settings.batch_size, - ) - .await?; + let sink = + SinkHandle::new(SinkClientType::UserDefined(sink_client), config.batch_size).await?; return Ok((sink, fb_sink)); } - if let sink::SinkType::Log(_) = &settings.sink_config.sink_type { - let log = SinkHandle::new(SinkClientType::Log, settings.batch_size).await?; + if let sink::SinkType::Log(_) = &config.sink_config.sink_type { + let log = SinkHandle::new(SinkClientType::Log, config.batch_size).await?; return Ok((log, fb_sink)); } - if let sink::SinkType::Blackhole(_) = &settings.sink_config.sink_type { - let blackhole = SinkHandle::new(SinkClientType::Blackhole, settings.batch_size).await?; + if let sink::SinkType::Blackhole(_) = &config.sink_config.sink_type { + let blackhole = SinkHandle::new(SinkClientType::Blackhole, 
config.batch_size).await?; return Ok((blackhole, fb_sink)); } Err(Error::Config( @@ -345,15 +284,16 @@ mod tests { use std::fs::File; use std::io::Write; + use numaflow::source::{Message, Offset, SourceReadRequest}; + use numaflow::{sink, source}; + use tokio::sync::mpsc::Sender; + use tokio_util::sync::CancellationToken; + use crate::config::components; use crate::config::monovertex::MonovertexConfig; use crate::error; use crate::monovertex::start_forwarder; use crate::shared::server_info::ServerInfo; - use numaflow::source::{Message, Offset, SourceReadRequest}; - use numaflow::{sink, source}; - use tokio::sync::mpsc::Sender; - use tokio_util::sync::CancellationToken; struct SimpleSource; #[tonic::async_trait] diff --git a/rust/numaflow-core/src/monovertex/forwarder.rs b/rust/numaflow-core/src/monovertex/forwarder.rs index ac9d8605f0..793eae4526 100644 --- a/rust/numaflow-core/src/monovertex/forwarder.rs +++ b/rust/numaflow-core/src/monovertex/forwarder.rs @@ -10,8 +10,8 @@ use crate::config::components::sink::{OnFailureStrategy, RetryConfig}; use crate::config::monovertex::MonovertexConfig; use crate::error; use crate::message::{Message, Offset, ResponseStatusFromSink}; -use crate::monovertex::metrics; -use crate::monovertex::metrics::forward_metrics; +use crate::metrics; +use crate::metrics::forward_mvtx_metrics; use crate::sink::SinkHandle; use crate::Error; use crate::{source::SourceHandle, transformer::user_defined::SourceTransformHandle}; @@ -117,7 +117,7 @@ impl Forwarder { last_forwarded_at = std::time::Instant::now(); } - forward_metrics() + forward_mvtx_metrics() .e2e_time .get_or_create(&self.common_labels) .observe(start_time.elapsed().as_micros() as f64); @@ -140,7 +140,7 @@ impl Forwarder { start_time.elapsed().as_millis() ); - forward_metrics() + forward_mvtx_metrics() .read_time .get_or_create(&self.common_labels) .observe(start_time.elapsed().as_micros() as f64); @@ -151,7 +151,7 @@ impl Forwarder { } let msg_count = messages.len() as u64; - 
forward_metrics() + forward_mvtx_metrics() .read_total .get_or_create(&self.common_labels) .inc_by(msg_count); @@ -169,7 +169,7 @@ impl Forwarder { }, )?; - forward_metrics() + forward_mvtx_metrics() .read_bytes_total .get_or_create(&self.common_labels) .inc_by(bytes_count); @@ -213,7 +213,7 @@ impl Forwarder { "Transformer latency - {}ms", start_time.elapsed().as_millis() ); - forward_metrics() + forward_mvtx_metrics() .transformer .time .get_or_create(&self.common_labels) @@ -306,7 +306,7 @@ impl Forwarder { .await?; } - forward_metrics() + forward_mvtx_metrics() .sink .time .get_or_create(&self.common_labels) @@ -314,7 +314,7 @@ impl Forwarder { // update the metric for number of messages written to the sink // this included primary and fallback sink - forward_metrics() + forward_mvtx_metrics() .sink .write_total .get_or_create(&self.common_labels) @@ -355,7 +355,7 @@ impl Forwarder { attempts, error_map ); // update the metrics - forward_metrics() + forward_mvtx_metrics() .dropped_total .get_or_create(&self.common_labels) .inc_by(messages_to_send.len() as u64); @@ -532,7 +532,7 @@ impl Forwarder { ))); } // increment the metric for the fallback sink write - forward_metrics() + forward_mvtx_metrics() .fb_sink .write_total .get_or_create(&self.common_labels) @@ -549,12 +549,12 @@ impl Forwarder { debug!("Ack latency - {}ms", start_time.elapsed().as_millis()); - forward_metrics() + forward_mvtx_metrics() .ack_time .get_or_create(&self.common_labels) .observe(start_time.elapsed().as_micros() as f64); - forward_metrics() + forward_mvtx_metrics() .ack_total .get_or_create(&self.common_labels) .inc_by(n as u64); @@ -565,6 +565,7 @@ impl Forwarder { #[cfg(test)] mod tests { use std::collections::HashSet; + use std::time::Duration; use chrono::Utc; use numaflow::source::{Message, Offset, SourceReadRequest}; @@ -577,11 +578,11 @@ mod tests { use tokio_util::sync::CancellationToken; use crate::monovertex::forwarder::ForwarderBuilder; - use crate::monovertex::SourceType; 
use crate::shared::utils::create_rpc_channel; use crate::sink::{SinkClientType, SinkHandle}; use crate::source::user_defined::new_source; use crate::source::SourceHandle; + use crate::source::SourceType; use crate::transformer::user_defined::SourceTransformHandle; struct SimpleSource { @@ -767,7 +768,7 @@ mod tests { let (source_read, source_ack, source_lag_reader) = new_source( SourceClient::new(create_rpc_channel(source_sock_file.clone()).await.unwrap()), batch_size, - timeout_in_ms, + Duration::from_millis(timeout_in_ms), ) .await .expect("failed to connect to source server"); @@ -904,7 +905,7 @@ mod tests { let (source_read, source_ack, lag_reader) = new_source( SourceClient::new(create_rpc_channel(source_sock_file.clone()).await.unwrap()), batch_size, - timeout_in_ms, + Duration::from_millis(timeout_in_ms), ) .await .expect("failed to connect to source server"); @@ -970,7 +971,6 @@ mod tests { #[tokio::test] async fn test_fb_sink() { let batch_size = 100; - let timeout_in_ms = 1000; let (sink_tx, mut sink_rx) = mpsc::channel(10); @@ -1026,14 +1026,14 @@ mod tests { }); // Wait for the servers to start - tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + tokio::time::sleep(Duration::from_millis(100)).await; let cln_token = CancellationToken::new(); let (source_read, source_ack, source_lag_reader) = new_source( SourceClient::new(create_rpc_channel(source_sock_file.clone()).await.unwrap()), 500, - 100, + Duration::from_millis(100), ) .await .expect("failed to connect to source server"); diff --git a/rust/numaflow-core/src/pipeline.rs b/rust/numaflow-core/src/pipeline.rs index 0439eb1c7f..ffe1c06944 100644 --- a/rust/numaflow-core/src/pipeline.rs +++ b/rust/numaflow-core/src/pipeline.rs @@ -1,2 +1,623 @@ +use std::collections::HashMap; + +use async_nats::jetstream; +use async_nats::jetstream::Context; +use futures::future::try_join_all; +use numaflow_pb::clients::sink::sink_client::SinkClient; +use 
numaflow_pb::clients::source::source_client::SourceClient; +use numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; +use tokio_util::sync::CancellationToken; +use tonic::transport::Channel; + +use crate::config::components::source::SourceType; +use crate::config::pipeline; +use crate::config::pipeline::PipelineConfig; +use crate::metrics::{PipelineContainerState, UserDefinedContainerState}; +use crate::pipeline::isb::jetstream::reader::JetstreamReader; +use crate::pipeline::isb::jetstream::WriterHandle; +use crate::shared::server_info::check_for_server_compatibility; +use crate::shared::utils; +use crate::shared::utils::{ + create_rpc_channel, start_metrics_server, wait_until_source_ready, wait_until_transformer_ready, +}; +use crate::sink::SinkWriter; +use crate::source::generator::new_generator; +use crate::source::user_defined::new_source; +use crate::transformer::user_defined::SourceTransformHandle; +use crate::{config, error, source, Result}; + mod forwarder; mod isb; + +/// Starts the appropriate forwarder based on the pipeline configuration. 
+pub(crate) async fn start_forwarder( + cln_token: CancellationToken, + config: PipelineConfig, +) -> Result<()> { + let js_context = create_js_context(config.js_client_config.clone()).await?; + + match &config.vertex_config { + pipeline::VertexType::Source(source) => { + let buffer_writers = + create_buffer_writers(&config, js_context.clone(), cln_token.clone()).await?; + + let (source_type, source_grpc_client) = + create_source_type(source, &config, cln_token.clone()).await?; + let (transformer, transformer_grpc_client) = + create_transformer(source, cln_token.clone()).await?; + + start_metrics_server( + config.metrics_config.clone(), + UserDefinedContainerState::Pipeline(PipelineContainerState::Source(( + source_grpc_client.clone(), + transformer_grpc_client.clone(), + ))), + ) + .await; + + let source_handle = source::SourceHandle::new(source_type, config.batch_size); + let mut forwarder = forwarder::source_forwarder::ForwarderBuilder::new( + source_handle, + transformer, + buffer_writers, + cln_token.clone(), + config.clone(), + ) + .build(); + forwarder.start().await?; + } + pipeline::VertexType::Sink(sink) => { + // Create buffer readers for each partition + let buffer_readers = create_buffer_readers(&config, js_context.clone()).await?; + + // Create sink writers and clients + let mut sink_writers = Vec::new(); + for _ in &buffer_readers { + let (sink_writer, sink_grpc_client, fb_sink_grpc_client) = + create_sink_writer(&config, sink, cln_token.clone()).await?; + sink_writers.push((sink_writer, sink_grpc_client, fb_sink_grpc_client)); + } + + // Start the metrics server with one of the clients + if let Some((_, sink, fb_sink)) = sink_writers.first() { + start_metrics_server( + config.metrics_config.clone(), + UserDefinedContainerState::Pipeline(PipelineContainerState::Sink(( + sink.clone(), + fb_sink.clone(), + ))), + ) + .await; + } + + // Start a new forwarder for each buffer reader + let mut forwarder_tasks = Vec::new(); + for (buffer_reader, 
(sink_writer, _, _)) in buffer_readers.into_iter().zip(sink_writers) + { + let forwarder = forwarder::sink_forwarder::SinkForwarder::new( + buffer_reader, + sink_writer, + cln_token.clone(), + ) + .await; + + let task = tokio::spawn({ + let config = config.clone(); + async move { forwarder.start(config.clone()).await } + }); + + forwarder_tasks.push(task); + } + + try_join_all(forwarder_tasks) + .await + .map_err(|e| error::Error::Forwarder(e.to_string()))?; + } + } + Ok(()) +} + +/// Creates the required buffer writers based on the pipeline configuration, it creates a map +/// of vertex name to a list of writer handles. +async fn create_buffer_writers( + config: &PipelineConfig, + js_context: Context, + cln_token: CancellationToken, +) -> Result>> { + let mut buffer_writers = HashMap::new(); + for to_vertex in &config.to_vertex_config { + let writers = to_vertex + .writer_config + .streams + .iter() + .map(|stream| { + WriterHandle::new( + stream.0.clone(), + stream.1, + to_vertex.writer_config.clone(), + js_context.clone(), + config.batch_size, + config.paf_batch_size, + cln_token.clone(), + ) + }) + .collect(); + buffer_writers.insert(to_vertex.name.clone(), writers); + } + Ok(buffer_writers) +} + +async fn create_buffer_readers( + config: &PipelineConfig, + js_context: Context, +) -> Result> { + // Only the reader config of the first "from" vertex is needed, as all "from" vertices currently write + // to a common buffer, in the case of a join. + let reader_config = config + .from_vertex_config + .first() + .ok_or_else(|| error::Error::Config("No from vertex config found".to_string()))? 
+ .reader_config + .clone(); + + let mut readers = Vec::new(); + for stream in &reader_config.streams { + let reader = JetstreamReader::new( + stream.0.clone(), + stream.1, + js_context.clone(), + reader_config.clone(), + ) + .await?; + readers.push(reader); + } + + Ok(readers) +} + +// Creates a sink writer based on the pipeline configuration +async fn create_sink_writer( + config: &PipelineConfig, + sink_vtx_config: &pipeline::SinkVtxConfig, + cln_token: CancellationToken, +) -> Result<( + SinkWriter, + Option>, + Option>, +)> { + let (sink_handle, sink_grpc_client) = utils::create_sink_handle( + config.batch_size, + &sink_vtx_config.sink_config.sink_type, + &cln_token, + ) + .await?; + let (fb_sink_handle, fb_sink_grpc_client) = match &sink_vtx_config.fb_sink_config { + None => (None, None), + Some(fb_sink_config) => { + let (handle, client) = + utils::create_sink_handle(config.batch_size, &fb_sink_config.sink_type, &cln_token) + .await?; + (Some(handle), client) + } + }; + + Ok(( + SinkWriter::new( + config.batch_size, + config.read_timeout, + sink_vtx_config.clone(), + sink_handle, + fb_sink_handle, + ) + .await?, + sink_grpc_client, + fb_sink_grpc_client, + )) +} + +/// Creates a source type based on the pipeline configuration +async fn create_source_type( + source: &pipeline::SourceVtxConfig, + config: &PipelineConfig, + cln_token: CancellationToken, +) -> Result<(source::SourceType, Option>)> { + match &source.source_config.source_type { + SourceType::Generator(generator_config) => { + let (generator_read, generator_ack, generator_lag) = + new_generator(generator_config.clone(), config.batch_size)?; + Ok(( + source::SourceType::Generator(generator_read, generator_ack, generator_lag), + None, + )) + } + SourceType::UserDefined(udsource_config) => { + check_for_server_compatibility( + udsource_config.server_info_path.clone().into(), + cln_token.clone(), + ) + .await?; + let mut source_grpc_client = SourceClient::new( + 
create_rpc_channel(udsource_config.socket_path.clone().into()).await?, + ) + .max_encoding_message_size(udsource_config.grpc_max_message_size) + .max_encoding_message_size(udsource_config.grpc_max_message_size); + wait_until_source_ready(&cln_token, &mut source_grpc_client).await?; + let (ud_read, ud_ack, ud_lag) = new_source( + source_grpc_client.clone(), + config.batch_size, + config.read_timeout, + ) + .await?; + Ok(( + source::SourceType::UserDefinedSource(ud_read, ud_ack, ud_lag), + Some(source_grpc_client), + )) + } + } +} +/// Creates a transformer if it is configured in the pipeline +async fn create_transformer( + source: &pipeline::SourceVtxConfig, + cln_token: CancellationToken, +) -> Result<( + Option, + Option>, +)> { + if let Some(transformer_config) = &source.transformer_config { + if let config::components::transformer::TransformerType::UserDefined(ud_transformer) = + &transformer_config.transformer_type + { + check_for_server_compatibility( + ud_transformer.socket_path.clone().into(), + cln_token.clone(), + ) + .await?; + let mut transformer_grpc_client = SourceTransformClient::new( + create_rpc_channel(ud_transformer.socket_path.clone().into()).await?, + ) + .max_encoding_message_size(ud_transformer.grpc_max_message_size) + .max_encoding_message_size(ud_transformer.grpc_max_message_size); + wait_until_transformer_ready(&cln_token, &mut transformer_grpc_client).await?; + return Ok(( + Some(SourceTransformHandle::new(transformer_grpc_client.clone()).await?), + Some(transformer_grpc_client), + )); + } + } + Ok((None, None)) +} + +/// Creates a jetstream context based on the provided configuration +async fn create_js_context(config: pipeline::isb::jetstream::ClientConfig) -> Result { + let js_client = match (config.user, config.password) { + (Some(user), Some(password)) => { + async_nats::connect_with_options( + config.url, + async_nats::ConnectOptions::with_user_and_password(user, password), + ) + .await + } + _ => 
async_nats::connect(config.url).await, + } + .map_err(|e| error::Error::Connection(e.to_string()))?; + Ok(jetstream::new(js_client)) +} + +#[cfg(test)] +mod tests { + use std::time::Duration; + + use async_nats::jetstream; + use async_nats::jetstream::{consumer, stream}; + use futures::StreamExt; + + use super::*; + + use crate::config::components::metrics::MetricsConfig; + use crate::config::components::sink::{BlackholeConfig, SinkConfig, SinkType}; + use crate::config::components::source::GeneratorConfig; + use crate::config::components::source::SourceConfig; + use crate::config::components::source::SourceType; + use crate::config::pipeline::PipelineConfig; + use crate::pipeline::pipeline::isb; + use crate::pipeline::pipeline::isb::{BufferReaderConfig, BufferWriterConfig}; + use crate::pipeline::pipeline::VertexType; + use crate::pipeline::pipeline::{FromVertexConfig, ToVertexConfig}; + use crate::pipeline::pipeline::{SinkVtxConfig, SourceVtxConfig}; + use crate::pipeline::tests::isb::BufferFullStrategy::RetryUntilSuccess; + + #[cfg(feature = "nats-tests")] + #[tokio::test] + async fn test_forwarder_for_source_vetex() { + // Unique names for the streams we use in this test + let streams = vec![ + "default-test-forwarder-for-source-vertex-out-0", + "default-test-forwarder-for-source-vertex-out-1", + "default-test-forwarder-for-source-vertex-out-2", + "default-test-forwarder-for-source-vertex-out-3", + "default-test-forwarder-for-source-vertex-out-4", + ]; + + let js_url = "localhost:4222"; + let client = async_nats::connect(js_url).await.unwrap(); + let context = jetstream::new(client); + + let mut consumers = vec![]; + // Create streams to which the generator source vertex we create later will forward + // messages to. The consumers created for the corresponding streams will be used to ensure + // that messages were actually written to the streams. 
+ for stream_name in &streams { + let stream_name = *stream_name; + let _stream = context + .get_or_create_stream(stream::Config { + name: stream_name.into(), + subjects: vec![stream_name.into()], + max_message_size: 64 * 1024, + max_messages: 10000, + ..Default::default() + }) + .await + .unwrap(); + + let c: consumer::PullConsumer = context + .create_consumer_on_stream( + consumer::pull::Config { + name: Some(stream_name.to_string()), + ack_policy: consumer::AckPolicy::Explicit, + ..Default::default() + }, + stream_name, + ) + .await + .unwrap(); + consumers.push((stream_name.to_string(), c)); + } + + let pipeline_config = PipelineConfig { + pipeline_name: "simple-pipeline".to_string(), + vertex_name: "in".to_string(), + replica: 0, + batch_size: 1000, + paf_batch_size: 30000, + read_timeout: Duration::from_secs(1), + js_client_config: isb::jetstream::ClientConfig { + url: "localhost:4222".to_string(), + user: None, + password: None, + }, + from_vertex_config: vec![], + to_vertex_config: vec![ToVertexConfig { + name: "out".to_string(), + writer_config: BufferWriterConfig { + streams: streams + .iter() + .enumerate() + .map(|(i, stream_name)| (stream_name.to_string(), i as u16)) + .collect(), + partitions: 5, + max_length: 30000, + refresh_interval: Duration::from_secs(1), + usage_limit: 0.8, + buffer_full_strategy: RetryUntilSuccess, + retry_interval: Duration::from_millis(10), + }, + partitions: 5, + conditions: None, + }], + vertex_config: VertexType::Source(SourceVtxConfig { + source_config: SourceConfig { + source_type: SourceType::Generator(GeneratorConfig { + rpu: 10, + content: bytes::Bytes::new(), + duration: Duration::from_secs(1), + value: None, + key_count: 0, + msg_size_bytes: 300, + jitter: Duration::from_millis(0), + }), + }, + transformer_config: None, + }), + metrics_config: MetricsConfig { + metrics_server_listen_port: 2469, + lag_check_interval_in_secs: 5, + lag_refresh_interval_in_secs: 3, + }, + }; + + let cancellation_token = 
tokio_util::sync::CancellationToken::new(); + let forwarder_task = tokio::spawn({ + let cancellation_token = cancellation_token.clone(); + async move { + start_forwarder(cancellation_token, pipeline_config) + .await + .unwrap(); + } + }); + + // Wait for a few messages to be forwarded + tokio::time::sleep(Duration::from_secs(2)).await; + cancellation_token.cancel(); + forwarder_task.await.unwrap(); + + for (stream_name, stream_consumer) in consumers { + let messages: Vec = stream_consumer + .batch() + .max_messages(10) + .expires(Duration::from_millis(50)) + .messages() + .await + .unwrap() + .map(|msg| msg.unwrap()) + .collect() + .await; + assert!( + !messages.is_empty(), + "Stream {} is expected to have messages", + stream_name + ); + } + + // Delete all streams created in this test + for stream_name in streams { + context.delete_stream(stream_name).await.unwrap(); + } + } + + #[cfg(feature = "nats-tests")] + #[tokio::test] + async fn test_forwarder_for_sink_vetex() { + // Unique names for the streams we use in this test + let streams = vec![ + "default-test-forwarder-for-sink-vertex-out-0", + "default-test-forwarder-for-sink-vertex-out-1", + "default-test-forwarder-for-sink-vertex-out-2", + "default-test-forwarder-for-sink-vertex-out-3", + "default-test-forwarder-for-sink-vertex-out-4", + ]; + + let js_url = "localhost:4222"; + let client = async_nats::connect(js_url).await.unwrap(); + let context = jetstream::new(client); + + const MESSAGE_COUNT: usize = 10; + let mut consumers = vec![]; + // Create streams to which the generator source vertex we create later will forward + // messages to. The consumers created for the corresponding streams will be used to ensure + // that messages were actually written to the streams. 
+ for stream_name in &streams { + let stream_name = *stream_name; + // Delete stream if it exists + let _ = context.delete_stream(stream_name).await; + let _stream = context + .get_or_create_stream(stream::Config { + name: stream_name.into(), + subjects: vec![stream_name.into()], + max_message_size: 64 * 1024, + max_messages: 10000, + ..Default::default() + }) + .await + .unwrap(); + + // Publish some messages into the stream + use crate::message::{Message, MessageID, Offset, StringOffset}; + use chrono::{TimeZone, Utc}; + let message = Message { + keys: vec!["key1".to_string()], + value: vec![1, 2, 3].into(), + offset: Some(Offset::String(StringOffset::new("123".to_string(), 0))), + event_time: Utc.timestamp_opt(1627846261, 0).unwrap(), + id: MessageID { + vertex_name: "vertex".to_string(), + offset: "123".to_string(), + index: 0, + }, + headers: HashMap::new(), + }; + let message: bytes::BytesMut = message.try_into().unwrap(); + + for _ in 0..MESSAGE_COUNT { + context + .publish(stream_name.to_string(), message.clone().into()) + .await + .unwrap() + .await + .unwrap(); + } + + let c: consumer::PullConsumer = context + .create_consumer_on_stream( + consumer::pull::Config { + name: Some(stream_name.to_string()), + ack_policy: consumer::AckPolicy::Explicit, + ..Default::default() + }, + stream_name, + ) + .await + .unwrap(); + consumers.push((stream_name.to_string(), c)); + } + + let pipeline_config = PipelineConfig { + pipeline_name: "simple-pipeline".to_string(), + vertex_name: "in".to_string(), + replica: 0, + batch_size: 1000, + paf_batch_size: 30000, + read_timeout: Duration::from_secs(1), + js_client_config: isb::jetstream::ClientConfig { + url: "localhost:4222".to_string(), + user: None, + password: None, + }, + to_vertex_config: vec![], + from_vertex_config: vec![FromVertexConfig { + name: "in".to_string(), + reader_config: BufferReaderConfig { + partitions: 5, + streams: streams + .iter() + .enumerate() + .map(|(i, key)| (key.to_string(), i as u16)) + 
.collect(), + batch_size: 500, + read_timeout: Duration::from_secs(1), + wip_ack_interval: Duration::from_secs(1), + }, + partitions: 0, + }], + vertex_config: VertexType::Sink(SinkVtxConfig { + sink_config: SinkConfig { + sink_type: SinkType::Blackhole(BlackholeConfig::default()), + retry_config: None, + }, + fb_sink_config: None, + }), + metrics_config: MetricsConfig { + metrics_server_listen_port: 2469, + lag_check_interval_in_secs: 5, + lag_refresh_interval_in_secs: 3, + }, + }; + + let cancellation_token = tokio_util::sync::CancellationToken::new(); + let forwarder_task = tokio::spawn({ + let cancellation_token = cancellation_token.clone(); + async move { + start_forwarder(cancellation_token, pipeline_config) + .await + .unwrap(); + } + }); + + // Wait for a few messages to be forwarded + tokio::time::sleep(Duration::from_secs(3)).await; + cancellation_token.cancel(); + // token cancellation is not aborting the forwarder since we fetch messages from jetstream + // as a stream of messages (not using `consumer.batch()`). 
+ // See `JetstreamReader::start` method in src/pipeline/isb/jetstream/reader.rs + //forwarder_task.await.unwrap(); + forwarder_task.abort(); + + for (stream_name, mut stream_consumer) in consumers { + let stream_info = stream_consumer.info().await.unwrap(); + assert_eq!( + stream_info.delivered.stream_sequence, MESSAGE_COUNT as u64, + "Stream={}, expected delivered stream sequence to be {}, current value is {}", + stream_name, MESSAGE_COUNT, stream_info.delivered.stream_sequence + ); + assert_eq!( + stream_info.ack_floor.stream_sequence, MESSAGE_COUNT as u64, + "Stream={}, expected ack'ed stream sequence to be {}, current value is {}", + stream_name, MESSAGE_COUNT, stream_info.ack_floor.stream_sequence + ); + } + + // Delete all streams created in this test + for stream_name in streams { + context.delete_stream(stream_name).await.unwrap(); + } + } +} diff --git a/rust/numaflow-core/src/pipeline/forwarder.rs b/rust/numaflow-core/src/pipeline/forwarder.rs index 70b786d12e..6e8774c320 100644 --- a/rust/numaflow-core/src/pipeline/forwarder.rs +++ b/rust/numaflow-core/src/pipeline/forwarder.rs @@ -1 +1,17 @@ -// TODO +/// Forwarder consists +/// (Read) +-------> (UDF) -------> (Write) + +/// | | +/// | | +/// +-------> {Ack} <----------------+ +/// +/// {} -> Listens on a OneShot +/// () -> Streaming Interface +/// + +/// Forwarder specific to Sink where reader is ISB, UDF is not present, while +/// the Write is User-defined Sink or builtin. +pub(crate) mod sink_forwarder; + +/// Source where the Reader is builtin or User-defined Source, Write is ISB, +/// with an optional Transformer. 
+pub(crate) mod source_forwarder; diff --git a/rust/numaflow-core/src/pipeline/forwarder/sink_forwarder.rs b/rust/numaflow-core/src/pipeline/forwarder/sink_forwarder.rs new file mode 100644 index 0000000000..74846e931b --- /dev/null +++ b/rust/numaflow-core/src/pipeline/forwarder/sink_forwarder.rs @@ -0,0 +1,55 @@ +use tokio_util::sync::CancellationToken; + +use crate::config::pipeline::PipelineConfig; +use crate::error::Error; +use crate::pipeline::isb::jetstream::reader::JetstreamReader; +use crate::sink::SinkWriter; +use crate::Result; + +/// Sink forwarder reads messages from the jetstream and writes to the sink. +pub(crate) struct SinkForwarder { + jetstream_reader: JetstreamReader, + sink_writer: SinkWriter, + cln_token: CancellationToken, +} + +impl SinkForwarder { + pub(crate) async fn new( + jetstream_reader: JetstreamReader, + sink_writer: SinkWriter, + cln_token: CancellationToken, + ) -> Self { + Self { + jetstream_reader, + sink_writer, + cln_token, + } + } + + pub(crate) async fn start(&self, pipeline_config: PipelineConfig) -> Result<()> { + // Create a child cancellation token only for the reader so that we can stop the reader first + let reader_cancellation_token = self.cln_token.child_token(); + let (read_messages_rx, reader_handle) = self + .jetstream_reader + .start(reader_cancellation_token.clone(), &pipeline_config) + .await?; + + let sink_writer_handle = self + .sink_writer + .start(read_messages_rx, self.cln_token.clone()) + .await?; + + // Join the reader and sink writer + match tokio::try_join!(reader_handle, sink_writer_handle) { + Ok((reader_result, sink_writer_result)) => { + reader_result?; + sink_writer_result?; + Ok(()) + } + Err(e) => Err(Error::Forwarder(format!( + "Error while joining reader and sink writer: {:?}", + e + ))), + } + } +} diff --git a/rust/numaflow-core/src/pipeline/forwarder/source_forwarder.rs b/rust/numaflow-core/src/pipeline/forwarder/source_forwarder.rs new file mode 100644 index 0000000000..5dd94290de --- 
/dev/null +++ b/rust/numaflow-core/src/pipeline/forwarder/source_forwarder.rs @@ -0,0 +1,191 @@ +use std::collections::HashMap; + +use chrono::Utc; +use tokio_util::sync::CancellationToken; +use tracing::{debug, info}; + +use crate::config::pipeline::PipelineConfig; +use crate::error; +use crate::error::Error; +use crate::message::{Message, Offset}; +use crate::metrics::{forward_pipeline_metrics, pipeline_forward_read_metric_labels}; +use crate::pipeline::isb::jetstream::WriterHandle; +use crate::source::SourceHandle; +use crate::transformer::user_defined::SourceTransformHandle; + +/// Simple source forwarder that reads messages from the source, applies transformation if present +/// and writes to the messages to ISB. +pub(crate) struct Forwarder { + source_reader: SourceHandle, + transformer: Option, + buffer_writers: HashMap>, + cln_token: CancellationToken, + config: PipelineConfig, +} + +pub(crate) struct ForwarderBuilder { + source_reader: SourceHandle, + transformer: Option, + buffer_writers: HashMap>, + cln_token: CancellationToken, + config: PipelineConfig, +} + +impl ForwarderBuilder { + pub(crate) fn new( + source_reader: SourceHandle, + transformer: Option, + buffer_writers: HashMap>, + cln_token: CancellationToken, + config: PipelineConfig, + ) -> Self { + Self { + source_reader, + transformer, + buffer_writers, + cln_token, + config, + } + } + + pub(crate) fn build(self) -> Forwarder { + Forwarder { + source_reader: self.source_reader, + transformer: self.transformer, + buffer_writers: self.buffer_writers, + cln_token: self.cln_token, + config: self.config, + } + } +} + +impl Forwarder { + pub(crate) async fn start(&mut self) -> Result<(), Error> { + let mut processed_msgs_count: usize = 0; + let mut last_forwarded_at = std::time::Instant::now(); + info!("Forwarder has started"); + loop { + tokio::time::Instant::now(); + if self.cln_token.is_cancelled() { + break; + } + processed_msgs_count += self.read_and_process_messages().await?; + + if 
last_forwarded_at.elapsed().as_millis() >= 1000 { + info!( + "Forwarded {} messages at time in the pipeline {}", + processed_msgs_count, + Utc::now() + ); + processed_msgs_count = 0; + last_forwarded_at = std::time::Instant::now(); + } + } + Ok(()) + } + + async fn read_and_process_messages(&mut self) -> Result { + let start_time = tokio::time::Instant::now(); + let messages = self.source_reader.read().await.map_err(|e| { + Error::Forwarder(format!("Failed to read messages from source {:?}", e)) + })?; + + debug!( + "Read batch size: {} and latency - {}ms", + messages.len(), + start_time.elapsed().as_millis() + ); + + let labels = pipeline_forward_read_metric_labels( + self.config.pipeline_name.as_ref(), + self.config.vertex_name.as_ref(), + self.config.vertex_name.as_ref(), + "Source", + self.config.replica, + ); + forward_pipeline_metrics() + .forwarder + .data_read + .get_or_create(labels) + .inc_by(messages.len() as u64); + + if messages.is_empty() { + return Ok(0); + } + + let msg_count = messages.len() as u64; + let offsets: Vec = + messages + .iter() + .try_fold(Vec::with_capacity(messages.len()), |mut offsets, msg| { + if let Some(offset) = &msg.offset { + offsets.push(offset.clone()); + Ok(offsets) + } else { + Err(Error::Forwarder("Message offset is missing".to_string())) + } + })?; + + // Apply transformation if transformer is present + // FIXME: we should stream the responses back and write it to the jetstream writer + let transformed_messages = self.apply_transformer(messages).await.map_err(|e| { + Error::Forwarder(format!( + "Failed to apply transformation to messages {:?}", + e + )) + })?; + + self.write_to_jetstream(transformed_messages).await?; + + self.source_reader.ack(offsets).await?; + + Ok(msg_count as usize) + } + + /// Applies the transformer to the messages. 
+ async fn apply_transformer(&mut self, messages: Vec) -> error::Result> { + let Some(client) = &mut self.transformer else { + // return early if there is no transformer + return Ok(messages); + }; + + let start_time = tokio::time::Instant::now(); + let results = client.transform(messages).await?; + + debug!( + "Transformer latency - {}ms", + start_time.elapsed().as_millis() + ); + + Ok(results) + } + + /// Writes messages to the jetstream, it writes to all the downstream buffers. + async fn write_to_jetstream(&mut self, messages: Vec) -> Result<(), Error> { + if messages.is_empty() { + return Ok(()); + } + + let mut results = Vec::new(); + + // write to all the buffers + for i in 0..messages.len() { + for writers in self.buffer_writers.values() { + // write to the stream writers in round-robin fashion + let writer = &writers[i % writers.len()]; // FIXME: we need to shuffle based on the message id hash + let result = writer.write(messages[i].clone()).await?; + results.push(result); + } + } + + // await for all the result futures to complete + // FIXME: we should not await for the results to complete, that will make it sequential + for result in results { + // we can use the ack to publish watermark etc + result + .await + .map_err(|e| Error::Forwarder(format!("Failed to write to jetstream {:?}", e)))??; + } + Ok(()) + } +} diff --git a/rust/numaflow-core/src/pipeline/isb/jetstream.rs b/rust/numaflow-core/src/pipeline/isb/jetstream.rs index 7e3e7e378c..ccba63a8d1 100644 --- a/rust/numaflow-core/src/pipeline/isb/jetstream.rs +++ b/rust/numaflow-core/src/pipeline/isb/jetstream.rs @@ -1,9 +1,10 @@ use async_nats::jetstream::Context; +use bytes::BytesMut; use tokio::sync::mpsc::Receiver; use tokio::sync::{mpsc, oneshot}; use tokio_util::sync::CancellationToken; -use crate::config::pipeline::isb::jetstream::StreamWriterConfig; +use crate::config::pipeline::isb::BufferWriterConfig; use crate::error::Error; use crate::message::{Message, Offset}; use 
crate::pipeline::isb::jetstream::writer::JetstreamWriter; @@ -16,6 +17,8 @@ use crate::Result; /// exception). pub(super) mod writer; +pub(crate) mod reader; + /// ISB Writer accepts an Actor pattern based messages. #[derive(Debug)] struct ActorMessage { @@ -37,28 +40,22 @@ impl ActorMessage { struct WriterActor { js_writer: JetstreamWriter, receiver: Receiver, - cancel_token: CancellationToken, } impl WriterActor { - fn new( - js_writer: JetstreamWriter, - receiver: Receiver, - cancel_token: CancellationToken, - ) -> Self { + fn new(js_writer: JetstreamWriter, receiver: Receiver) -> Self { Self { js_writer, receiver, - cancel_token, } } async fn handle_message(&mut self, msg: ActorMessage) { - let payload: Vec = msg + let payload: BytesMut = msg .message .try_into() .expect("message serialization should not fail"); - self.js_writer.write(payload, msg.callee_tx).await + self.js_writer.write(payload.into(), msg.callee_tx).await } async fn run(&mut self) { @@ -74,16 +71,26 @@ pub(crate) struct WriterHandle { } impl WriterHandle { - pub(super) fn new( - config: StreamWriterConfig, + pub(crate) fn new( + stream_name: String, + partition_idx: u16, + config: BufferWriterConfig, js_ctx: Context, batch_size: usize, + paf_batch_size: usize, cancel_token: CancellationToken, ) -> Self { let (sender, receiver) = mpsc::channel::(batch_size); - let js_writer = JetstreamWriter::new(config, js_ctx, batch_size, cancel_token.clone()); - let mut actor = WriterActor::new(js_writer.clone(), receiver, cancel_token); + let js_writer = JetstreamWriter::new( + stream_name, + partition_idx, + config, + js_ctx, + paf_batch_size, + cancel_token.clone(), + ); + let mut actor = WriterActor::new(js_writer.clone(), receiver); tokio::spawn(async move { actor.run().await; @@ -117,6 +124,7 @@ mod tests { use chrono::Utc; use tokio::sync::oneshot; use tokio::time::Instant; + use tracing::info; use super::*; use crate::message::{Message, MessageID}; @@ -140,21 +148,24 @@ mod tests { .await .unwrap(); 
- let config = StreamWriterConfig { - name: stream_name.into(), - ..Default::default() - }; - // Create ISBMessageHandler let batch_size = 500; - let handler = WriterHandle::new(config, context.clone(), batch_size, cln_token.clone()); + let handler = WriterHandle::new( + stream_name.to_string(), + 0, + Default::default(), + context.clone(), + batch_size, + 1000, + cln_token.clone(), + ); let mut result_receivers = Vec::new(); // Publish 500 messages for i in 0..500 { let message = Message { keys: vec![format!("key_{}", i)], - value: format!("message {}", i).as_bytes().to_vec(), + value: format!("message {}", i).as_bytes().to_vec().into(), offset: None, event_time: Utc::now(), id: MessageID { @@ -173,10 +184,11 @@ mod tests { result_receivers.push(receiver); } - for receiver in result_receivers { - let result = receiver.await.unwrap(); - assert!(result.is_ok()); - } + // FIXME: Uncomment after we start awaiting for PAFs + //for receiver in result_receivers { + // let result = receiver.await.unwrap(); + // assert!(result.is_ok()); + //} context.delete_stream(stream_name).await.unwrap(); } @@ -200,20 +212,23 @@ mod tests { .await .unwrap(); - let config = StreamWriterConfig { - name: stream_name.into(), - ..Default::default() - }; - let cancel_token = CancellationToken::new(); - let handler = WriterHandle::new(config, context.clone(), 500, cancel_token.clone()); + let handler = WriterHandle::new( + stream_name.to_string(), + 0, + Default::default(), + context.clone(), + 500, + 1000, + cancel_token.clone(), + ); let mut receivers = Vec::new(); // Publish 100 messages successfully for i in 0..100 { let message = Message { keys: vec![format!("key_{}", i)], - value: format!("message {}", i).as_bytes().to_vec(), + value: format!("message {}", i).as_bytes().to_vec().into(), offset: None, event_time: Utc::now(), id: MessageID { @@ -230,7 +245,7 @@ mod tests { // because the max message size is set to 1024 let message = Message { keys: vec!["key_101".to_string()], - value: 
vec![0; 1024], + value: vec![0; 1024].into(), offset: None, event_time: Utc::now(), id: MessageID { @@ -247,14 +262,15 @@ mod tests { cancel_token.cancel(); // Check the results - for (i, receiver) in receivers.into_iter().enumerate() { - let result = receiver.await.unwrap(); - if i < 100 { - assert!(result.is_ok()); - } else { - assert!(result.is_err()); - } - } + // FIXME: Uncomment after we start awaiting for PAFs + //for (i, receiver) in receivers.into_iter().enumerate() { + // let result = receiver.await.unwrap(); + // if i < 100 { + // assert!(result.is_ok()); + // } else { + // assert!(result.is_err()); + // } + //} context.delete_stream(stream_name).await.unwrap(); } @@ -268,7 +284,7 @@ mod tests { let client = async_nats::connect(js_url).await.unwrap(); let context = jetstream::new(client); - let stream_name = "benchmark_stream"; + let stream_name = "benchmark_publish"; let _stream = context .get_or_create_stream(stream::Config { name: stream_name.into(), @@ -278,13 +294,16 @@ mod tests { .await .unwrap(); - let config = StreamWriterConfig { - name: stream_name.into(), - ..Default::default() - }; - let cancel_token = CancellationToken::new(); - let handler = WriterHandle::new(config, context.clone(), 500, cancel_token.clone()); + let handler = WriterHandle::new( + stream_name.to_string(), + 0, + Default::default(), + context.clone(), + 500, + 1000, + cancel_token.clone(), + ); let (tx, mut rx) = mpsc::channel(100); let test_start_time = Instant::now(); @@ -298,7 +317,7 @@ mod tests { while Instant::now().duration_since(test_start_time) < duration { let message = Message { keys: vec![format!("key_{}", i)], - value: format!("message {}", i).as_bytes().to_vec(), + value: format!("message {}", i).as_bytes().to_vec().into(), offset: None, event_time: Utc::now(), id: MessageID { @@ -315,7 +334,7 @@ mod tests { i += 1; if start_time.elapsed().as_secs() >= 1 { - println!("Messages sent: {}", sent_count); + info!("Messages sent: {}", sent_count); sent_count = 0; 
start_time = Instant::now(); } @@ -332,7 +351,7 @@ mod tests { } if start_time.elapsed().as_secs() >= 1 { - println!("Messages received: {}", count); + info!("Messages received: {}", count); count = 0; start_time = Instant::now(); } diff --git a/rust/numaflow-core/src/pipeline/isb/jetstream/reader.rs b/rust/numaflow-core/src/pipeline/isb/jetstream/reader.rs new file mode 100644 index 0000000000..5f3d2926bf --- /dev/null +++ b/rust/numaflow-core/src/pipeline/isb/jetstream/reader.rs @@ -0,0 +1,361 @@ +use std::time::Duration; + +use async_nats::jetstream::{ + consumer::PullConsumer, AckKind, Context, Message as JetstreamMessage, +}; +use tokio::sync::mpsc::Receiver; +use tokio::sync::{mpsc, oneshot}; +use tokio::task::JoinHandle; +use tokio::time::{self, Instant}; +use tokio_stream::StreamExt; +use tokio_util::sync::CancellationToken; +use tracing::{error, warn}; + +use crate::config::pipeline::isb::BufferReaderConfig; +use crate::config::pipeline::PipelineConfig; +use crate::error::Error; +use crate::message::{IntOffset, Message, Offset, ReadAck, ReadMessage}; +use crate::metrics::{forward_pipeline_metrics, pipeline_forward_read_metric_labels}; +use crate::Result; + +// The JetstreamReader is a handle to the background actor that continuously fetches messages from Jetstream. +// It can be used to cancel the background task and stop reading from Jetstream. +// The sender end of the channel is not stored in this struct, since the struct is clone-able and the mpsc channel is only closed when all the senders are dropped. +// Storing the Sender end of channel in this struct would make it difficult to close the channel with `cancel` method. 
+#[derive(Clone)] +pub(crate) struct JetstreamReader { + partition_idx: u16, + config: BufferReaderConfig, + consumer: PullConsumer, +} + +impl JetstreamReader { + pub(crate) async fn new( + stream_name: String, + partition_idx: u16, + js_ctx: Context, + config: BufferReaderConfig, + ) -> Result { + let mut config = config; + + let mut consumer: PullConsumer = js_ctx + .get_consumer_from_stream(&stream_name, &stream_name) + .await + .map_err(|e| Error::ISB(format!("Failed to get consumer for stream {}", e)))?; + + let consumer_info = consumer + .info() + .await + .map_err(|e| Error::ISB(format!("Failed to get consumer info {}", e)))?; + + // Calculate inProgressTickSeconds based on the ack_wait_seconds. + let ack_wait_seconds = consumer_info.config.ack_wait.as_secs(); + let wip_ack_interval = Duration::from_secs(std::cmp::max( + config.wip_ack_interval.as_secs(), + ack_wait_seconds * 2 / 3, + )); + config.wip_ack_interval = wip_ack_interval; + + Ok(Self { + partition_idx, + config: config.clone(), + consumer, + }) + } + + // When we encounter an error, we log the error and return from the function. This drops the sender end of the channel. + // The closing of the channel should propagate to the receiver end and the receiver should exit gracefully. + // Within the loop, we only consider cancellationToken cancellation during the permit reservation and fetching messages, + // since rest of the operations should finish immediately. 
+ pub(crate) async fn start( + &self, + cancel_token: CancellationToken, + pipeline_config: &PipelineConfig, + ) -> Result<(Receiver, JoinHandle>)> { + let (messages_tx, messages_rx) = mpsc::channel(2 * self.config.batch_size); + + let handle: JoinHandle> = tokio::spawn({ + let this = self.clone(); + let pipeline_config = pipeline_config.clone(); + + async move { + // FIXME: + let partition: &str = pipeline_config + .from_vertex_config + .first() + .unwrap() + .reader_config + .streams + .first() + .unwrap() + .0 + .as_ref(); + + let labels = pipeline_forward_read_metric_labels( + pipeline_config.pipeline_name.as_ref(), + partition, + pipeline_config.vertex_name.as_ref(), + pipeline_config.vertex_config.to_string().as_ref(), + pipeline_config.replica, + ); + + let chunk_stream = this + .consumer + .messages() + .await + .unwrap() + .chunks_timeout(this.config.batch_size, this.config.read_timeout); + + tokio::pin!(chunk_stream); + + // The .next() call will not return if there is no data even if read_timeout is + // reached. 
+ while let Some(messages) = chunk_stream.next().await { + for message in messages { + let jetstream_message = match message { + Ok(message) => message, + Err(e) => { + error!(?e, "Failed to fetch messages from the Jetstream"); + continue; + } + }; + + let msg_info = match jetstream_message.info() { + Ok(info) => info, + Err(e) => { + error!(?e, "Failed to get message info from Jetstream"); + continue; + } + }; + + let mut message: Message = + match jetstream_message.payload.clone().try_into() { + Ok(message) => message, + Err(e) => { + error!( + ?e, + "Failed to parse message payload received from Jetstream" + ); + continue; + } + }; + + message.offset = Some(Offset::Int(IntOffset::new( + msg_info.stream_sequence, + this.partition_idx, + ))); + + let (ack_tx, ack_rx) = oneshot::channel(); + + tokio::spawn(Self::start_work_in_progress( + jetstream_message, + ack_rx, + this.config.wip_ack_interval, + )); + + let read_message = ReadMessage { + message, + ack: ack_tx, + }; + + if messages_tx.send(read_message).await.is_err() { + error!("Failed to send message to the channel"); + return Ok(()); + } + + forward_pipeline_metrics() + .forwarder + .data_read + .get_or_create(labels) + .inc(); + } + if cancel_token.is_cancelled() { + warn!("Cancellation token is cancelled. Exiting JetstreamReader"); + break; + } + } + Ok(()) + } + }); + Ok((messages_rx, handle)) + } + + // Intended to be run as background task which will continuously send InProgress acks to Jetstream. + // We will continuously retry if there is an error in acknowledging the message as work-in-progress. + // If the sender end of the ack_rx channel was dropped before sending a final Ack or Nak (due to some unhandled/unknown failure), we will send a Nak to Jetstream. 
+ async fn start_work_in_progress( + msg: JetstreamMessage, + mut ack_rx: oneshot::Receiver, + tick: Duration, + ) { + let mut interval = time::interval_at(Instant::now() + tick, tick); + + loop { + let wip = async { + interval.tick().await; + let ack_result = msg.ack_with(AckKind::Progress).await; + if let Err(e) = ack_result { + // We expect that the ack in the next iteration will be successful. + // If its some unrecoverable Jetstream error, the fetching messages in the JestreamReader implementation should also fail and cause the system to shut down. + error!(?e, "Failed to send InProgress Ack to Jetstream for message"); + } + }; + + let ack = tokio::select! { + ack = &mut ack_rx => ack, + _ = wip => continue, + }; + + let ack = ack.unwrap_or_else(|e| { + error!(?e, "Received error while waiting for Ack oneshot channel"); + ReadAck::Nak + }); + + match ack { + ReadAck::Ack => { + let ack_result = msg.ack().await; + if let Err(e) = ack_result { + error!(?e, "Failed to send Ack to Jetstream for message"); + } + return; + } + ReadAck::Nak => { + let ack_result = msg.ack_with(AckKind::Nak(None)).await; + if let Err(e) = ack_result { + error!(?e, "Failed to send Nak to Jetstream for message"); + } + return; + } + } + } + } +} + +#[cfg(test)] +mod tests { + use std::collections::HashMap; + + use async_nats::jetstream; + use async_nats::jetstream::{consumer, stream}; + use bytes::BytesMut; + use chrono::Utc; + use tracing::info; + + use super::*; + use crate::message::{Message, MessageID, Offset}; + use crate::pipeline::isb::jetstream::writer::JetstreamWriter; + + #[cfg(feature = "nats-tests")] + #[tokio::test] + async fn test_jetstream_read() { + let js_url = "localhost:4222"; + // Create JetStream context + let client = async_nats::connect(js_url).await.unwrap(); + let context = jetstream::new(client); + + let stream_name = "test_cancellation-2"; + context + .get_or_create_stream(stream::Config { + name: stream_name.into(), + subjects: vec![stream_name.into()], + 
max_message_size: 1024, + ..Default::default() + }) + .await + .unwrap(); + + let _consumer = context + .create_consumer_on_stream( + consumer::Config { + name: Some(stream_name.to_string()), + ack_policy: consumer::AckPolicy::Explicit, + ..Default::default() + }, + stream_name, + ) + .await + .unwrap(); + + let buf_reader_config = BufferReaderConfig { + partitions: 0, + streams: vec![], + batch_size: 2, + read_timeout: Duration::from_millis(1000), + wip_ack_interval: Duration::from_millis(5), + }; + let js_reader = JetstreamReader::new( + stream_name.to_string(), + 0, + context.clone(), + buf_reader_config, + ) + .await + .unwrap(); + + let pipeline_cfg_base64 = "eyJtZXRhZGF0YSI6eyJuYW1lIjoic2ltcGxlLXBpcGVsaW5lLW91dCIsIm5hbWVzcGFjZSI6ImRlZmF1bHQiLCJjcmVhdGlvblRpbWVzdGFtcCI6bnVsbH0sInNwZWMiOnsibmFtZSI6Im91dCIsInNpbmsiOnsiYmxhY2tob2xlIjp7fSwicmV0cnlTdHJhdGVneSI6eyJvbkZhaWx1cmUiOiJyZXRyeSJ9fSwibGltaXRzIjp7InJlYWRCYXRjaFNpemUiOjUwMCwicmVhZFRpbWVvdXQiOiIxcyIsImJ1ZmZlck1heExlbmd0aCI6MzAwMDAsImJ1ZmZlclVzYWdlTGltaXQiOjgwfSwic2NhbGUiOnsibWluIjoxfSwidXBkYXRlU3RyYXRlZ3kiOnsidHlwZSI6IlJvbGxpbmdVcGRhdGUiLCJyb2xsaW5nVXBkYXRlIjp7Im1heFVuYXZhaWxhYmxlIjoiMjUlIn19LCJwaXBlbGluZU5hbWUiOiJzaW1wbGUtcGlwZWxpbmUiLCJpbnRlclN0ZXBCdWZmZXJTZXJ2aWNlTmFtZSI6IiIsInJlcGxpY2FzIjowLCJmcm9tRWRnZXMiOlt7ImZyb20iOiJpbiIsInRvIjoib3V0IiwiY29uZGl0aW9ucyI6bnVsbCwiZnJvbVZlcnRleFR5cGUiOiJTb3VyY2UiLCJmcm9tVmVydGV4UGFydGl0aW9uQ291bnQiOjEsImZyb21WZXJ0ZXhMaW1pdHMiOnsicmVhZEJhdGNoU2l6ZSI6NTAwLCJyZWFkVGltZW91dCI6IjFzIiwiYnVmZmVyTWF4TGVuZ3RoIjozMDAwMCwiYnVmZmVyVXNhZ2VMaW1pdCI6ODB9LCJ0b1ZlcnRleFR5cGUiOiJTaW5rIiwidG9WZXJ0ZXhQYXJ0aXRpb25Db3VudCI6MSwidG9WZXJ0ZXhMaW1pdHMiOnsicmVhZEJhdGNoU2l6ZSI6NTAwLCJyZWFkVGltZW91dCI6IjFzIiwiYnVmZmVyTWF4TGVuZ3RoIjozMDAwMCwiYnVmZmVyVXNhZ2VMaW1pdCI6ODB9fV0sIndhdGVybWFyayI6eyJtYXhEZWxheSI6IjBzIn19LCJzdGF0dXMiOnsicGhhc2UiOiIiLCJyZXBsaWNhcyI6MCwiZGVzaXJlZFJlcGxpY2FzIjowLCJsYXN0U2NhbGVkQXQiOm51bGx9fQ==".to_string(); + + let env_vars = [("NUMAFLOW_ISBSVC_JETSTREAM_URL", "localhost:4222")]; + 
let pipeline_config = PipelineConfig::load(pipeline_cfg_base64, env_vars).unwrap(); + let reader_cancel_token = CancellationToken::new(); + let (mut js_reader_rx, js_reader_task) = js_reader + .start(reader_cancel_token.clone(), &pipeline_config) + .await + .unwrap(); + + let writer_cancel_token = CancellationToken::new(); + let writer = JetstreamWriter::new( + stream_name.to_string(), + 0, + Default::default(), + context.clone(), + 5000, + writer_cancel_token.clone(), + ); + + for i in 0..10 { + let message = Message { + keys: vec![format!("key_{}", i)], + value: format!("message {}", i).as_bytes().to_vec().into(), + offset: None, + event_time: Utc::now(), + id: MessageID { + vertex_name: "vertex".to_string(), + offset: format!("offset_{}", i), + index: i, + }, + headers: HashMap::new(), + }; + let (success_tx, success_rx) = oneshot::channel::>(); + let message_bytes: BytesMut = message.try_into().unwrap(); + writer.write(message_bytes.into(), success_tx).await; + success_rx.await.unwrap().unwrap(); + } + info!("Sent 10 messages"); + // Cancel the token to exit the retry loop + writer_cancel_token.cancel(); + + let mut buffer = vec![]; + for _ in 0..10 { + let Some(val) = js_reader_rx.recv().await else { + break; + }; + buffer.push(val); + } + + assert_eq!( + buffer.len(), + 10, + "Expected 10 messages from the Jestream reader" + ); + + reader_cancel_token.cancel(); + // The token cancellation won't abort the task since we are using chunks_timeout in + // Jetstream reader. 
+ // js_reader_task.await.unwrap().unwrap(); + js_reader_task.abort(); + let _ = js_reader_task.await; + assert!(js_reader_rx.is_closed()); + + context.delete_stream(stream_name).await.unwrap(); + } +} diff --git a/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs b/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs index 431958b12e..65d10963ca 100644 --- a/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs +++ b/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs @@ -12,10 +12,9 @@ use tokio::sync::mpsc::Receiver; use tokio::sync::{mpsc, oneshot}; use tokio::time::sleep; use tokio_util::sync::CancellationToken; -use tracing::error; -use tracing::{debug, warn}; +use tracing::{error, info, warn}; -use crate::config::pipeline::isb::jetstream::StreamWriterConfig; +use crate::config::pipeline::isb::BufferWriterConfig; use crate::error::Error; use crate::message::{IntOffset, Offset}; use crate::Result; @@ -24,7 +23,9 @@ use crate::Result; /// Writes to JetStream ISB. Exposes both write and blocking methods to write messages. /// It accepts a cancellation token to stop infinite retries during shutdown. pub(super) struct JetstreamWriter { - config: StreamWriterConfig, + stream_name: String, + partition_idx: u16, + config: BufferWriterConfig, js_ctx: Context, is_full: Arc, paf_resolver_tx: mpsc::Sender, @@ -35,15 +36,19 @@ impl JetstreamWriter { /// Creates a JetStream Writer and a background task to make sure the Write futures (PAFs) are /// successful. Batch Size determines the maximum pending futures. 
pub(super) fn new( - config: StreamWriterConfig, + stream_name: String, + partition_idx: u16, + config: BufferWriterConfig, js_ctx: Context, - batch_size: usize, + paf_batch_size: usize, cancel_token: CancellationToken, ) -> Self { let (paf_resolver_tx, paf_resolver_rx) = - mpsc::channel::(batch_size); + mpsc::channel::(paf_batch_size); let this = Self { + stream_name, + partition_idx, config, js_ctx, is_full: Arc::new(AtomicBool::new(false)), @@ -75,7 +80,7 @@ impl JetstreamWriter { loop { tokio::select! { _ = interval.tick() => { - match Self::fetch_buffer_usage(self.js_ctx.clone(), self.config.name.as_str(), self.config.max_length).await { + match Self::fetch_buffer_usage(self.js_ctx.clone(), self.stream_name.as_str(), self.config.max_length).await { Ok((soft_usage, solid_usage)) => { if solid_usage >= self.config.usage_limit && soft_usage >= self.config.usage_limit { self.is_full.store(true, Ordering::Relaxed); @@ -158,11 +163,11 @@ impl JetstreamWriter { match self.is_full.load(Ordering::Relaxed) { true => { // FIXME: add metrics - debug!(%self.config.name, "buffer is full"); + info!(%self.stream_name, "stream is full"); // FIXME: consider buffer-full strategy } false => match js_ctx - .publish(self.config.name.clone(), Bytes::from(payload.clone())) + .publish(self.stream_name.clone(), Bytes::from(payload.clone())) .await { Ok(paf) => { @@ -205,7 +210,7 @@ impl JetstreamWriter { loop { match js_ctx - .publish(self.config.name.clone(), Bytes::from(payload.clone())) + .publish(self.stream_name.clone(), Bytes::from(payload.clone())) .await { Ok(paf) => match paf.await { @@ -266,24 +271,42 @@ impl PafResolverActor { /// not successfully resolve, it will do blocking write till write to JetStream succeeds. 
async fn successfully_resolve_paf(&mut self, result: ResolveAndPublishResult) { match result.paf.await { - Ok(ack) => result - .callee_tx - .send(Ok(Offset::Int(IntOffset::new( - ack.sequence, - self.js_writer.config.partition_idx, - )))) - .unwrap(), + Ok(ack) => { + if ack.duplicate { + warn!("Duplicate message detected, ignoring {:?}", ack); + } + result + .callee_tx + .send(Ok(Offset::Int(IntOffset::new( + ack.sequence, + self.js_writer.partition_idx, + )))) + .unwrap_or_else(|e| { + error!("Failed to send offset: {:?}", e); + }) + } Err(e) => { error!(?e, "Failed to resolve the future, trying blocking write"); match self.js_writer.blocking_write(result.payload.clone()).await { - Ok(ack) => result - .callee_tx - .send(Ok(Offset::Int(IntOffset::new( - ack.sequence, - self.js_writer.config.partition_idx, - )))) - .unwrap(), - Err(e) => result.callee_tx.send(Err(e)).unwrap(), + Ok(ack) => { + if ack.duplicate { + warn!("Duplicate message detected, ignoring {:?}", ack); + } + result + .callee_tx + .send(Ok(Offset::Int(IntOffset::new( + ack.sequence, + self.js_writer.partition_idx, + )))) + .unwrap() + } + Err(e) => { + error!(?e, "Blocking write failed"); + result + .callee_tx + .send(Err(Error::ISB("Shutdown signal received".to_string()))) + .unwrap() + } } } } @@ -303,6 +326,7 @@ mod tests { use async_nats::jetstream; use async_nats::jetstream::{consumer, stream}; + use bytes::BytesMut; use chrono::Utc; use super::*; @@ -327,16 +351,30 @@ mod tests { .await .unwrap(); - let config = StreamWriterConfig { - name: stream_name.into(), - ..Default::default() - }; + let _consumer = context + .create_consumer_on_stream( + consumer::Config { + name: Some(stream_name.to_string()), + ack_policy: consumer::AckPolicy::Explicit, + ..Default::default() + }, + stream_name, + ) + .await + .unwrap(); - let writer = JetstreamWriter::new(config, context.clone(), 500, cln_token.clone()); + let writer = JetstreamWriter::new( + stream_name.to_string(), + 0, + Default::default(), + 
context.clone(), + 500, + cln_token.clone(), + ); let message = Message { keys: vec!["key_0".to_string()], - value: "message 0".as_bytes().to_vec(), + value: "message 0".as_bytes().to_vec().into(), offset: None, event_time: Utc::now(), id: MessageID { @@ -348,7 +386,8 @@ mod tests { }; let (success_tx, success_rx) = oneshot::channel::>(); - writer.write(message.try_into().unwrap(), success_tx).await; + let message_bytes: BytesMut = message.try_into().unwrap(); + writer.write(message_bytes.into(), success_tx).await; assert!(success_rx.await.is_ok()); context.delete_stream(stream_name).await.unwrap(); @@ -373,16 +412,30 @@ mod tests { .await .unwrap(); - let config = StreamWriterConfig { - name: stream_name.into(), - ..Default::default() - }; + let _consumer = context + .create_consumer_on_stream( + consumer::Config { + name: Some(stream_name.to_string()), + ack_policy: consumer::AckPolicy::Explicit, + ..Default::default() + }, + stream_name, + ) + .await + .unwrap(); - let writer = JetstreamWriter::new(config, context.clone(), 500, cln_token.clone()); + let writer = JetstreamWriter::new( + stream_name.to_string(), + 0, + Default::default(), + context.clone(), + 500, + cln_token.clone(), + ); let message = Message { keys: vec!["key_0".to_string()], - value: "message 0".as_bytes().to_vec(), + value: "message 0".as_bytes().to_vec().into(), offset: None, event_time: Utc::now(), id: MessageID { @@ -393,7 +446,8 @@ mod tests { headers: HashMap::new(), }; - let result = writer.blocking_write(message.try_into().unwrap()).await; + let message_bytes: BytesMut = message.try_into().unwrap(); + let result = writer.blocking_write(message_bytes.into()).await; assert!(result.is_ok()); let publish_ack = result.unwrap(); @@ -421,20 +475,34 @@ mod tests { .await .unwrap(); - let config = StreamWriterConfig { - name: stream_name.into(), - ..Default::default() - }; + let _consumer = context + .create_consumer_on_stream( + consumer::Config { + name: Some(stream_name.to_string()), + 
ack_policy: consumer::AckPolicy::Explicit, + ..Default::default() + }, + stream_name, + ) + .await + .unwrap(); let cancel_token = CancellationToken::new(); - let writer = JetstreamWriter::new(config, context.clone(), 500, cancel_token.clone()); + let writer = JetstreamWriter::new( + stream_name.to_string(), + 0, + Default::default(), + context.clone(), + 500, + cancel_token.clone(), + ); let mut result_receivers = Vec::new(); // Publish 10 messages successfully for i in 0..10 { let message = Message { keys: vec![format!("key_{}", i)], - value: format!("message {}", i).as_bytes().to_vec(), + value: format!("message {}", i).as_bytes().to_vec().into(), offset: None, event_time: Utc::now(), id: MessageID { @@ -445,7 +513,8 @@ mod tests { headers: HashMap::new(), }; let (success_tx, success_rx) = oneshot::channel::>(); - writer.write(message.try_into().unwrap(), success_tx).await; + let message_bytes: BytesMut = message.try_into().unwrap(); + writer.write(message_bytes.into(), success_tx).await; result_receivers.push(success_rx); } @@ -453,7 +522,7 @@ mod tests { // so that it fails and sync write will be attempted and it will be blocked let message = Message { keys: vec!["key_11".to_string()], - value: vec![0; 1025], + value: vec![0; 1025].into(), offset: None, event_time: Utc::now(), id: MessageID { @@ -464,7 +533,8 @@ mod tests { headers: HashMap::new(), }; let (success_tx, success_rx) = oneshot::channel::>(); - writer.write(message.try_into().unwrap(), success_tx).await; + let message_bytes: BytesMut = message.try_into().unwrap(); + writer.write(message_bytes.into(), success_tx).await; result_receivers.push(success_rx); // Cancel the token to exit the retry loop @@ -517,7 +587,18 @@ mod tests { .unwrap(); let _consumer = context - .create_consumer_strict_on_stream( + .create_consumer_on_stream( + consumer::Config { + name: Some(stream_name.to_string()), + ack_policy: consumer::AckPolicy::Explicit, + ..Default::default() + }, + stream_name, + ) + .await; + + let 
_consumer = context + .create_consumer_on_stream( consumer::Config { name: Some(stream_name.to_string()), ack_policy: consumer::AckPolicy::Explicit, @@ -539,7 +620,7 @@ mod tests { } // Fetch buffer usage - let (soft_usage, solid_usage) = + let (soft_usage, _) = JetstreamWriter::fetch_buffer_usage(context.clone(), stream_name, max_length) .await .unwrap(); @@ -564,11 +645,6 @@ mod tests { let client = async_nats::connect(js_url).await.unwrap(); let context = jetstream::new(client); - let config = StreamWriterConfig { - name: "test_check_stream_status".into(), - max_length: 100, - ..Default::default() - }; let stream_name = "test_check_stream_status"; let _stream = context .get_or_create_stream(stream::Config { @@ -584,7 +660,7 @@ mod tests { .unwrap(); let _consumer = context - .create_consumer_strict_on_stream( + .create_consumer_on_stream( consumer::Config { name: Some(stream_name.to_string()), ack_policy: consumer::AckPolicy::Explicit, @@ -596,7 +672,17 @@ mod tests { .unwrap(); let cancel_token = CancellationToken::new(); - let writer = JetstreamWriter::new(config, context.clone(), 500, cancel_token.clone()); + let writer = JetstreamWriter::new( + stream_name.to_string(), + 0, + BufferWriterConfig { + max_length: 100, + ..Default::default() + }, + context.clone(), + 500, + cancel_token.clone(), + ); let mut js_writer = writer.clone(); // Simulate the stream status check diff --git a/rust/numaflow-core/src/shared/utils.rs b/rust/numaflow-core/src/shared/utils.rs index aa2d802e6d..576f78499f 100644 --- a/rust/numaflow-core/src/shared/utils.rs +++ b/rust/numaflow-core/src/shared/utils.rs @@ -2,15 +2,6 @@ use std::net::SocketAddr; use std::path::PathBuf; use std::time::Duration; -use crate::config::components::metrics::MetricsConfig; -use crate::config::monovertex::MonovertexConfig; -use crate::error; -use crate::monovertex::metrics::{ - start_metrics_https_server, PendingReader, PendingReaderBuilder, UserDefinedContainerState, -}; -use crate::shared::server_info; 
-use crate::source::SourceHandle; -use crate::Error; use axum::http::Uri; use backoff::retry::Retry; use backoff::strategy::fixed; @@ -26,52 +17,20 @@ use tokio_util::sync::CancellationToken; use tonic::transport::{Channel, Endpoint}; use tonic::Request; use tower::service_fn; -use tracing::{info, warn}; - -pub(crate) async fn check_compatibility( - cln_token: &CancellationToken, - source_file_path: Option, - sink_file_path: Option, - transformer_file_path: Option, - fb_sink_file_path: Option, -) -> error::Result<()> { - if let Some(source_file_path) = source_file_path { - server_info::check_for_server_compatibility(source_file_path, cln_token.clone()) - .await - .map_err(|e| { - warn!("Error waiting for source server info file: {:?}", e); - Error::Forwarder("Error waiting for server info file".to_string()) - })?; - } - - if let Some(sink_file_path) = sink_file_path { - server_info::check_for_server_compatibility(sink_file_path, cln_token.clone()) - .await - .map_err(|e| { - error!("Error waiting for sink server info file: {:?}", e); - Error::Forwarder("Error waiting for server info file".to_string()) - })?; - } - - if let Some(transformer_path) = transformer_file_path { - server_info::check_for_server_compatibility(transformer_path, cln_token.clone()) - .await - .map_err(|e| { - error!("Error waiting for transformer server info file: {:?}", e); - Error::Forwarder("Error waiting for server info file".to_string()) - })?; - } +use tracing::info; - if let Some(fb_sink_path) = fb_sink_file_path { - server_info::check_for_server_compatibility(fb_sink_path, cln_token.clone()) - .await - .map_err(|e| { - warn!("Error waiting for fallback sink server info file: {:?}", e); - Error::Forwarder("Error waiting for server info file".to_string()) - })?; - } - Ok(()) -} +use crate::config::components::metrics::MetricsConfig; +use crate::config::components::sink::SinkType; +use crate::config::monovertex::MonovertexConfig; +use crate::error; +use crate::metrics::{ + 
start_metrics_https_server, PendingReader, PendingReaderBuilder, UserDefinedContainerState, +}; +use crate::shared::server_info::check_for_server_compatibility; +use crate::sink::{SinkClientType, SinkHandle}; +use crate::source::SourceHandle; +use crate::Error; +use crate::Result; pub(crate) async fn start_metrics_server( metrics_config: MetricsConfig, @@ -110,67 +69,61 @@ pub(crate) async fn create_pending_reader( )) .build() } - -pub(crate) async fn wait_until_ready( - cln_token: CancellationToken, - source_client: &mut Option>, - sink_client: &mut Option>, - transformer_client: &mut Option>, - fb_sink_client: &mut Option>, -) -> error::Result<()> { +pub(crate) async fn wait_until_source_ready( + cln_token: &CancellationToken, + client: &mut SourceClient, +) -> Result<()> { + info!("Waiting for source client to be ready..."); loop { if cln_token.is_cancelled() { return Err(Error::Forwarder( "Cancellation token is cancelled".to_string(), )); } - - let source_ready = if let Some(client) = source_client { - let ready = client.is_ready(Request::new(())).await.is_ok(); - if !ready { - info!("UDSource is not ready, waiting..."); - } - ready - } else { - true - }; - - let sink_ready = if let Some(sink_client) = sink_client { - sink_client.is_ready(Request::new(())).await.is_ok() - } else { - true - }; - if !sink_ready { - info!("UDSink is not ready, waiting..."); + match client.is_ready(Request::new(())).await { + Ok(_) => break, + Err(_) => sleep(Duration::from_secs(1)).await, } + info!("Waiting for source client to be ready..."); + } + Ok(()) +} - let transformer_ready = if let Some(client) = transformer_client { - let ready = client.is_ready(Request::new(())).await.is_ok(); - if !ready { - info!("UDTransformer is not ready, waiting..."); - } - ready - } else { - true - }; - - let fb_sink_ready = if let Some(client) = fb_sink_client { - let ready = client.is_ready(Request::new(())).await.is_ok(); - if !ready { - info!("Fallback Sink is not ready, waiting..."); - } - 
ready - } else { - true - }; - - if source_ready && sink_ready && transformer_ready && fb_sink_ready { - break; +pub(crate) async fn wait_until_sink_ready( + cln_token: &CancellationToken, + client: &mut SinkClient, +) -> Result<()> { + loop { + if cln_token.is_cancelled() { + return Err(Error::Forwarder( + "Cancellation token is cancelled".to_string(), + )); } - - sleep(Duration::from_secs(1)).await; + match client.is_ready(Request::new(())).await { + Ok(_) => break, + Err(_) => sleep(Duration::from_secs(1)).await, + } + info!("Waiting for sink client to be ready..."); } + Ok(()) +} +pub(crate) async fn wait_until_transformer_ready( + cln_token: &CancellationToken, + client: &mut SourceTransformClient, +) -> Result<()> { + loop { + if cln_token.is_cancelled() { + return Err(Error::Forwarder( + "Cancellation token is cancelled".to_string(), + )); + } + match client.is_ready(Request::new(())).await { + Ok(_) => break, + Err(_) => sleep(Duration::from_secs(1)).await, + } + info!("Waiting for transformer client to be ready..."); + } Ok(()) } @@ -187,7 +140,7 @@ pub(crate) fn prost_timestamp_from_utc(t: DateTime) -> Option { }) } -pub(crate) async fn create_rpc_channel(socket_path: PathBuf) -> crate::error::Result { +pub(crate) async fn create_rpc_channel(socket_path: PathBuf) -> Result { const RECONNECT_INTERVAL: u64 = 1000; const MAX_RECONNECT_ATTEMPTS: usize = 5; @@ -202,7 +155,7 @@ pub(crate) async fn create_rpc_channel(socket_path: PathBuf) -> crate::error::Re Ok(channel) } -pub(crate) async fn connect_with_uds(uds_path: PathBuf) -> Result { +pub(crate) async fn connect_with_uds(uds_path: PathBuf) -> Result { let channel = Endpoint::try_from("http://[::]:50051") .map_err(|e| Error::Connection(format!("Failed to create endpoint: {:?}", e)))? 
.connect_with_connector(service_fn(move |_: Uri| { @@ -218,101 +171,54 @@ pub(crate) async fn connect_with_uds(uds_path: PathBuf) -> Result Result<(SinkHandle, Option>)> { + match sink_type { + SinkType::Log(_) => Ok(( + SinkHandle::new(SinkClientType::Log, batch_size).await?, + None, + )), + SinkType::Blackhole(_) => Ok(( + SinkHandle::new(SinkClientType::Blackhole, batch_size).await?, + None, + )), + SinkType::UserDefined(ud_config) => { + check_for_server_compatibility( + ud_config.server_info_path.clone().into(), + cln_token.clone(), + ) + .await?; + let mut sink_grpc_client = + SinkClient::new(create_rpc_channel(ud_config.socket_path.clone().into()).await?) + .max_encoding_message_size(ud_config.grpc_max_message_size) + .max_encoding_message_size(ud_config.grpc_max_message_size); + wait_until_sink_ready(cln_token, &mut sink_grpc_client).await?; + Ok(( + SinkHandle::new( + SinkClientType::UserDefined(sink_grpc_client.clone()), + batch_size, + ) + .await?, + Some(sink_grpc_client), + )) + } + } +} + #[cfg(test)] mod tests { - use std::fs::File; - use std::io::Write; - use numaflow::source::{Message, Offset, SourceReadRequest}; use numaflow::{sink, source, sourcetransform}; - use tempfile::tempdir; use tokio::sync::mpsc; use tokio::sync::mpsc::Sender; use tokio_util::sync::CancellationToken; use super::*; - use crate::shared::server_info::ServerInfo; use crate::shared::utils::create_rpc_channel; - async fn write_server_info(file_path: &str, server_info: &ServerInfo) -> error::Result<()> { - let serialized = serde_json::to_string(server_info).unwrap(); - let mut file = File::create(file_path).unwrap(); - file.write_all(serialized.as_bytes()).unwrap(); - file.write_all(b"U+005C__END__").unwrap(); - Ok(()) - } - - #[tokio::test] - async fn test_check_compatibility_success() { - let dir = tempdir().unwrap(); - let source_file_path = dir.path().join("sourcer-server-info"); - let sink_file_path = dir.path().join("sinker-server-info"); - let transformer_file_path = 
dir.path().join("sourcetransformer-server-info"); - let fb_sink_file_path = dir.path().join("fb-sink-server-info"); - - let server_info = ServerInfo { - protocol: "uds".to_string(), - language: "rust".to_string(), - minimum_numaflow_version: "0.1.0".to_string(), - version: "0.1.0".to_string(), - metadata: None, - }; - - write_server_info(source_file_path.to_str().unwrap(), &server_info) - .await - .unwrap(); - write_server_info(sink_file_path.to_str().unwrap(), &server_info) - .await - .unwrap(); - write_server_info(transformer_file_path.to_str().unwrap(), &server_info) - .await - .unwrap(); - write_server_info(fb_sink_file_path.to_str().unwrap(), &server_info) - .await - .unwrap(); - - let cln_token = CancellationToken::new(); - let result = check_compatibility( - &cln_token, - Some(source_file_path), - Some(sink_file_path), - None, - None, - ) - .await; - - assert!(result.is_ok()); - } - - #[tokio::test] - async fn test_check_compatibility_failure() { - let cln_token = CancellationToken::new(); - let dir = tempdir().unwrap(); - let source_file_path = dir.path().join("source_server_info.json"); - let sink_file_path = dir.path().join("sink_server_info.json"); - let transformer_file_path = dir.path().join("transformer_server_info.json"); - let fb_sink_file_path = dir.path().join("fb_sink_server_info.json"); - - // do not write server info files to simulate failure - // cancel the token after 100ms to simulate cancellation - let token = cln_token.clone(); - let handle = tokio::spawn(async move { - sleep(Duration::from_millis(100)).await; - token.cancel(); - }); - let result = check_compatibility( - &cln_token, - Some(source_file_path), - Some(sink_file_path), - Some(transformer_file_path), - Some(fb_sink_file_path), - ) - .await; - - assert!(result.is_err()); - handle.await.unwrap(); - } - struct SimpleSource {} #[tonic::async_trait] @@ -406,28 +312,28 @@ mod tests { // Wait for the servers to start sleep(Duration::from_millis(100)).await; - let source_grpc_client = 
+ let cln_token = CancellationToken::new(); + + let mut source_grpc_client = SourceClient::new(create_rpc_channel(source_sock_file.clone()).await.unwrap()); - let sink_grpc_client = + wait_until_source_ready(&cln_token, &mut source_grpc_client) + .await + .unwrap(); + + let mut sink_grpc_client = SinkClient::new(create_rpc_channel(sink_sock_file.clone()).await.unwrap()); + wait_until_sink_ready(&cln_token, &mut sink_grpc_client) + .await + .unwrap(); + let mut transformer_grpc_client = Some(SourceTransformClient::new( create_rpc_channel(transformer_sock_file.clone()) .await .unwrap(), )); - - let mut fb_sink_grpc_client = None; - - let cln_token = CancellationToken::new(); - let result = wait_until_ready( - cln_token, - &mut Some(source_grpc_client), - &mut Some(sink_grpc_client), - &mut transformer_grpc_client, - &mut fb_sink_grpc_client, - ) - .await; - assert!(result.is_ok()); + wait_until_transformer_ready(&cln_token, transformer_grpc_client.as_mut().unwrap()) + .await + .unwrap(); source_shutdown_tx.send(()).unwrap(); sink_shutdown_tx.send(()).unwrap(); diff --git a/rust/numaflow-core/src/sink.rs b/rust/numaflow-core/src/sink.rs index 88e41dd107..b289dc94c1 100644 --- a/rust/numaflow-core/src/sink.rs +++ b/rust/numaflow-core/src/sink.rs @@ -1,9 +1,22 @@ +use std::collections::HashMap; + use numaflow_pb::clients::sink::sink_client::SinkClient; +use tokio::sync::mpsc::Receiver; use tokio::sync::{mpsc, oneshot}; +use tokio::task::JoinHandle; +use tokio::time::sleep; +use tokio::{pin, time}; +use tokio_stream::StreamExt; +use tokio_util::sync::CancellationToken; use tonic::transport::Channel; +use tracing::{debug, error, warn}; use user_defined::UserDefinedSink; -use crate::message::{Message, ResponseFromSink}; +use crate::config::components::sink::{OnFailureStrategy, RetryConfig}; +use crate::config::pipeline::SinkVtxConfig; +use crate::error::Error; +use crate::message::{Message, ReadAck, ReadMessage, ResponseFromSink, ResponseStatusFromSink}; +use 
crate::Result; mod blackhole; mod log; @@ -19,13 +32,13 @@ mod user_defined; #[allow(unused)] pub(crate) trait LocalSink { /// Write the messages to the Sink. - async fn sink(&mut self, messages: Vec) -> crate::Result>; + async fn sink(&mut self, messages: Vec) -> Result>; } enum ActorMessage { Sink { messages: Vec, - respond_to: oneshot::Sender>>, + respond_to: oneshot::Sender>>, }, } @@ -58,6 +71,7 @@ where } } +#[derive(Clone)] pub(crate) struct SinkHandle { sender: mpsc::Sender, } @@ -69,7 +83,7 @@ pub(crate) enum SinkClientType { } impl SinkHandle { - pub(crate) async fn new(sink_client: SinkClientType, batch_size: usize) -> crate::Result { + pub(crate) async fn new(sink_client: SinkClientType, batch_size: usize) -> Result { let (sender, receiver) = mpsc::channel(batch_size); match sink_client { SinkClientType::Log => { @@ -103,10 +117,7 @@ impl SinkHandle { Ok(Self { sender }) } - pub(crate) async fn sink( - &self, - messages: Vec, - ) -> crate::Result> { + pub(crate) async fn sink(&self, messages: Vec) -> Result> { let (tx, rx) = oneshot::channel(); let msg = ActorMessage::Sink { messages, @@ -116,3 +127,372 @@ impl SinkHandle { rx.await.unwrap() } } + +#[derive(Clone)] +pub(super) struct SinkWriter { + batch_size: usize, + read_timeout: time::Duration, + config: SinkVtxConfig, + sink_handle: SinkHandle, + fb_sink_handle: Option, +} + +impl SinkWriter { + pub(super) async fn new( + batch_size: usize, + read_timeout: time::Duration, + config: SinkVtxConfig, + sink_handle: SinkHandle, + fb_sink_handle: Option, + ) -> Result { + Ok(Self { + batch_size, + read_timeout, + config, + sink_handle, + fb_sink_handle, + }) + } + + pub(super) async fn start( + &self, + messages_rx: Receiver, + cancellation_token: CancellationToken, + ) -> Result>> { + let handle: JoinHandle> = tokio::spawn({ + let mut this = self.clone(); + async move { + let chunk_stream = tokio_stream::wrappers::ReceiverStream::new(messages_rx) + .chunks_timeout(this.batch_size, this.read_timeout); + 
+ pin!(chunk_stream); + + while let Some(batch) = chunk_stream.next().await { + if batch.is_empty() { + continue; + } + + let messages: Vec = + batch.iter().map(|rm| rm.message.clone()).collect(); + + match this + .write_to_sink(messages, cancellation_token.clone()) + .await + { + Ok(_) => { + for rm in batch { + let _ = rm.ack.send(ReadAck::Ack); + } + } + Err(e) => { + error!(?e, "Error writing to sink"); + for rm in batch { + let _ = rm.ack.send(ReadAck::Nak); + } + } + } + + if cancellation_token.is_cancelled() { + warn!("Cancellation token is cancelled. Exiting SinkWriter"); + break; + } + } + + Ok(()) + } + }); + Ok(handle) + } + + // Writes the messages to the sink and handles fallback messages if present + async fn write_to_sink( + &mut self, + messages: Vec, + cln_token: CancellationToken, + ) -> Result<()> { + if messages.is_empty() { + return Ok(()); + } + + let mut attempts = 0; + let mut error_map = HashMap::new(); + let mut fallback_msgs = Vec::new(); + // start with the original set of message to be sent. + // we will overwrite this vec with failed messages and will keep retrying. + let mut messages_to_send = messages; + + // only breaks out of this loop based on the retry strategy unless all the messages have been written to sink + // successfully. + let retry_config = &self + .config + .sink_config + .retry_config + .clone() + .unwrap_or_default(); + + loop { + while attempts < retry_config.sink_max_retry_attempts { + let status = self + .write_to_sink_once( + &mut error_map, + &mut fallback_msgs, + &mut messages_to_send, + retry_config, + ) + .await; + match status { + Ok(true) => break, + Ok(false) => { + attempts += 1; + warn!( + "Retry attempt {} due to retryable error. 
Errors: {:?}", + attempts, error_map + ); + } + Err(e) => Err(e)?, + } + + // if we are shutting down, stop the retry + if cln_token.is_cancelled() { + return Err(Error::Sink( + "Cancellation token triggered during retry".to_string(), + )); + } + } + + // If after the retries we still have messages to process, handle the post retry failures + let need_retry = self.handle_sink_post_retry( + &mut attempts, + &mut error_map, + &mut fallback_msgs, + &mut messages_to_send, + retry_config, + ); + + match need_retry { + // if we are done with the messages, break the loop + Ok(false) => break, + // if we need to retry, reset the attempts and error_map + Ok(true) => { + attempts = 0; + error_map.clear(); + } + Err(e) => Err(e)?, + } + } + + // If there are fallback messages, write them to the fallback sink + if !fallback_msgs.is_empty() { + self.handle_fallback_messages(fallback_msgs, retry_config) + .await?; + } + + Ok(()) + } + + /// Handles the post retry failures based on the configured strategy, + /// returns true if we need to retry, else false. + fn handle_sink_post_retry( + &mut self, + attempts: &mut u16, + error_map: &mut HashMap, + fallback_msgs: &mut Vec, + messages_to_send: &mut Vec, + retry_config: &RetryConfig, + ) -> Result { + // if we are done with the messages, break the loop + if messages_to_send.is_empty() { + return Ok(false); + } + // check what is the failure strategy in the config + let strategy = retry_config.sink_retry_on_fail_strategy.clone(); + match strategy { + // if we need to retry, return true + OnFailureStrategy::Retry => { + warn!( + "Using onFailure Retry, Retry attempts {} completed", + attempts + ); + return Ok(true); + } + // if we need to drop the messages, log and return false + OnFailureStrategy::Drop => { + // log that we are dropping the messages as requested + warn!( + "Dropping messages after {} attempts. 
Errors: {:?}", + attempts, error_map + ); + } + // if we need to move the messages to the fallback, return false + OnFailureStrategy::Fallback => { + // log that we are moving the messages to the fallback as requested + warn!( + "Moving messages to fallback after {} attempts. Errors: {:?}", + attempts, error_map + ); + // move the messages to the fallback messages + fallback_msgs.append(messages_to_send); + } + } + // if we are done with the messages, break the loop + Ok(false) + } + + /// Writes to sink once and will return true if successful, else false. Please note that it + /// mutates is incoming fields. + async fn write_to_sink_once( + &mut self, + error_map: &mut HashMap, + fallback_msgs: &mut Vec, + messages_to_send: &mut Vec, + retry_config: &RetryConfig, + ) -> Result { + let start_time = time::Instant::now(); + match self.sink_handle.sink(messages_to_send.clone()).await { + Ok(response) => { + debug!("Sink latency - {}ms", start_time.elapsed().as_millis()); + + // create a map of id to result, since there is no strict requirement + // for the udsink to return the results in the same order as the requests + let result_map = response + .into_iter() + .map(|resp| (resp.id, resp.status)) + .collect::>(); + + error_map.clear(); + // drain all the messages that were successfully written + // and keep only the failed messages to send again + // construct the error map for the failed messages + messages_to_send.retain(|msg| { + if let Some(result) = result_map.get(&msg.id.to_string()) { + return match result { + ResponseStatusFromSink::Success => false, + ResponseStatusFromSink::Failed(err_msg) => { + *error_map.entry(err_msg.clone()).or_insert(0) += 1; + true + } + ResponseStatusFromSink::Fallback => { + fallback_msgs.push(msg.clone()); + false + } + }; + } + false + }); + + // if all messages are successfully written, break the loop + if messages_to_send.is_empty() { + return Ok(true); + } + + sleep(tokio::time::Duration::from_millis( + 
retry_config.sink_retry_interval_in_ms as u64, + )) + .await; + + // we need to retry + Ok(false) + } + Err(e) => Err(e), + } + } + + // Writes the fallback messages to the fallback sink + async fn handle_fallback_messages( + &mut self, + fallback_msgs: Vec, + retry_config: &RetryConfig, + ) -> Result<()> { + if self.fb_sink_handle.is_none() { + return Err(Error::Sink( + "Response contains fallback messages but no fallback sink is configured" + .to_string(), + )); + } + + let fallback_client = self.fb_sink_handle.as_mut().unwrap(); + let mut attempts = 0; + let mut fallback_error_map = HashMap::new(); + // start with the original set of message to be sent. + // we will overwrite this vec with failed messages and will keep retrying. + let mut messages_to_send = fallback_msgs; + + let default_retry = retry_config + .sink_default_retry_strategy + .clone() + .backoff + .unwrap(); + let max_attempts = default_retry.steps.unwrap(); + let sleep_interval = default_retry.interval.unwrap(); + + while attempts < max_attempts { + let start_time = tokio::time::Instant::now(); + match fallback_client.sink(messages_to_send.clone()).await { + Ok(fb_response) => { + debug!( + "Fallback sink latency - {}ms", + start_time.elapsed().as_millis() + ); + + // create a map of id to result, since there is no strict requirement + // for the udsink to return the results in the same order as the requests + let result_map = fb_response + .into_iter() + .map(|resp| (resp.id, resp.status)) + .collect::>(); + + let mut contains_fallback_status = false; + + fallback_error_map.clear(); + // drain all the messages that were successfully written + // and keep only the failed messages to send again + // construct the error map for the failed messages + messages_to_send.retain(|msg| { + if let Some(result) = result_map.get(&msg.id.to_string()) { + return match result { + ResponseStatusFromSink::Success => false, + ResponseStatusFromSink::Failed(err_msg) => { + 
*fallback_error_map.entry(err_msg.clone()).or_insert(0) += 1; + true + } + ResponseStatusFromSink::Fallback => { + contains_fallback_status = true; + false + } + }; + } else { + false + } + }); + + // specifying fallback status in fallback response is not allowed + if contains_fallback_status { + return Err(Error::Sink( + "Fallback response contains fallback status".to_string(), + )); + } + + attempts += 1; + + if messages_to_send.is_empty() { + break; + } + + warn!( + "Retry attempt {} due to retryable error. Errors: {:?}", + attempts, fallback_error_map + ); + sleep(tokio::time::Duration::from(sleep_interval)).await; + } + Err(e) => return Err(e), + } + } + if !messages_to_send.is_empty() { + return Err(Error::Sink(format!( + "Failed to write messages to fallback sink after {} attempts. Errors: {:?}", + attempts, fallback_error_map + ))); + } + Ok(()) + } +} diff --git a/rust/numaflow-core/src/sink/blackhole.rs b/rust/numaflow-core/src/sink/blackhole.rs index d4828ca1f3..41ddfd06dd 100644 --- a/rust/numaflow-core/src/sink/blackhole.rs +++ b/rust/numaflow-core/src/sink/blackhole.rs @@ -34,7 +34,7 @@ mod tests { let messages = vec![ Message { keys: vec![], - value: b"Hello, World!".to_vec(), + value: b"Hello, World!".to_vec().into(), offset: Some(Offset::Int(IntOffset::new(1, 0))), event_time: Utc::now(), headers: Default::default(), @@ -46,7 +46,7 @@ mod tests { }, Message { keys: vec![], - value: b"Hello, World!".to_vec(), + value: b"Hello, World!".to_vec().into(), offset: Some(Offset::Int(IntOffset::new(1, 0))), event_time: Utc::now(), headers: Default::default(), diff --git a/rust/numaflow-core/src/sink/log.rs b/rust/numaflow-core/src/sink/log.rs index be6a89755d..4e53d8b797 100644 --- a/rust/numaflow-core/src/sink/log.rs +++ b/rust/numaflow-core/src/sink/log.rs @@ -50,7 +50,7 @@ mod tests { let messages = vec![ Message { keys: vec![], - value: b"Hello, World!".to_vec(), + value: b"Hello, World!".to_vec().into(), offset: Some(Offset::Int(IntOffset::new(1, 0))), 
event_time: Utc::now(), headers: Default::default(), @@ -62,7 +62,7 @@ mod tests { }, Message { keys: vec![], - value: b"Hello, World!".to_vec(), + value: b"Hello, World!".to_vec().into(), offset: Some(Offset::Int(IntOffset::new(1, 0))), event_time: Utc::now(), headers: Default::default(), diff --git a/rust/numaflow-core/src/sink/user_defined.rs b/rust/numaflow-core/src/sink/user_defined.rs index ba20bdbef1..92d05230ad 100644 --- a/rust/numaflow-core/src/sink/user_defined.rs +++ b/rust/numaflow-core/src/sink/user_defined.rs @@ -173,7 +173,7 @@ mod tests { let messages = vec![ Message { keys: vec![], - value: b"Hello, World!".to_vec(), + value: b"Hello, World!".to_vec().into(), offset: None, event_time: Utc::now(), headers: Default::default(), @@ -185,7 +185,7 @@ mod tests { }, Message { keys: vec![], - value: b"Hello, World!".to_vec(), + value: b"Hello, World!".to_vec().into(), offset: None, event_time: Utc::now(), headers: Default::default(), diff --git a/rust/numaflow-core/src/source.rs b/rust/numaflow-core/src/source.rs index 3af6dc8190..2c7bc0b83c 100644 --- a/rust/numaflow-core/src/source.rs +++ b/rust/numaflow-core/src/source.rs @@ -2,7 +2,6 @@ use tokio::sync::{mpsc, oneshot}; use crate::{ message::{Message, Offset}, - monovertex::SourceType, reader::LagReader, }; @@ -164,3 +163,16 @@ impl SourceHandle { .map_err(|e| crate::error::Error::ActorPatternRecv(e.to_string()))? 
} } + +pub(crate) enum SourceType { + UserDefinedSource( + user_defined::UserDefinedSourceRead, + user_defined::UserDefinedSourceAck, + user_defined::UserDefinedSourceLagReader, + ), + Generator( + generator::GeneratorRead, + generator::GeneratorAck, + generator::GeneratorLagReader, + ), +} diff --git a/rust/numaflow-core/src/source/generator.rs b/rust/numaflow-core/src/source/generator.rs index cf0ffb327e..3c91bbf1cc 100644 --- a/rust/numaflow-core/src/source/generator.rs +++ b/rust/numaflow-core/src/source/generator.rs @@ -68,7 +68,7 @@ mod stream_generator { impl StreamGenerator { pub(super) fn new(cfg: GeneratorConfig, batch_size: usize) -> Self { - let mut tick = tokio::time::interval(Duration::from_millis(cfg.duration as u64)); + let mut tick = tokio::time::interval(cfg.duration); tick.set_missed_tick_behavior(MissedTickBehavior::Skip); let mut rpu = cfg.rpu; @@ -169,7 +169,7 @@ mod stream_generator { Message { keys: self.next_key_to_be_fetched(), - value: data, + value: data.into(), offset: Some(offset.clone()), event_time, id: MessageID { @@ -251,7 +251,7 @@ mod stream_generator { content: content.clone(), rpu, jitter: Duration::from_millis(0), - duration: 100, + duration: Duration::from_millis(100), ..Default::default() }; @@ -407,7 +407,7 @@ mod tests { content: content.clone(), rpu, jitter: Duration::from_millis(0), - duration: 100, + duration: Duration::from_millis(100), ..Default::default() }; @@ -437,7 +437,7 @@ mod tests { content: Bytes::new(), rpu, jitter: Duration::from_millis(0), - duration: 100, + duration: Duration::from_millis(100), key_count: 3, msg_size_bytes: 100, ..Default::default() diff --git a/rust/numaflow-core/src/source/user_defined.rs b/rust/numaflow-core/src/source/user_defined.rs index 89a4551899..00a3dd47a3 100644 --- a/rust/numaflow-core/src/source/user_defined.rs +++ b/rust/numaflow-core/src/source/user_defined.rs @@ -1,3 +1,5 @@ +use std::time::Duration; + use numaflow_pb::clients::source; use 
numaflow_pb::clients::source::source_client::SourceClient; use numaflow_pb::clients::source::{ @@ -19,7 +21,7 @@ pub(crate) struct UserDefinedSourceRead { read_tx: mpsc::Sender, resp_stream: Streaming, num_records: usize, - timeout_in_ms: u16, + timeout: Duration, } /// User-Defined Source to operative on custom sources. @@ -33,13 +35,13 @@ pub(crate) struct UserDefinedSourceAck { pub(crate) async fn new_source( client: SourceClient, num_records: usize, - timeout_in_ms: u16, + read_timeout: Duration, ) -> Result<( UserDefinedSourceRead, UserDefinedSourceAck, UserDefinedSourceLagReader, )> { - let src_read = UserDefinedSourceRead::new(client.clone(), num_records, timeout_in_ms).await?; + let src_read = UserDefinedSourceRead::new(client.clone(), num_records, read_timeout).await?; let src_ack = UserDefinedSourceAck::new(client.clone(), num_records).await?; let lag_reader = UserDefinedSourceLagReader::new(client); @@ -50,7 +52,7 @@ impl UserDefinedSourceRead { async fn new( mut client: SourceClient, batch_size: usize, - timeout_in_ms: u16, + timeout: Duration, ) -> Result { let (read_tx, resp_stream) = Self::create_reader(batch_size, &mut client).await?; @@ -58,7 +60,7 @@ impl UserDefinedSourceRead { read_tx, resp_stream, num_records: batch_size, - timeout_in_ms, + timeout, }) } @@ -107,7 +109,7 @@ impl SourceReader for UserDefinedSourceRead { let request = ReadRequest { request: Some(read_request::Request { num_records: self.num_records as u64, - timeout_in_ms: self.timeout_in_ms as u32, + timeout_in_ms: self.timeout.as_millis() as u32, }), handshake: None, }; @@ -319,14 +321,15 @@ mod tests { // wait for the server to start // TODO: flaky - tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + tokio::time::sleep(Duration::from_millis(100)).await; let client = SourceClient::new(create_rpc_channel(sock_file).await.unwrap()); - let (mut src_read, mut src_ack, mut lag_reader) = new_source(client, 5, 1000) - .await - .map_err(|e| panic!("failed to create 
source reader: {:?}", e)) - .unwrap(); + let (mut src_read, mut src_ack, mut lag_reader) = + new_source(client, 5, Duration::from_millis(1000)) + .await + .map_err(|e| panic!("failed to create source reader: {:?}", e)) + .unwrap(); let messages = src_read.read().await.unwrap(); assert_eq!(messages.len(), 5); diff --git a/rust/numaflow-core/src/transformer/user_defined.rs b/rust/numaflow-core/src/transformer/user_defined.rs index 5b6c478f42..bbacfbbfa9 100644 --- a/rust/numaflow-core/src/transformer/user_defined.rs +++ b/rust/numaflow-core/src/transformer/user_defined.rs @@ -175,7 +175,7 @@ impl SourceTransformer { offset: msg_info.offset.to_string(), }, keys: result.keys, - value: result.value, + value: result.value.into(), offset: None, event_time: utc_from_timestamp(result.event_time), headers: msg_info.headers.clone(), diff --git a/rust/numaflow-models/src/models/get_container_req.rs b/rust/numaflow-models/src/models/get_container_req.rs index 2214ca06bd..69863443be 100644 --- a/rust/numaflow-models/src/models/get_container_req.rs +++ b/rust/numaflow-models/src/models/get_container_req.rs @@ -20,6 +20,8 @@ limitations under the License. 
pub struct GetContainerReq { #[serde(rename = "env")] pub env: Vec, + #[serde(rename = "executeRustBinary")] + pub execute_rust_binary: bool, #[serde(rename = "image")] pub image: String, #[serde(rename = "imagePullPolicy")] @@ -35,6 +37,7 @@ pub struct GetContainerReq { impl GetContainerReq { pub fn new( env: Vec, + execute_rust_binary: bool, image: String, image_pull_policy: String, isb_svc_type: String, @@ -43,6 +46,7 @@ impl GetContainerReq { ) -> GetContainerReq { GetContainerReq { env, + execute_rust_binary, image, image_pull_policy, isb_svc_type, diff --git a/rust/rust-toolchain.toml b/rust/rust-toolchain.toml index a5b1f06904..d298aadb69 100644 --- a/rust/rust-toolchain.toml +++ b/rust/rust-toolchain.toml @@ -1,3 +1,3 @@ [toolchain] profile = "default" -channel = "1.80" +channel = "1.81" diff --git a/rust/src/bin/main.rs b/rust/src/bin/main.rs index 792e4f205f..4ffd64ed7a 100644 --- a/rust/src/bin/main.rs +++ b/rust/src/bin/main.rs @@ -15,7 +15,7 @@ async fn main() { .with( tracing_subscriber::EnvFilter::try_from_default_env() // TODO: add a better default based on entry point invocation - // e.g., serving/monovertex might need a different default + // e.g., serving/monovertex might need a different default .unwrap_or_else(|_| "info".into()), ) .with(tracing_subscriber::fmt::layer().with_ansi(false)) @@ -30,11 +30,11 @@ async fn main() { if let Err(e) = servesink::servesink().await { info!("Error running servesink: {}", e); } - } else if args.contains(&"--monovertex".to_string()) { - if let Err(e) = numaflow_core::monovertex::mono_vertex().await { - error!("Error running monovertex: {}", e); + } else if args.contains(&"--rust".to_string()) { + if let Err(e) = numaflow_core::run().await { + error!("Error running rust binary: {}", e); } } else { - error!("Invalid argument. Use --serve, --servesink, or --monovertex."); + error!("Invalid argument. 
Use --serve, --servesink, or --rust."); } } From eca3b0c0be314939422ee18cdc938546d3b9e4e3 Mon Sep 17 00:00:00 2001 From: qianbeibuzui <772369024@qq.com> Date: Mon, 28 Oct 2024 22:14:17 +0800 Subject: [PATCH 130/188] feat:KafkaSource supports KafkaVersion modification (#2191) Signed-off-by: majiantao <772369024@qq.com> Signed-off-by: Vigith Maurice Co-authored-by: Vigith Maurice --- api/json-schema/schema.json | 3 + api/openapi-spec/swagger.json | 3 + .../numaflow.numaproj.io_monovertices.yaml | 2 + .../full/numaflow.numaproj.io_pipelines.yaml | 2 + .../full/numaflow.numaproj.io_vertices.yaml | 2 + config/install.yaml | 6 + config/namespace-install.yaml | 6 + docs/APIs.md | 13 + pkg/apis/numaflow/v1alpha1/generated.pb.go | 1047 +++++++++-------- pkg/apis/numaflow/v1alpha1/generated.proto | 2 + pkg/apis/numaflow/v1alpha1/kafka_source.go | 3 +- .../numaflow/v1alpha1/zz_generated.openapi.go | 6 + pkg/sources/kafka/reader.go | 7 + .../src/models/kafka_source.rs | 3 + 14 files changed, 601 insertions(+), 504 deletions(-) diff --git a/api/json-schema/schema.json b/api/json-schema/schema.json index d881d39f52..990485939c 100644 --- a/api/json-schema/schema.json +++ b/api/json-schema/schema.json @@ -20899,6 +20899,9 @@ "consumerGroup": { "type": "string" }, + "kafkaVersion": { + "type": "string" + }, "sasl": { "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.SASL", "description": "SASL user to configure SASL connection for kafka broker SASL.enable=true default for SASL." 
diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 82730f4440..544cab1601 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -20898,6 +20898,9 @@ "consumerGroup": { "type": "string" }, + "kafkaVersion": { + "type": "string" + }, "sasl": { "description": "SASL user to configure SASL connection for kafka broker SASL.enable=true default for SASL.", "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.SASL" diff --git a/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml b/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml index 6f777379e3..79ffec3919 100644 --- a/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml @@ -4651,6 +4651,8 @@ spec: type: string consumerGroup: type: string + kafkaVersion: + type: string sasl: properties: gssapi: diff --git a/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml b/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml index 52a866eb2a..dda91dbf69 100644 --- a/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml @@ -9331,6 +9331,8 @@ spec: type: string consumerGroup: type: string + kafkaVersion: + type: string sasl: properties: gssapi: diff --git a/config/base/crds/full/numaflow.numaproj.io_vertices.yaml b/config/base/crds/full/numaflow.numaproj.io_vertices.yaml index da1af1d126..0dfad151d6 100644 --- a/config/base/crds/full/numaflow.numaproj.io_vertices.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_vertices.yaml @@ -4119,6 +4119,8 @@ spec: type: string consumerGroup: type: string + kafkaVersion: + type: string sasl: properties: gssapi: diff --git a/config/install.yaml b/config/install.yaml index 7acb3d54a0..11653adc71 100644 --- a/config/install.yaml +++ b/config/install.yaml @@ -7837,6 +7837,8 @@ spec: type: string consumerGroup: type: string + kafkaVersion: + type: string sasl: properties: 
gssapi: @@ -19066,6 +19068,8 @@ spec: type: string consumerGroup: type: string + kafkaVersion: + type: string sasl: properties: gssapi: @@ -25474,6 +25478,8 @@ spec: type: string consumerGroup: type: string + kafkaVersion: + type: string sasl: properties: gssapi: diff --git a/config/namespace-install.yaml b/config/namespace-install.yaml index e97e13ba1d..025269f458 100644 --- a/config/namespace-install.yaml +++ b/config/namespace-install.yaml @@ -7837,6 +7837,8 @@ spec: type: string consumerGroup: type: string + kafkaVersion: + type: string sasl: properties: gssapi: @@ -19066,6 +19068,8 @@ spec: type: string consumerGroup: type: string + kafkaVersion: + type: string sasl: properties: gssapi: @@ -25474,6 +25478,8 @@ spec: type: string consumerGroup: type: string + kafkaVersion: + type: string sasl: properties: gssapi: diff --git a/docs/APIs.md b/docs/APIs.md index 1261395f58..6a5a5fb930 100644 --- a/docs/APIs.md +++ b/docs/APIs.md @@ -5439,6 +5439,19 @@ default for SASL. + + + + +kafkaVersion
string + + + + + + + + diff --git a/pkg/apis/numaflow/v1alpha1/generated.pb.go b/pkg/apis/numaflow/v1alpha1/generated.pb.go index c5f28f89b8..4523f1c02e 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.pb.go +++ b/pkg/apis/numaflow/v1alpha1/generated.pb.go @@ -2880,515 +2880,516 @@ func init() { } var fileDescriptor_9d0d1b17d3865563 = []byte{ - // 8114 bytes of a gzipped FileDescriptorProto + // 8136 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x5d, 0x6c, 0x1c, 0x59, 0x76, 0xde, 0xf4, 0x7f, 0xf7, 0x69, 0xfe, 0xcd, 0x95, 0x46, 0x43, 0x69, 0x35, 0x6a, 0x6d, 0xad, 0x77, 0x57, 0x8e, 0x6d, 0x32, 0x43, 0xef, 0xcc, 0xce, 0xda, 0xde, 0x9d, 0x61, 0x93, 0xa2, 0x44, 0x89, 0x94, 0xb8, 0xa7, 0x49, 0xcd, 0xac, 0x27, 0xde, 0x49, 0xb1, 0xea, 0xb2, 0x59, 0xc3, 0xea, 0xaa, 0xde, 0xaa, 0x6a, 0x4a, 0x1c, 0xc7, 0x58, 0x7b, 0x37, 0xc1, 0x6c, 0x90, 0x04, 0x09, 0xfc, - 0x64, 0x20, 0x70, 0x82, 0x04, 0x01, 0xfc, 0x60, 0x38, 0x0f, 0x41, 0x36, 0x0f, 0x01, 0xf2, 0xe3, + 0x64, 0x20, 0x70, 0x82, 0x04, 0x01, 0xfc, 0x60, 0x38, 0x0f, 0x46, 0x36, 0x0f, 0x01, 0xf2, 0xe3, 0x20, 0x48, 0x36, 0xff, 0x8b, 0x20, 0x40, 0x26, 0x0f, 0x21, 0xb2, 0x0c, 0xf2, 0x90, 0x00, 0x09, - 0x8c, 0x18, 0x89, 0x1d, 0xc1, 0x88, 0x83, 0xfb, 0x57, 0x7f, 0x5d, 0x2d, 0x91, 0x5d, 0x4d, 0x8d, - 0xc6, 0x99, 0xb7, 0xaa, 0x7b, 0xcf, 0xfd, 0xce, 0xad, 0x5b, 0xf7, 0xe7, 0xdc, 0x73, 0xce, 0x3d, - 0x17, 0x6e, 0x75, 0xad, 0x60, 0x7f, 0xb0, 0xbb, 0x60, 0xb8, 0xbd, 0x45, 0x67, 0xd0, 0xd3, 0xfb, - 0x9e, 0xfb, 0x3e, 0x7f, 0xd8, 0xb3, 0xdd, 0x87, 0x8b, 0xfd, 0x83, 0xee, 0xa2, 0xde, 0xb7, 0xfc, - 0x28, 0xe5, 0xf0, 0x55, 0xdd, 0xee, 0xef, 0xeb, 0xaf, 0x2e, 0x76, 0xa9, 0x43, 0x3d, 0x3d, 0xa0, - 0xe6, 0x42, 0xdf, 0x73, 0x03, 0x97, 0x7c, 0x39, 0x02, 0x5a, 0x50, 0x40, 0x0b, 0xaa, 0xd8, 0x42, - 0xff, 0xa0, 0xbb, 0xc0, 0x80, 0xa2, 0x14, 0x05, 0x74, 0xe5, 0xa7, 0x62, 0x35, 0xe8, 0xba, 0x5d, - 0x77, 0x91, 0xe3, 0xed, 0x0e, 0xf6, 0xf8, 0x1b, 0x7f, 0xe1, 0x4f, 0x82, 0xcf, 0x15, 0xed, 0xe0, 
- 0x0d, 0x7f, 0xc1, 0x72, 0x59, 0xb5, 0x16, 0x0d, 0xd7, 0xa3, 0x8b, 0x87, 0x43, 0x75, 0xb9, 0xf2, - 0xa5, 0x88, 0xa6, 0xa7, 0x1b, 0xfb, 0x96, 0x43, 0xbd, 0x23, 0xf5, 0x2d, 0x8b, 0x1e, 0xf5, 0xdd, - 0x81, 0x67, 0xd0, 0x33, 0x95, 0xf2, 0x17, 0x7b, 0x34, 0xd0, 0xb3, 0x78, 0x2d, 0x8e, 0x2a, 0xe5, - 0x0d, 0x9c, 0xc0, 0xea, 0x0d, 0xb3, 0x79, 0xfd, 0x69, 0x05, 0x7c, 0x63, 0x9f, 0xf6, 0xf4, 0xa1, - 0x72, 0x3f, 0x3d, 0xaa, 0xdc, 0x20, 0xb0, 0xec, 0x45, 0xcb, 0x09, 0xfc, 0xc0, 0x4b, 0x17, 0xd2, - 0x7e, 0x1b, 0xe0, 0xc2, 0xf2, 0xae, 0x1f, 0x78, 0xba, 0x11, 0x6c, 0xb9, 0xe6, 0x36, 0xed, 0xf5, - 0x6d, 0x3d, 0xa0, 0xe4, 0x00, 0xea, 0xec, 0x83, 0x4c, 0x3d, 0xd0, 0xe7, 0x0b, 0xd7, 0x0b, 0x37, - 0x9a, 0x4b, 0xcb, 0x0b, 0x63, 0xfe, 0xc0, 0x85, 0x4d, 0x09, 0xd4, 0x9e, 0x3a, 0x39, 0x6e, 0xd5, - 0xd5, 0x1b, 0x86, 0x0c, 0xc8, 0xaf, 0x15, 0x60, 0xca, 0x71, 0x4d, 0xda, 0xa1, 0x36, 0x35, 0x02, - 0xd7, 0x9b, 0x2f, 0x5e, 0x2f, 0xdd, 0x68, 0x2e, 0x7d, 0x73, 0x6c, 0x8e, 0x19, 0x5f, 0xb4, 0x70, - 0x2f, 0xc6, 0xe0, 0xa6, 0x13, 0x78, 0x47, 0xed, 0x8b, 0x3f, 0x38, 0x6e, 0xbd, 0x70, 0x72, 0xdc, - 0x9a, 0x8a, 0x67, 0x61, 0xa2, 0x26, 0x64, 0x07, 0x9a, 0x81, 0x6b, 0xb3, 0x26, 0xb3, 0x5c, 0xc7, - 0x9f, 0x2f, 0xf1, 0x8a, 0x5d, 0x5b, 0x10, 0x4d, 0xcd, 0xd8, 0x2f, 0xb0, 0x3e, 0xb6, 0x70, 0xf8, - 0xea, 0xc2, 0x76, 0x48, 0xd6, 0xbe, 0x20, 0x81, 0x9b, 0x51, 0x9a, 0x8f, 0x71, 0x1c, 0x42, 0x61, - 0xd6, 0xa7, 0xc6, 0xc0, 0xb3, 0x82, 0xa3, 0x15, 0xd7, 0x09, 0xe8, 0xa3, 0x60, 0xbe, 0xcc, 0x5b, - 0xf9, 0x0b, 0x59, 0xd0, 0x5b, 0xae, 0xd9, 0x49, 0x52, 0xb7, 0x2f, 0x9c, 0x1c, 0xb7, 0x66, 0x53, - 0x89, 0x98, 0xc6, 0x24, 0x0e, 0xcc, 0x59, 0x3d, 0xbd, 0x4b, 0xb7, 0x06, 0xb6, 0xdd, 0xa1, 0x86, - 0x47, 0x03, 0x7f, 0xbe, 0xc2, 0x3f, 0xe1, 0x46, 0x16, 0x9f, 0x0d, 0xd7, 0xd0, 0xed, 0xfb, 0xbb, - 0xef, 0x53, 0x23, 0x40, 0xba, 0x47, 0x3d, 0xea, 0x18, 0xb4, 0x3d, 0x2f, 0x3f, 0x66, 0x6e, 0x3d, - 0x85, 0x84, 0x43, 0xd8, 0xe4, 0x16, 0xbc, 0xd8, 0xf7, 0x2c, 0x97, 0x57, 0xc1, 0xd6, 0x7d, 0xff, - 0x9e, 0xde, 0xa3, 0xf3, 0xd5, 0xeb, 
0x85, 0x1b, 0x8d, 0xf6, 0x65, 0x09, 0xf3, 0xe2, 0x56, 0x9a, - 0x00, 0x87, 0xcb, 0x90, 0x1b, 0x50, 0x57, 0x89, 0xf3, 0xb5, 0xeb, 0x85, 0x1b, 0x15, 0xd1, 0x77, - 0x54, 0x59, 0x0c, 0x73, 0xc9, 0x1a, 0xd4, 0xf5, 0xbd, 0x3d, 0xcb, 0x61, 0x94, 0x75, 0xde, 0x84, - 0x57, 0xb3, 0x3e, 0x6d, 0x59, 0xd2, 0x08, 0x1c, 0xf5, 0x86, 0x61, 0x59, 0x72, 0x07, 0x88, 0x4f, - 0xbd, 0x43, 0xcb, 0xa0, 0xcb, 0x86, 0xe1, 0x0e, 0x9c, 0x80, 0xd7, 0xbd, 0xc1, 0xeb, 0x7e, 0x45, - 0xd6, 0x9d, 0x74, 0x86, 0x28, 0x30, 0xa3, 0x14, 0x79, 0x0b, 0xe6, 0xe4, 0x58, 0x8d, 0x5a, 0x01, - 0x38, 0xd2, 0x45, 0xd6, 0x90, 0x98, 0xca, 0xc3, 0x21, 0x6a, 0x62, 0xc2, 0x55, 0x7d, 0x10, 0xb8, - 0x3d, 0x06, 0x99, 0x64, 0xba, 0xed, 0x1e, 0x50, 0x67, 0xbe, 0x79, 0xbd, 0x70, 0xa3, 0xde, 0xbe, - 0x7e, 0x72, 0xdc, 0xba, 0xba, 0xfc, 0x04, 0x3a, 0x7c, 0x22, 0x0a, 0xb9, 0x0f, 0x0d, 0xd3, 0xf1, - 0xb7, 0x5c, 0xdb, 0x32, 0x8e, 0xe6, 0xa7, 0x78, 0x05, 0x5f, 0x95, 0x9f, 0xda, 0x58, 0xbd, 0xd7, - 0x11, 0x19, 0x8f, 0x8f, 0x5b, 0x57, 0x87, 0xa7, 0xd4, 0x85, 0x30, 0x1f, 0x23, 0x0c, 0xb2, 0xc9, - 0x01, 0x57, 0x5c, 0x67, 0xcf, 0xea, 0xce, 0x4f, 0xf3, 0xbf, 0x71, 0x7d, 0x44, 0x87, 0x5e, 0xbd, - 0xd7, 0x11, 0x74, 0xed, 0x69, 0xc9, 0x4e, 0xbc, 0x62, 0x84, 0x40, 0x4c, 0x98, 0x51, 0x93, 0xf1, - 0x8a, 0xad, 0x5b, 0x3d, 0x7f, 0x7e, 0x86, 0x77, 0xde, 0x1f, 0x1b, 0x81, 0x89, 0x71, 0xe2, 0xf6, - 0x25, 0xf9, 0x29, 0x33, 0x89, 0x64, 0x1f, 0x53, 0x98, 0x57, 0xde, 0x84, 0x17, 0x87, 0xe6, 0x06, - 0x32, 0x07, 0xa5, 0x03, 0x7a, 0xc4, 0xa7, 0xbe, 0x06, 0xb2, 0x47, 0x72, 0x11, 0x2a, 0x87, 0xba, - 0x3d, 0xa0, 0xf3, 0x45, 0x9e, 0x26, 0x5e, 0x7e, 0xa6, 0xf8, 0x46, 0x41, 0xfb, 0xeb, 0x25, 0x98, - 0x52, 0x33, 0x4e, 0xc7, 0x72, 0x0e, 0xc8, 0xdb, 0x50, 0xb2, 0xdd, 0xae, 0x9c, 0x37, 0x7f, 0x6e, - 0xec, 0x59, 0x6c, 0xc3, 0xed, 0xb6, 0x6b, 0x27, 0xc7, 0xad, 0xd2, 0x86, 0xdb, 0x45, 0x86, 0x48, - 0x0c, 0xa8, 0x1c, 0xe8, 0x7b, 0x07, 0x3a, 0xaf, 0x43, 0x73, 0xa9, 0x3d, 0x36, 0xf4, 0x5d, 0x86, - 0xc2, 0xea, 0xda, 0x6e, 0x9c, 0x1c, 0xb7, 0x2a, 0xfc, 0x15, 0x05, 0x36, 
0x71, 0xa1, 0xb1, 0x6b, - 0xeb, 0xc6, 0xc1, 0xbe, 0x6b, 0xd3, 0xf9, 0x52, 0x4e, 0x46, 0x6d, 0x85, 0x24, 0x7e, 0x73, 0xf8, - 0x8a, 0x11, 0x0f, 0x62, 0x40, 0x75, 0x60, 0xfa, 0x96, 0x73, 0x20, 0xe7, 0xc0, 0x37, 0xc7, 0xe6, - 0xb6, 0xb3, 0xca, 0xbf, 0x09, 0x4e, 0x8e, 0x5b, 0x55, 0xf1, 0x8c, 0x12, 0x5a, 0xfb, 0xfd, 0x29, - 0x98, 0x51, 0x3f, 0xe9, 0x01, 0xf5, 0x02, 0xfa, 0x88, 0x5c, 0x87, 0xb2, 0xc3, 0x86, 0x26, 0xff, - 0xc9, 0xed, 0x29, 0xd9, 0x5d, 0xca, 0x7c, 0x48, 0xf2, 0x1c, 0x56, 0x33, 0xd1, 0x55, 0x64, 0x83, - 0x8f, 0x5f, 0xb3, 0x0e, 0x87, 0x11, 0x35, 0x13, 0xcf, 0x28, 0xa1, 0xc9, 0xbb, 0x50, 0xe6, 0x1f, - 0x2f, 0x9a, 0xfa, 0xab, 0xe3, 0xb3, 0x60, 0x9f, 0x5e, 0x67, 0x5f, 0xc0, 0x3f, 0x9c, 0x83, 0xb2, - 0xae, 0x38, 0x30, 0xf7, 0x64, 0xc3, 0xfe, 0x5c, 0x8e, 0x86, 0x5d, 0x13, 0x5d, 0x71, 0x67, 0x75, - 0x0d, 0x19, 0x22, 0xf9, 0x8b, 0x05, 0x78, 0xd1, 0x70, 0x9d, 0x40, 0x67, 0x72, 0x86, 0x5a, 0x64, - 0xe7, 0x2b, 0x9c, 0xcf, 0x9d, 0xb1, 0xf9, 0xac, 0xa4, 0x11, 0xdb, 0x2f, 0xb1, 0x35, 0x63, 0x28, - 0x19, 0x87, 0x79, 0x93, 0xbf, 0x5c, 0x80, 0x97, 0xd8, 0x5c, 0x3e, 0x44, 0xcc, 0x57, 0xa0, 0xc9, - 0xd6, 0xea, 0xf2, 0xc9, 0x71, 0xeb, 0xa5, 0xf5, 0x2c, 0x66, 0x98, 0x5d, 0x07, 0x56, 0xbb, 0x0b, - 0xfa, 0xb0, 0x58, 0xc2, 0x57, 0xb7, 0xe6, 0xd2, 0xc6, 0x24, 0x45, 0x9d, 0xf6, 0x67, 0x64, 0x57, - 0xce, 0x92, 0xec, 0x30, 0xab, 0x16, 0xe4, 0x26, 0xd4, 0x0e, 0x5d, 0x7b, 0xd0, 0xa3, 0xfe, 0x7c, - 0x9d, 0x4f, 0xb1, 0x57, 0xb2, 0xa6, 0xd8, 0x07, 0x9c, 0xa4, 0x3d, 0x2b, 0xe1, 0x6b, 0xe2, 0xdd, - 0x47, 0x55, 0x96, 0x58, 0x50, 0xb5, 0xad, 0x9e, 0x15, 0xf8, 0x7c, 0xe1, 0x6c, 0x2e, 0xdd, 0x1c, - 0xfb, 0xb3, 0xc4, 0x10, 0xdd, 0xe0, 0x60, 0x62, 0xd4, 0x88, 0x67, 0x94, 0x0c, 0xd8, 0x54, 0xe8, - 0x1b, 0xba, 0x2d, 0x16, 0xd6, 0xe6, 0xd2, 0xd7, 0xc6, 0x1f, 0x36, 0x0c, 0xa5, 0x3d, 0x2d, 0xbf, - 0xa9, 0xc2, 0x5f, 0x51, 0x60, 0x93, 0x5f, 0x80, 0x99, 0xc4, 0xdf, 0xf4, 0xe7, 0x9b, 0xbc, 0x75, - 0x5e, 0xc9, 0x6a, 0x9d, 0x90, 0x2a, 0x5a, 0x79, 0x12, 0x3d, 0xc4, 0xc7, 0x14, 0x18, 0xb9, 0x0b, - 0x75, 0xdf, 
0x32, 0xa9, 0xa1, 0x7b, 0xfe, 0xfc, 0xd4, 0x69, 0x80, 0xe7, 0x24, 0x70, 0xbd, 0x23, - 0x8b, 0x61, 0x08, 0x40, 0x16, 0x00, 0xfa, 0xba, 0x17, 0x58, 0x42, 0x50, 0x9d, 0xe6, 0x42, 0xd3, - 0xcc, 0xc9, 0x71, 0x0b, 0xb6, 0xc2, 0x54, 0x8c, 0x51, 0x30, 0x7a, 0x56, 0x76, 0xdd, 0xe9, 0x0f, - 0x02, 0xb1, 0xb0, 0x36, 0x04, 0x7d, 0x27, 0x4c, 0xc5, 0x18, 0x05, 0xf9, 0xad, 0x02, 0x7c, 0x26, - 0x7a, 0x1d, 0x1e, 0x64, 0xb3, 0x13, 0x1f, 0x64, 0xad, 0x93, 0xe3, 0xd6, 0x67, 0x3a, 0xa3, 0x59, - 0xe2, 0x93, 0xea, 0x43, 0x3e, 0x2c, 0xc0, 0xcc, 0xa0, 0x6f, 0xea, 0x01, 0xed, 0x04, 0x6c, 0xc7, - 0xd3, 0x3d, 0x9a, 0x9f, 0xe3, 0x55, 0xbc, 0x35, 0xfe, 0x2c, 0x98, 0x80, 0x8b, 0x7e, 0x73, 0x32, - 0x1d, 0x53, 0x6c, 0xb5, 0xb7, 0x61, 0x7a, 0x79, 0x10, 0xec, 0xbb, 0x9e, 0xf5, 0x01, 0x17, 0xff, - 0xc9, 0x1a, 0x54, 0x02, 0x2e, 0xc6, 0x09, 0x09, 0xe1, 0xf3, 0x59, 0x3f, 0x5d, 0x88, 0xd4, 0x77, - 0xe9, 0x91, 0x92, 0x4b, 0xc4, 0x4a, 0x2d, 0xc4, 0x3a, 0x51, 0x5c, 0xfb, 0xd3, 0x05, 0xa8, 0xb5, - 0x75, 0xe3, 0xc0, 0xdd, 0xdb, 0x23, 0xef, 0x40, 0xdd, 0x72, 0x02, 0xea, 0x1d, 0xea, 0xb6, 0x84, - 0x5d, 0x88, 0xc1, 0x86, 0x1b, 0xc2, 0xe8, 0xf3, 0xd8, 0xee, 0x8b, 0x31, 0x5a, 0x1d, 0xc8, 0x5d, - 0x0b, 0x97, 0x8c, 0xd7, 0x25, 0x06, 0x86, 0x68, 0xa4, 0x05, 0x15, 0x3f, 0xa0, 0x7d, 0x9f, 0xaf, - 0x81, 0xd3, 0xa2, 0x1a, 0x1d, 0x96, 0x80, 0x22, 0x5d, 0xfb, 0x6b, 0x05, 0x68, 0xb4, 0x75, 0xdf, - 0x32, 0xd8, 0x57, 0x92, 0x15, 0x28, 0x0f, 0x7c, 0xea, 0x9d, 0xed, 0xdb, 0xf8, 0xb2, 0xb5, 0xe3, - 0x53, 0x0f, 0x79, 0x61, 0x72, 0x1f, 0xea, 0x7d, 0xdd, 0xf7, 0x1f, 0xba, 0x9e, 0x29, 0x97, 0xde, - 0x53, 0x02, 0x89, 0x6d, 0x82, 0x2c, 0x8a, 0x21, 0x88, 0xd6, 0x84, 0x48, 0xf6, 0xd0, 0x7e, 0xb7, - 0x00, 0x17, 0xda, 0x83, 0xbd, 0x3d, 0xea, 0x49, 0xa9, 0x58, 0xca, 0x9b, 0x14, 0x2a, 0x1e, 0x35, - 0x2d, 0x5f, 0xd6, 0x7d, 0x75, 0xec, 0x8e, 0x82, 0x0c, 0x45, 0x8a, 0xb7, 0xbc, 0xbd, 0x78, 0x02, - 0x0a, 0x74, 0x32, 0x80, 0xc6, 0xfb, 0x94, 0xed, 0xc6, 0xa9, 0xde, 0x93, 0x5f, 0x77, 0x7b, 0x6c, - 0x56, 0x77, 0x68, 0xd0, 0xe1, 0x48, 0x71, 0x69, 
0x3a, 0x4c, 0xc4, 0x88, 0x93, 0xf6, 0xdb, 0x15, - 0x98, 0x5a, 0x71, 0x7b, 0xbb, 0x96, 0x43, 0xcd, 0x9b, 0x66, 0x97, 0x92, 0xf7, 0xa0, 0x4c, 0xcd, - 0x2e, 0x95, 0x5f, 0x3b, 0xbe, 0xe0, 0xc1, 0xc0, 0x22, 0xf1, 0x89, 0xbd, 0x21, 0x07, 0x26, 0x1b, - 0x30, 0xb3, 0xe7, 0xb9, 0x3d, 0x31, 0x97, 0x6f, 0x1f, 0xf5, 0xa5, 0xec, 0xdc, 0xfe, 0x31, 0x35, - 0x70, 0xd6, 0x12, 0xb9, 0x8f, 0x8f, 0x5b, 0x10, 0xbd, 0x61, 0xaa, 0x2c, 0x79, 0x07, 0xe6, 0xa3, - 0x94, 0x70, 0x52, 0x5b, 0x61, 0xdb, 0x19, 0x2e, 0x3b, 0x55, 0xda, 0x57, 0x4f, 0x8e, 0x5b, 0xf3, - 0x6b, 0x23, 0x68, 0x70, 0x64, 0x69, 0x36, 0x55, 0xcc, 0x45, 0x99, 0x62, 0xa1, 0x91, 0x22, 0xd3, - 0x84, 0x56, 0x30, 0xbe, 0xef, 0x5b, 0x4b, 0xb1, 0xc0, 0x21, 0xa6, 0x64, 0x0d, 0xa6, 0x02, 0x37, - 0xd6, 0x5e, 0x15, 0xde, 0x5e, 0x9a, 0x52, 0x54, 0x6c, 0xbb, 0x23, 0x5b, 0x2b, 0x51, 0x8e, 0x20, - 0x5c, 0x52, 0xef, 0xa9, 0x96, 0xaa, 0xf2, 0x96, 0xba, 0x72, 0x72, 0xdc, 0xba, 0xb4, 0x9d, 0x49, - 0x81, 0x23, 0x4a, 0x92, 0x5f, 0x29, 0xc0, 0x8c, 0xca, 0x92, 0x6d, 0x54, 0x9b, 0x64, 0x1b, 0x11, - 0xd6, 0x23, 0xb6, 0x13, 0x0c, 0x30, 0xc5, 0x50, 0xfb, 0x7e, 0x0d, 0x1a, 0xe1, 0x54, 0x4f, 0x3e, - 0x07, 0x15, 0xae, 0x82, 0x90, 0x12, 0x7c, 0xb8, 0x86, 0x73, 0x4d, 0x05, 0x8a, 0x3c, 0xf2, 0x79, - 0xa8, 0x19, 0x6e, 0xaf, 0xa7, 0x3b, 0x26, 0x57, 0x2b, 0x35, 0xda, 0x4d, 0x26, 0xba, 0xac, 0x88, - 0x24, 0x54, 0x79, 0xe4, 0x2a, 0x94, 0x75, 0xaf, 0x2b, 0x34, 0x3c, 0x0d, 0x31, 0x1f, 0x2d, 0x7b, - 0x5d, 0x1f, 0x79, 0x2a, 0xf9, 0x0a, 0x94, 0xa8, 0x73, 0x38, 0x5f, 0x1e, 0x2d, 0x1b, 0xdd, 0x74, - 0x0e, 0x1f, 0xe8, 0x5e, 0xbb, 0x29, 0xeb, 0x50, 0xba, 0xe9, 0x1c, 0x22, 0x2b, 0x43, 0x36, 0xa0, - 0x46, 0x9d, 0x43, 0xf6, 0xef, 0xa5, 0xea, 0xe5, 0xb3, 0x23, 0x8a, 0x33, 0x12, 0xb9, 0x4d, 0x08, - 0x25, 0x2c, 0x99, 0x8c, 0x0a, 0x82, 0x7c, 0x03, 0xa6, 0x84, 0xb0, 0xb5, 0xc9, 0xfe, 0x89, 0x3f, - 0x5f, 0xe5, 0x90, 0xad, 0xd1, 0xd2, 0x1a, 0xa7, 0x8b, 0x54, 0x5d, 0xb1, 0x44, 0x1f, 0x13, 0x50, - 0xe4, 0x1b, 0xd0, 0x50, 0x3b, 0x63, 0xf5, 0x67, 0x33, 0xb5, 0x44, 0x6a, 0x3b, 0x8d, 
0xf4, 0x5b, - 0x03, 0xcb, 0xa3, 0x3d, 0xea, 0x04, 0x7e, 0xfb, 0x45, 0xa5, 0x37, 0x50, 0xb9, 0x3e, 0x46, 0x68, - 0x64, 0x77, 0x58, 0xdd, 0x25, 0x74, 0x35, 0x9f, 0x1b, 0x31, 0xab, 0x8f, 0xa1, 0xeb, 0xfa, 0x26, - 0xcc, 0x86, 0xfa, 0x28, 0xa9, 0xd2, 0x10, 0xda, 0x9b, 0x2f, 0xb1, 0xe2, 0xeb, 0xc9, 0xac, 0xc7, - 0xc7, 0xad, 0x57, 0x32, 0x94, 0x1a, 0x11, 0x01, 0xa6, 0xc1, 0xc8, 0x07, 0x30, 0xe3, 0x51, 0xdd, - 0xb4, 0x1c, 0xea, 0xfb, 0x5b, 0x9e, 0xbb, 0x9b, 0x5f, 0xf2, 0xe4, 0x28, 0xa2, 0xdb, 0x63, 0x02, - 0x19, 0x53, 0x9c, 0xc8, 0x43, 0x98, 0xb6, 0xad, 0x43, 0x1a, 0xb1, 0x6e, 0x4e, 0x84, 0xf5, 0x8b, - 0x27, 0xc7, 0xad, 0xe9, 0x8d, 0x38, 0x30, 0x26, 0xf9, 0x30, 0x49, 0xa5, 0xef, 0x7a, 0x81, 0x12, - 0x4f, 0x3f, 0xfb, 0x44, 0xf1, 0x74, 0xcb, 0xf5, 0x82, 0x68, 0x10, 0xb2, 0x37, 0x1f, 0x45, 0x71, - 0xed, 0x6f, 0x57, 0x60, 0x78, 0x13, 0x97, 0xec, 0x71, 0x85, 0x49, 0xf7, 0xb8, 0x74, 0x6f, 0x10, - 0x6b, 0xcf, 0x1b, 0xb2, 0xd8, 0x04, 0x7a, 0x44, 0x46, 0xaf, 0x2e, 0x4d, 0xba, 0x57, 0x3f, 0x37, - 0x13, 0xcf, 0x70, 0xf7, 0xaf, 0x7e, 0x7c, 0xdd, 0xbf, 0xf6, 0x6c, 0xba, 0xbf, 0xf6, 0xbd, 0x32, - 0xcc, 0xac, 0xea, 0xb4, 0xe7, 0x3a, 0x4f, 0xdd, 0xc7, 0x17, 0x9e, 0x8b, 0x7d, 0xfc, 0x0d, 0xa8, - 0x7b, 0xb4, 0x6f, 0x5b, 0x86, 0x2e, 0xc4, 0x75, 0xa9, 0x37, 0x47, 0x99, 0x86, 0x61, 0xee, 0x08, - 0xfd, 0x4d, 0xe9, 0xb9, 0xd4, 0xdf, 0x94, 0x3f, 0x7e, 0xfd, 0x8d, 0xf6, 0x2b, 0x45, 0xe0, 0xa2, - 0x2d, 0xb9, 0x0e, 0x65, 0x26, 0xb6, 0xa5, 0xb5, 0x86, 0x7c, 0xb4, 0xf0, 0x1c, 0x72, 0x05, 0x8a, - 0x81, 0x2b, 0xa7, 0x1b, 0x90, 0xf9, 0xc5, 0x6d, 0x17, 0x8b, 0x81, 0x4b, 0x3e, 0x00, 0x30, 0x5c, - 0xc7, 0xb4, 0x94, 0x39, 0x29, 0xdf, 0x87, 0xad, 0xb9, 0xde, 0x43, 0xdd, 0x33, 0x57, 0x42, 0x44, - 0xb1, 0x83, 0x8f, 0xde, 0x31, 0xc6, 0x8d, 0xbc, 0x09, 0x55, 0xd7, 0x59, 0x1b, 0xd8, 0x36, 0x6f, - 0xd0, 0x46, 0xfb, 0x8b, 0x27, 0xc7, 0xad, 0xea, 0x7d, 0x9e, 0xf2, 0xf8, 0xb8, 0x75, 0x59, 0xec, - 0x88, 0xd8, 0xdb, 0xdb, 0x9e, 0x15, 0x58, 0x4e, 0x37, 0xdc, 0xd0, 0xca, 0x62, 0xda, 0xaf, 0x16, - 0xa0, 0xb9, 0x66, 0x3d, 
0xa2, 0xe6, 0xdb, 0x96, 0x63, 0xba, 0x0f, 0x09, 0x42, 0xd5, 0xa6, 0x4e, - 0x37, 0xd8, 0x1f, 0x73, 0xc7, 0x29, 0xf4, 0x3a, 0x1c, 0x01, 0x25, 0x12, 0x59, 0x84, 0x86, 0xd8, - 0xaf, 0x58, 0x4e, 0x97, 0xb7, 0x61, 0x3d, 0x9a, 0xe9, 0x3b, 0x2a, 0x03, 0x23, 0x1a, 0xed, 0x08, - 0x5e, 0x1c, 0x6a, 0x06, 0x62, 0x42, 0x39, 0xd0, 0xbb, 0x6a, 0x51, 0x59, 0x1b, 0xbb, 0x81, 0xb7, - 0xf5, 0x6e, 0xac, 0x71, 0xb9, 0x54, 0xb8, 0xad, 0x33, 0xa9, 0x90, 0xa1, 0x6b, 0x7f, 0x50, 0x80, - 0xfa, 0xda, 0xc0, 0x31, 0xf8, 0xa6, 0xfe, 0xe9, 0xda, 0x64, 0x25, 0x62, 0x16, 0x33, 0x45, 0xcc, - 0x01, 0x54, 0x0f, 0x1e, 0x86, 0x22, 0x68, 0x73, 0x69, 0x73, 0xfc, 0x5e, 0x21, 0xab, 0xb4, 0x70, - 0x97, 0xe3, 0x09, 0x63, 0xe7, 0x8c, 0xac, 0x50, 0xf5, 0xee, 0xdb, 0x9c, 0xa9, 0x64, 0x76, 0xe5, - 0x2b, 0xd0, 0x8c, 0x91, 0x9d, 0xc9, 0xee, 0xf1, 0x77, 0xca, 0x50, 0xbd, 0xd5, 0xe9, 0x2c, 0x6f, - 0xad, 0x93, 0xd7, 0xa0, 0x29, 0xed, 0x60, 0xf7, 0xa2, 0x36, 0x08, 0xcd, 0xa0, 0x9d, 0x28, 0x0b, - 0xe3, 0x74, 0x4c, 0x80, 0xf7, 0xa8, 0x6e, 0xf7, 0xe4, 0x60, 0x09, 0x65, 0x07, 0x64, 0x89, 0x28, - 0xf2, 0x88, 0x0e, 0x33, 0x03, 0x9f, 0x7a, 0xac, 0x09, 0xc5, 0x7e, 0x5f, 0x0e, 0x9b, 0x53, 0x6a, - 0x04, 0xf8, 0x02, 0xb3, 0x93, 0x00, 0xc0, 0x14, 0x20, 0x79, 0x03, 0xea, 0xfa, 0x20, 0xd8, 0xe7, - 0x5b, 0x2e, 0x31, 0x36, 0xae, 0x72, 0x33, 0xa1, 0x4c, 0x7b, 0x7c, 0xdc, 0x9a, 0xba, 0x8b, 0xed, - 0xd7, 0xd4, 0x3b, 0x86, 0xd4, 0xac, 0x72, 0x4a, 0xc7, 0x20, 0x2b, 0x57, 0x39, 0x73, 0xe5, 0xb6, - 0x12, 0x00, 0x98, 0x02, 0x24, 0xef, 0xc2, 0xd4, 0x01, 0x3d, 0x0a, 0xf4, 0x5d, 0xc9, 0xa0, 0x7a, - 0x16, 0x06, 0x73, 0x4c, 0xe8, 0xbf, 0x1b, 0x2b, 0x8e, 0x09, 0x30, 0xe2, 0xc3, 0xc5, 0x03, 0xea, - 0xed, 0x52, 0xcf, 0x95, 0xfa, 0x0a, 0xc9, 0xa4, 0x76, 0x16, 0x26, 0xf3, 0x27, 0xc7, 0xad, 0x8b, - 0x77, 0x33, 0x60, 0x30, 0x13, 0x5c, 0xfb, 0x3f, 0x45, 0x98, 0xbd, 0x25, 0x1c, 0x11, 0x5c, 0x4f, - 0x48, 0x1e, 0xe4, 0x32, 0x94, 0xbc, 0xfe, 0x80, 0xf7, 0x9c, 0x92, 0x30, 0x35, 0xe0, 0xd6, 0x0e, - 0xb2, 0x34, 0xf2, 0x0e, 0xd4, 0x4d, 0x39, 0x65, 0x48, 0x75, 
0xc9, 0x58, 0xaa, 0x2d, 0xf5, 0x86, - 0x21, 0x1a, 0xdb, 0x1b, 0xf6, 0xfc, 0x6e, 0xc7, 0xfa, 0x80, 0x4a, 0x0d, 0x02, 0xdf, 0x1b, 0x6e, - 0x8a, 0x24, 0x54, 0x79, 0x6c, 0x55, 0x3d, 0xa0, 0x47, 0x62, 0xff, 0x5c, 0x8e, 0x56, 0xd5, 0xbb, - 0x32, 0x0d, 0xc3, 0x5c, 0xd2, 0x52, 0x83, 0x85, 0xf5, 0x82, 0xb2, 0xd0, 0xfd, 0x3c, 0x60, 0x09, - 0x72, 0xdc, 0xb0, 0x29, 0xf3, 0x7d, 0x2b, 0x08, 0xa8, 0x27, 0x7f, 0xe3, 0x58, 0x53, 0xe6, 0x1d, - 0x8e, 0x80, 0x12, 0x89, 0xfc, 0x04, 0x34, 0x38, 0x78, 0xdb, 0x76, 0x77, 0xf9, 0x8f, 0x6b, 0x08, - 0x2d, 0xd0, 0x03, 0x95, 0x88, 0x51, 0xbe, 0xf6, 0x87, 0x45, 0xb8, 0x74, 0x8b, 0x06, 0x42, 0xaa, - 0x59, 0xa5, 0x7d, 0xdb, 0x3d, 0x62, 0xf2, 0x34, 0xd2, 0x6f, 0x91, 0xb7, 0x00, 0x2c, 0x7f, 0xb7, - 0x73, 0x68, 0xf0, 0x71, 0x20, 0xc6, 0xf0, 0x75, 0x39, 0x24, 0x61, 0xbd, 0xd3, 0x96, 0x39, 0x8f, - 0x13, 0x6f, 0x18, 0x2b, 0x13, 0x6d, 0xc8, 0x8b, 0x4f, 0xd8, 0x90, 0x77, 0x00, 0xfa, 0x91, 0x54, - 0x5e, 0xe2, 0x94, 0x3f, 0xad, 0xd8, 0x9c, 0x45, 0x20, 0x8f, 0xc1, 0xe4, 0x91, 0x93, 0x1d, 0x98, - 0x33, 0xe9, 0x9e, 0x3e, 0xb0, 0x83, 0x70, 0x27, 0x21, 0x07, 0xf1, 0xe9, 0x37, 0x23, 0xa1, 0x93, - 0xc4, 0x6a, 0x0a, 0x09, 0x87, 0xb0, 0xb5, 0xbf, 0x5b, 0x82, 0x2b, 0xb7, 0x68, 0x10, 0xea, 0xe8, - 0xe4, 0xec, 0xd8, 0xe9, 0x53, 0x83, 0xfd, 0x85, 0x0f, 0x0b, 0x50, 0xb5, 0xf5, 0x5d, 0x6a, 0xb3, - 0xd5, 0x8b, 0x7d, 0xcd, 0x7b, 0x63, 0x2f, 0x04, 0xa3, 0xb9, 0x2c, 0x6c, 0x70, 0x0e, 0xa9, 0xa5, - 0x41, 0x24, 0xa2, 0x64, 0xcf, 0x26, 0x75, 0xc3, 0x1e, 0xf8, 0x81, 0xd8, 0xd9, 0x49, 0x79, 0x32, - 0x9c, 0xd4, 0x57, 0xa2, 0x2c, 0x8c, 0xd3, 0x91, 0x25, 0x00, 0xc3, 0xb6, 0xa8, 0x13, 0xf0, 0x52, - 0x62, 0x5c, 0x11, 0xf5, 0x7f, 0x57, 0xc2, 0x1c, 0x8c, 0x51, 0x31, 0x56, 0x3d, 0xd7, 0xb1, 0x02, - 0x57, 0xb0, 0x2a, 0x27, 0x59, 0x6d, 0x46, 0x59, 0x18, 0xa7, 0xe3, 0xc5, 0x68, 0xe0, 0x59, 0x86, - 0xcf, 0x8b, 0x55, 0x52, 0xc5, 0xa2, 0x2c, 0x8c, 0xd3, 0xb1, 0x35, 0x2f, 0xf6, 0xfd, 0x67, 0x5a, - 0xf3, 0x7e, 0xb3, 0x01, 0xd7, 0x12, 0xcd, 0x1a, 0xe8, 0x01, 0xdd, 0x1b, 0xd8, 0x1d, 0x1a, 0xa8, - 
0x1f, 0x38, 0xe6, 0x5a, 0xf8, 0xe7, 0xa2, 0xff, 0x2e, 0xdc, 0x9f, 0x8c, 0xc9, 0xfc, 0xf7, 0xa1, - 0x0a, 0x9e, 0xea, 0xdf, 0x2f, 0x42, 0xc3, 0xd1, 0x03, 0x9f, 0x0f, 0x5c, 0x39, 0x46, 0x43, 0x31, - 0xec, 0x9e, 0xca, 0xc0, 0x88, 0x86, 0x6c, 0xc1, 0x45, 0xd9, 0xc4, 0x37, 0x1f, 0xb1, 0x3d, 0x3f, - 0xf5, 0x44, 0x59, 0xb9, 0x9c, 0xca, 0xb2, 0x17, 0x37, 0x33, 0x68, 0x30, 0xb3, 0x24, 0xd9, 0x84, - 0x0b, 0x86, 0x70, 0x09, 0xa1, 0xb6, 0xab, 0x9b, 0x0a, 0x50, 0xa8, 0x44, 0xc3, 0xad, 0xd1, 0xca, - 0x30, 0x09, 0x66, 0x95, 0x4b, 0xf7, 0xe6, 0xea, 0x58, 0xbd, 0xb9, 0x36, 0x4e, 0x6f, 0xae, 0x8f, - 0xd7, 0x9b, 0x1b, 0xa7, 0xeb, 0xcd, 0xac, 0xe5, 0x59, 0x3f, 0xa2, 0x1e, 0x13, 0x4f, 0xc4, 0x0a, - 0x1b, 0xf3, 0x38, 0x0a, 0x5b, 0xbe, 0x93, 0x41, 0x83, 0x99, 0x25, 0xc9, 0x2e, 0x5c, 0x11, 0xe9, - 0x37, 0x1d, 0xc3, 0x3b, 0xea, 0xb3, 0x85, 0x27, 0x86, 0xdb, 0x4c, 0xe8, 0xa4, 0xaf, 0x74, 0x46, - 0x52, 0xe2, 0x13, 0x50, 0xc8, 0xcf, 0xc2, 0xb4, 0xf8, 0x4b, 0x9b, 0x7a, 0x9f, 0xc3, 0x0a, 0xff, - 0xa3, 0x97, 0x24, 0xec, 0xf4, 0x4a, 0x3c, 0x13, 0x93, 0xb4, 0x64, 0x19, 0x66, 0xfb, 0x87, 0x06, - 0x7b, 0x5c, 0xdf, 0xbb, 0x47, 0xa9, 0x49, 0x4d, 0x6e, 0xf0, 0x6c, 0xb4, 0x5f, 0x56, 0xda, 0x9d, - 0xad, 0x64, 0x36, 0xa6, 0xe9, 0xc9, 0x1b, 0x30, 0xe5, 0x07, 0xba, 0x17, 0x48, 0x45, 0xf0, 0xfc, - 0x8c, 0xf0, 0xcf, 0x52, 0x7a, 0xd2, 0x4e, 0x2c, 0x0f, 0x13, 0x94, 0x99, 0xeb, 0xc5, 0xec, 0xf9, - 0xad, 0x17, 0x79, 0x66, 0xab, 0x7f, 0x5a, 0x84, 0xeb, 0xb7, 0x68, 0xb0, 0xe9, 0x3a, 0x52, 0x8d, - 0x9e, 0xb5, 0xec, 0x9f, 0x4a, 0x8b, 0x9e, 0x5c, 0xb4, 0x8b, 0x13, 0x5d, 0xb4, 0x4b, 0x13, 0x5a, - 0xb4, 0xcb, 0xe7, 0xb8, 0x68, 0xff, 0xfd, 0x22, 0xbc, 0x9c, 0x68, 0xc9, 0x2d, 0xd7, 0x54, 0x13, - 0xfe, 0xa7, 0x0d, 0x78, 0x8a, 0x06, 0x7c, 0x2c, 0xe4, 0x4e, 0x6e, 0x08, 0x4d, 0x49, 0x3c, 0xdf, - 0x4d, 0x4b, 0x3c, 0xef, 0xe6, 0x59, 0xf9, 0x32, 0x38, 0x9c, 0x6a, 0xc5, 0xbb, 0x03, 0xc4, 0x93, - 0x66, 0xdb, 0x48, 0x9d, 0x2d, 0x85, 0x9e, 0xd0, 0x01, 0x14, 0x87, 0x28, 0x30, 0xa3, 0x14, 0xe9, - 0xc0, 0x4b, 0x3e, 0x75, 0x02, 0xcb, 
0xa1, 0x76, 0x12, 0x4e, 0x48, 0x43, 0xaf, 0x48, 0xb8, 0x97, - 0x3a, 0x59, 0x44, 0x98, 0x5d, 0x36, 0xcf, 0x3c, 0xf0, 0x2f, 0x81, 0x8b, 0x9c, 0xa2, 0x69, 0x26, - 0x26, 0xb1, 0x7c, 0x98, 0x96, 0x58, 0xde, 0xcb, 0xff, 0xdf, 0xc6, 0x93, 0x56, 0x96, 0x00, 0xf8, - 0x5f, 0x88, 0x8b, 0x2b, 0xe1, 0x22, 0x8d, 0x61, 0x0e, 0xc6, 0xa8, 0xd8, 0x02, 0xa4, 0xda, 0x39, - 0x2e, 0xa9, 0x84, 0x0b, 0x50, 0x27, 0x9e, 0x89, 0x49, 0xda, 0x91, 0xd2, 0x4e, 0x65, 0x6c, 0x69, - 0xe7, 0x0e, 0x90, 0x84, 0xe2, 0x51, 0xe0, 0x55, 0x93, 0xfe, 0xc7, 0xeb, 0x43, 0x14, 0x98, 0x51, - 0x6a, 0x44, 0x57, 0xae, 0x4d, 0xb6, 0x2b, 0xd7, 0xc7, 0xef, 0xca, 0xe4, 0x3d, 0xb8, 0xcc, 0x59, - 0xc9, 0xf6, 0x49, 0x02, 0x0b, 0xb9, 0xe7, 0xb3, 0x12, 0xf8, 0x32, 0x8e, 0x22, 0xc4, 0xd1, 0x18, - 0xec, 0xff, 0x18, 0x1e, 0x35, 0x19, 0x73, 0xdd, 0x1e, 0x2d, 0x13, 0xad, 0x64, 0xd0, 0x60, 0x66, - 0x49, 0xd6, 0xc5, 0x02, 0xd6, 0x0d, 0xf5, 0x5d, 0x9b, 0x9a, 0xd2, 0xff, 0x3a, 0xec, 0x62, 0xdb, - 0x1b, 0x1d, 0x99, 0x83, 0x31, 0xaa, 0x2c, 0x31, 0x65, 0xea, 0x8c, 0x62, 0xca, 0x2d, 0xae, 0xa5, - 0xdf, 0x4b, 0x48, 0x43, 0x52, 0xd6, 0x09, 0x3d, 0xea, 0x57, 0xd2, 0x04, 0x38, 0x5c, 0x86, 0x4b, - 0x89, 0x86, 0x67, 0xf5, 0x03, 0x3f, 0x89, 0x35, 0x93, 0x92, 0x12, 0x33, 0x68, 0x30, 0xb3, 0x24, - 0x93, 0xcf, 0xf7, 0xa9, 0x6e, 0x07, 0xfb, 0x49, 0xc0, 0xd9, 0xa4, 0x7c, 0x7e, 0x7b, 0x98, 0x04, - 0xb3, 0xca, 0x65, 0x2e, 0x48, 0x73, 0xcf, 0xa7, 0x58, 0xf5, 0x9d, 0x12, 0x5c, 0xbe, 0x45, 0x83, - 0xd0, 0x35, 0xed, 0x53, 0x35, 0xca, 0xc7, 0xa0, 0x46, 0xf9, 0x8d, 0x0a, 0x5c, 0xb8, 0x45, 0x83, - 0x21, 0x69, 0xec, 0xff, 0xd3, 0xe6, 0xdf, 0x84, 0x0b, 0x91, 0x37, 0x64, 0x27, 0x70, 0x3d, 0xb1, - 0x96, 0xa7, 0x76, 0xcb, 0x9d, 0x61, 0x12, 0xcc, 0x2a, 0x47, 0xbe, 0x01, 0x2f, 0xf3, 0xa5, 0xde, - 0xe9, 0x0a, 0xfd, 0xac, 0x50, 0x26, 0xc4, 0xce, 0xf3, 0xb4, 0x24, 0xe4, 0xcb, 0x9d, 0x6c, 0x32, - 0x1c, 0x55, 0x9e, 0x7c, 0x1b, 0xa6, 0xfa, 0x56, 0x9f, 0xda, 0x96, 0xc3, 0xe5, 0xb3, 0xdc, 0x4e, - 0x44, 0x5b, 0x31, 0xb0, 0x68, 0x03, 0x17, 0x4f, 0xc5, 0x04, 0xc3, 0xcc, 
0x9e, 0x5a, 0x3f, 0xc7, - 0x9e, 0xfa, 0x3f, 0x8b, 0x50, 0xbb, 0xe5, 0xb9, 0x83, 0x7e, 0xfb, 0x88, 0x74, 0xa1, 0xfa, 0x90, - 0x1b, 0xcf, 0xa4, 0x69, 0x6a, 0xfc, 0x13, 0x05, 0xc2, 0x06, 0x17, 0x89, 0x44, 0xe2, 0x1d, 0x25, - 0x3c, 0xeb, 0xc4, 0x07, 0xf4, 0x88, 0x9a, 0xd2, 0x86, 0x16, 0x76, 0xe2, 0xbb, 0x2c, 0x11, 0x45, - 0x1e, 0xe9, 0xc1, 0xac, 0x6e, 0xdb, 0xee, 0x43, 0x6a, 0x6e, 0xe8, 0x01, 0xb7, 0x7b, 0x4b, 0xdb, - 0xca, 0x59, 0xd5, 0xd2, 0xdc, 0x99, 0x61, 0x39, 0x09, 0x85, 0x69, 0x6c, 0xf2, 0x3e, 0xd4, 0xfc, - 0xc0, 0xf5, 0x94, 0xb0, 0xd5, 0x5c, 0x5a, 0x19, 0xff, 0xa7, 0xb7, 0xbf, 0xde, 0x11, 0x50, 0x42, - 0x67, 0x2f, 0x5f, 0x50, 0x31, 0xd0, 0x7e, 0xbd, 0x00, 0x70, 0x7b, 0x7b, 0x7b, 0x4b, 0x9a, 0x17, - 0x4c, 0x28, 0xeb, 0x83, 0xd0, 0x50, 0x39, 0xbe, 0x41, 0x30, 0xe1, 0xc8, 0x2b, 0x6d, 0x78, 0x83, - 0x60, 0x1f, 0x39, 0x3a, 0xf9, 0x71, 0xa8, 0x49, 0x01, 0x59, 0x36, 0x7b, 0xe8, 0x4f, 0x21, 0x85, - 0x68, 0x54, 0xf9, 0xda, 0xdf, 0x2a, 0x02, 0xac, 0x9b, 0x36, 0xed, 0xa8, 0x43, 0x20, 0x8d, 0x60, - 0xdf, 0xa3, 0xfe, 0xbe, 0x6b, 0x9b, 0x63, 0x5a, 0x53, 0xb9, 0xce, 0x7f, 0x5b, 0x81, 0x60, 0x84, - 0x47, 0x4c, 0x98, 0xf2, 0x03, 0xda, 0x57, 0xbe, 0xbd, 0x63, 0x1a, 0x51, 0xe6, 0x84, 0x5e, 0x24, - 0xc2, 0xc1, 0x04, 0x2a, 0xd1, 0xa1, 0x69, 0x39, 0x86, 0x18, 0x20, 0xed, 0xa3, 0x31, 0x3b, 0xd2, - 0x2c, 0xdb, 0x71, 0xac, 0x47, 0x30, 0x18, 0xc7, 0xd4, 0x7e, 0xa7, 0x08, 0x97, 0x38, 0x3f, 0x56, - 0x8d, 0x84, 0x07, 0x2f, 0xf9, 0x93, 0x43, 0x07, 0x56, 0xff, 0xf8, 0xe9, 0x58, 0x8b, 0xf3, 0x8e, - 0x9b, 0x34, 0xd0, 0x23, 0x79, 0x2e, 0x4a, 0x8b, 0x9d, 0x52, 0x1d, 0x40, 0xd9, 0x67, 0xf3, 0x95, - 0x68, 0xbd, 0xce, 0xd8, 0x5d, 0x28, 0xfb, 0x03, 0xf8, 0xec, 0x15, 0x5a, 0x8d, 0xf9, 0xac, 0xc5, - 0xd9, 0x91, 0x5f, 0x82, 0xaa, 0x1f, 0xe8, 0xc1, 0x40, 0x0d, 0xcd, 0x9d, 0x49, 0x33, 0xe6, 0xe0, - 0xd1, 0x3c, 0x22, 0xde, 0x51, 0x32, 0xd5, 0x7e, 0xa7, 0x00, 0x57, 0xb2, 0x0b, 0x6e, 0x58, 0x7e, - 0x40, 0xfe, 0xc4, 0x50, 0xb3, 0x9f, 0xf2, 0x8f, 0xb3, 0xd2, 0xbc, 0xd1, 0xc3, 0x33, 0x0d, 0x2a, - 0x25, 0xd6, 
0xe4, 0x01, 0x54, 0xac, 0x80, 0xf6, 0xd4, 0xfe, 0xf2, 0xfe, 0x84, 0x3f, 0x3d, 0xb6, - 0xb4, 0x33, 0x2e, 0x28, 0x98, 0x69, 0xdf, 0x2b, 0x8e, 0xfa, 0x64, 0xbe, 0x7c, 0xd8, 0x49, 0x2f, - 0xf1, 0xbb, 0xf9, 0xbc, 0xc4, 0x93, 0x15, 0x1a, 0x76, 0x16, 0xff, 0x53, 0xc3, 0xce, 0xe2, 0xf7, - 0xf3, 0x3b, 0x8b, 0xa7, 0x9a, 0x61, 0xa4, 0xcf, 0xf8, 0x47, 0x25, 0xb8, 0xfa, 0xa4, 0x6e, 0xc3, - 0xd6, 0x33, 0xd9, 0x3b, 0xf3, 0xae, 0x67, 0x4f, 0xee, 0x87, 0x64, 0x09, 0x2a, 0xfd, 0x7d, 0xdd, - 0x57, 0x42, 0xd9, 0xd5, 0xd0, 0xcd, 0x90, 0x25, 0x3e, 0x66, 0x93, 0x06, 0x17, 0xe6, 0xf8, 0x2b, - 0x0a, 0x52, 0x36, 0x1d, 0xf7, 0xa8, 0xef, 0x47, 0x3a, 0x81, 0x70, 0x3a, 0xde, 0x14, 0xc9, 0xa8, - 0xf2, 0x49, 0x00, 0x55, 0xa1, 0x62, 0x96, 0x2b, 0xd3, 0xf8, 0x8e, 0x5c, 0x19, 0x07, 0x0b, 0xa2, - 0x8f, 0x92, 0xd6, 0x0a, 0xc9, 0x8b, 0x2c, 0x40, 0x39, 0x88, 0xdc, 0xbc, 0xd5, 0xd6, 0xbc, 0x9c, - 0x21, 0x9f, 0x72, 0x3a, 0xb6, 0xb1, 0x77, 0x77, 0xb9, 0x52, 0xdd, 0x94, 0xf6, 0x73, 0xcb, 0x75, - 0xb8, 0x40, 0x56, 0x8a, 0x36, 0xf6, 0xf7, 0x87, 0x28, 0x30, 0xa3, 0x94, 0xf6, 0x6f, 0xea, 0x70, - 0x29, 0xbb, 0x3f, 0xb0, 0x76, 0x3b, 0xa4, 0x9e, 0xcf, 0xb0, 0x0b, 0xc9, 0x76, 0x7b, 0x20, 0x92, - 0x51, 0xe5, 0x7f, 0xa2, 0x1d, 0xce, 0x7e, 0xa3, 0x00, 0x97, 0x3d, 0x69, 0x23, 0x7a, 0x16, 0x4e, - 0x67, 0xaf, 0x08, 0x75, 0xc6, 0x08, 0x86, 0x38, 0xba, 0x2e, 0xe4, 0x6f, 0x14, 0x60, 0xbe, 0x97, - 0xd2, 0x73, 0x9c, 0xe3, 0x99, 0x4b, 0x7e, 0x8e, 0x62, 0x73, 0x04, 0x3f, 0x1c, 0x59, 0x13, 0xf2, - 0x6d, 0x68, 0xf6, 0x59, 0xbf, 0xf0, 0x03, 0xea, 0x18, 0xca, 0x41, 0x74, 0xfc, 0x91, 0xb4, 0x15, - 0x61, 0x85, 0x67, 0xae, 0xb8, 0x7c, 0x10, 0xcb, 0xc0, 0x38, 0xc7, 0xe7, 0xfc, 0x90, 0xe5, 0x0d, - 0xa8, 0xfb, 0x34, 0x08, 0x2c, 0xa7, 0x2b, 0xf6, 0x1b, 0x0d, 0x31, 0x56, 0x3a, 0x32, 0x0d, 0xc3, - 0x5c, 0xf2, 0x13, 0xd0, 0xe0, 0x26, 0xa7, 0x65, 0xaf, 0xeb, 0xcf, 0x37, 0xb8, 0xbb, 0xd8, 0xb4, - 0x70, 0x80, 0x93, 0x89, 0x18, 0xe5, 0x93, 0x2f, 0xc1, 0xd4, 0x2e, 0x1f, 0xbe, 0xf2, 0xdc, 0xbd, - 0xd0, 0x71, 0x71, 0x69, 0xad, 0x1d, 0x4b, 0xc7, 
0x04, 0x15, 0x59, 0x02, 0xa0, 0xa1, 0x5d, 0x2e, - 0xad, 0xcf, 0x8a, 0x2c, 0x76, 0x18, 0xa3, 0x22, 0xaf, 0x40, 0x29, 0xb0, 0x7d, 0xae, 0xc3, 0xaa, - 0x47, 0x5b, 0xd0, 0xed, 0x8d, 0x0e, 0xb2, 0x74, 0xed, 0x0f, 0x0b, 0x30, 0x9b, 0x3a, 0x8e, 0xc4, - 0x8a, 0x0c, 0x3c, 0x5b, 0x4e, 0x23, 0x61, 0x91, 0x1d, 0xdc, 0x40, 0x96, 0x4e, 0xde, 0x93, 0x62, - 0x79, 0x31, 0x67, 0x88, 0x91, 0x7b, 0x7a, 0xe0, 0x33, 0x39, 0x7c, 0x48, 0x22, 0xe7, 0x66, 0xbe, - 0xa8, 0x3e, 0x72, 0x1d, 0x88, 0x99, 0xf9, 0xa2, 0x3c, 0x4c, 0x50, 0xa6, 0x14, 0x7e, 0xe5, 0xd3, - 0x28, 0xfc, 0xb4, 0x5f, 0x2d, 0xc6, 0x5a, 0x40, 0x4a, 0xf6, 0x4f, 0x69, 0x81, 0x2f, 0xb0, 0x05, - 0x34, 0x5c, 0xdc, 0x1b, 0xf1, 0xf5, 0x8f, 0x2f, 0xc6, 0x32, 0x97, 0xbc, 0x2d, 0xda, 0xbe, 0x94, - 0xf3, 0x20, 0xf7, 0xf6, 0x46, 0x47, 0x78, 0x57, 0xa9, 0xbf, 0x16, 0xfe, 0x82, 0xf2, 0x39, 0xfd, - 0x02, 0xed, 0x9f, 0x97, 0xa0, 0x79, 0xc7, 0xdd, 0xfd, 0x84, 0x78, 0x50, 0x67, 0x2f, 0x53, 0xc5, - 0x8f, 0x71, 0x99, 0xda, 0x81, 0x97, 0x83, 0xc0, 0xee, 0x50, 0xc3, 0x75, 0x4c, 0x7f, 0x79, 0x2f, - 0xa0, 0xde, 0x9a, 0xe5, 0x58, 0xfe, 0x3e, 0x35, 0xa5, 0x39, 0xe9, 0x33, 0x27, 0xc7, 0xad, 0x97, - 0xb7, 0xb7, 0x37, 0xb2, 0x48, 0x70, 0x54, 0x59, 0x3e, 0x6d, 0x88, 0xb3, 0xa3, 0xfc, 0x6c, 0x95, - 0xf4, 0xb9, 0x11, 0xd3, 0x46, 0x2c, 0x1d, 0x13, 0x54, 0xda, 0x7f, 0x2c, 0x42, 0x23, 0x0c, 0x1e, - 0x41, 0x3e, 0x0f, 0xb5, 0x5d, 0xcf, 0x3d, 0xa0, 0x9e, 0xb0, 0xdc, 0xc9, 0xb3, 0x55, 0x6d, 0x91, - 0x84, 0x2a, 0x8f, 0x7c, 0x0e, 0x2a, 0x81, 0xdb, 0xb7, 0x8c, 0xb4, 0x42, 0x6d, 0x9b, 0x25, 0xa2, - 0xc8, 0xe3, 0x03, 0x81, 0xbb, 0x15, 0xf2, 0xaf, 0xaa, 0xc7, 0x06, 0x02, 0x4f, 0x45, 0x99, 0xab, - 0x06, 0x42, 0x79, 0xe2, 0x03, 0xe1, 0x0b, 0xa1, 0x08, 0x58, 0x49, 0x8e, 0xc4, 0x94, 0xd0, 0xf6, - 0x2e, 0x94, 0x7d, 0xdd, 0xb7, 0xe5, 0xf2, 0x96, 0x23, 0x5e, 0xc3, 0x72, 0x67, 0x43, 0xc6, 0x6b, - 0x58, 0xee, 0x6c, 0x20, 0x07, 0xd5, 0x7e, 0xbf, 0x08, 0x4d, 0xd1, 0xbe, 0x62, 0xf6, 0x98, 0x64, - 0x0b, 0xbf, 0xc9, 0x5d, 0x2e, 0xfc, 0x41, 0x8f, 0x7a, 0x5c, 0x1d, 0x25, 0x27, 0xc3, 
0xb8, 0x1d, - 0x21, 0xca, 0x0c, 0xdd, 0x2e, 0xa2, 0xa4, 0x3f, 0xe2, 0x4d, 0xff, 0x61, 0x11, 0x1a, 0x1b, 0xd6, - 0x1e, 0x35, 0x8e, 0x0c, 0x9b, 0x9f, 0x36, 0x35, 0xa9, 0x4d, 0x03, 0x7a, 0xcb, 0xd3, 0x0d, 0xba, - 0x45, 0x3d, 0x8b, 0x07, 0x61, 0x62, 0xe3, 0x88, 0xcf, 0x54, 0xf2, 0xb4, 0xe9, 0xea, 0x08, 0x1a, - 0x1c, 0x59, 0x9a, 0xac, 0xc3, 0x94, 0x49, 0x7d, 0xcb, 0xa3, 0xe6, 0x56, 0x6c, 0x43, 0xf3, 0x79, - 0xb5, 0x24, 0xad, 0xc6, 0xf2, 0x1e, 0x1f, 0xb7, 0xa6, 0x95, 0x22, 0x53, 0xec, 0x6c, 0x12, 0x45, - 0xd9, 0xd4, 0xd0, 0xd7, 0x07, 0x7e, 0x56, 0x1d, 0x63, 0x53, 0xc3, 0x56, 0x36, 0x09, 0x8e, 0x2a, - 0xab, 0x55, 0xa0, 0xb4, 0xe1, 0x76, 0xb5, 0xef, 0x95, 0x20, 0x8c, 0xd6, 0x45, 0xfe, 0x6c, 0x01, - 0x9a, 0xba, 0xe3, 0xb8, 0x81, 0x8c, 0x84, 0x25, 0x2c, 0xf5, 0x98, 0x3b, 0x28, 0xd8, 0xc2, 0x72, - 0x04, 0x2a, 0x8c, 0xbc, 0xa1, 0xe1, 0x39, 0x96, 0x83, 0x71, 0xde, 0x64, 0x90, 0xb2, 0x3b, 0x6f, - 0xe6, 0xaf, 0xc5, 0x29, 0xac, 0xcc, 0x57, 0xbe, 0x06, 0x73, 0xe9, 0xca, 0x9e, 0xc5, 0x6c, 0x94, - 0xcb, 0x80, 0x5f, 0x04, 0x88, 0x7c, 0x4f, 0x9e, 0x81, 0xb2, 0xcb, 0x4a, 0x28, 0xbb, 0xc6, 0x0f, - 0x99, 0x10, 0x55, 0x7a, 0xa4, 0x82, 0xeb, 0x5b, 0x29, 0x05, 0xd7, 0xfa, 0x24, 0x98, 0x3d, 0x59, - 0xa9, 0xb5, 0x0b, 0x17, 0x22, 0xda, 0x68, 0xcc, 0xdf, 0x4d, 0x8d, 0x4c, 0x21, 0xb3, 0x7d, 0x71, - 0xc4, 0xc8, 0x9c, 0x8d, 0x39, 0x03, 0x0d, 0x8f, 0x4d, 0xed, 0x6f, 0x16, 0x60, 0x2e, 0xce, 0x84, - 0x9f, 0xef, 0xfe, 0x32, 0x4c, 0x7b, 0x54, 0x37, 0xdb, 0x7a, 0x60, 0xec, 0x73, 0xb7, 0xf3, 0x02, - 0xf7, 0x13, 0xe7, 0x27, 0xd1, 0x30, 0x9e, 0x81, 0x49, 0x3a, 0xa2, 0x43, 0x93, 0x25, 0x6c, 0x5b, - 0x3d, 0xea, 0x0e, 0x82, 0x31, 0x35, 0xb8, 0x7c, 0xf3, 0x84, 0x11, 0x0c, 0xc6, 0x31, 0xb5, 0x8f, - 0x0a, 0x30, 0x13, 0xaf, 0xf0, 0xb9, 0x6b, 0xf7, 0xf6, 0x93, 0xda, 0xbd, 0x95, 0x09, 0xfc, 0xf7, - 0x11, 0x1a, 0xbd, 0xef, 0x34, 0xe3, 0x9f, 0xc6, 0xb5, 0x78, 0x71, 0xc5, 0x45, 0xe1, 0x89, 0x8a, - 0x8b, 0x4f, 0x7e, 0x10, 0xa8, 0x51, 0x12, 0x77, 0xf9, 0x39, 0x96, 0xb8, 0x3f, 0xce, 0x48, 0x52, - 0xb1, 0x68, 0x48, 0xd5, 
0x1c, 0xd1, 0x90, 0x7a, 0x61, 0x34, 0xa4, 0xda, 0xc4, 0x26, 0xb6, 0xd3, - 0x44, 0x44, 0xaa, 0x3f, 0xd3, 0x88, 0x48, 0x8d, 0xf3, 0x8a, 0x88, 0x04, 0x79, 0x23, 0x22, 0x7d, - 0xb7, 0x00, 0x33, 0x66, 0xe2, 0xf4, 0xae, 0x3c, 0x37, 0x3f, 0xfe, 0x72, 0x96, 0x3c, 0x0c, 0x2c, - 0x8e, 0x6f, 0x25, 0xd3, 0x30, 0xc5, 0x32, 0x2b, 0x0e, 0xd1, 0xd4, 0xc7, 0x12, 0x87, 0x88, 0xfc, - 0x12, 0x34, 0x6c, 0xb5, 0xd6, 0xc9, 0xe8, 0x8c, 0x1b, 0x13, 0xe9, 0x92, 0x12, 0x33, 0x3a, 0x21, - 0x10, 0x26, 0x61, 0xc4, 0x51, 0xfb, 0xbd, 0x5a, 0x7c, 0x41, 0x7c, 0xd6, 0xf6, 0x83, 0xd7, 0x93, - 0xf6, 0x83, 0xeb, 0x69, 0xfb, 0xc1, 0xd0, 0x6a, 0x2e, 0x6d, 0x08, 0x3f, 0x19, 0x5b, 0x27, 0x4a, - 0x3c, 0x00, 0x52, 0xd8, 0xe5, 0x32, 0xd6, 0x8a, 0x65, 0x98, 0x95, 0x42, 0x80, 0xca, 0xe4, 0x93, - 0xec, 0x74, 0xe4, 0xf1, 0xb5, 0x9a, 0xcc, 0xc6, 0x34, 0x3d, 0x63, 0xe8, 0xab, 0x38, 0xb8, 0x62, - 0x37, 0x14, 0xf5, 0x71, 0x15, 0xa3, 0x36, 0xa4, 0x60, 0x3b, 0x27, 0x8f, 0xea, 0xbe, 0xb4, 0x02, - 0xc4, 0x76, 0x4e, 0xc8, 0x53, 0x51, 0xe6, 0xc6, 0x4d, 0x21, 0xb5, 0xa7, 0x98, 0x42, 0x74, 0x68, - 0xda, 0xba, 0x1f, 0x88, 0xce, 0x64, 0xca, 0xd9, 0xe4, 0x8f, 0x9d, 0x6e, 0xdd, 0x67, 0xb2, 0x44, - 0x24, 0xc0, 0x6f, 0x44, 0x30, 0x18, 0xc7, 0x24, 0x26, 0x4c, 0xb1, 0x57, 0x3e, 0xb3, 0x98, 0xcb, - 0x81, 0x8c, 0x16, 0x77, 0x16, 0x1e, 0xa1, 0x06, 0x6f, 0x23, 0x86, 0x83, 0x09, 0xd4, 0x11, 0xd6, - 0x12, 0x18, 0xc7, 0x5a, 0x42, 0x7e, 0x56, 0x08, 0x6e, 0x47, 0xe1, 0x6f, 0x6d, 0xf2, 0xdf, 0x1a, - 0x7a, 0x8b, 0x62, 0x3c, 0x13, 0x93, 0xb4, 0xac, 0x57, 0x0c, 0x64, 0x33, 0xa8, 0xe2, 0x53, 0xc9, - 0x5e, 0xb1, 0x93, 0xcc, 0xc6, 0x34, 0x3d, 0xd9, 0x82, 0x8b, 0x61, 0x52, 0xbc, 0x1a, 0xd3, 0x1c, - 0x27, 0x74, 0xdf, 0xdb, 0xc9, 0xa0, 0xc1, 0xcc, 0x92, 0xfc, 0x3c, 0xcc, 0xc0, 0xf3, 0xa8, 0x13, - 0xdc, 0xd6, 0xfd, 0x7d, 0xe9, 0x07, 0x18, 0x9d, 0x87, 0x89, 0xb2, 0x30, 0x4e, 0x47, 0x96, 0x00, - 0x04, 0x1c, 0x2f, 0x35, 0x9b, 0x74, 0xb5, 0xdd, 0x09, 0x73, 0x30, 0x46, 0xa5, 0x7d, 0xb7, 0x01, - 0xcd, 0x7b, 0x7a, 0x60, 0x1d, 0x52, 0x6e, 0xda, 0x3c, 0x1f, 
0xfb, 0xd2, 0x5f, 0x29, 0xc0, 0xa5, - 0xa4, 0xff, 0xea, 0x39, 0x1a, 0x99, 0x78, 0xfc, 0x24, 0xcc, 0xe4, 0x86, 0x23, 0x6a, 0xc1, 0xcd, - 0x4d, 0x43, 0xee, 0xb0, 0xe7, 0x6d, 0x6e, 0xea, 0x8c, 0x62, 0x88, 0xa3, 0xeb, 0xf2, 0x49, 0x31, - 0x37, 0x3d, 0xdf, 0x01, 0x3f, 0x53, 0xc6, 0xb0, 0xda, 0x73, 0x63, 0x0c, 0xab, 0x3f, 0x17, 0x52, - 0x7f, 0x3f, 0x66, 0x0c, 0x6b, 0xe4, 0x74, 0xca, 0x92, 0x47, 0x3e, 0x04, 0xda, 0x28, 0xa3, 0x1a, - 0x8f, 0xd6, 0xa0, 0x8c, 0x14, 0x4c, 0x58, 0xde, 0xd5, 0x7d, 0xcb, 0x90, 0x62, 0x47, 0x8e, 0x00, - 0xc7, 0x2a, 0xf0, 0xa1, 0xf0, 0xdd, 0xe0, 0xaf, 0x28, 0xb0, 0xa3, 0x38, 0x8f, 0xc5, 0x5c, 0x71, - 0x1e, 0xc9, 0x0a, 0x94, 0x9d, 0x03, 0x7a, 0x74, 0xb6, 0xb8, 0x07, 0x7c, 0x13, 0x78, 0xef, 0x2e, - 0x3d, 0x42, 0x5e, 0x58, 0xfb, 0x7e, 0x11, 0x80, 0x7d, 0xfe, 0xe9, 0xcc, 0x52, 0x3f, 0x0e, 0x35, - 0x7f, 0xc0, 0x15, 0x43, 0x52, 0x60, 0x8a, 0x3c, 0xd9, 0x44, 0x32, 0xaa, 0x7c, 0xf2, 0x39, 0xa8, - 0x7c, 0x6b, 0x40, 0x07, 0xca, 0xc7, 0x22, 0xdc, 0x37, 0x7c, 0x9d, 0x25, 0xa2, 0xc8, 0x3b, 0x3f, - 0xd5, 0xb1, 0x32, 0x5f, 0x55, 0xce, 0xcb, 0x7c, 0xd5, 0x80, 0xda, 0x3d, 0x97, 0x3b, 0xc6, 0x6a, - 0xff, 0xad, 0x08, 0x10, 0x39, 0x1e, 0x92, 0x5f, 0x2f, 0xc0, 0x4b, 0xe1, 0x80, 0x0b, 0xc4, 0xf6, - 0x8f, 0xc7, 0x14, 0xcf, 0x6d, 0xca, 0xca, 0x1a, 0xec, 0x7c, 0x06, 0xda, 0xca, 0x62, 0x87, 0xd9, - 0xb5, 0x20, 0x08, 0x75, 0xda, 0xeb, 0x07, 0x47, 0xab, 0x96, 0x27, 0x7b, 0x60, 0xa6, 0x7f, 0xeb, - 0x4d, 0x49, 0x23, 0x8a, 0x4a, 0x1d, 0x05, 0x1f, 0x44, 0x2a, 0x07, 0x43, 0x1c, 0xb2, 0x0f, 0x75, - 0xc7, 0x7d, 0xcf, 0x67, 0xcd, 0x21, 0xbb, 0xe3, 0x5b, 0xe3, 0x37, 0xb9, 0x68, 0x56, 0x61, 0xd2, - 0x90, 0x2f, 0x58, 0x73, 0x64, 0x63, 0xff, 0x5a, 0x11, 0x2e, 0x64, 0xb4, 0x03, 0x79, 0x0b, 0xe6, - 0xa4, 0x8f, 0x67, 0x14, 0x5c, 0xbf, 0x10, 0x05, 0xd7, 0xef, 0xa4, 0xf2, 0x70, 0x88, 0x9a, 0xbc, - 0x07, 0xa0, 0x1b, 0x06, 0xf5, 0xfd, 0x4d, 0xd7, 0x54, 0xfb, 0x81, 0x37, 0x99, 0xf8, 0xb2, 0x1c, - 0xa6, 0x3e, 0x3e, 0x6e, 0xfd, 0x54, 0x96, 0xdb, 0x76, 0xaa, 0x9d, 0xa3, 0x02, 0x18, 0x83, 0x24, - 
0xdf, 0x04, 0x10, 0x3a, 0x80, 0x30, 0xb2, 0xc4, 0x53, 0x14, 0x67, 0x0b, 0x2a, 0x70, 0xd9, 0xc2, - 0xd7, 0x07, 0xba, 0x13, 0x58, 0xc1, 0x91, 0x08, 0xe4, 0xf3, 0x20, 0x44, 0xc1, 0x18, 0xa2, 0xf6, - 0x4f, 0x8a, 0x50, 0x57, 0x66, 0x81, 0x67, 0xa0, 0x0b, 0xee, 0x26, 0x74, 0xc1, 0x13, 0x72, 0xd4, - 0xce, 0xd2, 0x04, 0xbb, 0x29, 0x4d, 0xf0, 0xad, 0xfc, 0xac, 0x9e, 0xac, 0x07, 0xfe, 0xad, 0x22, - 0xcc, 0x28, 0xd2, 0xbc, 0x1a, 0xda, 0xaf, 0xc2, 0xac, 0x70, 0xb0, 0xd8, 0xd4, 0x1f, 0x89, 0x98, - 0x46, 0xbc, 0xc1, 0xca, 0xc2, 0x37, 0xba, 0x9d, 0xcc, 0xc2, 0x34, 0x2d, 0xeb, 0xd6, 0x22, 0x69, - 0x87, 0x6d, 0xc2, 0x84, 0x49, 0x56, 0xec, 0x37, 0x79, 0xb7, 0x6e, 0xa7, 0xf2, 0x70, 0x88, 0x3a, - 0xad, 0x22, 0x2e, 0x9f, 0x83, 0x8a, 0xf8, 0xdf, 0x15, 0x60, 0x2a, 0x6a, 0xaf, 0x73, 0x57, 0x10, - 0xef, 0x25, 0x15, 0xc4, 0xcb, 0xb9, 0xbb, 0xc3, 0x08, 0xf5, 0xf0, 0x5f, 0xa8, 0x41, 0xe2, 0xbc, - 0x00, 0xd9, 0x85, 0x2b, 0x56, 0xa6, 0xd7, 0x63, 0x6c, 0xb6, 0x09, 0x0f, 0xc0, 0xaf, 0x8f, 0xa4, - 0xc4, 0x27, 0xa0, 0x90, 0x01, 0xd4, 0x0f, 0xa9, 0x17, 0x58, 0x06, 0x55, 0xdf, 0x77, 0x2b, 0xb7, - 0x48, 0x26, 0x95, 0xe0, 0x61, 0x9b, 0x3e, 0x90, 0x0c, 0x30, 0x64, 0x45, 0x76, 0xa1, 0x42, 0xcd, - 0x2e, 0x55, 0x51, 0xa6, 0x72, 0x46, 0xfd, 0x0d, 0xdb, 0x93, 0xbd, 0xf9, 0x28, 0xa0, 0x89, 0x1f, - 0x57, 0x34, 0x95, 0x73, 0x0a, 0x58, 0xa7, 0x54, 0x2f, 0x91, 0x83, 0x50, 0xdb, 0x5a, 0x99, 0xd0, - 0xe4, 0xf1, 0x04, 0x5d, 0xab, 0x0f, 0x8d, 0x87, 0x7a, 0x40, 0xbd, 0x9e, 0xee, 0x1d, 0xc8, 0xdd, - 0xc6, 0xf8, 0x5f, 0xf8, 0xb6, 0x42, 0x8a, 0xbe, 0x30, 0x4c, 0xc2, 0x88, 0x0f, 0x71, 0xa1, 0x11, - 0x48, 0xf1, 0x59, 0xa9, 0x94, 0xc7, 0x67, 0xaa, 0x04, 0x71, 0x5f, 0x9e, 0x1b, 0x50, 0xaf, 0x18, - 0xf1, 0x20, 0x87, 0x89, 0x10, 0xf1, 0xe2, 0x62, 0x80, 0x76, 0x0e, 0xd3, 0x84, 0x84, 0x8a, 0x96, - 0x9b, 0xec, 0x50, 0xf3, 0xda, 0xff, 0xaa, 0x44, 0xd3, 0xf2, 0xb3, 0xd6, 0x13, 0x7e, 0x29, 0xa9, - 0x27, 0xbc, 0x96, 0xd6, 0x13, 0xa6, 0xec, 0xf1, 0x67, 0xf7, 0x34, 0x4e, 0xa9, 0xd7, 0xca, 0xe7, - 0xa0, 0x5e, 0x7b, 0x15, 0x9a, 0x87, 
0x7c, 0x26, 0x10, 0x21, 0xab, 0x2a, 0x7c, 0x19, 0xe1, 0x33, - 0xfb, 0x83, 0x28, 0x19, 0xe3, 0x34, 0xac, 0x88, 0xbc, 0x14, 0x27, 0x8c, 0x12, 0x2d, 0x8b, 0x74, - 0xa2, 0x64, 0x8c, 0xd3, 0x70, 0x27, 0x45, 0xcb, 0x39, 0x10, 0x05, 0x6a, 0xbc, 0x80, 0x70, 0x52, - 0x54, 0x89, 0x18, 0xe5, 0x93, 0x1b, 0x50, 0x1f, 0x98, 0x7b, 0x82, 0xb6, 0xce, 0x69, 0xb9, 0x84, - 0xb9, 0xb3, 0xba, 0x26, 0x43, 0x68, 0xa9, 0x5c, 0x56, 0x93, 0x9e, 0xde, 0x57, 0x19, 0x7c, 0x6f, - 0x28, 0x6b, 0xb2, 0x19, 0x25, 0x63, 0x9c, 0x86, 0xfc, 0x0c, 0xcc, 0x78, 0xd4, 0x1c, 0x18, 0x34, - 0x2c, 0x05, 0xbc, 0x94, 0x8c, 0x2d, 0x1a, 0xcf, 0xc1, 0x14, 0xe5, 0x08, 0x25, 0x61, 0x73, 0x2c, - 0x25, 0xe1, 0xd7, 0x60, 0xc6, 0xf4, 0x74, 0xcb, 0xa1, 0xe6, 0x7d, 0x87, 0x3b, 0x5d, 0x48, 0x57, - 0xc9, 0x50, 0x41, 0xbf, 0x9a, 0xc8, 0xc5, 0x14, 0xb5, 0xf6, 0x2f, 0x8a, 0x50, 0x11, 0x11, 0x4f, - 0xd7, 0xe1, 0x82, 0xe5, 0x58, 0x81, 0xa5, 0xdb, 0xab, 0xd4, 0xd6, 0x8f, 0x92, 0x8e, 0x27, 0x2f, - 0xb3, 0x8d, 0xf6, 0xfa, 0x70, 0x36, 0x66, 0x95, 0x61, 0x8d, 0x13, 0x88, 0xe5, 0x5b, 0xa1, 0x08, - 0x3d, 0x9a, 0x08, 0xb7, 0x9d, 0xc8, 0xc1, 0x14, 0x25, 0x13, 0x86, 0xfa, 0x19, 0x5e, 0x25, 0x5c, - 0x18, 0x4a, 0xfa, 0x92, 0x24, 0xe9, 0xb8, 0x90, 0x3e, 0xe0, 0x02, 0x71, 0x78, 0x20, 0x49, 0x3a, - 0x98, 0x09, 0x21, 0x3d, 0x95, 0x87, 0x43, 0xd4, 0x0c, 0x61, 0x4f, 0xb7, 0xec, 0x81, 0x47, 0x23, - 0x84, 0x4a, 0x84, 0xb0, 0x96, 0xca, 0xc3, 0x21, 0x6a, 0xed, 0x7f, 0x14, 0x80, 0x0c, 0x1f, 0xb1, - 0x20, 0xfb, 0x50, 0x75, 0xb8, 0x2e, 0x32, 0x77, 0x94, 0xff, 0x98, 0x4a, 0x53, 0x2c, 0x12, 0x32, - 0x41, 0xe2, 0x13, 0x07, 0xea, 0xf4, 0x51, 0x40, 0x3d, 0x27, 0x3c, 0x72, 0x35, 0x99, 0x1b, 0x05, - 0xc4, 0xde, 0x4c, 0x22, 0x63, 0xc8, 0x43, 0xfb, 0xdd, 0x22, 0x34, 0x63, 0x74, 0x4f, 0xdb, 0xe2, - 0xf3, 0xa8, 0x0f, 0x42, 0x05, 0xb8, 0xe3, 0xd9, 0x72, 0xbe, 0x8b, 0x45, 0x7d, 0x90, 0x59, 0xb8, - 0x81, 0x71, 0x3a, 0xb2, 0x04, 0xd0, 0xd3, 0xfd, 0x80, 0x7a, 0x5c, 0x16, 0x4a, 0xc5, 0x5a, 0xd8, - 0x0c, 0x73, 0x30, 0x46, 0x45, 0xae, 0xcb, 0x3b, 0x21, 0xca, 0xc9, 0xd8, 
0x98, 0x23, 0x2e, 0x7c, - 0xa8, 0x4c, 0xe0, 0xc2, 0x07, 0xd2, 0x85, 0x39, 0x55, 0x6b, 0x95, 0x7b, 0xb6, 0xc8, 0x89, 0xa2, - 0xa3, 0xa6, 0x20, 0x70, 0x08, 0x54, 0xfb, 0x7e, 0x01, 0xa6, 0x13, 0x0a, 0x28, 0x11, 0xd5, 0x52, - 0x1d, 0x10, 0x4a, 0x44, 0xb5, 0x8c, 0x9d, 0xeb, 0xf9, 0x02, 0x54, 0x45, 0x03, 0xa5, 0xfd, 0x7e, - 0x45, 0x13, 0xa2, 0xcc, 0x65, 0x2b, 0x8b, 0x54, 0x71, 0xa7, 0x57, 0x16, 0xa9, 0x03, 0x47, 0x95, - 0x2f, 0x2c, 0x47, 0xa2, 0x76, 0xb2, 0xa5, 0x63, 0x96, 0x23, 0x91, 0x8e, 0x21, 0x85, 0xf6, 0x0f, - 0x78, 0xbd, 0x03, 0xef, 0x28, 0xdc, 0x59, 0x77, 0xa1, 0x26, 0x7d, 0x3d, 0xe5, 0xd0, 0x78, 0x2b, - 0x87, 0x56, 0x8c, 0xe3, 0x48, 0x6f, 0x45, 0xdd, 0x38, 0xb8, 0xbf, 0xb7, 0x87, 0x0a, 0x9d, 0xdc, - 0x84, 0x86, 0xeb, 0xc8, 0x11, 0x2c, 0x3f, 0xff, 0x8b, 0x6c, 0xe5, 0xb8, 0xaf, 0x12, 0x1f, 0x1f, - 0xb7, 0x2e, 0x85, 0x2f, 0x89, 0x4a, 0x62, 0x54, 0x52, 0xfb, 0x33, 0x05, 0x78, 0x09, 0x5d, 0xdb, - 0xb6, 0x9c, 0x6e, 0xd2, 0xf2, 0x49, 0x6c, 0x98, 0xe9, 0xe9, 0x8f, 0x76, 0x1c, 0xfd, 0x50, 0xb7, - 0x6c, 0x7d, 0xd7, 0xa6, 0x4f, 0xdd, 0x19, 0x0f, 0x02, 0xcb, 0x5e, 0x10, 0x77, 0x64, 0x2e, 0xac, - 0x3b, 0xc1, 0x7d, 0xaf, 0x13, 0x78, 0x96, 0xd3, 0x15, 0xb3, 0xe4, 0x66, 0x02, 0x0b, 0x53, 0xd8, - 0xda, 0xef, 0x95, 0x80, 0xfb, 0x11, 0x92, 0x2f, 0x43, 0xa3, 0x47, 0x8d, 0x7d, 0xdd, 0xb1, 0x7c, - 0x15, 0x1f, 0xf8, 0x32, 0xfb, 0xae, 0x4d, 0x95, 0xf8, 0x98, 0xfd, 0x8a, 0xe5, 0xce, 0x06, 0x3f, - 0xd2, 0x13, 0xd1, 0x12, 0x03, 0xaa, 0x5d, 0xdf, 0xd7, 0xfb, 0x56, 0x6e, 0x17, 0x13, 0x11, 0x8f, - 0x55, 0x4c, 0x47, 0xe2, 0x19, 0x25, 0x34, 0x31, 0xa0, 0xd2, 0xb7, 0x75, 0xcb, 0xc9, 0x7d, 0xa7, - 0x1b, 0xfb, 0x82, 0x2d, 0x86, 0x24, 0x54, 0x95, 0xfc, 0x11, 0x05, 0x36, 0x19, 0x40, 0xd3, 0x37, - 0x3c, 0xbd, 0xe7, 0xef, 0xeb, 0x4b, 0xaf, 0xbd, 0x9e, 0x5b, 0xf8, 0x8f, 0x58, 0x09, 0x59, 0x64, - 0x05, 0x97, 0x37, 0x3b, 0xb7, 0x97, 0x97, 0x5e, 0x7b, 0x1d, 0xe3, 0x7c, 0xe2, 0x6c, 0x5f, 0x7b, - 0x75, 0x49, 0xce, 0x20, 0x13, 0x67, 0xfb, 0xda, 0xab, 0x4b, 0x18, 0xe7, 0xa3, 0xfd, 0xef, 0x02, - 0x34, 0x42, 
0x5a, 0xb2, 0x03, 0xc0, 0xe6, 0x32, 0x19, 0x41, 0xf5, 0x4c, 0xf7, 0xdf, 0x70, 0x6d, - 0xcf, 0x4e, 0x58, 0x18, 0x63, 0x40, 0x19, 0x21, 0x66, 0x8b, 0x93, 0x0e, 0x31, 0xbb, 0x08, 0x8d, - 0x7d, 0xdd, 0x31, 0xfd, 0x7d, 0xfd, 0x80, 0x4a, 0xf7, 0xeb, 0x70, 0x2b, 0x72, 0x5b, 0x65, 0x60, - 0x44, 0xa3, 0xfd, 0xa3, 0x2a, 0x08, 0xbf, 0x10, 0x36, 0xe9, 0x98, 0x96, 0x2f, 0x0e, 0x49, 0x14, - 0x78, 0xc9, 0x70, 0xd2, 0x59, 0x95, 0xe9, 0x18, 0x52, 0x90, 0xcb, 0x50, 0xea, 0x59, 0x8e, 0x94, - 0x40, 0xb8, 0x22, 0x77, 0xd3, 0x72, 0x90, 0xa5, 0xf1, 0x2c, 0xfd, 0x91, 0x94, 0x30, 0x44, 0x96, - 0xfe, 0x08, 0x59, 0x1a, 0xf9, 0x2a, 0xcc, 0xda, 0xae, 0x7b, 0xc0, 0xa6, 0x0f, 0x25, 0x88, 0x08, - 0xab, 0x3a, 0x57, 0xad, 0x6c, 0x24, 0xb3, 0x30, 0x4d, 0x4b, 0x76, 0xe0, 0xe5, 0x0f, 0xa8, 0xe7, - 0xca, 0xf9, 0xb2, 0x63, 0x53, 0xda, 0x57, 0x30, 0x42, 0x34, 0xe6, 0x5e, 0xb2, 0x3f, 0x9f, 0x4d, - 0x82, 0xa3, 0xca, 0x72, 0xbf, 0x7c, 0xdd, 0xeb, 0xd2, 0x60, 0xcb, 0x73, 0x99, 0xec, 0x62, 0x39, - 0x5d, 0x05, 0x5b, 0x8d, 0x60, 0xb7, 0xb3, 0x49, 0x70, 0x54, 0x59, 0xf2, 0x0e, 0xcc, 0x8b, 0x2c, - 0x21, 0xb6, 0x2c, 0x8b, 0x69, 0xc6, 0xb2, 0xd5, 0x55, 0xa8, 0xd3, 0xc2, 0x5e, 0xb6, 0x3d, 0x82, - 0x06, 0x47, 0x96, 0x26, 0x77, 0x60, 0x4e, 0x59, 0x4b, 0xb7, 0xa8, 0xd7, 0x09, 0x7d, 0x85, 0xa6, - 0xdb, 0xd7, 0x4e, 0x8e, 0x5b, 0x57, 0x56, 0x69, 0xdf, 0xa3, 0x46, 0xdc, 0xea, 0xac, 0xa8, 0x70, - 0xa8, 0x1c, 0x41, 0xb8, 0xc4, 0x1d, 0x82, 0x76, 0xfa, 0x2b, 0xae, 0x6b, 0x9b, 0xee, 0x43, 0x47, - 0x7d, 0xbb, 0x10, 0xd8, 0xb9, 0x81, 0xb4, 0x93, 0x49, 0x81, 0x23, 0x4a, 0xb2, 0x2f, 0xe7, 0x39, - 0xab, 0xee, 0x43, 0x27, 0x8d, 0x0a, 0xd1, 0x97, 0x77, 0x46, 0xd0, 0xe0, 0xc8, 0xd2, 0x64, 0x0d, - 0x48, 0xfa, 0x0b, 0x76, 0xfa, 0xd2, 0x84, 0x7f, 0x49, 0x04, 0x43, 0x4a, 0xe7, 0x62, 0x46, 0x09, - 0xb2, 0x01, 0x17, 0xd3, 0xa9, 0x8c, 0x9d, 0xb4, 0xe6, 0xf3, 0x30, 0xc8, 0x98, 0x91, 0x8f, 0x99, - 0xa5, 0xb4, 0x7f, 0x5c, 0x84, 0xe9, 0x44, 0xf4, 0x8c, 0xe7, 0x2e, 0x4a, 0x01, 0xdb, 0x3c, 0xf4, - 0xfc, 0xee, 0xfa, 0xea, 0x6d, 0xaa, 0x9b, 0xd4, 
0x53, 0x87, 0x33, 0x1a, 0x72, 0x59, 0x4c, 0xe4, - 0x60, 0x8a, 0x92, 0xec, 0x41, 0x45, 0xd8, 0x09, 0xf2, 0xde, 0xa4, 0xa4, 0xda, 0x88, 0x1b, 0x0b, - 0xe4, 0xf5, 0x63, 0xae, 0x47, 0x51, 0xc0, 0x6b, 0x01, 0x4c, 0xc5, 0x29, 0xd8, 0x44, 0x12, 0x89, - 0xbd, 0xb5, 0x84, 0xc8, 0xbb, 0x0e, 0xa5, 0x20, 0x18, 0x37, 0xfe, 0x81, 0xb0, 0x3b, 0x6d, 0x6f, - 0x20, 0xc3, 0xd0, 0xf6, 0xd8, 0xbf, 0xf3, 0x7d, 0xcb, 0x75, 0x64, 0x30, 0xfc, 0x1d, 0xa8, 0xc9, - 0xdd, 0xd3, 0x98, 0xf1, 0x1b, 0xb8, 0xac, 0xa4, 0xd4, 0xae, 0x0a, 0x4b, 0xfb, 0xf7, 0x45, 0x68, - 0x84, 0x6a, 0x92, 0x53, 0x04, 0x99, 0x77, 0xa1, 0x11, 0x3a, 0x34, 0xe6, 0xbe, 0x26, 0x36, 0xf2, - 0xb3, 0xe3, 0x3b, 0xfb, 0xf0, 0x15, 0x23, 0x1e, 0x71, 0x67, 0xc9, 0x52, 0x0e, 0x67, 0xc9, 0x3e, - 0xd4, 0x02, 0xcf, 0xea, 0x76, 0xe5, 0x2e, 0x21, 0x8f, 0xb7, 0x64, 0xd8, 0x5c, 0xdb, 0x02, 0x50, - 0xb6, 0xac, 0x78, 0x41, 0xc5, 0x46, 0x7b, 0x1f, 0xe6, 0xd2, 0x94, 0x5c, 0x84, 0x36, 0xf6, 0xa9, - 0x39, 0xb0, 0x55, 0x1b, 0x47, 0x22, 0xb4, 0x4c, 0xc7, 0x90, 0x82, 0xdc, 0x80, 0x3a, 0xfb, 0x4d, - 0x1f, 0xb8, 0x8e, 0x12, 0x63, 0xf9, 0x6e, 0x64, 0x5b, 0xa6, 0x61, 0x98, 0xab, 0xfd, 0xd7, 0x12, - 0x5c, 0x8e, 0x94, 0x5d, 0x9b, 0xba, 0xa3, 0x77, 0x4f, 0x71, 0x37, 0xe8, 0xa7, 0x27, 0xe2, 0xce, - 0x7a, 0x53, 0x48, 0xe9, 0x39, 0xb8, 0x29, 0xe4, 0xff, 0x16, 0x81, 0x3b, 0x5f, 0x93, 0x6f, 0xc3, - 0x94, 0x1e, 0xbb, 0x16, 0x5a, 0xfe, 0xce, 0x9b, 0xb9, 0x7f, 0x27, 0xf7, 0xf1, 0x0e, 0x1d, 0xe0, - 0xe2, 0xa9, 0x98, 0x60, 0x48, 0x5c, 0xa8, 0xef, 0xe9, 0xb6, 0xcd, 0x64, 0xa1, 0xdc, 0xc6, 0xbb, - 0x04, 0x73, 0xde, 0xcd, 0xd7, 0x24, 0x34, 0x86, 0x4c, 0xc8, 0x77, 0x0b, 0x30, 0xed, 0xc5, 0xb7, - 0x6b, 0xf2, 0x87, 0xe4, 0x71, 0xed, 0x88, 0xa1, 0xc5, 0xdd, 0xed, 0xe2, 0x7b, 0xc2, 0x24, 0x4f, - 0xed, 0xbf, 0x14, 0x60, 0xba, 0x63, 0x5b, 0xa6, 0xe5, 0x74, 0xcf, 0xf1, 0xa2, 0x92, 0xfb, 0x50, - 0xf1, 0x6d, 0xcb, 0xa4, 0x63, 0xae, 0x26, 0x62, 0x1d, 0x63, 0x00, 0x28, 0x70, 0x92, 0x37, 0x9f, - 0x94, 0x4e, 0x71, 0xf3, 0xc9, 0x1f, 0x54, 0x41, 0x1e, 0x23, 0x20, 0x03, 0x68, 0x74, 
0xd5, 0x85, - 0x0a, 0xf2, 0x1b, 0x6f, 0xe7, 0x08, 0xc6, 0x99, 0xb8, 0x9a, 0x41, 0xcc, 0xfd, 0x61, 0x22, 0x46, - 0x9c, 0x08, 0x4d, 0xde, 0x47, 0xbe, 0x9a, 0xf3, 0x3e, 0x72, 0xc1, 0x6e, 0xf8, 0x46, 0x72, 0x1d, - 0xca, 0xfb, 0x41, 0xd0, 0x97, 0x9d, 0x69, 0xfc, 0x73, 0x22, 0x51, 0x3c, 0x28, 0x21, 0x13, 0xb1, - 0x77, 0xe4, 0xd0, 0x8c, 0x85, 0xa3, 0x87, 0xb7, 0x3e, 0xae, 0xe4, 0x72, 0x23, 0x89, 0xb3, 0x60, - 0xef, 0xc8, 0xa1, 0xc9, 0x2f, 0x42, 0x33, 0xf0, 0x74, 0xc7, 0xdf, 0x73, 0xbd, 0x1e, 0xf5, 0xe4, - 0x1e, 0x75, 0x2d, 0xc7, 0x95, 0xdc, 0xdb, 0x11, 0x9a, 0x50, 0xc9, 0x26, 0x92, 0x30, 0xce, 0x8d, - 0x1c, 0x40, 0x7d, 0x60, 0x8a, 0x8a, 0x49, 0x35, 0xd8, 0x72, 0x9e, 0x5b, 0xd6, 0x63, 0x4e, 0x22, - 0xea, 0x0d, 0x43, 0x06, 0xc9, 0x0b, 0x4e, 0x6b, 0x93, 0xba, 0xe0, 0x34, 0xde, 0x1b, 0xb3, 0x82, - 0xd5, 0x90, 0x9e, 0x94, 0x6b, 0x9d, 0xae, 0xf4, 0x71, 0x5b, 0xcb, 0x2d, 0x72, 0x0a, 0x96, 0xcd, - 0x50, 0x36, 0x76, 0xba, 0xa8, 0x78, 0x68, 0x3d, 0x90, 0xb6, 0x23, 0x62, 0x24, 0x2e, 0x75, 0x12, - 0x27, 0x23, 0x17, 0x4f, 0x37, 0x1f, 0x84, 0xb7, 0x0b, 0xc5, 0x82, 0xca, 0x67, 0xde, 0xde, 0xa4, - 0xfd, 0x87, 0x22, 0x94, 0xb6, 0x37, 0x3a, 0x22, 0x50, 0x2c, 0xbf, 0x26, 0x8e, 0x76, 0x0e, 0xac, - 0xfe, 0x03, 0xea, 0x59, 0x7b, 0x47, 0x72, 0xeb, 0x1d, 0x0b, 0x14, 0x9b, 0xa6, 0xc0, 0x8c, 0x52, - 0xe4, 0x5d, 0x98, 0x32, 0xf4, 0x15, 0xea, 0x05, 0xe3, 0x28, 0x16, 0xf8, 0x51, 0xf1, 0x95, 0xe5, - 0xa8, 0x38, 0x26, 0xc0, 0xc8, 0x0e, 0x80, 0x11, 0x41, 0x97, 0xce, 0xac, 0x0e, 0x89, 0x01, 0xc7, - 0x80, 0x08, 0x42, 0xe3, 0x80, 0x91, 0x72, 0xd4, 0xf2, 0x59, 0x50, 0x79, 0xcf, 0xb9, 0xab, 0xca, - 0x62, 0x04, 0xa3, 0x39, 0x30, 0x9d, 0xb8, 0xe9, 0x89, 0x7c, 0x05, 0xea, 0x6e, 0x3f, 0x36, 0x9d, - 0x36, 0xb8, 0x37, 0x6d, 0xfd, 0xbe, 0x4c, 0x7b, 0x7c, 0xdc, 0x9a, 0xde, 0x70, 0xbb, 0x96, 0xa1, - 0x12, 0x30, 0x24, 0x27, 0x1a, 0x54, 0xf9, 0xb9, 0x4d, 0x75, 0xcf, 0x13, 0x5f, 0x3b, 0xf8, 0x55, - 0x2c, 0x3e, 0xca, 0x1c, 0xed, 0x97, 0xcb, 0x10, 0x59, 0x5c, 0x89, 0x0f, 0x55, 0x71, 0x66, 0x44, - 0xce, 0xdc, 0xe7, 0x7a, 
0x3c, 0x45, 0xb2, 0x22, 0x5d, 0x28, 0xbd, 0xef, 0xee, 0xe6, 0x9e, 0xb8, - 0x63, 0x81, 0x1d, 0x84, 0xae, 0x2c, 0x96, 0x80, 0x8c, 0x03, 0xf9, 0xab, 0x05, 0x78, 0xd1, 0x4f, - 0x8b, 0xbe, 0xb2, 0x3b, 0x60, 0x7e, 0x19, 0x3f, 0x2d, 0x4c, 0x4b, 0xb7, 0xe7, 0x51, 0xd9, 0x38, - 0x5c, 0x17, 0xd6, 0xfe, 0xc2, 0x14, 0x2a, 0xbb, 0xd3, 0xad, 0x9c, 0xf7, 0xd9, 0x26, 0xdb, 0x3f, - 0x99, 0x86, 0x92, 0x95, 0xf6, 0x9d, 0x22, 0x34, 0x63, 0xb3, 0x75, 0xee, 0xeb, 0xc3, 0x1e, 0xa5, - 0xae, 0x0f, 0xdb, 0x1a, 0xdf, 0x33, 0x20, 0xaa, 0xd5, 0x79, 0xdf, 0x20, 0xf6, 0xcf, 0x8a, 0x50, - 0xda, 0x59, 0x5d, 0x4b, 0x6e, 0x5a, 0x0b, 0xcf, 0x60, 0xd3, 0xba, 0x0f, 0xb5, 0xdd, 0x81, 0x65, - 0x07, 0x96, 0x93, 0x3b, 0xf4, 0x8c, 0xba, 0x6d, 0x4d, 0xda, 0x3a, 0x04, 0x2a, 0x2a, 0x78, 0xd2, - 0x85, 0x5a, 0x57, 0xc4, 0xfe, 0xcc, 0xed, 0x2f, 0x29, 0x63, 0x88, 0x0a, 0x46, 0xf2, 0x05, 0x15, - 0xba, 0x76, 0x04, 0xd5, 0x9d, 0x55, 0x29, 0xf6, 0x3f, 0xdb, 0xd6, 0xd4, 0x7e, 0x11, 0x42, 0x29, - 0xe0, 0xd9, 0x33, 0xff, 0xef, 0x05, 0x48, 0x0a, 0x3e, 0xcf, 0xbe, 0x37, 0x1d, 0xa4, 0x7b, 0xd3, - 0xea, 0x24, 0x06, 0x5f, 0x76, 0x87, 0xd2, 0xfe, 0x6d, 0x01, 0x52, 0x07, 0xfd, 0xc8, 0xeb, 0x32, - 0x8c, 0x5c, 0xd2, 0x31, 0x4d, 0x85, 0x91, 0x23, 0x49, 0xea, 0x58, 0x38, 0xb9, 0x0f, 0xd9, 0x76, - 0x2d, 0x6e, 0x40, 0x93, 0xd5, 0xbf, 0x37, 0xfe, 0x76, 0x2d, 0xcb, 0x1c, 0x27, 0x9d, 0x27, 0xe3, - 0x59, 0x98, 0xe4, 0xab, 0xfd, 0xc3, 0x22, 0x54, 0x9f, 0x59, 0x6c, 0x03, 0x9a, 0xf0, 0x67, 0x5d, - 0xc9, 0x39, 0xdb, 0x8f, 0xf4, 0x66, 0xed, 0xa5, 0xbc, 0x59, 0xf3, 0x5e, 0x93, 0xfe, 0x14, 0x5f, - 0xd6, 0x7f, 0x5d, 0x00, 0xb9, 0xd6, 0xac, 0x3b, 0x7e, 0xa0, 0x3b, 0x06, 0x25, 0x46, 0xb8, 0xb0, - 0xe5, 0x75, 0x9a, 0x92, 0x8e, 0x85, 0x42, 0x96, 0xe1, 0xcf, 0x6a, 0x21, 0x23, 0x3f, 0x09, 0xf5, - 0x7d, 0xd7, 0x0f, 0xf8, 0xe2, 0x55, 0x4c, 0xaa, 0xcc, 0x6e, 0xcb, 0x74, 0x0c, 0x29, 0xd2, 0xe6, - 0xec, 0xca, 0x68, 0x73, 0xb6, 0xf6, 0x9b, 0x45, 0x98, 0xfa, 0xa4, 0x04, 0x4f, 0xc8, 0xf2, 0xfe, - 0x2d, 0xe5, 0xf4, 0xfe, 0x2d, 0x9f, 0xc5, 0xfb, 0x57, 0xfb, 
0x61, 0x01, 0xe0, 0x99, 0x45, 0x6e, - 0x30, 0x93, 0x8e, 0xb9, 0xb9, 0xfb, 0x55, 0xb6, 0x5b, 0xee, 0xdf, 0xab, 0xa8, 0x4f, 0xe2, 0x4e, - 0xb9, 0x1f, 0x16, 0x60, 0x46, 0x4f, 0x38, 0xba, 0xe6, 0x96, 0x97, 0x53, 0x7e, 0xb3, 0xa1, 0x9f, - 0x56, 0x32, 0x1d, 0x53, 0x6c, 0xc9, 0x1b, 0x51, 0x04, 0xf3, 0x7b, 0x51, 0xb7, 0x1f, 0x0a, 0x3d, - 0xce, 0x65, 0xb7, 0x04, 0xe5, 0x53, 0x1c, 0x8b, 0x4b, 0x13, 0x71, 0x2c, 0x8e, 0x1f, 0x99, 0x2c, - 0x3f, 0xf1, 0xc8, 0xe4, 0x21, 0x34, 0xf6, 0x3c, 0xb7, 0xc7, 0x7d, 0x77, 0xe5, 0x1d, 0xe1, 0x37, - 0x73, 0x2c, 0x94, 0xbd, 0x5d, 0xcb, 0xa1, 0x26, 0xf7, 0x0b, 0x0e, 0x15, 0x57, 0x6b, 0x0a, 0x1f, - 0x23, 0x56, 0x5c, 0xd7, 0xef, 0x0a, 0xae, 0xd5, 0x49, 0x72, 0x0d, 0xe7, 0x92, 0x6d, 0x81, 0x8e, - 0x8a, 0x4d, 0xd2, 0x5f, 0xb7, 0xf6, 0x6c, 0xfc, 0x75, 0xb5, 0x3f, 0x5f, 0x53, 0x13, 0xd8, 0x73, - 0x17, 0x2c, 0xf7, 0xd3, 0x83, 0xee, 0x5d, 0x3a, 0x74, 0x0a, 0xbd, 0xfe, 0x0c, 0x4f, 0xa1, 0x37, - 0x26, 0x73, 0x0a, 0x1d, 0xf2, 0x9d, 0x42, 0x6f, 0x4e, 0xe8, 0x14, 0xfa, 0xd4, 0xa4, 0x4e, 0xa1, - 0x4f, 0x8f, 0x75, 0x0a, 0x7d, 0xe6, 0x54, 0xa7, 0xd0, 0x8f, 0x4b, 0x90, 0xda, 0x8c, 0x7f, 0x6a, - 0x78, 0xfb, 0x23, 0x65, 0x78, 0xfb, 0x5e, 0x11, 0xa2, 0x89, 0xf8, 0x8c, 0x8e, 0x49, 0xef, 0x40, - 0xbd, 0xa7, 0x3f, 0xe2, 0x8e, 0xd3, 0x79, 0xee, 0x98, 0xde, 0x94, 0x18, 0x18, 0xa2, 0x11, 0x1f, - 0xc0, 0x0a, 0xef, 0x79, 0xc8, 0x6d, 0xc2, 0x88, 0xae, 0x8c, 0x10, 0x4a, 0xd2, 0xe8, 0x1d, 0x63, - 0x6c, 0xb4, 0x7f, 0x55, 0x04, 0x79, 0x21, 0x08, 0xa1, 0x50, 0xd9, 0xb3, 0x1e, 0x51, 0x33, 0xb7, - 0xbb, 0x73, 0xec, 0xe6, 0x7f, 0x61, 0xa3, 0xe1, 0x09, 0x28, 0xd0, 0xb9, 0xf2, 0x5d, 0xd8, 0xdc, - 0x64, 0xfb, 0xe5, 0x50, 0xbe, 0xc7, 0x6d, 0x77, 0x52, 0xf9, 0x2e, 0x92, 0x50, 0xf1, 0x10, 0xba, - 0x7e, 0xee, 0x7e, 0x91, 0xdb, 0xc4, 0x98, 0x70, 0xe3, 0x50, 0xba, 0x7e, 0x5f, 0x84, 0xa1, 0x90, - 0x3c, 0xda, 0xbf, 0xf0, 0x83, 0x1f, 0x5d, 0x7b, 0xe1, 0x87, 0x3f, 0xba, 0xf6, 0xc2, 0x47, 0x3f, - 0xba, 0xf6, 0xc2, 0x2f, 0x9f, 0x5c, 0x2b, 0xfc, 0xe0, 0xe4, 0x5a, 0xe1, 0x87, 0x27, 0xd7, 0x0a, - 
0x1f, 0x9d, 0x5c, 0x2b, 0xfc, 0xa7, 0x93, 0x6b, 0x85, 0xbf, 0xf4, 0x9f, 0xaf, 0xbd, 0xf0, 0xf3, - 0x5f, 0x8e, 0xaa, 0xb0, 0xa8, 0xaa, 0xb0, 0xa8, 0x18, 0x2e, 0xf6, 0x0f, 0xba, 0x8b, 0xac, 0x0a, - 0x51, 0x8a, 0xaa, 0xc2, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x3c, 0x40, 0x5a, 0x93, 0x65, 0x9f, - 0x00, 0x00, + 0x8c, 0x18, 0x89, 0x13, 0xc1, 0x88, 0x83, 0xfb, 0x57, 0x7f, 0x5d, 0x2d, 0x91, 0x5d, 0x4d, 0x8d, + 0xc6, 0x99, 0xb7, 0xee, 0x7b, 0xce, 0xfd, 0xce, 0xad, 0x5b, 0xb7, 0xee, 0x3d, 0xf7, 0x9c, 0x73, + 0xcf, 0x85, 0x5b, 0x5d, 0x2b, 0xd8, 0x1f, 0xec, 0x2e, 0x18, 0x6e, 0x6f, 0xd1, 0x19, 0xf4, 0xf4, + 0xbe, 0xe7, 0xbe, 0xcf, 0x7f, 0xec, 0xd9, 0xee, 0xc3, 0xc5, 0xfe, 0x41, 0x77, 0x51, 0xef, 0x5b, + 0x7e, 0x54, 0x72, 0xf8, 0xaa, 0x6e, 0xf7, 0xf7, 0xf5, 0x57, 0x17, 0xbb, 0xd4, 0xa1, 0x9e, 0x1e, + 0x50, 0x73, 0xa1, 0xef, 0xb9, 0x81, 0x4b, 0xbe, 0x1c, 0x01, 0x2d, 0x28, 0xa0, 0x05, 0x55, 0x6d, + 0xa1, 0x7f, 0xd0, 0x5d, 0x60, 0x40, 0x51, 0x89, 0x02, 0xba, 0xf2, 0x53, 0xb1, 0x16, 0x74, 0xdd, + 0xae, 0xbb, 0xc8, 0xf1, 0x76, 0x07, 0x7b, 0xfc, 0x1f, 0xff, 0xc3, 0x7f, 0x09, 0x39, 0x57, 0xb4, + 0x83, 0x37, 0xfc, 0x05, 0xcb, 0x65, 0xcd, 0x5a, 0x34, 0x5c, 0x8f, 0x2e, 0x1e, 0x0e, 0xb5, 0xe5, + 0xca, 0x97, 0x22, 0x9e, 0x9e, 0x6e, 0xec, 0x5b, 0x0e, 0xf5, 0x8e, 0xd4, 0xb3, 0x2c, 0x7a, 0xd4, + 0x77, 0x07, 0x9e, 0x41, 0xcf, 0x54, 0xcb, 0x5f, 0xec, 0xd1, 0x40, 0xcf, 0x92, 0xb5, 0x38, 0xaa, + 0x96, 0x37, 0x70, 0x02, 0xab, 0x37, 0x2c, 0xe6, 0xf5, 0xa7, 0x55, 0xf0, 0x8d, 0x7d, 0xda, 0xd3, + 0x87, 0xea, 0xfd, 0xf4, 0xa8, 0x7a, 0x83, 0xc0, 0xb2, 0x17, 0x2d, 0x27, 0xf0, 0x03, 0x2f, 0x5d, + 0x49, 0xfb, 0x1d, 0x80, 0x0b, 0xcb, 0xbb, 0x7e, 0xe0, 0xe9, 0x46, 0xb0, 0xe5, 0x9a, 0xdb, 0xb4, + 0xd7, 0xb7, 0xf5, 0x80, 0x92, 0x03, 0xa8, 0xb3, 0x07, 0x32, 0xf5, 0x40, 0x9f, 0x2f, 0x5c, 0x2f, + 0xdc, 0x68, 0x2e, 0x2d, 0x2f, 0x8c, 0xf9, 0x02, 0x17, 0x36, 0x25, 0x50, 0x7b, 0xea, 0xe4, 0xb8, + 0x55, 0x57, 0xff, 0x30, 0x14, 0x40, 0x7e, 0xad, 0x00, 0x53, 0x8e, 0x6b, 0xd2, 0x0e, 0xb5, 0xa9, + 0x11, 0xb8, 0xde, 0x7c, 
0xf1, 0x7a, 0xe9, 0x46, 0x73, 0xe9, 0x9b, 0x63, 0x4b, 0xcc, 0x78, 0xa2, + 0x85, 0x7b, 0x31, 0x01, 0x37, 0x9d, 0xc0, 0x3b, 0x6a, 0x5f, 0xfc, 0xc1, 0x71, 0xeb, 0x85, 0x93, + 0xe3, 0xd6, 0x54, 0x9c, 0x84, 0x89, 0x96, 0x90, 0x1d, 0x68, 0x06, 0xae, 0xcd, 0xba, 0xcc, 0x72, + 0x1d, 0x7f, 0xbe, 0xc4, 0x1b, 0x76, 0x6d, 0x41, 0x74, 0x35, 0x13, 0xbf, 0xc0, 0xc6, 0xd8, 0xc2, + 0xe1, 0xab, 0x0b, 0xdb, 0x21, 0x5b, 0xfb, 0x82, 0x04, 0x6e, 0x46, 0x65, 0x3e, 0xc6, 0x71, 0x08, + 0x85, 0x59, 0x9f, 0x1a, 0x03, 0xcf, 0x0a, 0x8e, 0x56, 0x5c, 0x27, 0xa0, 0x8f, 0x82, 0xf9, 0x32, + 0xef, 0xe5, 0x2f, 0x64, 0x41, 0x6f, 0xb9, 0x66, 0x27, 0xc9, 0xdd, 0xbe, 0x70, 0x72, 0xdc, 0x9a, + 0x4d, 0x15, 0x62, 0x1a, 0x93, 0x38, 0x30, 0x67, 0xf5, 0xf4, 0x2e, 0xdd, 0x1a, 0xd8, 0x76, 0x87, + 0x1a, 0x1e, 0x0d, 0xfc, 0xf9, 0x0a, 0x7f, 0x84, 0x1b, 0x59, 0x72, 0x36, 0x5c, 0x43, 0xb7, 0xef, + 0xef, 0xbe, 0x4f, 0x8d, 0x00, 0xe9, 0x1e, 0xf5, 0xa8, 0x63, 0xd0, 0xf6, 0xbc, 0x7c, 0x98, 0xb9, + 0xf5, 0x14, 0x12, 0x0e, 0x61, 0x93, 0x5b, 0xf0, 0x62, 0xdf, 0xb3, 0x5c, 0xde, 0x04, 0x5b, 0xf7, + 0xfd, 0x7b, 0x7a, 0x8f, 0xce, 0x57, 0xaf, 0x17, 0x6e, 0x34, 0xda, 0x97, 0x25, 0xcc, 0x8b, 0x5b, + 0x69, 0x06, 0x1c, 0xae, 0x43, 0x6e, 0x40, 0x5d, 0x15, 0xce, 0xd7, 0xae, 0x17, 0x6e, 0x54, 0xc4, + 0xd8, 0x51, 0x75, 0x31, 0xa4, 0x92, 0x35, 0xa8, 0xeb, 0x7b, 0x7b, 0x96, 0xc3, 0x38, 0xeb, 0xbc, + 0x0b, 0xaf, 0x66, 0x3d, 0xda, 0xb2, 0xe4, 0x11, 0x38, 0xea, 0x1f, 0x86, 0x75, 0xc9, 0x1d, 0x20, + 0x3e, 0xf5, 0x0e, 0x2d, 0x83, 0x2e, 0x1b, 0x86, 0x3b, 0x70, 0x02, 0xde, 0xf6, 0x06, 0x6f, 0xfb, + 0x15, 0xd9, 0x76, 0xd2, 0x19, 0xe2, 0xc0, 0x8c, 0x5a, 0xe4, 0x2d, 0x98, 0x93, 0xdf, 0x6a, 0xd4, + 0x0b, 0xc0, 0x91, 0x2e, 0xb2, 0x8e, 0xc4, 0x14, 0x0d, 0x87, 0xb8, 0x89, 0x09, 0x57, 0xf5, 0x41, + 0xe0, 0xf6, 0x18, 0x64, 0x52, 0xe8, 0xb6, 0x7b, 0x40, 0x9d, 0xf9, 0xe6, 0xf5, 0xc2, 0x8d, 0x7a, + 0xfb, 0xfa, 0xc9, 0x71, 0xeb, 0xea, 0xf2, 0x13, 0xf8, 0xf0, 0x89, 0x28, 0xe4, 0x3e, 0x34, 0x4c, + 0xc7, 0xdf, 0x72, 0x6d, 0xcb, 0x38, 0x9a, 0x9f, 0xe2, 0x0d, 
0x7c, 0x55, 0x3e, 0x6a, 0x63, 0xf5, + 0x5e, 0x47, 0x10, 0x1e, 0x1f, 0xb7, 0xae, 0x0e, 0x4f, 0xa9, 0x0b, 0x21, 0x1d, 0x23, 0x0c, 0xb2, + 0xc9, 0x01, 0x57, 0x5c, 0x67, 0xcf, 0xea, 0xce, 0x4f, 0xf3, 0xb7, 0x71, 0x7d, 0xc4, 0x80, 0x5e, + 0xbd, 0xd7, 0x11, 0x7c, 0xed, 0x69, 0x29, 0x4e, 0xfc, 0xc5, 0x08, 0x81, 0x98, 0x30, 0xa3, 0x26, + 0xe3, 0x15, 0x5b, 0xb7, 0x7a, 0xfe, 0xfc, 0x0c, 0x1f, 0xbc, 0x3f, 0x36, 0x02, 0x13, 0xe3, 0xcc, + 0xed, 0x4b, 0xf2, 0x51, 0x66, 0x12, 0xc5, 0x3e, 0xa6, 0x30, 0xaf, 0xbc, 0x09, 0x2f, 0x0e, 0xcd, + 0x0d, 0x64, 0x0e, 0x4a, 0x07, 0xf4, 0x88, 0x4f, 0x7d, 0x0d, 0x64, 0x3f, 0xc9, 0x45, 0xa8, 0x1c, + 0xea, 0xf6, 0x80, 0xce, 0x17, 0x79, 0x99, 0xf8, 0xf3, 0x33, 0xc5, 0x37, 0x0a, 0xda, 0x5f, 0x2f, + 0xc1, 0x94, 0x9a, 0x71, 0x3a, 0x96, 0x73, 0x40, 0xde, 0x86, 0x92, 0xed, 0x76, 0xe5, 0xbc, 0xf9, + 0x73, 0x63, 0xcf, 0x62, 0x1b, 0x6e, 0xb7, 0x5d, 0x3b, 0x39, 0x6e, 0x95, 0x36, 0xdc, 0x2e, 0x32, + 0x44, 0x62, 0x40, 0xe5, 0x40, 0xdf, 0x3b, 0xd0, 0x79, 0x1b, 0x9a, 0x4b, 0xed, 0xb1, 0xa1, 0xef, + 0x32, 0x14, 0xd6, 0xd6, 0x76, 0xe3, 0xe4, 0xb8, 0x55, 0xe1, 0x7f, 0x51, 0x60, 0x13, 0x17, 0x1a, + 0xbb, 0xb6, 0x6e, 0x1c, 0xec, 0xbb, 0x36, 0x9d, 0x2f, 0xe5, 0x14, 0xd4, 0x56, 0x48, 0xe2, 0x35, + 0x87, 0x7f, 0x31, 0x92, 0x41, 0x0c, 0xa8, 0x0e, 0x4c, 0xdf, 0x72, 0x0e, 0xe4, 0x1c, 0xf8, 0xe6, + 0xd8, 0xd2, 0x76, 0x56, 0xf9, 0x33, 0xc1, 0xc9, 0x71, 0xab, 0x2a, 0x7e, 0xa3, 0x84, 0xd6, 0xfe, + 0xf7, 0x14, 0xcc, 0xa8, 0x97, 0xf4, 0x80, 0x7a, 0x01, 0x7d, 0x44, 0xae, 0x43, 0xd9, 0x61, 0x9f, + 0x26, 0x7f, 0xc9, 0xed, 0x29, 0x39, 0x5c, 0xca, 0xfc, 0x93, 0xe4, 0x14, 0xd6, 0x32, 0x31, 0x54, + 0x64, 0x87, 0x8f, 0xdf, 0xb2, 0x0e, 0x87, 0x11, 0x2d, 0x13, 0xbf, 0x51, 0x42, 0x93, 0x77, 0xa1, + 0xcc, 0x1f, 0x5e, 0x74, 0xf5, 0x57, 0xc7, 0x17, 0xc1, 0x1e, 0xbd, 0xce, 0x9e, 0x80, 0x3f, 0x38, + 0x07, 0x65, 0x43, 0x71, 0x60, 0xee, 0xc9, 0x8e, 0xfd, 0xb9, 0x1c, 0x1d, 0xbb, 0x26, 0x86, 0xe2, + 0xce, 0xea, 0x1a, 0x32, 0x44, 0xf2, 0x17, 0x0b, 0xf0, 0xa2, 0xe1, 0x3a, 0x81, 0xce, 0xf4, 0x0c, + 
0xb5, 0xc8, 0xce, 0x57, 0xb8, 0x9c, 0x3b, 0x63, 0xcb, 0x59, 0x49, 0x23, 0xb6, 0x5f, 0x62, 0x6b, + 0xc6, 0x50, 0x31, 0x0e, 0xcb, 0x26, 0x7f, 0xb9, 0x00, 0x2f, 0xb1, 0xb9, 0x7c, 0x88, 0x99, 0xaf, + 0x40, 0x93, 0x6d, 0xd5, 0xe5, 0x93, 0xe3, 0xd6, 0x4b, 0xeb, 0x59, 0xc2, 0x30, 0xbb, 0x0d, 0xac, + 0x75, 0x17, 0xf4, 0x61, 0xb5, 0x84, 0xaf, 0x6e, 0xcd, 0xa5, 0x8d, 0x49, 0xaa, 0x3a, 0xed, 0xcf, + 0xc8, 0xa1, 0x9c, 0xa5, 0xd9, 0x61, 0x56, 0x2b, 0xc8, 0x4d, 0xa8, 0x1d, 0xba, 0xf6, 0xa0, 0x47, + 0xfd, 0xf9, 0x3a, 0x9f, 0x62, 0xaf, 0x64, 0x4d, 0xb1, 0x0f, 0x38, 0x4b, 0x7b, 0x56, 0xc2, 0xd7, + 0xc4, 0x7f, 0x1f, 0x55, 0x5d, 0x62, 0x41, 0xd5, 0xb6, 0x7a, 0x56, 0xe0, 0xf3, 0x85, 0xb3, 0xb9, + 0x74, 0x73, 0xec, 0xc7, 0x12, 0x9f, 0xe8, 0x06, 0x07, 0x13, 0x5f, 0x8d, 0xf8, 0x8d, 0x52, 0x00, + 0x9b, 0x0a, 0x7d, 0x43, 0xb7, 0xc5, 0xc2, 0xda, 0x5c, 0xfa, 0xda, 0xf8, 0x9f, 0x0d, 0x43, 0x69, + 0x4f, 0xcb, 0x67, 0xaa, 0xf0, 0xbf, 0x28, 0xb0, 0xc9, 0x2f, 0xc0, 0x4c, 0xe2, 0x6d, 0xfa, 0xf3, + 0x4d, 0xde, 0x3b, 0xaf, 0x64, 0xf5, 0x4e, 0xc8, 0x15, 0xad, 0x3c, 0x89, 0x11, 0xe2, 0x63, 0x0a, + 0x8c, 0xdc, 0x85, 0xba, 0x6f, 0x99, 0xd4, 0xd0, 0x3d, 0x7f, 0x7e, 0xea, 0x34, 0xc0, 0x73, 0x12, + 0xb8, 0xde, 0x91, 0xd5, 0x30, 0x04, 0x20, 0x0b, 0x00, 0x7d, 0xdd, 0x0b, 0x2c, 0xa1, 0xa8, 0x4e, + 0x73, 0xa5, 0x69, 0xe6, 0xe4, 0xb8, 0x05, 0x5b, 0x61, 0x29, 0xc6, 0x38, 0x18, 0x3f, 0xab, 0xbb, + 0xee, 0xf4, 0x07, 0x81, 0x58, 0x58, 0x1b, 0x82, 0xbf, 0x13, 0x96, 0x62, 0x8c, 0x83, 0xfc, 0x56, + 0x01, 0x3e, 0x13, 0xfd, 0x1d, 0xfe, 0xc8, 0x66, 0x27, 0xfe, 0x91, 0xb5, 0x4e, 0x8e, 0x5b, 0x9f, + 0xe9, 0x8c, 0x16, 0x89, 0x4f, 0x6a, 0x0f, 0xf9, 0xb0, 0x00, 0x33, 0x83, 0xbe, 0xa9, 0x07, 0xb4, + 0x13, 0xb0, 0x1d, 0x4f, 0xf7, 0x68, 0x7e, 0x8e, 0x37, 0xf1, 0xd6, 0xf8, 0xb3, 0x60, 0x02, 0x2e, + 0x7a, 0xcd, 0xc9, 0x72, 0x4c, 0x89, 0xd5, 0xde, 0x86, 0xe9, 0xe5, 0x41, 0xb0, 0xef, 0x7a, 0xd6, + 0x07, 0x5c, 0xfd, 0x27, 0x6b, 0x50, 0x09, 0xb8, 0x1a, 0x27, 0x34, 0x84, 0xcf, 0x67, 0xbd, 0x74, + 0xa1, 0x52, 0xdf, 0xa5, 0x47, 0x4a, 
0x2f, 0x11, 0x2b, 0xb5, 0x50, 0xeb, 0x44, 0x75, 0xed, 0x4f, + 0x17, 0xa0, 0xd6, 0xd6, 0x8d, 0x03, 0x77, 0x6f, 0x8f, 0xbc, 0x03, 0x75, 0xcb, 0x09, 0xa8, 0x77, + 0xa8, 0xdb, 0x12, 0x76, 0x21, 0x06, 0x1b, 0x6e, 0x08, 0xa3, 0xc7, 0x63, 0xbb, 0x2f, 0x26, 0x68, + 0x75, 0x20, 0x77, 0x2d, 0x5c, 0x33, 0x5e, 0x97, 0x18, 0x18, 0xa2, 0x91, 0x16, 0x54, 0xfc, 0x80, + 0xf6, 0x7d, 0xbe, 0x06, 0x4e, 0x8b, 0x66, 0x74, 0x58, 0x01, 0x8a, 0x72, 0xed, 0xaf, 0x15, 0xa0, + 0xd1, 0xd6, 0x7d, 0xcb, 0x60, 0x4f, 0x49, 0x56, 0xa0, 0x3c, 0xf0, 0xa9, 0x77, 0xb6, 0x67, 0xe3, + 0xcb, 0xd6, 0x8e, 0x4f, 0x3d, 0xe4, 0x95, 0xc9, 0x7d, 0xa8, 0xf7, 0x75, 0xdf, 0x7f, 0xe8, 0x7a, + 0xa6, 0x5c, 0x7a, 0x4f, 0x09, 0x24, 0xb6, 0x09, 0xb2, 0x2a, 0x86, 0x20, 0x5a, 0x13, 0x22, 0xdd, + 0x43, 0xfb, 0xbd, 0x02, 0x5c, 0x68, 0x0f, 0xf6, 0xf6, 0xa8, 0x27, 0xb5, 0x62, 0xa9, 0x6f, 0x52, + 0xa8, 0x78, 0xd4, 0xb4, 0x7c, 0xd9, 0xf6, 0xd5, 0xb1, 0x07, 0x0a, 0x32, 0x14, 0xa9, 0xde, 0xf2, + 0xfe, 0xe2, 0x05, 0x28, 0xd0, 0xc9, 0x00, 0x1a, 0xef, 0x53, 0xb6, 0x1b, 0xa7, 0x7a, 0x4f, 0x3e, + 0xdd, 0xed, 0xb1, 0x45, 0xdd, 0xa1, 0x41, 0x87, 0x23, 0xc5, 0xb5, 0xe9, 0xb0, 0x10, 0x23, 0x49, + 0xda, 0xef, 0x54, 0x60, 0x6a, 0xc5, 0xed, 0xed, 0x5a, 0x0e, 0x35, 0x6f, 0x9a, 0x5d, 0x4a, 0xde, + 0x83, 0x32, 0x35, 0xbb, 0x54, 0x3e, 0xed, 0xf8, 0x8a, 0x07, 0x03, 0x8b, 0xd4, 0x27, 0xf6, 0x0f, + 0x39, 0x30, 0xd9, 0x80, 0x99, 0x3d, 0xcf, 0xed, 0x89, 0xb9, 0x7c, 0xfb, 0xa8, 0x2f, 0x75, 0xe7, + 0xf6, 0x8f, 0xa9, 0x0f, 0x67, 0x2d, 0x41, 0x7d, 0x7c, 0xdc, 0x82, 0xe8, 0x1f, 0xa6, 0xea, 0x92, + 0x77, 0x60, 0x3e, 0x2a, 0x09, 0x27, 0xb5, 0x15, 0xb6, 0x9d, 0xe1, 0xba, 0x53, 0xa5, 0x7d, 0xf5, + 0xe4, 0xb8, 0x35, 0xbf, 0x36, 0x82, 0x07, 0x47, 0xd6, 0x66, 0x53, 0xc5, 0x5c, 0x44, 0x14, 0x0b, + 0x8d, 0x54, 0x99, 0x26, 0xb4, 0x82, 0xf1, 0x7d, 0xdf, 0x5a, 0x4a, 0x04, 0x0e, 0x09, 0x25, 0x6b, + 0x30, 0x15, 0xb8, 0xb1, 0xfe, 0xaa, 0xf0, 0xfe, 0xd2, 0x94, 0xa1, 0x62, 0xdb, 0x1d, 0xd9, 0x5b, + 0x89, 0x7a, 0x04, 0xe1, 0x92, 0xfa, 0x9f, 0xea, 0xa9, 0x2a, 0xef, 0xa9, 
0x2b, 0x27, 0xc7, 0xad, + 0x4b, 0xdb, 0x99, 0x1c, 0x38, 0xa2, 0x26, 0xf9, 0x95, 0x02, 0xcc, 0x28, 0x92, 0xec, 0xa3, 0xda, + 0x24, 0xfb, 0x88, 0xb0, 0x11, 0xb1, 0x9d, 0x10, 0x80, 0x29, 0x81, 0xda, 0xf7, 0x6b, 0xd0, 0x08, + 0xa7, 0x7a, 0xf2, 0x39, 0xa8, 0x70, 0x13, 0x84, 0xd4, 0xe0, 0xc3, 0x35, 0x9c, 0x5b, 0x2a, 0x50, + 0xd0, 0xc8, 0xe7, 0xa1, 0x66, 0xb8, 0xbd, 0x9e, 0xee, 0x98, 0xdc, 0xac, 0xd4, 0x68, 0x37, 0x99, + 0xea, 0xb2, 0x22, 0x8a, 0x50, 0xd1, 0xc8, 0x55, 0x28, 0xeb, 0x5e, 0x57, 0x58, 0x78, 0x1a, 0x62, + 0x3e, 0x5a, 0xf6, 0xba, 0x3e, 0xf2, 0x52, 0xf2, 0x15, 0x28, 0x51, 0xe7, 0x70, 0xbe, 0x3c, 0x5a, + 0x37, 0xba, 0xe9, 0x1c, 0x3e, 0xd0, 0xbd, 0x76, 0x53, 0xb6, 0xa1, 0x74, 0xd3, 0x39, 0x44, 0x56, + 0x87, 0x6c, 0x40, 0x8d, 0x3a, 0x87, 0xec, 0xdd, 0x4b, 0xd3, 0xcb, 0x67, 0x47, 0x54, 0x67, 0x2c, + 0x72, 0x9b, 0x10, 0x6a, 0x58, 0xb2, 0x18, 0x15, 0x04, 0xf9, 0x06, 0x4c, 0x09, 0x65, 0x6b, 0x93, + 0xbd, 0x13, 0x7f, 0xbe, 0xca, 0x21, 0x5b, 0xa3, 0xb5, 0x35, 0xce, 0x17, 0x99, 0xba, 0x62, 0x85, + 0x3e, 0x26, 0xa0, 0xc8, 0x37, 0xa0, 0xa1, 0x76, 0xc6, 0xea, 0xcd, 0x66, 0x5a, 0x89, 0xd4, 0x76, + 0x1a, 0xe9, 0xb7, 0x06, 0x96, 0x47, 0x7b, 0xd4, 0x09, 0xfc, 0xf6, 0x8b, 0xca, 0x6e, 0xa0, 0xa8, + 0x3e, 0x46, 0x68, 0x64, 0x77, 0xd8, 0xdc, 0x25, 0x6c, 0x35, 0x9f, 0x1b, 0x31, 0xab, 0x8f, 0x61, + 0xeb, 0xfa, 0x26, 0xcc, 0x86, 0xf6, 0x28, 0x69, 0xd2, 0x10, 0xd6, 0x9b, 0x2f, 0xb1, 0xea, 0xeb, + 0x49, 0xd2, 0xe3, 0xe3, 0xd6, 0x2b, 0x19, 0x46, 0x8d, 0x88, 0x01, 0xd3, 0x60, 0xe4, 0x03, 0x98, + 0xf1, 0xa8, 0x6e, 0x5a, 0x0e, 0xf5, 0xfd, 0x2d, 0xcf, 0xdd, 0xcd, 0xaf, 0x79, 0x72, 0x14, 0x31, + 0xec, 0x31, 0x81, 0x8c, 0x29, 0x49, 0xe4, 0x21, 0x4c, 0xdb, 0xd6, 0x21, 0x8d, 0x44, 0x37, 0x27, + 0x22, 0xfa, 0xc5, 0x93, 0xe3, 0xd6, 0xf4, 0x46, 0x1c, 0x18, 0x93, 0x72, 0x98, 0xa6, 0xd2, 0x77, + 0xbd, 0x40, 0xa9, 0xa7, 0x9f, 0x7d, 0xa2, 0x7a, 0xba, 0xe5, 0x7a, 0x41, 0xf4, 0x11, 0xb2, 0x7f, + 0x3e, 0x8a, 0xea, 0xda, 0xdf, 0xaa, 0xc0, 0xf0, 0x26, 0x2e, 0x39, 0xe2, 0x0a, 0x93, 0x1e, 0x71, + 0xe9, 0xd1, 
0x20, 0xd6, 0x9e, 0x37, 0x64, 0xb5, 0x09, 0x8c, 0x88, 0x8c, 0x51, 0x5d, 0x9a, 0xf4, + 0xa8, 0x7e, 0x6e, 0x26, 0x9e, 0xe1, 0xe1, 0x5f, 0xfd, 0xf8, 0x86, 0x7f, 0xed, 0xd9, 0x0c, 0x7f, + 0xed, 0x7b, 0x65, 0x98, 0x59, 0xd5, 0x69, 0xcf, 0x75, 0x9e, 0xba, 0x8f, 0x2f, 0x3c, 0x17, 0xfb, + 0xf8, 0x1b, 0x50, 0xf7, 0x68, 0xdf, 0xb6, 0x0c, 0x5d, 0xa8, 0xeb, 0xd2, 0x6e, 0x8e, 0xb2, 0x0c, + 0x43, 0xea, 0x08, 0xfb, 0x4d, 0xe9, 0xb9, 0xb4, 0xdf, 0x94, 0x3f, 0x7e, 0xfb, 0x8d, 0xf6, 0x2b, + 0x45, 0xe0, 0xaa, 0x2d, 0xb9, 0x0e, 0x65, 0xa6, 0xb6, 0xa5, 0xad, 0x86, 0xfc, 0x6b, 0xe1, 0x14, + 0x72, 0x05, 0x8a, 0x81, 0x2b, 0xa7, 0x1b, 0x90, 0xf4, 0xe2, 0xb6, 0x8b, 0xc5, 0xc0, 0x25, 0x1f, + 0x00, 0x18, 0xae, 0x63, 0x5a, 0xca, 0x9d, 0x94, 0xef, 0xc1, 0xd6, 0x5c, 0xef, 0xa1, 0xee, 0x99, + 0x2b, 0x21, 0xa2, 0xd8, 0xc1, 0x47, 0xff, 0x31, 0x26, 0x8d, 0xbc, 0x09, 0x55, 0xd7, 0x59, 0x1b, + 0xd8, 0x36, 0xef, 0xd0, 0x46, 0xfb, 0x8b, 0x27, 0xc7, 0xad, 0xea, 0x7d, 0x5e, 0xf2, 0xf8, 0xb8, + 0x75, 0x59, 0xec, 0x88, 0xd8, 0xbf, 0xb7, 0x3d, 0x2b, 0xb0, 0x9c, 0x6e, 0xb8, 0xa1, 0x95, 0xd5, + 0xb4, 0x5f, 0x2d, 0x40, 0x73, 0xcd, 0x7a, 0x44, 0xcd, 0xb7, 0x2d, 0xc7, 0x74, 0x1f, 0x12, 0x84, + 0xaa, 0x4d, 0x9d, 0x6e, 0xb0, 0x3f, 0xe6, 0x8e, 0x53, 0xd8, 0x75, 0x38, 0x02, 0x4a, 0x24, 0xb2, + 0x08, 0x0d, 0xb1, 0x5f, 0xb1, 0x9c, 0x2e, 0xef, 0xc3, 0x7a, 0x34, 0xd3, 0x77, 0x14, 0x01, 0x23, + 0x1e, 0xed, 0x08, 0x5e, 0x1c, 0xea, 0x06, 0x62, 0x42, 0x39, 0xd0, 0xbb, 0x6a, 0x51, 0x59, 0x1b, + 0xbb, 0x83, 0xb7, 0xf5, 0x6e, 0xac, 0x73, 0xb9, 0x56, 0xb8, 0xad, 0x33, 0xad, 0x90, 0xa1, 0x6b, + 0x7f, 0x50, 0x80, 0xfa, 0xda, 0xc0, 0x31, 0xf8, 0xa6, 0xfe, 0xe9, 0xd6, 0x64, 0xa5, 0x62, 0x16, + 0x33, 0x55, 0xcc, 0x01, 0x54, 0x0f, 0x1e, 0x86, 0x2a, 0x68, 0x73, 0x69, 0x73, 0xfc, 0x51, 0x21, + 0x9b, 0xb4, 0x70, 0x97, 0xe3, 0x09, 0x67, 0xe7, 0x8c, 0x6c, 0x50, 0xf5, 0xee, 0xdb, 0x5c, 0xa8, + 0x14, 0x76, 0xe5, 0x2b, 0xd0, 0x8c, 0xb1, 0x9d, 0xc9, 0xef, 0xf1, 0xb7, 0xcb, 0x50, 0xbd, 0xd5, + 0xe9, 0x2c, 0x6f, 0xad, 0x93, 0xd7, 0xa0, 0x29, 
0xfd, 0x60, 0xf7, 0xa2, 0x3e, 0x08, 0xdd, 0xa0, + 0x9d, 0x88, 0x84, 0x71, 0x3e, 0xa6, 0xc0, 0x7b, 0x54, 0xb7, 0x7b, 0xf2, 0x63, 0x09, 0x75, 0x07, + 0x64, 0x85, 0x28, 0x68, 0x44, 0x87, 0x99, 0x81, 0x4f, 0x3d, 0xd6, 0x85, 0x62, 0xbf, 0x2f, 0x3f, + 0x9b, 0x53, 0x5a, 0x04, 0xf8, 0x02, 0xb3, 0x93, 0x00, 0xc0, 0x14, 0x20, 0x79, 0x03, 0xea, 0xfa, + 0x20, 0xd8, 0xe7, 0x5b, 0x2e, 0xf1, 0x6d, 0x5c, 0xe5, 0x6e, 0x42, 0x59, 0xf6, 0xf8, 0xb8, 0x35, + 0x75, 0x17, 0xdb, 0xaf, 0xa9, 0xff, 0x18, 0x72, 0xb3, 0xc6, 0x29, 0x1b, 0x83, 0x6c, 0x5c, 0xe5, + 0xcc, 0x8d, 0xdb, 0x4a, 0x00, 0x60, 0x0a, 0x90, 0xbc, 0x0b, 0x53, 0x07, 0xf4, 0x28, 0xd0, 0x77, + 0xa5, 0x80, 0xea, 0x59, 0x04, 0xcc, 0x31, 0xa5, 0xff, 0x6e, 0xac, 0x3a, 0x26, 0xc0, 0x88, 0x0f, + 0x17, 0x0f, 0xa8, 0xb7, 0x4b, 0x3d, 0x57, 0xda, 0x2b, 0xa4, 0x90, 0xda, 0x59, 0x84, 0xcc, 0x9f, + 0x1c, 0xb7, 0x2e, 0xde, 0xcd, 0x80, 0xc1, 0x4c, 0x70, 0xed, 0xff, 0x14, 0x61, 0xf6, 0x96, 0x08, + 0x44, 0x70, 0x3d, 0xa1, 0x79, 0x90, 0xcb, 0x50, 0xf2, 0xfa, 0x03, 0x3e, 0x72, 0x4a, 0xc2, 0xd5, + 0x80, 0x5b, 0x3b, 0xc8, 0xca, 0xc8, 0x3b, 0x50, 0x37, 0xe5, 0x94, 0x21, 0xcd, 0x25, 0x63, 0x99, + 0xb6, 0xd4, 0x3f, 0x0c, 0xd1, 0xd8, 0xde, 0xb0, 0xe7, 0x77, 0x3b, 0xd6, 0x07, 0x54, 0x5a, 0x10, + 0xf8, 0xde, 0x70, 0x53, 0x14, 0xa1, 0xa2, 0xb1, 0x55, 0xf5, 0x80, 0x1e, 0x89, 0xfd, 0x73, 0x39, + 0x5a, 0x55, 0xef, 0xca, 0x32, 0x0c, 0xa9, 0xa4, 0xa5, 0x3e, 0x16, 0x36, 0x0a, 0xca, 0xc2, 0xf6, + 0xf3, 0x80, 0x15, 0xc8, 0xef, 0x86, 0x4d, 0x99, 0xef, 0x5b, 0x41, 0x40, 0x3d, 0xf9, 0x1a, 0xc7, + 0x9a, 0x32, 0xef, 0x70, 0x04, 0x94, 0x48, 0xe4, 0x27, 0xa0, 0xc1, 0xc1, 0xdb, 0xb6, 0xbb, 0xcb, + 0x5f, 0x5c, 0x43, 0x58, 0x81, 0x1e, 0xa8, 0x42, 0x8c, 0xe8, 0xda, 0x1f, 0x16, 0xe1, 0xd2, 0x2d, + 0x1a, 0x08, 0xad, 0x66, 0x95, 0xf6, 0x6d, 0xf7, 0x88, 0xe9, 0xd3, 0x48, 0xbf, 0x45, 0xde, 0x02, + 0xb0, 0xfc, 0xdd, 0xce, 0xa1, 0xc1, 0xbf, 0x03, 0xf1, 0x0d, 0x5f, 0x97, 0x9f, 0x24, 0xac, 0x77, + 0xda, 0x92, 0xf2, 0x38, 0xf1, 0x0f, 0x63, 0x75, 0xa2, 0x0d, 0x79, 0xf1, 0x09, 0x1b, 
0xf2, 0x0e, + 0x40, 0x3f, 0xd2, 0xca, 0x4b, 0x9c, 0xf3, 0xa7, 0x95, 0x98, 0xb3, 0x28, 0xe4, 0x31, 0x98, 0x3c, + 0x7a, 0xb2, 0x03, 0x73, 0x26, 0xdd, 0xd3, 0x07, 0x76, 0x10, 0xee, 0x24, 0xe4, 0x47, 0x7c, 0xfa, + 0xcd, 0x48, 0x18, 0x24, 0xb1, 0x9a, 0x42, 0xc2, 0x21, 0x6c, 0xed, 0xef, 0x94, 0xe0, 0xca, 0x2d, + 0x1a, 0x84, 0x36, 0x3a, 0x39, 0x3b, 0x76, 0xfa, 0xd4, 0x60, 0x6f, 0xe1, 0xc3, 0x02, 0x54, 0x6d, + 0x7d, 0x97, 0xda, 0x6c, 0xf5, 0x62, 0x4f, 0xf3, 0xde, 0xd8, 0x0b, 0xc1, 0x68, 0x29, 0x0b, 0x1b, + 0x5c, 0x42, 0x6a, 0x69, 0x10, 0x85, 0x28, 0xc5, 0xb3, 0x49, 0xdd, 0xb0, 0x07, 0x7e, 0x20, 0x76, + 0x76, 0x52, 0x9f, 0x0c, 0x27, 0xf5, 0x95, 0x88, 0x84, 0x71, 0x3e, 0xb2, 0x04, 0x60, 0xd8, 0x16, + 0x75, 0x02, 0x5e, 0x4b, 0x7c, 0x57, 0x44, 0xbd, 0xdf, 0x95, 0x90, 0x82, 0x31, 0x2e, 0x26, 0xaa, + 0xe7, 0x3a, 0x56, 0xe0, 0x0a, 0x51, 0xe5, 0xa4, 0xa8, 0xcd, 0x88, 0x84, 0x71, 0x3e, 0x5e, 0x8d, + 0x06, 0x9e, 0x65, 0xf8, 0xbc, 0x5a, 0x25, 0x55, 0x2d, 0x22, 0x61, 0x9c, 0x8f, 0xad, 0x79, 0xb1, + 0xe7, 0x3f, 0xd3, 0x9a, 0xf7, 0x9b, 0x0d, 0xb8, 0x96, 0xe8, 0xd6, 0x40, 0x0f, 0xe8, 0xde, 0xc0, + 0xee, 0xd0, 0x40, 0xbd, 0xc0, 0x31, 0xd7, 0xc2, 0x3f, 0x17, 0xbd, 0x77, 0x11, 0xfe, 0x64, 0x4c, + 0xe6, 0xbd, 0x0f, 0x35, 0xf0, 0x54, 0xef, 0x7e, 0x11, 0x1a, 0x8e, 0x1e, 0xf8, 0xfc, 0xc3, 0x95, + 0xdf, 0x68, 0xa8, 0x86, 0xdd, 0x53, 0x04, 0x8c, 0x78, 0xc8, 0x16, 0x5c, 0x94, 0x5d, 0x7c, 0xf3, + 0x11, 0xdb, 0xf3, 0x53, 0x4f, 0xd4, 0x95, 0xcb, 0xa9, 0xac, 0x7b, 0x71, 0x33, 0x83, 0x07, 0x33, + 0x6b, 0x92, 0x4d, 0xb8, 0x60, 0x88, 0x90, 0x10, 0x6a, 0xbb, 0xba, 0xa9, 0x00, 0x85, 0x49, 0x34, + 0xdc, 0x1a, 0xad, 0x0c, 0xb3, 0x60, 0x56, 0xbd, 0xf4, 0x68, 0xae, 0x8e, 0x35, 0x9a, 0x6b, 0xe3, + 0x8c, 0xe6, 0xfa, 0x78, 0xa3, 0xb9, 0x71, 0xba, 0xd1, 0xcc, 0x7a, 0x9e, 0x8d, 0x23, 0xea, 0x31, + 0xf5, 0x44, 0xac, 0xb0, 0xb1, 0x88, 0xa3, 0xb0, 0xe7, 0x3b, 0x19, 0x3c, 0x98, 0x59, 0x93, 0xec, + 0xc2, 0x15, 0x51, 0x7e, 0xd3, 0x31, 0xbc, 0xa3, 0x3e, 0x5b, 0x78, 0x62, 0xb8, 0xcd, 0x84, 0x4d, + 0xfa, 0x4a, 0x67, 0x24, 
0x27, 0x3e, 0x01, 0x85, 0xfc, 0x2c, 0x4c, 0x8b, 0xb7, 0xb4, 0xa9, 0xf7, + 0x39, 0xac, 0x88, 0x3f, 0x7a, 0x49, 0xc2, 0x4e, 0xaf, 0xc4, 0x89, 0x98, 0xe4, 0x25, 0xcb, 0x30, + 0xdb, 0x3f, 0x34, 0xd8, 0xcf, 0xf5, 0xbd, 0x7b, 0x94, 0x9a, 0xd4, 0xe4, 0x0e, 0xcf, 0x46, 0xfb, + 0x65, 0x65, 0xdd, 0xd9, 0x4a, 0x92, 0x31, 0xcd, 0x4f, 0xde, 0x80, 0x29, 0x3f, 0xd0, 0xbd, 0x40, + 0x1a, 0x82, 0xe7, 0x67, 0x44, 0x7c, 0x96, 0xb2, 0x93, 0x76, 0x62, 0x34, 0x4c, 0x70, 0x66, 0xae, + 0x17, 0xb3, 0xe7, 0xb7, 0x5e, 0xe4, 0x99, 0xad, 0xfe, 0x49, 0x11, 0xae, 0xdf, 0xa2, 0xc1, 0xa6, + 0xeb, 0x48, 0x33, 0x7a, 0xd6, 0xb2, 0x7f, 0x2a, 0x2b, 0x7a, 0x72, 0xd1, 0x2e, 0x4e, 0x74, 0xd1, + 0x2e, 0x4d, 0x68, 0xd1, 0x2e, 0x9f, 0xe3, 0xa2, 0xfd, 0xf7, 0x8a, 0xf0, 0x72, 0xa2, 0x27, 0xb7, + 0x5c, 0x53, 0x4d, 0xf8, 0x9f, 0x76, 0xe0, 0x29, 0x3a, 0xf0, 0xb1, 0xd0, 0x3b, 0xb9, 0x23, 0x34, + 0xa5, 0xf1, 0x7c, 0x37, 0xad, 0xf1, 0xbc, 0x9b, 0x67, 0xe5, 0xcb, 0x90, 0x70, 0xaa, 0x15, 0xef, + 0x0e, 0x10, 0x4f, 0xba, 0x6d, 0x23, 0x73, 0xb6, 0x54, 0x7a, 0xc2, 0x00, 0x50, 0x1c, 0xe2, 0xc0, + 0x8c, 0x5a, 0xa4, 0x03, 0x2f, 0xf9, 0xd4, 0x09, 0x2c, 0x87, 0xda, 0x49, 0x38, 0xa1, 0x0d, 0xbd, + 0x22, 0xe1, 0x5e, 0xea, 0x64, 0x31, 0x61, 0x76, 0xdd, 0x3c, 0xf3, 0xc0, 0xbf, 0x00, 0xae, 0x72, + 0x8a, 0xae, 0x99, 0x98, 0xc6, 0xf2, 0x61, 0x5a, 0x63, 0x79, 0x2f, 0xff, 0x7b, 0x1b, 0x4f, 0x5b, + 0x59, 0x02, 0xe0, 0x6f, 0x21, 0xae, 0xae, 0x84, 0x8b, 0x34, 0x86, 0x14, 0x8c, 0x71, 0xb1, 0x05, + 0x48, 0xf5, 0x73, 0x5c, 0x53, 0x09, 0x17, 0xa0, 0x4e, 0x9c, 0x88, 0x49, 0xde, 0x91, 0xda, 0x4e, + 0x65, 0x6c, 0x6d, 0xe7, 0x0e, 0x90, 0x84, 0xe1, 0x51, 0xe0, 0x55, 0x93, 0xf1, 0xc7, 0xeb, 0x43, + 0x1c, 0x98, 0x51, 0x6b, 0xc4, 0x50, 0xae, 0x4d, 0x76, 0x28, 0xd7, 0xc7, 0x1f, 0xca, 0xe4, 0x3d, + 0xb8, 0xcc, 0x45, 0xc9, 0xfe, 0x49, 0x02, 0x0b, 0xbd, 0xe7, 0xb3, 0x12, 0xf8, 0x32, 0x8e, 0x62, + 0xc4, 0xd1, 0x18, 0xec, 0xfd, 0x18, 0x1e, 0x35, 0x99, 0x70, 0xdd, 0x1e, 0xad, 0x13, 0xad, 0x64, + 0xf0, 0x60, 0x66, 0x4d, 0x36, 0xc4, 0x02, 0x36, 0x0c, 0xf5, 
0x5d, 0x9b, 0x9a, 0x32, 0xfe, 0x3a, + 0x1c, 0x62, 0xdb, 0x1b, 0x1d, 0x49, 0xc1, 0x18, 0x57, 0x96, 0x9a, 0x32, 0x75, 0x46, 0x35, 0xe5, + 0x16, 0xb7, 0xd2, 0xef, 0x25, 0xb4, 0x21, 0xa9, 0xeb, 0x84, 0x11, 0xf5, 0x2b, 0x69, 0x06, 0x1c, + 0xae, 0xc3, 0xb5, 0x44, 0xc3, 0xb3, 0xfa, 0x81, 0x9f, 0xc4, 0x9a, 0x49, 0x69, 0x89, 0x19, 0x3c, + 0x98, 0x59, 0x93, 0xe9, 0xe7, 0xfb, 0x54, 0xb7, 0x83, 0xfd, 0x24, 0xe0, 0x6c, 0x52, 0x3f, 0xbf, + 0x3d, 0xcc, 0x82, 0x59, 0xf5, 0x32, 0x17, 0xa4, 0xb9, 0xe7, 0x53, 0xad, 0xfa, 0x4e, 0x09, 0x2e, + 0xdf, 0xa2, 0x41, 0x18, 0x9a, 0xf6, 0xa9, 0x19, 0xe5, 0x63, 0x30, 0xa3, 0xfc, 0x46, 0x05, 0x2e, + 0xdc, 0xa2, 0xc1, 0x90, 0x36, 0xf6, 0xff, 0x69, 0xf7, 0x6f, 0xc2, 0x85, 0x28, 0x1a, 0xb2, 0x13, + 0xb8, 0x9e, 0x58, 0xcb, 0x53, 0xbb, 0xe5, 0xce, 0x30, 0x0b, 0x66, 0xd5, 0x23, 0xdf, 0x80, 0x97, + 0xf9, 0x52, 0xef, 0x74, 0x85, 0x7d, 0x56, 0x18, 0x13, 0x62, 0xe7, 0x79, 0x5a, 0x12, 0xf2, 0xe5, + 0x4e, 0x36, 0x1b, 0x8e, 0xaa, 0x4f, 0xbe, 0x0d, 0x53, 0x7d, 0xab, 0x4f, 0x6d, 0xcb, 0xe1, 0xfa, + 0x59, 0xee, 0x20, 0xa2, 0xad, 0x18, 0x58, 0xb4, 0x81, 0x8b, 0x97, 0x62, 0x42, 0x60, 0xe6, 0x48, + 0xad, 0x9f, 0xe3, 0x48, 0xfd, 0x1f, 0x45, 0xa8, 0xdd, 0xf2, 0xdc, 0x41, 0xbf, 0x7d, 0x44, 0xba, + 0x50, 0x7d, 0xc8, 0x9d, 0x67, 0xd2, 0x35, 0x35, 0xfe, 0x89, 0x02, 0xe1, 0x83, 0x8b, 0x54, 0x22, + 0xf1, 0x1f, 0x25, 0x3c, 0x1b, 0xc4, 0x07, 0xf4, 0x88, 0x9a, 0xd2, 0x87, 0x16, 0x0e, 0xe2, 0xbb, + 0xac, 0x10, 0x05, 0x8d, 0xf4, 0x60, 0x56, 0xb7, 0x6d, 0xf7, 0x21, 0x35, 0x37, 0xf4, 0x80, 0xfb, + 0xbd, 0xa5, 0x6f, 0xe5, 0xac, 0x66, 0x69, 0x1e, 0xcc, 0xb0, 0x9c, 0x84, 0xc2, 0x34, 0x36, 0x79, + 0x1f, 0x6a, 0x7e, 0xe0, 0x7a, 0x4a, 0xd9, 0x6a, 0x2e, 0xad, 0x8c, 0xff, 0xd2, 0xdb, 0x5f, 0xef, + 0x08, 0x28, 0x61, 0xb3, 0x97, 0x7f, 0x50, 0x09, 0xd0, 0x7e, 0xbd, 0x00, 0x70, 0x7b, 0x7b, 0x7b, + 0x4b, 0xba, 0x17, 0x4c, 0x28, 0xeb, 0x83, 0xd0, 0x51, 0x39, 0xbe, 0x43, 0x30, 0x11, 0xc8, 0x2b, + 0x7d, 0x78, 0x83, 0x60, 0x1f, 0x39, 0x3a, 0xf9, 0x71, 0xa8, 0x49, 0x05, 0x59, 0x76, 0x7b, 0x18, + 
0x4f, 0x21, 0x95, 0x68, 0x54, 0x74, 0xed, 0xb7, 0x8b, 0x00, 0xeb, 0xa6, 0x4d, 0x3b, 0xea, 0x10, + 0x48, 0x23, 0xd8, 0xf7, 0xa8, 0xbf, 0xef, 0xda, 0xe6, 0x98, 0xde, 0x54, 0x6e, 0xf3, 0xdf, 0x56, + 0x20, 0x18, 0xe1, 0x11, 0x13, 0xa6, 0xfc, 0x80, 0xf6, 0x55, 0x6c, 0xef, 0x98, 0x4e, 0x94, 0x39, + 0x61, 0x17, 0x89, 0x70, 0x30, 0x81, 0x4a, 0x74, 0x68, 0x5a, 0x8e, 0x21, 0x3e, 0x90, 0xf6, 0xd1, + 0x98, 0x03, 0x69, 0x96, 0xed, 0x38, 0xd6, 0x23, 0x18, 0x8c, 0x63, 0x6a, 0xbf, 0x5b, 0x84, 0x4b, + 0x5c, 0x1e, 0x6b, 0x46, 0x22, 0x82, 0x97, 0xfc, 0xc9, 0xa1, 0x03, 0xab, 0x7f, 0xfc, 0x74, 0xa2, + 0xc5, 0x79, 0xc7, 0x4d, 0x1a, 0xe8, 0x91, 0x3e, 0x17, 0x95, 0xc5, 0x4e, 0xa9, 0x0e, 0xa0, 0xec, + 0xb3, 0xf9, 0x4a, 0xf4, 0x5e, 0x67, 0xec, 0x21, 0x94, 0xfd, 0x00, 0x7c, 0xf6, 0x0a, 0xbd, 0xc6, + 0x7c, 0xd6, 0xe2, 0xe2, 0xc8, 0x2f, 0x41, 0xd5, 0x0f, 0xf4, 0x60, 0xa0, 0x3e, 0xcd, 0x9d, 0x49, + 0x0b, 0xe6, 0xe0, 0xd1, 0x3c, 0x22, 0xfe, 0xa3, 0x14, 0xaa, 0xfd, 0x6e, 0x01, 0xae, 0x64, 0x57, + 0xdc, 0xb0, 0xfc, 0x80, 0xfc, 0x89, 0xa1, 0x6e, 0x3f, 0xe5, 0x1b, 0x67, 0xb5, 0x79, 0xa7, 0x87, + 0x67, 0x1a, 0x54, 0x49, 0xac, 0xcb, 0x03, 0xa8, 0x58, 0x01, 0xed, 0xa9, 0xfd, 0xe5, 0xfd, 0x09, + 0x3f, 0x7a, 0x6c, 0x69, 0x67, 0x52, 0x50, 0x08, 0xd3, 0xbe, 0x57, 0x1c, 0xf5, 0xc8, 0x7c, 0xf9, + 0xb0, 0x93, 0x51, 0xe2, 0x77, 0xf3, 0x45, 0x89, 0x27, 0x1b, 0x34, 0x1c, 0x2c, 0xfe, 0xa7, 0x86, + 0x83, 0xc5, 0xef, 0xe7, 0x0f, 0x16, 0x4f, 0x75, 0xc3, 0xc8, 0x98, 0xf1, 0x8f, 0x4a, 0x70, 0xf5, + 0x49, 0xc3, 0x86, 0xad, 0x67, 0x72, 0x74, 0xe6, 0x5d, 0xcf, 0x9e, 0x3c, 0x0e, 0xc9, 0x12, 0x54, + 0xfa, 0xfb, 0xba, 0xaf, 0x94, 0xb2, 0xab, 0x61, 0x98, 0x21, 0x2b, 0x7c, 0xcc, 0x26, 0x0d, 0xae, + 0xcc, 0xf1, 0xbf, 0x28, 0x58, 0xd9, 0x74, 0xdc, 0xa3, 0xbe, 0x1f, 0xd9, 0x04, 0xc2, 0xe9, 0x78, + 0x53, 0x14, 0xa3, 0xa2, 0x93, 0x00, 0xaa, 0xc2, 0xc4, 0x2c, 0x57, 0xa6, 0xf1, 0x03, 0xb9, 0x32, + 0x0e, 0x16, 0x44, 0x0f, 0x25, 0xbd, 0x15, 0x52, 0x16, 0x59, 0x80, 0x72, 0x10, 0x85, 0x79, 0xab, + 0xad, 0x79, 0x39, 0x43, 0x3f, 0xe5, 
0x7c, 0x6c, 0x63, 0xef, 0xee, 0x72, 0xa3, 0xba, 0x29, 0xfd, + 0xe7, 0x96, 0xeb, 0x70, 0x85, 0xac, 0x14, 0x6d, 0xec, 0xef, 0x0f, 0x71, 0x60, 0x46, 0x2d, 0xed, + 0x5f, 0xd7, 0xe1, 0x52, 0xf6, 0x78, 0x60, 0xfd, 0x76, 0x48, 0x3d, 0x9f, 0x61, 0x17, 0x92, 0xfd, + 0xf6, 0x40, 0x14, 0xa3, 0xa2, 0x7f, 0xa2, 0x03, 0xce, 0x7e, 0xa3, 0x00, 0x97, 0x3d, 0xe9, 0x23, + 0x7a, 0x16, 0x41, 0x67, 0xaf, 0x08, 0x73, 0xc6, 0x08, 0x81, 0x38, 0xba, 0x2d, 0xe4, 0x6f, 0x14, + 0x60, 0xbe, 0x97, 0xb2, 0x73, 0x9c, 0xe3, 0x99, 0x4b, 0x7e, 0x8e, 0x62, 0x73, 0x84, 0x3c, 0x1c, + 0xd9, 0x12, 0xf2, 0x6d, 0x68, 0xf6, 0xd9, 0xb8, 0xf0, 0x03, 0xea, 0x18, 0x2a, 0x40, 0x74, 0xfc, + 0x2f, 0x69, 0x2b, 0xc2, 0x0a, 0xcf, 0x5c, 0x71, 0xfd, 0x20, 0x46, 0xc0, 0xb8, 0xc4, 0xe7, 0xfc, + 0x90, 0xe5, 0x0d, 0xa8, 0xfb, 0x34, 0x08, 0x2c, 0xa7, 0x2b, 0xf6, 0x1b, 0x0d, 0xf1, 0xad, 0x74, + 0x64, 0x19, 0x86, 0x54, 0xf2, 0x13, 0xd0, 0xe0, 0x2e, 0xa7, 0x65, 0xaf, 0xeb, 0xcf, 0x37, 0x78, + 0xb8, 0xd8, 0xb4, 0x08, 0x80, 0x93, 0x85, 0x18, 0xd1, 0xc9, 0x97, 0x60, 0x6a, 0x97, 0x7f, 0xbe, + 0xf2, 0xdc, 0xbd, 0xb0, 0x71, 0x71, 0x6d, 0xad, 0x1d, 0x2b, 0xc7, 0x04, 0x17, 0x59, 0x02, 0xa0, + 0xa1, 0x5f, 0x2e, 0x6d, 0xcf, 0x8a, 0x3c, 0x76, 0x18, 0xe3, 0x22, 0xaf, 0x40, 0x29, 0xb0, 0x7d, + 0x6e, 0xc3, 0xaa, 0x47, 0x5b, 0xd0, 0xed, 0x8d, 0x0e, 0xb2, 0x72, 0xed, 0x0f, 0x0b, 0x30, 0x9b, + 0x3a, 0x8e, 0xc4, 0xaa, 0x0c, 0x3c, 0x5b, 0x4e, 0x23, 0x61, 0x95, 0x1d, 0xdc, 0x40, 0x56, 0x4e, + 0xde, 0x93, 0x6a, 0x79, 0x31, 0x67, 0x8a, 0x91, 0x7b, 0x7a, 0xe0, 0x33, 0x3d, 0x7c, 0x48, 0x23, + 0xe7, 0x6e, 0xbe, 0xa8, 0x3d, 0x72, 0x1d, 0x88, 0xb9, 0xf9, 0x22, 0x1a, 0x26, 0x38, 0x53, 0x06, + 0xbf, 0xf2, 0x69, 0x0c, 0x7e, 0xda, 0xaf, 0x16, 0x63, 0x3d, 0x20, 0x35, 0xfb, 0xa7, 0xf4, 0xc0, + 0x17, 0xd8, 0x02, 0x1a, 0x2e, 0xee, 0x8d, 0xf8, 0xfa, 0xc7, 0x17, 0x63, 0x49, 0x25, 0x6f, 0x8b, + 0xbe, 0x2f, 0xe5, 0x3c, 0xc8, 0xbd, 0xbd, 0xd1, 0x11, 0xd1, 0x55, 0xea, 0xad, 0x85, 0xaf, 0xa0, + 0x7c, 0x4e, 0xaf, 0x40, 0xfb, 0x67, 0x25, 0x68, 0xde, 0x71, 0x77, 0x3f, 
0x21, 0x11, 0xd4, 0xd9, + 0xcb, 0x54, 0xf1, 0x63, 0x5c, 0xa6, 0x76, 0xe0, 0xe5, 0x20, 0xb0, 0x3b, 0xd4, 0x70, 0x1d, 0xd3, + 0x5f, 0xde, 0x0b, 0xa8, 0xb7, 0x66, 0x39, 0x96, 0xbf, 0x4f, 0x4d, 0xe9, 0x4e, 0xfa, 0xcc, 0xc9, + 0x71, 0xeb, 0xe5, 0xed, 0xed, 0x8d, 0x2c, 0x16, 0x1c, 0x55, 0x97, 0x4f, 0x1b, 0xe2, 0xec, 0x28, + 0x3f, 0x5b, 0x25, 0x63, 0x6e, 0xc4, 0xb4, 0x11, 0x2b, 0xc7, 0x04, 0x97, 0xf6, 0x1f, 0x8a, 0xd0, + 0x08, 0x93, 0x47, 0x90, 0xcf, 0x43, 0x6d, 0xd7, 0x73, 0x0f, 0xa8, 0x27, 0x3c, 0x77, 0xf2, 0x6c, + 0x55, 0x5b, 0x14, 0xa1, 0xa2, 0x91, 0xcf, 0x41, 0x25, 0x70, 0xfb, 0x96, 0x91, 0x36, 0xa8, 0x6d, + 0xb3, 0x42, 0x14, 0x34, 0xfe, 0x21, 0xf0, 0xb0, 0x42, 0xfe, 0x54, 0xf5, 0xd8, 0x87, 0xc0, 0x4b, + 0x51, 0x52, 0xd5, 0x87, 0x50, 0x9e, 0xf8, 0x87, 0xf0, 0x85, 0x50, 0x05, 0xac, 0x24, 0xbf, 0xc4, + 0x94, 0xd2, 0xf6, 0x2e, 0x94, 0x7d, 0xdd, 0xb7, 0xe5, 0xf2, 0x96, 0x23, 0x5f, 0xc3, 0x72, 0x67, + 0x43, 0xe6, 0x6b, 0x58, 0xee, 0x6c, 0x20, 0x07, 0xd5, 0x7e, 0xbb, 0x04, 0x4d, 0xd1, 0xbf, 0x62, + 0xf6, 0x98, 0x64, 0x0f, 0xbf, 0xc9, 0x43, 0x2e, 0xfc, 0x41, 0x8f, 0x7a, 0xdc, 0x1c, 0x25, 0x27, + 0xc3, 0xb8, 0x1f, 0x21, 0x22, 0x86, 0x61, 0x17, 0x51, 0xd1, 0x1f, 0xed, 0xae, 0x67, 0x4b, 0x05, + 0x4f, 0x80, 0x22, 0x75, 0x5c, 0x19, 0x49, 0x19, 0x2e, 0x15, 0x77, 0x63, 0x34, 0x4c, 0x70, 0x6a, + 0x1f, 0x16, 0xa1, 0xb1, 0x61, 0xed, 0x51, 0xe3, 0xc8, 0xb0, 0xf9, 0x39, 0x55, 0x93, 0xda, 0x34, + 0xa0, 0xb7, 0x3c, 0xdd, 0xa0, 0x5b, 0xd4, 0xb3, 0x78, 0xfa, 0x26, 0xf6, 0x05, 0xf2, 0x39, 0x4e, + 0x9e, 0x53, 0x5d, 0x1d, 0xc1, 0x83, 0x23, 0x6b, 0x93, 0x75, 0x98, 0x32, 0xa9, 0x6f, 0x79, 0xd4, + 0xdc, 0x8a, 0x6d, 0x85, 0x3e, 0xaf, 0x5a, 0xb8, 0x1a, 0xa3, 0x3d, 0x3e, 0x6e, 0x4d, 0x2b, 0x13, + 0xa8, 0xd8, 0x13, 0x25, 0xaa, 0xb2, 0x49, 0xa5, 0xaf, 0x0f, 0xfc, 0xac, 0x36, 0xc6, 0x26, 0x95, + 0xad, 0x6c, 0x16, 0x1c, 0x55, 0x57, 0xab, 0x40, 0x69, 0xc3, 0xed, 0x6a, 0xdf, 0x2b, 0x41, 0x98, + 0xe7, 0x8b, 0xfc, 0xd9, 0x02, 0x34, 0x75, 0xc7, 0x71, 0x03, 0x99, 0x43, 0x4b, 0xf8, 0xf8, 0x31, + 0x77, 0x3a, 
0xb1, 0x85, 0xe5, 0x08, 0x54, 0xb8, 0x87, 0x43, 0x97, 0x75, 0x8c, 0x82, 0x71, 0xd9, + 0x64, 0x90, 0xf2, 0x58, 0x6f, 0xe6, 0x6f, 0xc5, 0x29, 0xfc, 0xd3, 0x57, 0xbe, 0x06, 0x73, 0xe9, + 0xc6, 0x9e, 0xc5, 0xe1, 0x94, 0xcb, 0xf5, 0x5f, 0x04, 0x88, 0xa2, 0x56, 0x9e, 0x81, 0x99, 0xcc, + 0x4a, 0x98, 0xc9, 0xc6, 0x4f, 0xb6, 0x10, 0x35, 0x7a, 0xa4, 0x69, 0xec, 0x5b, 0x29, 0xd3, 0xd8, + 0xfa, 0x24, 0x84, 0x3d, 0xd9, 0x1c, 0xb6, 0x0b, 0x17, 0x22, 0xde, 0xe8, 0x9b, 0xbf, 0x9b, 0xfa, + 0x32, 0x85, 0xb6, 0xf7, 0xc5, 0x11, 0x5f, 0xe6, 0x6c, 0x2c, 0x8c, 0x68, 0xf8, 0xdb, 0xd4, 0xfe, + 0x66, 0x01, 0xe6, 0xe2, 0x42, 0xf8, 0xc9, 0xf0, 0x2f, 0xc3, 0xb4, 0x47, 0x75, 0xb3, 0xad, 0x07, + 0xc6, 0x3e, 0x0f, 0x58, 0x2f, 0xf0, 0x08, 0x73, 0x7e, 0x86, 0x0d, 0xe3, 0x04, 0x4c, 0xf2, 0x11, + 0x1d, 0x9a, 0xac, 0x60, 0xdb, 0xea, 0x51, 0x77, 0x10, 0x8c, 0x69, 0xfb, 0xe5, 0xdb, 0x2e, 0x8c, + 0x60, 0x30, 0x8e, 0xa9, 0x7d, 0x54, 0x80, 0x99, 0x78, 0x83, 0xcf, 0xdd, 0x2e, 0xb8, 0x9f, 0xb4, + 0x0b, 0xae, 0x4c, 0xe0, 0xbd, 0x8f, 0xb0, 0x05, 0x7e, 0xa7, 0x19, 0x7f, 0x34, 0x6e, 0xff, 0x8b, + 0x9b, 0x3c, 0x0a, 0x4f, 0x34, 0x79, 0x7c, 0xf2, 0xd3, 0x47, 0x8d, 0xd2, 0xd5, 0xcb, 0xcf, 0xb1, + 0xae, 0xfe, 0x71, 0xe6, 0xa0, 0x8a, 0xe5, 0x51, 0xaa, 0xe6, 0xc8, 0xa3, 0xd4, 0x0b, 0xf3, 0x28, + 0xd5, 0x26, 0x36, 0xb1, 0x9d, 0x26, 0x97, 0x52, 0xfd, 0x99, 0xe6, 0x52, 0x6a, 0x9c, 0x57, 0x2e, + 0x25, 0xc8, 0x9b, 0x4b, 0xe9, 0xbb, 0x05, 0x98, 0x31, 0x13, 0xe7, 0x7e, 0xe5, 0x89, 0xfb, 0xf1, + 0x97, 0xb3, 0xe4, 0x31, 0x62, 0x71, 0xf0, 0x2b, 0x59, 0x86, 0x29, 0x91, 0x59, 0x19, 0x8c, 0xa6, + 0x3e, 0x96, 0x0c, 0x46, 0xe4, 0x97, 0xa0, 0x61, 0xab, 0xb5, 0x4e, 0xe6, 0x75, 0xdc, 0x98, 0xc8, + 0x90, 0x94, 0x98, 0xd1, 0xd9, 0x82, 0xb0, 0x08, 0x23, 0x89, 0xda, 0xef, 0xd7, 0xe2, 0x0b, 0xe2, + 0xb3, 0xf6, 0x3c, 0xbc, 0x9e, 0xf4, 0x3c, 0x5c, 0x4f, 0x7b, 0x1e, 0x86, 0x56, 0x73, 0xe9, 0x7d, + 0xf8, 0xc9, 0xd8, 0x3a, 0x51, 0xe2, 0xa9, 0x93, 0xc2, 0x21, 0x97, 0xb1, 0x56, 0x2c, 0xc3, 0xac, + 0x54, 0x02, 0x14, 0x91, 0x4f, 0xb2, 0xd3, 0x51, 
0xac, 0xd8, 0x6a, 0x92, 0x8c, 0x69, 0x7e, 0x26, + 0xd0, 0x57, 0x19, 0x74, 0xc5, 0x3e, 0x2a, 0x1a, 0xe3, 0x2a, 0xbb, 0x6d, 0xc8, 0xc1, 0xf6, 0x5c, + 0x1e, 0xd5, 0x7d, 0xe9, 0x3f, 0x88, 0xed, 0xb9, 0x90, 0x97, 0xa2, 0xa4, 0xc6, 0x9d, 0x28, 0xb5, + 0xa7, 0x38, 0x51, 0x74, 0x68, 0xda, 0xba, 0x1f, 0x88, 0xc1, 0x64, 0xca, 0xd9, 0xe4, 0x8f, 0x9d, + 0x6e, 0xdd, 0x67, 0xba, 0x44, 0xa4, 0xc0, 0x6f, 0x44, 0x30, 0x18, 0xc7, 0x24, 0x26, 0x4c, 0xb1, + 0xbf, 0x7c, 0x66, 0x31, 0x97, 0x03, 0x99, 0x67, 0xee, 0x2c, 0x32, 0xc2, 0x0d, 0xdd, 0x46, 0x0c, + 0x07, 0x13, 0xa8, 0x23, 0xfc, 0x2c, 0x30, 0x8e, 0x9f, 0x85, 0xfc, 0xac, 0x50, 0xdc, 0x8e, 0xc2, + 0xd7, 0xda, 0xe4, 0xaf, 0x35, 0x8c, 0x33, 0xc5, 0x38, 0x11, 0x93, 0xbc, 0x6c, 0x54, 0x0c, 0x64, + 0x37, 0xa8, 0xea, 0x53, 0xc9, 0x51, 0xb1, 0x93, 0x24, 0x63, 0x9a, 0x9f, 0x6c, 0xc1, 0xc5, 0xb0, + 0x28, 0xde, 0x8c, 0x69, 0x8e, 0x13, 0x06, 0xfe, 0xed, 0x64, 0xf0, 0x60, 0x66, 0x4d, 0x7e, 0x92, + 0x66, 0xe0, 0x79, 0xd4, 0x09, 0x6e, 0xeb, 0xfe, 0xbe, 0x8c, 0x20, 0x8c, 0x4e, 0xd2, 0x44, 0x24, + 0x8c, 0xf3, 0x91, 0x25, 0x00, 0x01, 0xc7, 0x6b, 0xcd, 0x26, 0x83, 0x74, 0x77, 0x42, 0x0a, 0xc6, + 0xb8, 0xb4, 0xef, 0x36, 0xa0, 0x79, 0x4f, 0x0f, 0xac, 0x43, 0xca, 0x9d, 0xa2, 0xe7, 0xe3, 0x99, + 0xfa, 0x2b, 0x05, 0xb8, 0x94, 0x8c, 0x7c, 0x3d, 0x47, 0xf7, 0x14, 0xcf, 0xbc, 0x84, 0x99, 0xd2, + 0x70, 0x44, 0x2b, 0xb8, 0xa3, 0x6a, 0x28, 0x90, 0xf6, 0xbc, 0x1d, 0x55, 0x9d, 0x51, 0x02, 0x71, + 0x74, 0x5b, 0x3e, 0x29, 0x8e, 0xaa, 0xe7, 0x3b, 0x55, 0x68, 0xca, 0x8d, 0x56, 0x7b, 0x6e, 0xdc, + 0x68, 0xf5, 0xe7, 0x42, 0xeb, 0xef, 0xc7, 0xdc, 0x68, 0x8d, 0x9c, 0xe1, 0x5c, 0xf2, 0xb0, 0x88, + 0x40, 0x1b, 0xe5, 0x8e, 0xe3, 0x79, 0x1e, 0x94, 0x7b, 0x83, 0x29, 0xcb, 0xbb, 0xba, 0x6f, 0x19, + 0x52, 0xed, 0xc8, 0x91, 0x1a, 0x59, 0xa5, 0x4c, 0x14, 0x51, 0x1f, 0xfc, 0x2f, 0x0a, 0xec, 0x28, + 0x43, 0x64, 0x31, 0x57, 0x86, 0x48, 0xb2, 0x02, 0x65, 0xe7, 0x80, 0x1e, 0x9d, 0x2d, 0x63, 0x02, + 0xdf, 0x04, 0xde, 0xbb, 0x4b, 0x8f, 0x90, 0x57, 0xd6, 0xbe, 0x5f, 0x04, 0x60, 0x8f, 
0x7f, 0x3a, + 0x87, 0xd6, 0x8f, 0x43, 0xcd, 0x1f, 0x70, 0xc3, 0x90, 0x54, 0x98, 0xa2, 0x18, 0x38, 0x51, 0x8c, + 0x8a, 0x4e, 0x3e, 0x07, 0x95, 0x6f, 0x0d, 0xe8, 0x40, 0x45, 0x67, 0x84, 0xfb, 0x86, 0xaf, 0xb3, + 0x42, 0x14, 0xb4, 0xf3, 0x33, 0x3a, 0x2b, 0xc7, 0x57, 0xe5, 0xbc, 0x1c, 0x5f, 0x0d, 0xa8, 0xdd, + 0x73, 0x79, 0x48, 0xad, 0xf6, 0x5f, 0x8b, 0x00, 0x51, 0xc8, 0x22, 0xf9, 0xf5, 0x02, 0xbc, 0x14, + 0x7e, 0x70, 0x81, 0xd8, 0xfe, 0xf1, 0x6c, 0xe4, 0xb9, 0x9d, 0x60, 0x59, 0x1f, 0x3b, 0x9f, 0x81, + 0xb6, 0xb2, 0xc4, 0x61, 0x76, 0x2b, 0x08, 0x42, 0x9d, 0xf6, 0xfa, 0xc1, 0xd1, 0xaa, 0xe5, 0xc9, + 0x11, 0x98, 0x19, 0x19, 0x7b, 0x53, 0xf2, 0x88, 0xaa, 0xd2, 0x46, 0xc1, 0x3f, 0x22, 0x45, 0xc1, + 0x10, 0x87, 0xec, 0x43, 0xdd, 0x71, 0xdf, 0xf3, 0x59, 0x77, 0xc8, 0xe1, 0xf8, 0xd6, 0xf8, 0x5d, + 0x2e, 0xba, 0x55, 0x38, 0x43, 0xe4, 0x1f, 0xac, 0x39, 0xb2, 0xb3, 0x7f, 0xad, 0x08, 0x17, 0x32, + 0xfa, 0x81, 0xbc, 0x05, 0x73, 0x32, 0x3a, 0x34, 0x4a, 0xcb, 0x5f, 0x88, 0xd2, 0xf2, 0x77, 0x52, + 0x34, 0x1c, 0xe2, 0x26, 0xef, 0x01, 0xe8, 0x86, 0x41, 0x7d, 0x7f, 0xd3, 0x35, 0xd5, 0x7e, 0xe0, + 0x4d, 0xa6, 0xbe, 0x2c, 0x87, 0xa5, 0x8f, 0x8f, 0x5b, 0x3f, 0x95, 0x15, 0xf0, 0x9d, 0xea, 0xe7, + 0xa8, 0x02, 0xc6, 0x20, 0xc9, 0x37, 0x01, 0x84, 0x0d, 0x20, 0xcc, 0x49, 0xf1, 0x14, 0xc3, 0xd9, + 0x82, 0x4a, 0x79, 0xb6, 0xf0, 0xf5, 0x81, 0xee, 0x04, 0x56, 0x70, 0x24, 0x52, 0x00, 0x3d, 0x08, + 0x51, 0x30, 0x86, 0xa8, 0xfd, 0xe3, 0x22, 0xd4, 0x95, 0x5b, 0xe0, 0x19, 0xd8, 0x82, 0xbb, 0x09, + 0x5b, 0xf0, 0x84, 0x42, 0xbc, 0xb3, 0x2c, 0xc1, 0x6e, 0xca, 0x12, 0x7c, 0x2b, 0xbf, 0xa8, 0x27, + 0xdb, 0x81, 0x7f, 0xab, 0x08, 0x33, 0x8a, 0x35, 0xaf, 0x85, 0xf6, 0xab, 0x30, 0x2b, 0x42, 0x33, + 0x36, 0xf5, 0x47, 0x22, 0x1b, 0x12, 0xef, 0xb0, 0xb2, 0x88, 0xaa, 0x6e, 0x27, 0x49, 0x98, 0xe6, + 0x65, 0xc3, 0x5a, 0x14, 0xed, 0xb0, 0x4d, 0x98, 0x70, 0xe6, 0x8a, 0xfd, 0x26, 0x1f, 0xd6, 0xed, + 0x14, 0x0d, 0x87, 0xb8, 0xd3, 0x26, 0xe2, 0xf2, 0x39, 0x98, 0x88, 0xff, 0x6d, 0x01, 0xa6, 0xa2, + 0xfe, 0x3a, 0x77, 0x03, 
0xf1, 0x5e, 0xd2, 0x40, 0xbc, 0x9c, 0x7b, 0x38, 0x8c, 0x30, 0x0f, 0xff, + 0x85, 0x1a, 0x24, 0x4e, 0x1a, 0x90, 0x5d, 0xb8, 0x62, 0x65, 0xc6, 0x4b, 0xc6, 0x66, 0x9b, 0xf0, + 0xe8, 0xfc, 0xfa, 0x48, 0x4e, 0x7c, 0x02, 0x0a, 0x19, 0x40, 0xfd, 0x90, 0x7a, 0x81, 0x65, 0x50, + 0xf5, 0x7c, 0xb7, 0x72, 0xab, 0x64, 0xd2, 0x08, 0x1e, 0xf6, 0xe9, 0x03, 0x29, 0x00, 0x43, 0x51, + 0x64, 0x17, 0x2a, 0xd4, 0xec, 0x52, 0x95, 0x9f, 0x2a, 0x67, 0xbe, 0xe0, 0xb0, 0x3f, 0xd9, 0x3f, + 0x1f, 0x05, 0x34, 0xf1, 0xe3, 0x86, 0xa6, 0x72, 0x4e, 0x05, 0xeb, 0x94, 0xe6, 0x25, 0x72, 0x10, + 0x5a, 0x5b, 0x2b, 0x13, 0x9a, 0x3c, 0x9e, 0x60, 0x6b, 0xf5, 0xa1, 0xf1, 0x50, 0x0f, 0xa8, 0xd7, + 0xd3, 0xbd, 0x03, 0xb9, 0xdb, 0x18, 0xff, 0x09, 0xdf, 0x56, 0x48, 0xd1, 0x13, 0x86, 0x45, 0x18, + 0xc9, 0x21, 0x2e, 0x34, 0x02, 0xa9, 0x3e, 0x2b, 0x93, 0xf2, 0xf8, 0x42, 0x95, 0x22, 0xee, 0xcb, + 0x13, 0x07, 0xea, 0x2f, 0x46, 0x32, 0xc8, 0x61, 0x22, 0xb9, 0xbc, 0xb8, 0x52, 0xa0, 0x9d, 0xc3, + 0x35, 0x21, 0xa1, 0xa2, 0xe5, 0x26, 0x3b, 0x49, 0xbd, 0xf6, 0x3f, 0x2b, 0xd1, 0xb4, 0xfc, 0xac, + 0xed, 0x84, 0x5f, 0x4a, 0xda, 0x09, 0xaf, 0xa5, 0xed, 0x84, 0x29, 0x7f, 0xfc, 0xd9, 0x63, 0x94, + 0x53, 0xe6, 0xb5, 0xf2, 0x39, 0x98, 0xd7, 0x5e, 0x85, 0xe6, 0x21, 0x9f, 0x09, 0x44, 0xb2, 0xab, + 0x0a, 0x5f, 0x46, 0xf8, 0xcc, 0xfe, 0x20, 0x2a, 0xc6, 0x38, 0x0f, 0xab, 0x22, 0xaf, 0xd3, 0x09, + 0xf3, 0x4b, 0xcb, 0x2a, 0x9d, 0xa8, 0x18, 0xe3, 0x3c, 0x3c, 0xbc, 0xd1, 0x72, 0x0e, 0x44, 0x85, + 0x1a, 0xaf, 0x20, 0xc2, 0x1b, 0x55, 0x21, 0x46, 0x74, 0x72, 0x03, 0xea, 0x03, 0x73, 0x4f, 0xf0, + 0xd6, 0x39, 0x2f, 0xd7, 0x30, 0x77, 0x56, 0xd7, 0x64, 0xf2, 0x2d, 0x45, 0x65, 0x2d, 0xe9, 0xe9, + 0x7d, 0x45, 0xe0, 0x7b, 0x43, 0xd9, 0x92, 0xcd, 0xa8, 0x18, 0xe3, 0x3c, 0xe4, 0x67, 0x60, 0xc6, + 0xa3, 0xe6, 0xc0, 0xa0, 0x61, 0x2d, 0xe0, 0xb5, 0x64, 0x56, 0xd2, 0x38, 0x05, 0x53, 0x9c, 0x23, + 0x8c, 0x84, 0xcd, 0xb1, 0x8c, 0x84, 0x5f, 0x83, 0x19, 0xd3, 0xd3, 0x2d, 0x87, 0x9a, 0xf7, 0x1d, + 0x1e, 0x74, 0x21, 0x83, 0x2c, 0x43, 0x03, 0xfd, 0x6a, 0x82, 
0x8a, 0x29, 0x6e, 0xed, 0x9f, 0x17, + 0xa1, 0x22, 0x72, 0xa5, 0xae, 0xc3, 0x05, 0xcb, 0xb1, 0x02, 0x4b, 0xb7, 0x57, 0xa9, 0xad, 0x1f, + 0x25, 0x03, 0x4f, 0x5e, 0x66, 0x1b, 0xed, 0xf5, 0x61, 0x32, 0x66, 0xd5, 0x61, 0x9d, 0x13, 0x88, + 0xe5, 0x5b, 0xa1, 0x08, 0x3b, 0x9a, 0x48, 0xd4, 0x9d, 0xa0, 0x60, 0x8a, 0x93, 0x29, 0x43, 0xfd, + 0x8c, 0xa8, 0x12, 0xae, 0x0c, 0x25, 0x63, 0x49, 0x92, 0x7c, 0x5c, 0x49, 0x1f, 0x70, 0x85, 0x38, + 0x3c, 0xca, 0x24, 0x43, 0xd3, 0x84, 0x92, 0x9e, 0xa2, 0xe1, 0x10, 0x37, 0x43, 0xd8, 0xd3, 0x2d, + 0x7b, 0xe0, 0xd1, 0x08, 0xa1, 0x12, 0x21, 0xac, 0xa5, 0x68, 0x38, 0xc4, 0xad, 0xfd, 0xf7, 0x02, + 0x90, 0xe1, 0xc3, 0x19, 0x64, 0x1f, 0xaa, 0x0e, 0xb7, 0x45, 0xe6, 0xbe, 0x1f, 0x20, 0x66, 0xd2, + 0x14, 0x8b, 0x84, 0x2c, 0x90, 0xf8, 0xc4, 0x81, 0x3a, 0x7d, 0x14, 0x50, 0xcf, 0x09, 0x0f, 0x6b, + 0x4d, 0xe6, 0x2e, 0x02, 0xb1, 0x37, 0x93, 0xc8, 0x18, 0xca, 0xd0, 0x7e, 0xaf, 0x08, 0xcd, 0x18, + 0xdf, 0xd3, 0xb6, 0xf8, 0x3c, 0x5f, 0x84, 0x30, 0x01, 0xee, 0x78, 0xb6, 0x9c, 0xef, 0x62, 0xf9, + 0x22, 0x24, 0x09, 0x37, 0x30, 0xce, 0x47, 0x96, 0x00, 0x7a, 0xba, 0x1f, 0x50, 0x8f, 0xeb, 0x42, + 0xa9, 0x2c, 0x0d, 0x9b, 0x21, 0x05, 0x63, 0x5c, 0xe4, 0xba, 0xbc, 0x4d, 0xa2, 0x9c, 0xcc, 0xaa, + 0x39, 0xe2, 0xaa, 0x88, 0xca, 0x04, 0xae, 0x8a, 0x20, 0x5d, 0x98, 0x53, 0xad, 0x56, 0xd4, 0xb3, + 0xe5, 0x5c, 0x14, 0x03, 0x35, 0x05, 0x81, 0x43, 0xa0, 0xda, 0xf7, 0x0b, 0x30, 0x9d, 0x30, 0x40, + 0x89, 0x7c, 0x98, 0xea, 0x68, 0x51, 0x22, 0x1f, 0x66, 0xec, 0x44, 0xd0, 0x17, 0xa0, 0x2a, 0x3a, + 0x28, 0x1d, 0x31, 0x2c, 0xba, 0x10, 0x25, 0x95, 0xad, 0x2c, 0xd2, 0xc4, 0x9d, 0x5e, 0x59, 0xa4, + 0x0d, 0x1c, 0x15, 0x5d, 0x78, 0x8e, 0x44, 0xeb, 0x64, 0x4f, 0xc7, 0x3c, 0x47, 0xa2, 0x1c, 0x43, + 0x0e, 0xed, 0xef, 0xf3, 0x76, 0x07, 0xde, 0x51, 0xb8, 0xb3, 0xee, 0x42, 0x4d, 0x46, 0x89, 0xca, + 0x4f, 0xe3, 0xad, 0x1c, 0x56, 0x31, 0x8e, 0x23, 0xe3, 0x1c, 0x75, 0xe3, 0xe0, 0xfe, 0xde, 0x1e, + 0x2a, 0x74, 0x72, 0x13, 0x1a, 0xae, 0x23, 0xbf, 0x60, 0xf9, 0xf8, 0x5f, 0x64, 0x2b, 0xc7, 0x7d, + 
0x55, 0xf8, 0xf8, 0xb8, 0x75, 0x29, 0xfc, 0x93, 0x68, 0x24, 0x46, 0x35, 0xb5, 0x3f, 0x53, 0x80, + 0x97, 0xd0, 0xb5, 0x6d, 0xcb, 0xe9, 0x26, 0x3d, 0x9f, 0xc4, 0x86, 0x99, 0x9e, 0xfe, 0x68, 0xc7, + 0xd1, 0x0f, 0x75, 0xcb, 0xd6, 0x77, 0x6d, 0xfa, 0xd4, 0x9d, 0xf1, 0x20, 0xb0, 0xec, 0x05, 0x71, + 0xbb, 0xe6, 0xc2, 0xba, 0x13, 0xdc, 0xf7, 0x3a, 0x81, 0x67, 0x39, 0x5d, 0x31, 0x4b, 0x6e, 0x26, + 0xb0, 0x30, 0x85, 0xad, 0xfd, 0x7e, 0x09, 0x78, 0x04, 0x22, 0xf9, 0x32, 0x34, 0x7a, 0xd4, 0xd8, + 0xd7, 0x1d, 0xcb, 0x57, 0x99, 0x85, 0x2f, 0xb3, 0xe7, 0xda, 0x54, 0x85, 0x8f, 0xd9, 0xab, 0x58, + 0xee, 0x6c, 0xf0, 0xc3, 0x40, 0x11, 0x2f, 0x31, 0xa0, 0xda, 0xf5, 0x7d, 0xbd, 0x6f, 0xe5, 0x0e, + 0x31, 0x11, 0x99, 0x5c, 0xc5, 0x74, 0x24, 0x7e, 0xa3, 0x84, 0x26, 0x06, 0x54, 0xfa, 0xb6, 0x6e, + 0x39, 0xb9, 0x6f, 0x83, 0x63, 0x4f, 0xb0, 0xc5, 0x90, 0x84, 0xa9, 0x92, 0xff, 0x44, 0x81, 0x4d, + 0x06, 0xd0, 0xf4, 0x0d, 0x4f, 0xef, 0xf9, 0xfb, 0xfa, 0xd2, 0x6b, 0xaf, 0xe7, 0x56, 0xfe, 0x23, + 0x51, 0x42, 0x17, 0x59, 0xc1, 0xe5, 0xcd, 0xce, 0xed, 0xe5, 0xa5, 0xd7, 0x5e, 0xc7, 0xb8, 0x9c, + 0xb8, 0xd8, 0xd7, 0x5e, 0x5d, 0x92, 0x33, 0xc8, 0xc4, 0xc5, 0xbe, 0xf6, 0xea, 0x12, 0xc6, 0xe5, + 0x68, 0xff, 0xab, 0x00, 0x8d, 0x90, 0x97, 0xec, 0x00, 0xb0, 0xb9, 0x4c, 0xe6, 0x5e, 0x3d, 0xd3, + 0xcd, 0x39, 0xdc, 0xda, 0xb3, 0x13, 0x56, 0xc6, 0x18, 0x50, 0x46, 0x72, 0xda, 0xe2, 0xa4, 0x93, + 0xd3, 0x2e, 0x42, 0x63, 0x5f, 0x77, 0x4c, 0x7f, 0x5f, 0x3f, 0xa0, 0x32, 0x70, 0x3b, 0xdc, 0x8a, + 0xdc, 0x56, 0x04, 0x8c, 0x78, 0xb4, 0x7f, 0x58, 0x05, 0x11, 0x17, 0xc2, 0x26, 0x1d, 0xd3, 0xf2, + 0xc5, 0xf1, 0x8a, 0x02, 0xaf, 0x19, 0x4e, 0x3a, 0xab, 0xb2, 0x1c, 0x43, 0x0e, 0x72, 0x19, 0x4a, + 0x3d, 0xcb, 0x91, 0x1a, 0x08, 0x37, 0xe4, 0x6e, 0x5a, 0x0e, 0xb2, 0x32, 0x4e, 0xd2, 0x1f, 0x49, + 0x0d, 0x43, 0x90, 0xf4, 0x47, 0xc8, 0xca, 0xc8, 0x57, 0x61, 0xd6, 0x76, 0xdd, 0x03, 0x36, 0x7d, + 0x28, 0x45, 0x44, 0x78, 0xd5, 0xb9, 0x69, 0x65, 0x23, 0x49, 0xc2, 0x34, 0x2f, 0xd9, 0x81, 0x97, + 0x3f, 0xa0, 0x9e, 0x2b, 0xe7, 0xcb, 
0x8e, 0x4d, 0x69, 0x5f, 0xc1, 0x08, 0xd5, 0x98, 0x47, 0xc9, + 0xfe, 0x7c, 0x36, 0x0b, 0x8e, 0xaa, 0xcb, 0x23, 0xfa, 0x75, 0xaf, 0x4b, 0x83, 0x2d, 0xcf, 0x65, + 0xba, 0x8b, 0xe5, 0x74, 0x15, 0x6c, 0x35, 0x82, 0xdd, 0xce, 0x66, 0xc1, 0x51, 0x75, 0xc9, 0x3b, + 0x30, 0x2f, 0x48, 0x42, 0x6d, 0x59, 0x16, 0xd3, 0x8c, 0x65, 0xab, 0x4b, 0x54, 0xa7, 0x85, 0xbf, + 0x6c, 0x7b, 0x04, 0x0f, 0x8e, 0xac, 0x4d, 0xee, 0xc0, 0x9c, 0xf2, 0x96, 0x6e, 0x51, 0xaf, 0x13, + 0xc6, 0x0a, 0x4d, 0xb7, 0xaf, 0x9d, 0x1c, 0xb7, 0xae, 0xac, 0xd2, 0xbe, 0x47, 0x8d, 0xb8, 0xd7, + 0x59, 0x71, 0xe1, 0x50, 0x3d, 0x82, 0x70, 0x89, 0x07, 0x04, 0xed, 0xf4, 0x57, 0x5c, 0xd7, 0x36, + 0xdd, 0x87, 0x8e, 0x7a, 0x76, 0xa1, 0xb0, 0x73, 0x07, 0x69, 0x27, 0x93, 0x03, 0x47, 0xd4, 0x64, + 0x4f, 0xce, 0x29, 0xab, 0xee, 0x43, 0x27, 0x8d, 0x0a, 0xd1, 0x93, 0x77, 0x46, 0xf0, 0xe0, 0xc8, + 0xda, 0x64, 0x0d, 0x48, 0xfa, 0x09, 0x76, 0xfa, 0xd2, 0x85, 0x7f, 0x49, 0xa4, 0x51, 0x4a, 0x53, + 0x31, 0xa3, 0x06, 0xd9, 0x80, 0x8b, 0xe9, 0x52, 0x26, 0x4e, 0x7a, 0xf3, 0x79, 0x02, 0x65, 0xcc, + 0xa0, 0x63, 0x66, 0x2d, 0xed, 0x1f, 0x15, 0x61, 0x3a, 0x91, 0x77, 0xe3, 0xb9, 0xcb, 0x6f, 0xc0, + 0x36, 0x0f, 0x3d, 0xbf, 0xbb, 0xbe, 0x7a, 0x9b, 0xea, 0x26, 0xf5, 0xd4, 0xb1, 0x8e, 0x86, 0x5c, + 0x16, 0x13, 0x14, 0x4c, 0x71, 0x92, 0x3d, 0xa8, 0x08, 0x3f, 0x41, 0xde, 0x3b, 0x98, 0x54, 0x1f, + 0x71, 0x67, 0x81, 0xbc, 0xb8, 0xcc, 0xf5, 0x28, 0x0a, 0x78, 0x2d, 0x80, 0xa9, 0x38, 0x07, 0x9b, + 0x48, 0x22, 0xb5, 0xb7, 0x96, 0x50, 0x79, 0xd7, 0xa1, 0x14, 0x04, 0xe3, 0x66, 0x4e, 0x10, 0x7e, + 0xa7, 0xed, 0x0d, 0x64, 0x18, 0xda, 0x1e, 0x7b, 0x77, 0xbe, 0x6f, 0xb9, 0x8e, 0x4c, 0xa3, 0xbf, + 0x03, 0x35, 0xb9, 0x7b, 0x1a, 0x33, 0xf3, 0x03, 0xd7, 0x95, 0x94, 0xd9, 0x55, 0x61, 0x69, 0xff, + 0xae, 0x08, 0x8d, 0xd0, 0x4c, 0x72, 0x8a, 0xf4, 0xf4, 0x2e, 0x34, 0xc2, 0x80, 0xc6, 0xdc, 0x17, + 0xcc, 0x46, 0x71, 0x76, 0x7c, 0x67, 0x1f, 0xfe, 0xc5, 0x48, 0x46, 0x3c, 0x58, 0xb2, 0x94, 0x23, + 0x58, 0xb2, 0x0f, 0xb5, 0xc0, 0xb3, 0xba, 0x5d, 0xb9, 0x4b, 0xc8, 0x13, 
0x2d, 0x19, 0x76, 0xd7, + 0xb6, 0x00, 0x94, 0x3d, 0x2b, 0xfe, 0xa0, 0x12, 0xa3, 0xbd, 0x0f, 0x73, 0x69, 0x4e, 0xae, 0x42, + 0x1b, 0xfb, 0xd4, 0x1c, 0xd8, 0xaa, 0x8f, 0x23, 0x15, 0x5a, 0x96, 0x63, 0xc8, 0x41, 0x6e, 0x40, + 0x9d, 0xbd, 0xa6, 0x0f, 0x5c, 0x47, 0xa9, 0xb1, 0x7c, 0x37, 0xb2, 0x2d, 0xcb, 0x30, 0xa4, 0x6a, + 0xff, 0xa5, 0x04, 0x97, 0x23, 0x63, 0xd7, 0xa6, 0xee, 0xe8, 0xdd, 0x53, 0xdc, 0x2a, 0xfa, 0xe9, + 0x59, 0xba, 0xb3, 0xde, 0x31, 0x52, 0x7a, 0x0e, 0xee, 0x18, 0xf9, 0xbf, 0x45, 0xe0, 0xc1, 0xd7, + 0xe4, 0xdb, 0x30, 0xa5, 0xc7, 0x2e, 0x94, 0x96, 0xaf, 0xf3, 0x66, 0xee, 0xd7, 0xc9, 0x63, 0xbc, + 0xc3, 0x00, 0xb8, 0x78, 0x29, 0x26, 0x04, 0x12, 0x17, 0xea, 0x7b, 0xba, 0x6d, 0x33, 0x5d, 0x28, + 0xb7, 0xf3, 0x2e, 0x21, 0x9c, 0x0f, 0xf3, 0x35, 0x09, 0x8d, 0xa1, 0x10, 0xf2, 0xdd, 0x02, 0x4c, + 0x7b, 0xf1, 0xed, 0x9a, 0x7c, 0x21, 0x79, 0x42, 0x3b, 0x62, 0x68, 0xf1, 0x70, 0xbb, 0xf8, 0x9e, + 0x30, 0x29, 0x53, 0xfb, 0xcf, 0x05, 0x98, 0xee, 0xd8, 0x96, 0x69, 0x39, 0xdd, 0x73, 0xbc, 0xe2, + 0xe4, 0x3e, 0x54, 0x7c, 0xdb, 0x32, 0xe9, 0x98, 0xab, 0x89, 0x58, 0xc7, 0x18, 0x00, 0x0a, 0x9c, + 0xe4, 0x9d, 0x29, 0xa5, 0x53, 0xdc, 0x99, 0xf2, 0x07, 0x55, 0x90, 0xc7, 0x08, 0xc8, 0x00, 0x1a, + 0x5d, 0x75, 0x15, 0x83, 0x7c, 0xc6, 0xdb, 0x39, 0xd2, 0x78, 0x26, 0x2e, 0x75, 0x10, 0x73, 0x7f, + 0x58, 0x88, 0x91, 0x24, 0x42, 0x93, 0x37, 0x99, 0xaf, 0xe6, 0xbc, 0xc9, 0x5c, 0x88, 0x1b, 0xbe, + 0xcb, 0x5c, 0x87, 0xf2, 0x7e, 0x10, 0xf4, 0xe5, 0x60, 0x1a, 0xff, 0x9c, 0x48, 0x94, 0x49, 0x4a, + 0xe8, 0x44, 0xec, 0x3f, 0x72, 0x68, 0x26, 0xc2, 0xd1, 0xc3, 0xfb, 0x22, 0x57, 0x72, 0x85, 0x91, + 0xc4, 0x45, 0xb0, 0xff, 0xc8, 0xa1, 0xc9, 0x2f, 0x42, 0x33, 0xf0, 0x74, 0xc7, 0xdf, 0x73, 0xbd, + 0x1e, 0xf5, 0xe4, 0x1e, 0x75, 0x2d, 0xc7, 0x65, 0xde, 0xdb, 0x11, 0x9a, 0x30, 0xc9, 0x26, 0x8a, + 0x30, 0x2e, 0x8d, 0x1c, 0x40, 0x7d, 0x60, 0x8a, 0x86, 0x49, 0x33, 0xd8, 0x72, 0x9e, 0xfb, 0xd9, + 0x63, 0x41, 0x22, 0xea, 0x1f, 0x86, 0x02, 0x92, 0x57, 0xa3, 0xd6, 0x26, 0x75, 0x35, 0x6a, 0x7c, + 0x34, 0x66, 
0xa5, 0xb9, 0x21, 0x3d, 0xa9, 0xd7, 0x3a, 0x5d, 0x19, 0xe3, 0xb6, 0x96, 0x5b, 0xe5, + 0x14, 0x22, 0x9b, 0xa1, 0x6e, 0xec, 0x74, 0x51, 0xc9, 0xd0, 0x7a, 0x20, 0x7d, 0x47, 0xc4, 0x48, + 0x5c, 0x07, 0x25, 0x4e, 0x46, 0x2e, 0x9e, 0x6e, 0x3e, 0x08, 0xef, 0x25, 0x8a, 0xa5, 0xa3, 0xcf, + 0xbc, 0xf7, 0x49, 0xfb, 0xf7, 0x45, 0x28, 0x6d, 0x6f, 0x74, 0x44, 0x8a, 0x59, 0x7e, 0xc1, 0x1c, + 0xed, 0x1c, 0x58, 0xfd, 0x07, 0xd4, 0xb3, 0xf6, 0x8e, 0xe4, 0xd6, 0x3b, 0x96, 0x62, 0x36, 0xcd, + 0x81, 0x19, 0xb5, 0xc8, 0xbb, 0x30, 0x65, 0xe8, 0x2b, 0xd4, 0x0b, 0xc6, 0x31, 0x2c, 0xf0, 0x43, + 0xe6, 0x2b, 0xcb, 0x51, 0x75, 0x4c, 0x80, 0x91, 0x1d, 0x00, 0x23, 0x82, 0x2e, 0x9d, 0xd9, 0x1c, + 0x12, 0x03, 0x8e, 0x01, 0x11, 0x84, 0xc6, 0x01, 0x63, 0xe5, 0xa8, 0xe5, 0xb3, 0xa0, 0xf2, 0x91, + 0x73, 0x57, 0xd5, 0xc5, 0x08, 0x46, 0x73, 0x60, 0x3a, 0x71, 0x47, 0x14, 0xf9, 0x0a, 0xd4, 0xdd, + 0x7e, 0x6c, 0x3a, 0x6d, 0xf0, 0x68, 0xda, 0xfa, 0x7d, 0x59, 0xf6, 0xf8, 0xb8, 0x35, 0xbd, 0xe1, + 0x76, 0x2d, 0x43, 0x15, 0x60, 0xc8, 0x4e, 0x34, 0xa8, 0xf2, 0x73, 0x9b, 0xea, 0x86, 0x28, 0xbe, + 0x76, 0xf0, 0x4b, 0x5c, 0x7c, 0x94, 0x14, 0xed, 0x97, 0xcb, 0x10, 0x79, 0x5c, 0x89, 0x0f, 0x55, + 0x71, 0x66, 0x44, 0xce, 0xdc, 0xe7, 0x7a, 0x3c, 0x45, 0x8a, 0x22, 0x5d, 0x28, 0xbd, 0xef, 0xee, + 0xe6, 0x9e, 0xb8, 0x63, 0x29, 0x21, 0x84, 0xad, 0x2c, 0x56, 0x80, 0x4c, 0x02, 0xf9, 0xab, 0x05, + 0x78, 0xd1, 0x4f, 0xab, 0xbe, 0x72, 0x38, 0x60, 0x7e, 0x1d, 0x3f, 0xad, 0x4c, 0xcb, 0xb0, 0xe7, + 0x51, 0x64, 0x1c, 0x6e, 0x0b, 0xeb, 0x7f, 0xe1, 0x0a, 0x95, 0xc3, 0xe9, 0x56, 0xce, 0x9b, 0x70, + 0x93, 0xfd, 0x9f, 0x2c, 0x43, 0x29, 0x4a, 0xfb, 0x4e, 0x11, 0x9a, 0xb1, 0xd9, 0x3a, 0xf7, 0xc5, + 0x63, 0x8f, 0x52, 0x17, 0x8f, 0x6d, 0x8d, 0x1f, 0x19, 0x10, 0xb5, 0xea, 0xbc, 0xef, 0x1e, 0xfb, + 0xa7, 0x45, 0x28, 0xed, 0xac, 0xae, 0x25, 0x37, 0xad, 0x85, 0x67, 0xb0, 0x69, 0xdd, 0x87, 0xda, + 0xee, 0xc0, 0xb2, 0x03, 0xcb, 0xc9, 0x9d, 0xb4, 0x46, 0xdd, 0xd3, 0x26, 0x7d, 0x1d, 0x02, 0x15, + 0x15, 0x3c, 0xe9, 0x42, 0xad, 0x2b, 0xb2, 0x86, 
0xe6, 0x8e, 0x97, 0x94, 0xd9, 0x47, 0x85, 0x20, + 0xf9, 0x07, 0x15, 0xba, 0x76, 0x04, 0xd5, 0x9d, 0x55, 0xa9, 0xf6, 0x3f, 0xdb, 0xde, 0xd4, 0x7e, + 0x11, 0x42, 0x2d, 0xe0, 0xd9, 0x0b, 0xff, 0x6f, 0x05, 0x48, 0x2a, 0x3e, 0xcf, 0x7e, 0x34, 0x1d, + 0xa4, 0x47, 0xd3, 0xea, 0x24, 0x3e, 0xbe, 0xec, 0x01, 0xa5, 0xfd, 0x9b, 0x02, 0xa4, 0x0e, 0xfa, + 0x91, 0xd7, 0x65, 0x02, 0xba, 0x64, 0x60, 0x9a, 0x4a, 0x40, 0x47, 0x92, 0xdc, 0xb1, 0x44, 0x74, + 0x1f, 0xb2, 0xed, 0x5a, 0xdc, 0x81, 0x26, 0x9b, 0x7f, 0x6f, 0xfc, 0xed, 0x5a, 0x96, 0x3b, 0x4e, + 0x06, 0x4f, 0xc6, 0x49, 0x98, 0x94, 0xab, 0xfd, 0x83, 0x22, 0x54, 0x9f, 0x59, 0x6e, 0x03, 0x9a, + 0x88, 0x67, 0x5d, 0xc9, 0x39, 0xdb, 0x8f, 0x8c, 0x66, 0xed, 0xa5, 0xa2, 0x59, 0xf3, 0x5e, 0xb0, + 0xfe, 0x94, 0x58, 0xd6, 0x7f, 0x55, 0x00, 0xb9, 0xd6, 0xac, 0x3b, 0x7e, 0xa0, 0x3b, 0x06, 0x25, + 0x46, 0xb8, 0xb0, 0xe5, 0x0d, 0x9a, 0x92, 0x81, 0x85, 0x42, 0x97, 0xe1, 0xbf, 0xd5, 0x42, 0x46, + 0x7e, 0x12, 0xea, 0xfb, 0xae, 0x1f, 0xf0, 0xc5, 0xab, 0x98, 0x34, 0x99, 0xdd, 0x96, 0xe5, 0x18, + 0x72, 0xa4, 0xdd, 0xd9, 0x95, 0xd1, 0xee, 0x6c, 0xed, 0x37, 0x8b, 0x30, 0xf5, 0x49, 0x49, 0x9e, + 0x90, 0x15, 0xfd, 0x5b, 0xca, 0x19, 0xfd, 0x5b, 0x3e, 0x4b, 0xf4, 0xaf, 0xf6, 0xc3, 0x02, 0xc0, + 0x33, 0xcb, 0xdc, 0x60, 0x26, 0x03, 0x73, 0x73, 0x8f, 0xab, 0xec, 0xb0, 0xdc, 0xbf, 0x5b, 0x51, + 0x8f, 0xc4, 0x83, 0x72, 0x3f, 0x2c, 0xc0, 0x8c, 0x9e, 0x08, 0x74, 0xcd, 0xad, 0x2f, 0xa7, 0xe2, + 0x66, 0xc3, 0x38, 0xad, 0x64, 0x39, 0xa6, 0xc4, 0x92, 0x37, 0xa2, 0xdc, 0xe7, 0xf7, 0xa2, 0x61, + 0x3f, 0x94, 0xb4, 0x9c, 0xeb, 0x6e, 0x09, 0xce, 0xa7, 0x04, 0x16, 0x97, 0x26, 0x12, 0x58, 0x1c, + 0x3f, 0x32, 0x59, 0x7e, 0xe2, 0x91, 0xc9, 0x43, 0x68, 0xec, 0x79, 0x6e, 0x8f, 0xc7, 0xee, 0xca, + 0xdb, 0xc5, 0x6f, 0xe6, 0x58, 0x28, 0x7b, 0xbb, 0x96, 0x43, 0x4d, 0x1e, 0x17, 0x1c, 0x1a, 0xae, + 0xd6, 0x14, 0x3e, 0x46, 0xa2, 0xb8, 0xad, 0xdf, 0x15, 0x52, 0xab, 0x93, 0x94, 0x1a, 0xce, 0x25, + 0xdb, 0x02, 0x1d, 0x95, 0x98, 0x64, 0xbc, 0x6e, 0xed, 0xd9, 0xc4, 0xeb, 0x6a, 0x7f, 
0xbe, 0xa6, + 0x26, 0xb0, 0xe7, 0x2e, 0xcd, 0xee, 0xa7, 0x07, 0xdd, 0xbb, 0x74, 0xe8, 0x14, 0x7a, 0xfd, 0x19, + 0x9e, 0x42, 0x6f, 0x4c, 0xe6, 0x14, 0x3a, 0xe4, 0x3b, 0x85, 0xde, 0x9c, 0xd0, 0x29, 0xf4, 0xa9, + 0x49, 0x9d, 0x42, 0x9f, 0x1e, 0xeb, 0x14, 0xfa, 0xcc, 0xa9, 0x4e, 0xa1, 0x1f, 0x97, 0x20, 0xb5, + 0x19, 0xff, 0xd4, 0xf1, 0xf6, 0x47, 0xca, 0xf1, 0xf6, 0xbd, 0x22, 0x44, 0x13, 0xf1, 0x19, 0x03, + 0x93, 0xde, 0x81, 0x7a, 0x4f, 0x7f, 0xc4, 0x03, 0xa7, 0xf3, 0xdc, 0x4e, 0xbd, 0x29, 0x31, 0x30, + 0x44, 0x23, 0x3e, 0x80, 0x15, 0xde, 0x10, 0x91, 0xdb, 0x85, 0x11, 0x5d, 0x36, 0x21, 0x8c, 0xa4, + 0xd1, 0x7f, 0x8c, 0x89, 0xd1, 0xfe, 0x65, 0x11, 0xe4, 0x55, 0x22, 0x84, 0x42, 0x65, 0xcf, 0x7a, + 0x44, 0xcd, 0xdc, 0xe1, 0xce, 0x6b, 0x0c, 0x45, 0xde, 0x57, 0xc2, 0x7d, 0x34, 0xbc, 0x00, 0x05, + 0x3a, 0x37, 0xbe, 0x0b, 0x9f, 0x9b, 0xec, 0xbf, 0x1c, 0xc6, 0xf7, 0xb8, 0xef, 0x4e, 0x1a, 0xdf, + 0x45, 0x11, 0x2a, 0x19, 0xc2, 0xd6, 0xcf, 0xc3, 0x2f, 0x72, 0xbb, 0x18, 0x13, 0x61, 0x1c, 0xca, + 0xd6, 0xef, 0x8b, 0x34, 0x14, 0x52, 0x46, 0xfb, 0x17, 0x7e, 0xf0, 0xa3, 0x6b, 0x2f, 0xfc, 0xf0, + 0x47, 0xd7, 0x5e, 0xf8, 0xe8, 0x47, 0xd7, 0x5e, 0xf8, 0xe5, 0x93, 0x6b, 0x85, 0x1f, 0x9c, 0x5c, + 0x2b, 0xfc, 0xf0, 0xe4, 0x5a, 0xe1, 0xa3, 0x93, 0x6b, 0x85, 0xff, 0x78, 0x72, 0xad, 0xf0, 0x97, + 0xfe, 0xd3, 0xb5, 0x17, 0x7e, 0xfe, 0xcb, 0x51, 0x13, 0x16, 0x55, 0x13, 0x16, 0x95, 0xc0, 0xc5, + 0xfe, 0x41, 0x77, 0x91, 0x35, 0x21, 0x2a, 0x51, 0x4d, 0xf8, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, + 0xae, 0x9f, 0x3b, 0x27, 0x9f, 0x9f, 0x00, 0x00, } func (m *AbstractPodTemplate) Marshal() (dAtA []byte, err error) { @@ -6213,6 +6214,11 @@ func (m *KafkaSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i -= len(m.KafkaVersion) + copy(dAtA[i:], m.KafkaVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KafkaVersion))) + i-- + dAtA[i] = 0x3a if m.SASL != nil { { size, err := m.SASL.MarshalToSizedBuffer(dAtA[:i]) @@ -10685,6 +10691,8 @@ func (m *KafkaSource) Size() (n int) 
{ l = m.SASL.Size() n += 1 + l + sovGenerated(uint64(l)) } + l = len(m.KafkaVersion) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -12652,6 +12660,7 @@ func (this *KafkaSource) String() string { `TLS:` + strings.Replace(this.TLS.String(), "TLS", "TLS", 1) + `,`, `Config:` + fmt.Sprintf("%v", this.Config) + `,`, `SASL:` + strings.Replace(this.SASL.String(), "SASL", "SASL", 1) + `,`, + `KafkaVersion:` + fmt.Sprintf("%v", this.KafkaVersion) + `,`, `}`, }, "") return s @@ -22887,6 +22896,38 @@ func (m *KafkaSource) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KafkaVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KafkaVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/pkg/apis/numaflow/v1alpha1/generated.proto b/pkg/apis/numaflow/v1alpha1/generated.proto index 1fdcdbcad5..bafd54085c 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.proto +++ b/pkg/apis/numaflow/v1alpha1/generated.proto @@ -863,6 +863,8 @@ message KafkaSource { // SASL.enable=true default for SASL. 
// +optional optional SASL sasl = 6; + + optional string kafkaVersion = 7; } message Lifecycle { diff --git a/pkg/apis/numaflow/v1alpha1/kafka_source.go b/pkg/apis/numaflow/v1alpha1/kafka_source.go index e0019e7ed2..7e53221e6c 100644 --- a/pkg/apis/numaflow/v1alpha1/kafka_source.go +++ b/pkg/apis/numaflow/v1alpha1/kafka_source.go @@ -29,5 +29,6 @@ type KafkaSource struct { // SASL user to configure SASL connection for kafka broker // SASL.enable=true default for SASL. // +optional - SASL *SASL `json:"sasl" protobuf:"bytes,6,opt,name=sasl"` + SASL *SASL `json:"sasl" protobuf:"bytes,6,opt,name=sasl"` + KafkaVersion string `json:"kafkaVersion,omitempty" protobuf:"bytes,7,opt,name=kafkaVersion"` } diff --git a/pkg/apis/numaflow/v1alpha1/zz_generated.openapi.go b/pkg/apis/numaflow/v1alpha1/zz_generated.openapi.go index 2c42771989..314e17e867 100644 --- a/pkg/apis/numaflow/v1alpha1/zz_generated.openapi.go +++ b/pkg/apis/numaflow/v1alpha1/zz_generated.openapi.go @@ -2947,6 +2947,12 @@ func schema_pkg_apis_numaflow_v1alpha1_KafkaSource(ref common.ReferenceCallback) Ref: ref("github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.SASL"), }, }, + "kafkaVersion": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, }, Required: []string{"topic"}, }, diff --git a/pkg/sources/kafka/reader.go b/pkg/sources/kafka/reader.go index e06f88a6f5..9926c94888 100644 --- a/pkg/sources/kafka/reader.go +++ b/pkg/sources/kafka/reader.go @@ -95,6 +95,13 @@ func NewKafkaSource(ctx context.Context, vertexInstance *dfv1.VertexInstance, ha config.Net.SASL = *sasl } } + if v := source.KafkaVersion; v != "" { + if version, err := sarama.ParseKafkaVersion(source.KafkaVersion); err != nil { + return nil, err + } else { + config.Version = version + } + } sarama.Logger = zap.NewStdLog(ks.logger.Desugar()) diff --git a/rust/numaflow-models/src/models/kafka_source.rs b/rust/numaflow-models/src/models/kafka_source.rs index acdd1eee98..e085b6673d 100644 --- 
a/rust/numaflow-models/src/models/kafka_source.rs +++ b/rust/numaflow-models/src/models/kafka_source.rs @@ -24,6 +24,8 @@ pub struct KafkaSource { pub config: Option, #[serde(rename = "consumerGroup", skip_serializing_if = "Option::is_none")] pub consumer_group: Option, + #[serde(rename = "kafkaVersion", skip_serializing_if = "Option::is_none")] + pub kafka_version: Option, #[serde(rename = "sasl", skip_serializing_if = "Option::is_none")] pub sasl: Option>, #[serde(rename = "tls", skip_serializing_if = "Option::is_none")] @@ -38,6 +40,7 @@ impl KafkaSource { brokers: None, config: None, consumer_group: None, + kafka_version: None, sasl: None, tls: None, topic, From 00a74df0f03a8548b3f11776c598c61266801706 Mon Sep 17 00:00:00 2001 From: Vigith Maurice Date: Tue, 29 Oct 2024 12:56:39 -0700 Subject: [PATCH 131/188] doc: monovertex (#2193) Signed-off-by: Vigith Maurice --- docs/assets/monovertex.png | Bin 0 -> 12441 bytes docs/core-concepts/monovertex.md | 79 +++++++++++++++++++++++++++++++ docs/core-concepts/pipeline.md | 7 ++- mkdocs.yml | 1 + 4 files changed, 85 insertions(+), 2 deletions(-) create mode 100644 docs/assets/monovertex.png create mode 100644 docs/core-concepts/monovertex.md diff --git a/docs/assets/monovertex.png b/docs/assets/monovertex.png new file mode 100644 index 0000000000000000000000000000000000000000..eeb4f49732ad1beec60da6f7cea2b29d1d069ce6 GIT binary patch literal 12441 zcmZX4byQT}7cYoPC?ToRs30vN%?ypwB_PsBiAagm41B1Is4SUpZ(dNSR(^%TAG_QWMpKt4Z^xmClvkGnu<{{ za)Ay@W{Eh@ot>R7L88fU+8tPIQWC-WV0E~`q4)A^TYxxpuZQw&TG|DYxy)y;CD8o< z&-v!m68#*ly1F_rB_-vO@xhE;?ie#AOE2>sZVmZC8yevOA@YNgGzgJ~o}TD`cJTLY zcD7E}`N0)ZFLJRk!XfVSX%uv497C^tCt z9KIBas}2k&4fgxytDX6?kd5(TFToqTu(-!G;x@j#`yBVm;W~lN(k&`1VeCnbb%)e<;!peQsj{{%!KweXm6&P?s$Cul5R% zic5RKveew~uvL;-%2-FpN!iPdaoKn6&AQw?jk*q?>EmXf6~Y8jg7iXnjZNEW&H|+Q zLo1eNIHQiVhrKOdNR!|ZW)U(w%}8UHMMvD;6%LXDfe7??Zju$5n|ZYr2B!1s=^d#z z!`EeHo4SBS6CnI+Q}M$cdmbDg6Oz-oqg}72&wcIfg-we8+prJQolqS7?RnsXD`~c* 
zGc0P;Y9>NOvip$n_HCct*E3V&+}@{5mx@bD;x7GL%cD-*j-s!h-^#t2q~52FjJ=-$=Q;?#U3WKsCpStmaGdzs#biCM=pmS@~dn(20-H!##P z>2;u@!Xm#(aM1)I7JCt=qo;RnI-F`Dr$^`v#Ru1ssTiE^_0l)ISWrOEw@9%Fn%wT_ zc~oPbXr-Zv{B681?Y{%b2EoXyXAfkm7qd0m(&@{I2mSA7UXX4(o%A(|8@d4Qb~6gZqxefBkYwq7NsyD z@ayLrTK2%MVK9yN*!E(_6A`kMcTbvLjD2<#>NX}k80^GUQg-4|XE}sKre{lg>cq2(7Usx$#i1^K7(>@ zRIrx#)vafOZDT#3_UQ00Vk3FC^Lu?r23_Te6xb&rxC=tGwasT9tD+Xwf~v_#o)~(4 z4xFuniPxPLt7w7DHvA5^ot@A!N?Sde)>M{y1AZ$r!yMip&vuKu7^K+XX|TzrUo?kG z)8uI=;YPr;ygCRp{<}Z3X|ULoF1u?tLC>ksOp-n&1o@|Tu>zKqsKKLqLCbyAb!1^{ z6Xm88l@{U{)1hVZ*V4BMnmQm%z*xi9Yj&^mkHcoI&*g!@VRSO0GCf*?4KB_ePLY;p_*H{*Ef~sAT!|=s0+!0y9%0iWOL; z|2OEt>ccv+?vo{$;VrEsGud9V8mmM`+dTC_aocOO->wydN+i|G(d7=>_%f|`_Wnp*bDel0NS~(~_WorHL4HkhLSU8d z>un|GE=Af}JG+I8j5up?>{exc;h9{KxVT->ek^jr>z*&wVkfjM(DOI88ML`ni|9cF zlsplH$L*~p5JK>BqPd{=ac4N=VMf(5!-#K|@S0h0#7{6U8w0=Uul69Rv$Hc}Om^e? zb_Y(Q*XI%?k#N6pb3TgxkxInM@;VStmi@y$#?kf1E0Y@|^s@2YYczy+Siuek_M$1* z`QJTdh7=EX;f+_v!N%|u3Sc>s*atqSMofgSkJqztws}96zF{F1u;CkA_263Y`TpR> z>Hf;{{fN!qSPmt*Y7iv`t=DDipM>W-blQxYS+!f0z|O5S8o|ZHfW}e`0+lcc_9%d< z$pHTP?=Ndsn3mEhA6s5)e7&9%Cp>r3Z%jjdMSz=|+p5L4IB@&-c{2!ZhDI7v+j4Xt z6?}hZqud#NGiv&d-9+h#WGL~fIx$jPE%TxR6f6Bs+|CwX@&`u=X8X}cb^0V8A5CW= zJExZ=shL17Ctib~(Sc4?nP-WG?&niEX>~7QL)|r&aF{Hf0kT|+U}BX^5+N%k411B< zVSJU2OX&gm>(^!iwDP&ptHaPWyQuv^rc1g)>+p4W%8jodsW}y2B>*6y-1%jKDYwS-)afVAbN&8bzbvt5D0siz zYbQ6%%$<^AQ|`!lpiTQcs_2)PjS<{HqY`1h4o9;kwSIto6{kusNe4hxv2I4mr&4yg zAA%hm?wG0D7^XMP&2kdv_AqO>^IPom*-!b%T*k3f&`hTZK9Uu)!7+|=r8qJy!5D&W zr-fE^TQl(MuMQ-#n{{Z>R_vdy9Gy+0G@R}UgJebF-g`5Uxc@V@F<~>nrk*Ja@R?QKEKlxN>J`tebUVY5$0yf>AD8?Nhq0@WZs>p)+*vGaaqdLT#{ zVDv}rk}>>M;W6T~v*2g_Cu71YYD8)A$cGUg(=~am@YMn-LC5~+=o}F7zHdN4>B(Y< zVS<1q+qA&0KoHLQGnK)f_a>{pCUiD-7Cce_Tf2-cmFLyFX=!4T7A!V--^+_hSV`$7 zw|b55-)~1N$>2DjOPfE@huz2T`0n)-`MXauV%(k9;X%;^cMw(jW&gu9eIdI}B_QNz zR;{OW&?o1AP>Z4|S6tj2WIagUtPeMVBL)XzK3tq>BmAXAuf3Qk^>oA6L)nfH?Y{}b zIc>)9wb%FDjhqG&c)>6j_GrSwQ8POChP(qGi-={)C>$(4z;+P5S#N0!#uB~{^4(P( z{hlkAOH%;y4Btd9b=}3ff)Y50jM&mx9=)3kh@#RAHvRmw3dxc%%{oKP|t! 
z{Nucz?{b>rSlAEv+a1)OjTJ^+_ZU*H27z^g)i2%}%5h2jTBiB*BkAFh;pP>OpGL|d zDWO$8mVo)2rAN_kqT*R@9_%DG$y6?(l^~~|u6qq})*!V06!0;qE^g0#zKU_bV+}Wq zDuP)t0s9Bco2sjF?cJ;X5>dyfH|J+hwiSMz2*4Y#=X=|K0UJW(R1gOy%Wxh3rksu6{OM3v(1=A!bTlOtGil*C&+FO@Vt*4I!*lU0qrsOBEd_4| z)jb-N`3X*>cNphM;#7QpTob(FQpcxG*dIM*5w<7+@NisR^p-oO1-2=9Es}?LI-)lz zL|Ftnq{1V=G{Y`iROmsC;BTVtxik3q_?VUJu+%X|VDvH-^0-2_BwGD9-(QCXO4lIJ zkA!0xE>6|E4aiNna``~bYfK(h9oj3e!w25nb0;HWr@PAB#tNCi?w>7R_XMXI;`5Z0 zmEE^z8y)F*JglIj53h?}V6a5cyMlSeCtPcQ%eLlxWcUQQCNhx^DJ(mJ8-r79Y#(EU z?^#;ZzpwVrjJICK9+{)ldy_z9ul*Z6CYhtDzZEwSHWCT$l=@E^K2lAT@&b26rARBW z%wJEjZaEyYpR@|xc`>_Aku`Fzq|JI=+!Wtw7-pZ%#|Ylii<$Yti7i$B0K3Wd^<(^s z*We#-)?>f(Wfn{>O3HPV*^fNU94^O^0zJpgUK3i|jmCcGJu<`rE{a7yWRrCoxJq-C zrVu&(B8G+Sd684o4l@^**xH5LH3&x4*GIS}0nI(~x%KIl#}(XM{J?X-c7^a*95@!z zROwAd#v>L-_R0X+t{11U13+6=zYFwkm=>pP$hE-kS1;hYTF^BzY^f74 zH>R&2uRLjd#sXkoWa`8f$EZtl6->#~lE>FTh8$gn;)BTh~@4 zzRaD~A@-4fPrL+-y5ABAcjw|YH~ZAlQ>10;-5=*@@SD5_A(M)T`9Sd{g8`f(^yn(d z32&%OVhqEk%rCAJ1Py>%ZmDLzGzJsTrOt3TWZy<@3=7M1FZ?klI?nHS>#>CF-Um>| z_gtQEx?hVuc^<~_ClINeu6o#;E{n0{5BInc04>d7~RFJ*IQp>s-jSj9GGRu9PwhKxGe2-NSZ?0b zL7Z^&m>Ye9*HOnZU%!5r^rqixH%hac9QXJhMw5o-DuW+|J7y-NTcyq*0`E<{szQ{$ z3K)~Zc|~+Xp#QK*U3fUYDkvnU^}xFI<-_l(S-`z@pV7PK-5pBz+%6Uip{fR1 zv88o9daWOdw392+zZ|N3wp8s2J^kH=R@#~uTpRk{fYp<4R=>#ci5nCsk2X~!zN7>0 zY52X3@U*nFp@oIKB)fJ#z}Z^9(9njb!;BKzE+vzwu3FA(LWp>vlO6jnrhrbCe z*D>x?z^=IM%(s{oK7Cl{7><8bZ5+X`m&pT+(+tvmC&C*}`)g_GnOQ*we*%-=Qj45h ztq#oB2l{48o%r(dJh<&-OK^?KnjSdJaj~%l>#mA%Bzvb5u8D>lbX{rJrd^wo#`u1P z;W4{2ulaKBUjv4=hHonr!W1ZW;QHD3nfxx+P&$lXnk+YszI>Hk%v|qN7w^8fNh5l) zpCjwWG%7NcLgN!TPM3pS`+lVb%Rl=Ftpw&x$zR+jh>A(z29g2xz>oY^?tC<&#ALja zexn}g+UMya?R;$QFW`&+*HKy)lP1v76L~MmGaSo*j-=aV2~Ih??gxF7ZyXE_Mhp8q zdGX!qT%r2mtXSx--UQ+CMLd!UIJZUxYEf?3^Xa}gciqw&hQhbqJEf=q>tSpU~h1H{K zlS}?xy+t81AD0j*i*?IhGakr+xWShVKSo?rOG`EXE%hkGGu1MKbNi@~eL~;`)w?Tn zC1rG+<-U#2j$W>~u6JjYPR=a_dPrfu^~VYYA8*&bxsTt?qsEr{TvyN|01y|T0S}BI zIiW$W-rKYHXS-lsr%)}3WjyfFieW*%KYe~FF-Nuil33k=`CE47TZ-W$-V6H}b6($p<8ST}vS_4V)3(w%k%0jTiLq-46X 
zk#(Y&a18^0;yl=ITsX1sr-Ni~JX0>F@fo7U9a%yz+6NE~Tp6#| zryVoOyOJK+@Zc?NJ@6sbmYBZJ~(x$GJ8fDxXmPUzR5XDa8v5`?MQ&I zD$d{LU)i>TiZ%KXzsI7mv>(RA^%=Vm!WLBNT08I=j82h+X6AfHNB~wK?Yy24Ks-l* z`Yvd=XJN9{~th@LE2ob>bYw^|8s6>Sso zk3`y~sS=xzXH%cw&BhqocJk+Eh*r;iQO`>jwy23|k>-O!rz$OW3ovIJij~08l98uP z4-n@nJaE<-UjjcgsRibHzO`__q{>iSTpYtRRHzwWG5ERza`rj6tqALCaQl0~cPb0O1k$Uj3!MfBSugn#Z zpT(gRcn>M*F;!I{@3;1o-K#xyr~5TU)_ZvvL1}xTE#u4c8NPh>N~^|-;I%gs6J{9f z+Q~8LU?h#0Fa)S?!W!9QN&Lf~8<$DKziP*;{hPGe@PDksazcP`e5nidE9 zV}Bps+L#7|eByN83Fd_6oeW>CG0S2^eb)s~&aB6Zn{K!AHuy}xX!P+_pSUUH2&8_jpgpEh#CQ)BrEM(il2ah}N zgeTSy3+ee%J-jaU;C_{tY=vd#RsA3zZ>%H-e=nMlXd9C_ygC}CzAVA9-PfsKQfnCB zuLjW2%q%R$Q)Rb+E!eA3=WO#YT_z`g#<1G?e*d`NW6C(^7*S$ZTO9fJ=N|T1?Lru{$|(nm&Y|bc&CPlv z-!moDrB!LWMy5h{e$^FWcaQPh@hnKLX7~O?F7-RQ8OF)i26h9_PR=)e>mpsAS{>)n zgN@mXzQCPdIegYNbQZxK>voO8t+8h~@X?b*0Wq6L5!*dousnids2IyTquw6#uJ-CZ zj|oJ^j2qlUvKrKAxS#vj++n=8H*ZQTSI3W03+WVfW&CeZo0Mp;Lz?=Uz?_B4!5qek zwD8|w-_WO+tdEeC-%Qtz70R|m5a4;RG!pAiV&z8lG5cX$w z#=y;q_FQ}QW%`@2<^XDNw*+81st6n$9C|<&XBNm$=YHAMWlBjV>LmarR+G zE?vA-PW~1)`Y>~qbF^`jeQ}{!Mh_2_B&?1%*{fpGhZD}@S&E$5W#ayNMxkD#B9>Iv zmkMsCn#0c@(!+#hDzPvB8uClMSPQN3V@_nBRYXeGG-TzVdJ-_@p37^nmJFHazfu^P znA``G8h%tH?ZD-Gct5}B2;O+&$i>Pp6&gIOoV5h;n3h*LaPeD9P)&251ScL1--i9_ zQ#M%=mRiiXmdU3FN~rHN-TcXxPTHE8-y$1UJC+P z7E^>0i@R42l^o7N1o<@)2%JBL@TQfTwQ28c7-uC>#nrwr)o35xI2ll-J`?<;NZE;eS)v2N}b& z_5($%|3C(_=G#MRTM@Fb@;4!zyTmf6(D1=c{xes?vObbkbOLl%%mk&T#3SVZpp|lK zG_2@PujyWC>f^k#H%t15O)X#?x~e(R$8=>*iYrrA;FRehLDAFoD0*QnhJX05`>+6h zOG;OfV)P{mr%s7x>qK2)Vg3gc>vQ+6%H1TG&~ydK_6mBmz*sei(iZ_Z--mjixB9`) zPu->6UFhCDqkm9KEdTz)i|n7e?nnt38QF(r-jZ6u>Ul5w`Q=cb7%nOl!*6*j128)# zIY8d+l!}D1_d(f>9@GC}NhU2i#-7VtyT#X_8^a2QG4hg9HHhZaTm7w9j|cIGv9pc; zgW3?i5dbO207yCcggY@d_CI?Y#eo=misR|+nW+g7zs^68zHI(9t{Sn2&4Y^y$NmGl zwA`Rvwftb6s!V$rwjY_8HY+K0Fpix z0OtFlMvFf%-sy{@K4!bl{Bblo6ae{yMNeaSNQBiuQsH|2=~g*unlw?^eatHc*$Xfowef zrozj+$~PEE85djtJyp!B{w9XhK^2>o1(2k{KefV&3e=bT=2fhqQ?#`IhK64b@a6#G z6X5*BY@^5U$Q-VqlkAjW4Q90eU~?7y+o=@nLk<~N1n-fdRzsO<5E+c6asKSjlX0#- 
z6Qu>fke|4egN-7z68qz5zOVs+)qi)<<~y}gH)f{6hXP2ykF)PrX)Ct|{KrJdJWn8O zPyxB#D|P5iY9P~_YxcPn6dXKGVoRMb&HdT`VyQH_KaHSP0$74=zhS2niWkLf`|CGK7T)JcY$x@|aD#G@>)e_)WTm)BsC?+4CZlW%70W4{?bG_zo^0}Gj zBNohvY#BluiS}FE9O+Y_`w~HL*CTN=n(?g63xvmYU&FT>lH9cc^4d{MWf`bVF^FEE z|79?JTn$izeiCdvKF}{AUm{2ZO*2d;IflWHyar&4`$a83g~gqojSY`X_vu`j8eBQI zOyxs2Z=x>T<`MuGgJ5l;dXZe8KwPdwzT^Us--Aj+fF1gYMFC=le(Nv~^~{$(8Dcgj zC*Z6uNR*{d4|Yu~M4i)%D*$gD_Hhz5_2^Jzk9;#EIs6xnBhT=?~e zLWAtW<_*~f*++Nn=rCEjR#3W|GS(yx@flGBpm@yy>N7PZmFQAdrB19wbS`ktC@j1IpV*@RG25?keE(J&JdtPu?r|Z}OXa54E^DwV7l|I#29RBulL3nZr9mUNVwX z`_>pX9h-g8bDt=E2XJG>A@pNRJg8>ado7|gEl@p~ZS@y!X=xeDZfc5s$MJb^Wng7+ zrHuQ@5V#sd-(GJOd3$K_^|r7vTn4xgvrSF$i)1tEL}_k-QC{&IH7~cKRZK@QCR-E> zJ@TP@!M=j6GOniSOi0&Nvb|qv-Y4Je3D8i_?jIrMbvEITyy;$~t7b}nIz%B%|9p*d z-y1S^jEb(J<&ZGpl;r~BGs7`%7QVDo3ba0Qx^xT#rdCa)W|?FWCcb2$7GcqE`5IpUb1A_fEXKh4K%FEKC~j&4RRY%9 z#{$_AS$mr&MgtC2l3ZAc3xi{5zv-NKjb_Po8!s`lfu4wgWOm^JDh};R#2t_6_uBnrf3} zF}rix3AA!tSd`0wxh)Ld3GM$C1{C6%)NcS1AKbwD3`Ci@_t9p2AVj8!c87RT}8v;NX z&(8#T1uhJA7O&ogA8@dpvWMykTQIU*7yxCC7tdQ;1#6S-Q0+m7eb>Vd0cQIF{KZih zyMP9n)P+zRb==EGcBt^PGDC~PN!R2>sNNVDRS5g4&Z8Ip3@G`cK`MYMqyfm03$vOf zLzN*aHlfgS1xzhxE;k~lwwC@r4Shl7;7QEAwQr}GJ@Vlo?lFCK*|=%CQ6R{7b~4|5 zu|;UPhG+U5c8_`kD068wKAYqNgTdKNkSI5ynixCnYkx5lWsgmvQ0(;88udyGp+e;oqo*E>}v7|JAJ}dW<`R))pKJs0-AN z8TkJc0A!9%e;i9u_XkFj%1Qyc{l-mhGzKmJUJCobBV^TZ-P71Mn$Y-q6N#=ji&e4e zu&2QjwjW;mm3s|r7}{B^bMY>s>#pa*XU%pQZ|J}}TyqS}J17}7%p_nCbE&)i#eB0W z1tpysJ%#aR>}-R~2Im+gmm1td~hU4-Y$%lZ6hB0lY(_5pbp+RyOg+YOOvv zhhz&O;^B5kmmmAoZNqyIZHRlbGmjAD7KtIWPh0JX+2*p`JJVHL}F{^#7yTIWK)ZE5$w_bB+aCc=ND4m`~PS)0f zz<9sQhriEY*bVG6<%ya{`U|<|B*9Q~J!PnMC?u5Y9CCAlULBg9YkGp%!!~eYsL#63 zjA_$fym--7Yu9BfLiQnG{UK0mc- z^EZVj6Xe5#e3SwDDX+bw<1P?EX#;C+ZO=6Kt@Whs!nvcllewp#lq0wn9038&a0FpP zzyAWq9N4L?5Ekz59lFXf>~L`27t2H(7YEd80EcR%rgmYHmfCnJmGlWt^_;EP4NO0V zJ`fPMW15MEJtdrmo)OM`^3Axs4puK>_Av-*m0pa5v(5!&7AuSD9JNc(awoG$ zZ0w(nEu|IO!o5=2p9r`0P3`JIgDEV$G*r6WBA?!pn@8url!8V2)5HAUKCzA zACga#_1k^E)QK0zULx~dUha=46?fvEkEH=qoGj^A%s+H_`vvjpgM)Zd(Z(U$U0c(W 
zF$VzaJo|_UaH|eJ_E zQv92Sd9w9VfAURLb|bhwMHUvdJ{GhNuO7#i9-NdJeqtL<*5TwAqLKa+uBWfha)sOy zX$Zz^{C~pE`ako5M15M+(1rh3O*+_;dUy-5v=?LL*G z^yON$9y&APzFyzPY+|xL33c2nR8&oqScaNkY{|9s0cSw)X>?$((*$51#E1{=iP3%(W0c>%S2D1L@WQ zUBCiLIbvT+JIQxzCY0Q$U+(4(cZ$;BVCwhXsALEz4)J410GISXsW~;tld!`-^8;sz ze)D)g&qG1Rdgb~=QL%PWXjR%%FF;X`XW`-^Y##g_5xjr@k&-)UcyEqT*IcArtf!5#T$jlaBE_0f9wHk Date: Thu, 31 Oct 2024 20:58:48 +0530 Subject: [PATCH 132/188] chore: Batch ack requests and sink responses for better performance #163 (#2194) Signed-off-by: Yashash H L --- Dockerfile | 2 +- examples/21-simple-mono-vertex.yaml | 6 +- go.mod | 2 +- go.sum | 4 +- pkg/apis/proto/sink/v1/sink.proto | 2 +- pkg/apis/proto/source/v1/source.proto | 2 +- pkg/sdkclient/sinker/client.go | 10 +- pkg/sdkclient/sinker/client_test.go | 16 ++- pkg/sdkclient/source/client.go | 26 ++--- pkg/sdkclient/source/client_test.go | 4 +- pkg/sdkclient/source/interface.go | 2 +- pkg/sinks/udsink/udsink_grpc.go | 14 ++- pkg/sinks/udsink/udsink_grpc_test.go | 5 +- pkg/sources/udsource/grpc_udsource.go | 14 +-- pkg/sources/udsource/grpc_udsource_test.go | 17 +-- rust/Cargo.lock | 4 +- rust/numaflow-core/Cargo.toml | 2 +- rust/numaflow-core/src/message.rs | 105 ++++++------------ rust/numaflow-core/src/metrics.rs | 2 +- rust/numaflow-core/src/monovertex.rs | 2 +- .../numaflow-core/src/monovertex/forwarder.rs | 12 +- rust/numaflow-core/src/pipeline.rs | 2 +- rust/numaflow-core/src/shared/utils.rs | 2 +- rust/numaflow-core/src/sink/user_defined.rs | 28 +++-- rust/numaflow-core/src/source/user_defined.rs | 47 ++++---- rust/numaflow-pb/src/clients/sink.v1.rs | 4 +- rust/numaflow-pb/src/clients/source.v1.rs | 4 +- 27 files changed, 152 insertions(+), 188 deletions(-) diff --git a/Dockerfile b/Dockerfile index c234eb30ed..57feea2da7 100644 --- a/Dockerfile +++ b/Dockerfile @@ -89,4 +89,4 @@ RUN chmod +x /bin/e2eapi #################################################################################################### FROM scratch AS e2eapi COPY 
--from=testbase /bin/e2eapi . -ENTRYPOINT ["/e2eapi"] +ENTRYPOINT ["/e2eapi"] \ No newline at end of file diff --git a/examples/21-simple-mono-vertex.yaml b/examples/21-simple-mono-vertex.yaml index 98192aa8fd..a47dbe3123 100644 --- a/examples/21-simple-mono-vertex.yaml +++ b/examples/21-simple-mono-vertex.yaml @@ -1,12 +1,10 @@ -apiVersion: numaflow.numaproj.io/v1alpha1 -kind: MonoVertex metadata: name: simple-mono-vertex spec: source: udsource: container: - image: quay.io/numaio/numaflow-java/source-simple-source:stable + image: quay.io/numaio/numaflow-rs/simple-source:stable # transformer is an optional container to do any transformation to the incoming data before passing to the sink transformer: container: @@ -14,4 +12,4 @@ spec: sink: udsink: container: - image: quay.io/numaio/numaflow-java/simple-sink:stable \ No newline at end of file + image: quay.io/numaio/numaflow-rs/sink-log:stable \ No newline at end of file diff --git a/go.mod b/go.mod index a40001aeb8..c9048c770d 100644 --- a/go.mod +++ b/go.mod @@ -32,7 +32,7 @@ require ( github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe github.com/nats-io/nats-server/v2 v2.10.20 github.com/nats-io/nats.go v1.37.0 - github.com/numaproj/numaflow-go v0.8.2-0.20241014112709-e12c1b5176bd + github.com/numaproj/numaflow-go v0.8.2-0.20241030023053-f6819383aa7b github.com/prometheus/client_golang v1.19.1 github.com/prometheus/client_model v0.6.1 github.com/prometheus/common v0.55.0 diff --git a/go.sum b/go.sum index c5f73afc67..b60aad18ee 100644 --- a/go.sum +++ b/go.sum @@ -483,8 +483,8 @@ github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDm github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/numaproj/numaflow-go 
v0.8.2-0.20241014112709-e12c1b5176bd h1:yL7sbAaeCw2rWar1CF19N69KEHmcJpL1YjtqOWEG41c= -github.com/numaproj/numaflow-go v0.8.2-0.20241014112709-e12c1b5176bd/go.mod h1:FaCMeV0V9SiLcVf2fwT+GeTJHNaK2gdQsTAIqQ4x7oc= +github.com/numaproj/numaflow-go v0.8.2-0.20241030023053-f6819383aa7b h1:UEhFHfBwe2DwtnYzdFteTZ2tKwMX739llzfebfEMGg4= +github.com/numaproj/numaflow-go v0.8.2-0.20241030023053-f6819383aa7b/go.mod h1:FaCMeV0V9SiLcVf2fwT+GeTJHNaK2gdQsTAIqQ4x7oc= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= diff --git a/pkg/apis/proto/sink/v1/sink.proto b/pkg/apis/proto/sink/v1/sink.proto index 8f42720b5d..e7a85523d7 100644 --- a/pkg/apis/proto/sink/v1/sink.proto +++ b/pkg/apis/proto/sink/v1/sink.proto @@ -97,7 +97,7 @@ message SinkResponse { // err_msg is the error message, set it if success is set to false. string err_msg = 3; } - Result result = 1; + repeated Result results = 1; optional Handshake handshake = 2; optional TransmissionStatus status = 3; } \ No newline at end of file diff --git a/pkg/apis/proto/source/v1/source.proto b/pkg/apis/proto/source/v1/source.proto index 7dc1a67412..f1d2a6eb55 100644 --- a/pkg/apis/proto/source/v1/source.proto +++ b/pkg/apis/proto/source/v1/source.proto @@ -130,7 +130,7 @@ message ReadResponse { message AckRequest { message Request { // Required field holding the offset to be acked - Offset offset = 1; + repeated Offset offsets = 1; } // Required field holding the request. The list will be ordered and will have the same order as the original Read response. 
Request request = 1; diff --git a/pkg/sdkclient/sinker/client.go b/pkg/sdkclient/sinker/client.go index 3c7bc4b23d..9ab3fc2858 100644 --- a/pkg/sdkclient/sinker/client.go +++ b/pkg/sdkclient/sinker/client.go @@ -167,17 +167,21 @@ func (c *client) SinkFn(ctx context.Context, requests []*sinkpb.SinkRequest) ([] // Wait for the corresponding responses var responses []*sinkpb.SinkResponse - for i := 0; i < len(requests)+1; i++ { + responsesCount := 0 + for { resp, err := c.sinkStream.Recv() if err != nil { return nil, fmt.Errorf("failed to receive sink response: %v", err) } if resp.GetStatus() != nil && resp.GetStatus().GetEot() { - if i != len(requests) { - c.log.Errorw("Received EOT message before all responses are received, we will wait indefinitely for the remaining responses", zap.Int("received", i), zap.Int("expected", len(requests))) + if responsesCount != len(requests) { + c.log.Errorw("Received EOT message before all responses are received, we will wait indefinitely for the remaining responses", zap.Int("received", responsesCount), zap.Int("expected", len(requests))) + } else { + break } continue } + responsesCount += len(resp.GetResults()) responses = append(responses, resp) } diff --git a/pkg/sdkclient/sinker/client_test.go b/pkg/sdkclient/sinker/client_test.go index e95b39dba4..01cebcc590 100644 --- a/pkg/sdkclient/sinker/client_test.go +++ b/pkg/sdkclient/sinker/client_test.go @@ -65,9 +65,11 @@ func TestClient_SinkFn(t *testing.T) { mockSinkClient := sinkmock.NewMockSink_SinkFnClient(ctrl) mockSinkClient.EXPECT().Send(gomock.Any()).Return(nil).AnyTimes() mockSinkClient.EXPECT().Recv().Return(&sinkpb.SinkResponse{ - Result: &sinkpb.SinkResponse_Result{ - Id: "temp-id", - Status: sinkpb.Status_SUCCESS, + Results: []*sinkpb.SinkResponse_Result{ + { + Id: "temp-id", + Status: sinkpb.Status_SUCCESS, + }, }, }, nil) mockSinkClient.EXPECT().Recv().Return(&sinkpb.SinkResponse{ @@ -94,9 +96,11 @@ func TestClient_SinkFn(t *testing.T) { }) assert.Equal(t, 
[]*sinkpb.SinkResponse{ { - Result: &sinkpb.SinkResponse_Result{ - Id: "temp-id", - Status: sinkpb.Status_SUCCESS, + Results: []*sinkpb.SinkResponse_Result{ + { + Id: "temp-id", + Status: sinkpb.Status_SUCCESS, + }, }, }, }, response) diff --git a/pkg/sdkclient/source/client.go b/pkg/sdkclient/source/client.go index 550c888d66..feff9ddc59 100644 --- a/pkg/sdkclient/source/client.go +++ b/pkg/sdkclient/source/client.go @@ -190,27 +190,19 @@ func (c *client) ReadFn(_ context.Context, req *sourcepb.ReadRequest, datumCh ch } // AckFn acknowledges the data from the source. -func (c *client) AckFn(_ context.Context, reqs []*sourcepb.AckRequest) ([]*sourcepb.AckResponse, error) { +func (c *client) AckFn(_ context.Context, req *sourcepb.AckRequest) (*sourcepb.AckResponse, error) { // Send the ack request - for _, req := range reqs { - err := c.ackStream.Send(req) - if err != nil { - return nil, fmt.Errorf("failed to send ack request: %v", err) - } + err := c.ackStream.Send(req) + if err != nil { + return nil, fmt.Errorf("failed to send ack request: %v", err) } - responses := make([]*sourcepb.AckResponse, len(reqs)) - for i := 0; i < len(reqs); i++ { - // Wait for the ack response - resp, err := c.ackStream.Recv() - // we don't need an EOF check because we only close the stream during shutdown. - if err != nil { - return nil, fmt.Errorf("failed to receive ack response: %v", err) - } - responses[i] = resp + // Wait for the ack response + resp, err := c.ackStream.Recv() + if err != nil { + return nil, fmt.Errorf("failed to receive ack response: %v", err) } - - return responses, nil + return resp, nil } // PendingFn returns the number of pending data from the source. 
diff --git a/pkg/sdkclient/source/client_test.go b/pkg/sdkclient/source/client_test.go index d19e3e8737..818c3c3430 100644 --- a/pkg/sdkclient/source/client_test.go +++ b/pkg/sdkclient/source/client_test.go @@ -188,9 +188,9 @@ func TestAckFn(t *testing.T) { assert.True(t, ackHandshakeResponse.GetHandshake().GetSot()) // Test AckFn - ack, err := testClient.AckFn(ctx, []*sourcepb.AckRequest{{}}) + ack, err := testClient.AckFn(ctx, &sourcepb.AckRequest{}) assert.NoError(t, err) - assert.Equal(t, []*sourcepb.AckResponse{{}}, ack) + assert.Equal(t, &sourcepb.AckResponse{}, ack) } func TestPendingFn(t *testing.T) { diff --git a/pkg/sdkclient/source/interface.go b/pkg/sdkclient/source/interface.go index cc26f2cd95..ea897b8207 100644 --- a/pkg/sdkclient/source/interface.go +++ b/pkg/sdkclient/source/interface.go @@ -32,7 +32,7 @@ type Client interface { // ReadFn reads messages from the udsource. ReadFn(ctx context.Context, req *sourcepb.ReadRequest, datumCh chan<- *sourcepb.ReadResponse) error // AckFn acknowledges messages from the udsource. - AckFn(ctx context.Context, req []*sourcepb.AckRequest) ([]*sourcepb.AckResponse, error) + AckFn(ctx context.Context, req *sourcepb.AckRequest) (*sourcepb.AckResponse, error) // PendingFn returns the number of pending messages from the udsource. PendingFn(ctx context.Context, req *emptypb.Empty) (*sourcepb.PendingResponse, error) // PartitionsFn returns the list of partitions from the udsource. diff --git a/pkg/sinks/udsink/udsink_grpc.go b/pkg/sinks/udsink/udsink_grpc.go index 80f47d2675..6b9a1a77eb 100644 --- a/pkg/sinks/udsink/udsink_grpc.go +++ b/pkg/sinks/udsink/udsink_grpc.go @@ -106,24 +106,26 @@ func (u *UDSgRPCBasedUDSink) ApplySink(ctx context.Context, requests []*sinkpb.S return errs } // Use ID to map the response messages, so that there's no strict requirement for the user-defined sink to return the response in order. 
- resMap := make(map[string]*sinkpb.SinkResponse) + resMap := make(map[string]*sinkpb.SinkResponse_Result) for _, res := range responses { - resMap[res.Result.GetId()] = res + for _, result := range res.Results { + resMap[result.GetId()] = result + } } for i, m := range requests { if r, existing := resMap[m.Request.GetId()]; !existing { errs[i] = &NotFoundErr } else { - if r.Result.GetStatus() == sinkpb.Status_FAILURE { - if r.Result.GetErrMsg() != "" { + if r.GetStatus() == sinkpb.Status_FAILURE { + if r.GetErrMsg() != "" { errs[i] = &ApplyUDSinkErr{ UserUDSinkErr: true, - Message: r.Result.GetErrMsg(), + Message: r.GetErrMsg(), } } else { errs[i] = &UnknownUDSinkErr } - } else if r.Result.GetStatus() == sinkpb.Status_FALLBACK { + } else if r.GetStatus() == sinkpb.Status_FALLBACK { errs[i] = &WriteToFallbackErr } else { errs[i] = nil diff --git a/pkg/sinks/udsink/udsink_grpc_test.go b/pkg/sinks/udsink/udsink_grpc_test.go index 2af72530fc..a3733b6c6f 100644 --- a/pkg/sinks/udsink/udsink_grpc_test.go +++ b/pkg/sinks/udsink/udsink_grpc_test.go @@ -99,10 +99,7 @@ func Test_gRPCBasedUDSink_ApplyWithMockClient(t *testing.T) { mockSinkClient := sinkmock.NewMockSink_SinkFnClient(ctrl) mockSinkClient.EXPECT().Send(gomock.Any()).Return(nil).AnyTimes() mockSinkClient.EXPECT().Recv().Return(&sinkpb.SinkResponse{ - Result: testResponseList[0], - }, nil) - mockSinkClient.EXPECT().Recv().Return(&sinkpb.SinkResponse{ - Result: testResponseList[1], + Results: testResponseList, }, nil) mockSinkClient.EXPECT().Recv().Return(&sinkpb.SinkResponse{Status: &sinkpb.TransmissionStatus{ Eot: true, diff --git a/pkg/sources/udsource/grpc_udsource.go b/pkg/sources/udsource/grpc_udsource.go index 8d0389a2ee..32efdbc51d 100644 --- a/pkg/sources/udsource/grpc_udsource.go +++ b/pkg/sources/udsource/grpc_udsource.go @@ -175,16 +175,12 @@ func (u *GRPCBasedUDSource) ApplyAckFn(ctx context.Context, offsets []isb.Offset for i, offset := range offsets { rOffsets[i] = 
ConvertToUserDefinedSourceOffset(offset) } - ackRequests := make([]*sourcepb.AckRequest, len(rOffsets)) - for i, offset := range rOffsets { - var r = &sourcepb.AckRequest{ - Request: &sourcepb.AckRequest_Request{ - Offset: offset, - }, - } - ackRequests[i] = r + var ackRequest = &sourcepb.AckRequest{ + Request: &sourcepb.AckRequest_Request{ + Offsets: rOffsets, + }, } - _, err := u.client.AckFn(ctx, ackRequests) + _, err := u.client.AckFn(ctx, ackRequest) return err } diff --git a/pkg/sources/udsource/grpc_udsource_test.go b/pkg/sources/udsource/grpc_udsource_test.go index e0a0ab4ca5..4d3d78f919 100644 --- a/pkg/sources/udsource/grpc_udsource_test.go +++ b/pkg/sources/udsource/grpc_udsource_test.go @@ -274,21 +274,14 @@ func Test_gRPCBasedUDSource_ApplyAckWithMockClient(t *testing.T) { mockClient.EXPECT().ReadFn(gomock.Any(), gomock.Any()).Return(nil, nil) mockClient.EXPECT().AckFn(gomock.Any(), gomock.Any()).Return(mockAckClient, nil) - req1 := &sourcepb.AckRequest{ - Request: &sourcepb.AckRequest_Request{ - Offset: offset1, - }, - } - - req2 := &sourcepb.AckRequest{ + req := &sourcepb.AckRequest{ Request: &sourcepb.AckRequest_Request{ - Offset: offset2, + Offsets: []*sourcepb.Offset{offset1, offset2}, }, } - mockAckClient.EXPECT().Send(req1).Return(nil).Times(1) - mockAckClient.EXPECT().Send(req2).Return(nil).Times(1) - mockAckClient.EXPECT().Recv().Return(&sourcepb.AckResponse{}, nil).Times(2) + mockAckClient.EXPECT().Send(req).Return(nil).Times(1) + mockAckClient.EXPECT().Recv().Return(&sourcepb.AckResponse{}, nil).Times(1) ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() @@ -321,7 +314,7 @@ func Test_gRPCBasedUDSource_ApplyAckWithMockClient(t *testing.T) { req1 := &sourcepb.AckRequest{ Request: &sourcepb.AckRequest_Request{ - Offset: offset1, + Offsets: []*sourcepb.Offset{offset1, offset2}, }, } diff --git a/rust/Cargo.lock b/rust/Cargo.lock index c069cd81f0..ee664937a7 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ 
-1614,7 +1614,7 @@ dependencies = [ [[package]] name = "numaflow" version = "0.1.1" -source = "git+https://github.com/numaproj/numaflow-rs.git?rev=9fb3c0ad0f5f43cc42b4919f849b7dcce9a91387#9fb3c0ad0f5f43cc42b4919f849b7dcce9a91387" +source = "git+https://github.com/numaproj/numaflow-rs.git?rev=ddd879588e11455921f1ca958ea2b3c076689293#ddd879588e11455921f1ca958ea2b3c076689293" dependencies = [ "chrono", "futures-util", @@ -1648,7 +1648,7 @@ dependencies = [ "hyper-util", "kube", "log", - "numaflow 0.1.1 (git+https://github.com/numaproj/numaflow-rs.git?rev=9fb3c0ad0f5f43cc42b4919f849b7dcce9a91387)", + "numaflow 0.1.1 (git+https://github.com/numaproj/numaflow-rs.git?rev=ddd879588e11455921f1ca958ea2b3c076689293)", "numaflow-models", "numaflow-pb", "parking_lot", diff --git a/rust/numaflow-core/Cargo.toml b/rust/numaflow-core/Cargo.toml index 9a7678ff67..179da28dd8 100644 --- a/rust/numaflow-core/Cargo.toml +++ b/rust/numaflow-core/Cargo.toml @@ -46,7 +46,7 @@ async-nats = "0.37.0" [dev-dependencies] tempfile = "3.11.0" -numaflow = { git = "https://github.com/numaproj/numaflow-rs.git", rev = "9fb3c0ad0f5f43cc42b4919f849b7dcce9a91387" } +numaflow = { git = "https://github.com/numaproj/numaflow-rs.git", rev = "ddd879588e11455921f1ca958ea2b3c076689293" } [build-dependencies] diff --git a/rust/numaflow-core/src/message.rs b/rust/numaflow-core/src/message.rs index ef650a0df1..f24212967f 100644 --- a/rust/numaflow-core/src/message.rs +++ b/rust/numaflow-core/src/message.rs @@ -10,8 +10,8 @@ use bytes::{Bytes, BytesMut}; use chrono::{DateTime, Utc}; use numaflow_pb::clients::sink::sink_request::Request; use numaflow_pb::clients::sink::Status::{Failure, Fallback, Success}; -use numaflow_pb::clients::sink::{sink_response, SinkRequest, SinkResponse}; -use numaflow_pb::clients::source::{read_response, AckRequest}; +use numaflow_pb::clients::sink::{sink_response, SinkRequest}; +use numaflow_pb::clients::source::read_response; use 
numaflow_pb::clients::sourcetransformer::SourceTransformRequest; use prost::Message as ProtoMessage; use serde::{Deserialize, Serialize}; @@ -208,22 +208,17 @@ impl fmt::Display for MessageID { } } -impl TryFrom for AckRequest { +impl TryFrom for numaflow_pb::clients::source::Offset { type Error = Error; - fn try_from(value: Offset) -> std::result::Result { - match value { + fn try_from(offset: Offset) -> std::result::Result { + match offset { Offset::Int(_) => Err(Error::Source("IntOffset not supported".to_string())), - Offset::String(o) => Ok(Self { - request: Some(numaflow_pb::clients::source::ack_request::Request { - offset: Some(numaflow_pb::clients::source::Offset { - offset: BASE64_STANDARD - .decode(o.offset) - .expect("we control the encoding, so this should never fail"), - partition_id: o.partition_idx as i32, - }), - }), - handshake: None, + Offset::String(o) => Ok(numaflow_pb::clients::source::Offset { + offset: BASE64_STANDARD + .decode(o.offset) + .expect("we control the encoding, so this should never fail"), + partition_id: o.partition_idx as i32, }), } } @@ -371,7 +366,7 @@ pub(crate) struct ResponseFromSink { pub(crate) status: ResponseStatusFromSink, } -impl From for SinkResponse { +impl From for sink_response::Result { fn from(value: ResponseFromSink) -> Self { let (status, err_msg) = match value.status { ResponseStatusFromSink::Success => (Success, "".to_string()), @@ -380,35 +375,24 @@ impl From for SinkResponse { }; Self { - result: Some(sink_response::Result { - id: value.id, - status: status as i32, - err_msg, - }), - handshake: None, - status: None, + id: value.id, + status: status as i32, + err_msg, } } } -impl TryFrom for ResponseFromSink { - type Error = Error; - - fn try_from(value: SinkResponse) -> Result { - let value = value - .result - .ok_or(Error::Sink("result is empty".to_string()))?; - +impl From for ResponseFromSink { + fn from(value: sink_response::Result) -> Self { let status = match value.status() { Success => 
ResponseStatusFromSink::Success, Failure => ResponseStatusFromSink::Failed(value.err_msg), Fallback => ResponseStatusFromSink::Fallback, }; - - Ok(Self { + Self { id: value.id, status, - }) + } } } @@ -418,6 +402,7 @@ mod tests { use chrono::TimeZone; use numaflow_pb::clients::sink::sink_response::Result as SinkResult; + use numaflow_pb::clients::sink::SinkResponse; use numaflow_pb::clients::source::Offset as SourceOffset; use numaflow_pb::objects::isb::{ Body, Header, Message as ProtoMessage, MessageId, MessageInfo, @@ -444,26 +429,6 @@ mod tests { assert_eq!(format!("{}", message_id), "vertex-123-0"); } - #[test] - fn test_offset_to_ack_request() { - let offset = Offset::String(StringOffset { - offset: BASE64_STANDARD.encode("123"), - partition_idx: 1, - }); - let ack_request: AckRequest = offset.try_into().unwrap(); - assert_eq!(ack_request.request.unwrap().offset.unwrap().partition_id, 1); - - let offset = Offset::Int(IntOffset::new(42, 1)); - let result: Result = offset.try_into(); - - // Assert that the conversion results in an error - assert!(result.is_err()); - - if let Err(e) = result { - assert_eq!(e.to_string(), "Source Error - IntOffset not supported"); - } - } - #[test] fn test_message_to_vec_u8() { let message = Message { @@ -622,28 +587,34 @@ mod tests { status: ResponseStatusFromSink::Success, }; - let sink_response: SinkResponse = response.into(); - assert_eq!(sink_response.result.unwrap().status, Success as i32); + let sink_result: sink_response::Result = response.into(); + assert_eq!(sink_result.status, Success as i32); } #[test] fn test_sink_response_to_response_from_sink() { let sink_response = SinkResponse { - result: Some(SinkResult { + results: vec![SinkResult { id: "123".to_string(), status: Success as i32, err_msg: "".to_string(), - }), + }], handshake: None, status: None, }; - let response: Result = sink_response.try_into(); - assert!(response.is_ok()); + let results: Vec = sink_response + .results + .into_iter() + .map(Into::into) + 
.collect::>(); + assert!(!results.is_empty()); - let response = response.unwrap(); - assert_eq!(response.id, "123"); - assert_eq!(response.status, ResponseStatusFromSink::Success); + assert_eq!(results.get(0).unwrap().id, "123"); + assert_eq!( + results.get(0).unwrap().status, + ResponseStatusFromSink::Success + ); } #[test] @@ -692,14 +663,12 @@ mod tests { // Test conversion from Offset to AckRequest for StringOffset let offset = Offset::String(StringOffset::new(BASE64_STANDARD.encode("42"), 1)); - let result: Result = offset.try_into(); - assert!(result.is_ok()); - let ack_request = result.unwrap(); - assert_eq!(ack_request.request.unwrap().offset.unwrap().partition_id, 1); + let offset: Result = offset.try_into(); + assert_eq!(offset.unwrap().partition_id, 1); // Test conversion from Offset to AckRequest for IntOffset (should fail) let offset = Offset::Int(IntOffset::new(42, 1)); - let result: Result = offset.try_into(); + let result: Result = offset.try_into(); assert!(result.is_err()); } } diff --git a/rust/numaflow-core/src/metrics.rs b/rust/numaflow-core/src/metrics.rs index c81c16231c..3aaf97ab78 100644 --- a/rust/numaflow-core/src/metrics.rs +++ b/rust/numaflow-core/src/metrics.rs @@ -757,7 +757,7 @@ mod tests { impl source::Sourcer for SimpleSource { async fn read(&self, _: SourceReadRequest, _: Sender) {} - async fn ack(&self, _: Offset) {} + async fn ack(&self, _: Vec) {} async fn pending(&self) -> usize { 0 diff --git a/rust/numaflow-core/src/monovertex.rs b/rust/numaflow-core/src/monovertex.rs index 598a3d9e83..bbdeaf3a9d 100644 --- a/rust/numaflow-core/src/monovertex.rs +++ b/rust/numaflow-core/src/monovertex.rs @@ -300,7 +300,7 @@ mod tests { impl source::Sourcer for SimpleSource { async fn read(&self, _: SourceReadRequest, _: Sender) {} - async fn ack(&self, _: Offset) {} + async fn ack(&self, _: Vec) {} async fn pending(&self) -> usize { 0 diff --git a/rust/numaflow-core/src/monovertex/forwarder.rs b/rust/numaflow-core/src/monovertex/forwarder.rs 
index 793eae4526..f84cade170 100644 --- a/rust/numaflow-core/src/monovertex/forwarder.rs +++ b/rust/numaflow-core/src/monovertex/forwarder.rs @@ -625,11 +625,13 @@ mod tests { .extend(message_offsets) } - async fn ack(&self, offset: Offset) { - self.yet_to_be_acked - .write() - .unwrap() - .remove(&String::from_utf8(offset.offset).unwrap()); + async fn ack(&self, offsets: Vec) { + for offset in offsets { + self.yet_to_be_acked + .write() + .unwrap() + .remove(&String::from_utf8(offset.offset).unwrap()); + } } async fn pending(&self) -> usize { diff --git a/rust/numaflow-core/src/pipeline.rs b/rust/numaflow-core/src/pipeline.rs index ffe1c06944..a9724780ea 100644 --- a/rust/numaflow-core/src/pipeline.rs +++ b/rust/numaflow-core/src/pipeline.rs @@ -328,7 +328,7 @@ mod tests { #[cfg(feature = "nats-tests")] #[tokio::test] - async fn test_forwarder_for_source_vetex() { + async fn test_forwarder_for_source_vertex() { // Unique names for the streams we use in this test let streams = vec![ "default-test-forwarder-for-source-vertex-out-0", diff --git a/rust/numaflow-core/src/shared/utils.rs b/rust/numaflow-core/src/shared/utils.rs index 576f78499f..84fb5a0c3b 100644 --- a/rust/numaflow-core/src/shared/utils.rs +++ b/rust/numaflow-core/src/shared/utils.rs @@ -225,7 +225,7 @@ mod tests { impl source::Sourcer for SimpleSource { async fn read(&self, _request: SourceReadRequest, _transmitter: Sender) {} - async fn ack(&self, _offset: Offset) {} + async fn ack(&self, _offset: Vec) {} async fn pending(&self) -> usize { 0 diff --git a/rust/numaflow-core/src/sink/user_defined.rs b/rust/numaflow-core/src/sink/user_defined.rs index 92d05230ad..5799291eaf 100644 --- a/rust/numaflow-core/src/sink/user_defined.rs +++ b/rust/numaflow-core/src/sink/user_defined.rs @@ -1,3 +1,7 @@ +use crate::message::{Message, ResponseFromSink}; +use crate::sink::Sink; +use crate::Error; +use crate::Result; use numaflow_pb::clients::sink::sink_client::SinkClient; use 
numaflow_pb::clients::sink::{Handshake, SinkRequest, SinkResponse, TransmissionStatus}; use tokio::sync::mpsc; @@ -5,11 +9,6 @@ use tokio_stream::wrappers::ReceiverStream; use tonic::transport::Channel; use tonic::{Request, Streaming}; -use crate::error; -use crate::message::{Message, ResponseFromSink}; -use crate::sink::Sink; -use crate::Error; - const DEFAULT_CHANNEL_SIZE: usize = 1000; /// User-Defined Sink code writes messages to a custom [Sink]. @@ -19,7 +18,7 @@ pub struct UserDefinedSink { } impl UserDefinedSink { - pub(crate) async fn new(mut client: SinkClient) -> error::Result { + pub(crate) async fn new(mut client: SinkClient) -> Result { let (sink_tx, sink_rx) = mpsc::channel(DEFAULT_CHANNEL_SIZE); let sink_stream = ReceiverStream::new(sink_rx); @@ -59,7 +58,7 @@ impl UserDefinedSink { impl Sink for UserDefinedSink { /// writes a set of messages to the sink. - async fn sink(&mut self, messages: Vec) -> error::Result> { + async fn sink(&mut self, messages: Vec) -> Result> { let requests: Vec = messages.into_iter().map(|message| message.into()).collect(); let num_requests = requests.len(); @@ -88,7 +87,7 @@ impl Sink for UserDefinedSink { // response only once it has read all the requests. // We wait for num_requests + 1 responses because the last response will be the EOT response. 
let mut responses = Vec::new(); - for i in 0..num_requests + 1 { + loop { let response = self .resp_stream .message() @@ -96,12 +95,20 @@ impl Sink for UserDefinedSink { .ok_or(Error::Sink("failed to receive response".to_string()))?; if response.status.map_or(false, |s| s.eot) { - if i != num_requests { + if responses.len() != num_requests { log::error!("received EOT message before all responses are received, we will wait indefinitely for the remaining responses"); + } else { + break; } continue; } - responses.push(response.try_into()?); + responses.extend( + response + .results + .into_iter() + .map(Into::into) + .collect::>(), + ); } Ok(responses) @@ -112,7 +119,6 @@ impl Sink for UserDefinedSink { mod tests { use chrono::offset::Utc; use numaflow::sink; - use numaflow_pb::clients::sink::sink_client::SinkClient; use tokio::sync::mpsc; use tracing::info; diff --git a/rust/numaflow-core/src/source/user_defined.rs b/rust/numaflow-core/src/source/user_defined.rs index 00a3dd47a3..03162b53ac 100644 --- a/rust/numaflow-core/src/source/user_defined.rs +++ b/rust/numaflow-core/src/source/user_defined.rs @@ -185,25 +185,24 @@ impl UserDefinedSourceAck { impl SourceAcker for UserDefinedSourceAck { async fn ack(&mut self, offsets: Vec) -> Result<()> { - let n = offsets.len(); - - // send n ack requests - for offset in offsets { - let request = offset.try_into()?; - self.ack_tx - .send(request) - .await - .map_err(|e| Error::Source(e.to_string()))?; - } + let ack_offsets: Result> = + offsets.into_iter().map(TryInto::try_into).collect(); + + self.ack_tx + .send(AckRequest { + request: Some(source::ack_request::Request { + offsets: ack_offsets?, + }), + handshake: None, + }) + .await + .map_err(|e| Error::Source(e.to_string()))?; - // make sure we get n responses for the n requests. - for _ in 0..n { - let _ = self - .ack_resp_stream - .message() - .await? 
- .ok_or(Error::Source("failed to receive ack response".to_string()))?; - } + let _ = self + .ack_resp_stream + .message() + .await? + .ok_or(Error::Source("failed to receive ack response".to_string()))?; Ok(()) } @@ -284,11 +283,13 @@ mod tests { self.yet_to_ack.write().unwrap().extend(message_offsets) } - async fn ack(&self, offset: Offset) { - self.yet_to_ack - .write() - .unwrap() - .remove(&String::from_utf8(offset.offset).unwrap()); + async fn ack(&self, offsets: Vec) { + for offset in offsets { + self.yet_to_ack + .write() + .unwrap() + .remove(&String::from_utf8(offset.offset).unwrap()); + } } async fn pending(&self) -> usize { diff --git a/rust/numaflow-pb/src/clients/sink.v1.rs b/rust/numaflow-pb/src/clients/sink.v1.rs index 612e5693c3..3fd8289d10 100644 --- a/rust/numaflow-pb/src/clients/sink.v1.rs +++ b/rust/numaflow-pb/src/clients/sink.v1.rs @@ -61,8 +61,8 @@ pub struct TransmissionStatus { /// SinkResponse is the individual response of each message written to the sink. #[derive(Clone, PartialEq, ::prost::Message)] pub struct SinkResponse { - #[prost(message, optional, tag = "1")] - pub result: ::core::option::Option, + #[prost(message, repeated, tag = "1")] + pub results: ::prost::alloc::vec::Vec, #[prost(message, optional, tag = "2")] pub handshake: ::core::option::Option, #[prost(message, optional, tag = "3")] diff --git a/rust/numaflow-pb/src/clients/source.v1.rs b/rust/numaflow-pb/src/clients/source.v1.rs index f60a48315c..1b96a0e77f 100644 --- a/rust/numaflow-pb/src/clients/source.v1.rs +++ b/rust/numaflow-pb/src/clients/source.v1.rs @@ -179,8 +179,8 @@ pub mod ack_request { #[derive(Clone, PartialEq, ::prost::Message)] pub struct Request { /// Required field holding the offset to be acked - #[prost(message, optional, tag = "1")] - pub offset: ::core::option::Option, + #[prost(message, repeated, tag = "1")] + pub offsets: ::prost::alloc::vec::Vec, } } /// From 426141a5e595e8cb4c827f48fea0e1bd286e4a11 Mon Sep 17 00:00:00 2001 From: Derek Wang 
Date: Fri, 1 Nov 2024 06:36:32 -0700 Subject: [PATCH 133/188] fix(docs): use manifests from main branch in quick-start (#2197) --- docs/quick-start.md | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/docs/quick-start.md b/docs/quick-start.md index 61c39d9436..540bcd00d3 100644 --- a/docs/quick-start.md +++ b/docs/quick-start.md @@ -28,8 +28,8 @@ Once you have completed all the prerequisites, run the following command lines t ```shell kubectl create ns numaflow-system -kubectl apply -n numaflow-system -f https://raw.githubusercontent.com/numaproj/numaflow/stable/config/install.yaml -kubectl apply -f https://raw.githubusercontent.com/numaproj/numaflow/stable/examples/0-isbsvc-jetstream.yaml +kubectl apply -n numaflow-system -f https://raw.githubusercontent.com/numaproj/numaflow/main/config/install.yaml +kubectl apply -f https://raw.githubusercontent.com/numaproj/numaflow/main/examples/0-isbsvc-jetstream.yaml ``` ## Creating a simple pipeline @@ -39,7 +39,7 @@ As an example, we will create a `simple pipeline` that contains a source vertex Run the command below to create a simple pipeline. ```shell -kubectl apply -f https://raw.githubusercontent.com/numaproj/numaflow/stable/examples/1-simple-pipeline.yaml +kubectl apply -f https://raw.githubusercontent.com/numaproj/numaflow/main/examples/1-simple-pipeline.yaml ``` To view a list of pipelines you've created, run: @@ -92,14 +92,13 @@ This should generate an output like the sample below: 2022/08/25 23:59:39 (out) {"Data":"jk4nN/a7Dhc=","Createdts":1661471978707963534} ``` - Numaflow also comes with a built-in user interface. -**NOTE**: Please install the metrics server if your local Kubernetes cluster does not bring it by default (e.g., Kind). +**NOTE**: Please install the metrics server if your local Kubernetes cluster does not bring it by default (e.g., Kind). You can install it by running the below command. 
```shell -kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml +kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml kubectl patch -n kube-system deployment metrics-server --type=json -p '[{"op":"add","path":"/spec/template/spec/containers/0/args/-","value":"--kubelet-insecure-tls"}]' ``` @@ -117,7 +116,7 @@ This renders the following UI on https://localhost:8443/. The pipeline can be deleted by issuing the following command: ```shell -kubectl delete -f https://raw.githubusercontent.com/numaproj/numaflow/stable/examples/1-simple-pipeline.yaml +kubectl delete -f https://raw.githubusercontent.com/numaproj/numaflow/main/examples/1-simple-pipeline.yaml ``` ## Creating an advanced pipeline From 9a89fd42a2a5d72ee94559291a4e16ff46993b94 Mon Sep 17 00:00:00 2001 From: Sreekanth Date: Sat, 2 Nov 2024 23:01:38 +0530 Subject: [PATCH 134/188] chore: Load docker image from buildx runner (#2198) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index b5cec67822..450f9dc491 100644 --- a/Makefile +++ b/Makefile @@ -194,7 +194,7 @@ build-rust-in-docker: mkdir -p dist -$(DOCKER) container ls --all --filter=ancestor='$(IMAGE_NAMESPACE)/$(BINARY_NAME)-rust-builder:$(VERSION)' --format "{{.ID}}" | xargs $(DOCKER) rm -$(DOCKER) image rm $(IMAGE_NAMESPACE)/$(BINARY_NAME)-rust-builder:$(VERSION) - DOCKER_BUILDKIT=1 $(DOCKER) build --build-arg "BASE_IMAGE=$(DEV_BASE_IMAGE)" $(DOCKER_BUILD_ARGS) -t $(IMAGE_NAMESPACE)/$(BINARY_NAME)-rust-builder:$(VERSION) --target rust-builder -f $(DOCKERFILE) . + DOCKER_BUILDKIT=1 $(DOCKER) build --build-arg "BASE_IMAGE=$(DEV_BASE_IMAGE)" $(DOCKER_BUILD_ARGS) -t $(IMAGE_NAMESPACE)/$(BINARY_NAME)-rust-builder:$(VERSION) --target rust-builder -f $(DOCKERFILE) . 
--load export CTR=$$($(DOCKER) create $(IMAGE_NAMESPACE)/$(BINARY_NAME)-rust-builder:$(VERSION)) && $(DOCKER) cp $$CTR:/root/numaflow dist/numaflow-rs-linux-$(HOST_ARCH) && $(DOCKER) rm $$CTR && $(DOCKER) image rm $(IMAGE_NAMESPACE)/$(BINARY_NAME)-rust-builder:$(VERSION) .PHONY: build-rust-in-docker-multi From 83dd0704421f6f29ef1ebf5031364cb314659e5c Mon Sep 17 00:00:00 2001 From: Yashash H L Date: Mon, 4 Nov 2024 23:49:59 +0530 Subject: [PATCH 135/188] chore: minor fixes for rust source and sink (#2201) Signed-off-by: Yashash H L Signed-off-by: Vigith Maurice Co-authored-by: Vigith Maurice --- rust/numaflow-core/src/config/pipeline.rs | 2 - rust/numaflow-core/src/config/pipeline/isb.rs | 8 -- rust/numaflow-core/src/pipeline.rs | 33 ++++---- .../pipeline/forwarder/source_forwarder.rs | 6 ++ .../src/pipeline/isb/jetstream/reader.rs | 81 +++++++++++-------- .../src/pipeline/isb/jetstream/writer.rs | 10 ++- 6 files changed, 78 insertions(+), 62 deletions(-) diff --git a/rust/numaflow-core/src/config/pipeline.rs b/rust/numaflow-core/src/config/pipeline.rs index 0a5ba67508..45d1f59c66 100644 --- a/rust/numaflow-core/src/config/pipeline.rs +++ b/rust/numaflow-core/src/config/pipeline.rs @@ -353,8 +353,6 @@ mod tests { reader_config: BufferReaderConfig { partitions: 1, streams: vec![("default-simple-pipeline-out-0".into(), 0)], - batch_size: 500, - read_timeout: Duration::from_secs(1), wip_ack_interval: Duration::from_secs(1), }, partitions: 0, diff --git a/rust/numaflow-core/src/config/pipeline/isb.rs b/rust/numaflow-core/src/config/pipeline/isb.rs index 704d19de08..c010f9a15d 100644 --- a/rust/numaflow-core/src/config/pipeline/isb.rs +++ b/rust/numaflow-core/src/config/pipeline/isb.rs @@ -3,7 +3,6 @@ use std::fmt; use std::time::Duration; const DEFAULT_PARTITION_IDX: u16 = 0; -const DEFAULT_BATCH_SIZE: usize = 500; const DEFAULT_PARTITIONS: u16 = 1; const DEFAULT_MAX_LENGTH: usize = 30000; const DEFAULT_USAGE_LIMIT: f64 = 0.8; @@ -11,7 +10,6 @@ const 
DEFAULT_REFRESH_INTERVAL_SECS: u64 = 1; const DEFAULT_BUFFER_FULL_STRATEGY: BufferFullStrategy = BufferFullStrategy::RetryUntilSuccess; const DEFAULT_RETRY_INTERVAL_MILLIS: u64 = 10; const DEFAULT_WIP_ACK_INTERVAL_MILLIS: u64 = 1000; -const DEFAULT_READ_TIMEOUT_MILLIS: u64 = 1000; pub(crate) mod jetstream { const DEFAULT_URL: &str = "localhost:4222"; @@ -78,8 +76,6 @@ impl fmt::Display for BufferFullStrategy { pub(crate) struct BufferReaderConfig { pub(crate) partitions: u16, pub(crate) streams: Vec<(String, u16)>, - pub(crate) batch_size: usize, - pub(crate) read_timeout: Duration, pub(crate) wip_ack_interval: Duration, } @@ -88,9 +84,7 @@ impl Default for BufferReaderConfig { BufferReaderConfig { partitions: DEFAULT_PARTITIONS, streams: vec![("default-0".to_string(), DEFAULT_PARTITION_IDX)], - batch_size: DEFAULT_BATCH_SIZE, wip_ack_interval: Duration::from_millis(DEFAULT_WIP_ACK_INTERVAL_MILLIS), - read_timeout: Duration::from_millis(DEFAULT_READ_TIMEOUT_MILLIS), } } } @@ -145,9 +139,7 @@ mod tests { let expected = BufferReaderConfig { partitions: DEFAULT_PARTITIONS, streams: vec![("default-0".to_string(), DEFAULT_PARTITION_IDX)], - batch_size: DEFAULT_BATCH_SIZE, wip_ack_interval: Duration::from_millis(DEFAULT_WIP_ACK_INTERVAL_MILLIS), - read_timeout: Duration::from_millis(DEFAULT_READ_TIMEOUT_MILLIS), }; let config = BufferReaderConfig::default(); assert_eq!(config, expected); diff --git a/rust/numaflow-core/src/pipeline.rs b/rust/numaflow-core/src/pipeline.rs index a9724780ea..f5896b3cc7 100644 --- a/rust/numaflow-core/src/pipeline.rs +++ b/rust/numaflow-core/src/pipeline.rs @@ -1,11 +1,11 @@ -use std::collections::HashMap; - -use async_nats::jetstream; use async_nats::jetstream::Context; +use async_nats::{jetstream, ConnectOptions}; use futures::future::try_join_all; use numaflow_pb::clients::sink::sink_client::SinkClient; use numaflow_pb::clients::source::source_client::SourceClient; use 
numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; +use std::collections::HashMap; +use std::time::Duration; use tokio_util::sync::CancellationToken; use tonic::transport::Channel; @@ -289,17 +289,22 @@ async fn create_transformer( /// Creates a jetstream context based on the provided configuration async fn create_js_context(config: pipeline::isb::jetstream::ClientConfig) -> Result { - let js_client = match (config.user, config.password) { - (Some(user), Some(password)) => { - async_nats::connect_with_options( - config.url, - async_nats::ConnectOptions::with_user_and_password(user, password), - ) - .await - } - _ => async_nats::connect(config.url).await, + // TODO: make these configurable. today this is hardcoded on Golang code too. + let mut opts = ConnectOptions::new() + .max_reconnects(None) // -1 for unlimited reconnects + .ping_interval(Duration::from_secs(3)) + .max_reconnects(None) + .ping_interval(Duration::from_secs(3)) + .retry_on_initial_connect(); + + if let (Some(user), Some(password)) = (config.user, config.password) { + opts = opts.user_and_password(user, password); } - .map_err(|e| error::Error::Connection(e.to_string()))?; + + let js_client = async_nats::connect_with_options(&config.url, opts) + .await + .map_err(|e| error::Error::Connection(e.to_string()))?; + Ok(jetstream::new(js_client)) } @@ -562,8 +567,6 @@ mod tests { .enumerate() .map(|(i, key)| (key.to_string(), i as u16)) .collect(), - batch_size: 500, - read_timeout: Duration::from_secs(1), wip_ack_interval: Duration::from_secs(1), }, partitions: 0, diff --git a/rust/numaflow-core/src/pipeline/forwarder/source_forwarder.rs b/rust/numaflow-core/src/pipeline/forwarder/source_forwarder.rs index 5dd94290de..9ba2ba94fd 100644 --- a/rust/numaflow-core/src/pipeline/forwarder/source_forwarder.rs +++ b/rust/numaflow-core/src/pipeline/forwarder/source_forwarder.rs @@ -162,6 +162,7 @@ impl Forwarder { /// Writes messages to the jetstream, it writes to all the 
downstream buffers. async fn write_to_jetstream(&mut self, messages: Vec) -> Result<(), Error> { + let start_time = tokio::time::Instant::now(); if messages.is_empty() { return Ok(()); } @@ -186,6 +187,11 @@ impl Forwarder { .await .map_err(|e| Error::Forwarder(format!("Failed to write to jetstream {:?}", e)))??; } + debug!( + len = messages.len(), + elapsed_ms = start_time.elapsed().as_millis(), + "Wrote messages to jetstream", + ); Ok(()) } } diff --git a/rust/numaflow-core/src/pipeline/isb/jetstream/reader.rs b/rust/numaflow-core/src/pipeline/isb/jetstream/reader.rs index 5f3d2926bf..46faf2e95b 100644 --- a/rust/numaflow-core/src/pipeline/isb/jetstream/reader.rs +++ b/rust/numaflow-core/src/pipeline/isb/jetstream/reader.rs @@ -3,13 +3,14 @@ use std::time::Duration; use async_nats::jetstream::{ consumer::PullConsumer, AckKind, Context, Message as JetstreamMessage, }; + use tokio::sync::mpsc::Receiver; use tokio::sync::{mpsc, oneshot}; use tokio::task::JoinHandle; use tokio::time::{self, Instant}; use tokio_stream::StreamExt; use tokio_util::sync::CancellationToken; -use tracing::{error, warn}; +use tracing::{debug, error, info, warn}; use crate::config::pipeline::isb::BufferReaderConfig; use crate::config::pipeline::PipelineConfig; @@ -72,7 +73,8 @@ impl JetstreamReader { cancel_token: CancellationToken, pipeline_config: &PipelineConfig, ) -> Result<(Receiver, JoinHandle>)> { - let (messages_tx, messages_rx) = mpsc::channel(2 * self.config.batch_size); + // FIXME: factor of 2 should be configurable, at the least a const + let (messages_tx, messages_rx) = mpsc::channel(2 * pipeline_config.batch_size); let handle: JoinHandle> = tokio::spawn({ let this = self.clone(); @@ -104,41 +106,44 @@ impl JetstreamReader { .messages() .await .unwrap() - .chunks_timeout(this.config.batch_size, this.config.read_timeout); + .chunks_timeout(pipeline_config.batch_size, pipeline_config.read_timeout); tokio::pin!(chunk_stream); // The .next() call will not return if there is no data 
even if read_timeout is // reached. + let mut total_messages = 0; + let mut chunk_time = Instant::now(); + let mut start_time = Instant::now(); while let Some(messages) = chunk_stream.next().await { + debug!( + len = messages.len(), + elapsed_ms = chunk_time.elapsed().as_millis(), + "Received messages from Jetstream", + ); + total_messages += messages.len(); for message in messages { - let jetstream_message = match message { - Ok(message) => message, - Err(e) => { - error!(?e, "Failed to fetch messages from the Jetstream"); - continue; - } - }; - - let msg_info = match jetstream_message.info() { - Ok(info) => info, - Err(e) => { - error!(?e, "Failed to get message info from Jetstream"); - continue; - } - }; + let jetstream_message = message.map_err(|e| { + Error::ISB(format!( + "Error while fetching message from Jetstream: {:?}", + e + )) + })?; + + let msg_info = jetstream_message.info().map_err(|e| { + Error::ISB(format!( + "Error while fetching message info from Jetstream: {:?}", + e + )) + })?; let mut message: Message = - match jetstream_message.payload.clone().try_into() { - Ok(message) => message, - Err(e) => { - error!( - ?e, - "Failed to parse message payload received from Jetstream" - ); - continue; - } - }; + jetstream_message.payload.clone().try_into().map_err(|e| { + Error::ISB(format!( + "Error while converting Jetstream message to Message: {:?}", + e + )) + })?; message.offset = Some(Offset::Int(IntOffset::new( msg_info.stream_sequence, @@ -158,21 +163,31 @@ impl JetstreamReader { ack: ack_tx, }; - if messages_tx.send(read_message).await.is_err() { - error!("Failed to send message to the channel"); - return Ok(()); - } + messages_tx.send(read_message).await.map_err(|e| { + Error::ISB(format!("Error while sending message to channel: {:?}", e)) + })?; forward_pipeline_metrics() .forwarder .data_read .get_or_create(labels) .inc(); + + if start_time.elapsed() >= Duration::from_millis(1000) { + info!( + len = total_messages, + elapsed_ms = 
start_time.elapsed().as_millis(), + "Total messages read from Jetstream" + ); + start_time = Instant::now(); + total_messages = 0; + } } if cancel_token.is_cancelled() { warn!("Cancellation token is cancelled. Exiting JetstreamReader"); break; } + chunk_time = Instant::now(); } Ok(()) } @@ -279,8 +294,6 @@ mod tests { let buf_reader_config = BufferReaderConfig { partitions: 0, streams: vec![], - batch_size: 2, - read_timeout: Duration::from_millis(1000), wip_ack_interval: Duration::from_millis(5), }; let js_reader = JetstreamReader::new( diff --git a/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs b/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs index 65d10963ca..9fbc7603a9 100644 --- a/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs +++ b/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs @@ -12,7 +12,7 @@ use tokio::sync::mpsc::Receiver; use tokio::sync::{mpsc, oneshot}; use tokio::time::sleep; use tokio_util::sync::CancellationToken; -use tracing::{error, info, warn}; +use tracing::{debug, error, info, warn}; use crate::config::pipeline::isb::BufferWriterConfig; use crate::error::Error; @@ -207,7 +207,7 @@ impl JetstreamWriter { /// an error it means it is fatal non-retryable error. pub(super) async fn blocking_write(&self, payload: Vec) -> Result { let js_ctx = self.js_ctx.clone(); - + let start_time = tokio::time::Instant::now(); loop { match js_ctx .publish(self.stream_name.clone(), Bytes::from(payload.clone())) @@ -219,8 +219,12 @@ impl JetstreamWriter { // should we return an error here? 
Because duplicate messages are not fatal // But it can mess up the watermark progression because the offset will be // same as the previous message offset - warn!("Duplicate message detected, ignoring {:?}", ack); + warn!(ack = ?ack, "Duplicate message detected, ignoring"); } + debug!( + elapsed_ms = start_time.elapsed().as_millis(), + "Blocking write successful in", + ); return Ok(ack); } Err(e) => { From 9c1d3cef6ca817f0e0595dc07b727ce8ae597e4e Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Mon, 4 Nov 2024 21:10:16 -0800 Subject: [PATCH 136/188] feat: block isbsvc deleting when there is linked pipeline (#2202) Signed-off-by: Derek Wang --- ...w.numaproj.io_interstepbufferservices.yaml | 1 + config/install.yaml | 1 + config/namespace-install.yaml | 1 + pkg/apis/numaflow/v1alpha1/isbsvc_types.go | 14 ++-- pkg/reconciler/isbsvc/controller.go | 1 + pkg/reconciler/isbsvc/installer/installer.go | 27 ++++++++ .../isbsvc/installer/installer_test.go | 67 +++++++++++++++++++ 7 files changed, 106 insertions(+), 6 deletions(-) diff --git a/config/base/crds/full/numaflow.numaproj.io_interstepbufferservices.yaml b/config/base/crds/full/numaflow.numaproj.io_interstepbufferservices.yaml index 993551ebbc..5e1c014d71 100644 --- a/config/base/crds/full/numaflow.numaproj.io_interstepbufferservices.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_interstepbufferservices.yaml @@ -3174,6 +3174,7 @@ spec: - Pending - Running - Failed + - Deleting type: string type: type: string diff --git a/config/install.yaml b/config/install.yaml index 11653adc71..8b354dba04 100644 --- a/config/install.yaml +++ b/config/install.yaml @@ -3173,6 +3173,7 @@ spec: - Pending - Running - Failed + - Deleting type: string type: type: string diff --git a/config/namespace-install.yaml b/config/namespace-install.yaml index 025269f458..3ee395c7ff 100644 --- a/config/namespace-install.yaml +++ b/config/namespace-install.yaml @@ -3173,6 +3173,7 @@ spec: - Pending - Running - Failed + - Deleting type: string 
type: type: string diff --git a/pkg/apis/numaflow/v1alpha1/isbsvc_types.go b/pkg/apis/numaflow/v1alpha1/isbsvc_types.go index 56cbde600d..5e5165b63d 100644 --- a/pkg/apis/numaflow/v1alpha1/isbsvc_types.go +++ b/pkg/apis/numaflow/v1alpha1/isbsvc_types.go @@ -20,14 +20,15 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// +kubebuilder:validation:Enum="";Pending;Running;Failed +// +kubebuilder:validation:Enum="";Pending;Running;Failed;Deleting type ISBSvcPhase string const ( - ISBSvcPhaseUnknown ISBSvcPhase = "" - ISBSvcPhasePending ISBSvcPhase = "Pending" - ISBSvcPhaseRunning ISBSvcPhase = "Running" - ISBSvcPhaseFailed ISBSvcPhase = "Failed" + ISBSvcPhaseUnknown ISBSvcPhase = "" + ISBSvcPhasePending ISBSvcPhase = "Pending" + ISBSvcPhaseRunning ISBSvcPhase = "Running" + ISBSvcPhaseFailed ISBSvcPhase = "Failed" + ISBSvcPhaseDeleting ISBSvcPhase = "Deleting" // ISBSvcConditionConfigured has the status True when the InterStepBufferService // has valid configuration. @@ -150,7 +151,8 @@ func (iss *InterStepBufferServiceStatus) SetObservedGeneration(value int64) { // IsHealthy indicates whether the InterStepBufferService is healthy or not func (iss *InterStepBufferServiceStatus) IsHealthy() bool { - if iss.Phase != ISBSvcPhaseRunning { + // Deleting is a special case, we don't want to mark it as unhealthy as Pipeline reconciliation relies on it + if iss.Phase != ISBSvcPhaseRunning && iss.Phase != ISBSvcPhaseDeleting { return false } return iss.IsReady() diff --git a/pkg/reconciler/isbsvc/controller.go b/pkg/reconciler/isbsvc/controller.go index 1cddae5d4b..6f3cd212f9 100644 --- a/pkg/reconciler/isbsvc/controller.go +++ b/pkg/reconciler/isbsvc/controller.go @@ -101,6 +101,7 @@ func (r *interStepBufferServiceReconciler) reconcile(ctx context.Context, isbSvc // Finalizer logic should be added here. 
if err := installer.Uninstall(ctx, isbSvc, r.client, r.kubeClient, r.config, log, r.recorder); err != nil { log.Errorw("Failed to uninstall", zap.Error(err)) + isbSvc.Status.SetPhase(dfv1.ISBSvcPhaseDeleting, err.Error()) return err } controllerutil.RemoveFinalizer(isbSvc, finalizerName) diff --git a/pkg/reconciler/isbsvc/installer/installer.go b/pkg/reconciler/isbsvc/installer/installer.go index e1730416b6..0c295b9dc6 100644 --- a/pkg/reconciler/isbsvc/installer/installer.go +++ b/pkg/reconciler/isbsvc/installer/installer.go @@ -91,6 +91,13 @@ func getInstaller(isbSvc *dfv1.InterStepBufferService, client client.Client, kub // // It could also be used to check if the ISB Service object can be safely deleted. func Uninstall(ctx context.Context, isbSvc *dfv1.InterStepBufferService, client client.Client, kubeClient kubernetes.Interface, config *reconciler.GlobalConfig, logger *zap.SugaredLogger, recorder record.EventRecorder) error { + pls, err := referencedPipelines(ctx, client, isbSvc) + if err != nil { + return fmt.Errorf("failed to check if there is any pipeline using this InterStepBufferService, %w", err) + } + if pls > 0 { + return fmt.Errorf("can not delete InterStepBufferService %q which has %d pipelines connected", isbSvc.Name, pls) + } installer, err := getInstaller(isbSvc, client, kubeClient, config, logger, recorder) if err != nil { logger.Errorw("Failed to get an installer", zap.Error(err)) @@ -98,3 +105,23 @@ func Uninstall(ctx context.Context, isbSvc *dfv1.InterStepBufferService, client } return installer.Uninstall(ctx) } + +func referencedPipelines(ctx context.Context, c client.Client, isbSvc *dfv1.InterStepBufferService) (int, error) { + pipelines := &dfv1.PipelineList{} + if err := c.List(ctx, pipelines, &client.ListOptions{ + Namespace: isbSvc.Namespace, + }); err != nil { + return 0, err + } + result := 0 + for _, pl := range pipelines.Items { + isbSvcName := pl.Spec.InterStepBufferServiceName + if isbSvcName == "" { + isbSvcName = 
dfv1.DefaultISBSvcName + } + if isbSvcName == isbSvc.Name { + result++ + } + } + return result, nil +} diff --git a/pkg/reconciler/isbsvc/installer/installer_test.go b/pkg/reconciler/isbsvc/installer/installer_test.go index ce6e5dc124..7faeb6b9f3 100644 --- a/pkg/reconciler/isbsvc/installer/installer_test.go +++ b/pkg/reconciler/isbsvc/installer/installer_test.go @@ -250,4 +250,71 @@ func TestUnInstall(t *testing.T) { err := Uninstall(ctx, testObj, cl, kubeClient, fakeConfig, zaptest.NewLogger(t).Sugar(), record.NewFakeRecorder(64)) assert.NoError(t, err) }) + + t.Run("test has pl connected", func(t *testing.T) { + testPipeline := &dfv1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pl", + Namespace: testNamespace, + }, + Spec: dfv1.PipelineSpec{ + InterStepBufferServiceName: testISBSName, + }, + } + err := cl.Create(ctx, testPipeline) + assert.NoError(t, err) + testObj := testJetStreamIsbSvc.DeepCopy() + err = Uninstall(ctx, testObj, cl, kubeClient, fakeConfig, zaptest.NewLogger(t).Sugar(), record.NewFakeRecorder(64)) + assert.Error(t, err) + assert.Contains(t, err.Error(), "connected") + }) +} + +func Test_referencedPipelines(t *testing.T) { + cl := fake.NewClientBuilder().Build() + ctx := context.TODO() + + t.Run("test no referenced pls", func(t *testing.T) { + testObj := testJetStreamIsbSvc.DeepCopy() + pls, err := referencedPipelines(ctx, cl, testObj) + assert.NoError(t, err) + assert.Equal(t, 0, pls) + }) + + t.Run("test having referenced pls - non default isbsvc", func(t *testing.T) { + testPipeline := &dfv1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pl", + Namespace: testNamespace, + }, + Spec: dfv1.PipelineSpec{ + InterStepBufferServiceName: testISBSName, + }, + } + err := cl.Create(ctx, testPipeline) + assert.NoError(t, err) + testObj := testJetStreamIsbSvc.DeepCopy() + pls, err := referencedPipelines(ctx, cl, testObj) + assert.NoError(t, err) + assert.Equal(t, 1, pls) + }) + + t.Run("test having referenced pls - default isbsvc", 
func(t *testing.T) { + testPipeline := &dfv1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pl-1", + Namespace: testNamespace, + }, + Spec: dfv1.PipelineSpec{ + InterStepBufferServiceName: "", + }, + } + err := cl.Create(ctx, testPipeline) + assert.NoError(t, err) + testObj := testJetStreamIsbSvc.DeepCopy() + testObj.Name = "default" + pls, err := referencedPipelines(ctx, cl, testObj) + assert.NoError(t, err) + assert.Equal(t, 1, pls) + }) } From 9140f799b56905fffccfccff45015d6cf7e18008 Mon Sep 17 00:00:00 2001 From: Yashash H L Date: Thu, 7 Nov 2024 01:32:38 +0530 Subject: [PATCH 137/188] chore: shutdown when we see non retryable udf errors (#2204) Signed-off-by: Yashash H L --- pkg/forwarder/interfaces.go | 4 +- pkg/sinks/forward/forward.go | 41 +++++++----- pkg/sinks/sink.go | 27 ++++---- pkg/sinks/udsink/udsink_grpc.go | 12 ++-- pkg/sources/errors/errors.go | 72 +++++++++++++++++++++ pkg/sources/forward/data_forward.go | 72 +++++++++++---------- pkg/sources/source.go | 35 +++++----- pkg/sources/udsource/user_defined_source.go | 28 ++++++-- pkg/udf/forward/forward.go | 69 ++++++++++---------- pkg/udf/map_udf.go | 29 ++++----- 10 files changed, 246 insertions(+), 143 deletions(-) create mode 100644 pkg/sources/errors/errors.go diff --git a/pkg/forwarder/interfaces.go b/pkg/forwarder/interfaces.go index db628a4083..e745899ffc 100644 --- a/pkg/forwarder/interfaces.go +++ b/pkg/forwarder/interfaces.go @@ -49,7 +49,9 @@ func (gw GoWhere) WhereTo(ks []string, ts []string, id string) ([]VertexBuffer, // StarterStopper starts/stops the forwarding. type StarterStopper interface { - Start() <-chan struct{} + // Start returns a channel that can be used to listen for errors. If + // the channel is closed without any error, it means the forwarder has stopped. 
+ Start() <-chan error Stop() ForceStop() } diff --git a/pkg/sinks/forward/forward.go b/pkg/sinks/forward/forward.go index ad53b5fa51..832611c6dc 100644 --- a/pkg/sinks/forward/forward.go +++ b/pkg/sinks/forward/forward.go @@ -111,9 +111,9 @@ func NewDataForward( } // Start starts reading the buffer and forwards to sinker. Call `Stop` to stop. -func (df *DataForward) Start() <-chan struct{} { +func (df *DataForward) Start() <-chan error { log := logging.FromContext(df.ctx) - stopped := make(chan struct{}) + stopped := make(chan error) var wg sync.WaitGroup wg.Add(1) go func() { @@ -137,7 +137,11 @@ func (df *DataForward) Start() <-chan struct{} { // shutdown the fromBufferPartition should be empty. } // keep doing what you are good at - df.forwardAChunk(df.ctx) + if err := df.forwardAChunk(df.ctx); err != nil { + log.Errorw("Failed to forward a chunk", zap.Error(err)) + stopped <- err + return + } } }() @@ -176,7 +180,7 @@ func (df *DataForward) Start() <-chan struct{} { // for a chunk of messages returned by the first Read call. It will return only if only we are successfully able to ack // the message after forwarding, barring any platform errors. The platform errors include buffer-full, // buffer-not-reachable, etc., but does not include errors due to WhereTo, etc. -func (df *DataForward) forwardAChunk(ctx context.Context) { +func (df *DataForward) forwardAChunk(ctx context.Context) error { start := time.Now() totalBytes := 0 dataBytes := 0 @@ -207,12 +211,12 @@ func (df *DataForward) forwardAChunk(ctx context.Context) { zap.Int64("offset", processorWMB.Offset), zap.Int64("watermark", processorWMB.Watermark), zap.Bool("idle", processorWMB.Idle)) - return + return nil } // if the validation passed, we will publish the watermark to all the toBuffer partitions. 
idlehandler.PublishIdleWatermark(ctx, df.sinkWriter.GetPartitionIdx(), df.sinkWriter, df.wmPublisher, df.idleManager, df.opts.logger, df.vertexName, df.pipelineName, dfv1.VertexTypeSink, df.vertexReplica, wmb.Watermark(time.UnixMilli(processorWMB.Watermark))) - return + return nil } var dataMessages = make([]*isb.ReadMessage, 0, len(readMessages)) @@ -266,7 +270,7 @@ func (df *DataForward) forwardAChunk(ctx context.Context) { if err != nil { df.opts.logger.Errorw("failed to write to sink", zap.Error(err)) df.fromBufferPartition.NoAck(ctx, readOffsets) - return + return err } // Only when fallback is configured, it is possible to return fallbackMessages. If there's any, write to the fallback sink. @@ -277,7 +281,8 @@ func (df *DataForward) forwardAChunk(ctx context.Context) { _, _, err = df.writeToSink(ctx, df.opts.fbSinkWriter, fallbackMessages, true) if err != nil { df.opts.logger.Errorw("Failed to write to fallback sink", zap.Error(err)) - return + df.fromBufferPartition.NoAck(ctx, readOffsets) + return err } } @@ -300,7 +305,7 @@ func (df *DataForward) forwardAChunk(ctx context.Context) { if err != nil { df.opts.logger.Errorw("Failed to ack from buffer", zap.Error(err)) metrics.AckMessageError.With(map[string]string{metrics.LabelVertex: df.vertexName, metrics.LabelPipeline: df.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeSink), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), metrics.LabelPartitionName: df.fromBufferPartition.GetName()}).Add(float64(len(readOffsets))) - return + return nil } metrics.AckMessagesCount.With(map[string]string{metrics.LabelVertex: df.vertexName, metrics.LabelPipeline: df.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeSink), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), metrics.LabelPartitionName: df.fromBufferPartition.GetName()}).Add(float64(len(readOffsets))) @@ -311,6 +316,7 @@ func (df *DataForward) forwardAChunk(ctx context.Context) { } // ProcessingTimes 
of the entire forwardAChunk metrics.ForwardAChunkProcessingTime.With(map[string]string{metrics.LabelVertex: df.vertexName, metrics.LabelPipeline: df.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeSink), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica))}).Observe(float64(time.Since(start).Microseconds())) + return nil } // ackFromBuffer acknowledges an array of offsets back to fromBufferPartition and is a blocking call or until shutdown has been initiated. @@ -390,20 +396,26 @@ func (df *DataForward) writeToSink(ctx context.Context, sinkWriter sinker.SinkWr _writeOffsets, errs := sinkWriter.Write(ctx, messagesToTry) for idx, msg := range messagesToTry { if err = errs[idx]; err != nil { + var udsinkErr = new(udsink.ApplyUDSinkErr) + if errors.As(err, &udsinkErr) { + if udsinkErr.IsInternalErr() { + return false, err + } + } // if we are asked to write to fallback sink, check if the fallback sink is configured, // and we are not already in the fallback sink write path. 
- if errors.Is(err, &udsink.WriteToFallbackErr) && df.opts.fbSinkWriter != nil && !isFbSinkWriter { + if errors.Is(err, udsink.WriteToFallbackErr) && df.opts.fbSinkWriter != nil && !isFbSinkWriter { fallbackMessages = append(fallbackMessages, msg) continue } // if we are asked to write to fallback but no fallback sink is configured, we will retry the messages to the same sink - if errors.Is(err, &udsink.WriteToFallbackErr) && df.opts.fbSinkWriter == nil { + if errors.Is(err, udsink.WriteToFallbackErr) && df.opts.fbSinkWriter == nil { df.opts.logger.Error("Asked to write to fallback but no fallback sink is configured, retrying the message to the same sink") } // if we are asked to write to fallback sink inside the fallback sink, we will retry the messages to the fallback sink - if errors.Is(err, &udsink.WriteToFallbackErr) && isFbSinkWriter { + if errors.Is(err, udsink.WriteToFallbackErr) && isFbSinkWriter { df.opts.logger.Error("Asked to write to fallback sink inside the fallback sink, retrying the message to fallback sink") } @@ -444,9 +456,8 @@ func (df *DataForward) writeToSink(ctx context.Context, sinkWriter sinker.SinkWr } return true, nil }) - // If we exited out of the loop and it was due to a forced shutdown we should exit - // TODO(Retry-Sink): Check for ctx done separately? 
That should be covered in shutdown - if ok, _ := df.IsShuttingDown(); err != nil && ok { + + if err != nil { return nil, nil, err } // Check what actions are required once the writing loop is completed diff --git a/pkg/sinks/sink.go b/pkg/sinks/sink.go index 73075dc1cc..3355cbe70c 100644 --- a/pkg/sinks/sink.go +++ b/pkg/sinks/sink.go @@ -256,21 +256,20 @@ func (u *SinkProcessor) Start(ctx context.Context) error { defer finalWg.Done() log.Infow("Start processing sink messages ", zap.String("isbsvc", string(u.ISBSvcType)), zap.String("fromPartition ", fromBufferPartitionName)) stopped := sinkForwarder.Start() - wg := &sync.WaitGroup{} - wg.Add(1) - go func() { - defer wg.Done() - for { - <-stopped - log.Info("Sink forwarder stopped, exiting sink processor...") - return + select { + case <-ctx.Done(): // context cancelled case + log.Info("Context cancelled, stopping forwarder for partition...", zap.String("partition", fromBufferPartitionName)) + sinkForwarder.Stop() + if err := <-stopped; err != nil { + log.Errorw("Sink forwarder stopped with error", zap.String("fromPartition", fromBufferPartitionName), zap.Error(err)) } - }() - <-ctx.Done() - log.Infow("SIGTERM exiting inside partition...", zap.String("fromPartition", fromBufferPartitionName)) - sinkForwarder.Stop() - wg.Wait() - log.Infow("Exited for partition...", zap.String("fromPartition", fromBufferPartitionName)) + log.Info("Exited for partition...", zap.String("partition", fromBufferPartitionName)) + case err := <-stopped: // critical error case + if err != nil { + log.Errorw("Sink forwarder stopped with error", zap.String("fromPartition", fromBufferPartitionName), zap.Error(err)) + cancel() + } + } }(df, readers[index].GetName()) } diff --git a/pkg/sinks/udsink/udsink_grpc.go b/pkg/sinks/udsink/udsink_grpc.go index 6b9a1a77eb..1656c26111 100644 --- a/pkg/sinks/udsink/udsink_grpc.go +++ b/pkg/sinks/udsink/udsink_grpc.go @@ -29,16 +29,16 @@ import ( ) var ( - WriteToFallbackErr = ApplyUDSinkErr{ + 
WriteToFallbackErr error = &ApplyUDSinkErr{ UserUDSinkErr: true, Message: "write to fallback sink", } - UnknownUDSinkErr = ApplyUDSinkErr{ + UnknownUDSinkErr error = &ApplyUDSinkErr{ UserUDSinkErr: true, Message: "unknown error in udsink", } - NotFoundErr = ApplyUDSinkErr{ + NotFoundErr error = &ApplyUDSinkErr{ UserUDSinkErr: true, Message: "not found in response", } @@ -114,7 +114,7 @@ func (u *UDSgRPCBasedUDSink) ApplySink(ctx context.Context, requests []*sinkpb.S } for i, m := range requests { if r, existing := resMap[m.Request.GetId()]; !existing { - errs[i] = &NotFoundErr + errs[i] = NotFoundErr } else { if r.GetStatus() == sinkpb.Status_FAILURE { if r.GetErrMsg() != "" { @@ -123,10 +123,10 @@ func (u *UDSgRPCBasedUDSink) ApplySink(ctx context.Context, requests []*sinkpb.S Message: r.GetErrMsg(), } } else { - errs[i] = &UnknownUDSinkErr + errs[i] = UnknownUDSinkErr } } else if r.GetStatus() == sinkpb.Status_FALLBACK { - errs[i] = &WriteToFallbackErr + errs[i] = WriteToFallbackErr } else { errs[i] = nil } diff --git a/pkg/sources/errors/errors.go b/pkg/sources/errors/errors.go new file mode 100644 index 0000000000..2d6582fcde --- /dev/null +++ b/pkg/sources/errors/errors.go @@ -0,0 +1,72 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package errors + +// SourceReadErr represents any source read related error +type SourceReadErr struct { + Message string + Retryable bool +} + +func (e *SourceReadErr) Error() string { + return e.Message +} + +func (e *SourceReadErr) Is(target error) bool { + return target.Error() == e.Error() +} + +// IsRetryable is true if the error is retryable +func (e *SourceReadErr) IsRetryable() bool { + return e.Retryable +} + +type SourceAckErr struct { + Message string + Retryable bool +} + +func (e *SourceAckErr) Error() string { + return e.Message +} + +func (e *SourceAckErr) Is(target error) bool { + return target.Error() == e.Error() +} + +// IsRetryable is true if the error is retryable +func (e *SourceAckErr) IsRetryable() bool { + return e.Retryable +} + +type SourcePendingErr struct { + Message string + Retryable bool +} + +func (e *SourcePendingErr) Error() string { + return e.Message +} + +func (e *SourcePendingErr) Is(target error) bool { + return target.Error() == e.Error() +} + +// IsRetryable is true if the error is retryable +func (e *SourcePendingErr) IsRetryable() bool { + return e.Retryable +} diff --git a/pkg/sources/forward/data_forward.go b/pkg/sources/forward/data_forward.go index fc22d8ac0b..63d230652c 100644 --- a/pkg/sources/forward/data_forward.go +++ b/pkg/sources/forward/data_forward.go @@ -32,6 +32,7 @@ import ( "github.com/numaproj/numaflow/pkg/metrics" "github.com/numaproj/numaflow/pkg/shared/idlehandler" "github.com/numaproj/numaflow/pkg/shared/logging" + errors2 "github.com/numaproj/numaflow/pkg/sources/errors" "github.com/numaproj/numaflow/pkg/sources/sourcer" "github.com/numaproj/numaflow/pkg/watermark/entity" "github.com/numaproj/numaflow/pkg/watermark/fetch" @@ -119,9 +120,9 @@ func NewDataForward( } // Start starts reading from source and forwards to the next buffers. Call `Stop` to stop. 
-func (df *DataForward) Start() <-chan struct{} { +func (df *DataForward) Start() <-chan error { log := logging.FromContext(df.ctx) - stopped := make(chan struct{}) + stopped := make(chan error) var wg sync.WaitGroup wg.Add(1) go func() { @@ -145,7 +146,10 @@ func (df *DataForward) Start() <-chan struct{} { // shutdown the reader should be empty. } // keep doing what you are good at - df.forwardAChunk(df.ctx) + if err := df.forwardAChunk(df.ctx); err != nil { + stopped <- err + return + } } }() @@ -188,7 +192,7 @@ func (df *DataForward) Start() <-chan struct{} { // for a chunk of messages returned by the first Read call. It will return only if only we are successfully able to ack // the message after forwarding, barring any platform errors. The platform errors include buffer-full, // buffer-not-reachable, etc., but do not include errors due to user code transformer, WhereTo, etc. -func (df *DataForward) forwardAChunk(ctx context.Context) { +func (df *DataForward) forwardAChunk(ctx context.Context) error { start := time.Now() totalBytes := 0 // There is a chance that we have read the message and the container got forcefully terminated before processing. To provide @@ -204,13 +208,21 @@ func (df *DataForward) forwardAChunk(ctx context.Context) { metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), metrics.LabelPartitionName: df.reader.GetName(), }).Inc() + + // if the error is not retryable, we should return the error. + var readErr = new(errors2.SourceReadErr) + if errors.As(err, &readErr) { + if !readErr.IsRetryable() { + return err + } + } } // if there are no read messages, we return early. 
if len(readMessages) == 0 { // not idling, so nothing much to do if !df.srcIdleHandler.IsSourceIdling() { - return + return nil } // if the source is idling, we will publish idle watermark to the source and all the toBuffers @@ -243,7 +255,7 @@ func (df *DataForward) forwardAChunk(ctx context.Context) { } // len(readMessages) == 0, so we do not have anything more to do - return + return nil } // reset the idle handler because we have read messages @@ -314,7 +326,7 @@ func (df *DataForward) forwardAChunk(ctx context.Context) { readWriteMessagePairs, err = df.applyTransformer(ctx, readMessages) if err != nil { df.opts.logger.Errorw("failed to apply source transformer", zap.Error(err)) - return + return err } df.opts.logger.Debugw("concurrent applyTransformer completed", @@ -381,7 +393,7 @@ func (df *DataForward) forwardAChunk(ctx context.Context) { for _, message := range m.WriteMessages { if err = df.whereToStep(message, messageToStep); err != nil { df.opts.logger.Errorw("failed in whereToStep", zap.Error(err)) - return + return err } } // get the list of source partitions for which we have read messages, we will use this to publish watermarks to toVertices @@ -392,7 +404,7 @@ func (df *DataForward) forwardAChunk(ctx context.Context) { writeOffsets, err = df.writeToBuffers(ctx, messageToStep) if err != nil { df.opts.logger.Errorw("failed to write to toBuffers", zap.Error(err)) - return + return err } // activeWatermarkBuffers records the buffers that the publisher has published @@ -463,8 +475,14 @@ func (df *DataForward) forwardAChunk(ctx context.Context) { metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), metrics.LabelPartitionName: df.reader.GetName(), }).Add(float64(len(readOffsets))) - - return + // if the error is not retryable, we should return the error. 
+ var ackErr = new(errors2.SourceAckErr) + if errors.As(err, &ackErr) { + if !ackErr.IsRetryable() { + return err + } + } + return nil } metrics.AckMessagesCount.With(map[string]string{ metrics.LabelVertex: df.vertexName, @@ -487,13 +505,17 @@ func (df *DataForward) forwardAChunk(ctx context.Context) { metrics.LabelVertexType: string(dfv1.VertexTypeSource), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), }).Observe(float64(time.Since(start).Microseconds())) + return nil } func (df *DataForward) ackFromSource(ctx context.Context, offsets []isb.Offset) error { // for all the sources, we either ack all offsets or none. // when a batch ack fails, the source Ack() function populate the error array with the same error; // hence we can just return the first error. - return df.reader.Ack(ctx, offsets)[0] + if errs := df.reader.Ack(ctx, offsets); len(errs) > 0 { + return errs[0] + } + return nil } // writeToBuffers is a blocking call until all the messages have been forwarded to all the toBuffers, or a shutdown @@ -638,29 +660,11 @@ func (df *DataForward) writeToBuffer(ctx context.Context, toBufferPartition isb. // the skip flag is set. The ShutDown flag will only if there is an InternalErr and ForceStop has been invoked. // The UserError retry will be done on the applyTransformer. func (df *DataForward) applyTransformer(ctx context.Context, messages []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { - for { - transformResults, err := df.opts.transformer.ApplyTransform(ctx, messages) - if err != nil { - df.opts.logger.Errorw("Transformer.Apply error", zap.Error(err)) - // TODO: implement retry with backoff etc. - time.Sleep(df.opts.retryInterval) - // keep retrying, I cannot think of a use case where a user could say, errors are fine :-) - // as a platform, we should not lose or corrupt data. - // this does not mean we should prohibit this from a shutdown. 
- if ok, _ := df.IsShuttingDown(); ok { - df.opts.logger.Errorw("Transformer.Apply, Stop called while stuck on an internal error", zap.Error(err)) - metrics.PlatformError.With(map[string]string{ - metrics.LabelVertex: df.vertexName, - metrics.LabelPipeline: df.pipelineName, - metrics.LabelVertexType: string(dfv1.VertexTypeSource), - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), - }).Inc() - return nil, err - } - continue - } - return transformResults, nil + transformResults, err := df.opts.transformer.ApplyTransform(ctx, messages) + if err != nil { + return nil, err } + return transformResults, nil } // whereToStep executes the WhereTo interfaces and then updates the to step's writeToBuffers buffer. diff --git a/pkg/sources/source.go b/pkg/sources/source.go index 69bc0c0099..6ed2b78d5b 100644 --- a/pkg/sources/source.go +++ b/pkg/sources/source.go @@ -21,7 +21,6 @@ import ( "fmt" "os" "strconv" - "sync" "time" "go.uber.org/zap" @@ -295,19 +294,6 @@ func (sp *SourceProcessor) Start(ctx context.Context) error { return fmt.Errorf("failed to create source forwarder, error: %w", err) } - log.Infow("Start processing source messages", zap.String("isbs", string(sp.ISBSvcType)), zap.Any("to", sp.VertexInstance.Vertex.GetToBuffers())) - stopped := sourceForwarder.Start() - wg := &sync.WaitGroup{} - wg.Add(1) - go func() { - defer wg.Done() - for { - <-stopped - log.Info("Source forwarder stopped, exiting...") - return - } - }() - metricsOpts := metrics.NewMetricsOptions(ctx, sp.VertexInstance.Vertex, healthCheckers, []isb.LagReader{sourceReader}) ms := metrics.NewMetricsServer(sp.VertexInstance.Vertex, metricsOpts...) 
if shutdown, err := ms.Start(ctx); err != nil { @@ -315,10 +301,23 @@ func (sp *SourceProcessor) Start(ctx context.Context) error { } else { defer func() { _ = shutdown(context.Background()) }() } - <-ctx.Done() - log.Info("SIGTERM, exiting...") - sourceForwarder.Stop() - wg.Wait() + + log.Infow("Start processing source messages", zap.String("isbs", string(sp.ISBSvcType)), zap.Any("to", sp.VertexInstance.Vertex.GetToBuffers())) + stopped := sourceForwarder.Start() + select { + case <-ctx.Done(): // context cancelled case + log.Info("Context cancelled, stopping forwarder for partition...") + sourceForwarder.Stop() + if err := <-stopped; err != nil { + log.Errorw("Source forwarder stopped with error", zap.Error(err)) + } + log.Info("Exited source forwarder...") + case err := <-stopped: // critical error case + if err != nil { + log.Errorw("Source forwarder stopped with error", zap.Error(err)) + cancel() + } + } // close all the sourceReader wm stores for _, wmStore := range sourceWmStores { diff --git a/pkg/sources/udsource/user_defined_source.go b/pkg/sources/udsource/user_defined_source.go index 5ba77019a1..37a36a46db 100644 --- a/pkg/sources/udsource/user_defined_source.go +++ b/pkg/sources/udsource/user_defined_source.go @@ -25,6 +25,7 @@ import ( dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" "github.com/numaproj/numaflow/pkg/isb" "github.com/numaproj/numaflow/pkg/shared/logging" + "github.com/numaproj/numaflow/pkg/sources/errors" "github.com/numaproj/numaflow/pkg/sources/sourcer" ) @@ -49,7 +50,6 @@ type userDefinedSource struct { // NewUserDefinedSource returns a new user-defined source reader. 
func NewUserDefinedSource(ctx context.Context, vertexInstance *dfv1.VertexInstance, sourceApplier *GRPCBasedUDSource, opts ...Option) (sourcer.SourceReader, error) { var err error - u := &userDefinedSource{ vertexName: vertexInstance.Vertex.Spec.Name, pipelineName: vertexInstance.Vertex.Spec.PipelineName, @@ -82,18 +82,38 @@ func (u *userDefinedSource) Partitions(ctx context.Context) []int32 { // Read reads the messages from the user-defined source. func (u *userDefinedSource) Read(ctx context.Context, count int64) ([]*isb.ReadMessage, error) { - return u.sourceApplier.ApplyReadFn(ctx, count, u.readTimeout) + messages, err := u.sourceApplier.ApplyReadFn(ctx, count, u.readTimeout) + if err != nil { + return nil, &errors.SourceReadErr{ + Message: err.Error(), + Retryable: false, + } + } + return messages, nil } // Ack acknowledges the messages from the user-defined source // If there is an error, return the error using an error array func (u *userDefinedSource) Ack(ctx context.Context, offsets []isb.Offset) []error { - return []error{u.sourceApplier.ApplyAckFn(ctx, offsets)} + if err := u.sourceApplier.ApplyAckFn(ctx, offsets); err != nil { + return []error{&errors.SourceAckErr{ + Message: err.Error(), + Retryable: false, + }} + } + return []error{} } // Pending returns the number of pending messages in the user-defined source func (u *userDefinedSource) Pending(ctx context.Context) (int64, error) { - return u.sourceApplier.ApplyPendingFn(ctx) + pending, err := u.sourceApplier.ApplyPendingFn(ctx) + if err != nil { + return 0, &errors.SourcePendingErr{ + Message: err.Error(), + Retryable: false, + } + } + return pending, nil } func (u *userDefinedSource) Close() error { diff --git a/pkg/udf/forward/forward.go b/pkg/udf/forward/forward.go index e081359cf1..2d61e32408 100644 --- a/pkg/udf/forward/forward.go +++ b/pkg/udf/forward/forward.go @@ -112,9 +112,9 @@ func NewInterStepDataForward(vertexInstance *dfv1.VertexInstance, fromStep isb.B } // Start starts reading the 
buffer and forwards to the next buffers. Call `Stop` to stop. -func (isdf *InterStepDataForward) Start() <-chan struct{} { +func (isdf *InterStepDataForward) Start() <-chan error { log := logging.FromContext(isdf.ctx) - stopped := make(chan struct{}) + stopped := make(chan error) var wg sync.WaitGroup wg.Add(1) go func() { @@ -137,8 +137,12 @@ func (isdf *InterStepDataForward) Start() <-chan struct{} { // once context.Done() is called, we still have to try to forwardAChunk because in graceful // shutdown the fromBufferPartition should be empty. } - // keep doing what you are good at - isdf.forwardAChunk(isdf.ctx) + // keep doing what you are good at, if we get an error we will stop. + if err := isdf.forwardAChunk(isdf.ctx); err != nil { + log.Errorw("Failed to forward a chunk", zap.Error(err)) + stopped <- err + return + } } }() @@ -170,7 +174,7 @@ func (isdf *InterStepDataForward) Start() <-chan struct{} { // for a chunk of messages returned by the first Read call. It will return only if only we are successfully able to ack // the message after forwarding, barring any platform errors. The platform errors include buffer-full, // buffer-not-reachable, etc., but does not include errors due to user code UDFs, WhereTo, etc. -func (isdf *InterStepDataForward) forwardAChunk(ctx context.Context) { +func (isdf *InterStepDataForward) forwardAChunk(ctx context.Context) error { start := time.Now() totalBytes := 0 dataBytes := 0 @@ -201,7 +205,7 @@ func (isdf *InterStepDataForward) forwardAChunk(ctx context.Context) { zap.Int64("offset", processorWMB.Offset), zap.Int64("watermark", processorWMB.Watermark), zap.Bool("idle", processorWMB.Idle)) - return + return nil } // if the validation passed, we will publish the watermark to all the toBuffer partitions. 
@@ -212,7 +216,7 @@ func (isdf *InterStepDataForward) forwardAChunk(ctx context.Context) { } } } - return + return nil } var dataMessages = make([]*isb.ReadMessage, 0, len(readMessages)) @@ -227,6 +231,17 @@ func (isdf *InterStepDataForward) forwardAChunk(ctx context.Context) { dataBytes += len(m.Payload) } } + + // If we don't have any data messages(we received only wmbs), we can ack all the readOffsets and return early. + if len(dataMessages) == 0 { + if err := isdf.ackFromBuffer(ctx, readOffsets); err != nil { + isdf.opts.logger.Errorw("Failed to ack from buffer", zap.Error(err)) + metrics.AckMessageError.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), metrics.LabelPartitionName: isdf.fromBufferPartition.GetName()}).Add(float64(len(readOffsets))) + return err + } + return nil + } + metrics.ReadDataMessagesCount.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), metrics.LabelPartitionName: isdf.fromBufferPartition.GetName()}).Add(float64(len(dataMessages))) metrics.ReadMessagesCount.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), metrics.LabelPartitionName: isdf.fromBufferPartition.GetName()}).Add(float64(len(readMessages))) metrics.ReadBytesCount.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), metrics.LabelPartitionName: 
isdf.fromBufferPartition.GetName()}).Add(float64(totalBytes)) @@ -254,7 +269,7 @@ func (isdf *InterStepDataForward) forwardAChunk(ctx context.Context) { isdf.opts.logger.Errorw("failed to streamMessage", zap.Error(err)) // As there's no partial failure, non-ack all the readOffsets isdf.fromBufferPartition.NoAck(ctx, readOffsets) - return + return err } } else { // create space for writeMessages specific to each step as we could forward to all the steps too. @@ -271,7 +286,7 @@ func (isdf *InterStepDataForward) forwardAChunk(ctx context.Context) { isdf.opts.logger.Errorw("failed to applyUDF", zap.Error(err)) // As there's no partial failure, non-ack all the readOffsets isdf.fromBufferPartition.NoAck(ctx, readOffsets) - return + return err } // let's figure out which vertex to send the results to. @@ -282,7 +297,7 @@ func (isdf *InterStepDataForward) forwardAChunk(ctx context.Context) { if err := isdf.whereToStep(message, messageToStep, m.ReadMessage); err != nil { isdf.opts.logger.Errorw("failed in whereToStep", zap.Error(err)) isdf.fromBufferPartition.NoAck(ctx, readOffsets) - return + return err } } } @@ -292,7 +307,7 @@ func (isdf *InterStepDataForward) forwardAChunk(ctx context.Context) { if err != nil { isdf.opts.logger.Errorw("failed to write to toBuffers", zap.Error(err)) isdf.fromBufferPartition.NoAck(ctx, readOffsets) - return + return err } isdf.opts.logger.Debugw("writeToBuffers completed") } @@ -348,7 +363,7 @@ func (isdf *InterStepDataForward) forwardAChunk(ctx context.Context) { if err != nil { isdf.opts.logger.Errorw("Failed to ack from buffer", zap.Error(err)) metrics.AckMessageError.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), metrics.LabelPartitionName: isdf.fromBufferPartition.GetName()}).Add(float64(len(readOffsets))) - return + return err } 
metrics.AckMessagesCount.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), metrics.LabelPartitionName: isdf.fromBufferPartition.GetName()}).Add(float64(len(readOffsets))) @@ -360,6 +375,7 @@ func (isdf *InterStepDataForward) forwardAChunk(ctx context.Context) { } // ProcessingTimes of the entire forwardAChunk metrics.ForwardAChunkProcessingTime.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica))}).Observe(float64(time.Since(start).Microseconds())) + return nil } // streamMessage streams the data messages to the next step. @@ -376,7 +392,7 @@ func (isdf *InterStepDataForward) streamMessage(ctx context.Context, dataMessage // Ensure dataMessages length is 1 for streaming if len(dataMessages) != 1 { errMsg := "data message size is not 1 with map UDF streaming" - isdf.opts.logger.Errorw(errMsg) + isdf.opts.logger.Errorw(errMsg, zap.Int("dataMessagesSize", len(dataMessages))) return nil, errors.New(errMsg) } @@ -618,29 +634,12 @@ func (isdf *InterStepDataForward) writeToBuffer(ctx context.Context, toBufferPar // the skip flag is set. ShutDown flag will only if there is an InternalErr and ForceStop has been invoked. // The UserError retry will be done on the ApplyUDF. func (isdf *InterStepDataForward) applyUDF(ctx context.Context, readMessages []*isb.ReadMessage) ([]isb.ReadWriteMessagePair, error) { - for { - writeMessages, err := isdf.opts.unaryMapUdfApplier.ApplyMap(ctx, readMessages) - if err != nil { - isdf.opts.logger.Errorw("mapUDF.Apply error", zap.Error(err)) - // TODO: implement retry with backoff etc. 
- select { - case <-ctx.Done(): - // no point in retrying if the context is cancelled - return nil, err - case <-time.After(isdf.opts.retryInterval): - } - // keep retrying, I cannot think of a use case where a user could say, errors are fine :-) - // as a platform we should not lose or corrupt data. - // this does not mean we should prohibit this from a shutdown. - if ok, _ := isdf.IsShuttingDown(); ok { - isdf.opts.logger.Errorw("mapUDF.Apply, Stop called while stuck on an internal error", zap.Error(err)) - metrics.PlatformError.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica))}).Inc() - return nil, err - } - continue - } - return writeMessages, nil + writeMessages, err := isdf.opts.unaryMapUdfApplier.ApplyMap(ctx, readMessages) + if err != nil { + isdf.opts.logger.Errorw("mapUDF.Apply error", zap.Error(err)) + return nil, err } + return writeMessages, nil } // whereToStep executes the WhereTo interfaces and then updates the to step's writeToBuffers buffer. 
diff --git a/pkg/udf/map_udf.go b/pkg/udf/map_udf.go index 4a913eca1a..8cf10f36a4 100644 --- a/pkg/udf/map_udf.go +++ b/pkg/udf/map_udf.go @@ -291,24 +291,21 @@ func (u *MapUDFProcessor) Start(ctx context.Context) error { go func(fromBufferPartitionName string, isdf *forward.InterStepDataForward) { defer finalWg.Done() log.Infow("Start processing udf messages", zap.String("isbsvc", string(u.ISBSvcType)), zap.String("from", fromBufferPartitionName), zap.Any("to", u.VertexInstance.Vertex.GetToBuffers())) - stopped := isdf.Start() - wg := &sync.WaitGroup{} - wg.Add(1) - go func() { - defer wg.Done() - for { - <-stopped - log.Info("Forwarder stopped, exiting udf data processor for partition " + fromBufferPartitionName + "...") - return + select { + case <-ctx.Done(): + log.Info("Context cancelled, stopping forwarder for partition...", zap.String("partition", fromBufferPartitionName)) + isdf.Stop() + if err := <-stopped; err != nil { + log.Errorw("Map forwarder stopped with error", zap.String("fromPartition", fromBufferPartitionName), zap.Error(err)) } - }() - - <-ctx.Done() - log.Info("SIGTERM, exiting inside partition...", zap.String("partition", fromBufferPartitionName)) - isdf.Stop() - wg.Wait() - log.Info("Exited for partition...", zap.String("partition", fromBufferPartitionName)) + log.Info("Exited for partition...", zap.String("partition", fromBufferPartitionName)) + case err := <-stopped: + if err != nil { + log.Errorw("Map forwarder stopped with error", zap.String("fromPartition", fromBufferPartitionName), zap.Error(err)) + cancel() + } + } }(bufferPartition, df) } // create lag readers from buffer readers From 7e91f971552cc0f54b83c4b129bfa6d80e1eac33 Mon Sep 17 00:00:00 2001 From: Sidhant Kohli Date: Wed, 6 Nov 2024 14:51:10 -0800 Subject: [PATCH 138/188] chore: re-enable e2e tests (#2210) Signed-off-by: Sidhant Kohli --- test/map-e2e/map_test.go | 23 ++++--- test/map-e2e/testdata/flatmap-batch.yaml | 55 ++++++++--------- 
test/map-e2e/testdata/flatmap-stream.yaml | 61 +++++++++---------- test/map-e2e/testdata/flatmap.yaml | 56 ++++++++--------- test/transformer-e2e/transformer_test.go | 21 +++---- .../testdata/simple-source-python.yaml | 1 + 6 files changed, 105 insertions(+), 112 deletions(-) diff --git a/test/map-e2e/map_test.go b/test/map-e2e/map_test.go index 60821a869b..a1d171a253 100644 --- a/test/map-e2e/map_test.go +++ b/test/map-e2e/map_test.go @@ -30,7 +30,6 @@ type MapSuite struct { E2ESuite } -// FIXME(sink-streaming) python sdk func (s *MapSuite) TestBatchMapUDFunctionAndSink() { w := s.Given().Pipeline("@testdata/flatmap-batch.yaml"). When(). @@ -43,8 +42,8 @@ func (s *MapSuite) TestBatchMapUDFunctionAndSink() { VertexPodLogContains("in", LogSourceVertexStarted). VertexPodLogContains("go-split", LogUDFVertexStarted, PodLogCheckOptionWithContainer("numa")). VertexPodLogContains("go-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")). - //VertexPodLogContains("python-split", LogUDFVertexStarted, PodLogCheckOptionWithContainer("numa")). - //VertexPodLogContains("python-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")). + VertexPodLogContains("python-split", LogUDFVertexStarted, PodLogCheckOptionWithContainer("numa")). + VertexPodLogContains("python-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")). VertexPodLogContains("rust-split", LogUDFVertexStarted, PodLogCheckOptionWithContainer("numa")). VertexPodLogContains("rust-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")). VertexPodLogContains("java-split", LogUDFVertexStarted, PodLogCheckOptionWithContainer("numa")). @@ -55,7 +54,7 @@ func (s *MapSuite) TestBatchMapUDFunctionAndSink() { w.Expect(). VertexPodLogContains("go-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(3)). - //VertexPodLogContains("python-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(3)). 
+ VertexPodLogContains("python-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(3)). VertexPodLogContains("rust-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(3)). VertexPodLogContains("java-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(3)) } @@ -72,8 +71,8 @@ func (s *MapSuite) TestUDFunctionAndSink() { VertexPodLogContains("in", LogSourceVertexStarted). VertexPodLogContains("go-split", LogUDFVertexStarted, PodLogCheckOptionWithContainer("numa")). VertexPodLogContains("go-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")). - //VertexPodLogContains("python-split", LogUDFVertexStarted, PodLogCheckOptionWithContainer("numa")). - //VertexPodLogContains("python-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")). + VertexPodLogContains("python-split", LogUDFVertexStarted, PodLogCheckOptionWithContainer("numa")). + VertexPodLogContains("python-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")). VertexPodLogContains("java-split", LogUDFVertexStarted, PodLogCheckOptionWithContainer("numa")). VertexPodLogContains("java-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")) @@ -82,8 +81,8 @@ func (s *MapSuite) TestUDFunctionAndSink() { w.Expect(). VertexPodLogContains("go-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(3)). - VertexPodLogContains("java-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(3)) - //VertexPodLogContains("python-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(3)) + VertexPodLogContains("java-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(3)). 
+ VertexPodLogContains("python-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(3)) } func (s *MapSuite) TestMapStreamUDFunctionAndSink() { @@ -99,8 +98,8 @@ func (s *MapSuite) TestMapStreamUDFunctionAndSink() { VertexPodLogContains("in", LogSourceVertexStarted). VertexPodLogContains("go-split", LogUDFVertexStarted, PodLogCheckOptionWithContainer("numa")). VertexPodLogContains("go-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")). - //VertexPodLogContains("python-split", LogUDFVertexStarted, PodLogCheckOptionWithContainer("numa")). - //VertexPodLogContains("python-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")). + VertexPodLogContains("python-split", LogUDFVertexStarted, PodLogCheckOptionWithContainer("numa")). + VertexPodLogContains("python-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")). VertexPodLogContains("java-split", LogUDFVertexStarted, PodLogCheckOptionWithContainer("numa")). VertexPodLogContains("java-udsink", SinkVertexStarted, PodLogCheckOptionWithContainer("numa")) @@ -111,8 +110,8 @@ func (s *MapSuite) TestMapStreamUDFunctionAndSink() { VertexPodLogContains("go-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(4)) w.Expect(). VertexPodLogContains("go-udsink-2", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(4)) - //w.Expect(). - // VertexPodLogContains("python-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(4)) + w.Expect(). + VertexPodLogContains("python-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(4)) w.Expect(). 
VertexPodLogContains("java-udsink", "hello", PodLogCheckOptionWithContainer("udsink"), PodLogCheckOptionWithCount(4)) } diff --git a/test/map-e2e/testdata/flatmap-batch.yaml b/test/map-e2e/testdata/flatmap-batch.yaml index 780e1a59da..f4a36213a6 100644 --- a/test/map-e2e/testdata/flatmap-batch.yaml +++ b/test/map-e2e/testdata/flatmap-batch.yaml @@ -25,30 +25,29 @@ spec: # https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/log image: quay.io/numaio/numaflow-go/sink-log:stable imagePullPolicy: Always -# FIXME(sink-streaming) python sdk -# - name: python-split -# scale: -# min: 1 -# udf: -# container: -# args: -# - python -# - example.py -# # Split input message into an array with comma, https://github.com/numaproj/numaflow-python/tree/main/examples/batchmap/flatmap -# image: quay.io/numaio/numaflow-python/batch-map-flatmap:stable -# imagePullPolicy: Always -# - name: python-udsink -# scale: -# min: 1 -# sink: -# udsink: -# container: -# args: -# - python -# - example.py -# # https://github.com/numaproj/numaflow-python/tree/main/examples/sink/log -# image: quay.io/numaio/numaflow-python/sink-log:stable -# imagePullPolicy: Always + - name: python-split + scale: + min: 1 + udf: + container: + args: + - python + - example.py + # Split input message into an array with comma, https://github.com/numaproj/numaflow-python/tree/main/examples/batchmap/flatmap + image: quay.io/numaio/numaflow-python/batch-map-flatmap:stable + imagePullPolicy: Always + - name: python-udsink + scale: + min: 1 + sink: + udsink: + container: + args: + - python + - example.py + # https://github.com/numaproj/numaflow-python/tree/main/examples/sink/log + image: quay.io/numaio/numaflow-python/sink-log:stable + imagePullPolicy: Always - name: rust-split scale: min: 1 @@ -88,10 +87,10 @@ spec: to: go-split - from: go-split to: go-udsink -# - from: in -# to: python-split -# - from: python-split -# to: python-udsink + - from: in + to: python-split + - from: python-split + to: 
python-udsink - from: in to: rust-split - from: rust-split diff --git a/test/map-e2e/testdata/flatmap-stream.yaml b/test/map-e2e/testdata/flatmap-stream.yaml index 0c749d82f7..503ffcaf3a 100644 --- a/test/map-e2e/testdata/flatmap-stream.yaml +++ b/test/map-e2e/testdata/flatmap-stream.yaml @@ -36,33 +36,32 @@ spec: # https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/log image: quay.io/numaio/numaflow-go/sink-log:stable imagePullPolicy: Always -# FIXME(sink-streaming) python sdk -# - name: python-split -# partitions: 3 -# limits: -# readBatchSize: 1 -# scale: -# min: 1 -# udf: -# container: -# args: -# - python -# - example.py -# # Split input message into an array with comma, https://github.com/numaproj/numaflow-python/tree/main/examples/mapstream/flatmap_stream -# image: quay.io/numaio/numaflow-python/map-flatmap-stream:stable -# imagePullPolicy: Always -# - name: python-udsink -# scale: -# min: 1 -# sink: -# udsink: -# container: -# args: -# - python -# - example.py -# # https://github.com/numaproj/numaflow-python/tree/main/examples/sink/log -# image: quay.io/numaio/numaflow-python/sink-log:stable -# imagePullPolicy: Always + - name: python-split + partitions: 3 + limits: + readBatchSize: 1 + scale: + min: 1 + udf: + container: + args: + - python + - example.py + # Split input message into an array with comma, https://github.com/numaproj/numaflow-python/tree/main/examples/mapstream/flatmap_stream + image: quay.io/numaio/numaflow-python/map-flatmap-stream:stable + imagePullPolicy: Always + - name: python-udsink + scale: + min: 1 + sink: + udsink: + container: + args: + - python + - example.py + # https://github.com/numaproj/numaflow-python/tree/main/examples/sink/log + image: quay.io/numaio/numaflow-python/sink-log:stable + imagePullPolicy: Always - name: java-split partitions: 3 limits: @@ -90,10 +89,10 @@ spec: to: go-udsink - from: go-split to: go-udsink-2 -# - from: in -# to: python-split -# - from: python-split -# to: python-udsink + - 
from: in + to: python-split + - from: python-split + to: python-udsink - from: in to: java-split - from: java-split diff --git a/test/map-e2e/testdata/flatmap.yaml b/test/map-e2e/testdata/flatmap.yaml index 5082d54b00..c645605548 100644 --- a/test/map-e2e/testdata/flatmap.yaml +++ b/test/map-e2e/testdata/flatmap.yaml @@ -24,31 +24,29 @@ spec: # https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/log image: quay.io/numaio/numaflow-go/sink-log:stable imagePullPolicy: Always - -# FIXME(sink-streaming) python sdk -# - name: python-split -# scale: -# min: 1 -# udf: -# container: -# args: -# - python -# - example.py -# # Split input message into an array with comma, https://github.com/numaproj/numaflow-python/tree/main/examples/map/flatmap -# image: quay.io/numaio/numaflow-python/map-flatmap:stable -# imagePullPolicy: Always -# - name: python-udsink -# scale: -# min: 1 -# sink: -# udsink: -# container: -# args: -# - python -# - example.py -# # https://github.com/numaproj/numaflow-python/tree/main/examples/sink/log -# image: quay.io/numaio/numaflow-python/sink-log:stable -# imagePullPolicy: Always + - name: python-split + scale: + min: 1 + udf: + container: + args: + - python + - example.py + # Split input message into an array with comma, https://github.com/numaproj/numaflow-python/tree/main/examples/map/flatmap + image: quay.io/numaio/numaflow-python/map-flatmap:stable + imagePullPolicy: Always + - name: python-udsink + scale: + min: 1 + sink: + udsink: + container: + args: + - python + - example.py + # https://github.com/numaproj/numaflow-python/tree/main/examples/sink/log + image: quay.io/numaio/numaflow-python/sink-log:stable + imagePullPolicy: Always - name: java-split scale: min: 1 @@ -71,10 +69,10 @@ spec: to: go-split - from: go-split to: go-udsink -# - from: in -# to: python-split -# - from: python-split -# to: python-udsink + - from: in + to: python-split + - from: python-split + to: python-udsink - from: in to: java-split - from: java-split 
diff --git a/test/transformer-e2e/transformer_test.go b/test/transformer-e2e/transformer_test.go index a8a4905a31..9ba8f542fb 100644 --- a/test/transformer-e2e/transformer_test.go +++ b/test/transformer-e2e/transformer_test.go @@ -174,22 +174,19 @@ func (s *TransformerSuite) TestSourceTransformer() { } var wg sync.WaitGroup - wg.Add(1) - // FIXME: Enable these tests after corresponding SDKs are changed to support bidirectional streaming - //go func() { - // defer wg.Done() - // s.testSourceTransformer("python") - //}() - //go func() { - // defer wg.Done() - // s.testSourceTransformer("java") - //}() + wg.Add(4) + go func() { + defer wg.Done() + s.testSourceTransformer("python") + }() + go func() { + defer wg.Done() + s.testSourceTransformer("java") + }() go func() { defer wg.Done() s.testSourceTransformer("go") }() - - wg.Add(1) go func() { defer wg.Done() s.testSourceTransformer("rust") diff --git a/test/udsource-e2e/testdata/simple-source-python.yaml b/test/udsource-e2e/testdata/simple-source-python.yaml index 9862b63bb6..a64960e9fe 100644 --- a/test/udsource-e2e/testdata/simple-source-python.yaml +++ b/test/udsource-e2e/testdata/simple-source-python.yaml @@ -13,6 +13,7 @@ spec: # A simple user-defined source for e2e testing # See https://github.com/numaproj/numaflow-python/tree/main/examples/source/simple_source image: quay.io/numaio/numaflow-python/simple-source:stable + imagePullPolicy: Always limits: readBatchSize: 500 - name: out From 63d5f774fecc0284ea92ad3934e7c2c8e4a58b6e Mon Sep 17 00:00:00 2001 From: Vedant Gupta <49195734+veds-g@users.noreply.github.com> Date: Fri, 8 Nov 2024 14:25:49 +0530 Subject: [PATCH 139/188] feat: metrics visualiser for mono vertex (#2195) Signed-off-by: adarsh0728 Signed-off-by: veds-g Co-authored-by: Adarsh Jain --- .../namespaced-numaflow-server.yaml | 60 ++ config/advanced-install/numaflow-server.yaml | 61 ++ .../base/numaflow-server/kustomization.yaml | 3 +- .../numaflow-server-deployment.yaml | 5 + 
.../numaflow-server-metrics-proxy-config.yaml | 54 + .../numaflow-cmd-params-config.yaml | 4 +- config/install.yaml | 61 ++ config/namespace-install.yaml | 60 ++ examples/21-simple-mono-vertex.yaml | 2 + go.mod | 2 +- go.sum | 4 + server/apis/interface.go | 4 + server/apis/v1/handler.go | 269 ++++- server/apis/v1/handler_test.go | 991 ++++++++++++++++++ server/apis/v1/promql_service.go | 238 +++++ server/apis/v1/promql_service_test.go | 473 +++++++++ server/apis/v1/response_metrics.go | 81 ++ server/apis/v1/response_metrics_discovery.go | 23 + server/apis/v1/response_pod.go | 29 + server/cmd/server/start.go | 65 +- server/cmd/server/start_test.go | 4 +- server/routes/routes.go | 26 +- ui/package.json | 3 + ui/src/App.tsx | 2 + .../common/SlidingSidebar/index.tsx | 10 +- .../partials/NodeInfo/partials/Pods/index.tsx | 240 ++++- .../Pods/partials/Containers/index.tsx | 8 +- .../Pods/partials/PodDetails/index.tsx | 110 +- .../{PodInfo => ContainerInfo}/index.test.tsx | 0 .../partials/ContainerInfo/index.tsx | 231 ++++ .../partials/ContainerInfo/style.css | 24 + .../PodDetails/partials/Metrics/index.tsx | 99 ++ .../Metrics/partials/EmptyChart/index.tsx | 16 + .../Metrics/partials/EmptyChart/style.css | 20 + .../Metrics/partials/LineChart/index.tsx | 273 +++++ .../partials/common/Dropdown/index.tsx | 116 ++ .../partials/common/FiltersDropdown/index.tsx | 252 +++++ .../partials/common/TimeRange/index.tsx | 62 ++ .../PodDetails/partials/Metrics/style.css | 0 .../partials/Metrics/utils/constants.ts | 37 + .../PodDetails/partials/PodInfo/index.tsx | 107 -- .../PodDetails/partials/PodLogs/index.tsx | 202 ++-- .../Pods/partials/PodDetails/style.css | 23 + .../partials/SearchablePodsHeatMap/index.tsx | 19 +- .../partials/PodsHeatMap/index.tsx | 44 +- .../partials/HexagonHeatMap/index.tsx | 7 +- .../partials/PodsHeatMap/style.css | 2 + .../plugin/NumaflowMonitorApp/App.tsx | 1 + ui/src/types/declarations/app.d.ts | 1 + ui/src/types/declarations/pods.d.ts | 31 +- 
.../metricsDiscoveryDataFetch.ts | 51 + ui/src/utils/fetchWrappers/metricsFetch.ts | 56 + ui/src/utils/index.tsx | 41 + ui/src/utils/models/systemInfo.ts | 1 + ui/yarn.lock | 164 ++- 55 files changed, 4446 insertions(+), 326 deletions(-) create mode 100644 config/base/numaflow-server/numaflow-server-metrics-proxy-config.yaml create mode 100644 server/apis/v1/promql_service.go create mode 100644 server/apis/v1/promql_service_test.go create mode 100644 server/apis/v1/response_metrics.go create mode 100644 server/apis/v1/response_metrics_discovery.go create mode 100644 server/apis/v1/response_pod.go rename ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/{PodInfo => ContainerInfo}/index.test.tsx (100%) create mode 100644 ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/ContainerInfo/index.tsx create mode 100644 ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/ContainerInfo/style.css create mode 100644 ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/index.tsx create mode 100644 ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/EmptyChart/index.tsx create mode 100644 ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/EmptyChart/style.css create mode 100644 ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/LineChart/index.tsx create mode 100644 ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/common/Dropdown/index.tsx create mode 100644 
ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/common/FiltersDropdown/index.tsx create mode 100644 ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/common/TimeRange/index.tsx create mode 100644 ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/style.css create mode 100644 ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/utils/constants.ts delete mode 100644 ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/PodInfo/index.tsx create mode 100644 ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/style.css create mode 100644 ui/src/utils/fetchWrappers/metricsDiscoveryDataFetch.ts create mode 100644 ui/src/utils/fetchWrappers/metricsFetch.ts diff --git a/config/advanced-install/namespaced-numaflow-server.yaml b/config/advanced-install/namespaced-numaflow-server.yaml index 5262b8056f..d316bf1e00 100644 --- a/config/advanced-install/namespaced-numaflow-server.yaml +++ b/config/advanced-install/namespaced-numaflow-server.yaml @@ -136,6 +136,61 @@ metadata: name: numaflow-server-local-user-config --- apiVersion: v1 +data: + config.yaml: | + # url is a required field, it should be the url of the service to which the metrics proxy will connect + # url: service_name + "." 
+ service_namespace + ".svc.cluster.local" + ":" + port + # example for local prometheus service + # url: http://prometheus-operated.monitoring.svc.cluster.local:9090 + patterns: + - name: mono_vertex_histogram + object: mono-vertex + title: Processing Time Latency + description: This query pattern is for P99,P90 and P50 quantiles for a mono-vertex across different dimensions + expr: | + histogram_quantile($quantile, sum by($dimension,le) (rate($metric_name{$filters}[$duration]))) + params: + - name: quantile + required: true + - name: duration + required: true + - name: start_time + required: false + - name: end_time + required: false + metrics: + - metric_name: monovtx_processing_time_bucket + required_filters: + - namespace + - mvtx_name + dimensions: + - name: pod + # expr: optional expression for prometheus query + # overrides the default expression + filters: + - name: pod + required: false + - name: mono-vertex + # expr: optional expression for prometheus query + # overrides the default expression + # Add histogram metrics similar to the pattern above + #- metric_name: monovtx_sink_time_bucket + # required_filters: + # - namespace + # - mvtx_name + # dimensions: + # - name: pod + # #expr: optional + # filters: + # - name: pod + # required: false + # - name: mono-vertex + # #expr: optional +kind: ConfigMap +metadata: + name: numaflow-server-metrics-proxy-config +--- +apiVersion: v1 data: rbac-conf.yaml: | policy.default: role:readonly @@ -294,6 +349,8 @@ spec: subPath: index.html - mountPath: /etc/numaflow name: rbac-config + - mountPath: /etc/numaflow/metrics-proxy + name: metrics-proxy-config initContainers: - args: - server-init @@ -336,3 +393,6 @@ spec: - configMap: name: numaflow-server-rbac-config name: rbac-config + - configMap: + name: numaflow-server-metrics-proxy-config + name: metrics-proxy-config diff --git a/config/advanced-install/numaflow-server.yaml b/config/advanced-install/numaflow-server.yaml index 5e7982d7a3..b4ba7fd488 100644 --- 
a/config/advanced-install/numaflow-server.yaml +++ b/config/advanced-install/numaflow-server.yaml @@ -143,6 +143,62 @@ metadata: namespace: numaflow-system --- apiVersion: v1 +data: + config.yaml: | + # url is a required field, it should be the url of the service to which the metrics proxy will connect + # url: service_name + "." + service_namespace + ".svc.cluster.local" + ":" + port + # example for local prometheus service + # url: http://prometheus-operated.monitoring.svc.cluster.local:9090 + patterns: + - name: mono_vertex_histogram + object: mono-vertex + title: Processing Time Latency + description: This query pattern is for P99,P90 and P50 quantiles for a mono-vertex across different dimensions + expr: | + histogram_quantile($quantile, sum by($dimension,le) (rate($metric_name{$filters}[$duration]))) + params: + - name: quantile + required: true + - name: duration + required: true + - name: start_time + required: false + - name: end_time + required: false + metrics: + - metric_name: monovtx_processing_time_bucket + required_filters: + - namespace + - mvtx_name + dimensions: + - name: pod + # expr: optional expression for prometheus query + # overrides the default expression + filters: + - name: pod + required: false + - name: mono-vertex + # expr: optional expression for prometheus query + # overrides the default expression + # Add histogram metrics similar to the pattern above + #- metric_name: monovtx_sink_time_bucket + # required_filters: + # - namespace + # - mvtx_name + # dimensions: + # - name: pod + # #expr: optional + # filters: + # - name: pod + # required: false + # - name: mono-vertex + # #expr: optional +kind: ConfigMap +metadata: + name: numaflow-server-metrics-proxy-config + namespace: numaflow-system +--- +apiVersion: v1 data: rbac-conf.yaml: | policy.default: role:readonly @@ -305,6 +361,8 @@ spec: subPath: index.html - mountPath: /etc/numaflow name: rbac-config + - mountPath: /etc/numaflow/metrics-proxy + name: metrics-proxy-config 
initContainers: - args: - server-init @@ -347,3 +405,6 @@ spec: - configMap: name: numaflow-server-rbac-config name: rbac-config + - configMap: + name: numaflow-server-metrics-proxy-config + name: metrics-proxy-config diff --git a/config/base/numaflow-server/kustomization.yaml b/config/base/numaflow-server/kustomization.yaml index 501d806eba..5cf9871e56 100644 --- a/config/base/numaflow-server/kustomization.yaml +++ b/config/base/numaflow-server/kustomization.yaml @@ -7,4 +7,5 @@ resources: - numaflow-server-deployment.yaml - numaflow-server-service.yaml - numaflow-server-local-user-config.yaml - - numaflow-server-secrets.yaml \ No newline at end of file + - numaflow-server-secrets.yaml + - numaflow-server-metrics-proxy-config.yaml \ No newline at end of file diff --git a/config/base/numaflow-server/numaflow-server-deployment.yaml b/config/base/numaflow-server/numaflow-server-deployment.yaml index c07228640d..94916afe91 100644 --- a/config/base/numaflow-server/numaflow-server-deployment.yaml +++ b/config/base/numaflow-server/numaflow-server-deployment.yaml @@ -26,6 +26,9 @@ spec: - name: rbac-config configMap: name: numaflow-server-rbac-config + - name: metrics-proxy-config + configMap: + name: numaflow-server-metrics-proxy-config initContainers: - name: server-init image: quay.io/numaproj/numaflow:latest @@ -73,6 +76,8 @@ spec: subPath: index.html - mountPath: /etc/numaflow name: rbac-config + - mountPath: /etc/numaflow/metrics-proxy + name: metrics-proxy-config env: - name: NAMESPACE valueFrom: diff --git a/config/base/numaflow-server/numaflow-server-metrics-proxy-config.yaml b/config/base/numaflow-server/numaflow-server-metrics-proxy-config.yaml new file mode 100644 index 0000000000..f970cea63b --- /dev/null +++ b/config/base/numaflow-server/numaflow-server-metrics-proxy-config.yaml @@ -0,0 +1,54 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: numaflow-server-metrics-proxy-config +data: + config.yaml: | + # url is a required field, it should be the url of 
the service to which the metrics proxy will connect + # url: service_name + "." + service_namespace + ".svc.cluster.local" + ":" + port + # example for local prometheus service + # url: http://prometheus-operated.monitoring.svc.cluster.local:9090 + patterns: + - name: mono_vertex_histogram + object: mono-vertex + title: Processing Time Latency + description: This query pattern is for P99,P90 and P50 quantiles for a mono-vertex across different dimensions + expr: | + histogram_quantile($quantile, sum by($dimension,le) (rate($metric_name{$filters}[$duration]))) + params: + - name: quantile + required: true + - name: duration + required: true + - name: start_time + required: false + - name: end_time + required: false + metrics: + - metric_name: monovtx_processing_time_bucket + required_filters: + - namespace + - mvtx_name + dimensions: + - name: pod + # expr: optional expression for prometheus query + # overrides the default expression + filters: + - name: pod + required: false + - name: mono-vertex + # expr: optional expression for prometheus query + # overrides the default expression + # Add histogram metrics similar to the pattern above + #- metric_name: monovtx_sink_time_bucket + # required_filters: + # - namespace + # - mvtx_name + # dimensions: + # - name: pod + # #expr: optional + # filters: + # - name: pod + # required: false + # - name: mono-vertex + # #expr: optional diff --git a/config/base/shared-config/numaflow-cmd-params-config.yaml b/config/base/shared-config/numaflow-cmd-params-config.yaml index 81e8c2acb7..03213b3f43 100644 --- a/config/base/shared-config/numaflow-cmd-params-config.yaml +++ b/config/base/shared-config/numaflow-cmd-params-config.yaml @@ -17,7 +17,7 @@ data: # The configuration has to be: lease.duration > lease.renew.deadline > lease.renew.period # controller.leader.election.lease.duration: 15s # - ### The duration that the acting controlplane will retry refreshing leadership before giving up. 
+ ### The duration that the acting controlplane will retry refreshing leadership before giving up. # Default value is 10 seconds. # The configuration has to be: lease.duration > lease.renew.deadline > lease.renew.period # controller.leader.election.lease.renew.deadline: 10s @@ -52,4 +52,4 @@ data: ### The protocol used to connect to the Pipeline daemon service from Numaflow UX server. # Could be either 'grpc' or 'http', defaults to 'grpc'. # - # server.daemon.client.protocol: grpc \ No newline at end of file + # server.daemon.client.protocol: grpc diff --git a/config/install.yaml b/config/install.yaml index 8b354dba04..b82dd97583 100644 --- a/config/install.yaml +++ b/config/install.yaml @@ -28447,6 +28447,62 @@ metadata: namespace: numaflow-system --- apiVersion: v1 +data: + config.yaml: | + # url is a required field, it should be the url of the service to which the metrics proxy will connect + # url: service_name + "." + service_namespace + ".svc.cluster.local" + ":" + port + # example for local prometheus service + # url: http://prometheus-operated.monitoring.svc.cluster.local:9090 + patterns: + - name: mono_vertex_histogram + object: mono-vertex + title: Processing Time Latency + description: This query pattern is for P99,P90 and P50 quantiles for a mono-vertex across different dimensions + expr: | + histogram_quantile($quantile, sum by($dimension,le) (rate($metric_name{$filters}[$duration]))) + params: + - name: quantile + required: true + - name: duration + required: true + - name: start_time + required: false + - name: end_time + required: false + metrics: + - metric_name: monovtx_processing_time_bucket + required_filters: + - namespace + - mvtx_name + dimensions: + - name: pod + # expr: optional expression for prometheus query + # overrides the default expression + filters: + - name: pod + required: false + - name: mono-vertex + # expr: optional expression for prometheus query + # overrides the default expression + # Add histogram metrics similar to the 
pattern above + #- metric_name: monovtx_sink_time_bucket + # required_filters: + # - namespace + # - mvtx_name + # dimensions: + # - name: pod + # #expr: optional + # filters: + # - name: pod + # required: false + # - name: mono-vertex + # #expr: optional +kind: ConfigMap +metadata: + name: numaflow-server-metrics-proxy-config + namespace: numaflow-system +--- +apiVersion: v1 data: rbac-conf.yaml: | policy.default: role:readonly @@ -28822,6 +28878,8 @@ spec: subPath: index.html - mountPath: /etc/numaflow name: rbac-config + - mountPath: /etc/numaflow/metrics-proxy + name: metrics-proxy-config initContainers: - args: - server-init @@ -28864,3 +28922,6 @@ spec: - configMap: name: numaflow-server-rbac-config name: rbac-config + - configMap: + name: numaflow-server-metrics-proxy-config + name: metrics-proxy-config diff --git a/config/namespace-install.yaml b/config/namespace-install.yaml index 3ee395c7ff..ea7a6f6140 100644 --- a/config/namespace-install.yaml +++ b/config/namespace-install.yaml @@ -28335,6 +28335,61 @@ metadata: name: numaflow-server-local-user-config --- apiVersion: v1 +data: + config.yaml: | + # url is a required field, it should be the url of the service to which the metrics proxy will connect + # url: service_name + "." 
+ service_namespace + ".svc.cluster.local" + ":" + port + # example for local prometheus service + # url: http://prometheus-operated.monitoring.svc.cluster.local:9090 + patterns: + - name: mono_vertex_histogram + object: mono-vertex + title: Processing Time Latency + description: This query pattern is for P99,P90 and P50 quantiles for a mono-vertex across different dimensions + expr: | + histogram_quantile($quantile, sum by($dimension,le) (rate($metric_name{$filters}[$duration]))) + params: + - name: quantile + required: true + - name: duration + required: true + - name: start_time + required: false + - name: end_time + required: false + metrics: + - metric_name: monovtx_processing_time_bucket + required_filters: + - namespace + - mvtx_name + dimensions: + - name: pod + # expr: optional expression for prometheus query + # overrides the default expression + filters: + - name: pod + required: false + - name: mono-vertex + # expr: optional expression for prometheus query + # overrides the default expression + # Add histogram metrics similar to the pattern above + #- metric_name: monovtx_sink_time_bucket + # required_filters: + # - namespace + # - mvtx_name + # dimensions: + # - name: pod + # #expr: optional + # filters: + # - name: pod + # required: false + # - name: mono-vertex + # #expr: optional +kind: ConfigMap +metadata: + name: numaflow-server-metrics-proxy-config +--- +apiVersion: v1 data: rbac-conf.yaml: | policy.default: role:readonly @@ -28702,6 +28757,8 @@ spec: subPath: index.html - mountPath: /etc/numaflow name: rbac-config + - mountPath: /etc/numaflow/metrics-proxy + name: metrics-proxy-config initContainers: - args: - server-init @@ -28744,3 +28801,6 @@ spec: - configMap: name: numaflow-server-rbac-config name: rbac-config + - configMap: + name: numaflow-server-metrics-proxy-config + name: metrics-proxy-config diff --git a/examples/21-simple-mono-vertex.yaml b/examples/21-simple-mono-vertex.yaml index a47dbe3123..9ca99cf1bc 100644 --- 
a/examples/21-simple-mono-vertex.yaml +++ b/examples/21-simple-mono-vertex.yaml @@ -1,3 +1,5 @@ +apiVersion: numaflow.numaproj.io/v1alpha1 +kind: MonoVertex metadata: name: simple-mono-vertex spec: diff --git a/go.mod b/go.mod index c9048c770d..22ca740da5 100644 --- a/go.mod +++ b/go.mod @@ -58,6 +58,7 @@ require ( google.golang.org/grpc v1.66.0 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.4.0 google.golang.org/protobuf v1.34.2 + gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.31.0 k8s.io/apimachinery v0.31.0 k8s.io/client-go v0.31.0 @@ -209,7 +210,6 @@ require ( gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apiextensions-apiserver v0.31.0 // indirect k8s.io/gengo v0.0.0-20240911193312-2b36238f13e9 // indirect diff --git a/go.sum b/go.sum index b60aad18ee..c0bb7ce203 100644 --- a/go.sum +++ b/go.sum @@ -396,6 +396,8 @@ github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LF github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= @@ -470,6 +472,8 @@ github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJ github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= 
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/nats-io/jwt/v2 v2.5.8 h1:uvdSzwWiEGWGXf+0Q+70qv6AQdvcvxrv9hPM0RiPamE= diff --git a/server/apis/interface.go b/server/apis/interface.go index 2e2c87d30f..45b1144205 100644 --- a/server/apis/interface.go +++ b/server/apis/interface.go @@ -40,6 +40,8 @@ type Handler interface { ListVertexPods(c *gin.Context) ListPodsMetrics(c *gin.Context) PodLogs(c *gin.Context) + GetMonoVertexPodsInfo(c *gin.Context) + GetVertexPodsInfo(c *gin.Context) GetNamespaceEvents(c *gin.Context) GetPipelineStatus(c *gin.Context) ListMonoVertices(c *gin.Context) @@ -47,4 +49,6 @@ type Handler interface { ListMonoVertexPods(c *gin.Context) CreateMonoVertex(c *gin.Context) GetMonoVertexMetrics(c *gin.Context) + GetMetricData(c *gin.Context) + DiscoverMetrics(c *gin.Context) } diff --git a/server/apis/v1/handler.go b/server/apis/v1/handler.go index 460d863435..20b77f9153 100644 --- a/server/apis/v1/handler.go +++ b/server/apis/v1/handler.go @@ -34,11 +34,12 @@ import ( lru "github.com/hashicorp/golang-lru/v2" admissionv1 "k8s.io/api/admission/v1" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" - metricsversiond 
"k8s.io/metrics/pkg/client/clientset/versioned" + metricsclientv1beta1 "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1" "k8s.io/utils/ptr" dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" @@ -91,7 +92,8 @@ func WithReadOnlyMode() HandlerOption { type handler struct { kubeClient kubernetes.Interface - metricsClient *metricsversiond.Clientset + metricsClient metricsclientv1beta1.MetricsV1beta1Interface + promQlServiceObj PromQl numaflowClient dfv1clients.NumaflowV1alpha1Interface daemonClientsCache *lru.Cache[string, daemonclient.DaemonClient] mvtDaemonClientsCache *lru.Cache[string, mvtdaemonclient.MonoVertexDaemonClient] @@ -102,7 +104,7 @@ type handler struct { } // NewHandler is used to provide a new instance of the handler type -func NewHandler(ctx context.Context, dexObj *DexObject, localUsersAuthObject *LocalUsersAuthObject, opts ...HandlerOption) (*handler, error) { +func NewHandler(ctx context.Context, dexObj *DexObject, localUsersAuthObject *LocalUsersAuthObject, promQlServiceObj PromQl, opts ...HandlerOption) (*handler, error) { var ( k8sRestConfig *rest.Config err error @@ -115,7 +117,7 @@ func NewHandler(ctx context.Context, dexObj *DexObject, localUsersAuthObject *Lo if err != nil { return nil, fmt.Errorf("failed to get kubeclient, %w", err) } - metricsClient := metricsversiond.NewForConfigOrDie(k8sRestConfig) + metricsClient := metricsclientv1beta1.NewForConfigOrDie(k8sRestConfig) numaflowClient := dfv1versiond.NewForConfigOrDie(k8sRestConfig).NumaflowV1alpha1() daemonClientsCache, _ := lru.NewWithEvict[string, daemonclient.DaemonClient](500, func(key string, value daemonclient.DaemonClient) { _ = value.Close() @@ -123,6 +125,7 @@ func NewHandler(ctx context.Context, dexObj *DexObject, localUsersAuthObject *Lo mvtDaemonClientsCache, _ := lru.NewWithEvict[string, mvtdaemonclient.MonoVertexDaemonClient](500, func(key string, value mvtdaemonclient.MonoVertexDaemonClient) { _ = value.Close() }) + o := 
defaultHandlerOptions() for _, opt := range opts { if opt != nil { @@ -132,6 +135,7 @@ func NewHandler(ctx context.Context, dexObj *DexObject, localUsersAuthObject *Lo return &handler{ kubeClient: kubeClient, metricsClient: metricsClient, + promQlServiceObj: promQlServiceObj, numaflowClient: numaflowClient, daemonClientsCache: daemonClientsCache, mvtDaemonClientsCache: mvtDaemonClientsCache, @@ -901,7 +905,7 @@ func (h *handler) ListPodsMetrics(c *gin.Context) { ns := c.Param("namespace") limit, _ := strconv.ParseInt(c.Query("limit"), 10, 64) - metrics, err := h.metricsClient.MetricsV1beta1().PodMetricses(ns).List(c, metav1.ListOptions{ + metrics, err := h.metricsClient.PodMetricses(ns).List(c, metav1.ListOptions{ Limit: limit, Continue: c.Query("continue"), }) @@ -923,6 +927,7 @@ func (h *handler) PodLogs(c *gin.Context) { Container: c.Query("container"), Follow: c.Query("follow") == "true", TailLines: tailLines, + Previous: c.Query("previous") == "true", } stream, err := h.kubeClient.CoreV1().Pods(ns).GetLogs(pod, logOptions).Stream(c) @@ -936,6 +941,60 @@ func (h *handler) PodLogs(c *gin.Context) { h.streamLogs(c, stream) } +func (h *handler) GetMonoVertexPodsInfo(c *gin.Context) { + var response = make([]PodDetails, 0) + ns, monoVertex := c.Param("namespace"), c.Param("mono-vertex") + pods, err := h.kubeClient.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=%s", dfv1.KeyMonoVertexName, monoVertex), + }) + if err != nil { + h.respondWithError(c, fmt.Sprintf("GetMonoVertexPodInfo: Failed to get a list of pods: namespace %q mono vertex %q: %s", + ns, monoVertex, err.Error())) + return + } + if pods == nil || len(pods.Items) == 0 { + h.respondWithError(c, fmt.Sprintf("GetMonoVertexPodInfo: No pods found for mono vertex %q in namespace %q", monoVertex, ns)) + return + } + for _, pod := range pods.Items { + podDetails, err := h.getPodDetails(pod) + if err != nil { + h.respondWithError(c, fmt.Sprintf("GetMonoVertexPodInfo: 
Failed to get the pod details: %v", err)) + return + } else { + response = append(response, podDetails) + } + } + c.JSON(http.StatusOK, NewNumaflowAPIResponse(nil, response)) +} + +func (h *handler) GetVertexPodsInfo(c *gin.Context) { + var response = make([]PodDetails, 0) + ns, pipeline, vertex := c.Param("namespace"), c.Param("pipeline"), c.Param("vertex") + pods, err := h.kubeClient.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=%s,%s=%s", dfv1.KeyPipelineName, pipeline, dfv1.KeyVertexName, vertex), + }) + if err != nil { + h.respondWithError(c, fmt.Sprintf("GetVertexPodsInfo: Failed to get a list of pods: namespace %q pipeline %q vertex %q: %s", + ns, pipeline, vertex, err.Error())) + return + } + if pods == nil || len(pods.Items) == 0 { + h.respondWithError(c, fmt.Sprintf("GetVertexPodsInfo: No pods found for pipeline %q vertex %q in namespace %q", pipeline, vertex, ns)) + return + } + for _, pod := range pods.Items { + podDetails, err := h.getPodDetails(pod) + if err != nil { + h.respondWithError(c, fmt.Sprintf("GetVertexPodsInfo: Failed to get the pod details: %v", err)) + return + } else { + response = append(response, podDetails) + } + } + c.JSON(http.StatusOK, NewNumaflowAPIResponse(nil, response)) + +} func (h *handler) parseTailLines(query string) *int64 { if query == "" { return nil @@ -1180,6 +1239,94 @@ func (h *handler) GetMonoVertexHealth(c *gin.Context) { c.JSON(http.StatusOK, NewNumaflowAPIResponse(nil, response)) } +func (h *handler) GetMetricData(c *gin.Context) { + var requestBody MetricsRequestBody + if h.promQlServiceObj == nil { + h.respondWithError(c, "Failed to get the prometheus query service") + return + } + if err := bindJson(c, &requestBody); err != nil { + h.respondWithError(c, fmt.Sprintf("Failed to decode JSON request body to metrics query spec, %s", err.Error())) + return + } + // builds prom query + promQl, err := h.promQlServiceObj.BuildQuery(requestBody) + if err != nil { + 
h.respondWithError(c, fmt.Sprintf("Failed to build the prometheus query%v", err)) + return + } + // default start time is 30 minutes before the current time + if requestBody.StartTime == "" { + requestBody.StartTime = time.Now().Add(-30 * time.Minute).Format(time.RFC3339) + } + // default end time is the current time + if requestBody.EndTime == "" { + requestBody.EndTime = time.Now().Format(time.RFC3339) + } + + startTime, _ := time.Parse(time.RFC3339, requestBody.StartTime) + endTime, _ := time.Parse(time.RFC3339, requestBody.EndTime) + + result, err := h.promQlServiceObj.QueryPrometheus(context.Background(), promQl, startTime, endTime) + if err != nil { + h.respondWithError(c, fmt.Sprintf("Failed to execute the prometheus query, %s", err.Error())) + return + } + c.JSON(http.StatusOK, NewNumaflowAPIResponse(nil, result)) +} + +// DiscoverMetrics is used to provide a metrics list for each +// dimension along with necessary params and filters for a given object +func (h *handler) DiscoverMetrics(c *gin.Context) { + // Get the object for which the metrics are to be discovered + // Ex. mono-vertex, pipeline, etc. 
+ object := c.Param("object") + + configData := h.promQlServiceObj.GetConfigData() + if configData == nil { + h.respondWithError(c, "PrometheusClient metric config is not available") + return + } + + var discoveredMetrics MetricsDiscoveryResponse + + for _, pattern := range configData.Patterns { + if pattern.Object == object { + for _, metric := range pattern.Metrics { + var requiredFilters []Filter + // Populate the required filters + for _, filter := range metric.Filters { + requiredFilters = append(requiredFilters, Filter{ + Name: filter, + Required: true, + }) + } + // Computing dimension data for each metric + var dimensionData []Dimensions + for _, dimension := range metric.Dimensions { + var combinedFilters = requiredFilters + // Add the dimension filters + for _, filter := range dimension.Filters { + combinedFilters = append(combinedFilters, Filter{ + Name: filter.Name, + Required: filter.Required, + }) + } + dimensionData = append(dimensionData, Dimensions{ + Name: dimension.Name, + Filters: combinedFilters, + Params: pattern.Params, + }) + } + + discoveredMetrics = append(discoveredMetrics, NewDiscoveryResponse(metric.Name, dimensionData)) + } + } + } + + c.JSON(http.StatusOK, NewNumaflowAPIResponse(nil, discoveredMetrics)) +} + // getAllNamespaces is a utility used to fetch all the namespaces in the cluster // except the kube system namespaces func getAllNamespaces(h *handler) ([]string, error) { @@ -1425,3 +1572,115 @@ func (h *handler) getMonoVertexDaemonClient(ns, mvtName string) (mvtdaemonclient return mvtDaemonClient, nil } } + +func (h *handler) getPodDetails(pod corev1.Pod) (PodDetails, error) { + podDetails := PodDetails{ + Name: pod.Name, + Status: string(pod.Status.Phase), + Message: pod.Status.Message, + Reason: pod.Status.Reason, + } + + metricsClient := h.metricsClient + + // container details of a pod + containerDetails := h.getContainerDetails(pod) + podDetails.ContainerDetailsMap = containerDetails + + // cpu/memory details of a pod + 
podMetrics, err := metricsClient.PodMetricses(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + if err == nil { + totalCPU := resource.NewQuantity(0, resource.DecimalSI) + totalMemory := resource.NewQuantity(0, resource.BinarySI) + for _, container := range podMetrics.Containers { + containerName := container.Name + cpuQuantity := container.Usage.Cpu() + memQuantity := container.Usage.Memory() + details, ok := containerDetails[containerName] + if !ok { + details = ContainerDetails{Name: container.Name} // Initialize if not found + } + if cpuQuantity != nil { + details.TotalCPU = strconv.FormatInt(cpuQuantity.MilliValue(), 10) + "m" + totalCPU.Add(*cpuQuantity) + } + if memQuantity != nil { + details.TotalMemory = strconv.FormatInt(memQuantity.Value()/(1024*1024), 10) + "Mi" + totalMemory.Add(*memQuantity) + } + containerDetails[containerName] = details + } + if totalCPU != nil { + podDetails.TotalCPU = strconv.FormatInt(totalCPU.MilliValue(), 10) + "m" + } + + if totalMemory != nil { + podDetails.TotalMemory = strconv.FormatInt(totalMemory.Value()/(1024*1024), 10) + "Mi" + } + } + return podDetails, nil +} + +func (h *handler) getContainerDetails(pod corev1.Pod) map[string]ContainerDetails { + var containerDetailsMap = make(map[string]ContainerDetails) + for _, status := range pod.Status.ContainerStatuses { + containerName := status.Name + details := ContainerDetails{ + Name: status.Name, + ID: status.ContainerID, + State: h.getContainerStatus(status.State), + RestartCount: status.RestartCount, + } + if status.State.Waiting != nil { + details.WaitingReason = status.State.Waiting.Reason + details.WaitingMessage = status.State.Waiting.Message + } + if status.LastTerminationState.Terminated != nil { + details.LastTerminationReason = status.LastTerminationState.Terminated.Reason + details.LastTerminationMessage = status.LastTerminationState.Terminated.Message + } + if status.State.Running != nil { + details.LastStartedAt = 
status.State.Running.StartedAt.Format(time.RFC3339) + } + containerDetailsMap[containerName] = details + } + + // Get CPU/Memory requests and limits from Pod spec + for _, container := range pod.Spec.Containers { + cpuRequest := container.Resources.Requests.Cpu().MilliValue() + memRequest := container.Resources.Requests.Memory().Value() / (1024 * 1024) + cpuLimit := container.Resources.Limits.Cpu().MilliValue() + memLimit := container.Resources.Limits.Memory().Value() / (1024 * 1024) + // Get the existing ContainerDetails or create a new one + details, ok := containerDetailsMap[container.Name] + if !ok { + details = ContainerDetails{Name: container.Name} // Initialize if not found + } + if cpuRequest != 0 { + details.RequestedCPU = strconv.FormatInt(cpuRequest, 10) + "m" + } + if memRequest != 0 { + details.RequestedMemory = strconv.FormatInt(memRequest, 10) + "Mi" + } + if cpuLimit != 0 { + details.LimitCPU = strconv.FormatInt(cpuLimit, 10) + "m" + } + if memLimit != 0 { + details.LimitMemory = strconv.FormatInt(memLimit, 10) + "Mi" + } + containerDetailsMap[container.Name] = details + } + return containerDetailsMap +} + +func (h *handler) getContainerStatus(state corev1.ContainerState) string { + if state.Running != nil { + return "Running" + } else if state.Waiting != nil { + return "Waiting" + } else if state.Terminated != nil { + return "Terminated" + } else { + return "Unknown" + } +} diff --git a/server/apis/v1/handler_test.go b/server/apis/v1/handler_test.go index 2fa52a771d..aeae24d8a0 100644 --- a/server/apis/v1/handler_test.go +++ b/server/apis/v1/handler_test.go @@ -17,9 +17,31 @@ limitations under the License. 
package v1 import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "reflect" "testing" + "time" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + k8stesting "k8s.io/client-go/testing" + metricsv1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1" + metricsfake "k8s.io/metrics/pkg/client/clientset/versioned/fake" + metricsclientv1beta1 "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1" + + "github.com/gin-gonic/gin" + dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" + "github.com/prometheus/common/model" "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/runtime" + fakeClient "k8s.io/client-go/kubernetes/fake" ) var ( @@ -27,6 +49,89 @@ var ( invalidPatchSpec = `{"spec": {"limits": {"readTimeout": "5s"}}}` ) +// Mock PromQl service for testing +type mockPromQlService struct { + configData *PrometheusConfig +} + +func NewmockPromQlService(configData *PrometheusConfig) PromQl { + return &mockPromQlService{ + configData: configData, + } +} + +func (m *mockPromQlService) BuildQuery(MetricsRequestBody) (string, error) { + return "", nil +} + +func (m *mockPromQlService) QueryPrometheus(context.Context, string, time.Time, time.Time) (model.Value, error) { + return nil, nil +} + +func (m *mockPromQlService) GetConfigData() *PrometheusConfig { + return m.configData +} + +func (m *mockPromQlService) PopulateReqMap(MetricsRequestBody) map[string]string { + return map[string]string{} +} + +func (m *mockPromQlService) DisableMetricsChart() bool { + return m.configData == nil +} + +type MockMetricsClient struct { + podMetrics *metricsv1beta1.PodMetrics +} + +func (m *MockMetricsClient) PodMetricses(namespace string) metricsclientv1beta1.PodMetricsInterface { + return &MockPodMetricsInterface{podMetrics: m.podMetrics} +} +func (m *MockMetricsClient) NodeMetricses() 
metricsclientv1beta1.NodeMetricsInterface { + return &MockNodeMetricsInterface{} +} + +func (m *MockMetricsClient) RESTClient() rest.Interface { + return nil +} + +type MockPodMetricsInterface struct { + podMetrics *metricsv1beta1.PodMetrics +} +type MockNodeMetricsInterface struct{} + +func (m *MockNodeMetricsInterface) Get(ctx context.Context, name string, opts metav1.GetOptions) (*metricsv1beta1.NodeMetrics, error) { + // Mock the node metrics data here + return nil, nil +} + +func (m *MockNodeMetricsInterface) List(ctx context.Context, opts metav1.ListOptions) (*metricsv1beta1.NodeMetricsList, error) { + // Mock the node metrics data here + return nil, nil +} + +func (m *MockNodeMetricsInterface) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return nil, nil +} + +func (m *MockPodMetricsInterface) Get(ctx context.Context, name string, opts metav1.GetOptions) (*metricsv1beta1.PodMetrics, error) { + // Mock the pod metrics data here + if m.podMetrics == nil { + return nil, fmt.Errorf("pod metrics not found") + } + return m.podMetrics, nil +} + +func (m *MockPodMetricsInterface) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + // Mock the pod metrics data here + return nil, nil +} + +func (m *MockPodMetricsInterface) List(ctx context.Context, opts metav1.ListOptions) (*metricsv1beta1.PodMetricsList, error) { + // Mock the pod metrics data here + return nil, nil +} + func TestValidatePipelinePatch(t *testing.T) { err := validatePipelinePatch([]byte(validPatchSpec)) @@ -37,3 +142,889 @@ func TestValidatePipelinePatch(t *testing.T) { assert.Equal(t, "only spec.lifecycle is allowed for patching", err.Error()) } + +func TestHandler_DiscoverMetrics(t *testing.T) { + tests := []struct { + name string + object string + configPatterns []Pattern + want MetricsDiscoveryResponse + }{ + { + name: "empty patterns", + object: "pipeline", + configPatterns: []Pattern{}, + want: MetricsDiscoveryResponse{}, + }, + { + 
name: "no matching object", + object: "pipeline", + configPatterns: []Pattern{ + { + Object: "vertex", + Metrics: []Metric{ + { + Name: "test_metric", + Filters: []string{"namespace"}, + }, + }, + }, + }, + want: []DiscoveryResponse{}, + }, + { + name: "single metric with required filters", + object: "pipeline", + configPatterns: []Pattern{ + { + Object: "pipeline", + Params: []Params{ + { + Name: "quantile", + Required: true, + }, + }, + Metrics: []Metric{ + { + Name: "processing_rate", + Filters: []string{"namespace", "pipeline"}, + Dimensions: []Dimension{ + { + Name: "vertex", + Filters: []Filter{ + {Name: "vertex", Required: true}, + }, + }, + }, + }, + }, + }, + }, + want: []DiscoveryResponse{ + { + MetricName: "processing_rate", + Dimensions: []Dimensions{ + { + Name: "vertex", + Filters: []Filter{ + {Name: "namespace", Required: true}, + {Name: "pipeline", Required: true}, + {Name: "vertex", Required: true}, + }, + Params: []Params{{ + Name: "quantile", + Required: true, + }}, + }, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + // Create mock promql service + promQlServiceObj := NewmockPromQlService(&PrometheusConfig{ + Patterns: tt.configPatterns, + }) + + // Create handler with service + h := &handler{ + promQlServiceObj: promQlServiceObj, + } + + // Create gin test context + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + c.Params = []gin.Param{ + { + Key: "object", + Value: tt.object, + }, + } + + // Call the handler + h.DiscoverMetrics(c) + + // Check response + var response NumaflowAPIResponse + err := json.Unmarshal(w.Body.Bytes(), &response) + if err != nil { + t.Fatalf("Failed to unmarshal response: %v", err) + } + + // Convert response data to MetricsDiscoveryResponse + responseBytes, err := json.Marshal(response.Data) + if err != nil { + t.Fatalf("Failed to marshal response data: %v", err) + } + + var got MetricsDiscoveryResponse + if err := json.Unmarshal(responseBytes, &got); 
err != nil { + t.Fatalf("Failed to unmarshal metrics discovery response: %v", err) + } + + if !reflect.DeepEqual(got, tt.want) && len(got) != 0 { + t.Errorf("DiscoverMetrics() got = %v, want %v", got, tt.want) + } + }) + } +} + +func TestHandler_GetMonoVertexPodsInfo(t *testing.T) { + tests := []struct { + name string + namespace string + monoVertex string + pods *corev1.PodList + podMetrics *metricsv1beta1.PodMetricsList + expectedCode int + expectedError string + simulateError bool + }{ + { + name: "successful get pods info", + namespace: "test-ns", + monoVertex: "test-mvt", + pods: &corev1.PodList{ + Items: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod-1", + Namespace: "test-ns", + Labels: map[string]string{ + "numaflow.numaproj.io/mono-vertex-name": "test-mvt", + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + ContainerStatuses: []corev1.ContainerStatus{ + { + Name: "test-container", + State: corev1.ContainerState{Running: &corev1.ContainerStateRunning{}}, + }, + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test-container", + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("200m"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + }, + }, + }, + }, + }, + }, + }, + podMetrics: &metricsv1beta1.PodMetricsList{ + Items: []metricsv1beta1.PodMetrics{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod-1", + Namespace: "test-ns", + }, + Containers: []metricsv1beta1.ContainerMetrics{ + { + Name: "test-container", + Usage: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("150m"), + corev1.ResourceMemory: resource.MustParse("150Mi"), + }, + }, + }, + }, + }, + }, + expectedCode: http.StatusOK, + }, + { + name: "no pods found", + namespace: "test-ns", + monoVertex: 
"test-mvt", + pods: &corev1.PodList{ + Items: []corev1.Pod{}, + }, + expectedCode: http.StatusOK, + expectedError: "GetMonoVertexPodInfo: No pods found for mono vertex \"test-mvt\" in namespace \"test-ns\"", + }, + { + name: "error listing pods", + namespace: "test-ns", + monoVertex: "test-mvt", + pods: nil, + expectedCode: http.StatusOK, + expectedError: "GetMonoVertexPodInfo: Failed to get a list of pods: namespace \"test-ns\" mono vertex \"test-mvt\":", + simulateError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + c.Params = gin.Params{ + {Key: "namespace", Value: tt.namespace}, + {Key: "mono-vertex", Value: tt.monoVertex}, + } + + kubeClient := fakeClient.NewSimpleClientset() + if tt.simulateError { + // Create a more specific reactor that matches the exact List call + kubeClient.Fake.PrependReactor("list", "pods", func(action k8stesting.Action) (bool, runtime.Object, error) { + listAction, ok := action.(k8stesting.ListAction) + if !ok { + return false, nil, nil + } + // Verify this is the correct list call + if listAction.GetListRestrictions().Labels.String() == fmt.Sprintf("%s=%s", dfv1.KeyMonoVertexName, tt.monoVertex) { + return true, nil, fmt.Errorf("simulated error") + } + return false, nil, nil + }) + + // Verify the reactor is working + _, err := kubeClient.CoreV1().Pods(tt.namespace).List(context.TODO(), metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=%s", dfv1.KeyMonoVertexName, tt.monoVertex), + }) + assert.Error(t, err, "Expected error from fake client") + assert.Contains(t, err.Error(), "simulated error") + } else if tt.pods != nil { + for _, pod := range tt.pods.Items { + _, err := kubeClient.CoreV1().Pods(tt.namespace).Create(context.TODO(), &pod, metav1.CreateOptions{}) + assert.NoError(t, err) + } + // Only verify pod count for non-error cases + if tt.name == "successful get pods info" { + pods, err := 
kubeClient.CoreV1().Pods(tt.namespace).List(context.TODO(), metav1.ListOptions{}) + assert.NoError(t, err) + assert.Len(t, pods.Items, 1, "Expected one pod to be created") + } + } + + metricsClient := metricsfake.NewSimpleClientset() + if tt.podMetrics != nil { + // Setup reactor for Get() instead of List() + metricsClient.Fake.PrependReactor("get", "pods", func(action k8stesting.Action) (bool, runtime.Object, error) { + getAction, ok := action.(k8stesting.GetAction) + if !ok { + t.Errorf("Expected GetAction but got %v", action) + return false, nil, nil + } + + // Return the metrics for the specific pod + for _, metric := range tt.podMetrics.Items { + if metric.Name == getAction.GetName() && metric.Namespace == getAction.GetNamespace() { + return true, &metric, nil + } + } + return true, nil, fmt.Errorf("pod metrics not found") + }) + } + + h := &handler{ + kubeClient: kubeClient, + metricsClient: metricsClient.MetricsV1beta1(), + } + + h.GetMonoVertexPodsInfo(c) + + assert.Equal(t, tt.expectedCode, w.Code) + + var response NumaflowAPIResponse + err := json.Unmarshal(w.Body.Bytes(), &response) + assert.NoError(t, err) + + if tt.expectedError != "" { + assert.NotNil(t, response.ErrMsg) + assert.Contains(t, *response.ErrMsg, tt.expectedError) + } else { + assert.Nil(t, response.ErrMsg) + assert.NotNil(t, response.Data) + + // Convert response.Data to []PodInfo + responseBytes, err := json.Marshal(response.Data) + assert.NoError(t, err) + var podInfos []PodDetails + err = json.Unmarshal(responseBytes, &podInfos) + assert.NoError(t, err) + + assert.Len(t, podInfos, len(tt.pods.Items)) + + if len(podInfos) > 0 { + assert.Equal(t, "test-pod-1", podInfos[0].Name) + assert.Equal(t, string(corev1.PodRunning), podInfos[0].Status) + assert.Equal(t, "150m", podInfos[0].TotalCPU) + assert.Equal(t, "150Mi", podInfos[0].TotalMemory) + } + } + }) + } +} + +func TestHandler_GetVertexPodsInfo(t *testing.T) { + tests := []struct { + name string + namespace string + pipeline string + 
vertex string + pods *corev1.PodList + podMetrics *metricsv1beta1.PodMetricsList + expectedCode int + expectedError string + simulateError bool + }{ + { + name: "successful get pods info", + namespace: "test-ns", + pipeline: "test-pipeline", + vertex: "test-vertex", + expectedCode: http.StatusOK, + pods: &corev1.PodList{ + Items: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod-1", + Namespace: "test-ns", + Labels: map[string]string{ + dfv1.KeyPipelineName: "test-pipeline", + dfv1.KeyVertexName: "test-vertex", + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + ContainerStatuses: []corev1.ContainerStatus{ + { + Name: "test-container", + State: corev1.ContainerState{Running: &corev1.ContainerStateRunning{}}, + }, + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test-container", + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("200m"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + }, + }, + }, + }, + }, + }, + }, + podMetrics: &metricsv1beta1.PodMetricsList{ + Items: []metricsv1beta1.PodMetrics{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod-1", + Namespace: "test-ns", + }, + Containers: []metricsv1beta1.ContainerMetrics{ + { + Name: "test-container", + Usage: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("150m"), + corev1.ResourceMemory: resource.MustParse("150Mi"), + }, + }, + }, + }, + }, + }, + }, + { + name: "error_listing_pods", + namespace: "test-ns", + pipeline: "test-pipeline", + vertex: "test-vertex", + expectedCode: http.StatusOK, + expectedError: "GetVertexPodsInfo: Failed to get a list of pods: namespace \"test-ns\" pipeline \"test-pipeline\" vertex \"test-vertex\": simulated error", + simulateError: true, + }, + { + name: "no_pods_found", + 
namespace: "test-ns", + pipeline: "test-pipeline", + vertex: "test-vertex", + expectedCode: http.StatusOK, + expectedError: "GetVertexPodsInfo: No pods found for pipeline \"test-pipeline\" vertex \"test-vertex\" in namespace \"test-ns\"", + pods: &corev1.PodList{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + c.Params = gin.Params{ + {Key: "namespace", Value: tt.namespace}, + {Key: "pipeline", Value: tt.pipeline}, + {Key: "vertex", Value: tt.vertex}, + } + + kubeClient := fakeClient.NewSimpleClientset() + if tt.simulateError { + kubeClient.Fake.PrependReactor("list", "pods", func(action k8stesting.Action) (bool, runtime.Object, error) { + listAction, ok := action.(k8stesting.ListAction) + if !ok { + return false, nil, nil + } + expectedSelector := fmt.Sprintf("%s=%s,%s=%s", + dfv1.KeyPipelineName, tt.pipeline, + dfv1.KeyVertexName, tt.vertex, + ) + if listAction.GetListRestrictions().Labels.String() == expectedSelector { + return true, nil, fmt.Errorf("simulated error") + } + return false, nil, nil + }) + + // Verify the reactor is working + _, err := kubeClient.CoreV1().Pods(tt.namespace).List(context.TODO(), metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=%s,%s=%s", + dfv1.KeyPipelineName, tt.pipeline, + dfv1.KeyVertexName, tt.vertex, + ), + }) + assert.Error(t, err, "Expected error from fake client") + assert.Contains(t, err.Error(), "simulated error") + } else if tt.pods != nil { + for _, pod := range tt.pods.Items { + _, err := kubeClient.CoreV1().Pods(tt.namespace).Create(context.TODO(), &pod, metav1.CreateOptions{}) + assert.NoError(t, err) + } + + if tt.name == "successful get pods info" { + pods, err := kubeClient.CoreV1().Pods(tt.namespace).List(context.TODO(), metav1.ListOptions{}) + assert.NoError(t, err) + assert.Len(t, pods.Items, 1, "Expected one pod to be created") + } + } + + metricsClient := metricsfake.NewSimpleClientset() + if tt.podMetrics 
!= nil { + metricsClient.Fake.PrependReactor("get", "pods", func(action k8stesting.Action) (bool, runtime.Object, error) { + getAction, ok := action.(k8stesting.GetAction) + if !ok { + return false, nil, nil + } + + for _, metric := range tt.podMetrics.Items { + if metric.Name == getAction.GetName() && metric.Namespace == getAction.GetNamespace() { + return true, &metric, nil + } + } + return true, nil, fmt.Errorf("pod metrics not found") + }) + } + + h := &handler{ + kubeClient: kubeClient, + metricsClient: metricsClient.MetricsV1beta1(), + } + + h.GetVertexPodsInfo(c) + + assert.Equal(t, tt.expectedCode, w.Code) + + var response NumaflowAPIResponse + err := json.Unmarshal(w.Body.Bytes(), &response) + assert.NoError(t, err) + + if tt.expectedError != "" { + assert.NotNil(t, response.ErrMsg) + assert.Contains(t, *response.ErrMsg, tt.expectedError) + } else { + assert.Nil(t, response.ErrMsg) + podInfos := make([]PodDetails, 0) + responseBytes, err := json.Marshal(response.Data) + assert.NoError(t, err) + err = json.Unmarshal(responseBytes, &podInfos) + assert.NoError(t, err) + assert.Len(t, podInfos, 1) + assert.Equal(t, "test-pod-1", podInfos[0].Name) + assert.Equal(t, string(corev1.PodRunning), podInfos[0].Status) + assert.Equal(t, "150m", podInfos[0].TotalCPU) + assert.Equal(t, "150Mi", podInfos[0].TotalMemory) + } + }) + } +} + +func TestHandler_GetPodDetails(t *testing.T) { + + mockPodMetrics := &metricsv1beta1.PodMetrics{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "test-namespace", + }, + Containers: []metricsv1beta1.ContainerMetrics{ + { + Name: "container-1", + Usage: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + }, + { + Name: "container-2", + Usage: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("150m"), + corev1.ResourceMemory: resource.MustParse("300Mi"), + }, + }, + }, + } + + h := handler{ + metricsClient: 
&MockMetricsClient{podMetrics: mockPodMetrics}, + } + now := metav1.NewTime(time.Now()) // Initialize now here + + tests := []struct { + name string + pod corev1.Pod + expectedDetails PodDetails + expectedErr error + }{ + { + name: "Successful pod details retrieval", + pod: corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "test-namespace", + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + ContainerStatuses: []corev1.ContainerStatus{ + { + Name: "container-1", + ContainerID: "docker://container-1-id", + State: corev1.ContainerState{Running: &corev1.ContainerStateRunning{StartedAt: now}}, + RestartCount: 0, + }, + { + Name: "container-2", + State: corev1.ContainerState{Waiting: &corev1.ContainerStateWaiting{Reason: "WaitingReason", Message: "WaitingMessage"}}, + }, + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "container-1", + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI), + corev1.ResourceMemory: *resource.NewQuantity(200*1024*1024, + resource.BinarySI), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI), + corev1.ResourceMemory: *resource.NewQuantity(400*1024*1024, resource.BinarySI), + }, + }, + }, + { + Name: "container-2", + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: *resource.NewMilliQuantity(150, resource.DecimalSI), + corev1.ResourceMemory: *resource.NewQuantity(300*1024*1024, resource.BinarySI), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI), + corev1.ResourceMemory: *resource.NewQuantity(600*1024*1024, resource.BinarySI), + }, + }, + }, + }, + }, + }, + expectedDetails: PodDetails{ + Name: "test-pod", + Status: "Running", + Message: "", + Reason: "", + TotalCPU: "250m", + TotalMemory: "500Mi", + 
ContainerDetailsMap: map[string]ContainerDetails{ + "container-1": { + Name: "container-1", + ID: "docker://container-1-id", + State: "Running", + RestartCount: 0, + LastStartedAt: now.Format(time.RFC3339), + RequestedCPU: "100m", + RequestedMemory: "200Mi", + LimitCPU: "200m", + LimitMemory: "400Mi", + TotalCPU: "100m", + TotalMemory: "200Mi", + LastTerminationReason: "", + LastTerminationMessage: "", + WaitingReason: "", + WaitingMessage: "", + }, + "container-2": { + Name: "container-2", + State: "Waiting", + RestartCount: 0, + WaitingReason: "WaitingReason", + WaitingMessage: "WaitingMessage", + RequestedCPU: "150m", + RequestedMemory: "300Mi", + LimitCPU: "300m", + LimitMemory: "600Mi", + TotalCPU: "150m", + TotalMemory: "300Mi", + LastTerminationReason: "", + LastTerminationMessage: "", + LastStartedAt: "", + ID: "", + }, + }, + }, + expectedErr: nil, + }, + // Add more test cases for different scenarios: + // - Pod with terminated containers + // - Pod with no container statuses + // - Error getting pod metrics + // ... 
+ } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + podDetails, err := h.getPodDetails(tt.pod) + + assert.Equal(t, tt.expectedErr, err) + assert.Equal(t, tt.expectedDetails, podDetails) + }) + } +} + +func TestHandler_GetContainerDetails(t *testing.T) { + now := metav1.NewTime(time.Now()) + + tests := []struct { + name string + pod corev1.Pod + expectedResult map[string]ContainerDetails + }{ + { + name: "Pod with running and waiting containers", + pod: corev1.Pod{ + Status: corev1.PodStatus{ + ContainerStatuses: []corev1.ContainerStatus{ + { + Name: "running-container", + ContainerID: "docker://abc123", + RestartCount: 2, + State: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{ + StartedAt: now, + }, + }, + }, + { + Name: "waiting-container", + ContainerID: "docker://def456", + RestartCount: 1, + State: corev1.ContainerState{ + Waiting: &corev1.ContainerStateWaiting{ + Reason: "ImagePullBackOff", + Message: "Back-off pulling image", + }, + }, + LastTerminationState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Reason: "Error", + Message: "Container crashed", + }, + }, + }, + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "running-container", + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("200m"), + corev1.ResourceMemory: resource.MustParse("400Mi"), + }, + }, + }, + { + Name: "waiting-container", + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("150m"), + corev1.ResourceMemory: resource.MustParse("300Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("300m"), + corev1.ResourceMemory: resource.MustParse("600Mi"), + }, + }, + }, + }, + }, + }, + expectedResult: 
map[string]ContainerDetails{ + "running-container": { + Name: "running-container", + ID: "docker://abc123", + State: "Running", + RestartCount: 2, + LastStartedAt: now.Format(time.RFC3339), + RequestedCPU: "100m", + RequestedMemory: "200Mi", + LimitCPU: "200m", + LimitMemory: "400Mi", + WaitingReason: "", + WaitingMessage: "", + LastTerminationReason: "", + LastTerminationMessage: "", + }, + "waiting-container": { + Name: "waiting-container", + ID: "docker://def456", + State: "Waiting", + RestartCount: 1, + LastStartedAt: "", + RequestedCPU: "150m", + RequestedMemory: "300Mi", + LimitCPU: "300m", + LimitMemory: "600Mi", + WaitingReason: "ImagePullBackOff", + WaitingMessage: "Back-off pulling image", + LastTerminationReason: "Error", + LastTerminationMessage: "Container crashed", + }, + }, + }, + { + name: "Pod with no container statuses but with spec", + pod: corev1.Pod{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "spec-only-container", + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("200m"), + corev1.ResourceMemory: resource.MustParse("400Mi"), + }, + }, + }, + }, + }, + }, + expectedResult: map[string]ContainerDetails{ + "spec-only-container": { + Name: "spec-only-container", + RequestedCPU: "100m", + RequestedMemory: "200Mi", + LimitCPU: "200m", + LimitMemory: "400Mi", + }, + }, + }, + { + name: "Empty pod", + pod: corev1.Pod{}, + expectedResult: map[string]ContainerDetails{}, + }, + } + + h := &handler{} + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := h.getContainerDetails(tt.pod) + assert.Equal(t, tt.expectedResult, result) + }) + } +} + +func TestHandler_GetContainerStatus(t *testing.T) { + h := &handler{} + + tests := []struct { + name string + containerState corev1.ContainerState + 
expectedStatus string + }{ + { + name: "Running container", + containerState: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{ + StartedAt: metav1.NewTime(time.Now()), + }, + }, + expectedStatus: "Running", + }, + { + name: "Waiting container", + containerState: corev1.ContainerState{ + Waiting: &corev1.ContainerStateWaiting{ + Reason: "ImagePullBackOff", + Message: "Back-off pulling image", + }, + }, + expectedStatus: "Waiting", + }, + { + name: "Terminated container", + containerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 1, + Reason: "Error", + Message: "Container crashed", + }, + }, + expectedStatus: "Terminated", + }, + { + name: "Empty container state", + containerState: corev1.ContainerState{}, + expectedStatus: "Unknown", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + status := h.getContainerStatus(tt.containerState) + assert.Equal(t, tt.expectedStatus, status) + }) + } +} diff --git a/server/apis/v1/promql_service.go b/server/apis/v1/promql_service.go new file mode 100644 index 0000000000..935691d9e6 --- /dev/null +++ b/server/apis/v1/promql_service.go @@ -0,0 +1,238 @@ +package v1 + +import ( + "context" + "fmt" + "net/http" + "regexp" + "strings" + "time" + + "github.com/prometheus/client_golang/api" + v1 "github.com/prometheus/client_golang/api/prometheus/v1" + "github.com/prometheus/common/model" +) + +// PrometheusClient interface for the Prometheus HTTP client +type PrometheusClient interface { + // Do implement client methods here + Do(context.Context, *http.Request) (*http.Response, []byte, error) +} + +// PrometheusAPI interface for the Prometheus API +type PrometheusAPI interface { + QueryRange(ctx context.Context, query string, r v1.Range, opts ...v1.Option) (model.Value, v1.Warnings, error) +} + +// Prometheus struct holds the client and API +type Prometheus struct { + Client PrometheusClient + Api PrometheusAPI +} + +var newPrometheusClient = 
func(url string) (*Prometheus, error) { + if url == "" { + return nil, fmt.Errorf("prometheus server url is not set") + } + client, err := api.NewClient(api.Config{ + Address: url, + }) + if err != nil { + return nil, fmt.Errorf("failed to create prometheus client, %w", err) + } + v1api := v1.NewAPI(client) + return &Prometheus{ + Client: client, + Api: v1api, + }, nil +} + +type PromQl interface { + QueryPrometheus(context.Context, string, time.Time, time.Time) (model.Value, error) + BuildQuery(MetricsRequestBody) (string, error) + PopulateReqMap(MetricsRequestBody) map[string]string + GetConfigData() *PrometheusConfig + DisableMetricsChart() bool +} + +type PromQlService struct { + PrometheusClient *Prometheus + PlaceHolders map[string]map[string][]string + Expression map[string]map[string]string + ConfigData *PrometheusConfig +} + +func formatDimension(dimension string) string { + switch dimension { + case "mono-vertex": + return "mvtx_name" + default: + return dimension + } +} + +// builds key, val pair string for labels +func formatMapLabels(labels map[string]string) string { + if len(labels) == 0 { + return "" + } + var builder strings.Builder + first := true + + for k, v := range labels { + if !first { + builder.WriteString(", ") + } + builder.WriteString(fmt.Sprintf("%s= \"%s\"", k, v)) + first = false + } + return builder.String() +} + +// substitutes placeholders in expr with req values +// throws err if any required placeholder is not present/empty in reqMap +func substitutePlaceHolders(expr string, placeholders []string, reqMap map[string]string) (string, error) { + for _, match := range placeholders { + key := match + val, ok := reqMap[key] + if !ok || val == "" { + return "", fmt.Errorf("missing the %s field in the request body", key) + } + expr = strings.Replace(expr, key, val, -1) + } + return expr, nil +} + +// NewPromQlServiceObject creates a new PromQlService instance +func NewPromQlServiceObject() (PromQl, error) { + var ( + // map of 
[metric_name][dimension] = expr + expressions = make(map[string]map[string]string) + placeHolders = make(map[string]map[string][]string) + client *Prometheus + config *PrometheusConfig + err error + ) + + var serviceObj = &PromQlService{ + PrometheusClient: client, + PlaceHolders: placeHolders, + Expression: expressions, + ConfigData: config, + } + + // load prometheus metric config. + config, err = loadPrometheusMetricConfig() + if err != nil { + // return serviceObj with nil config data & client. Do not return error as this is not critical. + return serviceObj, nil + } + serviceObj.ConfigData = config + + // prometheus client instance. + client, err = newPrometheusClient(config.ServerUrl) + if err != nil { + // return serviceObj with nil prometheus client. Do not return error as this is not critical. + return serviceObj, nil + } + serviceObj.PrometheusClient = client + + for _, pattern := range config.Patterns { + patternExpression := pattern.Expression + for _, metric := range pattern.Metrics { + metricName := metric.Name + for _, dimension := range metric.Dimensions { + dimensionName := dimension.Name + _, ok := expressions[metricName] + if !ok { + expressions[metricName] = make(map[string]string) + } + if dimension.Expression != "" { + expressions[metricName][dimensionName] = dimension.Expression + } else { + expressions[metricName][dimensionName] = patternExpression + } + expr := expressions[metricName][dimensionName] + placeHoldersArr := make([]string, 0) + re := regexp.MustCompile(`\$(\w+)`) + matches := re.FindAllStringSubmatch(expr, -1) + for _, match := range matches { + placeHoldersArr = append(placeHoldersArr, match[0]) + } + _, ok = placeHolders[metricName] + if !ok { + placeHolders[metricName] = map[string][]string{} + } + placeHolders[metricName][dimensionName] = placeHoldersArr + } + } + } + + serviceObj.PlaceHolders = placeHolders + serviceObj.Expression = expressions + return serviceObj, nil +} + +// PopulateReqMap populate map based on req 
fields +func (b *PromQlService) PopulateReqMap(requestBody MetricsRequestBody) map[string]string { + reqMap := map[string]string{ + "$metric_name": requestBody.MetricName, + "$filters": formatMapLabels(requestBody.Filters), + "$dimension": formatDimension(requestBody.Dimension), + "$quantile": requestBody.Quantile, + "$duration": requestBody.Duration, + } + return reqMap +} + +// BuildQuery build constructs the PromQL query string +func (b *PromQlService) BuildQuery(requestBody MetricsRequestBody) (string, error) { + var query string + var metricName = requestBody.MetricName + var dimension = requestBody.Dimension + if metricName == "" || dimension == "" { + return query, fmt.Errorf("missing metric name or dimension in the request body") + } + expr, ok := b.Expression[metricName][dimension] + if !ok { + return query, fmt.Errorf(`expression is not defined for "%s" dimension of "%s" metric`, dimension, metricName) + } + placeHolders, ok := b.PlaceHolders[metricName][dimension] + if !ok { + return query, fmt.Errorf(`placeholders are not defined for "%s" dimension of "%s" metric`, dimension, metricName) + } + + if expr == "" || len(placeHolders) == 0 { + return query, fmt.Errorf(`expression or placeholders do not exist for for "%s" dimension of "%s" metric in the metrics config`, dimension, metricName) + } + reqMap := b.PopulateReqMap(requestBody) + query, err := substitutePlaceHolders(expr, placeHolders, reqMap) + if err != nil { + return "", fmt.Errorf("failed to substitute placeholders: %w", err) + } + return query, nil +} + +// QueryPrometheus query prometheus server +func (b *PromQlService) QueryPrometheus(ctx context.Context, promql string, start, end time.Time) (model.Value, error) { + if b.PrometheusClient == nil { + return nil, fmt.Errorf("prometheus client is not defined") + } + r := v1.Range{ + Start: start, + End: end, + Step: time.Minute, + } + result, _, err := b.PrometheusClient.Api.QueryRange(ctx, promql, r, v1.WithTimeout(5*time.Second)) + return 
result, err +} + +// GetConfigData returns the PrometheusConfig +func (b *PromQlService) GetConfigData() *PrometheusConfig { + return b.ConfigData +} + +func (b *PromQlService) DisableMetricsChart() bool { + // disable metrics charts if metric config or prometheus client is nil + return b.ConfigData == nil || b.PrometheusClient == nil +} diff --git a/server/apis/v1/promql_service_test.go b/server/apis/v1/promql_service_test.go new file mode 100644 index 0000000000..8ad0fac810 --- /dev/null +++ b/server/apis/v1/promql_service_test.go @@ -0,0 +1,473 @@ +package v1 + +import ( + "context" + "fmt" + "net/http" + "reflect" + "regexp" + "strings" + "testing" + "time" + + v1 "github.com/prometheus/client_golang/api/prometheus/v1" + "github.com/prometheus/common/model" + "github.com/stretchr/testify/assert" +) + +// MockPrometheusAPI is a mock implementation of the PrometheusAPI interface +type MockPrometheusAPI struct{} +type MockPrometheusClient struct{} + +func (m *MockPrometheusClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) { + return nil, nil, nil +} + +// QueryRange mock implementation +func (m *MockPrometheusAPI) QueryRange(ctx context.Context, query string, r v1.Range, opts ...v1.Option) (model.Value, v1.Warnings, error) { + // Create a mock Prometheus API response + mockResponse := model.Matrix{ + &model.SampleStream{ + Metric: model.Metric{"pipeline": "simple-pipeline"}, + Values: []model.SamplePair{ + {Timestamp: 1728364347, Value: 3442.72526560}, + {Timestamp: 1728364407, Value: 3446.17140174}, + }, + }, + } + return mockResponse, nil, nil +} + +// comparePrometheusQueries compares two Prometheus queries, ignoring the order of filters within the curly braces +func comparePrometheusQueries(query1, query2 string) bool { + // Extract the filter portions of the queries + filters1 := extractfilters(query1) + filters2 := extractfilters(query2) + // Compare the filter portions using reflect.DeepEqual, which ignores order + return 
reflect.DeepEqual(filters1, filters2) +} + +// extractfilters extracts the key-value pairs within the curly braces +// from a Prometheus query using a regular expression. +func extractfilters(query string) map[string]string { + re := regexp.MustCompile(`\{(.*?)\}`) // Regex to match content within curly braces + match := re.FindStringSubmatch(query) + + if len(match) < 2 { // No match found + return nil + } + + filterstring := match[1] // Get the captured group (content within braces) + filterPairs := strings.Split(filterstring, ",") + filters := make(map[string]string) + + for _, pair := range filterPairs { + parts := strings.Split(pair, "=") + if len(parts) == 2 { // Ensure valid key-value pair + filters[strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1]) + } + } + + return filters +} + +func Test_PopulateReqMap(t *testing.T) { + t.Run("Map creation with all fields", func(t *testing.T) { + requestBody := MetricsRequestBody{ + MetricName: "test_metric", + Filters: map[string]string{"filter1": "value1", "filter2": "value2"}, + Dimension: "group1", + Duration: "5m", + Quantile: "0.95", + StartTime: "2024-10-08T12:00:00Z", + EndTime: "2024-10-08T13:00:00Z", + } + expectedMap := map[string]string{ + "$metric_name": "test_metric", + "$filters": "filter1= \"value1\", filter2= \"value2\"", + "$dimension": "group1", + "$quantile": "0.95", + "$duration": "5m", + } + + promQlService := &PromQlService{} + actualMap := promQlService.PopulateReqMap(requestBody) + + assert.Equal(t, actualMap["$metric_name"], expectedMap["$metric_name"]) + assert.Equal(t, actualMap["$quantile"], expectedMap["$quantile"]) + assert.Equal(t, actualMap["$duration"], expectedMap["$duration"]) + assert.Equal(t, actualMap["$dimension"], expectedMap["$dimension"]) + if !comparePrometheusQueries(expectedMap["$filters"], actualMap["$filters"]) { + t.Errorf("filters do not match") + } + }) + + t.Run("Mapping with empty fields", func(t *testing.T) { + requestBody := MetricsRequestBody{ + MetricName: 
"test_metric", + } + expectedMap := map[string]string{ + "$metric_name": "test_metric", + "$filters": "", + "$dimension": "", + "$quantile": "", + "$duration": "", + } + + promQlService := &PromQlService{} + actualMap := promQlService.PopulateReqMap(requestBody) + assert.Equal(t, actualMap["$metric_name"], expectedMap["$metric_name"]) + assert.Equal(t, actualMap["$quantile"], expectedMap["$quantile"]) + assert.Equal(t, actualMap["$duration"], expectedMap["$duration"]) + assert.Equal(t, actualMap["$dimension"], expectedMap["$dimension"]) + + if !comparePrometheusQueries(expectedMap["$filters"], actualMap["$filters"]) { + t.Errorf("filters do not match") + } + }) +} +func Test_PromQueryBuilder(t *testing.T) { + var service = &PromQlService{ + PlaceHolders: map[string]map[string][]string{ + "test_metric": { + "test_dimension": {"$quantile", "$dimension", "$metric_name", "$filters", "$duration"}, + }, + }, + Expression: map[string]map[string]string{ + "test_metric": { + "test_dimension": "histogram_quantile($quantile, sum by($dimension,le) (rate($metric_name{$filters}[$duration])))", + }, + }, + } + + tests := []struct { + name string + requestBody MetricsRequestBody + expectedQuery string + expectError bool + }{ + { + name: "Successful template substitution", + requestBody: MetricsRequestBody{ + MetricName: "test_metric", + Quantile: "0.90", + Duration: "5m", + Dimension: "test_dimension", + Filters: map[string]string{ + "namespace": "test_namespace", + "mvtx_name": "test-mono-vertex", + "pod": "test-pod", + }, + }, + expectedQuery: `histogram_quantile(0.90, sum by(test_dimension,le) (rate(test_bucket{namespace= "test_namespace", mvtx_name= "test-mono-vertex", pod= "test-pod"}[5m])))`, + }, + { + name: "Missing placeholder in req", + requestBody: MetricsRequestBody{ + MetricName: "test_metric", + Duration: "5m", + Dimension: "test_dimension", + Filters: map[string]string{ + "namespace": "test_namespace", + "mvtx_name": "test-mono-vertex", + "pod": "test-pod", + }, + 
}, + expectError: true, + }, + { + name: "Missing metric name in service config", + requestBody: MetricsRequestBody{ + MetricName: "test_bucket", + Duration: "5m", + Dimension: "test_dimension", + Filters: map[string]string{ + "namespace": "test_namespace", + "mvtx_name": "test-mono-vertex", + "pod": "test-pod", + }, + }, + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actualQuery, err := service.BuildQuery(tt.requestBody) + if tt.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + if !comparePrometheusQueries(tt.expectedQuery, actualQuery) { + t.Errorf("Prometheus queries do not match.\nExpected: %s\nGot: %s", tt.expectedQuery, actualQuery) + } else { + t.Log("Prometheus queries match!") + } + } + }) + } +} +func Test_QueryPrometheus(t *testing.T) { + t.Run("Successful query", func(t *testing.T) { + mockAPI := &MockPrometheusAPI{} + promQlService := &PromQlService{ + PrometheusClient: &Prometheus{ + Api: mockAPI, + }, + } + query := `histogram_quantile(0.99, sum by (pipeline, le) (rate(forwarder_udf_processing_time_bucket{namespace="default", pipeline="simple-pipeline"}[5m])))` + startTime := time.Now().Add(-30 * time.Minute) + endTime := time.Now() + + ctx := context.Background() + result, err := promQlService.QueryPrometheus(ctx, query, startTime, endTime) + + assert.NoError(t, err) + assert.NotNil(t, result) + + // for query range , response should be a matrix + matrix, ok := result.(model.Matrix) + assert.True(t, ok) + assert.Equal(t, 1, matrix.Len()) + }) + t.Run("Prometheus client is nil", func(t *testing.T) { + service := &PromQlService{ + PrometheusClient: nil, + } + _, err := service.QueryPrometheus(context.Background(), "up", time.Now().Add(-10*time.Minute), time.Now()) + if err == nil { + t.Fatalf("expected an error, got nil") + } + expectedError := "prometheus client is not defined" + if err.Error() != expectedError { + t.Errorf("expected error %v, got %v", expectedError, err) 
+ } + }) +} + +func TestGetConfigData(t *testing.T) { + tests := []struct { + name string + service *PromQlService + expected *PrometheusConfig + }{ + { + name: "returns nil when config is not set", + service: &PromQlService{ + ConfigData: nil, + }, + expected: nil, + }, + { + name: "returns config when config is set", + service: &PromQlService{ + ConfigData: &PrometheusConfig{ + ServerUrl: "http://test.com", + }, + }, + expected: &PrometheusConfig{ + ServerUrl: "http://test.com", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := tt.service.GetConfigData() + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestDisableMetricsChart(t *testing.T) { + tests := []struct { + name string + service *PromQlService + expected bool + }{ + { + name: "returns true when both config and client are nil", + service: &PromQlService{ + ConfigData: nil, + PrometheusClient: nil, + }, + expected: true, + }, + { + name: "returns true when only config is nil", + service: &PromQlService{ + ConfigData: nil, + PrometheusClient: &Prometheus{}, + }, + expected: true, + }, + { + name: "returns true when only client is nil", + service: &PromQlService{ + ConfigData: &PrometheusConfig{}, + PrometheusClient: nil, + }, + expected: true, + }, + { + name: "returns false when both config and client are set", + service: &PromQlService{ + ConfigData: &PrometheusConfig{}, + PrometheusClient: &Prometheus{}, + }, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := tt.service.DisableMetricsChart() + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestNewPromQlServiceObject(t *testing.T) { + // Test cases with different scenarios + tests := []struct { + name string + mockConfig *PrometheusConfig + mockConfigErr error + mockClient *Prometheus + mockClientErr error + expectedExpr map[string]map[string]string + expectedPlaceholders map[string]map[string][]string + }{ + { + name: "Successful 
initialization", + mockConfig: &PrometheusConfig{ + ServerUrl: "http://prometheus:9090", + Patterns: []Pattern{ + { + Expression: `sum(rate($metric{label="$label"}[$interval])) by ($groupBy)`, + Metrics: []Metric{ + { + Name: "metric_1", + Dimensions: []Dimension{ + {Name: "label", Expression: ""}, + {Name: "groupBy", Expression: "$groupBy"}, + }, + }, + }, + }, + }, + }, + mockClient: &Prometheus{ + Client: &MockPrometheusClient{}, + Api: &MockPrometheusAPI{}, + }, + expectedExpr: map[string]map[string]string{ + "metric_1": { + "label": `sum(rate($metric{label="$label"}[$interval])) by ($groupBy)`, + "groupBy": "$groupBy", + }, + }, + expectedPlaceholders: map[string]map[string][]string{ + "metric_1": { + "label": []string{"$metric", "$label", "$interval", "$groupBy"}, + "groupBy": []string{"$groupBy"}, + }, + }, + }, + { + name: "Error loading config", + mockConfigErr: fmt.Errorf("config error"), + expectedExpr: nil, + expectedPlaceholders: nil, + }, + { + name: "Error creating client", + mockConfig: &PrometheusConfig{ + ServerUrl: "http://prometheus:9090", + }, + mockClientErr: fmt.Errorf("client error"), + expectedExpr: nil, + expectedPlaceholders: nil, + }, + { + name: "Empty config file", + mockConfig: &PrometheusConfig{}, // Empty config + expectedExpr: nil, + expectedPlaceholders: nil, + }, + { + name: "Invalid config file format", + mockConfigErr: fmt.Errorf("yaml: unmarshal errors:\n line 1: cannot unmarshal !!str `invalid...` into main.PrometheusConfig"), // Simulate invalid YAML error + expectedExpr: nil, + expectedPlaceholders: nil, + }, + { + name: "Missing server URL in config", + mockConfig: &PrometheusConfig{ + Patterns: []Pattern{ // ServerUrl is missing + { + Expression: `sum(rate($metric{label="$label"}[$interval])) by ($groupBy)`, + Metrics: []Metric{ + { + Name: "metric_1", + Dimensions: []Dimension{ + {Name: "label", Expression: ""}, + {Name: "groupBy", Expression: "$groupBy"}, + }, + }, + }, + }, + }, + }, + mockClientErr: 
fmt.Errorf("prometheus server url is not set"), // Expect client creation error + expectedExpr: nil, + expectedPlaceholders: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Mock loadPrometheusMetricConfig and newPrometheusClient + originalLoadConfig := loadPrometheusMetricConfig + originalNewClient := newPrometheusClient + defer func() { + loadPrometheusMetricConfig = originalLoadConfig + newPrometheusClient = originalNewClient + }() + + loadPrometheusMetricConfig = func() (*PrometheusConfig, error) { + return tt.mockConfig, tt.mockConfigErr + } + + newPrometheusClient = func(url string) (*Prometheus, error) { + return tt.mockClient, tt.mockClientErr + } + + service, err := NewPromQlServiceObject() + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + promService := service.(*PromQlService) + + if tt.mockConfigErr != nil || tt.mockClientErr != nil || tt.mockConfig.ServerUrl == "" { + // If there's an error in loading config or creating client, + // expressions and placeholders should be nil (empty maps) + if len(promService.Expression) != 0 { // Check for empty map + t.Errorf("Expressions mismatch. Got: %v, Want: %v", promService.Expression, map[string]map[string]string{}) + } + if len(promService.PlaceHolders) != 0 { // Check for empty map + t.Errorf("Placeholders mismatch. Got: %v, Want: %v", promService.PlaceHolders, map[string]map[string][]string{}) + } + } else { + // Otherwise, compare with the expected values + if !reflect.DeepEqual(promService.Expression, tt.expectedExpr) { + t.Errorf("Expressions mismatch. Got: %v, Want: %v", promService.Expression, tt.expectedExpr) + } + + if !reflect.DeepEqual(promService.PlaceHolders, tt.expectedPlaceholders) { + t.Errorf("Placeholders mismatch. 
Got: %v, Want: %v", promService.PlaceHolders, tt.expectedPlaceholders) + } + } + }) + } +} diff --git a/server/apis/v1/response_metrics.go b/server/apis/v1/response_metrics.go new file mode 100644 index 0000000000..3b4a26507d --- /dev/null +++ b/server/apis/v1/response_metrics.go @@ -0,0 +1,81 @@ +package v1 + +import ( + "os" + + "gopkg.in/yaml.v2" +) + +const ( + metricsProxyConfigPath = "/etc/numaflow/metrics-proxy/config.yaml" +) + +type MetricsRequestBody struct { + MetricName string `json:"metric_name"` + Dimension string `json:"dimension"` + Filters map[string]string `json:"filters"` + Duration string `json:"duration"` + Quantile string `json:"quantile"` + StartTime string `json:"start_time"` + EndTime string `json:"end_time"` +} + +type Filter struct { + Name string `yaml:"name"` + Required bool `yaml:"required"` +} + +type Dimension struct { + Name string `yaml:"name"` + Expression string `yaml:"expr"` + Filters []Filter `yaml:"filters"` +} + +type Metric struct { + Name string `yaml:"metric_name"` + // array of required labels. 
+ Filters []string `yaml:"required_filters"` + //array of dimensions and their data + Dimensions []Dimension `yaml:"dimensions"` +} + +type Params struct { + Name string `yaml:"name"` + Required bool `yaml:"required"` +} + +type Pattern struct { + Name string `yaml:"name" json:"name"` + Object string `yaml:"object" json:"object"` + Title string `yaml:"title"` + Description string `yaml:"description"` + Expression string `yaml:"expr"` + Params []Params `yaml:"params"` + Metrics []Metric `yaml:"metrics"` +} + +type PrometheusConfig struct { + // prometheus server url in the config + ServerUrl string `yaml:"url"` + // patterns in the config + Patterns []Pattern `yaml:"patterns"` +} + +var loadPrometheusMetricConfig = func() (*PrometheusConfig, error) { + var ( + data []byte + promConfig PrometheusConfig + err error + ) + + data, err = os.ReadFile(metricsProxyConfigPath) + if err != nil { + return nil, err + } + err = yaml.Unmarshal(data, &promConfig) + + if err != nil { + return nil, err + } + return &promConfig, nil +} diff --git a/server/apis/v1/response_metrics_discovery.go b/server/apis/v1/response_metrics_discovery.go new file mode 100644 index 0000000000..ef2a160e1f --- /dev/null +++ b/server/apis/v1/response_metrics_discovery.go @@ -0,0 +1,23 @@ +package v1 + +type Dimensions struct { + Name string `json:"name"` + Filters []Filter `json:"filters"` + Params []Params `json:"params"` +} + +type DiscoveryResponse struct { + MetricName string `json:"metric_name"` + Dimensions []Dimensions `json:"dimensions"` +} + +// MetricsDiscoveryResponse is a list of DiscoveryResponse +type MetricsDiscoveryResponse []DiscoveryResponse + +// NewDiscoveryResponse creates a new DiscoveryResponse object for each metric. 
+func NewDiscoveryResponse(metricName string, dimensions []Dimensions) DiscoveryResponse { + return DiscoveryResponse{ + MetricName: metricName, + Dimensions: dimensions, + } +} diff --git a/server/apis/v1/response_pod.go b/server/apis/v1/response_pod.go new file mode 100644 index 0000000000..4580179fba --- /dev/null +++ b/server/apis/v1/response_pod.go @@ -0,0 +1,29 @@ +package v1 + +type PodDetails struct { + Name string `json:"name"` + Status string `json:"status"` + Message string `json:"message"` + Reason string `json:"reason"` + ContainerDetailsMap map[string]ContainerDetails `json:"containerDetailsMap"` + TotalCPU string `json:"totalCPU"` + TotalMemory string `json:"totalMemory"` +} + +type ContainerDetails struct { + Name string `json:"name"` + ID string `json:"id"` + State string `json:"state"` + LastStartedAt string `json:"lastStartedAt"` + RestartCount int32 `json:"restartCount"` + LastTerminationReason string `json:"lastTerminationReason"` + LastTerminationMessage string `json:"lastTerminationMessage"` + WaitingReason string `json:"waitingReason"` + WaitingMessage string `json:"waitingMessage"` + TotalCPU string `json:"totalCPU"` + TotalMemory string `json:"totalMemory"` + RequestedCPU string `json:"requestedCPU"` + RequestedMemory string `json:"requestedMemory"` + LimitCPU string `json:"limitCPU"` + LimitMemory string `json:"limitMemory"` +} diff --git a/server/cmd/server/start.go b/server/cmd/server/start.go index aa4e3403b8..2cb651e459 100644 --- a/server/cmd/server/start.go +++ b/server/cmd/server/start.go @@ -106,6 +106,7 @@ func (s *server) Start(ctx context.Context) { ManagedNamespace: s.options.ManagedNamespace, Namespaced: s.options.Namespaced, IsReadOnly: s.options.ReadOnly, + DisableMetricsCharts: true, // defaults to true Version: numaflow.GetVersion().String(), DaemonClientProtocol: s.options.DaemonClientProtocol, }, @@ -176,35 +177,39 @@ func UrlRewrite(r *gin.Engine) gin.HandlerFunc { // The value is a RouteInfo object. 
func CreateAuthRouteMap(baseHref string) authz.RouteMap { return authz.RouteMap{ - "GET:" + baseHref + "api/v1/sysinfo": authz.NewRouteInfo(authz.ObjectPipeline, false), - "GET:" + baseHref + "api/v1/authinfo": authz.NewRouteInfo(authz.ObjectEvents, false), - "GET:" + baseHref + "api/v1/namespaces": authz.NewRouteInfo(authz.ObjectEvents, false), - "GET:" + baseHref + "api/v1/cluster-summary": authz.NewRouteInfo(authz.ObjectPipeline, false), - "GET:" + baseHref + "api/v1/namespaces/:namespace/pipelines": authz.NewRouteInfo(authz.ObjectPipeline, true), - "POST:" + baseHref + "api/v1/namespaces/:namespace/pipelines": authz.NewRouteInfo(authz.ObjectPipeline, true), - "GET:" + baseHref + "api/v1/namespaces/:namespace/pipelines/:pipeline": authz.NewRouteInfo(authz.ObjectPipeline, true), - "GET:" + baseHref + "api/v1/namespaces/:namespace/pipelines/:pipeline/health": authz.NewRouteInfo(authz.ObjectPipeline, true), - "PUT:" + baseHref + "api/v1/namespaces/:namespace/pipelines/:pipeline": authz.NewRouteInfo(authz.ObjectPipeline, true), - "DELETE:" + baseHref + "api/v1/namespaces/:namespace/pipelines/:pipeline": authz.NewRouteInfo(authz.ObjectPipeline, true), - "PATCH:" + baseHref + "api/v1/namespaces/:namespace/pipelines/:pipeline": authz.NewRouteInfo(authz.ObjectPipeline, true), - "POST:" + baseHref + "api/v1/namespaces/:namespace/isb-services": authz.NewRouteInfo(authz.ObjectISBSvc, true), - "GET:" + baseHref + "api/v1/namespaces/:namespace/isb-services": authz.NewRouteInfo(authz.ObjectISBSvc, true), - "GET:" + baseHref + "api/v1/namespaces/:namespace/isb-services/:isb-service": authz.NewRouteInfo(authz.ObjectISBSvc, true), - "PUT:" + baseHref + "api/v1/namespaces/:namespace/isb-services/:isb-service": authz.NewRouteInfo(authz.ObjectISBSvc, true), - "DELETE:" + baseHref + "api/v1/namespaces/:namespace/isb-services/:isb-service": authz.NewRouteInfo(authz.ObjectISBSvc, true), - "GET:" + baseHref + "api/v1/namespaces/:namespace/pipelines/:pipeline/isbs": 
authz.NewRouteInfo(authz.ObjectPipeline, true), - "GET:" + baseHref + "api/v1/namespaces/:namespace/pipelines/:pipeline/watermarks": authz.NewRouteInfo(authz.ObjectPipeline, true), - "PUT:" + baseHref + "api/v1/namespaces/:namespace/pipelines/:pipeline/vertices/:vertex": authz.NewRouteInfo(authz.ObjectPipeline, true), - "GET:" + baseHref + "api/v1/namespaces/:namespace/pipelines/:pipeline/vertices/metrics": authz.NewRouteInfo(authz.ObjectPipeline, true), - "GET:" + baseHref + "api/v1/namespaces/:namespace/pipelines/:pipeline/vertices/:vertex/pods": authz.NewRouteInfo(authz.ObjectPipeline, true), - "GET:" + baseHref + "api/v1/metrics/namespaces/:namespace/pods": authz.NewRouteInfo(authz.ObjectPipeline, true), - "GET:" + baseHref + "api/v1/namespaces/:namespace/pods/:pod/logs": authz.NewRouteInfo(authz.ObjectPipeline, true), - "GET:" + baseHref + "api/v1/namespaces/:namespace/events": authz.NewRouteInfo(authz.ObjectEvents, true), - "GET:" + baseHref + "api/v1/namespaces/:namespace/mono-vertices": authz.NewRouteInfo(authz.ObjectMonoVertex, true), - "GET:" + baseHref + "api/v1/namespaces/:namespace/mono-vertices/:mono-vertex": authz.NewRouteInfo(authz.ObjectMonoVertex, true), - "GET:" + baseHref + "api/v1/namespaces/:namespace/mono-vertices/:mono-vertex/pods": authz.NewRouteInfo(authz.ObjectMonoVertex, true), - "GET:" + baseHref + "api/v1/namespaces/:namespace/mono-vertices/:mono-vertex/metrics": authz.NewRouteInfo(authz.ObjectMonoVertex, true), - "POST:" + baseHref + "api/v1/namespaces/:namespace/mono-vertices": authz.NewRouteInfo(authz.ObjectMonoVertex, true), - "GET:" + baseHref + "api/v1/namespaces/:namespace/mono-vertices/:mono-vertex/health": authz.NewRouteInfo(authz.ObjectMonoVertex, true), + "GET:" + baseHref + "api/v1/sysinfo": authz.NewRouteInfo(authz.ObjectPipeline, false), + "GET:" + baseHref + "api/v1/authinfo": authz.NewRouteInfo(authz.ObjectEvents, false), + "GET:" + baseHref + "api/v1/namespaces": authz.NewRouteInfo(authz.ObjectEvents, false), + "GET:" 
+ baseHref + "api/v1/cluster-summary": authz.NewRouteInfo(authz.ObjectPipeline, false), + "GET:" + baseHref + "api/v1/namespaces/:namespace/pipelines": authz.NewRouteInfo(authz.ObjectPipeline, true), + "POST:" + baseHref + "api/v1/namespaces/:namespace/pipelines": authz.NewRouteInfo(authz.ObjectPipeline, true), + "GET:" + baseHref + "api/v1/namespaces/:namespace/pipelines/:pipeline": authz.NewRouteInfo(authz.ObjectPipeline, true), + "GET:" + baseHref + "api/v1/namespaces/:namespace/pipelines/:pipeline/health": authz.NewRouteInfo(authz.ObjectPipeline, true), + "PUT:" + baseHref + "api/v1/namespaces/:namespace/pipelines/:pipeline": authz.NewRouteInfo(authz.ObjectPipeline, true), + "DELETE:" + baseHref + "api/v1/namespaces/:namespace/pipelines/:pipeline": authz.NewRouteInfo(authz.ObjectPipeline, true), + "PATCH:" + baseHref + "api/v1/namespaces/:namespace/pipelines/:pipeline": authz.NewRouteInfo(authz.ObjectPipeline, true), + "POST:" + baseHref + "api/v1/namespaces/:namespace/isb-services": authz.NewRouteInfo(authz.ObjectISBSvc, true), + "GET:" + baseHref + "api/v1/namespaces/:namespace/isb-services": authz.NewRouteInfo(authz.ObjectISBSvc, true), + "GET:" + baseHref + "api/v1/namespaces/:namespace/isb-services/:isb-service": authz.NewRouteInfo(authz.ObjectISBSvc, true), + "PUT:" + baseHref + "api/v1/namespaces/:namespace/isb-services/:isb-service": authz.NewRouteInfo(authz.ObjectISBSvc, true), + "DELETE:" + baseHref + "api/v1/namespaces/:namespace/isb-services/:isb-service": authz.NewRouteInfo(authz.ObjectISBSvc, true), + "GET:" + baseHref + "api/v1/namespaces/:namespace/pipelines/:pipeline/isbs": authz.NewRouteInfo(authz.ObjectPipeline, true), + "GET:" + baseHref + "api/v1/namespaces/:namespace/pipelines/:pipeline/watermarks": authz.NewRouteInfo(authz.ObjectPipeline, true), + "PUT:" + baseHref + "api/v1/namespaces/:namespace/pipelines/:pipeline/vertices/:vertex": authz.NewRouteInfo(authz.ObjectPipeline, true), + "GET:" + baseHref + 
"api/v1/namespaces/:namespace/pipelines/:pipeline/vertices/metrics": authz.NewRouteInfo(authz.ObjectPipeline, true), + "GET:" + baseHref + "api/v1/namespaces/:namespace/pipelines/:pipeline/vertices/:vertex/pods": authz.NewRouteInfo(authz.ObjectPipeline, true), + "GET:" + baseHref + "api/v1/metrics/namespaces/:namespace/pods": authz.NewRouteInfo(authz.ObjectPipeline, true), + "GET:" + baseHref + "api/v1/namespaces/:namespace/pods/:pod/logs": authz.NewRouteInfo(authz.ObjectPipeline, true), + "GET:" + baseHref + "api/v1/namespaces/:namespace/mono-vertices/:mono-vertex/pods-info": authz.NewRouteInfo(authz.ObjectMonoVertex, true), + "GET:" + baseHref + "api/v1/namespaces/:namespace/pipelines/:pipeline/vertices/:vertex/pods-info": authz.NewRouteInfo(authz.ObjectPipeline, true), + "GET:" + baseHref + "api/v1/namespaces/:namespace/events": authz.NewRouteInfo(authz.ObjectEvents, true), + "GET:" + baseHref + "api/v1/namespaces/:namespace/mono-vertices": authz.NewRouteInfo(authz.ObjectMonoVertex, true), + "GET:" + baseHref + "api/v1/namespaces/:namespace/mono-vertices/:mono-vertex": authz.NewRouteInfo(authz.ObjectMonoVertex, true), + "GET:" + baseHref + "api/v1/namespaces/:namespace/mono-vertices/:mono-vertex/pods": authz.NewRouteInfo(authz.ObjectMonoVertex, true), + "GET:" + baseHref + "api/v1/namespaces/:namespace/mono-vertices/:mono-vertex/metrics": authz.NewRouteInfo(authz.ObjectMonoVertex, true), + "POST:" + baseHref + "api/v1/namespaces/:namespace/mono-vertices": authz.NewRouteInfo(authz.ObjectMonoVertex, true), + "GET:" + baseHref + "api/v1/namespaces/:namespace/mono-vertices/:mono-vertex/health": authz.NewRouteInfo(authz.ObjectMonoVertex, true), + "POST:" + baseHref + "api/v1/metrics-proxy": authz.NewRouteInfo(authz.ObjectAll, true), + "GET:" + baseHref + "api/v1/metrics-discovery/object/:object": authz.NewRouteInfo(authz.ObjectAll, true), } } diff --git a/server/cmd/server/start_test.go b/server/cmd/server/start_test.go index 13518ee163..574408a678 100644 --- 
a/server/cmd/server/start_test.go +++ b/server/cmd/server/start_test.go @@ -25,12 +25,12 @@ import ( func TestCreateAuthRouteMap(t *testing.T) { t.Run("empty base", func(t *testing.T) { got := CreateAuthRouteMap("") - assert.Equal(t, 30, len(got)) + assert.Equal(t, 34, len(got)) }) t.Run("customize base", func(t *testing.T) { got := CreateAuthRouteMap("abcdefg") - assert.Equal(t, 30, len(got)) + assert.Equal(t, 34, len(got)) for k := range got { assert.Contains(t, k, "abcdefg") } diff --git a/server/routes/routes.go b/server/routes/routes.go index c80181d789..1872e90181 100644 --- a/server/routes/routes.go +++ b/server/routes/routes.go @@ -34,6 +34,7 @@ type SystemInfo struct { ManagedNamespace string `json:"managedNamespace"` Namespaced bool `json:"namespaced"` IsReadOnly bool `json:"isReadOnly"` + DisableMetricsCharts bool `json:"disableMetricsCharts"` Version string `json:"version"` DaemonClientProtocol string `json:"daemonClientProtocol"` } @@ -58,6 +59,15 @@ func Routes(ctx context.Context, r *gin.Engine, sysInfo SystemInfo, authInfo Aut panic(err) } + // promql service instance. + promQlServiceObj, err := v1.NewPromQlServiceObject() + if err != nil { + panic(err) + } + + // disable metrics charts if metric config or prometheus client is not set. + sysInfo.DisableMetricsCharts = promQlServiceObj.DisableMetricsChart() + // noAuthGroup is a group of routes that do not require AuthN/AuthZ no matter whether auth is enabled. noAuthGroup := r.Group(baseHref + "auth/v1") v1RoutesNoAuth(noAuthGroup, dexObj, localUsersAuthObj) @@ -72,9 +82,9 @@ func Routes(ctx context.Context, r *gin.Engine, sysInfo SystemInfo, authInfo Aut } // Add the AuthN/AuthZ middleware to the group. 
r1Group.Use(authMiddleware(ctx, authorizer, dexObj, localUsersAuthObj, authRouteMap)) - v1Routes(ctx, r1Group, dexObj, localUsersAuthObj, sysInfo.IsReadOnly, sysInfo.DaemonClientProtocol) + v1Routes(ctx, r1Group, dexObj, localUsersAuthObj, promQlServiceObj, sysInfo.IsReadOnly, sysInfo.DaemonClientProtocol) } else { - v1Routes(ctx, r1Group, nil, nil, sysInfo.IsReadOnly, sysInfo.DaemonClientProtocol) + v1Routes(ctx, r1Group, nil, nil, promQlServiceObj, sysInfo.IsReadOnly, sysInfo.DaemonClientProtocol) } r1Group.GET("/sysinfo", func(c *gin.Context) { c.JSON(http.StatusOK, v1.NewNumaflowAPIResponse(nil, sysInfo)) @@ -98,12 +108,12 @@ func v1RoutesNoAuth(r gin.IRouter, dexObj *v1.DexObject, localUsersAuthObject *v // v1Routes defines the routes for the v1 API. For adding a new route, add a new handler function // for the route along with an entry in the RouteMap in auth/route_map.go. -func v1Routes(ctx context.Context, r gin.IRouter, dexObj *v1.DexObject, localUsersAuthObject *v1.LocalUsersAuthObject, isReadOnly bool, daemonClientProtocol string) { +func v1Routes(ctx context.Context, r gin.IRouter, dexObj *v1.DexObject, localUsersAuthObject *v1.LocalUsersAuthObject, promQlServiceObj v1.PromQl, isReadOnly bool, daemonClientProtocol string) { handlerOpts := []v1.HandlerOption{v1.WithDaemonClientProtocol(daemonClientProtocol)} if isReadOnly { handlerOpts = append(handlerOpts, v1.WithReadOnlyMode()) } - handler, err := v1.NewHandler(ctx, dexObj, localUsersAuthObject, handlerOpts...) + handler, err := v1.NewHandler(ctx, dexObj, localUsersAuthObject, promQlServiceObj, handlerOpts...) if err != nil { panic(err) } @@ -151,6 +161,10 @@ func v1Routes(ctx context.Context, r gin.IRouter, dexObj *v1.DexObject, localUse r.GET("/metrics/namespaces/:namespace/pods", handler.ListPodsMetrics) // Get pod logs. r.GET("/namespaces/:namespace/pods/:pod/logs", handler.PodLogs) + // Get the pod metrics for a mono vertex. 
+ r.GET("/namespaces/:namespace/mono-vertices/:mono-vertex/pods-info", handler.GetMonoVertexPodsInfo) + // Get the pod metrics for a pipeline vertex. + r.GET("/namespaces/:namespace/pipelines/:pipeline/vertices/:vertex/pods-info", handler.GetVertexPodsInfo) // List of the Kubernetes events of a namespace. r.GET("/namespaces/:namespace/events", handler.GetNamespaceEvents) // List all mono vertices for a given namespace. @@ -165,6 +179,10 @@ func v1Routes(ctx context.Context, r gin.IRouter, dexObj *v1.DexObject, localUse r.GET("/namespaces/:namespace/mono-vertices/:mono-vertex/metrics", handler.GetMonoVertexMetrics) // Get the health information of a mono vertex. r.GET("/namespaces/:namespace/mono-vertices/:mono-vertex/health", handler.GetMonoVertexHealth) + // Get the time series data across different dimensions. + r.POST("/metrics-proxy", handler.GetMetricData) + // Discover the metrics for a given object type. + r.GET("/metrics-discovery/object/:object", handler.DiscoverMetrics) } // authMiddleware is the middleware for AuthN/AuthZ. 
diff --git a/ui/package.json b/ui/package.json index 4e7f890b03..c65f7419e6 100644 --- a/ui/package.json +++ b/ui/package.json @@ -29,6 +29,7 @@ "@monaco-editor/react": "^4.5.2", "@mui/icons-material": "^5.6.2", "@mui/material": "^5.6.3", + "@mui/x-date-pickers": "^7.21.0", "@stardazed/streams-polyfill": "^2.4.0", "@testing-library/jest-dom": "^6.1.4", "@testing-library/react": "^14.0.0", @@ -51,6 +52,7 @@ "d3-scale": "^4.0.2", "d3-selection": "^3.0.0", "dagre": "^0.8.5", + "dayjs": "^1.11.13", "moment": "^2.29.4", "monaco-editor": "0.40.0", "msw": "^0.47.4", @@ -62,6 +64,7 @@ "react-test-renderer": "^18.0.0", "react-toastify": "^9.1.1", "reactflow": "^11.11.4", + "recharts": "^2.13.0", "resize-observer-polyfill": "^1.5.1", "typescript": "^4.4.2", "web-vitals": "^2.1.0", diff --git a/ui/src/App.tsx b/ui/src/App.tsx index 4bad9078f2..4877c7fe2e 100644 --- a/ui/src/App.tsx +++ b/ui/src/App.tsx @@ -46,6 +46,7 @@ export const AppContext = React.createContext({ namespace: "", isPlugin: false, isReadOnly: false, + disableMetricsCharts: true, // eslint-disable-next-line @typescript-eslint/no-empty-function setSidebarProps: () => {}, errors: [], @@ -288,6 +289,7 @@ function App(props: AppProps) { namespace, isPlugin: false, isReadOnly: systemInfo?.isReadOnly || false, + disableMetricsCharts: systemInfo?.disableMetricsCharts ?? true, sidebarProps, setSidebarProps, errors, diff --git a/ui/src/components/common/SlidingSidebar/index.tsx b/ui/src/components/common/SlidingSidebar/index.tsx index b225eca341..f4b8f2a328 100644 --- a/ui/src/components/common/SlidingSidebar/index.tsx +++ b/ui/src/components/common/SlidingSidebar/index.tsx @@ -105,9 +105,15 @@ export function SlidingSidebar({ ? MIN_WIDTH_BY_TYPE[SidebarType.ERRORS] : type === SidebarType.VERSION_DETAILS ? MIN_WIDTH_BY_TYPE[SidebarType.VERSION_DETAILS] - : pageWidth * 0.75 + : pageWidth * 0.85 + ); + const [minWidth] = useState( + type === SidebarType.ERRORS + ? 
MIN_WIDTH_BY_TYPE[SidebarType.ERRORS] + : type === SidebarType.VERSION_DETAILS + ? MIN_WIDTH_BY_TYPE[SidebarType.VERSION_DETAILS] + : pageWidth * 0.5 ); - const [minWidth] = useState(0); const [modalOnClose, setModalOnClose] = useState< SpecEditorModalProps | undefined >(); diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/index.tsx b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/index.tsx index 92cdccd453..91baaeed84 100644 --- a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/index.tsx +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/index.tsx @@ -1,6 +1,13 @@ // eslint-disable-next-line @typescript-eslint/ban-ts-comment // @ts-nocheck -import { useState, useEffect, useMemo, useCallback, ChangeEvent } from "react"; +import { + ChangeEvent, + useCallback, + useContext, + useEffect, + useMemo, + useState, +} from "react"; import Box from "@mui/material/Box"; import Paper from "@mui/material/Paper"; import CircularProgress from "@mui/material/CircularProgress"; @@ -10,16 +17,21 @@ import { EventType } from "@visx/event/lib/types"; import { Containers } from "./partials/Containers"; import { PodDetail } from "./partials/PodDetails"; import { SearchablePodsHeatMap } from "./partials/SearchablePodsHeatMap"; -import { PodInfo } from "./partials/PodDetails/partials/PodInfo"; +import { ContainerInfo } from "./partials/PodDetails/partials/ContainerInfo"; import { usePodsViewFetch } from "../../../../../../../../../utils/fetcherHooks/podsViewFetch"; import { notifyError } from "../../../../../../../../../utils/error"; +import { AppContext, AppContextProps } from "../../../../../../../../../App"; +import { getBaseHref } from "../../../../../../../../../utils"; import { + ContainerInfoProps, Hexagon, Pod, + PodSpecificInfoProps, PodsProps, } from "../../../../../../../../../types/declarations/pods"; export function Pods(props: 
PodsProps) { + const { host } = useContext(AppContext); const { namespaceId, pipelineId, vertexId, type } = props; if (!namespaceId || !pipelineId || !vertexId) { @@ -46,6 +58,99 @@ export function Pods(props: PodsProps) { setSelectedContainer ); + const [containerInfo, setContainerInfo] = useState< + ContainerInfoProps | undefined + >(undefined); + const [podSpecificInfo, setPodSpecificInfo] = useState< + PodSpecificInfoProps | undefined + >(undefined); + const [requestKey, setRequestKey] = useState(`${Date.now()}`); + + const getContainerInfo = useCallback((podsData, podName, containerName) => { + const selectedPod = podsData?.find((pod) => pod?.name === podName); + if (selectedPod) { + return selectedPod?.containerDetailsMap[containerName]; + } else { + return null; + } + }, []); + + const getPodSpecificInfo = useCallback((podsData, podName) => { + const podSpecificInfo: PodSpecificInfoProps = {}; + const selectedPod = podsData?.find((pod) => pod?.name === podName); + if (selectedPod) { + podSpecificInfo.name = selectedPod?.name; + podSpecificInfo.reason = selectedPod?.reason; + podSpecificInfo.status = selectedPod?.status; + podSpecificInfo.message = selectedPod?.message; + podSpecificInfo.totalCPU = selectedPod?.totalCPU; + podSpecificInfo.totalMemory = selectedPod?.totalMemory; + let restartCount = 0; + for (const container in selectedPod?.containerDetailsMap) { + restartCount += + selectedPod?.containerDetailsMap?.[container].restartCount; + } + podSpecificInfo.restartCount = restartCount; + } + return podSpecificInfo; + }, []); + + useEffect(() => { + const fetchPodInfo = async () => { + try { + const response = await fetch( + `${host}${getBaseHref()}/api/v1/namespaces/${namespaceId}${ + type === "monoVertex" + ? 
`/mono-vertices` + : `/pipelines/${pipelineId}/vertices` + }/${vertexId}/pods-info?refreshKey=${requestKey}` + ); + if (!response.ok) { + throw new Error("Failed to fetch pod details"); + } + const data = await response.json(); + const containerInfo = getContainerInfo( + data?.data, + selectedPod?.name, + selectedContainer + ); + const podSpecificInfo = getPodSpecificInfo( + data?.data, + selectedPod?.name + ); + setContainerInfo(containerInfo); + setPodSpecificInfo(podSpecificInfo); + } catch (error) { + setContainerInfo({ error: "Failed to fetch pod details" }); + } + }; + fetchPodInfo(); + }, [ + namespaceId, + host, + getBaseHref, + type, + pipelineId, + vertexId, + getContainerInfo, + getPodSpecificInfo, + requestKey, + selectedPod, + selectedContainer, + setPodSpecificInfo, + setContainerInfo, + ]); + + useEffect(() => { + // Refresh pod details every 30 sec + const interval = setInterval(() => { + setRequestKey(`${Date.now()}`); + }, 30000); + return () => { + clearInterval(interval); + }; + }, []); + // This useEffect notifies about the errors while querying for the pods of the vertex useEffect(() => { if (podsErr) notifyError(podsErr); @@ -67,11 +172,11 @@ export function Pods(props: PodsProps) { const containerSelector = useMemo(() => { return ( - - - Select a container + + + Select a container - + - - Select a pod by name + + + Select a pod by name - - + + {pods && selectedPod && ( - + + + {/*pod details container*/} - {podSearchDetails} - - {containerSelector} - - + {/*pod and container selector*/} - + + {podSearchDetails} + + {containerSelector} + + + {/*pod and container info*/} + + + + + {/*logs and metrics container*/} + + {podDetail} + - {podDetail} ); } diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/Containers/index.tsx b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/Containers/index.tsx index 367cc4fab1..2b0235fa94 100644 --- 
a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/Containers/index.tsx +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/Containers/index.tsx @@ -8,8 +8,12 @@ export function Containers(props: ContainerProps) { if (!pod) return null; return ( - - + + {pod?.containers?.map((c: string) => { return ( (AppContext); + + const [selectedTab, setSelectedTab] = useState(0); + const handleTabChange = (_: any, newValue: number) => { + setSelectedTab(newValue); + }; + return ( - Container Logs - + + + {!disableMetricsCharts && type === "monoVertex" && ( + + )} + + + {!disableMetricsCharts && type === "monoVertex" && ( + + )} ); } diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/PodInfo/index.test.tsx b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/ContainerInfo/index.test.tsx similarity index 100% rename from ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/PodInfo/index.test.tsx rename to ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/ContainerInfo/index.test.tsx diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/ContainerInfo/index.tsx b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/ContainerInfo/index.tsx new file mode 100644 index 0000000000..5a225b0dca --- /dev/null +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/ContainerInfo/index.tsx @@ -0,0 +1,231 @@ +import React from "react"; +import Box from "@mui/material/Box"; +import { getPodContainerUsePercentages } from "../../../../../../../../../../../../../utils"; +import { PodInfoProps } 
from "../../../../../../../../../../../../../types/declarations/pods"; + +import "./style.css"; + +export function ContainerInfo({ + pod, + podDetails, + containerName, + containerInfo, + podSpecificInfo, +}: PodInfoProps) { + const resourceUsage = getPodContainerUsePercentages( + pod, + podDetails, + containerName + ); + + // CPU + let usedCPU: string | undefined = + podDetails?.containerMap instanceof Map + ? podDetails?.containerMap?.get(containerName)?.cpu + : undefined; + let specCPU: string | undefined = + pod?.containerSpecMap instanceof Map + ? pod?.containerSpecMap?.get(containerName)?.cpu + : undefined; + if (!usedCPU) { + usedCPU = "?"; + } else if (usedCPU.endsWith("n")) { + usedCPU = `${(parseFloat(usedCPU) / 1e6).toFixed(2)}m`; + } + if (!specCPU) { + specCPU = "?"; + } + let cpuPercent = "unavailable"; + if (resourceUsage?.cpuPercent) { + cpuPercent = `${resourceUsage.cpuPercent?.toFixed(2)}%`; + } + // Memory + let usedMem: string | undefined = + podDetails?.containerMap instanceof Map + ? podDetails?.containerMap?.get(containerName)?.memory + : undefined; + let specMem: string | undefined = + pod?.containerSpecMap instanceof Map + ? pod?.containerSpecMap?.get(containerName)?.memory + : undefined; + if (!usedMem) { + usedMem = "?"; + } else if (usedMem.endsWith("Ki")) { + usedMem = `${(parseFloat(usedMem) / 1024).toFixed(2)}Mi`; + } + if (!specMem) { + specMem = "?"; + } + let memPercent = "unavailable"; + if (resourceUsage?.memoryPercent) { + memPercent = `${resourceUsage.memoryPercent.toFixed(2)}%`; + } + + return ( + + + + {/*container info*/} + Container Info + + + Name + {containerName} + + + + Status + + {containerInfo?.state || "Unknown"} + + + + + Last Started At + + {containerInfo?.lastStartedAt || "N/A"} + + + + + CPU + + {`${usedCPU} / ${specCPU}`} {` (${cpuPercent})`} + + + + + Memory + + {`${usedMem} / ${specMem}`} {` (${memPercent})`} + + + + + Restart Count + + {containerInfo?.restartCount ?? 
"Unknown"} + + + + {containerInfo?.lastTerminationReason && ( + + Last Termination Reason + + {containerInfo?.lastTerminationReason} + + + )} + + {containerInfo?.lastTerminationMessage && ( + + Last Termination Message + + {containerInfo?.lastTerminationMessage} + + + )} + + {containerInfo?.waitingReason && ( + + Waiting Reason + + {containerInfo?.waitingReason} + + + )} + + {containerInfo?.waitingMessage && ( + + Waiting Message + + {containerInfo?.waitingMessage} + + + )} + + {/*pod info*/} + + Pod Info + + + + Name + + {pod?.name?.slice(0, pod.name?.lastIndexOf("-"))} + + + + + Status + + {podSpecificInfo?.status || "Unknown"} + + + + + Restart Count + + {podSpecificInfo?.restartCount ?? "Unknown"} + + + + {podSpecificInfo?.totalCPU && ( + + CPU + + {podSpecificInfo?.totalCPU} + + + )} + + {podSpecificInfo?.totalMemory && ( + + Memory + + {podSpecificInfo?.totalMemory} + + + )} + + {podSpecificInfo?.reason && ( + + Reason + {podSpecificInfo?.reason} + + )} + + {podSpecificInfo?.message && ( + + Message + + {podSpecificInfo?.message} + + + )} + + + + ); +} diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/ContainerInfo/style.css b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/ContainerInfo/style.css new file mode 100644 index 0000000000..7ffe83b08b --- /dev/null +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/ContainerInfo/style.css @@ -0,0 +1,24 @@ +.category-title { + font-weight: 600; + font-size: 2rem; +} + +.outer-box { + display: flex; + width: 100%; + border-bottom: 1px solid #DCDCDC; + padding: 1rem 1rem 1rem 2rem; +} + +.inner-box-title { + font-weight: 600; + width: 45%; + font-size: 1.4rem; + font-family: "IBM Plex Sans", sans-serif; +} + +.inner-box-value { + font-size: 1.4rem; + font-family: "IBM Plex Sans", sans-serif; + flex-grow: 1; +} \ No newline 
at end of file diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/index.tsx b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/index.tsx new file mode 100644 index 0000000000..220cb0f0cc --- /dev/null +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/index.tsx @@ -0,0 +1,99 @@ +import React, { useState } from "react"; +import Box from "@mui/material/Box"; +import CircularProgress from "@mui/material/CircularProgress"; +import { Accordion, AccordionDetails, AccordionSummary } from "@mui/material"; +import ExpandMoreIcon from "@mui/icons-material/ExpandMore"; + +import LineChartComponent from "./partials/LineChart"; +import { useMetricsDiscoveryDataFetch } from "../../../../../../../../../../../../../utils/fetchWrappers/metricsDiscoveryDataFetch"; +import { dimensionReverseMap, metricNameMap } from "./utils/constants"; + +import "./style.css"; + +export interface MetricsProps { + namespaceId: string; + pipelineId: string; + type: string; + vertexId?: string; +} + +export function Metrics({ namespaceId, pipelineId, type }: MetricsProps) { + const { + metricsDiscoveryData: discoveredMetrics, + error: discoveredMetricsError, + loading: discoveredMetricsLoading, + } = useMetricsDiscoveryDataFetch({ + objectType: dimensionReverseMap[type], + }); + + const [expanded, setExpanded] = useState>(new Set()); + + const handleAccordionChange = + (panel: string) => (_: any, isExpanded: boolean) => { + setExpanded((prevExpanded) => { + const newExpanded = new Set(prevExpanded); + isExpanded ? 
newExpanded.add(panel) : newExpanded.delete(panel); + return newExpanded; + }); + }; + + if (discoveredMetricsLoading) { + return ( + + + + ); + } + + if (discoveredMetricsError) { + return ( + + Failed to discover metrics for the {type}: {discoveredMetricsError} + + ); + } + + if (discoveredMetrics == undefined) return No metrics found; + + return ( + + {discoveredMetrics?.data?.map((metric: any) => { + const panelId = `${metric?.metric_name}-panel`; + return ( + + } + aria-controls={`${metric?.metric_name}-content`} + id={`${metric?.metric_name}-header`} + > + + {metricNameMap[metric?.metric_name] || metric?.metric_name} + + + + {expanded?.has(panelId) && ( + + )} + + + ); + })} + + ); +} diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/EmptyChart/index.tsx b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/EmptyChart/index.tsx new file mode 100644 index 0000000000..bff96c03c4 --- /dev/null +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/EmptyChart/index.tsx @@ -0,0 +1,16 @@ +import Box from "@mui/material/Box"; +import CircleIcon from "@mui/icons-material/Circle"; + +import "./style.css"; + +const EmptyChart = () => { + return ( + + + + No data for the selected filters. 
+ + ); +}; + +export default EmptyChart; diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/EmptyChart/style.css b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/EmptyChart/style.css new file mode 100644 index 0000000000..441c1a01df --- /dev/null +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/EmptyChart/style.css @@ -0,0 +1,20 @@ +.empty_chart_container { + display: flex; + flex-direction: column; + align-items: center; + justify-content: center; + height: 20rem; + border: 0.1rem dashed #ccc; + border-radius: 0.5rem; + padding: 2rem; +} + +.circle_icon { + color: #999; + margin-bottom: 1.5rem; + font-size: 4rem !important; +} + +.empty_text { + color: #666; +} diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/LineChart/index.tsx b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/LineChart/index.tsx new file mode 100644 index 0000000000..6152c83c29 --- /dev/null +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/LineChart/index.tsx @@ -0,0 +1,273 @@ +import { useCallback, useEffect, useState } from "react"; +import { + CartesianGrid, + Legend, + Line, + LineChart, + ResponsiveContainer, + Tooltip, + XAxis, + YAxis, +} from "recharts"; +import Box from "@mui/material/Box"; +import CircularProgress from "@mui/material/CircularProgress"; +import Dropdown from "../common/Dropdown"; +import TimeRange from "../common/TimeRange"; +import FiltersDropdown from "../common/FiltersDropdown"; +import EmptyChart from "../EmptyChart"; +import { useMetricsFetch } from 
"../../../../../../../../../../../../../../../utils/fetchWrappers/metricsFetch"; + +// TODO have a check for metricReq against metric object to ensure required fields are passed +const LineChartComponent = ({ namespaceId, pipelineId, type, metric }: any) => { + const [transformedData, setTransformedData] = useState([]); + const [chartLabels, setChartLabels] = useState([]); + const [metricsReq, setMetricsReq] = useState({ + metric_name: metric?.metric_name, + }); + const [paramsList, setParamsList] = useState([]); + // store all filters for each selected dimension + const [filtersList, setFiltersList] = useState([]); + const [filters, setFilters] = useState({}); + + const getRandomColor = useCallback((index: number) => { + const hue = (index * 137.508) % 360; + return `hsl(${hue}, 70%, 50%)`; + }, []); + + const getFilterValue = useCallback( + (filterName: string) => { + switch (filterName) { + case "namespace": + return namespaceId; + case "mvtx_name": + case "pipeline": + return pipelineId; + default: + return ""; + } + }, + [namespaceId, pipelineId] + ); + + const updateFilterList = useCallback( + (dimensionVal: string) => { + const newFilters = + metric?.dimensions + ?.find((dimension: any) => dimension?.name === dimensionVal) + ?.filters?.map((param: any) => ({ + name: param?.Name, + required: param?.Required, + })) || []; + setFiltersList(newFilters); + }, + [metric, setFiltersList] + ); + + const updateFilters = useCallback(() => { + const newFilters: any = {}; + filtersList?.forEach((filterElement: any) => { + if (filterElement?.name && filterElement?.required) { + newFilters[filterElement.name] = getFilterValue(filterElement.name); + } + }); + setFilters(newFilters); + }, [filtersList, getFilterValue, setFilters]); + + useEffect(() => { + if (metricsReq?.dimension) { + updateFilterList(metricsReq.dimension); + } + }, [metricsReq, updateFilterList]); + + useEffect(() => { + if (filtersList?.length) updateFilters(); + }, [filtersList]); + + const updateParams 
= useCallback(() => { + const initParams = [{ name: "dimension", required: "true" }]; + // taking dimension[0] as all will have same params + const newParams = + metric?.dimensions?.[0]?.params?.map((param: any) => ({ + name: param?.Name, + required: param?.Required, + })) || []; + + setParamsList([...initParams, ...newParams]); + }, [metric, setParamsList]); + + // update params once initially + useEffect(() => { + updateParams(); + }, [updateParams]); + + const { chartData, error, isLoading } = useMetricsFetch({ + metricReq: metricsReq, + filters, + }); + + const groupByLabel = useCallback((dimension: string) => { + switch (dimension) { + case "mono-vertex": + return "mvtx_name"; + default: + return dimension; + } + }, []); + + const updateChartData = useCallback(() => { + if (chartData) { + const labels: any[] = []; + const transformedData: any[] = []; + const label = groupByLabel(metricsReq?.dimension); + chartData?.forEach((item) => { + const labelVal = item?.metric?.[label]; + labels.push(labelVal); + item?.values?.forEach(([timestamp, value]: [number, string]) => { + const date = new Date(timestamp * 1000); + const hours = date.getHours().toString().padStart(2, "0"); + const minutes = date.getMinutes().toString().padStart(2, "0"); + const formattedTime = `${hours}:${minutes}`; + const ele = transformedData?.find( + (data) => data?.time === formattedTime + ); + if (!ele) { + const dataObject: Record = { time: formattedTime }; + dataObject[labelVal] = parseFloat(value); + transformedData.push(dataObject); + } else { + ele[labelVal] = parseFloat(value); + } + }); + }); + transformedData.sort((a, b) => { + const [hoursA, minutesA] = a.time.split(":").map(Number); + const [hoursB, minutesB] = b.time.split(":").map(Number); + return hoursA * 60 + minutesA - (hoursB * 60 + minutesB); + }); + setChartLabels(labels); + setTransformedData(transformedData); + } + }, [chartData, metricsReq, groupByLabel]); + + useEffect(() => { + if (chartData) updateChartData(); + }, 
[chartData, updateChartData]); + + if (paramsList?.length === 0) return <>; + + return ( + + + {paramsList + ?.filter((param) => !["start_time", "end_time"]?.includes(param.name)) + ?.map((param: any) => { + return ( + + + + ); + })} + + {paramsList + ?.filter((param) => ["start_time", "end_time"]?.includes(param.name)) + ?.map((param: any) => { + return ( + + + + ); + })} + + + {filtersList?.filter((filterEle: any) => !filterEle?.required)?.length > + 0 && ( + + Filters + !filterEle?.required + )} + namespaceId={namespaceId} + pipelineId={pipelineId} + type={type} + setFilters={setFilters} + /> + + )} + + {isLoading && ( + + + + )} + + {!isLoading && error && } + + {!isLoading && !error && transformedData?.length > 0 && ( + + + + + + + {chartLabels?.map((value, index) => ( + + ))} + + + + + + )} + + {!isLoading && !error && transformedData?.length === 0 && } + + ); +}; + +export default LineChartComponent; diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/common/Dropdown/index.tsx b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/common/Dropdown/index.tsx new file mode 100644 index 0000000000..4f950836f5 --- /dev/null +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/common/Dropdown/index.tsx @@ -0,0 +1,116 @@ +import { useEffect, useMemo, useState } from "react"; +import FormControl from "@mui/material/FormControl"; +import InputLabel from "@mui/material/InputLabel"; +import Select from "@mui/material/Select"; +import MenuItem from "@mui/material/MenuItem"; +import { + dimensionMap, + dimensionReverseMap, + durationMap, + durationOptions, + quantileMap, + quantileOptions, +} from "../../../utils/constants"; + +export interface MetricDropDownProps { + metric: any; + type: string; + field: string; + setMetricReq: any; +} 
+ +const Dropdown = ({ + metric, + type, + field, + setMetricReq, +}: MetricDropDownProps) => { + const getInitialValue = useMemo(() => { + switch (field) { + case "dimension": + return dimensionReverseMap[type]; + case "quantile": + return quantileOptions[quantileOptions.length-1]; + case "duration": + return durationOptions[0]; + default: + return ""; + } + }, [field, dimensionReverseMap, type, quantileOptions, durationOptions]); + + const [value, setValue] = useState(getInitialValue); + const fieldName = field.charAt(0).toUpperCase() + field.slice(1); + + // Update metricsReq with the initial value + useEffect(() => { + setMetricReq((prev: any) => ({ ...prev, [field]: getInitialValue })); + }, [getInitialValue, field, setMetricReq]); + + const getDropDownEntries = useMemo(() => { + switch (field) { + case "dimension": + return metric?.dimensions?.map((dimension: any) => ( + + {dimensionMap[dimension?.name]} + + )); + case "quantile": + return quantileOptions?.map((quantile: string) => ( + + {quantileMap[quantile]} + + )); + case "duration": + return durationOptions?.map((duration: string) => ( + + {durationMap[duration]} + + )); + default: + return <>; + } + }, [ + field, + metric, + dimensionMap, + quantileOptions, + quantileMap, + durationOptions, + durationMap, + ]); + + return ( + + + {fieldName} + + + + ); +}; + +export default Dropdown; diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/common/FiltersDropdown/index.tsx b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/common/FiltersDropdown/index.tsx new file mode 100644 index 0000000000..6cbc89794f --- /dev/null +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/common/FiltersDropdown/index.tsx @@ -0,0 +1,252 @@ +import React, { + useCallback, + useContext, + 
useEffect, + useMemo, + useState, +} from "react"; +import { + Button, + Popover, + MenuItem, + Typography, + Box, + IconButton, +} from "@mui/material"; +import AddIcon from "@mui/icons-material/Add"; +import CloseIcon from "@mui/icons-material/Close"; +import ArrowForwardIcon from "@mui/icons-material/ArrowForward"; +import { AppContextProps } from "../../../../../../../../../../../../../../../../types/declarations/app"; +import { AppContext } from "../../../../../../../../../../../../../../../../App"; +import { getBaseHref } from "../../../../../../../../../../../../../../../../utils"; + +export interface FiltersDropdownProps { + items: any; + namespaceId: string; + pipelineId: string; + type: string; + setFilters: any; +} + +const FiltersDropdown = ({ + items, + namespaceId, + pipelineId, + type, + setFilters, +}: FiltersDropdownProps) => { + const { host } = useContext(AppContext); + const [anchorEl, setAnchorEl] = useState(null); + const [selectedFilters, setSelectedFilters] = useState([]); + const [activeFilters, setActiveFilters] = useState([]); + const [podsData, setPodsData] = useState([]); + + useEffect(() => { + const filtersMap = selectedFilters.reduce((acc, filter) => { + const [key, value] = filter.split(":"); + if (key && value) { + acc[key] = value; + } + return acc; + }, {}); + + setFilters((prevState: any) => ({ + ...prevState, + ...filtersMap, + })); + }, [selectedFilters, setFilters]); + + const fetchPodsData = useCallback( + (callback: (data: any[] | null) => void) => { + const fetchData = async () => { + try { + const response = await fetch( + `${host}${getBaseHref()}/api/v1/namespaces/${namespaceId}/${ + type === "monoVertex" ? 
"mono-vertices" : "pipeline" + }/${pipelineId}/pods` + ); + if (!response.ok) { + callback(null); + return; + } + const data = await response.json(); + const formattedData = data?.data + ?.filter((pod: any) => !pod?.metadata?.name.includes("daemon")) + .map((pod: any) => ({ + name: pod?.metadata?.name, + })); + callback(formattedData); + } catch (error) { + callback(null); + } + }; + + fetchData(); + }, + [host, namespaceId, pipelineId, type] + ); + + useEffect(() => { + fetchPodsData((data) => { + if (data) { + setPodsData(data); + } + }); + }, [fetchPodsData]); + + const getFilterValues = useCallback( + (filterName: string) => { + switch (filterName) { + case "pod": + return podsData; + default: + return null; + } + }, + [podsData] + ); + + const getNestedFilterList = useMemo(() => { + return items?.map((item: any) => { + return { + name: item?.name, + subfilters: getFilterValues(item?.name), + }; + }); + }, [items, getFilterValues]); + + const handleClick = (event: any) => { + setAnchorEl(event?.currentTarget); + }; + + const handleClose = () => { + setAnchorEl(null); + setActiveFilters([]); + }; + + // Handle selection of filters + const handleFilterSelect = (filter: any, level: number) => { + if (filter?.subfilters) { + setActiveFilters((prev) => [...prev.slice(0, level), filter]); + } else { + setSelectedFilters((prev) => [ + ...prev.filter( + (filter) => !filter.startsWith(`${activeFilters[0]?.name}:`) + ), + `${activeFilters[0]?.name}:${filter.name}`, + ]); + handleClose(); + } + }; + + // Remove selected filter + const handleRemoveFilter = (filterName: string) => { + setSelectedFilters((prev) => + prev.filter((filter) => filter !== filterName) + ); + const key = filterName.split(":")[0]; + setFilters((prev: any) => { + // eslint-disable-next-line @typescript-eslint/no-unused-vars + const { [key]: _, ...rest } = prev; + return rest; + }); + }; + + // Render the dynamic filter menu levels + const renderMenuItems = (filters: any, level: number) => { + return 
filters.map((filter: any, index: number) => ( + handleFilterSelect(filter, level)} + sx={{ + width: "fit-content", + minWidth: "15rem", + fontSize: "1.6rem", + display: "flex", + justifyContent: "space-between", + }} + > + {filter?.name} + {level === 0 && ( + + )} + + )); + }; + + return ( + + {selectedFilters.map((filter, index) => ( + + {filter} + handleRemoveFilter(filter)} + sx={{ marginLeft: "0.5rem" }} + > + + + + ))} + + + + + + {/* Render the first level */} + {renderMenuItems(getNestedFilterList, 0)} + + {/* Render sublevels dynamically based on activeFilters */} + {activeFilters.map((filter: any, index) => ( + + {renderMenuItems(filter?.subfilters, index + 1)} + + ))} + + + + ); +}; + +export default FiltersDropdown; diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/common/TimeRange/index.tsx b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/common/TimeRange/index.tsx new file mode 100644 index 0000000000..9f27417960 --- /dev/null +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/common/TimeRange/index.tsx @@ -0,0 +1,62 @@ +import { useCallback, useEffect, useMemo, useState } from "react"; +import Box from "@mui/material/Box"; +import { DateTimePicker, LocalizationProvider } from "@mui/x-date-pickers"; +import { AdapterDayjs } from "@mui/x-date-pickers/AdapterDayjs"; +import dayjs from "dayjs"; + +export interface MetricTimeRangeProps { + field: string; + setMetricReq: any; +} + +const TimeRange = ({ field, setMetricReq }: MetricTimeRangeProps) => { + const getInitialValue = useMemo(() => { + switch (field) { + case "start_time": + return dayjs().subtract(1, "hour"); + case "end_time": + return dayjs(); + default: + return null; + } + }, [field]); + + const [time, setTime] = useState(getInitialValue); + + // 
Update metricsReq with the initial value + useEffect(() => { + setMetricReq((prev: any) => ({ ...prev, [field]: getInitialValue })); + }, [getInitialValue, field, setMetricReq]); + + const handleTimeChange = useCallback( + (newValue: dayjs.Dayjs | null) => { + if (newValue && newValue.isValid()) { + setTime(newValue); + setMetricReq((prev: any) => ({ ...prev, [field]: newValue })); + } + }, + [setTime] + ); + + return ( + + + + + + ); +}; + +export default TimeRange; diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/style.css b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/style.css new file mode 100644 index 0000000000..e69de29bb2 diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/utils/constants.ts b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/utils/constants.ts new file mode 100644 index 0000000000..3f5bf52f09 --- /dev/null +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/utils/constants.ts @@ -0,0 +1,37 @@ +export const durationOptions = ["1m", "5m", "10m"]; + +export const durationMap: { [p: string]: string } = { + "1m": "1 min", + "5m": "5 mins", + "10m": "10 mins", +}; + +export const quantileOptions = ["0.50", "0.90", "0.95", "0.99"]; + +export const quantileMap: { [p: string]: string } = { + "0.50": "50th Percentile", + "0.90": "90th Percentile", + "0.95": "95th Percentile", + "0.99": "99th Percentile", +}; + +export const dimensionMap: { [p: string]: string } = { + "mono-vertex": "MonoVertex", + pod: "Pod", + pipeline: "Pipeline", +}; + +export const dimensionReverseMap: { [p: string]: string } = { + monoVertex: "mono-vertex", + pipeline: "pipeline", + pod: "pod", +}; + +export const 
metricNameMap: { [p: string]: string } = { + monovtx_ack_time_bucket: "Mono Vertex Ack Time Latency (in micro seconds)", + monovtx_read_time_bucket: "Mono Vertex Read Time Latency (in micro seconds)", + monovtx_processing_time_bucket: + "Mono Vertex Processing Time Latency (in micro seconds)", + monovtx_sink_time_bucket: + "Mono Vertex Sink Write Time Latency (in micro seconds)", +}; diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/PodInfo/index.tsx b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/PodInfo/index.tsx deleted file mode 100644 index 2a64233333..0000000000 --- a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/PodInfo/index.tsx +++ /dev/null @@ -1,107 +0,0 @@ -import React from "react"; -import Box from "@mui/material/Box"; -import Table from "@mui/material/Table"; -import TableRow from "@mui/material/TableRow"; -import TableCell from "@mui/material/TableCell"; -import TableBody from "@mui/material/TableBody"; -import TableContainer from "@mui/material/TableContainer"; -import { getPodContainerUsePercentages } from "../../../../../../../../../../../../../utils"; -import { PodInfoProps } from "../../../../../../../../../../../../../types/declarations/pods"; - -export function PodInfo({ pod, podDetails, containerName }: PodInfoProps) { - const resourceUsage = getPodContainerUsePercentages( - pod, - podDetails, - containerName - ); - - // CPU - let usedCPU: string | undefined = - podDetails?.containerMap instanceof Map - ? podDetails?.containerMap?.get(containerName)?.cpu - : undefined; - let specCPU: string | undefined = - pod?.containerSpecMap instanceof Map - ? 
pod?.containerSpecMap?.get(containerName)?.cpu - : undefined; - if (!usedCPU) { - usedCPU = "?"; - } else if (usedCPU.endsWith("n")) { - usedCPU = `${(parseFloat(usedCPU) / 1e6).toFixed(2)}m`; - } - if (!specCPU) { - specCPU = "?"; - } - let cpuPercent = "unavailable"; - if (resourceUsage?.cpuPercent) { - cpuPercent = `${resourceUsage.cpuPercent?.toFixed(2)}%`; - } - // Memory - let usedMem: string | undefined = - podDetails?.containerMap instanceof Map - ? podDetails?.containerMap?.get(containerName)?.memory - : undefined; - let specMem: string | undefined = - pod?.containerSpecMap instanceof Map - ? pod?.containerSpecMap?.get(containerName)?.memory - : undefined; - if (!usedMem) { - usedMem = "?"; - } else if (usedMem.endsWith("Ki")) { - usedMem = `${(parseFloat(usedMem) / 1024).toFixed(2)}Mi`; - } - if (!specMem) { - specMem = "?"; - } - let memPercent = "unavailable"; - if (resourceUsage?.memoryPercent) { - memPercent = `${resourceUsage.memoryPercent.toFixed(2)}%`; - } - const podName = pod?.name?.slice(0, pod?.name?.lastIndexOf("-")); - - return ( - - Container Info - - - - - - Pod - {podName} - - - Container - {containerName} - - - CPU % - {cpuPercent} - - - CPU - {`${usedCPU} / ${specCPU}`} - - - MEMORY % - {memPercent} - - - MEMORY - {`${usedMem} / ${specMem}`} - - -
-
-
-
- ); -} diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/PodLogs/index.tsx b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/PodLogs/index.tsx index a040704274..565775eddd 100644 --- a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/PodLogs/index.tsx +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/PodLogs/index.tsx @@ -82,6 +82,7 @@ const logColor = (log: string, colorMode: string): string => { export function PodLogs({ namespaceId, podName, containerName }: PodLogsProps) { const [logs, setLogs] = useState([]); + const [previousLogs, setPreviousLogs] = useState([]); const [filteredLogs, setFilteredLogs] = useState([]); const [logRequestKey, setLogRequestKey] = useState(""); const [reader, setReader] = useState< @@ -92,11 +93,13 @@ export function PodLogs({ namespaceId, podName, containerName }: PodLogsProps) { const [paused, setPaused] = useState(false); const [colorMode, setColorMode] = useState("light"); const [logsOrder, setLogsOrder] = useState("desc"); + const [showPreviousLogs, setShowPreviousLogs] = useState(false); const { host } = useContext(AppContext); useEffect(() => { // reset logs in memory on any log source change setLogs([]); + setPreviousLogs([]); // and start logs again if paused setPaused(false); }, [namespaceId, podName, containerName]); @@ -150,6 +153,42 @@ export function PodLogs({ namespaceId, podName, containerName }: PodLogsProps) { .catch(console.error); }, [namespaceId, podName, containerName, reader, paused, host]); + useEffect(() => { + if (showPreviousLogs) { + setPreviousLogs([]); + const url = `${host}${getBaseHref()}/api/v1/namespaces/${namespaceId}/pods/${podName}/logs?container=${containerName}&follow=true&tailLines=${MAX_LOGS}&previous=true`; + fetch(url) + 
.then((response) => { + if (response && response.body) { + const reader = response.body + .pipeThrough(new TextDecoderStream()) + .getReader(); + + reader.read().then(function process({ done, value }) { + if (done) { + return; + } + if (value) { + setPreviousLogs((prevLogs) => { + const latestLogs = parsePodLogs(value); + let updated = [...prevLogs, ...latestLogs]; + if (updated.length > MAX_LOGS) { + updated = updated.slice(updated.length - MAX_LOGS); + } + return updated; + }); + } + return reader.read().then(process); + }); + } + }) + .catch(console.error); + } else { + // Clear previous logs when the checkbox is unchecked + setPreviousLogs([]); + } + }, [showPreviousLogs, namespaceId, podName, containerName, host]); + useEffect(() => { if (!search) { setFilteredLogs(logs); @@ -204,8 +243,13 @@ export function PodLogs({ namespaceId, podName, containerName }: PodLogsProps) { const logsBtnStyle = { height: "2.4rem", width: "2.4rem" }; return ( - - + + - - {logsOrder === "asc" && - filteredLogs.map((l: string, idx) => ( - - - - ))} - {logsOrder === "desc" && - filteredLogs - .slice() - .reverse() - .map((l: string, idx) => ( - - - - ))} + setShowPreviousLogs(event.target.checked)} + sx={{ "& .MuiSvgIcon-root": { fontSize: 24 }, height: "4.2rem" }} + /> + } + label={ + + Show previous terminated container + + } + /> + + + + {logsOrder === "asc" && + (showPreviousLogs ? previousLogs : filteredLogs).map( + (l: string, idx) => ( + + + + ) + )} + {logsOrder === "desc" && + (showPreviousLogs ? 
previousLogs : filteredLogs) + .slice() + .reverse() + .map((l: string, idx) => ( + + + + ))} + + ); diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/style.css b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/style.css new file mode 100644 index 0000000000..6a16ccb5d3 --- /dev/null +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/style.css @@ -0,0 +1,23 @@ +.vertex-details-tab-panel { + height: 100%; +} + +.vertex-details-tab.MuiTab-root { + font-size: 1.4rem !important; + font-style: normal !important; + font-weight: 600 !important; + color: #6B6C72 !important; + text-transform: none !important; +} + +.vertex-details-tab-selected.MuiTab-root { + font-size: 1.4rem !important; + font-style: normal !important; + font-weight: 600 !important; + color: #393A3D !important; + text-transform: none !important; +} + +.vertex-details-tabs > .MuiTabs-scroller > .MuiTabs-indicator { + background-color: #037C8F !important; +} diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/SearchablePodsHeatMap/index.tsx b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/SearchablePodsHeatMap/index.tsx index 9c25745b31..4f63351974 100644 --- a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/SearchablePodsHeatMap/index.tsx +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/SearchablePodsHeatMap/index.tsx @@ -10,16 +10,13 @@ export const SearchablePodsHeatMap = ({ onPodClick, selectedPod, }: SearchablePodsHeatMapProps) => { - // return ( - - {pods?.length > 0 && ( - - - Select a pod by resource - + pods?.length > 0 && ( + + + Select a pod by resource + + - )} - + + ) ); }; diff --git 
a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/SearchablePodsHeatMap/partials/PodsHeatMap/index.tsx b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/SearchablePodsHeatMap/partials/PodsHeatMap/index.tsx index cb076f3012..acec1f770b 100644 --- a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/SearchablePodsHeatMap/partials/PodsHeatMap/index.tsx +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/SearchablePodsHeatMap/partials/PodsHeatMap/index.tsx @@ -1,3 +1,4 @@ +import { useEffect, useRef, useState } from "react"; import HexagonHeatMap from "./partials/HexagonHeatMap"; import Box from "@mui/material/Box"; import { @@ -20,6 +21,26 @@ export const PodsHeatMap = ({ onPodClick, selectedPod, }: PodsHeatMapProps) => { + const containerRef = useRef(null); + const [maximumWidth, setMaximumWidth] = useState(0); + + useEffect(() => { + const updateWidth = () => { + if (containerRef.current) { + setMaximumWidth(containerRef.current.offsetWidth * 0.7); + } + }; + + const resizeObserver = new ResizeObserver(updateWidth); + if (containerRef.current) { + resizeObserver.observe(containerRef.current); + } + + return () => { + resizeObserver.disconnect(); + }; + }, []); + const cpuColors = { infinite: [100, 100000], red: [76, 1000], @@ -173,15 +194,12 @@ export const PodsHeatMap = ({ @@ -191,36 +209,26 @@ export const PodsHeatMap = ({ - + - + diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/SearchablePodsHeatMap/partials/PodsHeatMap/partials/HexagonHeatMap/index.tsx b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/SearchablePodsHeatMap/partials/PodsHeatMap/partials/HexagonHeatMap/index.tsx index ff169b0a9a..57ab0ab7f1 100644 --- 
a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/SearchablePodsHeatMap/partials/PodsHeatMap/partials/HexagonHeatMap/index.tsx +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/SearchablePodsHeatMap/partials/PodsHeatMap/partials/HexagonHeatMap/index.tsx @@ -12,10 +12,9 @@ import { import "./style.css"; export const MAXIMUM_RADIUS = 15; -export const MAXIMUM_WIDTH = 350; export const MAXIMUM_HEIGHT = 100; export const TOOLTIP_OFFSET = 0; -export const MIN_HEXAGONS = 7; +export const MIN_HEXAGONS = 1; export const DEFAULT_COLOR = "#76b3f7"; export const DEFAULT_STROKE = "rgb(25, 118, 210)"; //"#ffffff" export const DEFAULT_OPACITY = 0.5; @@ -28,6 +27,7 @@ function HexagonHeatMap({ tooltipComponent, tooltipClass, selected, + containerWidth, }: HexagonHeatMapProps) { const [hover, setHover] = useState(null); const margin = { @@ -36,11 +36,12 @@ function HexagonHeatMap({ bottom: 0, left: 30, }; + const MAXIMUM_WIDTH = containerWidth; const sqrtOfTotal = Math.ceil(Math.sqrt(data.length)); // The number of columns and rows of the heatmap - // encourage more count along horizontal direction by adding 3 + // encourage more count along horizontal direction by adding 1 const mapColumns = data.length > sqrtOfTotal + MIN_HEXAGONS ? 
sqrtOfTotal + MIN_HEXAGONS diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/SearchablePodsHeatMap/partials/PodsHeatMap/style.css b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/SearchablePodsHeatMap/partials/PodsHeatMap/style.css index 8f06dda051..e1bc01a862 100644 --- a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/SearchablePodsHeatMap/partials/PodsHeatMap/style.css +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/SearchablePodsHeatMap/partials/PodsHeatMap/style.css @@ -11,6 +11,8 @@ font-size: 2rem; font-style: normal; font-weight: 500; + margin-left: 4%; + width: 46%; line-height: 2.8rem; } diff --git a/ui/src/components/plugin/NumaflowMonitorApp/App.tsx b/ui/src/components/plugin/NumaflowMonitorApp/App.tsx index 9b65030418..4309a38c64 100644 --- a/ui/src/components/plugin/NumaflowMonitorApp/App.tsx +++ b/ui/src/components/plugin/NumaflowMonitorApp/App.tsx @@ -215,6 +215,7 @@ function App(props: AppProps) { namespace, isPlugin: true, isReadOnly: systemInfo?.isReadOnly || false, + disableMetricsCharts: systemInfo?.disableMetricsCharts ?? 
true, sidebarProps, setSidebarProps, errors, diff --git a/ui/src/types/declarations/app.d.ts b/ui/src/types/declarations/app.d.ts index 74a3356a0f..f601ffbf7f 100644 --- a/ui/src/types/declarations/app.d.ts +++ b/ui/src/types/declarations/app.d.ts @@ -22,6 +22,7 @@ export interface AppContextProps { namespace: string; isPlugin: boolean; isReadOnly: boolean; + disableMetricsCharts: boolean; sidebarProps?: SlidingSideBarProps; setSidebarProps: (props: SlidingSideBarProps | undefined) => void; errors: AppError[]; diff --git a/ui/src/types/declarations/pods.d.ts b/ui/src/types/declarations/pods.d.ts index 31d551da0f..b108a8fbc8 100644 --- a/ui/src/types/declarations/pods.d.ts +++ b/ui/src/types/declarations/pods.d.ts @@ -88,19 +88,48 @@ export interface HexagonHeatMapProps { tooltipComponent: any; tooltipClass?: string; selected: string | undefined; + containerWidth: number; } export interface PodDetailProps { namespaceId: string; + pipelineId: string; + type: string; containerName: string; pod: Pod; podDetails: PodDetail; } - +export interface ContainerInfoProps { + state: string; + restartCount: number; + lastStartedAt: string; + lastTerminationReason?: string; + lastTerminationMessage?: string; + waitingReason?: string; + waitingMessage?: string; + requestedCPU?: string; + requestedMemory?: string; + limitCPU?: string; + limitMemory?: string; + totalCPU?: string; + totalMemory?: string; +} + +export interface PodSpecificInfoProps { + name: string; + status: string; + message: string; + reason: string; + restartCount: number; + totalCPU: string; + totalMemory: string; +} export interface PodInfoProps { pod: Pod; podDetails: PodDetail; containerName: string; + containerInfo: ContainerInfoProps; + podSpecificInfo: PodSpecificInfoProps; } export interface PodLogsProps { diff --git a/ui/src/utils/fetchWrappers/metricsDiscoveryDataFetch.ts b/ui/src/utils/fetchWrappers/metricsDiscoveryDataFetch.ts new file mode 100644 index 0000000000..f169326a32 --- /dev/null +++ 
b/ui/src/utils/fetchWrappers/metricsDiscoveryDataFetch.ts @@ -0,0 +1,51 @@ +import { useContext, useEffect, useState } from "react"; +import { getBaseHref } from "../index"; +import { AppContextProps } from "../../types/declarations/app"; +import { AppContext } from "../../App"; + +export interface MetricsDiscoveryDataProps { + objectType: string; +} + +export interface MetricsDiscoveryInfo { + data: any; +} + +export const useMetricsDiscoveryDataFetch = ( + props: MetricsDiscoveryDataProps +) => { + const [metricsDiscoveryData, setMetricsDiscoveryData] = useState< + MetricsDiscoveryInfo | undefined + >(undefined); + const [errMsg, setErrMsg] = useState(""); + const [loading, setLoading] = useState(true); + + const { host } = useContext(AppContext); + const { objectType } = props; + + useEffect(() => { + const getMetricsDiscoveryData = async () => { + try { + setLoading(true); + const response = await fetch( + `${host}${getBaseHref()}/api/v1/metrics-discovery/object/${objectType}` + ); + if (!response.ok) { + const errMsg = `Failed to discover metrics for the ${objectType}. 
Response code: ${response.status}`; + setErrMsg(errMsg); + setLoading(false); + } + const data = await response.json(); + setMetricsDiscoveryData(data); + setLoading(false); + } catch (e: any) { + setErrMsg(e.message); + setLoading(false); + } + }; + + getMetricsDiscoveryData(); + }, []); + + return { metricsDiscoveryData, error: errMsg, loading }; +}; diff --git a/ui/src/utils/fetchWrappers/metricsFetch.ts b/ui/src/utils/fetchWrappers/metricsFetch.ts new file mode 100644 index 0000000000..e33414a68e --- /dev/null +++ b/ui/src/utils/fetchWrappers/metricsFetch.ts @@ -0,0 +1,56 @@ +import { useEffect, useState, useContext } from "react"; +import { AppContextProps } from "../../types/declarations/app"; +import { AppContext } from "../../App"; +import { getBaseHref } from "../index"; + +export interface useMetricsFetchProps { + metricReq: any; + filters: any; +} + +export const useMetricsFetch = ({ + metricReq, + filters, +}: useMetricsFetchProps) => { + const { host } = useContext(AppContext); + const urlPath = `${host}${getBaseHref()}/api/v1/metrics-proxy`; + const [chartData, setChartData] = useState(null); + const [error, setError] = useState(null); + const [isLoading, setIsLoading] = useState(false); + + useEffect(() => { + const fetchData = async () => { + setIsLoading(true); + + try { + const response = await fetch(urlPath, { + method: "POST", + headers: { + "Content-Type": "application/json", + }, + body: JSON.stringify({ ...metricReq, filters }), + }); + const data = await response.json(); + if (data?.data === null) { + setChartData(null); + setError(data?.errMsg); + } else { + setChartData(data?.data); + setError(null); + } + } catch (e) { + console.error("Error fetching data:", e); + if (e instanceof Error) { + setError(e); + } else { + setError(null); + } + } finally { + setIsLoading(false); + } + }; + fetchData(); + }, [metricReq, filters]); + + return { chartData, error, isLoading }; +}; diff --git a/ui/src/utils/index.tsx b/ui/src/utils/index.tsx index 
231e944545..3c3cab7127 100644 --- a/ui/src/utils/index.tsx +++ b/ui/src/utils/index.tsx @@ -194,6 +194,47 @@ export function quantityToScalar(quantity: string): number | bigint { } } + +export function calculateMemoryPercent(usageMemory: string, requestedMemory: string): string { + try { + // Extract numeric values from strings (removing "Mi" suffix) + const usage = parseInt(usageMemory.replace("Mi", ""), 10); + const request = parseInt(requestedMemory.replace("Mi", ""), 10); + + // Handle invalid input or zero requested memory + if (isNaN(usage) || isNaN(request) || request === 0) { + return "unavailable" + } + + // Calculate percentage + const percent = (usage / request) * 100; + return `${percent.toFixed(2)}%`; + + } catch (error) { + return "unavailable"; + } +} + +export function calculateCPUPercent(usageCPU: string, requestedCPU: string): string{ + try { + // Extract numeric values from strings (removing "m" suffix) + const usage = parseInt(usageCPU.replace("m", ""), 10); + const request = parseInt(requestedCPU.replace("m", ""), 10); + + // Handle invalid input or zero requested CPU + if (isNaN(usage) || isNaN(request) || request === 0) { + return "unavailable"; + } + + // Calculate percentage + const percent = (usage / request) * 100; + return `${percent.toFixed(2)}%`; + } catch (error) { + return "unavailable"; + } +} + + export function getPodContainerUsePercentages( pod: Pod, podDetails: PodDetail, diff --git a/ui/src/utils/models/systemInfo.ts b/ui/src/utils/models/systemInfo.ts index 4d66f62714..f9d59bbdba 100644 --- a/ui/src/utils/models/systemInfo.ts +++ b/ui/src/utils/models/systemInfo.ts @@ -2,5 +2,6 @@ export interface SystemInfo { managedNamespace: string; namespaced: boolean; isReadOnly: boolean; + disableMetricsCharts: boolean; version: string; } diff --git a/ui/yarn.lock b/ui/yarn.lock index 41a958626a..3fde2777e1 100644 --- a/ui/yarn.lock +++ b/ui/yarn.lock @@ -1147,6 +1147,13 @@ dependencies: regenerator-runtime "^0.14.0" 
+"@babel/runtime@^7.25.7": + version "7.25.7" + resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.25.7.tgz#7ffb53c37a8f247c8c4d335e89cdf16a2e0d0fb6" + integrity sha512-FjoyLe754PMiYsFaN5C94ttGiOmBNYTf6pLr4xXHAT5uctHb092PBszndLDR5XA/jghQvn4n7JMHl7dmTgbm9w== + dependencies: + regenerator-runtime "^0.14.0" + "@babel/template@^7.24.7", "@babel/template@^7.3.3": version "7.24.7" resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.24.7.tgz#02efcee317d0609d2c07117cb70ef8fb17ab7315" @@ -1945,6 +1952,11 @@ resolved "https://registry.yarnpkg.com/@mui/types/-/types-7.2.15.tgz#dadd232fe9a70be0d526630675dff3b110f30b53" integrity sha512-nbo7yPhtKJkdf9kcVOF8JZHPZTmqXjJ/tI0bdWgHg5tp9AnIN4Y7f7wm9T+0SyGYJk76+GYZ8Q5XaTYAsUHN0Q== +"@mui/types@^7.2.18": + version "7.2.18" + resolved "https://registry.yarnpkg.com/@mui/types/-/types-7.2.18.tgz#4b6385ed2f7828ef344113cdc339d6fdf8e4bc23" + integrity sha512-uvK9dWeyCJl/3ocVnTOS6nlji/Knj8/tVqVX03UVTpdmTJYu/s4jtDd9Kvv0nRGE0CUSNW1UYAci7PYypjealg== + "@mui/utils@^5.16.4": version "5.16.4" resolved "https://registry.yarnpkg.com/@mui/utils/-/utils-5.16.4.tgz#8e50e27a630e3d8eeb3e9d3bc31cbb0e4956f5fd" @@ -1956,6 +1968,39 @@ prop-types "^15.8.1" react-is "^18.3.1" +"@mui/utils@^5.16.6 || ^6.0.0": + version "6.1.4" + resolved "https://registry.yarnpkg.com/@mui/utils/-/utils-6.1.4.tgz#44deebc8e576695836c9bda870d755c8f079e54d" + integrity sha512-v0wXkyh3/Hpw48ivlNvgs4ZT6M8BIEAMdLgvct59rQBggYFhoAVKyliKDzdj37CnIlYau3DYIn7x5bHlRYFBow== + dependencies: + "@babel/runtime" "^7.25.7" + "@mui/types" "^7.2.18" + "@types/prop-types" "^15.7.13" + clsx "^2.1.1" + prop-types "^15.8.1" + react-is "^18.3.1" + +"@mui/x-date-pickers@^7.21.0": + version "7.21.0" + resolved "https://registry.yarnpkg.com/@mui/x-date-pickers/-/x-date-pickers-7.21.0.tgz#78de7e81bdf863d443d7963777dfc3052ae3c320" + integrity sha512-WLpuTu3PvhYwd7IAJSuDWr1Zd8c5C8Cc7rpAYCaV5+tGBoEP0C2UKqClMR4F1wTiU2a7x3dzgQzkcgK72yyqDw== + dependencies: + "@babel/runtime" 
"^7.25.7" + "@mui/utils" "^5.16.6 || ^6.0.0" + "@mui/x-internals" "7.21.0" + "@types/react-transition-group" "^4.4.11" + clsx "^2.1.1" + prop-types "^15.8.1" + react-transition-group "^4.4.5" + +"@mui/x-internals@7.21.0": + version "7.21.0" + resolved "https://registry.yarnpkg.com/@mui/x-internals/-/x-internals-7.21.0.tgz#daca984059015b27efdb47bb44dc7ff4a6816673" + integrity sha512-94YNyZ0BhK5Z+Tkr90RKf47IVCW8R/1MvdUhh6MCQg6sZa74jsX+x+gEZ4kzuCqOsuyTyxikeQ8vVuCIQiP7UQ== + dependencies: + "@babel/runtime" "^7.25.7" + "@mui/utils" "^5.16.6 || ^6.0.0" + "@nicolo-ribaudo/eslint-scope-5-internals@5.1.1-v1": version "5.1.1-v1" resolved "https://registry.yarnpkg.com/@nicolo-ribaudo/eslint-scope-5-internals/-/eslint-scope-5-internals-5.1.1-v1.tgz#dbf733a965ca47b1973177dc0bb6c889edcfb129" @@ -2382,7 +2427,7 @@ resolved "https://registry.yarnpkg.com/@types/cookie/-/cookie-0.4.1.tgz#bfd02c1f2224567676c1545199f87c3a861d878d" integrity sha512-XW/Aa8APYr6jSVVA1y/DEIZX0/GMKLEVekNG727R8cs56ahETkRAy/3DR7+fJyh7oUgGwNQaRfXCun0+KbWY7Q== -"@types/d3-array@*": +"@types/d3-array@*", "@types/d3-array@^3.0.3": version "3.2.1" resolved "https://registry.yarnpkg.com/@types/d3-array/-/d3-array-3.2.1.tgz#1f6658e3d2006c4fceac53fde464166859f8b8c5" integrity sha512-Y2Jn2idRrLzUfAKV2LyRImR+y4oa2AntrgID95SHJxuMUrkNXmanDSed71sRNZysveJVt1hLLemQZIady0FpEg== @@ -2446,7 +2491,7 @@ resolved "https://registry.yarnpkg.com/@types/d3-dsv/-/d3-dsv-3.0.7.tgz#0a351f996dc99b37f4fa58b492c2d1c04e3dac17" integrity sha512-n6QBF9/+XASqcKK6waudgL0pf/S5XHPPI8APyMLLUHd8NqouBGLsU8MgtO7NINGtPBtk9Kko/W4ea0oAspwh9g== -"@types/d3-ease@*": +"@types/d3-ease@*", "@types/d3-ease@^3.0.0": version "3.0.2" resolved "https://registry.yarnpkg.com/@types/d3-ease/-/d3-ease-3.0.2.tgz#e28db1bfbfa617076f7770dd1d9a48eaa3b6c51b" integrity sha512-NcV1JjO5oDzoK26oMzbILE6HW7uVXOHLQvHshBUW4UMdZGfiY6v5BeQwh9a9tCzv+CeefZQHJt5SRgK154RtiA== @@ -2480,7 +2525,7 @@ resolved 
"https://registry.yarnpkg.com/@types/d3-hierarchy/-/d3-hierarchy-3.1.7.tgz#6023fb3b2d463229f2d680f9ac4b47466f71f17b" integrity sha512-tJFtNoYBtRtkNysX1Xq4sxtjK8YgoWUNpIiUee0/jHGRwqvzYxkq0hGVbbOGSz+JgFxxRu4K8nb3YpG3CMARtg== -"@types/d3-interpolate@*": +"@types/d3-interpolate@*", "@types/d3-interpolate@^3.0.1": version "3.0.4" resolved "https://registry.yarnpkg.com/@types/d3-interpolate/-/d3-interpolate-3.0.4.tgz#412b90e84870285f2ff8a846c6eb60344f12a41c" integrity sha512-mgLPETlrpVV1YRJIglr4Ez47g7Yxjl1lj7YKsiMCb27VJH9W8NVM6Bb9d8kkpG/uAQS5AmbA48q2IAolKKo1MA== @@ -2524,7 +2569,7 @@ resolved "https://registry.yarnpkg.com/@types/d3-scale-chromatic/-/d3-scale-chromatic-3.0.3.tgz#fc0db9c10e789c351f4c42d96f31f2e4df8f5644" integrity sha512-laXM4+1o5ImZv3RpFAsTRn3TEkzqkytiOY0Dz0sq5cnd1dtNlk6sHLon4OvqaiJb28T0S/TdsBI3Sjsy+keJrw== -"@types/d3-scale@*": +"@types/d3-scale@*", "@types/d3-scale@^4.0.2": version "4.0.8" resolved "https://registry.yarnpkg.com/@types/d3-scale/-/d3-scale-4.0.8.tgz#d409b5f9dcf63074464bf8ddfb8ee5a1f95945bb" integrity sha512-gkK1VVTr5iNiYJ7vWDI+yUFFlszhNMtVeneJ6lUTKPjprsvLLI9/tgEGiXJOnlINJA8FyA88gfnQsHbybVZrYQ== @@ -2543,7 +2588,7 @@ resolved "https://registry.yarnpkg.com/@types/d3-selection/-/d3-selection-3.0.10.tgz#98cdcf986d0986de6912b5892e7c015a95ca27fe" integrity sha512-cuHoUgS/V3hLdjJOLTT691+G2QoqAjCVLmr4kJXR4ha56w1Zdu8UUQ5TxLRqudgNjwXeQxKMq4j+lyf9sWuslg== -"@types/d3-shape@*": +"@types/d3-shape@*", "@types/d3-shape@^3.1.0": version "3.1.6" resolved "https://registry.yarnpkg.com/@types/d3-shape/-/d3-shape-3.1.6.tgz#65d40d5a548f0a023821773e39012805e6e31a72" integrity sha512-5KKk5aKGu2I+O6SONMYSNflgiP0WfZIQvVUMan50wHsLG1G94JlxEVnCpQARfTtzytuY0p/9PXXZb3I7giofIA== @@ -2562,7 +2607,7 @@ resolved "https://registry.yarnpkg.com/@types/d3-time-format/-/d3-time-format-4.0.3.tgz#d6bc1e6b6a7db69cccfbbdd4c34b70632d9e9db2" integrity sha512-5xg9rC+wWL8kdDj153qZcsJ0FWiFt0J5RB6LYUNZjwSnesfblqrI/bJ1wBdJ8OQfncgbJG5+2F+qfqnqyzYxyg== -"@types/d3-time@*": 
+"@types/d3-time@*", "@types/d3-time@^3.0.0": version "3.0.3" resolved "https://registry.yarnpkg.com/@types/d3-time/-/d3-time-3.0.3.tgz#3c186bbd9d12b9d84253b6be6487ca56b54f88be" integrity sha512-2p6olUZ4w3s+07q3Tm2dbiMZy5pCDfYwtLXXHUnVzXgQlZ/OyPtUz6OL382BkOuGlLXqfT+wqv8Fw2v8/0geBw== @@ -2572,7 +2617,7 @@ resolved "https://registry.yarnpkg.com/@types/d3-time/-/d3-time-2.1.4.tgz#43587aa57d565ab60a1d2201edeebc497d5c1252" integrity sha512-BTfLsxTeo7yFxI/haOOf1ZwJ6xKgQLT9dCp+EcmQv87Gox6X+oKl4mLKfO6fnWm3P22+A6DknMNEZany8ql2Rw== -"@types/d3-timer@*": +"@types/d3-timer@*", "@types/d3-timer@^3.0.0": version "3.0.2" resolved "https://registry.yarnpkg.com/@types/d3-timer/-/d3-timer-3.0.2.tgz#70bbda77dc23aa727413e22e214afa3f0e852f70" integrity sha512-Ps3T8E8dZDam6fUyNiMkekK3XUsaUEik+idO9/YjPtfj2qruF8tFBXS7XhtE4iIXBLxhmLjP3SXpLhVf21I9Lw== @@ -2803,6 +2848,11 @@ resolved "https://registry.yarnpkg.com/@types/prop-types/-/prop-types-15.7.12.tgz#12bb1e2be27293c1406acb6af1c3f3a1481d98c6" integrity sha512-5zvhXYtRNRluoE/jAp4GVsSduVUzNWKkOZrCDBWYtE7biZywwdC2AcEzg+cSMLFRfVgeAFqpfNabiPjxFddV1Q== +"@types/prop-types@^15.7.13": + version "15.7.13" + resolved "https://registry.yarnpkg.com/@types/prop-types/-/prop-types-15.7.13.tgz#2af91918ee12d9d32914feb13f5326658461b451" + integrity sha512-hCZTSvwbzWGvhqxp/RqVqwU999pBf2vp7hzIjiYOsl8wqOmUxkQ6ddw1cV3l8811+kdUFus/q4d1Y3E3SyEifA== + "@types/q@^1.5.1": version "1.5.8" resolved "https://registry.yarnpkg.com/@types/q/-/q-1.5.8.tgz#95f6c6a08f2ad868ba230ead1d2d7f7be3db3837" @@ -2856,6 +2906,13 @@ dependencies: "@types/react" "*" +"@types/react-transition-group@^4.4.11": + version "4.4.11" + resolved "https://registry.yarnpkg.com/@types/react-transition-group/-/react-transition-group-4.4.11.tgz#d963253a611d757de01ebb241143b1017d5d63d5" + integrity sha512-RM05tAniPZ5DZPzzNFP+DmrcOdD0efDUxMy3145oljWSl3x9ZV5vhme98gTxFrj2lhXvmGNnUiuDyJgY9IKkNA== + dependencies: + "@types/react" "*" + "@types/react@*", "@types/react@^18.0.0": version "18.3.3" resolved 
"https://registry.yarnpkg.com/@types/react/-/react-18.3.3.tgz#9679020895318b0915d7a3ab004d92d33375c45f" @@ -4164,7 +4221,7 @@ clsx@^1.0.4, clsx@^1.1.1: resolved "https://registry.yarnpkg.com/clsx/-/clsx-1.2.1.tgz#0ddc4a20a549b59c93a4116bb26f5294ca17dc12" integrity sha512-EcR6r5a8bj6pu3ycsa/E/cKVGuTgZJZdsyUYHOksG/UHIiKfjxzRxYJpyVBwYaQeOvghal9fcc4PidlgzugAQg== -clsx@^2.1.0, clsx@^2.1.1: +clsx@^2.0.0, clsx@^2.1.0, clsx@^2.1.1: version "2.1.1" resolved "https://registry.yarnpkg.com/clsx/-/clsx-2.1.1.tgz#eed397c9fd8bd882bfb18deab7102049a2f32999" integrity sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA== @@ -4611,7 +4668,7 @@ d3-array@2, d3-array@^2.3.0: dependencies: internmap "^1.0.0" -"d3-array@2 - 3", "d3-array@2.10.0 - 3": +"d3-array@2 - 3", "d3-array@2.10.0 - 3", d3-array@^3.1.6: version "3.2.4" resolved "https://registry.yarnpkg.com/d3-array/-/d3-array-3.2.4.tgz#15fec33b237f97ac5d7c986dc77da273a8ed0bb5" integrity sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg== @@ -4646,7 +4703,7 @@ d3-color@1: d3-dispatch "1 - 3" d3-selection "3" -"d3-ease@1 - 3": +"d3-ease@1 - 3", d3-ease@^3.0.1: version "3.0.1" resolved "https://registry.yarnpkg.com/d3-ease/-/d3-ease-3.0.1.tgz#9658ac38a2140d59d346160f1f6c30fda0bd12f4" integrity sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w== @@ -4661,7 +4718,7 @@ d3-color@1: resolved "https://registry.yarnpkg.com/d3-format/-/d3-format-3.1.0.tgz#9260e23a28ea5cb109e93b21a06e24e2ebd55641" integrity sha512-YyUI6AEuY/Wpt8KWLgZHsIU86atmikuoOmCfommt0LYHiQSPjvX2AcFc38PX0CBpr2RCyZhjex+NS/LPOv6YqA== -"d3-interpolate@1 - 3", "d3-interpolate@1.2.0 - 3": +"d3-interpolate@1 - 3", "d3-interpolate@1.2.0 - 3", d3-interpolate@^3.0.1: version "3.0.1" resolved "https://registry.yarnpkg.com/d3-interpolate/-/d3-interpolate-3.0.1.tgz#3c47aa5b32c5b3dfb56ef3fd4342078a632b400d" integrity 
sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g== @@ -4687,6 +4744,11 @@ d3-path@1, d3-path@^1.0.5: resolved "https://registry.yarnpkg.com/d3-path/-/d3-path-1.0.9.tgz#48c050bb1fe8c262493a8caf5524e3e9591701cf" integrity sha512-VLaYcn81dtHVTjEHd8B+pbe9yHWpXKZUC87PzoFmsFrJqgFwDe/qxfp5MlfsfM1V5E/iVt0MmEbWQ7FVIXh/bg== +d3-path@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/d3-path/-/d3-path-3.1.0.tgz#22df939032fb5a71ae8b1800d61ddb7851c42526" + integrity sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ== + d3-scale@^3.3.0: version "3.3.0" resolved "https://registry.yarnpkg.com/d3-scale/-/d3-scale-3.3.0.tgz#28c600b29f47e5b9cd2df9749c206727966203f3" @@ -4721,6 +4783,13 @@ d3-shape@^1.0.6, d3-shape@^1.2.0: dependencies: d3-path "1" +d3-shape@^3.1.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/d3-shape/-/d3-shape-3.2.0.tgz#a1a839cbd9ba45f28674c69d7f855bcf91dfc6a5" + integrity sha512-SaLBuwGm3MOViRq2ABk3eLoxwZELpH6zhl3FbAoJ7Vm1gofKx6El1Ib5z23NUEhF9AsGl7y+dzLe5Cw2AArGTA== + dependencies: + d3-path "^3.1.0" + "d3-time-format@2 - 3": version "3.0.0" resolved "https://registry.yarnpkg.com/d3-time-format/-/d3-time-format-3.0.0.tgz#df8056c83659e01f20ac5da5fdeae7c08d5f1bb6" @@ -4742,14 +4811,14 @@ d3-shape@^1.0.6, d3-shape@^1.2.0: dependencies: d3-array "2" -"d3-time@1 - 3", "d3-time@2.1.1 - 3": +"d3-time@1 - 3", "d3-time@2.1.1 - 3", d3-time@^3.0.0: version "3.1.0" resolved "https://registry.yarnpkg.com/d3-time/-/d3-time-3.1.0.tgz#9310db56e992e3c0175e1ef385e545e48a9bb5c7" integrity sha512-VqKjzBLejbSMT4IgbmVgDjpkYrNWUYJnbCGo874u7MMKIWsILRX+OpX/gTk8MqjpT1A/c6HY2dCA77ZN0lkQ2Q== dependencies: d3-array "2 - 3" -"d3-timer@1 - 3": +"d3-timer@1 - 3", d3-timer@^3.0.1: version "3.0.1" resolved "https://registry.yarnpkg.com/d3-timer/-/d3-timer-3.0.1.tgz#6284d2a2708285b1abb7e201eda4380af35e63b0" integrity 
sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA== @@ -4825,6 +4894,11 @@ data-view-byte-offset@^1.0.0: es-errors "^1.3.0" is-data-view "^1.0.1" +dayjs@^1.11.13: + version "1.11.13" + resolved "https://registry.yarnpkg.com/dayjs/-/dayjs-1.11.13.tgz#92430b0139055c3ebb60150aa13e860a4b5a366c" + integrity sha512-oaMBel6gjolK862uaPQOVTA7q3TZhuSvuMQAAglQDOWYO9A91IrAOUJEyKVlqJlHE0vq5p5UXxzdPfMH/x6xNg== + debounce@^1.2.1: version "1.2.1" resolved "https://registry.yarnpkg.com/debounce/-/debounce-1.2.1.tgz#38881d8f4166a5c5848020c11827b834bcb3e0a5" @@ -4851,6 +4925,11 @@ debug@^3.2.7: dependencies: ms "^2.1.1" +decimal.js-light@^2.4.1: + version "2.5.1" + resolved "https://registry.yarnpkg.com/decimal.js-light/-/decimal.js-light-2.5.1.tgz#134fd32508f19e208f4fb2f8dac0d2626a867934" + integrity sha512-qIMFpTMZmny+MMIitAB6D7iVPEorVw6YQRWkvarTkT4tBeSLLiHzcwj6q0MmYSFCiVpiqPJTJEYIrpcPzVEIvg== + decimal.js@^10.2.1: version "10.4.3" resolved "https://registry.yarnpkg.com/decimal.js/-/decimal.js-10.4.3.tgz#1044092884d245d1b7f65725fa4ad4c6f781cc23" @@ -5755,7 +5834,7 @@ etag@~1.8.1: resolved "https://registry.yarnpkg.com/etag/-/etag-1.8.1.tgz#41ae2eeb65efa62268aebfea83ac7d79299b0887" integrity sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg== -eventemitter3@^4.0.0: +eventemitter3@^4.0.0, eventemitter3@^4.0.1: version "4.0.7" resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-4.0.7.tgz#2de9b68f6528d5644ef5c59526a1b4a07306169f" integrity sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw== @@ -5851,6 +5930,11 @@ fast-diff@^1.1.2: resolved "https://registry.yarnpkg.com/fast-diff/-/fast-diff-1.3.0.tgz#ece407fa550a64d638536cd727e129c61616e0f0" integrity sha512-VxPP4NqbUjj6MaAOafWeUn2cXWLcCtljklUtZf0Ind4XQ+QPtmA0b18zZy0jIQx+ExRVCR/ZQpBmik5lXshNsw== +fast-equals@^5.0.1: + version "5.0.1" + resolved 
"https://registry.yarnpkg.com/fast-equals/-/fast-equals-5.0.1.tgz#a4eefe3c5d1c0d021aeed0bc10ba5e0c12ee405d" + integrity sha512-WF1Wi8PwwSY7/6Kx0vKXtw8RwuSGoM1bvDaJbu7MxDlR1vovZjIAKrnzyrThgAjm6JDTu0fVgWXDlMGspodfoQ== + fast-glob@^3.2.9, fast-glob@^3.3.0: version "3.3.2" resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.3.2.tgz#a904501e57cfdd2ffcded45e99a54fef55e46129" @@ -9645,6 +9729,15 @@ react-shallow-renderer@^16.15.0: object-assign "^4.1.1" react-is "^16.12.0 || ^17.0.0 || ^18.0.0" +react-smooth@^4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/react-smooth/-/react-smooth-4.0.1.tgz#6200d8699bfe051ae40ba187988323b1449eab1a" + integrity sha512-OE4hm7XqR0jNOq3Qmk9mFLyd6p2+j6bvbPJ7qlB7+oo0eNcL2l7WQzG6MBnT3EXY6xzkLMUBec3AfewJdA0J8w== + dependencies: + fast-equals "^5.0.1" + prop-types "^15.8.1" + react-transition-group "^4.4.5" + react-test-renderer@^18.0.0: version "18.3.1" resolved "https://registry.yarnpkg.com/react-test-renderer/-/react-test-renderer-18.3.1.tgz#e693608a1f96283400d4a3afead6893f958b80b4" @@ -9742,6 +9835,27 @@ readdirp@~3.6.0: dependencies: picomatch "^2.2.1" +recharts-scale@^0.4.4: + version "0.4.5" + resolved "https://registry.yarnpkg.com/recharts-scale/-/recharts-scale-0.4.5.tgz#0969271f14e732e642fcc5bd4ab270d6e87dd1d9" + integrity sha512-kivNFO+0OcUNu7jQquLXAxz1FIwZj8nrj+YkOKc5694NbjCvcT6aSZiIzNzd2Kul4o4rTto8QVR9lMNtxD4G1w== + dependencies: + decimal.js-light "^2.4.1" + +recharts@^2.13.0: + version "2.13.0" + resolved "https://registry.yarnpkg.com/recharts/-/recharts-2.13.0.tgz#a293322ea357491393cc7ad6fcbb1e5f8e99bc93" + integrity sha512-sbfxjWQ+oLWSZEWmvbq/DFVdeRLqqA6d0CDjKx2PkxVVdoXo16jvENCE+u/x7HxOO+/fwx//nYRwb8p8X6s/lQ== + dependencies: + clsx "^2.0.0" + eventemitter3 "^4.0.1" + lodash "^4.17.21" + react-is "^18.3.1" + react-smooth "^4.0.0" + recharts-scale "^0.4.4" + tiny-invariant "^1.3.1" + victory-vendor "^36.6.8" + recursive-readdir@^2.2.2: version "2.2.3" resolved 
"https://registry.yarnpkg.com/recursive-readdir/-/recursive-readdir-2.2.3.tgz#e726f328c0d69153bcabd5c322d3195252379372" @@ -10856,7 +10970,7 @@ thunky@^1.0.2: resolved "https://registry.yarnpkg.com/thunky/-/thunky-1.1.0.tgz#5abaf714a9405db0504732bbccd2cedd9ef9537d" integrity sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA== -tiny-invariant@^1.0.2: +tiny-invariant@^1.0.2, tiny-invariant@^1.3.1: version "1.3.3" resolved "https://registry.yarnpkg.com/tiny-invariant/-/tiny-invariant-1.3.3.tgz#46680b7a873a0d5d10005995eb90a70d74d60127" integrity sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg== @@ -11254,6 +11368,26 @@ vary@~1.1.2: resolved "https://registry.yarnpkg.com/vary/-/vary-1.1.2.tgz#2299f02c6ded30d4a5961b0b9f74524a18f634fc" integrity sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg== +victory-vendor@^36.6.8: + version "36.9.2" + resolved "https://registry.yarnpkg.com/victory-vendor/-/victory-vendor-36.9.2.tgz#668b02a448fa4ea0f788dbf4228b7e64669ff801" + integrity sha512-PnpQQMuxlwYdocC8fIJqVXvkeViHYzotI+NJrCuav0ZYFoq912ZHBk3mCeuj+5/VpodOjPe1z0Fk2ihgzlXqjQ== + dependencies: + "@types/d3-array" "^3.0.3" + "@types/d3-ease" "^3.0.0" + "@types/d3-interpolate" "^3.0.1" + "@types/d3-scale" "^4.0.2" + "@types/d3-shape" "^3.1.0" + "@types/d3-time" "^3.0.0" + "@types/d3-timer" "^3.0.0" + d3-array "^3.1.6" + d3-ease "^3.0.1" + d3-interpolate "^3.0.1" + d3-scale "^4.0.2" + d3-shape "^3.1.0" + d3-time "^3.0.0" + d3-timer "^3.0.1" + w3c-hr-time@^1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/w3c-hr-time/-/w3c-hr-time-1.0.2.tgz#0a89cdf5cc15822df9c360543676963e0cc308cd" From afe6914a76fa36a9e603af8d1e8dba31e0fe1caa Mon Sep 17 00:00:00 2001 From: Keran Yang Date: Fri, 8 Nov 2024 18:24:23 -0500 Subject: [PATCH 140/188] chore: update version compatibility matrix (#2212) Signed-off-by: Keran Yang --- go.mod | 2 +- go.sum | 4 +- 
pkg/sdkclient/serverinfo/serverinfo_test.go | 2 +- pkg/sdkclient/serverinfo/types.go | 61 ++++++++++---------- rust/numaflow-core/src/monovertex.rs | 17 +++--- rust/numaflow-core/src/shared/server_info.rs | 26 ++++----- 6 files changed, 56 insertions(+), 56 deletions(-) diff --git a/go.mod b/go.mod index 22ca740da5..b5b7d57160 100644 --- a/go.mod +++ b/go.mod @@ -32,7 +32,7 @@ require ( github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe github.com/nats-io/nats-server/v2 v2.10.20 github.com/nats-io/nats.go v1.37.0 - github.com/numaproj/numaflow-go v0.8.2-0.20241030023053-f6819383aa7b + github.com/numaproj/numaflow-go v0.9.0 github.com/prometheus/client_golang v1.19.1 github.com/prometheus/client_model v0.6.1 github.com/prometheus/common v0.55.0 diff --git a/go.sum b/go.sum index c0bb7ce203..48b6fba0ed 100644 --- a/go.sum +++ b/go.sum @@ -487,8 +487,8 @@ github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDm github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/numaproj/numaflow-go v0.8.2-0.20241030023053-f6819383aa7b h1:UEhFHfBwe2DwtnYzdFteTZ2tKwMX739llzfebfEMGg4= -github.com/numaproj/numaflow-go v0.8.2-0.20241030023053-f6819383aa7b/go.mod h1:FaCMeV0V9SiLcVf2fwT+GeTJHNaK2gdQsTAIqQ4x7oc= +github.com/numaproj/numaflow-go v0.9.0 h1:+bIREyAys6ArWdnTJb6FUbrhLf2oTF6hb+8x65Qzrws= +github.com/numaproj/numaflow-go v0.9.0/go.mod h1:FaCMeV0V9SiLcVf2fwT+GeTJHNaK2gdQsTAIqQ4x7oc= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= diff --git a/pkg/sdkclient/serverinfo/serverinfo_test.go 
b/pkg/sdkclient/serverinfo/serverinfo_test.go index 105775de17..b9a3320850 100644 --- a/pkg/sdkclient/serverinfo/serverinfo_test.go +++ b/pkg/sdkclient/serverinfo/serverinfo_test.go @@ -35,7 +35,7 @@ func Test_SDKServerInfo(t *testing.T) { Protocol: TCP, Language: Java, MinimumNumaflowVersion: "1.3.0-rc1", - Version: "v0.8.0", + Version: "v0.9.0", Metadata: map[string]string{"key1": "value1", "key2": "value2"}, } err := write(info, WithServerInfoFilePath(filepath)) diff --git a/pkg/sdkclient/serverinfo/types.go b/pkg/sdkclient/serverinfo/types.go index 23f6e2394d..1e1ae65c6f 100644 --- a/pkg/sdkclient/serverinfo/types.go +++ b/pkg/sdkclient/serverinfo/types.go @@ -81,49 +81,48 @@ This is because the go semver library considers pre-releases to be invalid if th Therefore, we have to put a pre-release version of the stable version in the map and choose the largest one. For python, we use "rc100" as the largest pre-release version. For go, rust, we use "-z" as the largest pre-release version. E.g., if the minimum supported version is "0.8.0", then put "0.8.0-z" for java, go, rust, "0.8.0rc100" for python. -A constraint ">=0.8.0-z" will match any pre-release version of 0.8.0, including "0.8.0-rc1", "0.8.0-rc2", etc. More details about version comparison can be found in the PEP 440 and semver documentation. 
*/ var minimumSupportedSDKVersions = sdkConstraints{ Python: map[ContainerType]string{ - // meaning the minimum supported python SDK version is 0.8.0 - sourcer: "0.8.0rc100", - sourcetransformer: "0.8.0rc100", - sinker: "0.8.0rc100", - mapper: "0.8.0rc100", - reducer: "0.8.0rc100", - reducestreamer: "0.8.0rc100", - sessionreducer: "0.8.0rc100", - sideinput: "0.8.0rc100", - fbsinker: "0.8.0rc100", + // meaning the minimum supported python SDK version is 0.9.0 + sourcer: "0.9.0rc100", + sourcetransformer: "0.9.0rc100", + sinker: "0.9.0rc100", + mapper: "0.9.0rc100", + reducer: "0.9.0rc100", + reducestreamer: "0.9.0rc100", + sessionreducer: "0.9.0rc100", + sideinput: "0.9.0rc100", + fbsinker: "0.9.0rc100", }, Go: map[ContainerType]string{ // meaning the minimum supported go SDK version is 0.8.0 - sourcer: "0.8.0-z", - sourcetransformer: "0.8.0-z", - sinker: "0.8.0-z", - mapper: "0.8.0-z", - reducer: "0.8.0-z", - reducestreamer: "0.8.0-z", - sessionreducer: "0.8.0-z", - sideinput: "0.8.0-z", - fbsinker: "0.8.0-z", + sourcer: "0.9.0-z", + sourcetransformer: "0.9.0-z", + sinker: "0.9.0-z", + mapper: "0.9.0-z", + reducer: "0.9.0-z", + reducestreamer: "0.9.0-z", + sessionreducer: "0.9.0-z", + sideinput: "0.9.0-z", + fbsinker: "0.9.0-z", }, Java: map[ContainerType]string{ - // meaning the minimum supported go SDK version is 0.8.0 - sourcer: "0.8.0-z", - sourcetransformer: "0.8.0-z", - sinker: "0.8.0-z", - mapper: "0.8.0-z", - reducer: "0.8.0-z", - reducestreamer: "0.8.0-z", - sessionreducer: "0.8.0-z", - sideinput: "0.8.0-z", - fbsinker: "0.8.0-z", + // meaning the minimum supported java SDK version is 0.8.0 + sourcer: "0.9.0-z", + sourcetransformer: "0.9.0-z", + sinker: "0.9.0-z", + mapper: "0.9.0-z", + reducer: "0.9.0-z", + reducestreamer: "0.9.0-z", + sessionreducer: "0.9.0-z", + sideinput: "0.9.0-z", + fbsinker: "0.9.0-z", }, Rust: map[ContainerType]string{ - // meaning the minimum supported go SDK version is 0.1.0 + // meaning the minimum supported rust SDK version is 
0.2.0 sourcer: "0.1.0-z", sourcetransformer: "0.1.0-z", sinker: "0.1.0-z", diff --git a/rust/numaflow-core/src/monovertex.rs b/rust/numaflow-core/src/monovertex.rs index bbdeaf3a9d..7fdc21cde7 100644 --- a/rust/numaflow-core/src/monovertex.rs +++ b/rust/numaflow-core/src/monovertex.rs @@ -333,10 +333,6 @@ mod tests { #[tokio::test] async fn run_forwarder() { - let (src_shutdown_tx, src_shutdown_rx) = tokio::sync::oneshot::channel(); - let tmp_dir = tempfile::TempDir::new().unwrap(); - let src_sock_file = tmp_dir.path().join("source.sock"); - let src_info_file = tmp_dir.path().join("sourcer-server-info"); let server_info_obj = ServerInfo { protocol: "uds".to_string(), language: "rust".to_string(), @@ -345,6 +341,11 @@ mod tests { metadata: None, }; + let (src_shutdown_tx, src_shutdown_rx) = tokio::sync::oneshot::channel(); + let tmp_dir = tempfile::TempDir::new().unwrap(); + let src_sock_file = tmp_dir.path().join("source.sock"); + let src_info_file = tmp_dir.path().join("sourcer-server-info"); + write_server_info(src_info_file.to_str().unwrap(), &server_info_obj) .await .unwrap(); @@ -363,14 +364,14 @@ mod tests { let (sink_shutdown_tx, sink_shutdown_rx) = tokio::sync::oneshot::channel(); let tmp_dir = tempfile::TempDir::new().unwrap(); let sink_sock_file = tmp_dir.path().join("sink.sock"); - let sink_server_info = tmp_dir.path().join("sinker-server-info"); + let sink_info_file = tmp_dir.path().join("sinker-server-info"); - write_server_info(sink_server_info.to_str().unwrap(), &server_info_obj) + write_server_info(sink_info_file.to_str().unwrap(), &server_info_obj) .await .unwrap(); let server_socket = sink_sock_file.clone(); - let server_info = sink_server_info.clone(); + let server_info = sink_info_file.clone(); let sink_server_handle = tokio::spawn(async move { sink::Server::new(SimpleSink) .with_socket_file(server_socket) @@ -408,7 +409,7 @@ mod tests { components::sink::UserDefinedConfig { socket_path: sink_sock_file.to_str().unwrap().to_string(), 
grpc_max_message_size: 1024, - server_info_path: sink_server_info.to_str().unwrap().to_string(), + server_info_path: sink_info_file.to_str().unwrap().to_string(), }, ), retry_config: Default::default(), diff --git a/rust/numaflow-core/src/shared/server_info.rs b/rust/numaflow-core/src/shared/server_info.rs index 7af7d378c2..9783b9ba97 100644 --- a/rust/numaflow-core/src/shared/server_info.rs +++ b/rust/numaflow-core/src/shared/server_info.rs @@ -142,7 +142,7 @@ fn check_sdk_compatibility( if !specifiers.contains(&sdk_version_pep440) { return Err(Error::ServerInfo(format!( - "SDK version {} must be upgraded to at least {}, in order to work with the current numaflow version", + "Python SDK version {} must be upgraded to at least {}, in order to work with the current numaflow version", sdk_version_pep440, human_readable(sdk_required_version) ))); } @@ -358,20 +358,20 @@ mod version { // NOTE: the string content of the keys matches the corresponding server info file name. // DO NOT change it unless the server info file name is changed. 
let mut go_version_map = HashMap::new(); - go_version_map.insert("sourcer".to_string(), "0.8.0-z".to_string()); - go_version_map.insert("sourcetransformer".to_string(), "0.8.0-z".to_string()); - go_version_map.insert("sinker".to_string(), "0.8.0-z".to_string()); - go_version_map.insert("fb-sinker".to_string(), "0.8.0-z".to_string()); + go_version_map.insert("sourcer".to_string(), "0.9.0-z".to_string()); + go_version_map.insert("sourcetransformer".to_string(), "0.9.0-z".to_string()); + go_version_map.insert("sinker".to_string(), "0.9.0-z".to_string()); + go_version_map.insert("fb-sinker".to_string(), "0.9.0-z".to_string()); let mut python_version_map = HashMap::new(); - python_version_map.insert("sourcer".to_string(), "0.8.0rc100".to_string()); - python_version_map.insert("sourcetransformer".to_string(), "0.8.0rc100".to_string()); - python_version_map.insert("sinker".to_string(), "0.8.0rc100".to_string()); - python_version_map.insert("fb-sinker".to_string(), "0.8.0rc100".to_string()); + python_version_map.insert("sourcer".to_string(), "0.9.0rc100".to_string()); + python_version_map.insert("sourcetransformer".to_string(), "0.9.0rc100".to_string()); + python_version_map.insert("sinker".to_string(), "0.9.0rc100".to_string()); + python_version_map.insert("fb-sinker".to_string(), "0.9.0rc100".to_string()); let mut java_version_map = HashMap::new(); - java_version_map.insert("sourcer".to_string(), "0.8.0-z".to_string()); - java_version_map.insert("sourcetransformer".to_string(), "0.8.0-z".to_string()); - java_version_map.insert("sinker".to_string(), "0.8.0-z".to_string()); - java_version_map.insert("fb-sinker".to_string(), "0.8.0-z".to_string()); + java_version_map.insert("sourcer".to_string(), "0.9.0-z".to_string()); + java_version_map.insert("sourcetransformer".to_string(), "0.9.0-z".to_string()); + java_version_map.insert("sinker".to_string(), "0.9.0-z".to_string()); + java_version_map.insert("fb-sinker".to_string(), "0.9.0-z".to_string()); let mut rust_version_map = 
HashMap::new(); rust_version_map.insert("sourcer".to_string(), "0.1.0-z".to_string()); rust_version_map.insert("sourcetransformer".to_string(), "0.1.0-z".to_string()); From 180049dad7430739e365ae69714aaef749c7085a Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 8 Nov 2024 15:38:14 -0800 Subject: [PATCH 141/188] docs: updated CHANGELOG.md (#2215) Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- CHANGELOG.md | 98 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 98 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1688eb88cf..a2cc0e895e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,103 @@ # Changelog +## v1.4.0 (2024-11-08) + + * [6892c115](https://github.com/numaproj/numaflow/commit/6892c11590ea482c186724e55837dbcfb2100ce3) Update manifests to v1.4.0 + * [63d5f774](https://github.com/numaproj/numaflow/commit/63d5f774fecc0284ea92ad3934e7c2c8e4a58b6e) feat: metrics visualiser for mono vertex (#2195) + * [9c1d3cef](https://github.com/numaproj/numaflow/commit/9c1d3cef6ca817f0e0595dc07b727ce8ae597e4e) feat: block isbsvc deleting when there is linked pipeline (#2202) + * [426141a5](https://github.com/numaproj/numaflow/commit/426141a5e595e8cb4c827f48fea0e1bd286e4a11) fix(docs): use manifests from main branch in quick-start (#2197) + * [00a74df0](https://github.com/numaproj/numaflow/commit/00a74df0f03a8548b3f11776c598c61266801706) doc: monovertex (#2193) + * [eca3b0c0](https://github.com/numaproj/numaflow/commit/eca3b0c0be314939422ee18cdc938546d3b9e4e3) feat:KafkaSource supports KafkaVersion modification (#2191) + * [5b777826](https://github.com/numaproj/numaflow/commit/5b7778260c85c74fc73bf098d5d4609d2f8e2a42) feat: source and sink implementation in Rust (blocking implementation) (#2190) + * [e98ff980](https://github.com/numaproj/numaflow/commit/e98ff980577ee8c161d41fb3b00fcda6db20c9e7) chore(deps): bump 
http-proxy-middleware from 2.0.6 to 2.0.7 in /ui (#2188) + * [8e98c085](https://github.com/numaproj/numaflow/commit/8e98c0854bc3c17626238b2c58326cac5a602a05) fix: refine vertex/mvtx pod clean up logic (#2185) + * [ee27af35](https://github.com/numaproj/numaflow/commit/ee27af35aa7920d26068e4c03cb6efdf874f08fc) fix(metrics): fix incorrect metric label and add docs (#2180) + * [f21e75bc](https://github.com/numaproj/numaflow/commit/f21e75bcf1e133d26eed83cec9983501f3648ae3) fix(controller): incorporate instance into lease lock name (#2177) + * [7b02290d](https://github.com/numaproj/numaflow/commit/7b02290d3c8ee665916625fb119490192b4560bd) feat: config management for numaflow rust (#2172) + * [187398cc](https://github.com/numaproj/numaflow/commit/187398ccd1569316ad7303cdc86f7faed98e1eb1) fix: main branch, offset type got updated (#2171) + * [dc137c24](https://github.com/numaproj/numaflow/commit/dc137c24b3cc842c8a3e048fa928bc2a54f4d759) feat: blackhole sink for Monovertex (#2167) + * [9bd7e1b2](https://github.com/numaproj/numaflow/commit/9bd7e1b2925ad8714d86114618fe93f967d2b7fe) feat: check if the buffer is full before writing to ISB (#2166) + * [3d6e47ff](https://github.com/numaproj/numaflow/commit/3d6e47ffc119d8347a2087fb951f2061c516bc94) feat: ISB(jetstream) writer framework (#2160) + * [8bf96793](https://github.com/numaproj/numaflow/commit/8bf96793aa477d85d31dac01edc36c9201f55fc2) fix: create histogram buckets in a range (#2144) + * [c95d9308](https://github.com/numaproj/numaflow/commit/c95d930830912ceef3516b46994508c56214d236) feat: support multiple controller with instance config (#2153) + * [1ea4d2ea](https://github.com/numaproj/numaflow/commit/1ea4d2ea3f4a7b2ab939976eba5308d6cb0a9da0) feat: Log sink implementation for Monovertex (#2150) + * [6fb36acf](https://github.com/numaproj/numaflow/commit/6fb36acfc31f07bd53bfadf587fd2253dda9fe34) feat: Unify MapStream and Unary Map Operations Using a Shared gRPC Protocol (#2149) + * 
[fb328854](https://github.com/numaproj/numaflow/commit/fb328854d8a49aa915aaf7d3843ebcfdfd6c81a9) feat: actor pattern for forwarder + sink trait (#2141) + * [bc12925f](https://github.com/numaproj/numaflow/commit/bc12925f550d05732a435581570d6e1c0948f377) feat: set kafka keys if setKey is set (#2146) + * [dd08bcab](https://github.com/numaproj/numaflow/commit/dd08bcab15c7dad09930cb158b8b98caa3698d0e) feat: Unify Batch Map and Unary Map Operations Using a Shared gRPC Protocol (#2139) + * [271e459a](https://github.com/numaproj/numaflow/commit/271e459a5deb13f77906fb58c8308151ef6415a1) feat: add keys into kafka header while producing (#2143) + * [206ff7f7](https://github.com/numaproj/numaflow/commit/206ff7f72bf83e19edf17eb36861865585b1ce9c) fix: pipeline pausing race conditions of draining and terminating source (#2131) + * [d340a4e8](https://github.com/numaproj/numaflow/commit/d340a4e83311d487c2a1b3a75447168a01e3943e) feat: expose ports for user defined containers (#2135) + * [fae53fa2](https://github.com/numaproj/numaflow/commit/fae53fa2dcef7ef18d0d068368aaf6b832410c1e) feat: integrate tickgen with monovertex (#2136) + * [d5c96fd9](https://github.com/numaproj/numaflow/commit/d5c96fd9538d6eebe7267f89f677c837b309d6a0) feat: Use gRPC bidirectional streaming for map (#2120) + * [ceb8f5b7](https://github.com/numaproj/numaflow/commit/ceb8f5b721c097310a5b91d89d1bd3df8648f284) feat: Make Generator Support Leaky Bucket (#2129) + * [fcef5053](https://github.com/numaproj/numaflow/commit/fcef50536cf85373e0ba8ac5162a27fd4e58db5f) refactor: generate static gRPC clients (#2128) + * [3182db3a](https://github.com/numaproj/numaflow/commit/3182db3af3a13ace6e7c2cb26d3be9df04173a5a) feat: generator based on ticker (#2126) + * [06515a2c](https://github.com/numaproj/numaflow/commit/06515a2cbfc3a183131cab54394bb5d2c546e046) fix: create buffers and buckets before updating Vertices (#2112) + * [7586ffb0](https://github.com/numaproj/numaflow/commit/7586ffb056f3155fd9f13ba8dee33be38851ce94) 
Debugging unit test timeout in CI (#2118) + * [dc25c4dc](https://github.com/numaproj/numaflow/commit/dc25c4dc11c7fd5125c53bfff2b39fa49b9c8368) feat: implement Source trait and use it for user-defined source (#2114) + * [3dbed43e](https://github.com/numaproj/numaflow/commit/3dbed43ea652ed5e4913e2346e60816d52b258ed) feat: container-level version compatibility check for monovertex (#2108) + * [6aacb6ea](https://github.com/numaproj/numaflow/commit/6aacb6ea8bf656c1d65888deba4c21b0aea5de73) chore(deps): bump tonic from 0.12.2 to 0.12.3 in /rust (#2111) + * [e69551ba](https://github.com/numaproj/numaflow/commit/e69551ba07d14dee5dccd90b28cf8b497943f415) feat: Use gRPC bidirectional streaming for source transformer (#2071) + * [5b8b8ddd](https://github.com/numaproj/numaflow/commit/5b8b8dddac727e53bcfbf4c8071221b284e606e9) chore(deps): bump rollup from 2.79.1 to 2.79.2 in /ui (#2096) + * [6cdec2d6](https://github.com/numaproj/numaflow/commit/6cdec2d6d1325866e99a204389bc9dc460146cbf) feat: Bidirectional Streaming for UDSink (#2080) + * [895a7780](https://github.com/numaproj/numaflow/commit/895a7780410b7bb5a43a4ab6f4dd55c1c145561f) feat: container-type level version compatibility check (#2087) + * [6d1ebd04](https://github.com/numaproj/numaflow/commit/6d1ebd04f2089c81bd8e0c5e763cd7c363cb7623) feat: add pause for monovertex (#2077) + * [b4f92785](https://github.com/numaproj/numaflow/commit/b4f9278570f67cba3d85fffe7ca287c5b00da489) fix: rollback codegen script (#2079) + * [40e960a4](https://github.com/numaproj/numaflow/commit/40e960a44184c876173e6bdf69b216df6296bf73) feat: Bidirectional Streaming for User Defined Source (#2056) + * [669dc186](https://github.com/numaproj/numaflow/commit/669dc186a0d885df92716b627ded236fab7476e7) Fix: Use Merge patch rather than json patch for `pause-timestamp` annotation apply (#2078) + * [ed543ad2](https://github.com/numaproj/numaflow/commit/ed543ad2e7824f3e6b508de5b07ba08e1d7d9b66) fix: support version compatibility check for pre-release 
versions (#2069) + * [9995ff81](https://github.com/numaproj/numaflow/commit/9995ff813d39489d22c94e574adae9e6a8a4ebe8) feat: allow customization on readyz and livez config (#2068) + * [cbe9054f](https://github.com/numaproj/numaflow/commit/cbe9054f8507639dac3a48b7b8eeb9e236ce706e) doc: example for PVC (#2067) + * [692fbeec](https://github.com/numaproj/numaflow/commit/692fbeec1b94d8ff66a82b9c3fe5d8242962750b) fix: skip updating phase for resource check (#2065) + * [c6003314](https://github.com/numaproj/numaflow/commit/c6003314c8f77905fbd86ddccab12853ca6c63a1) chore(deps): bump express from 4.19.2 to 4.21.0 in /ui (#2061) + * [0811eb4a](https://github.com/numaproj/numaflow/commit/0811eb4aff59dda8b9143a7420b2beb415143d27) fix: Fix numaflow-rs binary location in image (#2050) + * [ba40b150](https://github.com/numaproj/numaflow/commit/ba40b1500416a258fe131273d3cfc4b46a93a88f) fix: builtin transformer should keep the keys (#2047) + * [c4b4d006](https://github.com/numaproj/numaflow/commit/c4b4d0068012f06980595437b3bc39c73cace8ef) feat: rolling update for Pipeline Vertex (#2040) + * [32878877](https://github.com/numaproj/numaflow/commit/3287887761fa5a8da12ca70c5ce53947cbe896ec) feat: rolling update for MonoVertex (#2029) + * [cf90e258](https://github.com/numaproj/numaflow/commit/cf90e258261b50d95db2787cfe23e9008c2ab72a) fix: pause lifecyle changes and add drained status (#2028) + * [40a3d2f5](https://github.com/numaproj/numaflow/commit/40a3d2f5bd3ac57e075bc23b076c1e5df8436fc8) feat: allow configurable retryStrategy (#2010) + * [55230e84](https://github.com/numaproj/numaflow/commit/55230e84fd86f05bcac96dd4b42afe73aa1b2e4a) chore(deps): bump webpack from 5.93.0 to 5.94.0 in /ui (#2018) + * [a77c9391](https://github.com/numaproj/numaflow/commit/a77c9391e9e6dbdd00cbc50376b90b99eebc6cc5) fix: add latency metrics for mvtx (#2013) + * [35c6f099](https://github.com/numaproj/numaflow/commit/35c6f0991d6821b728c82bee6161e265dc2c1ba6) feat: introduce `readyReplicas` for Vertex and 
MonoVertex (#2014) + * [2ba54117](https://github.com/numaproj/numaflow/commit/2ba54117d7015126c6894d196d42848bd2e37644) feat: enable resourceClaims for vertex and monovtx (#2009) + * [53d1131d](https://github.com/numaproj/numaflow/commit/53d1131d82c8029e546c2f39305d1bcf80f1b60e) fix: log format with config load error (#2000) + * [91f372ca](https://github.com/numaproj/numaflow/commit/91f372ca9ea413041ad157746530481d78114fcf) feat: more flexible scaling with `replicasPerScaleUp` and `replicasPerScaleDown` (#2003) + * [102d1de1](https://github.com/numaproj/numaflow/commit/102d1de1230a5a9baf29128757b12e6af4413bf3) chore(deps): bump micromatch from 4.0.7 to 4.0.8 in /ui (#2002) + * [ae02243b](https://github.com/numaproj/numaflow/commit/ae02243b3f30de8da407b148bbac7cb2e48a68c4) fix: e2e testing isbsvc deletion timeout issue (#1997) + * [deb1626e](https://github.com/numaproj/numaflow/commit/deb1626ece55579d30e6d9003abe854980cc2923) fix: test coverage generation for Rust code (#1993) + * [6918e6f4](https://github.com/numaproj/numaflow/commit/6918e6f47e9309173dd67e6fc0c105d2cd9814f2) fix: do not pass scale info to MonoVertex (#1990) + * [3f735f76](https://github.com/numaproj/numaflow/commit/3f735f76425a15d8670f145e69e3caa044037a2c) fix: adding not available for negative processing rates (#1983) + * [33bbbad4](https://github.com/numaproj/numaflow/commit/33bbbad4d7b16f9494d4164993b1cb9d32acc18b) fix: minor perf improvements of mvtx fallback sink (#1967) + * [af2f6522](https://github.com/numaproj/numaflow/commit/af2f65220afa80fc8f4bf684cc9ce58234c2bb80) fix: remove coloring in logs (#1975) + * [a7074aa8](https://github.com/numaproj/numaflow/commit/a7074aa80345e41c39770e7d069e14c29eaff9e0) doc: update roadmap (#1970) + * [e1bfd1b2](https://github.com/numaproj/numaflow/commit/e1bfd1b2d016d64bfb9d6ac546cc3489c96b806d) refactor: re-arrange e2e tests (#1961) + * [42671138](https://github.com/numaproj/numaflow/commit/42671138250d67f6eacddf33d4b5d5e069e5674f) fix: replicas derived in 
UI from mvtx status instead of spec (#1965) + * [b54a4cd3](https://github.com/numaproj/numaflow/commit/b54a4cd3e555ee3e29c603f7f2ea1c15ccd88f7a) feat: add health for monovertex (#1954) + * [cbad6996](https://github.com/numaproj/numaflow/commit/cbad6996f063acf1f4a3d2d8fc2ec1acff6ee912) feat: enable fallback sink for mvtx (#1957) + * [c14abd5d](https://github.com/numaproj/numaflow/commit/c14abd5de5cfc4d88f396c17231233b4e9fc2c5f) feat: Mono vertex UI (#1941) + * [c4b5d05c](https://github.com/numaproj/numaflow/commit/c4b5d05c24c189684043688fa657295bf4495dcd) fix: default resources mutated when applying templates (#1948) + * [9e963867](https://github.com/numaproj/numaflow/commit/9e9638677a35384e9acd12a1ecca1390fdf72b3e) feat: autoscaling for MonoVertex (#1927) + * [97f94283](https://github.com/numaproj/numaflow/commit/97f94283817f994549e8e5cb0b78bf9e8444eabf) fix: retry failed messages for MonoVertex sink (#1933) + * [2017f0c0](https://github.com/numaproj/numaflow/commit/2017f0c0f3a7fd3f7842fc70575644e38b69d294) Add Lockheed to Users.md (#1934) + * [8b7a9a16](https://github.com/numaproj/numaflow/commit/8b7a9a16e89bc5f81d36c1abb44201ad850c32bc) feat: add server-info support and versioning to MonoVertex (#1918) + * [c399d051](https://github.com/numaproj/numaflow/commit/c399d051466017dc331552531ea31d44a20bae66) feat: source to sink with an optional transformer without ISB (#1904) + +### Contributors + + * Derek Wang + * Julie Vogelman + * Keran Yang + * Sidhant Kohli + * Sreekanth + * Vedant Gupta + * Vigith Maurice + * Yashash H L + * dependabot[bot] + * mdwarne1 + * qianbeibuzui + * xdevxy + ## v1.3.3 (2024-10-09) * [4f31aad7](https://github.com/numaproj/numaflow/commit/4f31aad7f51cce59700ef53f363d06afeb6d6aee) Update manifests to v1.3.3 From 8f7132ddc3e71928a33bfd03bf39318bbe1ea666 Mon Sep 17 00:00:00 2001 From: Vigith Maurice Date: Sat, 9 Nov 2024 10:16:43 -0800 Subject: [PATCH 142/188] doc: roadmap update (#2217) Signed-off-by: Vigith Maurice --- README.md | 2 +- 
docs/README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index be45ad6409..45fc7d7bdb 100644 --- a/README.md +++ b/README.md @@ -36,7 +36,7 @@ Numaflow, created by the Intuit Argo team to address community needs for continu ## Roadmap -- Mono Vertex to bypass ISB for simple use cases (1.4) +- Inbuilt Debugging Experience (1.5) ## Demo diff --git a/docs/README.md b/docs/README.md index 6388ec5326..7fdb4eb789 100644 --- a/docs/README.md +++ b/docs/README.md @@ -26,7 +26,7 @@ Welcome to Numaflow! A Kubernetes-native, serverless platform for running scalab ## Roadmap -- Mono Vertex to bypass ISB for simple use cases (1.4) +- Inbuilt Debugging Experience (1.5) ## Demo From d507e19ff76eb0e68db33434802216f633a4763d Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Mon, 18 Nov 2024 14:44:13 -0800 Subject: [PATCH 143/188] feat: add sdk infomation metrics (#2208) Signed-off-by: Derek Wang --- examples/21-simple-mono-vertex.yaml | 2 +- pkg/metrics/metrics.go | 13 ++ pkg/sdkclient/serverinfo/serverinfo.go | 20 +- pkg/sdkclient/serverinfo/serverinfo_test.go | 20 +- pkg/sdkclient/serverinfo/types.go | 97 ++++----- pkg/sideinputs/manager/manager.go | 2 + pkg/sinks/sink.go | 2 + pkg/sources/source.go | 2 + pkg/udf/map_udf.go | 1 + pkg/udf/reduce_udf.go | 3 + rust/Cargo.lock | 26 +-- rust/numaflow-core/src/metrics.rs | 203 +++++++++---------- rust/numaflow-core/src/monovertex.rs | 59 +++++- rust/numaflow-core/src/pipeline.rs | 16 +- rust/numaflow-core/src/shared/server_info.rs | 176 ++++++++++------ rust/numaflow-core/src/shared/utils.rs | 11 +- rust/servesink/Cargo.toml | 4 +- 17 files changed, 375 insertions(+), 282 deletions(-) diff --git a/examples/21-simple-mono-vertex.yaml b/examples/21-simple-mono-vertex.yaml index 9ca99cf1bc..2a437b44b3 100644 --- a/examples/21-simple-mono-vertex.yaml +++ b/examples/21-simple-mono-vertex.yaml @@ -14,4 +14,4 @@ spec: sink: udsink: container: - image: quay.io/numaio/numaflow-rs/sink-log:stable \ 
No newline at end of file + image: quay.io/numaio/numaflow-rs/sink-log:stable diff --git a/pkg/metrics/metrics.go b/pkg/metrics/metrics.go index 95ce17b984..4f8bfe1aac 100644 --- a/pkg/metrics/metrics.go +++ b/pkg/metrics/metrics.go @@ -33,9 +33,22 @@ const ( LabelPartitionName = "partition_name" LabelMonoVertexName = "mvtx_name" + LabelComponent = "component" + LabelComponentName = "component_name" + LabelSDKLanguage = "language" + LabelSDKVersion = "version" + LabelSDKType = "type" // container type, e.g sourcer, sourcetransformer, sinker, etc. see serverinfo.ContainerType + LabelReason = "reason" ) +var ( + SDKInfo = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "sdk_info", + Help: "A metric with a constant value '1', labeled by SDK information such as version, language, and type", + }, []string{LabelComponent, LabelComponentName, LabelSDKType, LabelSDKVersion, LabelSDKLanguage}) +) + // Generic forwarder metrics var ( // ReadMessagesCount is used to indicate the number of total messages read diff --git a/pkg/sdkclient/serverinfo/serverinfo.go b/pkg/sdkclient/serverinfo/serverinfo.go index f94d83e072..3e5a291501 100644 --- a/pkg/sdkclient/serverinfo/serverinfo.go +++ b/pkg/sdkclient/serverinfo/serverinfo.go @@ -67,9 +67,9 @@ func waitForServerInfo(timeout time.Duration, filePath string) (*ServerInfo, err minNumaflowVersion := serverInfo.MinimumNumaflowVersion sdkLanguage := serverInfo.Language numaflowVersion := numaflow.GetVersion().Version - containerType, err := getContainerType(filePath) - if err != nil { - return nil, fmt.Errorf("failed to get container type: %w", err) + containerType := getContainerType(filePath) + if containerType == ContainerTypeUnknown { + return nil, fmt.Errorf("unknown container type") } // If MinimumNumaflowVersion is empty, skip the numaflow compatibility check as there was an @@ -221,11 +221,15 @@ func checkSDKCompatibility(sdkVersion string, sdkLanguage Language, containerTyp // getContainerType returns the container type 
from the server info file path // serverInfoFilePath is in the format of "/var/run/numaflow/{ContainerType}-server-info" -func getContainerType(serverInfoFilePath string) (ContainerType, error) { +func getContainerType(serverInfoFilePath string) ContainerType { splits := strings.Split(serverInfoFilePath, "/") - if containerType := strings.TrimSuffix(splits[len(splits)-1], "-server-info"); containerType == "" { - return "", fmt.Errorf("failed to get container type from server info file path: %s", serverInfoFilePath) - } else { - return ContainerType(containerType), nil + containerType := ContainerType(strings.TrimSuffix(splits[len(splits)-1], "-server-info")) + switch containerType { + case ContainerTypeSourcer, ContainerTypeSourcetransformer, ContainerTypeSinker, ContainerTypeMapper, + ContainerTypeReducer, ContainerTypeReducestreamer, ContainerTypeSessionreducer, + ContainerTypeSideinput, ContainerTypeFbsinker: + return containerType + default: + return ContainerTypeUnknown } } diff --git a/pkg/sdkclient/serverinfo/serverinfo_test.go b/pkg/sdkclient/serverinfo/serverinfo_test.go index b9a3320850..a5b57aed48 100644 --- a/pkg/sdkclient/serverinfo/serverinfo_test.go +++ b/pkg/sdkclient/serverinfo/serverinfo_test.go @@ -186,16 +186,16 @@ func Test_CheckNumaflowCompatibility(t *testing.T) { func Test_CheckSDKCompatibility_MinimumBeingStableReleases(t *testing.T) { var testMinimumSupportedSDKVersions = sdkConstraints{ Python: map[ContainerType]string{ - sourcer: "0.6.0rc100", + ContainerTypeSourcer: "0.6.0rc100", }, Go: map[ContainerType]string{ - sourcer: "0.6.0-z", + ContainerTypeSourcer: "0.6.0-z", }, Java: map[ContainerType]string{ - sourcer: "0.6.0-z", + ContainerTypeSourcer: "0.6.0-z", }, Rust: map[ContainerType]string{ - sourcer: "0.1.0-z", + ContainerTypeSourcer: "0.1.0-z", }, } tests := []struct { @@ -283,7 +283,7 @@ func Test_CheckSDKCompatibility_MinimumBeingStableReleases(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err 
:= checkSDKCompatibility(tt.sdkVersion, tt.sdkLanguage, sourcer, tt.minimumSupportedSDKVersions) + err := checkSDKCompatibility(tt.sdkVersion, tt.sdkLanguage, ContainerTypeSourcer, tt.minimumSupportedSDKVersions) if tt.shouldErr { assert.Error(t, err, "Expected error") assert.Contains(t, err.Error(), tt.errMessage) @@ -298,16 +298,16 @@ func Test_CheckSDKCompatibility_MinimumBeingStableReleases(t *testing.T) { func Test_CheckSDKCompatibility_MinimumBeingPreReleases(t *testing.T) { var testMinimumSupportedSDKVersions = sdkConstraints{ Python: map[ContainerType]string{ - sourcer: "0.6.0b1", + ContainerTypeSourcer: "0.6.0b1", }, Go: map[ContainerType]string{ - sourcer: "0.6.0-rc2", + ContainerTypeSourcer: "0.6.0-rc2", }, Java: map[ContainerType]string{ - sourcer: "0.6.0-rc2", + ContainerTypeSourcer: "0.6.0-rc2", }, Rust: map[ContainerType]string{ - sourcer: "0.1.0-rc3", + ContainerTypeSourcer: "0.1.0-rc3", }, } tests := []struct { @@ -395,7 +395,7 @@ func Test_CheckSDKCompatibility_MinimumBeingPreReleases(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := checkSDKCompatibility(tt.sdkVersion, tt.sdkLanguage, sourcer, tt.minimumSupportedSDKVersions) + err := checkSDKCompatibility(tt.sdkVersion, tt.sdkLanguage, ContainerTypeSourcer, tt.minimumSupportedSDKVersions) if tt.shouldErr { assert.Error(t, err, "Expected error") assert.Contains(t, err.Error(), tt.errMessage) diff --git a/pkg/sdkclient/serverinfo/types.go b/pkg/sdkclient/serverinfo/types.go index 1e1ae65c6f..520fe38489 100644 --- a/pkg/sdkclient/serverinfo/types.go +++ b/pkg/sdkclient/serverinfo/types.go @@ -32,15 +32,16 @@ type ContainerType string // the string content matches the corresponding server info file name. // DO NOT change it unless the server info file name is changed. 
const ( - sourcer ContainerType = "sourcer" - sourcetransformer ContainerType = "sourcetransformer" - sinker ContainerType = "sinker" - mapper ContainerType = "mapper" - reducer ContainerType = "reducer" - reducestreamer ContainerType = "reducestreamer" - sessionreducer ContainerType = "sessionreducer" - sideinput ContainerType = "sideinput" - fbsinker ContainerType = "fb-sinker" + ContainerTypeSourcer ContainerType = "sourcer" + ContainerTypeSourcetransformer ContainerType = "sourcetransformer" + ContainerTypeSinker ContainerType = "sinker" + ContainerTypeMapper ContainerType = "mapper" + ContainerTypeReducer ContainerType = "reducer" + ContainerTypeReducestreamer ContainerType = "reducestreamer" + ContainerTypeSessionreducer ContainerType = "sessionreducer" + ContainerTypeSideinput ContainerType = "sideinput" + ContainerTypeFbsinker ContainerType = "fb-sinker" + ContainerTypeUnknown ContainerType = "unknown" ) type sdkConstraints map[Language]map[ContainerType]string @@ -87,51 +88,51 @@ More details about version comparison can be found in the PEP 440 and semver doc var minimumSupportedSDKVersions = sdkConstraints{ Python: map[ContainerType]string{ // meaning the minimum supported python SDK version is 0.9.0 - sourcer: "0.9.0rc100", - sourcetransformer: "0.9.0rc100", - sinker: "0.9.0rc100", - mapper: "0.9.0rc100", - reducer: "0.9.0rc100", - reducestreamer: "0.9.0rc100", - sessionreducer: "0.9.0rc100", - sideinput: "0.9.0rc100", - fbsinker: "0.9.0rc100", + ContainerTypeSourcer: "0.9.0rc100", + ContainerTypeSourcetransformer: "0.9.0rc100", + ContainerTypeSinker: "0.9.0rc100", + ContainerTypeMapper: "0.9.0rc100", + ContainerTypeReducer: "0.9.0rc100", + ContainerTypeReducestreamer: "0.9.0rc100", + ContainerTypeSessionreducer: "0.9.0rc100", + ContainerTypeSideinput: "0.9.0rc100", + ContainerTypeFbsinker: "0.9.0rc100", }, Go: map[ContainerType]string{ - // meaning the minimum supported go SDK version is 0.8.0 - sourcer: "0.9.0-z", - sourcetransformer: "0.9.0-z", - 
sinker: "0.9.0-z", - mapper: "0.9.0-z", - reducer: "0.9.0-z", - reducestreamer: "0.9.0-z", - sessionreducer: "0.9.0-z", - sideinput: "0.9.0-z", - fbsinker: "0.9.0-z", + // meaning the minimum supported go SDK version is 0.9.0 + ContainerTypeSourcer: "0.9.0-z", + ContainerTypeSourcetransformer: "0.9.0-z", + ContainerTypeSinker: "0.9.0-z", + ContainerTypeMapper: "0.9.0-z", + ContainerTypeReducer: "0.9.0-z", + ContainerTypeReducestreamer: "0.9.0-z", + ContainerTypeSessionreducer: "0.9.0-z", + ContainerTypeSideinput: "0.9.0-z", + ContainerTypeFbsinker: "0.9.0-z", }, Java: map[ContainerType]string{ - // meaning the minimum supported java SDK version is 0.8.0 - sourcer: "0.9.0-z", - sourcetransformer: "0.9.0-z", - sinker: "0.9.0-z", - mapper: "0.9.0-z", - reducer: "0.9.0-z", - reducestreamer: "0.9.0-z", - sessionreducer: "0.9.0-z", - sideinput: "0.9.0-z", - fbsinker: "0.9.0-z", + // meaning the minimum supported go SDK version is 0.9.0 + ContainerTypeSourcer: "0.9.0-z", + ContainerTypeSourcetransformer: "0.9.0-z", + ContainerTypeSinker: "0.9.0-z", + ContainerTypeMapper: "0.9.0-z", + ContainerTypeReducer: "0.9.0-z", + ContainerTypeReducestreamer: "0.9.0-z", + ContainerTypeSessionreducer: "0.9.0-z", + ContainerTypeSideinput: "0.9.0-z", + ContainerTypeFbsinker: "0.9.0-z", }, Rust: map[ContainerType]string{ - // meaning the minimum supported rust SDK version is 0.2.0 - sourcer: "0.1.0-z", - sourcetransformer: "0.1.0-z", - sinker: "0.1.0-z", - mapper: "0.1.0-z", - reducer: "0.1.0-z", - reducestreamer: "0.1.0-z", - sessionreducer: "0.1.0-z", - sideinput: "0.1.0-z", - fbsinker: "0.1.0-z", + // meaning the minimum supported go SDK version is 0.1.0 + ContainerTypeSourcer: "0.1.0-z", + ContainerTypeSourcetransformer: "0.1.0-z", + ContainerTypeSinker: "0.1.0-z", + ContainerTypeMapper: "0.1.0-z", + ContainerTypeReducer: "0.1.0-z", + ContainerTypeReducestreamer: "0.1.0-z", + ContainerTypeSessionreducer: "0.1.0-z", + ContainerTypeSideinput: "0.1.0-z", + ContainerTypeFbsinker: 
"0.1.0-z", }, } diff --git a/pkg/sideinputs/manager/manager.go b/pkg/sideinputs/manager/manager.go index 5aea876c72..51f3127dc2 100644 --- a/pkg/sideinputs/manager/manager.go +++ b/pkg/sideinputs/manager/manager.go @@ -27,6 +27,7 @@ import ( dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" "github.com/numaproj/numaflow/pkg/isbsvc" + "github.com/numaproj/numaflow/pkg/metrics" "github.com/numaproj/numaflow/pkg/sdkclient" "github.com/numaproj/numaflow/pkg/sdkclient/serverinfo" "github.com/numaproj/numaflow/pkg/sdkclient/sideinput" @@ -87,6 +88,7 @@ func (sim *sideInputsManager) Start(ctx context.Context) error { if err != nil { return err } + metrics.SDKInfo.WithLabelValues(dfv1.ComponentSideInputManager, fmt.Sprintf("%s-%s", sim.pipelineName, sim.sideInput.Name), string(serverinfo.ContainerTypeSideinput), serverInfo.Version, string(serverInfo.Language)).Set(1) // Create a new gRPC client for Side Input sideInputClient, err := sideinput.New(serverInfo) diff --git a/pkg/sinks/sink.go b/pkg/sinks/sink.go index 3355cbe70c..18702074d4 100644 --- a/pkg/sinks/sink.go +++ b/pkg/sinks/sink.go @@ -154,6 +154,7 @@ func (u *SinkProcessor) Start(ctx context.Context) error { if err != nil { return err } + metrics.SDKInfo.WithLabelValues(dfv1.ComponentVertex, fmt.Sprintf("%s-%s", pipelineName, vertexName), string(serverinfo.ContainerTypeSinker), serverInfo.Version, string(serverInfo.Language)).Set(1) sdkClient, err := sinkclient.New(ctx, serverInfo, sdkclient.WithMaxMessageSize(maxMessageSize)) if err != nil { @@ -183,6 +184,7 @@ func (u *SinkProcessor) Start(ctx context.Context) error { if err != nil { return err } + metrics.SDKInfo.WithLabelValues(dfv1.ComponentVertex, fmt.Sprintf("%s-%s", pipelineName, vertexName), string(serverinfo.ContainerTypeFbsinker), serverInfo.Version, string(serverInfo.Language)).Set(1) sdkClient, err := sinkclient.New(ctx, serverInfo, sdkclient.WithMaxMessageSize(maxMessageSize), sdkclient.WithUdsSockAddr(sdkclient.FbSinkAddr)) if err != 
nil { diff --git a/pkg/sources/source.go b/pkg/sources/source.go index 6ed2b78d5b..9394b31507 100644 --- a/pkg/sources/source.go +++ b/pkg/sources/source.go @@ -199,6 +199,7 @@ func (sp *SourceProcessor) Start(ctx context.Context) error { if err != nil { return err } + metrics.SDKInfo.WithLabelValues(dfv1.ComponentVertex, fmt.Sprintf("%s-%s", pipelineName, vertexName), string(serverinfo.ContainerTypeSourcer), serverInfo.Version, string(serverInfo.Language)).Set(1) srcClient, err := sourceclient.New(ctx, serverInfo, sdkclient.WithMaxMessageSize(maxMessageSize)) if err != nil { @@ -238,6 +239,7 @@ func (sp *SourceProcessor) Start(ctx context.Context) error { if err != nil { return err } + metrics.SDKInfo.WithLabelValues(dfv1.ComponentVertex, fmt.Sprintf("%s-%s", pipelineName, vertexName), string(serverinfo.ContainerTypeSourcetransformer), serverInfo.Version, string(serverInfo.Language)).Set(1) srcTransformerClient, err := sourcetransformer.New(ctx, serverInfo, sdkclient.WithMaxMessageSize(maxMessageSize)) if err != nil { diff --git a/pkg/udf/map_udf.go b/pkg/udf/map_udf.go index 8cf10f36a4..4f5439703a 100644 --- a/pkg/udf/map_udf.go +++ b/pkg/udf/map_udf.go @@ -139,6 +139,7 @@ func (u *MapUDFProcessor) Start(ctx context.Context) error { if err != nil { return err } + metrics.SDKInfo.WithLabelValues(dfv1.ComponentVertex, fmt.Sprintf("%s-%s", pipelineName, vertexName), string(serverinfo.ContainerTypeMapper), serverInfo.Version, string(serverInfo.Language)).Set(1) // track all the resources that need to be closed var resourcesToClose []io.Closer diff --git a/pkg/udf/reduce_udf.go b/pkg/udf/reduce_udf.go index a1c9500f2e..503c1675fe 100644 --- a/pkg/udf/reduce_udf.go +++ b/pkg/udf/reduce_udf.go @@ -101,6 +101,7 @@ func (u *ReduceUDFProcessor) Start(ctx context.Context) error { if err != nil { return err } + metrics.SDKInfo.WithLabelValues(dfv1.ComponentVertex, fmt.Sprintf("%s-%s", pipelineName, vertexName), string(serverinfo.ContainerTypeReducestreamer), 
serverInfo.Version, string(serverInfo.Language)).Set(1) client, err = reducer.New(serverInfo, sdkclient.WithMaxMessageSize(maxMessageSize), sdkclient.WithUdsSockAddr(sdkclient.ReduceStreamAddr)) } else { // Wait for server info to be ready @@ -108,6 +109,7 @@ func (u *ReduceUDFProcessor) Start(ctx context.Context) error { if err != nil { return err } + metrics.SDKInfo.WithLabelValues(dfv1.ComponentVertex, fmt.Sprintf("%s-%s", pipelineName, vertexName), string(serverinfo.ContainerTypeReducer), serverInfo.Version, string(serverInfo.Language)).Set(1) client, err = reducer.New(serverInfo, sdkclient.WithMaxMessageSize(maxMessageSize)) } if err != nil { @@ -134,6 +136,7 @@ func (u *ReduceUDFProcessor) Start(ctx context.Context) error { if err != nil { return err } + metrics.SDKInfo.WithLabelValues(dfv1.ComponentVertex, fmt.Sprintf("%s-%s", pipelineName, vertexName), string(serverinfo.ContainerTypeSessionreducer), serverInfo.Version, string(serverInfo.Language)).Set(1) client, err := sessionreducer.New(serverInfo, sdkclient.WithMaxMessageSize(maxMessageSize)) if err != nil { diff --git a/rust/Cargo.lock b/rust/Cargo.lock index ee664937a7..48a609ce9e 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -1589,28 +1589,6 @@ dependencies = [ "tracing-subscriber", ] -[[package]] -name = "numaflow" -version = "0.1.1" -source = "git+https://github.com/numaproj/numaflow-rs.git?rev=30d8ce1972fd3f0c0b8059fee209516afeef0088#30d8ce1972fd3f0c0b8059fee209516afeef0088" -dependencies = [ - "chrono", - "futures-util", - "hyper-util", - "prost", - "prost-types", - "serde", - "serde_json", - "thiserror", - "tokio", - "tokio-stream", - "tokio-util", - "tonic", - "tonic-build", - "tracing", - "uuid", -] - [[package]] name = "numaflow" version = "0.1.1" @@ -1648,7 +1626,7 @@ dependencies = [ "hyper-util", "kube", "log", - "numaflow 0.1.1 (git+https://github.com/numaproj/numaflow-rs.git?rev=ddd879588e11455921f1ca958ea2b3c076689293)", + "numaflow 0.1.1", "numaflow-models", "numaflow-pb", 
"parking_lot", @@ -2656,7 +2634,7 @@ dependencies = [ name = "servesink" version = "0.1.0" dependencies = [ - "numaflow 0.1.1 (git+https://github.com/numaproj/numaflow-rs.git?rev=30d8ce1972fd3f0c0b8059fee209516afeef0088)", + "numaflow 0.1.1", "reqwest 0.12.8", "tokio", "tonic", diff --git a/rust/numaflow-core/src/metrics.rs b/rust/numaflow-core/src/metrics.rs index 3aaf97ab78..0cce19f956 100644 --- a/rust/numaflow-core/src/metrics.rs +++ b/rust/numaflow-core/src/metrics.rs @@ -30,6 +30,16 @@ use tracing::{debug, error, info}; use crate::source::SourceHandle; use crate::Error; +pub const COMPONENT_MVTX: &str = "mono-vertex"; + +// SDK information +const SDK_INFO: &str = "sdk_info"; +const COMPONENT: &str = "component"; +const COMPONENT_NAME: &str = "component_name"; +const SDK_VERSION: &str = "version"; +const SDK_LANGUAGE: &str = "language"; +const SDK_TYPE: &str = "type"; + // Define the labels for the metrics // Note: Please keep consistent with the definitions in MonoVertex daemon const MVTX_NAME_LABEL: &str = "mvtx_name"; @@ -132,6 +142,34 @@ fn global_registry() -> &'static GlobalRegistry { GLOBAL_REGISTRY.get_or_init(GlobalRegistry::new) } +/// GlobalMetrics is a struct which is used for storing the global metrics +pub(crate) struct GlobalMetrics { + pub(crate) sdk_info: Family, Gauge>, +} + +impl GlobalMetrics { + fn new() -> Self { + let metrics = Self { + sdk_info: Family::, Gauge>::default(), + }; + let mut registry = global_registry().registry.lock(); + // Register all the metrics to the global registry + registry.register( + SDK_INFO, + "A metric with a constant value '1', labeled by SDK information such as version, language, and type", + metrics.sdk_info.clone(), + ); + metrics + } +} + +/// GLOBAL_METRICS is the GlobalMetrics object which stores the metrics +static GLOBAL_METRICS: OnceLock = OnceLock::new(); + +pub(crate) fn global_metrics() -> &'static GlobalMetrics { + GLOBAL_METRICS.get_or_init(GlobalMetrics::new) +} + /// MonoVtxMetrics is a 
struct which is used for storing the metrics related to MonoVertex // These fields are exposed as pub to be used by other modules for // changing the value of the metrics @@ -366,6 +404,24 @@ pub(crate) fn forward_pipeline_metrics() -> &'static PipelineMetrics { PIPELINE_METRICS.get_or_init(PipelineMetrics::new) } +// sdk_info_labels is a helper function used to build the labels used in sdk_info +pub(crate) fn sdk_info_labels( + component: String, + component_name: String, + language: String, + version: String, + container_type: String, +) -> Vec<(String, String)> { + let labels = vec![ + (COMPONENT.to_string(), component), + (COMPONENT_NAME.to_string(), component_name), + (SDK_LANGUAGE.to_string(), language), + (SDK_VERSION.to_string(), version), + (SDK_TYPE.to_string(), container_type), + ]; + labels +} + /// MONOVTX_METRICS_LABELS are used to store the common labels used in the metrics static MONOVTX_METRICS_LABELS: OnceLock> = OnceLock::new(); @@ -1011,6 +1067,16 @@ mod tests { #[test] fn test_metric_names() { + let global_metrics = global_metrics(); + let sdk_labels = sdk_info_labels( + "component".to_string(), + "component_name".to_string(), + "language".to_string(), + "version".to_string(), + "container_type".to_string(), + ); + global_metrics.sdk_info.get_or_create(&sdk_labels).set(1); + let metrics = forward_mvtx_metrics(); // Use a fixed set of labels instead of the ones from mvtx_forward_metric_labels() since other test functions may also set it. let common_labels = vec![ @@ -1050,120 +1116,41 @@ mod tests { let mut buffer = String::new(); encode(&mut buffer, &state).unwrap(); - let expected = r#" -# HELP monovtx_read A Counter to keep track of the total number of messages read from the source. -# TYPE monovtx_read counter -monovtx_read_total{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -# HELP monovtx_ack A Counter to keep track of the total number of messages acknowledged by the sink. 
-# TYPE monovtx_ack counter -monovtx_ack_total{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -# HELP monovtx_read_bytes A Counter to keep track of the total number of bytes read from the source. -# TYPE monovtx_read_bytes counter -monovtx_read_bytes_total{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -# HELP monovtx_dropped A Counter to keep track of the total number of messages dropped by the monovtx. -# TYPE monovtx_dropped counter -monovtx_dropped_total{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -# HELP monovtx_pending A Gauge to keep track of the total number of pending messages for the monovtx. -# TYPE monovtx_pending gauge -monovtx_pending{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 10 -# HELP monovtx_processing_time A Histogram to keep track of the total time taken to forward a chunk, in microseconds. -# TYPE monovtx_processing_time histogram -monovtx_processing_time_sum{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 10.0 -monovtx_processing_time_count{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_processing_time_bucket{le="100.0",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_processing_time_bucket{le="592.5071727239734",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_processing_time_bucket{le="3510.6474972935645",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_processing_time_bucket{le="20800.83823051903",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_processing_time_bucket{le="123246.4585025357",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_processing_time_bucket{le="730244.1067557994",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_processing_time_bucket{le="4326748.710922221",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 
-monovtx_processing_time_bucket{le="25636296.457956219",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_processing_time_bucket{le="151896895.33417253",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_processing_time_bucket{le="899999999.9999987",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_processing_time_bucket{le="+Inf",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -# HELP monovtx_read_time A Histogram to keep track of the total time taken to Read from the Source, in microseconds. -# TYPE monovtx_read_time histogram -monovtx_read_time_sum{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 3.0 -monovtx_read_time_count{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_read_time_bucket{le="100.0",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_read_time_bucket{le="592.5071727239734",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_read_time_bucket{le="3510.6474972935645",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_read_time_bucket{le="20800.83823051903",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_read_time_bucket{le="123246.4585025357",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_read_time_bucket{le="730244.1067557994",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_read_time_bucket{le="4326748.710922221",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_read_time_bucket{le="25636296.457956219",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_read_time_bucket{le="151896895.33417253",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_read_time_bucket{le="899999999.9999987",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_read_time_bucket{le="+Inf",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -# HELP monovtx_ack_time A 
Histogram to keep track of the total time taken to Ack to the Source, in microseconds. -# TYPE monovtx_ack_time histogram -monovtx_ack_time_sum{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 2.0 -monovtx_ack_time_count{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_ack_time_bucket{le="100.0",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_ack_time_bucket{le="592.5071727239734",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_ack_time_bucket{le="3510.6474972935645",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_ack_time_bucket{le="20800.83823051903",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_ack_time_bucket{le="123246.4585025357",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_ack_time_bucket{le="730244.1067557994",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_ack_time_bucket{le="4326748.710922221",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_ack_time_bucket{le="25636296.457956219",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_ack_time_bucket{le="151896895.33417253",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_ack_time_bucket{le="899999999.9999987",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_ack_time_bucket{le="+Inf",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -# HELP monovtx_transformer_time A Histogram to keep track of the total time taken to Transform, in microseconds. 
-# TYPE monovtx_transformer_time histogram -monovtx_transformer_time_sum{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 5.0 -monovtx_transformer_time_count{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_transformer_time_bucket{le="100.0",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_transformer_time_bucket{le="592.5071727239734",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_transformer_time_bucket{le="3510.6474972935645",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_transformer_time_bucket{le="20800.83823051903",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_transformer_time_bucket{le="123246.4585025357",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_transformer_time_bucket{le="730244.1067557994",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_transformer_time_bucket{le="4326748.710922221",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_transformer_time_bucket{le="25636296.457956219",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_transformer_time_bucket{le="151896895.33417253",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_transformer_time_bucket{le="899999999.9999987",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_transformer_time_bucket{le="+Inf",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -# HELP monovtx_sink_write A Counter to keep track of the total number of messages written to the sink. -# TYPE monovtx_sink_write counter -monovtx_sink_write_total{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -# HELP monovtx_sink_time A Histogram to keep track of the total time taken to Write to the Sink, in microseconds. 
-# TYPE monovtx_sink_time histogram -monovtx_sink_time_sum{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 4.0 -monovtx_sink_time_count{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_sink_time_bucket{le="100.0",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_sink_time_bucket{le="592.5071727239734",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_sink_time_bucket{le="3510.6474972935645",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_sink_time_bucket{le="20800.83823051903",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_sink_time_bucket{le="123246.4585025357",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_sink_time_bucket{le="730244.1067557994",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_sink_time_bucket{le="4326748.710922221",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_sink_time_bucket{le="25636296.457956219",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_sink_time_bucket{le="151896895.33417253",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_sink_time_bucket{le="899999999.9999987",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -monovtx_sink_time_bucket{le="+Inf",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -# HELP monovtx_fallback_sink_write A Counter to keep track of the total number of messages written to the fallback sink. -# TYPE monovtx_fallback_sink_write counter -monovtx_fallback_sink_write_total{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1 -# EOF - "#; - - // The registry may contains metrics from other tests also. Extract the ones created from this test using the unique labels we specify. 
- let labels = common_labels - .iter() - .map(|(k, v)| format!("{}=\"{}\"", k, v)) - .collect::>() - .join(","); + let expected = [ + r#"sdk_info{component="component",component_name="component_name",language="language",version="version",type="container_type"} 1"#, + r#"monovtx_read_total{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1"#, + r#"monovtx_ack_total{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1"#, + r#"monovtx_read_bytes_total{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1"#, + r#"monovtx_dropped_total{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1"#, + r#"monovtx_pending{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 10"#, + r#"monovtx_processing_time_sum{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 10.0"#, + r#"monovtx_processing_time_count{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1"#, + r#"monovtx_processing_time_bucket{le="100.0",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1"#, + r#"monovtx_read_time_sum{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 3.0"#, + r#"monovtx_read_time_count{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1"#, + r#"monovtx_read_time_bucket{le="100.0",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1"#, + r#"monovtx_ack_time_sum{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 2.0"#, + r#"monovtx_ack_time_count{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1"#, + r#"monovtx_ack_time_bucket{le="100.0",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1"#, + r#"monovtx_transformer_time_sum{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 5.0"#, + r#"monovtx_transformer_time_count{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1"#, + r#"monovtx_transformer_time_bucket{le="100.0",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1"#, + 
r#"monovtx_sink_write_total{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1"#, + r#"monovtx_sink_time_sum{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 4.0"#, + r#"monovtx_sink_time_count{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1"#, + r#"monovtx_sink_time_bucket{le="100.0",mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1"#, + r#"monovtx_fallback_sink_write_total{mvtx_name="test-monovertex-metric-names",mvtx_replica="3"} 1"#, + ]; let got = buffer .trim() .lines() - .filter(|line| line.starts_with('#') || line.contains(&labels)) + .filter(|line| !line.starts_with("#")) .collect::>() .join("\n"); - assert_eq!(got.trim(), expected.trim()); + for t in expected { + assert!(got.contains(t)); + } } } diff --git a/rust/numaflow-core/src/monovertex.rs b/rust/numaflow-core/src/monovertex.rs index 7fdc21cde7..185c96bfe9 100644 --- a/rust/numaflow-core/src/monovertex.rs +++ b/rust/numaflow-core/src/monovertex.rs @@ -10,7 +10,7 @@ use crate::config::components::{sink, source, transformer}; use crate::config::monovertex::MonovertexConfig; use crate::error::{self, Error}; use crate::metrics; -use crate::shared::server_info::check_for_server_compatibility; +use crate::shared::server_info::{sdk_server_info, ContainerType}; use crate::shared::utils; use crate::shared::utils::{ create_rpc_channel, wait_until_sink_ready, wait_until_source_ready, @@ -38,12 +38,24 @@ pub(crate) async fn start_forwarder( &config.source_config.source_type { // do server compatibility check - check_for_server_compatibility( + let server_info = sdk_server_info( source_config.server_info_path.clone().into(), cln_token.clone(), ) .await?; + let metric_labels = metrics::sdk_info_labels( + metrics::COMPONENT_MVTX.to_string(), + config.name.clone(), + server_info.language, + server_info.version, + ContainerType::Sourcer.to_string(), + ); + metrics::global_metrics() + .sdk_info + .get_or_create(&metric_labels) + .set(1); + let mut source_grpc_client = 
SourceClient::new(create_rpc_channel(source_config.socket_path.clone().into()).await?) .max_encoding_message_size(source_config.grpc_max_message_size) @@ -59,12 +71,24 @@ pub(crate) async fn start_forwarder( &config.sink_config.sink_type { // do server compatibility check - check_for_server_compatibility( + let server_info = sdk_server_info( udsink_config.server_info_path.clone().into(), cln_token.clone(), ) .await?; + let metric_labels = metrics::sdk_info_labels( + metrics::COMPONENT_MVTX.to_string(), + config.name.clone(), + server_info.language, + server_info.version, + ContainerType::Sinker.to_string(), + ); + metrics::global_metrics() + .sdk_info + .get_or_create(&metric_labels) + .set(1); + let mut sink_grpc_client = SinkClient::new(create_rpc_channel(udsink_config.socket_path.clone().into()).await?) .max_encoding_message_size(udsink_config.grpc_max_message_size) @@ -79,12 +103,24 @@ pub(crate) async fn start_forwarder( let fb_sink_grpc_client = if let Some(fb_sink) = &config.fb_sink_config { if let sink::SinkType::UserDefined(fb_sink_config) = &fb_sink.sink_type { // do server compatibility check - check_for_server_compatibility( + let server_info = sdk_server_info( fb_sink_config.server_info_path.clone().into(), cln_token.clone(), ) .await?; + let metric_labels = metrics::sdk_info_labels( + metrics::COMPONENT_MVTX.to_string(), + config.name.clone(), + server_info.language, + server_info.version, + ContainerType::FbSinker.to_string(), + ); + metrics::global_metrics() + .sdk_info + .get_or_create(&metric_labels) + .set(1); + let mut fb_sink_grpc_client = SinkClient::new( create_rpc_channel(fb_sink_config.socket_path.clone().into()).await?, ) @@ -105,12 +141,25 @@ pub(crate) async fn start_forwarder( &transformer.transformer_type { // do server compatibility check - check_for_server_compatibility( + let server_info = sdk_server_info( transformer_config.server_info_path.clone().into(), cln_token.clone(), ) .await?; + let metric_labels = 
metrics::sdk_info_labels( + metrics::COMPONENT_MVTX.to_string(), + config.name.clone(), + server_info.language, + server_info.version, + ContainerType::SourceTransformer.to_string(), + ); + + metrics::global_metrics() + .sdk_info + .get_or_create(&metric_labels) + .set(1); + let mut transformer_grpc_client = SourceTransformClient::new( create_rpc_channel(transformer_config.socket_path.clone().into()).await?, ) diff --git a/rust/numaflow-core/src/pipeline.rs b/rust/numaflow-core/src/pipeline.rs index f5896b3cc7..52719c8827 100644 --- a/rust/numaflow-core/src/pipeline.rs +++ b/rust/numaflow-core/src/pipeline.rs @@ -15,7 +15,7 @@ use crate::config::pipeline::PipelineConfig; use crate::metrics::{PipelineContainerState, UserDefinedContainerState}; use crate::pipeline::isb::jetstream::reader::JetstreamReader; use crate::pipeline::isb::jetstream::WriterHandle; -use crate::shared::server_info::check_for_server_compatibility; +use crate::shared::server_info::sdk_server_info; use crate::shared::utils; use crate::shared::utils::{ create_rpc_channel, start_metrics_server, wait_until_source_ready, wait_until_transformer_ready, @@ -231,11 +231,14 @@ async fn create_source_type( )) } SourceType::UserDefined(udsource_config) => { - check_for_server_compatibility( + _ = sdk_server_info( udsource_config.server_info_path.clone().into(), cln_token.clone(), ) .await?; + + // TODO: Add sdk info metric + let mut source_grpc_client = SourceClient::new( create_rpc_channel(udsource_config.socket_path.clone().into()).await?, ) @@ -267,11 +270,10 @@ async fn create_transformer( if let config::components::transformer::TransformerType::UserDefined(ud_transformer) = &transformer_config.transformer_type { - check_for_server_compatibility( - ud_transformer.socket_path.clone().into(), - cln_token.clone(), - ) - .await?; + _ = sdk_server_info(ud_transformer.socket_path.clone().into(), cln_token.clone()) + .await?; + // TODO: Add sdk info metric + let mut transformer_grpc_client = 
SourceTransformClient::new( create_rpc_channel(ud_transformer.socket_path.clone().into()).await?, ) diff --git a/rust/numaflow-core/src/shared/server_info.rs b/rust/numaflow-core/src/shared/server_info.rs index 9783b9ba97..9e7cf0b04f 100644 --- a/rust/numaflow-core/src/shared/server_info.rs +++ b/rust/numaflow-core/src/shared/server_info.rs @@ -1,4 +1,5 @@ use std::collections::HashMap; +use std::fmt; use std::fs; use std::path::{Path, PathBuf}; use std::str::FromStr; @@ -18,6 +19,60 @@ use crate::shared::server_info::version::SdkConstraints; // Equivalent to U+005C__END__. const END: &str = "U+005C__END__"; +#[derive(Debug, Eq, PartialEq, Clone, Hash)] +pub enum ContainerType { + Sourcer, + SourceTransformer, + Sinker, + Mapper, + Reducer, + ReduceStreamer, + SessionReducer, + SideInput, + FbSinker, + Unknown, +} + +impl ContainerType { + fn as_str(&self) -> &'static str { + match self { + ContainerType::Sourcer => "sourcer", + ContainerType::SourceTransformer => "sourcetransformer", + ContainerType::Sinker => "sinker", + ContainerType::Mapper => "mapper", + ContainerType::Reducer => "reducer", + ContainerType::ReduceStreamer => "reducestreamer", + ContainerType::SessionReducer => "sessionreducer", + ContainerType::SideInput => "sideinput", + ContainerType::FbSinker => "fb-sinker", + ContainerType::Unknown => "unknown", + } + } +} + +impl fmt::Display for ContainerType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.as_str()) + } +} + +impl From for ContainerType { + fn from(s: String) -> Self { + match s.as_str() { + "sourcer" => ContainerType::Sourcer, + "sourcetransformer" => ContainerType::SourceTransformer, + "sinker" => ContainerType::Sinker, + "mapper" => ContainerType::Mapper, + "reducer" => ContainerType::Reducer, + "reducestreamer" => ContainerType::ReduceStreamer, + "sessionreducer" => ContainerType::SessionReducer, + "sideinput" => ContainerType::SideInput, + "fb-sinker" => ContainerType::FbSinker, + _ => 
ContainerType::Unknown, + } + } +} + /// ServerInfo structure to store server-related information #[derive(Serialize, Deserialize, Debug)] pub(crate) struct ServerInfo { @@ -35,10 +90,10 @@ pub(crate) struct ServerInfo { /// check_for_server_compatibility waits until the server info file is ready and check whether the /// server is compatible with Numaflow. -pub(crate) async fn check_for_server_compatibility( +pub(crate) async fn sdk_server_info( file_path: PathBuf, cln_token: CancellationToken, -) -> error::Result<()> { +) -> error::Result { // Read the server info file let server_info = read_server_info(&file_path, cln_token).await?; @@ -49,14 +104,11 @@ pub(crate) async fn check_for_server_compatibility( let sdk_version = &server_info.version; let min_numaflow_version = &server_info.minimum_numaflow_version; let sdk_language = &server_info.language; - let container_type = get_container_type(&file_path).unwrap_or(""); + let container_type = get_container_type(&file_path).unwrap_or(ContainerType::Unknown); // Get version information let version_info = version::get_version_info(); let numaflow_version = &version_info.version; - info!("Version_info: {:?}", version_info); - - // Check minimum numaflow version compatibility if specified if min_numaflow_version.is_empty() { warn!("Failed to get the minimum numaflow version, skipping numaflow version compatibility check"); } else if !numaflow_version.contains("latest") @@ -76,12 +128,12 @@ pub(crate) async fn check_for_server_compatibility( check_sdk_compatibility( sdk_version, sdk_language, - container_type, + &container_type, min_supported_sdk_versions, )?; } - Ok(()) + Ok(server_info) } /// Checks if the current numaflow version is compatible with the given minimum numaflow version. 
@@ -115,7 +167,7 @@ fn check_numaflow_compatibility( fn check_sdk_compatibility( sdk_version: &str, sdk_language: &str, - container_type: &str, + container_type: &ContainerType, min_supported_sdk_versions: &SdkConstraints, ) -> error::Result<()> { // Check if the SDK language is present in the minimum supported SDK versions @@ -264,14 +316,10 @@ fn trim_after_dash(input: &str) -> &str { /// Extracts the container type from the server info file. /// The file name is in the format of -server-info. -fn get_container_type(server_info_file: &Path) -> Option<&str> { +fn get_container_type(server_info_file: &Path) -> Option { let file_name = server_info_file.file_name()?; let container_type = file_name.to_str()?.trim_end_matches("-server-info"); - if container_type.is_empty() { - None - } else { - Some(container_type) - } + Some(ContainerType::from(container_type.to_string())) } /// Reads the server info file and returns the parsed ServerInfo struct. @@ -348,7 +396,9 @@ mod version { use std::env; use std::sync::LazyLock; - pub(crate) type SdkConstraints = HashMap>; + use super::ContainerType; + + pub(crate) type SdkConstraints = HashMap>; // MINIMUM_SUPPORTED_SDK_VERSIONS is the minimum supported version of each SDK for the current numaflow version. static MINIMUM_SUPPORTED_SDK_VERSIONS: LazyLock = LazyLock::new(|| { @@ -357,26 +407,26 @@ mod version { // please follow the instruction there to update the value // NOTE: the string content of the keys matches the corresponding server info file name. // DO NOT change it unless the server info file name is changed. 
- let mut go_version_map = HashMap::new(); - go_version_map.insert("sourcer".to_string(), "0.9.0-z".to_string()); - go_version_map.insert("sourcetransformer".to_string(), "0.9.0-z".to_string()); - go_version_map.insert("sinker".to_string(), "0.9.0-z".to_string()); - go_version_map.insert("fb-sinker".to_string(), "0.9.0-z".to_string()); + let mut go_version_map: HashMap = HashMap::new(); + go_version_map.insert(ContainerType::Sourcer, "0.9.0-z".to_string()); + go_version_map.insert(ContainerType::SourceTransformer, "0.9.0-z".to_string()); + go_version_map.insert(ContainerType::Sinker, "0.9.0-z".to_string()); + go_version_map.insert(ContainerType::FbSinker, "0.9.0-z".to_string()); let mut python_version_map = HashMap::new(); - python_version_map.insert("sourcer".to_string(), "0.9.0rc100".to_string()); - python_version_map.insert("sourcetransformer".to_string(), "0.9.0rc100".to_string()); - python_version_map.insert("sinker".to_string(), "0.9.0rc100".to_string()); - python_version_map.insert("fb-sinker".to_string(), "0.9.0rc100".to_string()); + python_version_map.insert(ContainerType::Sourcer, "0.9.0rc100".to_string()); + python_version_map.insert(ContainerType::SourceTransformer, "0.9.0rc100".to_string()); + python_version_map.insert(ContainerType::Sinker, "0.9.0rc100".to_string()); + python_version_map.insert(ContainerType::FbSinker, "0.9.0rc100".to_string()); let mut java_version_map = HashMap::new(); - java_version_map.insert("sourcer".to_string(), "0.9.0-z".to_string()); - java_version_map.insert("sourcetransformer".to_string(), "0.9.0-z".to_string()); - java_version_map.insert("sinker".to_string(), "0.9.0-z".to_string()); - java_version_map.insert("fb-sinker".to_string(), "0.9.0-z".to_string()); + java_version_map.insert(ContainerType::Sourcer, "0.9.0-z".to_string()); + java_version_map.insert(ContainerType::SourceTransformer, "0.9.0-z".to_string()); + java_version_map.insert(ContainerType::Sinker, "0.9.0-z".to_string()); + 
java_version_map.insert(ContainerType::FbSinker, "0.9.0-z".to_string()); let mut rust_version_map = HashMap::new(); - rust_version_map.insert("sourcer".to_string(), "0.1.0-z".to_string()); - rust_version_map.insert("sourcetransformer".to_string(), "0.1.0-z".to_string()); - rust_version_map.insert("sinker".to_string(), "0.1.0-z".to_string()); - rust_version_map.insert("fb-sinker".to_string(), "0.1.0-z".to_string()); + rust_version_map.insert(ContainerType::Sourcer, "0.1.0-z".to_string()); + rust_version_map.insert(ContainerType::SourceTransformer, "0.1.0-z".to_string()); + rust_version_map.insert(ContainerType::Sinker, "0.1.0-z".to_string()); + rust_version_map.insert(ContainerType::FbSinker, "0.1.0-z".to_string()); let mut m = HashMap::new(); m.insert("go".to_string(), go_version_map); @@ -471,7 +521,7 @@ mod tests { const TCP: &str = "tcp"; const PYTHON: &str = "python"; const GOLANG: &str = "go"; - const TEST_CONTAINER_TYPE: &str = "sourcer"; + const TEST_CONTAINER_TYPE: ContainerType = ContainerType::Sourcer; async fn write_server_info( svr_info: &ServerInfo, @@ -523,13 +573,13 @@ mod tests { // Helper function to create a SdkConstraints struct with minimum supported SDK versions all being stable releases fn create_sdk_constraints_stable_versions() -> SdkConstraints { let mut go_version_map = HashMap::new(); - go_version_map.insert(TEST_CONTAINER_TYPE.to_string(), "0.10.0-z".to_string()); + go_version_map.insert(TEST_CONTAINER_TYPE, "0.10.0-z".to_string()); let mut python_version_map = HashMap::new(); - python_version_map.insert(TEST_CONTAINER_TYPE.to_string(), "1.2.0rc100".to_string()); + python_version_map.insert(TEST_CONTAINER_TYPE, "1.2.0rc100".to_string()); let mut java_version_map = HashMap::new(); - java_version_map.insert(TEST_CONTAINER_TYPE.to_string(), "2.0.0-z".to_string()); + java_version_map.insert(TEST_CONTAINER_TYPE, "2.0.0-z".to_string()); let mut rust_version_map = HashMap::new(); - rust_version_map.insert(TEST_CONTAINER_TYPE.to_string(), 
"0.1.0-z".to_string()); + rust_version_map.insert(TEST_CONTAINER_TYPE, "0.1.0-z".to_string()); let mut m = HashMap::new(); m.insert("go".to_string(), go_version_map); @@ -542,13 +592,13 @@ mod tests { // Helper function to create a SdkConstraints struct with minimum supported SDK versions all being pre-releases fn create_sdk_constraints_pre_release_versions() -> SdkConstraints { let mut go_version_map = HashMap::new(); - go_version_map.insert(TEST_CONTAINER_TYPE.to_string(), "0.10.0-rc2".to_string()); + go_version_map.insert(TEST_CONTAINER_TYPE, "0.10.0-rc2".to_string()); let mut python_version_map = HashMap::new(); - python_version_map.insert(TEST_CONTAINER_TYPE.to_string(), "1.2.0b2".to_string()); + python_version_map.insert(TEST_CONTAINER_TYPE, "1.2.0b2".to_string()); let mut java_version_map = HashMap::new(); - java_version_map.insert(TEST_CONTAINER_TYPE.to_string(), "2.0.0-rc2".to_string()); + java_version_map.insert(TEST_CONTAINER_TYPE, "2.0.0-rc2".to_string()); let mut rust_version_map = HashMap::new(); - rust_version_map.insert(TEST_CONTAINER_TYPE.to_string(), "0.1.0-rc3".to_string()); + rust_version_map.insert(TEST_CONTAINER_TYPE, "0.1.0-rc3".to_string()); let mut m = HashMap::new(); m.insert("go".to_string(), go_version_map); @@ -567,7 +617,7 @@ mod tests { let result = check_sdk_compatibility( sdk_version, sdk_language, - TEST_CONTAINER_TYPE, + &TEST_CONTAINER_TYPE, &min_supported_sdk_versions, ); @@ -583,7 +633,7 @@ mod tests { let result = check_sdk_compatibility( sdk_version, sdk_language, - TEST_CONTAINER_TYPE, + &TEST_CONTAINER_TYPE, &min_supported_sdk_versions, ); @@ -602,7 +652,7 @@ mod tests { let result = check_sdk_compatibility( sdk_version, sdk_language, - TEST_CONTAINER_TYPE, + &TEST_CONTAINER_TYPE, &min_supported_sdk_versions, ); @@ -618,7 +668,7 @@ mod tests { let result = check_sdk_compatibility( sdk_version, sdk_language, - TEST_CONTAINER_TYPE, + &TEST_CONTAINER_TYPE, &min_supported_sdk_versions, ); @@ -637,7 +687,7 @@ mod tests { let 
result = check_sdk_compatibility( sdk_version, sdk_language, - TEST_CONTAINER_TYPE, + &TEST_CONTAINER_TYPE, &min_supported_sdk_versions, ); @@ -653,7 +703,7 @@ mod tests { let result = check_sdk_compatibility( sdk_version, sdk_language, - TEST_CONTAINER_TYPE, + &TEST_CONTAINER_TYPE, &min_supported_sdk_versions, ); @@ -672,7 +722,7 @@ mod tests { let result = check_sdk_compatibility( sdk_version, sdk_language, - TEST_CONTAINER_TYPE, + &TEST_CONTAINER_TYPE, &min_supported_sdk_versions, ); @@ -688,7 +738,7 @@ mod tests { let result = check_sdk_compatibility( sdk_version, sdk_language, - TEST_CONTAINER_TYPE, + &TEST_CONTAINER_TYPE, &min_supported_sdk_versions, ); @@ -707,7 +757,7 @@ mod tests { let result = check_sdk_compatibility( sdk_version, sdk_language, - TEST_CONTAINER_TYPE, + &TEST_CONTAINER_TYPE, &min_supported_sdk_versions, ); @@ -723,7 +773,7 @@ mod tests { let result = check_sdk_compatibility( sdk_version, sdk_language, - TEST_CONTAINER_TYPE, + &TEST_CONTAINER_TYPE, &min_supported_sdk_versions, ); @@ -742,7 +792,7 @@ mod tests { let result = check_sdk_compatibility( sdk_version, sdk_language, - TEST_CONTAINER_TYPE, + &TEST_CONTAINER_TYPE, &min_supported_sdk_versions, ); @@ -758,7 +808,7 @@ mod tests { let result = check_sdk_compatibility( sdk_version, sdk_language, - TEST_CONTAINER_TYPE, + &TEST_CONTAINER_TYPE, &min_supported_sdk_versions, ); @@ -777,7 +827,7 @@ mod tests { let result = check_sdk_compatibility( sdk_version, sdk_language, - TEST_CONTAINER_TYPE, + &TEST_CONTAINER_TYPE, &min_supported_sdk_versions, ); @@ -793,7 +843,7 @@ mod tests { let result = check_sdk_compatibility( sdk_version, sdk_language, - TEST_CONTAINER_TYPE, + &TEST_CONTAINER_TYPE, &min_supported_sdk_versions, ); @@ -812,7 +862,7 @@ mod tests { let result = check_sdk_compatibility( sdk_version, sdk_language, - TEST_CONTAINER_TYPE, + &TEST_CONTAINER_TYPE, &min_supported_sdk_versions, ); @@ -828,7 +878,7 @@ mod tests { let result = check_sdk_compatibility( sdk_version, sdk_language, - 
TEST_CONTAINER_TYPE, + &TEST_CONTAINER_TYPE, &min_supported_sdk_versions, ); @@ -847,7 +897,7 @@ mod tests { let result = check_sdk_compatibility( sdk_version, sdk_language, - TEST_CONTAINER_TYPE, + &TEST_CONTAINER_TYPE, &min_supported_sdk_versions, ); @@ -863,7 +913,7 @@ mod tests { let result = check_sdk_compatibility( sdk_version, sdk_language, - TEST_CONTAINER_TYPE, + &TEST_CONTAINER_TYPE, &min_supported_sdk_versions, ); @@ -882,7 +932,7 @@ mod tests { let result = check_sdk_compatibility( sdk_version, sdk_language, - TEST_CONTAINER_TYPE, + &TEST_CONTAINER_TYPE, &min_supported_sdk_versions, ); @@ -898,7 +948,7 @@ mod tests { let result = check_sdk_compatibility( sdk_version, sdk_language, - TEST_CONTAINER_TYPE, + &TEST_CONTAINER_TYPE, &min_supported_sdk_versions, ); @@ -1012,7 +1062,7 @@ mod tests { async fn test_get_container_type_from_file_valid() { let file_path = PathBuf::from("/var/run/numaflow/sourcer-server-info"); let container_type = get_container_type(&file_path); - assert_eq!("sourcer", container_type.unwrap()); + assert_eq!(ContainerType::Sourcer, container_type.unwrap()); } #[tokio::test] diff --git a/rust/numaflow-core/src/shared/utils.rs b/rust/numaflow-core/src/shared/utils.rs index 84fb5a0c3b..a6cea7eaed 100644 --- a/rust/numaflow-core/src/shared/utils.rs +++ b/rust/numaflow-core/src/shared/utils.rs @@ -26,7 +26,7 @@ use crate::error; use crate::metrics::{ start_metrics_https_server, PendingReader, PendingReaderBuilder, UserDefinedContainerState, }; -use crate::shared::server_info::check_for_server_compatibility; +use crate::shared::server_info::sdk_server_info; use crate::sink::{SinkClientType, SinkHandle}; use crate::source::SourceHandle; use crate::Error; @@ -186,16 +186,15 @@ pub(crate) async fn create_sink_handle( None, )), SinkType::UserDefined(ud_config) => { - check_for_server_compatibility( - ud_config.server_info_path.clone().into(), - cln_token.clone(), - ) - .await?; + _ = sdk_server_info(ud_config.server_info_path.clone().into(), 
cln_token.clone()) + .await?; let mut sink_grpc_client = SinkClient::new(create_rpc_channel(ud_config.socket_path.clone().into()).await?) .max_encoding_message_size(ud_config.grpc_max_message_size) .max_encoding_message_size(ud_config.grpc_max_message_size); wait_until_sink_ready(cln_token, &mut sink_grpc_client).await?; + // TODO: server info? + Ok(( SinkHandle::new( SinkClientType::UserDefined(sink_grpc_client.clone()), diff --git a/rust/servesink/Cargo.toml b/rust/servesink/Cargo.toml index 8f6b6234a2..3e5f8677f1 100644 --- a/rust/servesink/Cargo.toml +++ b/rust/servesink/Cargo.toml @@ -6,11 +6,11 @@ edition = "2021" [dependencies] tonic = "0.12.3" tokio = { version = "1.0", features = ["macros", "rt-multi-thread"] } -numaflow = { git = "https://github.com/numaproj/numaflow-rs.git", rev = "30d8ce1972fd3f0c0b8059fee209516afeef0088" } +numaflow = { git = "https://github.com/numaproj/numaflow-rs.git", rev = "ddd879588e11455921f1ca958ea2b3c076689293" } tracing = "0.1.40" tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } [dependencies.reqwest] version = "0.12.7" default-features = false -features = ["rustls-tls"] \ No newline at end of file +features = ["rustls-tls"] From 4f2568ba451fdb2cafdf13f5152689d37b03cb5b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Nov 2024 15:20:35 -0800 Subject: [PATCH 144/188] chore(deps): bump cross-spawn from 7.0.3 to 7.0.6 in /ui (#2228) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- ui/yarn.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ui/yarn.lock b/ui/yarn.lock index 3fde2777e1..e507771e3f 100644 --- a/ui/yarn.lock +++ b/ui/yarn.lock @@ -4440,9 +4440,9 @@ cross-fetch@^3.0.4, cross-fetch@^3.1.5: node-fetch "^2.6.12" cross-spawn@^7.0.0, cross-spawn@^7.0.2, cross-spawn@^7.0.3: - version "7.0.3" - resolved 
"https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6" - integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w== + version "7.0.6" + resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.6.tgz#8a58fe78f00dcd70c370451759dfbfaf03e8ee9f" + integrity sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA== dependencies: path-key "^3.1.0" shebang-command "^2.0.0" From 4924046d4f17785ff98e610c12c4dc761162ad77 Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Mon, 18 Nov 2024 15:29:06 -0800 Subject: [PATCH 145/188] chore: rename and respect pipeline.deletionGracePeriodSeconds (#2226) Signed-off-by: Derek Wang --- api/json-schema/schema.json | 11 +- api/openapi-spec/swagger.json | 11 +- .../full/numaflow.numaproj.io_pipelines.yaml | 8 +- config/install.yaml | 8 +- config/namespace-install.yaml | 8 +- docs/APIs.md | 26 +- .../reference/pipeline-operations.md | 2 +- pkg/apis/numaflow/v1alpha1/generated.pb.go | 1073 +++++++++-------- pkg/apis/numaflow/v1alpha1/generated.proto | 12 +- pkg/apis/numaflow/v1alpha1/pipeline_types.go | 37 +- .../numaflow/v1alpha1/pipeline_types_test.go | 35 +- .../v1alpha1/zz_generated.deepcopy.go | 13 +- .../numaflow/v1alpha1/zz_generated.openapi.go | 15 +- pkg/reconciler/pipeline/controller.go | 12 +- pkg/reconciler/vertex/scaling/scaling.go | 2 +- rust/numaflow-models/src/models/lifecycle.rs | 13 +- server/apis/v1/handler.go | 6 +- server/apis/v1/health.go | 4 +- 18 files changed, 711 insertions(+), 585 deletions(-) diff --git a/api/json-schema/schema.json b/api/json-schema/schema.json index 990485939c..ffc4bbcfee 100644 --- a/api/json-schema/schema.json +++ b/api/json-schema/schema.json @@ -20922,8 +20922,13 @@ "io.numaproj.numaflow.v1alpha1.Lifecycle": { "properties": { "deleteGracePeriodSeconds": { - "description": "DeleteGracePeriodSeconds used to delete pipeline gracefully", - "format": 
"int32", + "description": "DeleteGracePeriodSeconds used to delete pipeline gracefully Deprecated: Use DeletionGracePeriodSeconds instead", + "format": "int64", + "type": "integer" + }, + "deletionGracePeriodSeconds": { + "description": "DeletionGracePeriodSeconds used to delete pipeline gracefully", + "format": "int64", "type": "integer" }, "desiredPhase": { @@ -20932,7 +20937,7 @@ }, "pauseGracePeriodSeconds": { "description": "PauseGracePeriodSeconds used to pause pipeline gracefully", - "format": "int32", + "format": "int64", "type": "integer" } }, diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 544cab1601..afb3bb8b07 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -20918,9 +20918,14 @@ "type": "object", "properties": { "deleteGracePeriodSeconds": { - "description": "DeleteGracePeriodSeconds used to delete pipeline gracefully", + "description": "DeleteGracePeriodSeconds used to delete pipeline gracefully Deprecated: Use DeletionGracePeriodSeconds instead", "type": "integer", - "format": "int32" + "format": "int64" + }, + "deletionGracePeriodSeconds": { + "description": "DeletionGracePeriodSeconds used to delete pipeline gracefully", + "type": "integer", + "format": "int64" }, "desiredPhase": { "description": "DesiredPhase used to bring the pipeline from current phase to desired phase", @@ -20929,7 +20934,7 @@ "pauseGracePeriodSeconds": { "description": "PauseGracePeriodSeconds used to pause pipeline gracefully", "type": "integer", - "format": "int32" + "format": "int64" } } }, diff --git a/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml b/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml index dda91dbf69..d035799a96 100644 --- a/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml @@ -108,7 +108,11 @@ spec: properties: deleteGracePeriodSeconds: default: 30 - format: int32 + format: int64 + type: integer + 
deletionGracePeriodSeconds: + default: 30 + format: int64 type: integer desiredPhase: default: Running @@ -122,7 +126,7 @@ spec: type: string pauseGracePeriodSeconds: default: 30 - format: int32 + format: int64 type: integer type: object limits: diff --git a/config/install.yaml b/config/install.yaml index b82dd97583..d6332dc53f 100644 --- a/config/install.yaml +++ b/config/install.yaml @@ -9846,7 +9846,11 @@ spec: properties: deleteGracePeriodSeconds: default: 30 - format: int32 + format: int64 + type: integer + deletionGracePeriodSeconds: + default: 30 + format: int64 type: integer desiredPhase: default: Running @@ -9860,7 +9864,7 @@ spec: type: string pauseGracePeriodSeconds: default: 30 - format: int32 + format: int64 type: integer type: object limits: diff --git a/config/namespace-install.yaml b/config/namespace-install.yaml index ea7a6f6140..e2769284de 100644 --- a/config/namespace-install.yaml +++ b/config/namespace-install.yaml @@ -9846,7 +9846,11 @@ spec: properties: deleteGracePeriodSeconds: default: 30 - format: int32 + format: int64 + type: integer + deletionGracePeriodSeconds: + default: 30 + format: int64 type: integer desiredPhase: default: Running @@ -9860,7 +9864,7 @@ spec: type: string pauseGracePeriodSeconds: default: 30 - format: int32 + format: int64 type: integer type: object limits: diff --git a/docs/APIs.md b/docs/APIs.md index 6a5a5fb930..57c594c67b 100644 --- a/docs/APIs.md +++ b/docs/APIs.md @@ -5497,7 +5497,7 @@ Description -deleteGracePeriodSeconds
int32 +deletionGracePeriodSeconds
int64 @@ -5505,7 +5505,7 @@ Description (Optional)

-DeleteGracePeriodSeconds used to delete pipeline gracefully +DeletionGracePeriodSeconds used to delete pipeline gracefully

@@ -5538,7 +5538,7 @@ phase -pauseGracePeriodSeconds
int32 +pauseGracePeriodSeconds
int64 @@ -5553,6 +5553,26 @@ PauseGracePeriodSeconds used to pause pipeline gracefully + + + + +deleteGracePeriodSeconds
int64 + + + + +(Optional) +

+ +DeleteGracePeriodSeconds used to delete pipeline gracefully Deprecated: +Use DeletionGracePeriodSeconds instead +

+ + + + + diff --git a/docs/user-guide/reference/pipeline-operations.md b/docs/user-guide/reference/pipeline-operations.md index 2103dc9a77..85fd52f336 100644 --- a/docs/user-guide/reference/pipeline-operations.md +++ b/docs/user-guide/reference/pipeline-operations.md @@ -36,4 +36,4 @@ The command below will bring the pipeline back to `Running` status. ## Delete a Pipeline -When deleting a pipeline, before terminating all the pods, it will try to wait for all the backlog messages that have already been ingested into the pipeline to be processed. However, it will not wait forever, if the backlog is too large, it will terminate the pods after `terminationGracePeriodSeconds`, which defaults to 30, and can be customized by setting `spec.lifecycle.terminationGracePeriodSeconds`. +When deleting a pipeline, before terminating all the pods, it will try to wait for all the backlog messages that have already been ingested into the pipeline to be processed. However, it will not wait forever, if the backlog is too large, it will terminate the pods after `deletionGracePeriodSeconds`, which defaults to 30, and can be customized by setting `spec.lifecycle.deletionGracePeriodSeconds`. 
diff --git a/pkg/apis/numaflow/v1alpha1/generated.pb.go b/pkg/apis/numaflow/v1alpha1/generated.pb.go index 4523f1c02e..cc34329198 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.pb.go +++ b/pkg/apis/numaflow/v1alpha1/generated.pb.go @@ -2880,516 +2880,518 @@ func init() { } var fileDescriptor_9d0d1b17d3865563 = []byte{ - // 8136 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x5d, 0x6c, 0x1c, 0x59, - 0x76, 0xde, 0xf4, 0x7f, 0xf7, 0x69, 0xfe, 0xcd, 0x95, 0x46, 0x43, 0x69, 0x35, 0x6a, 0x6d, 0xad, - 0x77, 0x57, 0x8e, 0x6d, 0x32, 0x43, 0xef, 0xcc, 0xce, 0xda, 0xde, 0x9d, 0x61, 0x93, 0xa2, 0x44, - 0x89, 0x94, 0xb8, 0xa7, 0x49, 0xcd, 0xac, 0x27, 0xde, 0x49, 0xb1, 0xea, 0xb2, 0x59, 0xc3, 0xea, - 0xaa, 0xde, 0xaa, 0x6a, 0x4a, 0x1c, 0xc7, 0x58, 0x7b, 0x37, 0xc1, 0x6c, 0x90, 0x04, 0x09, 0xfc, - 0x64, 0x20, 0x70, 0x82, 0x04, 0x01, 0xfc, 0x60, 0x38, 0x0f, 0x46, 0x36, 0x0f, 0x01, 0xf2, 0xe3, - 0x20, 0x48, 0x36, 0xff, 0x8b, 0x20, 0x40, 0x26, 0x0f, 0x21, 0xb2, 0x0c, 0xf2, 0x90, 0x00, 0x09, - 0x8c, 0x18, 0x89, 0x13, 0xc1, 0x88, 0x83, 0xfb, 0x57, 0x7f, 0x5d, 0x2d, 0x91, 0x5d, 0x4d, 0x8d, - 0xc6, 0x99, 0xb7, 0xee, 0x7b, 0xce, 0xfd, 0xce, 0xad, 0x5b, 0xb7, 0xee, 0x3d, 0xf7, 0x9c, 0x73, - 0xcf, 0x85, 0x5b, 0x5d, 0x2b, 0xd8, 0x1f, 0xec, 0x2e, 0x18, 0x6e, 0x6f, 0xd1, 0x19, 0xf4, 0xf4, - 0xbe, 0xe7, 0xbe, 0xcf, 0x7f, 0xec, 0xd9, 0xee, 0xc3, 0xc5, 0xfe, 0x41, 0x77, 0x51, 0xef, 0x5b, - 0x7e, 0x54, 0x72, 0xf8, 0xaa, 0x6e, 0xf7, 0xf7, 0xf5, 0x57, 0x17, 0xbb, 0xd4, 0xa1, 0x9e, 0x1e, - 0x50, 0x73, 0xa1, 0xef, 0xb9, 0x81, 0x4b, 0xbe, 0x1c, 0x01, 0x2d, 0x28, 0xa0, 0x05, 0x55, 0x6d, - 0xa1, 0x7f, 0xd0, 0x5d, 0x60, 0x40, 0x51, 0x89, 0x02, 0xba, 0xf2, 0x53, 0xb1, 0x16, 0x74, 0xdd, - 0xae, 0xbb, 0xc8, 0xf1, 0x76, 0x07, 0x7b, 0xfc, 0x1f, 0xff, 0xc3, 0x7f, 0x09, 0x39, 0x57, 0xb4, - 0x83, 0x37, 0xfc, 0x05, 0xcb, 0x65, 0xcd, 0x5a, 0x34, 0x5c, 0x8f, 0x2e, 0x1e, 0x0e, 0xb5, 0xe5, - 0xca, 0x97, 0x22, 0x9e, 0x9e, 0x6e, 0xec, 0x5b, 0x0e, 
0xf5, 0x8e, 0xd4, 0xb3, 0x2c, 0x7a, 0xd4, - 0x77, 0x07, 0x9e, 0x41, 0xcf, 0x54, 0xcb, 0x5f, 0xec, 0xd1, 0x40, 0xcf, 0x92, 0xb5, 0x38, 0xaa, - 0x96, 0x37, 0x70, 0x02, 0xab, 0x37, 0x2c, 0xe6, 0xf5, 0xa7, 0x55, 0xf0, 0x8d, 0x7d, 0xda, 0xd3, - 0x87, 0xea, 0xfd, 0xf4, 0xa8, 0x7a, 0x83, 0xc0, 0xb2, 0x17, 0x2d, 0x27, 0xf0, 0x03, 0x2f, 0x5d, - 0x49, 0xfb, 0x1d, 0x80, 0x0b, 0xcb, 0xbb, 0x7e, 0xe0, 0xe9, 0x46, 0xb0, 0xe5, 0x9a, 0xdb, 0xb4, - 0xd7, 0xb7, 0xf5, 0x80, 0x92, 0x03, 0xa8, 0xb3, 0x07, 0x32, 0xf5, 0x40, 0x9f, 0x2f, 0x5c, 0x2f, - 0xdc, 0x68, 0x2e, 0x2d, 0x2f, 0x8c, 0xf9, 0x02, 0x17, 0x36, 0x25, 0x50, 0x7b, 0xea, 0xe4, 0xb8, - 0x55, 0x57, 0xff, 0x30, 0x14, 0x40, 0x7e, 0xad, 0x00, 0x53, 0x8e, 0x6b, 0xd2, 0x0e, 0xb5, 0xa9, - 0x11, 0xb8, 0xde, 0x7c, 0xf1, 0x7a, 0xe9, 0x46, 0x73, 0xe9, 0x9b, 0x63, 0x4b, 0xcc, 0x78, 0xa2, - 0x85, 0x7b, 0x31, 0x01, 0x37, 0x9d, 0xc0, 0x3b, 0x6a, 0x5f, 0xfc, 0xc1, 0x71, 0xeb, 0x85, 0x93, - 0xe3, 0xd6, 0x54, 0x9c, 0x84, 0x89, 0x96, 0x90, 0x1d, 0x68, 0x06, 0xae, 0xcd, 0xba, 0xcc, 0x72, - 0x1d, 0x7f, 0xbe, 0xc4, 0x1b, 0x76, 0x6d, 0x41, 0x74, 0x35, 0x13, 0xbf, 0xc0, 0xc6, 0xd8, 0xc2, - 0xe1, 0xab, 0x0b, 0xdb, 0x21, 0x5b, 0xfb, 0x82, 0x04, 0x6e, 0x46, 0x65, 0x3e, 0xc6, 0x71, 0x08, - 0x85, 0x59, 0x9f, 0x1a, 0x03, 0xcf, 0x0a, 0x8e, 0x56, 0x5c, 0x27, 0xa0, 0x8f, 0x82, 0xf9, 0x32, - 0xef, 0xe5, 0x2f, 0x64, 0x41, 0x6f, 0xb9, 0x66, 0x27, 0xc9, 0xdd, 0xbe, 0x70, 0x72, 0xdc, 0x9a, - 0x4d, 0x15, 0x62, 0x1a, 0x93, 0x38, 0x30, 0x67, 0xf5, 0xf4, 0x2e, 0xdd, 0x1a, 0xd8, 0x76, 0x87, - 0x1a, 0x1e, 0x0d, 0xfc, 0xf9, 0x0a, 0x7f, 0x84, 0x1b, 0x59, 0x72, 0x36, 0x5c, 0x43, 0xb7, 0xef, - 0xef, 0xbe, 0x4f, 0x8d, 0x00, 0xe9, 0x1e, 0xf5, 0xa8, 0x63, 0xd0, 0xf6, 0xbc, 0x7c, 0x98, 0xb9, - 0xf5, 0x14, 0x12, 0x0e, 0x61, 0x93, 0x5b, 0xf0, 0x62, 0xdf, 0xb3, 0x5c, 0xde, 0x04, 0x5b, 0xf7, - 0xfd, 0x7b, 0x7a, 0x8f, 0xce, 0x57, 0xaf, 0x17, 0x6e, 0x34, 0xda, 0x97, 0x25, 0xcc, 0x8b, 0x5b, - 0x69, 0x06, 0x1c, 0xae, 0x43, 0x6e, 0x40, 0x5d, 0x15, 0xce, 0xd7, 0xae, 0x17, 0x6e, 0x54, 
0xc4, - 0xd8, 0x51, 0x75, 0x31, 0xa4, 0x92, 0x35, 0xa8, 0xeb, 0x7b, 0x7b, 0x96, 0xc3, 0x38, 0xeb, 0xbc, - 0x0b, 0xaf, 0x66, 0x3d, 0xda, 0xb2, 0xe4, 0x11, 0x38, 0xea, 0x1f, 0x86, 0x75, 0xc9, 0x1d, 0x20, - 0x3e, 0xf5, 0x0e, 0x2d, 0x83, 0x2e, 0x1b, 0x86, 0x3b, 0x70, 0x02, 0xde, 0xf6, 0x06, 0x6f, 0xfb, - 0x15, 0xd9, 0x76, 0xd2, 0x19, 0xe2, 0xc0, 0x8c, 0x5a, 0xe4, 0x2d, 0x98, 0x93, 0xdf, 0x6a, 0xd4, - 0x0b, 0xc0, 0x91, 0x2e, 0xb2, 0x8e, 0xc4, 0x14, 0x0d, 0x87, 0xb8, 0x89, 0x09, 0x57, 0xf5, 0x41, - 0xe0, 0xf6, 0x18, 0x64, 0x52, 0xe8, 0xb6, 0x7b, 0x40, 0x9d, 0xf9, 0xe6, 0xf5, 0xc2, 0x8d, 0x7a, - 0xfb, 0xfa, 0xc9, 0x71, 0xeb, 0xea, 0xf2, 0x13, 0xf8, 0xf0, 0x89, 0x28, 0xe4, 0x3e, 0x34, 0x4c, - 0xc7, 0xdf, 0x72, 0x6d, 0xcb, 0x38, 0x9a, 0x9f, 0xe2, 0x0d, 0x7c, 0x55, 0x3e, 0x6a, 0x63, 0xf5, - 0x5e, 0x47, 0x10, 0x1e, 0x1f, 0xb7, 0xae, 0x0e, 0x4f, 0xa9, 0x0b, 0x21, 0x1d, 0x23, 0x0c, 0xb2, - 0xc9, 0x01, 0x57, 0x5c, 0x67, 0xcf, 0xea, 0xce, 0x4f, 0xf3, 0xb7, 0x71, 0x7d, 0xc4, 0x80, 0x5e, - 0xbd, 0xd7, 0x11, 0x7c, 0xed, 0x69, 0x29, 0x4e, 0xfc, 0xc5, 0x08, 0x81, 0x98, 0x30, 0xa3, 0x26, - 0xe3, 0x15, 0x5b, 0xb7, 0x7a, 0xfe, 0xfc, 0x0c, 0x1f, 0xbc, 0x3f, 0x36, 0x02, 0x13, 0xe3, 0xcc, - 0xed, 0x4b, 0xf2, 0x51, 0x66, 0x12, 0xc5, 0x3e, 0xa6, 0x30, 0xaf, 0xbc, 0x09, 0x2f, 0x0e, 0xcd, - 0x0d, 0x64, 0x0e, 0x4a, 0x07, 0xf4, 0x88, 0x4f, 0x7d, 0x0d, 0x64, 0x3f, 0xc9, 0x45, 0xa8, 0x1c, - 0xea, 0xf6, 0x80, 0xce, 0x17, 0x79, 0x99, 0xf8, 0xf3, 0x33, 0xc5, 0x37, 0x0a, 0xda, 0x5f, 0x2f, - 0xc1, 0x94, 0x9a, 0x71, 0x3a, 0x96, 0x73, 0x40, 0xde, 0x86, 0x92, 0xed, 0x76, 0xe5, 0xbc, 0xf9, - 0x73, 0x63, 0xcf, 0x62, 0x1b, 0x6e, 0xb7, 0x5d, 0x3b, 0x39, 0x6e, 0x95, 0x36, 0xdc, 0x2e, 0x32, - 0x44, 0x62, 0x40, 0xe5, 0x40, 0xdf, 0x3b, 0xd0, 0x79, 0x1b, 0x9a, 0x4b, 0xed, 0xb1, 0xa1, 0xef, - 0x32, 0x14, 0xd6, 0xd6, 0x76, 0xe3, 0xe4, 0xb8, 0x55, 0xe1, 0x7f, 0x51, 0x60, 0x13, 0x17, 0x1a, - 0xbb, 0xb6, 0x6e, 0x1c, 0xec, 0xbb, 0x36, 0x9d, 0x2f, 0xe5, 0x14, 0xd4, 0x56, 0x48, 0xe2, 0x35, - 0x87, 0x7f, 0x31, 0x92, 0x41, 
0x0c, 0xa8, 0x0e, 0x4c, 0xdf, 0x72, 0x0e, 0xe4, 0x1c, 0xf8, 0xe6, - 0xd8, 0xd2, 0x76, 0x56, 0xf9, 0x33, 0xc1, 0xc9, 0x71, 0xab, 0x2a, 0x7e, 0xa3, 0x84, 0xd6, 0xfe, - 0xf7, 0x14, 0xcc, 0xa8, 0x97, 0xf4, 0x80, 0x7a, 0x01, 0x7d, 0x44, 0xae, 0x43, 0xd9, 0x61, 0x9f, - 0x26, 0x7f, 0xc9, 0xed, 0x29, 0x39, 0x5c, 0xca, 0xfc, 0x93, 0xe4, 0x14, 0xd6, 0x32, 0x31, 0x54, - 0x64, 0x87, 0x8f, 0xdf, 0xb2, 0x0e, 0x87, 0x11, 0x2d, 0x13, 0xbf, 0x51, 0x42, 0x93, 0x77, 0xa1, - 0xcc, 0x1f, 0x5e, 0x74, 0xf5, 0x57, 0xc7, 0x17, 0xc1, 0x1e, 0xbd, 0xce, 0x9e, 0x80, 0x3f, 0x38, - 0x07, 0x65, 0x43, 0x71, 0x60, 0xee, 0xc9, 0x8e, 0xfd, 0xb9, 0x1c, 0x1d, 0xbb, 0x26, 0x86, 0xe2, - 0xce, 0xea, 0x1a, 0x32, 0x44, 0xf2, 0x17, 0x0b, 0xf0, 0xa2, 0xe1, 0x3a, 0x81, 0xce, 0xf4, 0x0c, - 0xb5, 0xc8, 0xce, 0x57, 0xb8, 0x9c, 0x3b, 0x63, 0xcb, 0x59, 0x49, 0x23, 0xb6, 0x5f, 0x62, 0x6b, - 0xc6, 0x50, 0x31, 0x0e, 0xcb, 0x26, 0x7f, 0xb9, 0x00, 0x2f, 0xb1, 0xb9, 0x7c, 0x88, 0x99, 0xaf, - 0x40, 0x93, 0x6d, 0xd5, 0xe5, 0x93, 0xe3, 0xd6, 0x4b, 0xeb, 0x59, 0xc2, 0x30, 0xbb, 0x0d, 0xac, - 0x75, 0x17, 0xf4, 0x61, 0xb5, 0x84, 0xaf, 0x6e, 0xcd, 0xa5, 0x8d, 0x49, 0xaa, 0x3a, 0xed, 0xcf, - 0xc8, 0xa1, 0x9c, 0xa5, 0xd9, 0x61, 0x56, 0x2b, 0xc8, 0x4d, 0xa8, 0x1d, 0xba, 0xf6, 0xa0, 0x47, - 0xfd, 0xf9, 0x3a, 0x9f, 0x62, 0xaf, 0x64, 0x4d, 0xb1, 0x0f, 0x38, 0x4b, 0x7b, 0x56, 0xc2, 0xd7, - 0xc4, 0x7f, 0x1f, 0x55, 0x5d, 0x62, 0x41, 0xd5, 0xb6, 0x7a, 0x56, 0xe0, 0xf3, 0x85, 0xb3, 0xb9, - 0x74, 0x73, 0xec, 0xc7, 0x12, 0x9f, 0xe8, 0x06, 0x07, 0x13, 0x5f, 0x8d, 0xf8, 0x8d, 0x52, 0x00, - 0x9b, 0x0a, 0x7d, 0x43, 0xb7, 0xc5, 0xc2, 0xda, 0x5c, 0xfa, 0xda, 0xf8, 0x9f, 0x0d, 0x43, 0x69, - 0x4f, 0xcb, 0x67, 0xaa, 0xf0, 0xbf, 0x28, 0xb0, 0xc9, 0x2f, 0xc0, 0x4c, 0xe2, 0x6d, 0xfa, 0xf3, - 0x4d, 0xde, 0x3b, 0xaf, 0x64, 0xf5, 0x4e, 0xc8, 0x15, 0xad, 0x3c, 0x89, 0x11, 0xe2, 0x63, 0x0a, - 0x8c, 0xdc, 0x85, 0xba, 0x6f, 0x99, 0xd4, 0xd0, 0x3d, 0x7f, 0x7e, 0xea, 0x34, 0xc0, 0x73, 0x12, - 0xb8, 0xde, 0x91, 0xd5, 0x30, 0x04, 0x20, 0x0b, 0x00, 0x7d, 0xdd, 
0x0b, 0x2c, 0xa1, 0xa8, 0x4e, - 0x73, 0xa5, 0x69, 0xe6, 0xe4, 0xb8, 0x05, 0x5b, 0x61, 0x29, 0xc6, 0x38, 0x18, 0x3f, 0xab, 0xbb, - 0xee, 0xf4, 0x07, 0x81, 0x58, 0x58, 0x1b, 0x82, 0xbf, 0x13, 0x96, 0x62, 0x8c, 0x83, 0xfc, 0x56, - 0x01, 0x3e, 0x13, 0xfd, 0x1d, 0xfe, 0xc8, 0x66, 0x27, 0xfe, 0x91, 0xb5, 0x4e, 0x8e, 0x5b, 0x9f, - 0xe9, 0x8c, 0x16, 0x89, 0x4f, 0x6a, 0x0f, 0xf9, 0xb0, 0x00, 0x33, 0x83, 0xbe, 0xa9, 0x07, 0xb4, - 0x13, 0xb0, 0x1d, 0x4f, 0xf7, 0x68, 0x7e, 0x8e, 0x37, 0xf1, 0xd6, 0xf8, 0xb3, 0x60, 0x02, 0x2e, - 0x7a, 0xcd, 0xc9, 0x72, 0x4c, 0x89, 0xd5, 0xde, 0x86, 0xe9, 0xe5, 0x41, 0xb0, 0xef, 0x7a, 0xd6, - 0x07, 0x5c, 0xfd, 0x27, 0x6b, 0x50, 0x09, 0xb8, 0x1a, 0x27, 0x34, 0x84, 0xcf, 0x67, 0xbd, 0x74, - 0xa1, 0x52, 0xdf, 0xa5, 0x47, 0x4a, 0x2f, 0x11, 0x2b, 0xb5, 0x50, 0xeb, 0x44, 0x75, 0xed, 0x4f, - 0x17, 0xa0, 0xd6, 0xd6, 0x8d, 0x03, 0x77, 0x6f, 0x8f, 0xbc, 0x03, 0x75, 0xcb, 0x09, 0xa8, 0x77, - 0xa8, 0xdb, 0x12, 0x76, 0x21, 0x06, 0x1b, 0x6e, 0x08, 0xa3, 0xc7, 0x63, 0xbb, 0x2f, 0x26, 0x68, - 0x75, 0x20, 0x77, 0x2d, 0x5c, 0x33, 0x5e, 0x97, 0x18, 0x18, 0xa2, 0x91, 0x16, 0x54, 0xfc, 0x80, - 0xf6, 0x7d, 0xbe, 0x06, 0x4e, 0x8b, 0x66, 0x74, 0x58, 0x01, 0x8a, 0x72, 0xed, 0xaf, 0x15, 0xa0, - 0xd1, 0xd6, 0x7d, 0xcb, 0x60, 0x4f, 0x49, 0x56, 0xa0, 0x3c, 0xf0, 0xa9, 0x77, 0xb6, 0x67, 0xe3, - 0xcb, 0xd6, 0x8e, 0x4f, 0x3d, 0xe4, 0x95, 0xc9, 0x7d, 0xa8, 0xf7, 0x75, 0xdf, 0x7f, 0xe8, 0x7a, - 0xa6, 0x5c, 0x7a, 0x4f, 0x09, 0x24, 0xb6, 0x09, 0xb2, 0x2a, 0x86, 0x20, 0x5a, 0x13, 0x22, 0xdd, - 0x43, 0xfb, 0xbd, 0x02, 0x5c, 0x68, 0x0f, 0xf6, 0xf6, 0xa8, 0x27, 0xb5, 0x62, 0xa9, 0x6f, 0x52, - 0xa8, 0x78, 0xd4, 0xb4, 0x7c, 0xd9, 0xf6, 0xd5, 0xb1, 0x07, 0x0a, 0x32, 0x14, 0xa9, 0xde, 0xf2, - 0xfe, 0xe2, 0x05, 0x28, 0xd0, 0xc9, 0x00, 0x1a, 0xef, 0x53, 0xb6, 0x1b, 0xa7, 0x7a, 0x4f, 0x3e, - 0xdd, 0xed, 0xb1, 0x45, 0xdd, 0xa1, 0x41, 0x87, 0x23, 0xc5, 0xb5, 0xe9, 0xb0, 0x10, 0x23, 0x49, - 0xda, 0xef, 0x54, 0x60, 0x6a, 0xc5, 0xed, 0xed, 0x5a, 0x0e, 0x35, 0x6f, 0x9a, 0x5d, 0x4a, 0xde, - 0x83, 
0x32, 0x35, 0xbb, 0x54, 0x3e, 0xed, 0xf8, 0x8a, 0x07, 0x03, 0x8b, 0xd4, 0x27, 0xf6, 0x0f, - 0x39, 0x30, 0xd9, 0x80, 0x99, 0x3d, 0xcf, 0xed, 0x89, 0xb9, 0x7c, 0xfb, 0xa8, 0x2f, 0x75, 0xe7, - 0xf6, 0x8f, 0xa9, 0x0f, 0x67, 0x2d, 0x41, 0x7d, 0x7c, 0xdc, 0x82, 0xe8, 0x1f, 0xa6, 0xea, 0x92, - 0x77, 0x60, 0x3e, 0x2a, 0x09, 0x27, 0xb5, 0x15, 0xb6, 0x9d, 0xe1, 0xba, 0x53, 0xa5, 0x7d, 0xf5, - 0xe4, 0xb8, 0x35, 0xbf, 0x36, 0x82, 0x07, 0x47, 0xd6, 0x66, 0x53, 0xc5, 0x5c, 0x44, 0x14, 0x0b, - 0x8d, 0x54, 0x99, 0x26, 0xb4, 0x82, 0xf1, 0x7d, 0xdf, 0x5a, 0x4a, 0x04, 0x0e, 0x09, 0x25, 0x6b, - 0x30, 0x15, 0xb8, 0xb1, 0xfe, 0xaa, 0xf0, 0xfe, 0xd2, 0x94, 0xa1, 0x62, 0xdb, 0x1d, 0xd9, 0x5b, - 0x89, 0x7a, 0x04, 0xe1, 0x92, 0xfa, 0x9f, 0xea, 0xa9, 0x2a, 0xef, 0xa9, 0x2b, 0x27, 0xc7, 0xad, - 0x4b, 0xdb, 0x99, 0x1c, 0x38, 0xa2, 0x26, 0xf9, 0x95, 0x02, 0xcc, 0x28, 0x92, 0xec, 0xa3, 0xda, - 0x24, 0xfb, 0x88, 0xb0, 0x11, 0xb1, 0x9d, 0x10, 0x80, 0x29, 0x81, 0xda, 0xf7, 0x6b, 0xd0, 0x08, - 0xa7, 0x7a, 0xf2, 0x39, 0xa8, 0x70, 0x13, 0x84, 0xd4, 0xe0, 0xc3, 0x35, 0x9c, 0x5b, 0x2a, 0x50, - 0xd0, 0xc8, 0xe7, 0xa1, 0x66, 0xb8, 0xbd, 0x9e, 0xee, 0x98, 0xdc, 0xac, 0xd4, 0x68, 0x37, 0x99, - 0xea, 0xb2, 0x22, 0x8a, 0x50, 0xd1, 0xc8, 0x55, 0x28, 0xeb, 0x5e, 0x57, 0x58, 0x78, 0x1a, 0x62, - 0x3e, 0x5a, 0xf6, 0xba, 0x3e, 0xf2, 0x52, 0xf2, 0x15, 0x28, 0x51, 0xe7, 0x70, 0xbe, 0x3c, 0x5a, - 0x37, 0xba, 0xe9, 0x1c, 0x3e, 0xd0, 0xbd, 0x76, 0x53, 0xb6, 0xa1, 0x74, 0xd3, 0x39, 0x44, 0x56, - 0x87, 0x6c, 0x40, 0x8d, 0x3a, 0x87, 0xec, 0xdd, 0x4b, 0xd3, 0xcb, 0x67, 0x47, 0x54, 0x67, 0x2c, - 0x72, 0x9b, 0x10, 0x6a, 0x58, 0xb2, 0x18, 0x15, 0x04, 0xf9, 0x06, 0x4c, 0x09, 0x65, 0x6b, 0x93, - 0xbd, 0x13, 0x7f, 0xbe, 0xca, 0x21, 0x5b, 0xa3, 0xb5, 0x35, 0xce, 0x17, 0x99, 0xba, 0x62, 0x85, - 0x3e, 0x26, 0xa0, 0xc8, 0x37, 0xa0, 0xa1, 0x76, 0xc6, 0xea, 0xcd, 0x66, 0x5a, 0x89, 0xd4, 0x76, - 0x1a, 0xe9, 0xb7, 0x06, 0x96, 0x47, 0x7b, 0xd4, 0x09, 0xfc, 0xf6, 0x8b, 0xca, 0x6e, 0xa0, 0xa8, - 0x3e, 0x46, 0x68, 0x64, 0x77, 0xd8, 0xdc, 
0x25, 0x6c, 0x35, 0x9f, 0x1b, 0x31, 0xab, 0x8f, 0x61, - 0xeb, 0xfa, 0x26, 0xcc, 0x86, 0xf6, 0x28, 0x69, 0xd2, 0x10, 0xd6, 0x9b, 0x2f, 0xb1, 0xea, 0xeb, - 0x49, 0xd2, 0xe3, 0xe3, 0xd6, 0x2b, 0x19, 0x46, 0x8d, 0x88, 0x01, 0xd3, 0x60, 0xe4, 0x03, 0x98, - 0xf1, 0xa8, 0x6e, 0x5a, 0x0e, 0xf5, 0xfd, 0x2d, 0xcf, 0xdd, 0xcd, 0xaf, 0x79, 0x72, 0x14, 0x31, - 0xec, 0x31, 0x81, 0x8c, 0x29, 0x49, 0xe4, 0x21, 0x4c, 0xdb, 0xd6, 0x21, 0x8d, 0x44, 0x37, 0x27, - 0x22, 0xfa, 0xc5, 0x93, 0xe3, 0xd6, 0xf4, 0x46, 0x1c, 0x18, 0x93, 0x72, 0x98, 0xa6, 0xd2, 0x77, - 0xbd, 0x40, 0xa9, 0xa7, 0x9f, 0x7d, 0xa2, 0x7a, 0xba, 0xe5, 0x7a, 0x41, 0xf4, 0x11, 0xb2, 0x7f, - 0x3e, 0x8a, 0xea, 0xda, 0xdf, 0xaa, 0xc0, 0xf0, 0x26, 0x2e, 0x39, 0xe2, 0x0a, 0x93, 0x1e, 0x71, - 0xe9, 0xd1, 0x20, 0xd6, 0x9e, 0x37, 0x64, 0xb5, 0x09, 0x8c, 0x88, 0x8c, 0x51, 0x5d, 0x9a, 0xf4, - 0xa8, 0x7e, 0x6e, 0x26, 0x9e, 0xe1, 0xe1, 0x5f, 0xfd, 0xf8, 0x86, 0x7f, 0xed, 0xd9, 0x0c, 0x7f, - 0xed, 0x7b, 0x65, 0x98, 0x59, 0xd5, 0x69, 0xcf, 0x75, 0x9e, 0xba, 0x8f, 0x2f, 0x3c, 0x17, 0xfb, - 0xf8, 0x1b, 0x50, 0xf7, 0x68, 0xdf, 0xb6, 0x0c, 0x5d, 0xa8, 0xeb, 0xd2, 0x6e, 0x8e, 0xb2, 0x0c, - 0x43, 0xea, 0x08, 0xfb, 0x4d, 0xe9, 0xb9, 0xb4, 0xdf, 0x94, 0x3f, 0x7e, 0xfb, 0x8d, 0xf6, 0x2b, - 0x45, 0xe0, 0xaa, 0x2d, 0xb9, 0x0e, 0x65, 0xa6, 0xb6, 0xa5, 0xad, 0x86, 0xfc, 0x6b, 0xe1, 0x14, - 0x72, 0x05, 0x8a, 0x81, 0x2b, 0xa7, 0x1b, 0x90, 0xf4, 0xe2, 0xb6, 0x8b, 0xc5, 0xc0, 0x25, 0x1f, - 0x00, 0x18, 0xae, 0x63, 0x5a, 0xca, 0x9d, 0x94, 0xef, 0xc1, 0xd6, 0x5c, 0xef, 0xa1, 0xee, 0x99, - 0x2b, 0x21, 0xa2, 0xd8, 0xc1, 0x47, 0xff, 0x31, 0x26, 0x8d, 0xbc, 0x09, 0x55, 0xd7, 0x59, 0x1b, - 0xd8, 0x36, 0xef, 0xd0, 0x46, 0xfb, 0x8b, 0x27, 0xc7, 0xad, 0xea, 0x7d, 0x5e, 0xf2, 0xf8, 0xb8, - 0x75, 0x59, 0xec, 0x88, 0xd8, 0xbf, 0xb7, 0x3d, 0x2b, 0xb0, 0x9c, 0x6e, 0xb8, 0xa1, 0x95, 0xd5, - 0xb4, 0x5f, 0x2d, 0x40, 0x73, 0xcd, 0x7a, 0x44, 0xcd, 0xb7, 0x2d, 0xc7, 0x74, 0x1f, 0x12, 0x84, - 0xaa, 0x4d, 0x9d, 0x6e, 0xb0, 0x3f, 0xe6, 0x8e, 0x53, 0xd8, 0x75, 0x38, 0x02, 
0x4a, 0x24, 0xb2, - 0x08, 0x0d, 0xb1, 0x5f, 0xb1, 0x9c, 0x2e, 0xef, 0xc3, 0x7a, 0x34, 0xd3, 0x77, 0x14, 0x01, 0x23, - 0x1e, 0xed, 0x08, 0x5e, 0x1c, 0xea, 0x06, 0x62, 0x42, 0x39, 0xd0, 0xbb, 0x6a, 0x51, 0x59, 0x1b, - 0xbb, 0x83, 0xb7, 0xf5, 0x6e, 0xac, 0x73, 0xb9, 0x56, 0xb8, 0xad, 0x33, 0xad, 0x90, 0xa1, 0x6b, - 0x7f, 0x50, 0x80, 0xfa, 0xda, 0xc0, 0x31, 0xf8, 0xa6, 0xfe, 0xe9, 0xd6, 0x64, 0xa5, 0x62, 0x16, - 0x33, 0x55, 0xcc, 0x01, 0x54, 0x0f, 0x1e, 0x86, 0x2a, 0x68, 0x73, 0x69, 0x73, 0xfc, 0x51, 0x21, - 0x9b, 0xb4, 0x70, 0x97, 0xe3, 0x09, 0x67, 0xe7, 0x8c, 0x6c, 0x50, 0xf5, 0xee, 0xdb, 0x5c, 0xa8, - 0x14, 0x76, 0xe5, 0x2b, 0xd0, 0x8c, 0xb1, 0x9d, 0xc9, 0xef, 0xf1, 0xb7, 0xcb, 0x50, 0xbd, 0xd5, - 0xe9, 0x2c, 0x6f, 0xad, 0x93, 0xd7, 0xa0, 0x29, 0xfd, 0x60, 0xf7, 0xa2, 0x3e, 0x08, 0xdd, 0xa0, - 0x9d, 0x88, 0x84, 0x71, 0x3e, 0xa6, 0xc0, 0x7b, 0x54, 0xb7, 0x7b, 0xf2, 0x63, 0x09, 0x75, 0x07, - 0x64, 0x85, 0x28, 0x68, 0x44, 0x87, 0x99, 0x81, 0x4f, 0x3d, 0xd6, 0x85, 0x62, 0xbf, 0x2f, 0x3f, - 0x9b, 0x53, 0x5a, 0x04, 0xf8, 0x02, 0xb3, 0x93, 0x00, 0xc0, 0x14, 0x20, 0x79, 0x03, 0xea, 0xfa, - 0x20, 0xd8, 0xe7, 0x5b, 0x2e, 0xf1, 0x6d, 0x5c, 0xe5, 0x6e, 0x42, 0x59, 0xf6, 0xf8, 0xb8, 0x35, - 0x75, 0x17, 0xdb, 0xaf, 0xa9, 0xff, 0x18, 0x72, 0xb3, 0xc6, 0x29, 0x1b, 0x83, 0x6c, 0x5c, 0xe5, - 0xcc, 0x8d, 0xdb, 0x4a, 0x00, 0x60, 0x0a, 0x90, 0xbc, 0x0b, 0x53, 0x07, 0xf4, 0x28, 0xd0, 0x77, - 0xa5, 0x80, 0xea, 0x59, 0x04, 0xcc, 0x31, 0xa5, 0xff, 0x6e, 0xac, 0x3a, 0x26, 0xc0, 0x88, 0x0f, - 0x17, 0x0f, 0xa8, 0xb7, 0x4b, 0x3d, 0x57, 0xda, 0x2b, 0xa4, 0x90, 0xda, 0x59, 0x84, 0xcc, 0x9f, - 0x1c, 0xb7, 0x2e, 0xde, 0xcd, 0x80, 0xc1, 0x4c, 0x70, 0xed, 0xff, 0x14, 0x61, 0xf6, 0x96, 0x08, - 0x44, 0x70, 0x3d, 0xa1, 0x79, 0x90, 0xcb, 0x50, 0xf2, 0xfa, 0x03, 0x3e, 0x72, 0x4a, 0xc2, 0xd5, - 0x80, 0x5b, 0x3b, 0xc8, 0xca, 0xc8, 0x3b, 0x50, 0x37, 0xe5, 0x94, 0x21, 0xcd, 0x25, 0x63, 0x99, - 0xb6, 0xd4, 0x3f, 0x0c, 0xd1, 0xd8, 0xde, 0xb0, 0xe7, 0x77, 0x3b, 0xd6, 0x07, 0x54, 0x5a, 0x10, - 0xf8, 0xde, 0x70, 
0x53, 0x14, 0xa1, 0xa2, 0xb1, 0x55, 0xf5, 0x80, 0x1e, 0x89, 0xfd, 0x73, 0x39, - 0x5a, 0x55, 0xef, 0xca, 0x32, 0x0c, 0xa9, 0xa4, 0xa5, 0x3e, 0x16, 0x36, 0x0a, 0xca, 0xc2, 0xf6, - 0xf3, 0x80, 0x15, 0xc8, 0xef, 0x86, 0x4d, 0x99, 0xef, 0x5b, 0x41, 0x40, 0x3d, 0xf9, 0x1a, 0xc7, - 0x9a, 0x32, 0xef, 0x70, 0x04, 0x94, 0x48, 0xe4, 0x27, 0xa0, 0xc1, 0xc1, 0xdb, 0xb6, 0xbb, 0xcb, - 0x5f, 0x5c, 0x43, 0x58, 0x81, 0x1e, 0xa8, 0x42, 0x8c, 0xe8, 0xda, 0x1f, 0x16, 0xe1, 0xd2, 0x2d, - 0x1a, 0x08, 0xad, 0x66, 0x95, 0xf6, 0x6d, 0xf7, 0x88, 0xe9, 0xd3, 0x48, 0xbf, 0x45, 0xde, 0x02, - 0xb0, 0xfc, 0xdd, 0xce, 0xa1, 0xc1, 0xbf, 0x03, 0xf1, 0x0d, 0x5f, 0x97, 0x9f, 0x24, 0xac, 0x77, - 0xda, 0x92, 0xf2, 0x38, 0xf1, 0x0f, 0x63, 0x75, 0xa2, 0x0d, 0x79, 0xf1, 0x09, 0x1b, 0xf2, 0x0e, - 0x40, 0x3f, 0xd2, 0xca, 0x4b, 0x9c, 0xf3, 0xa7, 0x95, 0x98, 0xb3, 0x28, 0xe4, 0x31, 0x98, 0x3c, - 0x7a, 0xb2, 0x03, 0x73, 0x26, 0xdd, 0xd3, 0x07, 0x76, 0x10, 0xee, 0x24, 0xe4, 0x47, 0x7c, 0xfa, - 0xcd, 0x48, 0x18, 0x24, 0xb1, 0x9a, 0x42, 0xc2, 0x21, 0x6c, 0xed, 0xef, 0x94, 0xe0, 0xca, 0x2d, - 0x1a, 0x84, 0x36, 0x3a, 0x39, 0x3b, 0x76, 0xfa, 0xd4, 0x60, 0x6f, 0xe1, 0xc3, 0x02, 0x54, 0x6d, - 0x7d, 0x97, 0xda, 0x6c, 0xf5, 0x62, 0x4f, 0xf3, 0xde, 0xd8, 0x0b, 0xc1, 0x68, 0x29, 0x0b, 0x1b, - 0x5c, 0x42, 0x6a, 0x69, 0x10, 0x85, 0x28, 0xc5, 0xb3, 0x49, 0xdd, 0xb0, 0x07, 0x7e, 0x20, 0x76, - 0x76, 0x52, 0x9f, 0x0c, 0x27, 0xf5, 0x95, 0x88, 0x84, 0x71, 0x3e, 0xb2, 0x04, 0x60, 0xd8, 0x16, - 0x75, 0x02, 0x5e, 0x4b, 0x7c, 0x57, 0x44, 0xbd, 0xdf, 0x95, 0x90, 0x82, 0x31, 0x2e, 0x26, 0xaa, - 0xe7, 0x3a, 0x56, 0xe0, 0x0a, 0x51, 0xe5, 0xa4, 0xa8, 0xcd, 0x88, 0x84, 0x71, 0x3e, 0x5e, 0x8d, - 0x06, 0x9e, 0x65, 0xf8, 0xbc, 0x5a, 0x25, 0x55, 0x2d, 0x22, 0x61, 0x9c, 0x8f, 0xad, 0x79, 0xb1, - 0xe7, 0x3f, 0xd3, 0x9a, 0xf7, 0x9b, 0x0d, 0xb8, 0x96, 0xe8, 0xd6, 0x40, 0x0f, 0xe8, 0xde, 0xc0, - 0xee, 0xd0, 0x40, 0xbd, 0xc0, 0x31, 0xd7, 0xc2, 0x3f, 0x17, 0xbd, 0x77, 0x11, 0xfe, 0x64, 0x4c, - 0xe6, 0xbd, 0x0f, 0x35, 0xf0, 0x54, 0xef, 0x7e, 0x11, 
0x1a, 0x8e, 0x1e, 0xf8, 0xfc, 0xc3, 0x95, - 0xdf, 0x68, 0xa8, 0x86, 0xdd, 0x53, 0x04, 0x8c, 0x78, 0xc8, 0x16, 0x5c, 0x94, 0x5d, 0x7c, 0xf3, - 0x11, 0xdb, 0xf3, 0x53, 0x4f, 0xd4, 0x95, 0xcb, 0xa9, 0xac, 0x7b, 0x71, 0x33, 0x83, 0x07, 0x33, - 0x6b, 0x92, 0x4d, 0xb8, 0x60, 0x88, 0x90, 0x10, 0x6a, 0xbb, 0xba, 0xa9, 0x00, 0x85, 0x49, 0x34, - 0xdc, 0x1a, 0xad, 0x0c, 0xb3, 0x60, 0x56, 0xbd, 0xf4, 0x68, 0xae, 0x8e, 0x35, 0x9a, 0x6b, 0xe3, - 0x8c, 0xe6, 0xfa, 0x78, 0xa3, 0xb9, 0x71, 0xba, 0xd1, 0xcc, 0x7a, 0x9e, 0x8d, 0x23, 0xea, 0x31, - 0xf5, 0x44, 0xac, 0xb0, 0xb1, 0x88, 0xa3, 0xb0, 0xe7, 0x3b, 0x19, 0x3c, 0x98, 0x59, 0x93, 0xec, - 0xc2, 0x15, 0x51, 0x7e, 0xd3, 0x31, 0xbc, 0xa3, 0x3e, 0x5b, 0x78, 0x62, 0xb8, 0xcd, 0x84, 0x4d, - 0xfa, 0x4a, 0x67, 0x24, 0x27, 0x3e, 0x01, 0x85, 0xfc, 0x2c, 0x4c, 0x8b, 0xb7, 0xb4, 0xa9, 0xf7, - 0x39, 0xac, 0x88, 0x3f, 0x7a, 0x49, 0xc2, 0x4e, 0xaf, 0xc4, 0x89, 0x98, 0xe4, 0x25, 0xcb, 0x30, - 0xdb, 0x3f, 0x34, 0xd8, 0xcf, 0xf5, 0xbd, 0x7b, 0x94, 0x9a, 0xd4, 0xe4, 0x0e, 0xcf, 0x46, 0xfb, - 0x65, 0x65, 0xdd, 0xd9, 0x4a, 0x92, 0x31, 0xcd, 0x4f, 0xde, 0x80, 0x29, 0x3f, 0xd0, 0xbd, 0x40, - 0x1a, 0x82, 0xe7, 0x67, 0x44, 0x7c, 0x96, 0xb2, 0x93, 0x76, 0x62, 0x34, 0x4c, 0x70, 0x66, 0xae, - 0x17, 0xb3, 0xe7, 0xb7, 0x5e, 0xe4, 0x99, 0xad, 0xfe, 0x49, 0x11, 0xae, 0xdf, 0xa2, 0xc1, 0xa6, - 0xeb, 0x48, 0x33, 0x7a, 0xd6, 0xb2, 0x7f, 0x2a, 0x2b, 0x7a, 0x72, 0xd1, 0x2e, 0x4e, 0x74, 0xd1, - 0x2e, 0x4d, 0x68, 0xd1, 0x2e, 0x9f, 0xe3, 0xa2, 0xfd, 0xf7, 0x8a, 0xf0, 0x72, 0xa2, 0x27, 0xb7, - 0x5c, 0x53, 0x4d, 0xf8, 0x9f, 0x76, 0xe0, 0x29, 0x3a, 0xf0, 0xb1, 0xd0, 0x3b, 0xb9, 0x23, 0x34, - 0xa5, 0xf1, 0x7c, 0x37, 0xad, 0xf1, 0xbc, 0x9b, 0x67, 0xe5, 0xcb, 0x90, 0x70, 0xaa, 0x15, 0xef, - 0x0e, 0x10, 0x4f, 0xba, 0x6d, 0x23, 0x73, 0xb6, 0x54, 0x7a, 0xc2, 0x00, 0x50, 0x1c, 0xe2, 0xc0, - 0x8c, 0x5a, 0xa4, 0x03, 0x2f, 0xf9, 0xd4, 0x09, 0x2c, 0x87, 0xda, 0x49, 0x38, 0xa1, 0x0d, 0xbd, - 0x22, 0xe1, 0x5e, 0xea, 0x64, 0x31, 0x61, 0x76, 0xdd, 0x3c, 0xf3, 0xc0, 0xbf, 0x00, 0xae, 
0x72, - 0x8a, 0xae, 0x99, 0x98, 0xc6, 0xf2, 0x61, 0x5a, 0x63, 0x79, 0x2f, 0xff, 0x7b, 0x1b, 0x4f, 0x5b, - 0x59, 0x02, 0xe0, 0x6f, 0x21, 0xae, 0xae, 0x84, 0x8b, 0x34, 0x86, 0x14, 0x8c, 0x71, 0xb1, 0x05, - 0x48, 0xf5, 0x73, 0x5c, 0x53, 0x09, 0x17, 0xa0, 0x4e, 0x9c, 0x88, 0x49, 0xde, 0x91, 0xda, 0x4e, - 0x65, 0x6c, 0x6d, 0xe7, 0x0e, 0x90, 0x84, 0xe1, 0x51, 0xe0, 0x55, 0x93, 0xf1, 0xc7, 0xeb, 0x43, - 0x1c, 0x98, 0x51, 0x6b, 0xc4, 0x50, 0xae, 0x4d, 0x76, 0x28, 0xd7, 0xc7, 0x1f, 0xca, 0xe4, 0x3d, - 0xb8, 0xcc, 0x45, 0xc9, 0xfe, 0x49, 0x02, 0x0b, 0xbd, 0xe7, 0xb3, 0x12, 0xf8, 0x32, 0x8e, 0x62, - 0xc4, 0xd1, 0x18, 0xec, 0xfd, 0x18, 0x1e, 0x35, 0x99, 0x70, 0xdd, 0x1e, 0xad, 0x13, 0xad, 0x64, - 0xf0, 0x60, 0x66, 0x4d, 0x36, 0xc4, 0x02, 0x36, 0x0c, 0xf5, 0x5d, 0x9b, 0x9a, 0x32, 0xfe, 0x3a, - 0x1c, 0x62, 0xdb, 0x1b, 0x1d, 0x49, 0xc1, 0x18, 0x57, 0x96, 0x9a, 0x32, 0x75, 0x46, 0x35, 0xe5, - 0x16, 0xb7, 0xd2, 0xef, 0x25, 0xb4, 0x21, 0xa9, 0xeb, 0x84, 0x11, 0xf5, 0x2b, 0x69, 0x06, 0x1c, - 0xae, 0xc3, 0xb5, 0x44, 0xc3, 0xb3, 0xfa, 0x81, 0x9f, 0xc4, 0x9a, 0x49, 0x69, 0x89, 0x19, 0x3c, - 0x98, 0x59, 0x93, 0xe9, 0xe7, 0xfb, 0x54, 0xb7, 0x83, 0xfd, 0x24, 0xe0, 0x6c, 0x52, 0x3f, 0xbf, - 0x3d, 0xcc, 0x82, 0x59, 0xf5, 0x32, 0x17, 0xa4, 0xb9, 0xe7, 0x53, 0xad, 0xfa, 0x4e, 0x09, 0x2e, - 0xdf, 0xa2, 0x41, 0x18, 0x9a, 0xf6, 0xa9, 0x19, 0xe5, 0x63, 0x30, 0xa3, 0xfc, 0x46, 0x05, 0x2e, - 0xdc, 0xa2, 0xc1, 0x90, 0x36, 0xf6, 0xff, 0x69, 0xf7, 0x6f, 0xc2, 0x85, 0x28, 0x1a, 0xb2, 0x13, - 0xb8, 0x9e, 0x58, 0xcb, 0x53, 0xbb, 0xe5, 0xce, 0x30, 0x0b, 0x66, 0xd5, 0x23, 0xdf, 0x80, 0x97, - 0xf9, 0x52, 0xef, 0x74, 0x85, 0x7d, 0x56, 0x18, 0x13, 0x62, 0xe7, 0x79, 0x5a, 0x12, 0xf2, 0xe5, - 0x4e, 0x36, 0x1b, 0x8e, 0xaa, 0x4f, 0xbe, 0x0d, 0x53, 0x7d, 0xab, 0x4f, 0x6d, 0xcb, 0xe1, 0xfa, - 0x59, 0xee, 0x20, 0xa2, 0xad, 0x18, 0x58, 0xb4, 0x81, 0x8b, 0x97, 0x62, 0x42, 0x60, 0xe6, 0x48, - 0xad, 0x9f, 0xe3, 0x48, 0xfd, 0x1f, 0x45, 0xa8, 0xdd, 0xf2, 0xdc, 0x41, 0xbf, 0x7d, 0x44, 0xba, - 0x50, 0x7d, 0xc8, 0x9d, 0x67, 
0xd2, 0x35, 0x35, 0xfe, 0x89, 0x02, 0xe1, 0x83, 0x8b, 0x54, 0x22, - 0xf1, 0x1f, 0x25, 0x3c, 0x1b, 0xc4, 0x07, 0xf4, 0x88, 0x9a, 0xd2, 0x87, 0x16, 0x0e, 0xe2, 0xbb, - 0xac, 0x10, 0x05, 0x8d, 0xf4, 0x60, 0x56, 0xb7, 0x6d, 0xf7, 0x21, 0x35, 0x37, 0xf4, 0x80, 0xfb, - 0xbd, 0xa5, 0x6f, 0xe5, 0xac, 0x66, 0x69, 0x1e, 0xcc, 0xb0, 0x9c, 0x84, 0xc2, 0x34, 0x36, 0x79, - 0x1f, 0x6a, 0x7e, 0xe0, 0x7a, 0x4a, 0xd9, 0x6a, 0x2e, 0xad, 0x8c, 0xff, 0xd2, 0xdb, 0x5f, 0xef, - 0x08, 0x28, 0x61, 0xb3, 0x97, 0x7f, 0x50, 0x09, 0xd0, 0x7e, 0xbd, 0x00, 0x70, 0x7b, 0x7b, 0x7b, - 0x4b, 0xba, 0x17, 0x4c, 0x28, 0xeb, 0x83, 0xd0, 0x51, 0x39, 0xbe, 0x43, 0x30, 0x11, 0xc8, 0x2b, - 0x7d, 0x78, 0x83, 0x60, 0x1f, 0x39, 0x3a, 0xf9, 0x71, 0xa8, 0x49, 0x05, 0x59, 0x76, 0x7b, 0x18, - 0x4f, 0x21, 0x95, 0x68, 0x54, 0x74, 0xed, 0xb7, 0x8b, 0x00, 0xeb, 0xa6, 0x4d, 0x3b, 0xea, 0x10, - 0x48, 0x23, 0xd8, 0xf7, 0xa8, 0xbf, 0xef, 0xda, 0xe6, 0x98, 0xde, 0x54, 0x6e, 0xf3, 0xdf, 0x56, - 0x20, 0x18, 0xe1, 0x11, 0x13, 0xa6, 0xfc, 0x80, 0xf6, 0x55, 0x6c, 0xef, 0x98, 0x4e, 0x94, 0x39, - 0x61, 0x17, 0x89, 0x70, 0x30, 0x81, 0x4a, 0x74, 0x68, 0x5a, 0x8e, 0x21, 0x3e, 0x90, 0xf6, 0xd1, - 0x98, 0x03, 0x69, 0x96, 0xed, 0x38, 0xd6, 0x23, 0x18, 0x8c, 0x63, 0x6a, 0xbf, 0x5b, 0x84, 0x4b, - 0x5c, 0x1e, 0x6b, 0x46, 0x22, 0x82, 0x97, 0xfc, 0xc9, 0xa1, 0x03, 0xab, 0x7f, 0xfc, 0x74, 0xa2, - 0xc5, 0x79, 0xc7, 0x4d, 0x1a, 0xe8, 0x91, 0x3e, 0x17, 0x95, 0xc5, 0x4e, 0xa9, 0x0e, 0xa0, 0xec, - 0xb3, 0xf9, 0x4a, 0xf4, 0x5e, 0x67, 0xec, 0x21, 0x94, 0xfd, 0x00, 0x7c, 0xf6, 0x0a, 0xbd, 0xc6, - 0x7c, 0xd6, 0xe2, 0xe2, 0xc8, 0x2f, 0x41, 0xd5, 0x0f, 0xf4, 0x60, 0xa0, 0x3e, 0xcd, 0x9d, 0x49, - 0x0b, 0xe6, 0xe0, 0xd1, 0x3c, 0x22, 0xfe, 0xa3, 0x14, 0xaa, 0xfd, 0x6e, 0x01, 0xae, 0x64, 0x57, - 0xdc, 0xb0, 0xfc, 0x80, 0xfc, 0x89, 0xa1, 0x6e, 0x3f, 0xe5, 0x1b, 0x67, 0xb5, 0x79, 0xa7, 0x87, - 0x67, 0x1a, 0x54, 0x49, 0xac, 0xcb, 0x03, 0xa8, 0x58, 0x01, 0xed, 0xa9, 0xfd, 0xe5, 0xfd, 0x09, - 0x3f, 0x7a, 0x6c, 0x69, 0x67, 0x52, 0x50, 0x08, 0xd3, 0xbe, 0x57, 
0x1c, 0xf5, 0xc8, 0x7c, 0xf9, - 0xb0, 0x93, 0x51, 0xe2, 0x77, 0xf3, 0x45, 0x89, 0x27, 0x1b, 0x34, 0x1c, 0x2c, 0xfe, 0xa7, 0x86, - 0x83, 0xc5, 0xef, 0xe7, 0x0f, 0x16, 0x4f, 0x75, 0xc3, 0xc8, 0x98, 0xf1, 0x8f, 0x4a, 0x70, 0xf5, - 0x49, 0xc3, 0x86, 0xad, 0x67, 0x72, 0x74, 0xe6, 0x5d, 0xcf, 0x9e, 0x3c, 0x0e, 0xc9, 0x12, 0x54, - 0xfa, 0xfb, 0xba, 0xaf, 0x94, 0xb2, 0xab, 0x61, 0x98, 0x21, 0x2b, 0x7c, 0xcc, 0x26, 0x0d, 0xae, - 0xcc, 0xf1, 0xbf, 0x28, 0x58, 0xd9, 0x74, 0xdc, 0xa3, 0xbe, 0x1f, 0xd9, 0x04, 0xc2, 0xe9, 0x78, - 0x53, 0x14, 0xa3, 0xa2, 0x93, 0x00, 0xaa, 0xc2, 0xc4, 0x2c, 0x57, 0xa6, 0xf1, 0x03, 0xb9, 0x32, - 0x0e, 0x16, 0x44, 0x0f, 0x25, 0xbd, 0x15, 0x52, 0x16, 0x59, 0x80, 0x72, 0x10, 0x85, 0x79, 0xab, - 0xad, 0x79, 0x39, 0x43, 0x3f, 0xe5, 0x7c, 0x6c, 0x63, 0xef, 0xee, 0x72, 0xa3, 0xba, 0x29, 0xfd, - 0xe7, 0x96, 0xeb, 0x70, 0x85, 0xac, 0x14, 0x6d, 0xec, 0xef, 0x0f, 0x71, 0x60, 0x46, 0x2d, 0xed, - 0x5f, 0xd7, 0xe1, 0x52, 0xf6, 0x78, 0x60, 0xfd, 0x76, 0x48, 0x3d, 0x9f, 0x61, 0x17, 0x92, 0xfd, - 0xf6, 0x40, 0x14, 0xa3, 0xa2, 0x7f, 0xa2, 0x03, 0xce, 0x7e, 0xa3, 0x00, 0x97, 0x3d, 0xe9, 0x23, - 0x7a, 0x16, 0x41, 0x67, 0xaf, 0x08, 0x73, 0xc6, 0x08, 0x81, 0x38, 0xba, 0x2d, 0xe4, 0x6f, 0x14, - 0x60, 0xbe, 0x97, 0xb2, 0x73, 0x9c, 0xe3, 0x99, 0x4b, 0x7e, 0x8e, 0x62, 0x73, 0x84, 0x3c, 0x1c, - 0xd9, 0x12, 0xf2, 0x6d, 0x68, 0xf6, 0xd9, 0xb8, 0xf0, 0x03, 0xea, 0x18, 0x2a, 0x40, 0x74, 0xfc, - 0x2f, 0x69, 0x2b, 0xc2, 0x0a, 0xcf, 0x5c, 0x71, 0xfd, 0x20, 0x46, 0xc0, 0xb8, 0xc4, 0xe7, 0xfc, - 0x90, 0xe5, 0x0d, 0xa8, 0xfb, 0x34, 0x08, 0x2c, 0xa7, 0x2b, 0xf6, 0x1b, 0x0d, 0xf1, 0xad, 0x74, - 0x64, 0x19, 0x86, 0x54, 0xf2, 0x13, 0xd0, 0xe0, 0x2e, 0xa7, 0x65, 0xaf, 0xeb, 0xcf, 0x37, 0x78, - 0xb8, 0xd8, 0xb4, 0x08, 0x80, 0x93, 0x85, 0x18, 0xd1, 0xc9, 0x97, 0x60, 0x6a, 0x97, 0x7f, 0xbe, - 0xf2, 0xdc, 0xbd, 0xb0, 0x71, 0x71, 0x6d, 0xad, 0x1d, 0x2b, 0xc7, 0x04, 0x17, 0x59, 0x02, 0xa0, - 0xa1, 0x5f, 0x2e, 0x6d, 0xcf, 0x8a, 0x3c, 0x76, 0x18, 0xe3, 0x22, 0xaf, 0x40, 0x29, 0xb0, 0x7d, - 0x6e, 
0xc3, 0xaa, 0x47, 0x5b, 0xd0, 0xed, 0x8d, 0x0e, 0xb2, 0x72, 0xed, 0x0f, 0x0b, 0x30, 0x9b, - 0x3a, 0x8e, 0xc4, 0xaa, 0x0c, 0x3c, 0x5b, 0x4e, 0x23, 0x61, 0x95, 0x1d, 0xdc, 0x40, 0x56, 0x4e, - 0xde, 0x93, 0x6a, 0x79, 0x31, 0x67, 0x8a, 0x91, 0x7b, 0x7a, 0xe0, 0x33, 0x3d, 0x7c, 0x48, 0x23, - 0xe7, 0x6e, 0xbe, 0xa8, 0x3d, 0x72, 0x1d, 0x88, 0xb9, 0xf9, 0x22, 0x1a, 0x26, 0x38, 0x53, 0x06, - 0xbf, 0xf2, 0x69, 0x0c, 0x7e, 0xda, 0xaf, 0x16, 0x63, 0x3d, 0x20, 0x35, 0xfb, 0xa7, 0xf4, 0xc0, - 0x17, 0xd8, 0x02, 0x1a, 0x2e, 0xee, 0x8d, 0xf8, 0xfa, 0xc7, 0x17, 0x63, 0x49, 0x25, 0x6f, 0x8b, - 0xbe, 0x2f, 0xe5, 0x3c, 0xc8, 0xbd, 0xbd, 0xd1, 0x11, 0xd1, 0x55, 0xea, 0xad, 0x85, 0xaf, 0xa0, - 0x7c, 0x4e, 0xaf, 0x40, 0xfb, 0x67, 0x25, 0x68, 0xde, 0x71, 0x77, 0x3f, 0x21, 0x11, 0xd4, 0xd9, - 0xcb, 0x54, 0xf1, 0x63, 0x5c, 0xa6, 0x76, 0xe0, 0xe5, 0x20, 0xb0, 0x3b, 0xd4, 0x70, 0x1d, 0xd3, - 0x5f, 0xde, 0x0b, 0xa8, 0xb7, 0x66, 0x39, 0x96, 0xbf, 0x4f, 0x4d, 0xe9, 0x4e, 0xfa, 0xcc, 0xc9, - 0x71, 0xeb, 0xe5, 0xed, 0xed, 0x8d, 0x2c, 0x16, 0x1c, 0x55, 0x97, 0x4f, 0x1b, 0xe2, 0xec, 0x28, - 0x3f, 0x5b, 0x25, 0x63, 0x6e, 0xc4, 0xb4, 0x11, 0x2b, 0xc7, 0x04, 0x97, 0xf6, 0x1f, 0x8a, 0xd0, - 0x08, 0x93, 0x47, 0x90, 0xcf, 0x43, 0x6d, 0xd7, 0x73, 0x0f, 0xa8, 0x27, 0x3c, 0x77, 0xf2, 0x6c, - 0x55, 0x5b, 0x14, 0xa1, 0xa2, 0x91, 0xcf, 0x41, 0x25, 0x70, 0xfb, 0x96, 0x91, 0x36, 0xa8, 0x6d, - 0xb3, 0x42, 0x14, 0x34, 0xfe, 0x21, 0xf0, 0xb0, 0x42, 0xfe, 0x54, 0xf5, 0xd8, 0x87, 0xc0, 0x4b, - 0x51, 0x52, 0xd5, 0x87, 0x50, 0x9e, 0xf8, 0x87, 0xf0, 0x85, 0x50, 0x05, 0xac, 0x24, 0xbf, 0xc4, - 0x94, 0xd2, 0xf6, 0x2e, 0x94, 0x7d, 0xdd, 0xb7, 0xe5, 0xf2, 0x96, 0x23, 0x5f, 0xc3, 0x72, 0x67, - 0x43, 0xe6, 0x6b, 0x58, 0xee, 0x6c, 0x20, 0x07, 0xd5, 0x7e, 0xbb, 0x04, 0x4d, 0xd1, 0xbf, 0x62, - 0xf6, 0x98, 0x64, 0x0f, 0xbf, 0xc9, 0x43, 0x2e, 0xfc, 0x41, 0x8f, 0x7a, 0xdc, 0x1c, 0x25, 0x27, - 0xc3, 0xb8, 0x1f, 0x21, 0x22, 0x86, 0x61, 0x17, 0x51, 0xd1, 0x1f, 0xed, 0xae, 0x67, 0x4b, 0x05, - 0x4f, 0x80, 0x22, 0x75, 0x5c, 0x19, 0x49, 
0x19, 0x2e, 0x15, 0x77, 0x63, 0x34, 0x4c, 0x70, 0x6a, - 0x1f, 0x16, 0xa1, 0xb1, 0x61, 0xed, 0x51, 0xe3, 0xc8, 0xb0, 0xf9, 0x39, 0x55, 0x93, 0xda, 0x34, - 0xa0, 0xb7, 0x3c, 0xdd, 0xa0, 0x5b, 0xd4, 0xb3, 0x78, 0xfa, 0x26, 0xf6, 0x05, 0xf2, 0x39, 0x4e, - 0x9e, 0x53, 0x5d, 0x1d, 0xc1, 0x83, 0x23, 0x6b, 0x93, 0x75, 0x98, 0x32, 0xa9, 0x6f, 0x79, 0xd4, - 0xdc, 0x8a, 0x6d, 0x85, 0x3e, 0xaf, 0x5a, 0xb8, 0x1a, 0xa3, 0x3d, 0x3e, 0x6e, 0x4d, 0x2b, 0x13, - 0xa8, 0xd8, 0x13, 0x25, 0xaa, 0xb2, 0x49, 0xa5, 0xaf, 0x0f, 0xfc, 0xac, 0x36, 0xc6, 0x26, 0x95, - 0xad, 0x6c, 0x16, 0x1c, 0x55, 0x57, 0xab, 0x40, 0x69, 0xc3, 0xed, 0x6a, 0xdf, 0x2b, 0x41, 0x98, - 0xe7, 0x8b, 0xfc, 0xd9, 0x02, 0x34, 0x75, 0xc7, 0x71, 0x03, 0x99, 0x43, 0x4b, 0xf8, 0xf8, 0x31, - 0x77, 0x3a, 0xb1, 0x85, 0xe5, 0x08, 0x54, 0xb8, 0x87, 0x43, 0x97, 0x75, 0x8c, 0x82, 0x71, 0xd9, - 0x64, 0x90, 0xf2, 0x58, 0x6f, 0xe6, 0x6f, 0xc5, 0x29, 0xfc, 0xd3, 0x57, 0xbe, 0x06, 0x73, 0xe9, - 0xc6, 0x9e, 0xc5, 0xe1, 0x94, 0xcb, 0xf5, 0x5f, 0x04, 0x88, 0xa2, 0x56, 0x9e, 0x81, 0x99, 0xcc, - 0x4a, 0x98, 0xc9, 0xc6, 0x4f, 0xb6, 0x10, 0x35, 0x7a, 0xa4, 0x69, 0xec, 0x5b, 0x29, 0xd3, 0xd8, - 0xfa, 0x24, 0x84, 0x3d, 0xd9, 0x1c, 0xb6, 0x0b, 0x17, 0x22, 0xde, 0xe8, 0x9b, 0xbf, 0x9b, 0xfa, - 0x32, 0x85, 0xb6, 0xf7, 0xc5, 0x11, 0x5f, 0xe6, 0x6c, 0x2c, 0x8c, 0x68, 0xf8, 0xdb, 0xd4, 0xfe, - 0x66, 0x01, 0xe6, 0xe2, 0x42, 0xf8, 0xc9, 0xf0, 0x2f, 0xc3, 0xb4, 0x47, 0x75, 0xb3, 0xad, 0x07, - 0xc6, 0x3e, 0x0f, 0x58, 0x2f, 0xf0, 0x08, 0x73, 0x7e, 0x86, 0x0d, 0xe3, 0x04, 0x4c, 0xf2, 0x11, - 0x1d, 0x9a, 0xac, 0x60, 0xdb, 0xea, 0x51, 0x77, 0x10, 0x8c, 0x69, 0xfb, 0xe5, 0xdb, 0x2e, 0x8c, - 0x60, 0x30, 0x8e, 0xa9, 0x7d, 0x54, 0x80, 0x99, 0x78, 0x83, 0xcf, 0xdd, 0x2e, 0xb8, 0x9f, 0xb4, - 0x0b, 0xae, 0x4c, 0xe0, 0xbd, 0x8f, 0xb0, 0x05, 0x7e, 0xa7, 0x19, 0x7f, 0x34, 0x6e, 0xff, 0x8b, - 0x9b, 0x3c, 0x0a, 0x4f, 0x34, 0x79, 0x7c, 0xf2, 0xd3, 0x47, 0x8d, 0xd2, 0xd5, 0xcb, 0xcf, 0xb1, - 0xae, 0xfe, 0x71, 0xe6, 0xa0, 0x8a, 0xe5, 0x51, 0xaa, 0xe6, 0xc8, 0xa3, 0xd4, 
0x0b, 0xf3, 0x28, - 0xd5, 0x26, 0x36, 0xb1, 0x9d, 0x26, 0x97, 0x52, 0xfd, 0x99, 0xe6, 0x52, 0x6a, 0x9c, 0x57, 0x2e, - 0x25, 0xc8, 0x9b, 0x4b, 0xe9, 0xbb, 0x05, 0x98, 0x31, 0x13, 0xe7, 0x7e, 0xe5, 0x89, 0xfb, 0xf1, - 0x97, 0xb3, 0xe4, 0x31, 0x62, 0x71, 0xf0, 0x2b, 0x59, 0x86, 0x29, 0x91, 0x59, 0x19, 0x8c, 0xa6, - 0x3e, 0x96, 0x0c, 0x46, 0xe4, 0x97, 0xa0, 0x61, 0xab, 0xb5, 0x4e, 0xe6, 0x75, 0xdc, 0x98, 0xc8, - 0x90, 0x94, 0x98, 0xd1, 0xd9, 0x82, 0xb0, 0x08, 0x23, 0x89, 0xda, 0xef, 0xd7, 0xe2, 0x0b, 0xe2, - 0xb3, 0xf6, 0x3c, 0xbc, 0x9e, 0xf4, 0x3c, 0x5c, 0x4f, 0x7b, 0x1e, 0x86, 0x56, 0x73, 0xe9, 0x7d, - 0xf8, 0xc9, 0xd8, 0x3a, 0x51, 0xe2, 0xa9, 0x93, 0xc2, 0x21, 0x97, 0xb1, 0x56, 0x2c, 0xc3, 0xac, - 0x54, 0x02, 0x14, 0x91, 0x4f, 0xb2, 0xd3, 0x51, 0xac, 0xd8, 0x6a, 0x92, 0x8c, 0x69, 0x7e, 0x26, - 0xd0, 0x57, 0x19, 0x74, 0xc5, 0x3e, 0x2a, 0x1a, 0xe3, 0x2a, 0xbb, 0x6d, 0xc8, 0xc1, 0xf6, 0x5c, - 0x1e, 0xd5, 0x7d, 0xe9, 0x3f, 0x88, 0xed, 0xb9, 0x90, 0x97, 0xa2, 0xa4, 0xc6, 0x9d, 0x28, 0xb5, - 0xa7, 0x38, 0x51, 0x74, 0x68, 0xda, 0xba, 0x1f, 0x88, 0xc1, 0x64, 0xca, 0xd9, 0xe4, 0x8f, 0x9d, - 0x6e, 0xdd, 0x67, 0xba, 0x44, 0xa4, 0xc0, 0x6f, 0x44, 0x30, 0x18, 0xc7, 0x24, 0x26, 0x4c, 0xb1, - 0xbf, 0x7c, 0x66, 0x31, 0x97, 0x03, 0x99, 0x67, 0xee, 0x2c, 0x32, 0xc2, 0x0d, 0xdd, 0x46, 0x0c, - 0x07, 0x13, 0xa8, 0x23, 0xfc, 0x2c, 0x30, 0x8e, 0x9f, 0x85, 0xfc, 0xac, 0x50, 0xdc, 0x8e, 0xc2, - 0xd7, 0xda, 0xe4, 0xaf, 0x35, 0x8c, 0x33, 0xc5, 0x38, 0x11, 0x93, 0xbc, 0x6c, 0x54, 0x0c, 0x64, - 0x37, 0xa8, 0xea, 0x53, 0xc9, 0x51, 0xb1, 0x93, 0x24, 0x63, 0x9a, 0x9f, 0x6c, 0xc1, 0xc5, 0xb0, - 0x28, 0xde, 0x8c, 0x69, 0x8e, 0x13, 0x06, 0xfe, 0xed, 0x64, 0xf0, 0x60, 0x66, 0x4d, 0x7e, 0x92, - 0x66, 0xe0, 0x79, 0xd4, 0x09, 0x6e, 0xeb, 0xfe, 0xbe, 0x8c, 0x20, 0x8c, 0x4e, 0xd2, 0x44, 0x24, - 0x8c, 0xf3, 0x91, 0x25, 0x00, 0x01, 0xc7, 0x6b, 0xcd, 0x26, 0x83, 0x74, 0x77, 0x42, 0x0a, 0xc6, - 0xb8, 0xb4, 0xef, 0x36, 0xa0, 0x79, 0x4f, 0x0f, 0xac, 0x43, 0xca, 0x9d, 0xa2, 0xe7, 0xe3, 0x99, - 0xfa, 0x2b, 0x05, 
0xb8, 0x94, 0x8c, 0x7c, 0x3d, 0x47, 0xf7, 0x14, 0xcf, 0xbc, 0x84, 0x99, 0xd2, - 0x70, 0x44, 0x2b, 0xb8, 0xa3, 0x6a, 0x28, 0x90, 0xf6, 0xbc, 0x1d, 0x55, 0x9d, 0x51, 0x02, 0x71, - 0x74, 0x5b, 0x3e, 0x29, 0x8e, 0xaa, 0xe7, 0x3b, 0x55, 0x68, 0xca, 0x8d, 0x56, 0x7b, 0x6e, 0xdc, - 0x68, 0xf5, 0xe7, 0x42, 0xeb, 0xef, 0xc7, 0xdc, 0x68, 0x8d, 0x9c, 0xe1, 0x5c, 0xf2, 0xb0, 0x88, - 0x40, 0x1b, 0xe5, 0x8e, 0xe3, 0x79, 0x1e, 0x94, 0x7b, 0x83, 0x29, 0xcb, 0xbb, 0xba, 0x6f, 0x19, - 0x52, 0xed, 0xc8, 0x91, 0x1a, 0x59, 0xa5, 0x4c, 0x14, 0x51, 0x1f, 0xfc, 0x2f, 0x0a, 0xec, 0x28, - 0x43, 0x64, 0x31, 0x57, 0x86, 0x48, 0xb2, 0x02, 0x65, 0xe7, 0x80, 0x1e, 0x9d, 0x2d, 0x63, 0x02, - 0xdf, 0x04, 0xde, 0xbb, 0x4b, 0x8f, 0x90, 0x57, 0xd6, 0xbe, 0x5f, 0x04, 0x60, 0x8f, 0x7f, 0x3a, - 0x87, 0xd6, 0x8f, 0x43, 0xcd, 0x1f, 0x70, 0xc3, 0x90, 0x54, 0x98, 0xa2, 0x18, 0x38, 0x51, 0x8c, - 0x8a, 0x4e, 0x3e, 0x07, 0x95, 0x6f, 0x0d, 0xe8, 0x40, 0x45, 0x67, 0x84, 0xfb, 0x86, 0xaf, 0xb3, - 0x42, 0x14, 0xb4, 0xf3, 0x33, 0x3a, 0x2b, 0xc7, 0x57, 0xe5, 0xbc, 0x1c, 0x5f, 0x0d, 0xa8, 0xdd, - 0x73, 0x79, 0x48, 0xad, 0xf6, 0x5f, 0x8b, 0x00, 0x51, 0xc8, 0x22, 0xf9, 0xf5, 0x02, 0xbc, 0x14, - 0x7e, 0x70, 0x81, 0xd8, 0xfe, 0xf1, 0x6c, 0xe4, 0xb9, 0x9d, 0x60, 0x59, 0x1f, 0x3b, 0x9f, 0x81, - 0xb6, 0xb2, 0xc4, 0x61, 0x76, 0x2b, 0x08, 0x42, 0x9d, 0xf6, 0xfa, 0xc1, 0xd1, 0xaa, 0xe5, 0xc9, - 0x11, 0x98, 0x19, 0x19, 0x7b, 0x53, 0xf2, 0x88, 0xaa, 0xd2, 0x46, 0xc1, 0x3f, 0x22, 0x45, 0xc1, - 0x10, 0x87, 0xec, 0x43, 0xdd, 0x71, 0xdf, 0xf3, 0x59, 0x77, 0xc8, 0xe1, 0xf8, 0xd6, 0xf8, 0x5d, - 0x2e, 0xba, 0x55, 0x38, 0x43, 0xe4, 0x1f, 0xac, 0x39, 0xb2, 0xb3, 0x7f, 0xad, 0x08, 0x17, 0x32, - 0xfa, 0x81, 0xbc, 0x05, 0x73, 0x32, 0x3a, 0x34, 0x4a, 0xcb, 0x5f, 0x88, 0xd2, 0xf2, 0x77, 0x52, - 0x34, 0x1c, 0xe2, 0x26, 0xef, 0x01, 0xe8, 0x86, 0x41, 0x7d, 0x7f, 0xd3, 0x35, 0xd5, 0x7e, 0xe0, - 0x4d, 0xa6, 0xbe, 0x2c, 0x87, 0xa5, 0x8f, 0x8f, 0x5b, 0x3f, 0x95, 0x15, 0xf0, 0x9d, 0xea, 0xe7, - 0xa8, 0x02, 0xc6, 0x20, 0xc9, 0x37, 0x01, 0x84, 0x0d, 
0x20, 0xcc, 0x49, 0xf1, 0x14, 0xc3, 0xd9, - 0x82, 0x4a, 0x79, 0xb6, 0xf0, 0xf5, 0x81, 0xee, 0x04, 0x56, 0x70, 0x24, 0x52, 0x00, 0x3d, 0x08, - 0x51, 0x30, 0x86, 0xa8, 0xfd, 0xe3, 0x22, 0xd4, 0x95, 0x5b, 0xe0, 0x19, 0xd8, 0x82, 0xbb, 0x09, - 0x5b, 0xf0, 0x84, 0x42, 0xbc, 0xb3, 0x2c, 0xc1, 0x6e, 0xca, 0x12, 0x7c, 0x2b, 0xbf, 0xa8, 0x27, - 0xdb, 0x81, 0x7f, 0xab, 0x08, 0x33, 0x8a, 0x35, 0xaf, 0x85, 0xf6, 0xab, 0x30, 0x2b, 0x42, 0x33, - 0x36, 0xf5, 0x47, 0x22, 0x1b, 0x12, 0xef, 0xb0, 0xb2, 0x88, 0xaa, 0x6e, 0x27, 0x49, 0x98, 0xe6, - 0x65, 0xc3, 0x5a, 0x14, 0xed, 0xb0, 0x4d, 0x98, 0x70, 0xe6, 0x8a, 0xfd, 0x26, 0x1f, 0xd6, 0xed, - 0x14, 0x0d, 0x87, 0xb8, 0xd3, 0x26, 0xe2, 0xf2, 0x39, 0x98, 0x88, 0xff, 0x6d, 0x01, 0xa6, 0xa2, - 0xfe, 0x3a, 0x77, 0x03, 0xf1, 0x5e, 0xd2, 0x40, 0xbc, 0x9c, 0x7b, 0x38, 0x8c, 0x30, 0x0f, 0xff, - 0x85, 0x1a, 0x24, 0x4e, 0x1a, 0x90, 0x5d, 0xb8, 0x62, 0x65, 0xc6, 0x4b, 0xc6, 0x66, 0x9b, 0xf0, - 0xe8, 0xfc, 0xfa, 0x48, 0x4e, 0x7c, 0x02, 0x0a, 0x19, 0x40, 0xfd, 0x90, 0x7a, 0x81, 0x65, 0x50, - 0xf5, 0x7c, 0xb7, 0x72, 0xab, 0x64, 0xd2, 0x08, 0x1e, 0xf6, 0xe9, 0x03, 0x29, 0x00, 0x43, 0x51, - 0x64, 0x17, 0x2a, 0xd4, 0xec, 0x52, 0x95, 0x9f, 0x2a, 0x67, 0xbe, 0xe0, 0xb0, 0x3f, 0xd9, 0x3f, - 0x1f, 0x05, 0x34, 0xf1, 0xe3, 0x86, 0xa6, 0x72, 0x4e, 0x05, 0xeb, 0x94, 0xe6, 0x25, 0x72, 0x10, - 0x5a, 0x5b, 0x2b, 0x13, 0x9a, 0x3c, 0x9e, 0x60, 0x6b, 0xf5, 0xa1, 0xf1, 0x50, 0x0f, 0xa8, 0xd7, - 0xd3, 0xbd, 0x03, 0xb9, 0xdb, 0x18, 0xff, 0x09, 0xdf, 0x56, 0x48, 0xd1, 0x13, 0x86, 0x45, 0x18, - 0xc9, 0x21, 0x2e, 0x34, 0x02, 0xa9, 0x3e, 0x2b, 0x93, 0xf2, 0xf8, 0x42, 0x95, 0x22, 0xee, 0xcb, - 0x13, 0x07, 0xea, 0x2f, 0x46, 0x32, 0xc8, 0x61, 0x22, 0xb9, 0xbc, 0xb8, 0x52, 0xa0, 0x9d, 0xc3, - 0x35, 0x21, 0xa1, 0xa2, 0xe5, 0x26, 0x3b, 0x49, 0xbd, 0xf6, 0x3f, 0x2b, 0xd1, 0xb4, 0xfc, 0xac, - 0xed, 0x84, 0x5f, 0x4a, 0xda, 0x09, 0xaf, 0xa5, 0xed, 0x84, 0x29, 0x7f, 0xfc, 0xd9, 0x63, 0x94, - 0x53, 0xe6, 0xb5, 0xf2, 0x39, 0x98, 0xd7, 0x5e, 0x85, 0xe6, 0x21, 0x9f, 0x09, 0x44, 0xb2, 
0xab, - 0x0a, 0x5f, 0x46, 0xf8, 0xcc, 0xfe, 0x20, 0x2a, 0xc6, 0x38, 0x0f, 0xab, 0x22, 0xaf, 0xd3, 0x09, - 0xf3, 0x4b, 0xcb, 0x2a, 0x9d, 0xa8, 0x18, 0xe3, 0x3c, 0x3c, 0xbc, 0xd1, 0x72, 0x0e, 0x44, 0x85, - 0x1a, 0xaf, 0x20, 0xc2, 0x1b, 0x55, 0x21, 0x46, 0x74, 0x72, 0x03, 0xea, 0x03, 0x73, 0x4f, 0xf0, - 0xd6, 0x39, 0x2f, 0xd7, 0x30, 0x77, 0x56, 0xd7, 0x64, 0xf2, 0x2d, 0x45, 0x65, 0x2d, 0xe9, 0xe9, - 0x7d, 0x45, 0xe0, 0x7b, 0x43, 0xd9, 0x92, 0xcd, 0xa8, 0x18, 0xe3, 0x3c, 0xe4, 0x67, 0x60, 0xc6, - 0xa3, 0xe6, 0xc0, 0xa0, 0x61, 0x2d, 0xe0, 0xb5, 0x64, 0x56, 0xd2, 0x38, 0x05, 0x53, 0x9c, 0x23, - 0x8c, 0x84, 0xcd, 0xb1, 0x8c, 0x84, 0x5f, 0x83, 0x19, 0xd3, 0xd3, 0x2d, 0x87, 0x9a, 0xf7, 0x1d, - 0x1e, 0x74, 0x21, 0x83, 0x2c, 0x43, 0x03, 0xfd, 0x6a, 0x82, 0x8a, 0x29, 0x6e, 0xed, 0x9f, 0x17, - 0xa1, 0x22, 0x72, 0xa5, 0xae, 0xc3, 0x05, 0xcb, 0xb1, 0x02, 0x4b, 0xb7, 0x57, 0xa9, 0xad, 0x1f, - 0x25, 0x03, 0x4f, 0x5e, 0x66, 0x1b, 0xed, 0xf5, 0x61, 0x32, 0x66, 0xd5, 0x61, 0x9d, 0x13, 0x88, - 0xe5, 0x5b, 0xa1, 0x08, 0x3b, 0x9a, 0x48, 0xd4, 0x9d, 0xa0, 0x60, 0x8a, 0x93, 0x29, 0x43, 0xfd, - 0x8c, 0xa8, 0x12, 0xae, 0x0c, 0x25, 0x63, 0x49, 0x92, 0x7c, 0x5c, 0x49, 0x1f, 0x70, 0x85, 0x38, - 0x3c, 0xca, 0x24, 0x43, 0xd3, 0x84, 0x92, 0x9e, 0xa2, 0xe1, 0x10, 0x37, 0x43, 0xd8, 0xd3, 0x2d, - 0x7b, 0xe0, 0xd1, 0x08, 0xa1, 0x12, 0x21, 0xac, 0xa5, 0x68, 0x38, 0xc4, 0xad, 0xfd, 0xf7, 0x02, - 0x90, 0xe1, 0xc3, 0x19, 0x64, 0x1f, 0xaa, 0x0e, 0xb7, 0x45, 0xe6, 0xbe, 0x1f, 0x20, 0x66, 0xd2, - 0x14, 0x8b, 0x84, 0x2c, 0x90, 0xf8, 0xc4, 0x81, 0x3a, 0x7d, 0x14, 0x50, 0xcf, 0x09, 0x0f, 0x6b, - 0x4d, 0xe6, 0x2e, 0x02, 0xb1, 0x37, 0x93, 0xc8, 0x18, 0xca, 0xd0, 0x7e, 0xaf, 0x08, 0xcd, 0x18, - 0xdf, 0xd3, 0xb6, 0xf8, 0x3c, 0x5f, 0x84, 0x30, 0x01, 0xee, 0x78, 0xb6, 0x9c, 0xef, 0x62, 0xf9, - 0x22, 0x24, 0x09, 0x37, 0x30, 0xce, 0x47, 0x96, 0x00, 0x7a, 0xba, 0x1f, 0x50, 0x8f, 0xeb, 0x42, - 0xa9, 0x2c, 0x0d, 0x9b, 0x21, 0x05, 0x63, 0x5c, 0xe4, 0xba, 0xbc, 0x4d, 0xa2, 0x9c, 0xcc, 0xaa, - 0x39, 0xe2, 0xaa, 0x88, 0xca, 
0x04, 0xae, 0x8a, 0x20, 0x5d, 0x98, 0x53, 0xad, 0x56, 0xd4, 0xb3, - 0xe5, 0x5c, 0x14, 0x03, 0x35, 0x05, 0x81, 0x43, 0xa0, 0xda, 0xf7, 0x0b, 0x30, 0x9d, 0x30, 0x40, - 0x89, 0x7c, 0x98, 0xea, 0x68, 0x51, 0x22, 0x1f, 0x66, 0xec, 0x44, 0xd0, 0x17, 0xa0, 0x2a, 0x3a, - 0x28, 0x1d, 0x31, 0x2c, 0xba, 0x10, 0x25, 0x95, 0xad, 0x2c, 0xd2, 0xc4, 0x9d, 0x5e, 0x59, 0xa4, - 0x0d, 0x1c, 0x15, 0x5d, 0x78, 0x8e, 0x44, 0xeb, 0x64, 0x4f, 0xc7, 0x3c, 0x47, 0xa2, 0x1c, 0x43, - 0x0e, 0xed, 0xef, 0xf3, 0x76, 0x07, 0xde, 0x51, 0xb8, 0xb3, 0xee, 0x42, 0x4d, 0x46, 0x89, 0xca, - 0x4f, 0xe3, 0xad, 0x1c, 0x56, 0x31, 0x8e, 0x23, 0xe3, 0x1c, 0x75, 0xe3, 0xe0, 0xfe, 0xde, 0x1e, - 0x2a, 0x74, 0x72, 0x13, 0x1a, 0xae, 0x23, 0xbf, 0x60, 0xf9, 0xf8, 0x5f, 0x64, 0x2b, 0xc7, 0x7d, - 0x55, 0xf8, 0xf8, 0xb8, 0x75, 0x29, 0xfc, 0x93, 0x68, 0x24, 0x46, 0x35, 0xb5, 0x3f, 0x53, 0x80, - 0x97, 0xd0, 0xb5, 0x6d, 0xcb, 0xe9, 0x26, 0x3d, 0x9f, 0xc4, 0x86, 0x99, 0x9e, 0xfe, 0x68, 0xc7, - 0xd1, 0x0f, 0x75, 0xcb, 0xd6, 0x77, 0x6d, 0xfa, 0xd4, 0x9d, 0xf1, 0x20, 0xb0, 0xec, 0x05, 0x71, - 0xbb, 0xe6, 0xc2, 0xba, 0x13, 0xdc, 0xf7, 0x3a, 0x81, 0x67, 0x39, 0x5d, 0x31, 0x4b, 0x6e, 0x26, - 0xb0, 0x30, 0x85, 0xad, 0xfd, 0x7e, 0x09, 0x78, 0x04, 0x22, 0xf9, 0x32, 0x34, 0x7a, 0xd4, 0xd8, - 0xd7, 0x1d, 0xcb, 0x57, 0x99, 0x85, 0x2f, 0xb3, 0xe7, 0xda, 0x54, 0x85, 0x8f, 0xd9, 0xab, 0x58, - 0xee, 0x6c, 0xf0, 0xc3, 0x40, 0x11, 0x2f, 0x31, 0xa0, 0xda, 0xf5, 0x7d, 0xbd, 0x6f, 0xe5, 0x0e, - 0x31, 0x11, 0x99, 0x5c, 0xc5, 0x74, 0x24, 0x7e, 0xa3, 0x84, 0x26, 0x06, 0x54, 0xfa, 0xb6, 0x6e, - 0x39, 0xb9, 0x6f, 0x83, 0x63, 0x4f, 0xb0, 0xc5, 0x90, 0x84, 0xa9, 0x92, 0xff, 0x44, 0x81, 0x4d, - 0x06, 0xd0, 0xf4, 0x0d, 0x4f, 0xef, 0xf9, 0xfb, 0xfa, 0xd2, 0x6b, 0xaf, 0xe7, 0x56, 0xfe, 0x23, - 0x51, 0x42, 0x17, 0x59, 0xc1, 0xe5, 0xcd, 0xce, 0xed, 0xe5, 0xa5, 0xd7, 0x5e, 0xc7, 0xb8, 0x9c, - 0xb8, 0xd8, 0xd7, 0x5e, 0x5d, 0x92, 0x33, 0xc8, 0xc4, 0xc5, 0xbe, 0xf6, 0xea, 0x12, 0xc6, 0xe5, - 0x68, 0xff, 0xab, 0x00, 0x8d, 0x90, 0x97, 0xec, 0x00, 0xb0, 0xb9, 
0x4c, 0xe6, 0x5e, 0x3d, 0xd3, - 0xcd, 0x39, 0xdc, 0xda, 0xb3, 0x13, 0x56, 0xc6, 0x18, 0x50, 0x46, 0x72, 0xda, 0xe2, 0xa4, 0x93, - 0xd3, 0x2e, 0x42, 0x63, 0x5f, 0x77, 0x4c, 0x7f, 0x5f, 0x3f, 0xa0, 0x32, 0x70, 0x3b, 0xdc, 0x8a, - 0xdc, 0x56, 0x04, 0x8c, 0x78, 0xb4, 0x7f, 0x58, 0x05, 0x11, 0x17, 0xc2, 0x26, 0x1d, 0xd3, 0xf2, - 0xc5, 0xf1, 0x8a, 0x02, 0xaf, 0x19, 0x4e, 0x3a, 0xab, 0xb2, 0x1c, 0x43, 0x0e, 0x72, 0x19, 0x4a, - 0x3d, 0xcb, 0x91, 0x1a, 0x08, 0x37, 0xe4, 0x6e, 0x5a, 0x0e, 0xb2, 0x32, 0x4e, 0xd2, 0x1f, 0x49, - 0x0d, 0x43, 0x90, 0xf4, 0x47, 0xc8, 0xca, 0xc8, 0x57, 0x61, 0xd6, 0x76, 0xdd, 0x03, 0x36, 0x7d, - 0x28, 0x45, 0x44, 0x78, 0xd5, 0xb9, 0x69, 0x65, 0x23, 0x49, 0xc2, 0x34, 0x2f, 0xd9, 0x81, 0x97, - 0x3f, 0xa0, 0x9e, 0x2b, 0xe7, 0xcb, 0x8e, 0x4d, 0x69, 0x5f, 0xc1, 0x08, 0xd5, 0x98, 0x47, 0xc9, - 0xfe, 0x7c, 0x36, 0x0b, 0x8e, 0xaa, 0xcb, 0x23, 0xfa, 0x75, 0xaf, 0x4b, 0x83, 0x2d, 0xcf, 0x65, - 0xba, 0x8b, 0xe5, 0x74, 0x15, 0x6c, 0x35, 0x82, 0xdd, 0xce, 0x66, 0xc1, 0x51, 0x75, 0xc9, 0x3b, - 0x30, 0x2f, 0x48, 0x42, 0x6d, 0x59, 0x16, 0xd3, 0x8c, 0x65, 0xab, 0x4b, 0x54, 0xa7, 0x85, 0xbf, - 0x6c, 0x7b, 0x04, 0x0f, 0x8e, 0xac, 0x4d, 0xee, 0xc0, 0x9c, 0xf2, 0x96, 0x6e, 0x51, 0xaf, 0x13, - 0xc6, 0x0a, 0x4d, 0xb7, 0xaf, 0x9d, 0x1c, 0xb7, 0xae, 0xac, 0xd2, 0xbe, 0x47, 0x8d, 0xb8, 0xd7, - 0x59, 0x71, 0xe1, 0x50, 0x3d, 0x82, 0x70, 0x89, 0x07, 0x04, 0xed, 0xf4, 0x57, 0x5c, 0xd7, 0x36, - 0xdd, 0x87, 0x8e, 0x7a, 0x76, 0xa1, 0xb0, 0x73, 0x07, 0x69, 0x27, 0x93, 0x03, 0x47, 0xd4, 0x64, - 0x4f, 0xce, 0x29, 0xab, 0xee, 0x43, 0x27, 0x8d, 0x0a, 0xd1, 0x93, 0x77, 0x46, 0xf0, 0xe0, 0xc8, - 0xda, 0x64, 0x0d, 0x48, 0xfa, 0x09, 0x76, 0xfa, 0xd2, 0x85, 0x7f, 0x49, 0xa4, 0x51, 0x4a, 0x53, - 0x31, 0xa3, 0x06, 0xd9, 0x80, 0x8b, 0xe9, 0x52, 0x26, 0x4e, 0x7a, 0xf3, 0x79, 0x02, 0x65, 0xcc, - 0xa0, 0x63, 0x66, 0x2d, 0xed, 0x1f, 0x15, 0x61, 0x3a, 0x91, 0x77, 0xe3, 0xb9, 0xcb, 0x6f, 0xc0, - 0x36, 0x0f, 0x3d, 0xbf, 0xbb, 0xbe, 0x7a, 0x9b, 0xea, 0x26, 0xf5, 0xd4, 0xb1, 0x8e, 0x86, 0x5c, - 0x16, 
0x13, 0x14, 0x4c, 0x71, 0x92, 0x3d, 0xa8, 0x08, 0x3f, 0x41, 0xde, 0x3b, 0x98, 0x54, 0x1f, - 0x71, 0x67, 0x81, 0xbc, 0xb8, 0xcc, 0xf5, 0x28, 0x0a, 0x78, 0x2d, 0x80, 0xa9, 0x38, 0x07, 0x9b, - 0x48, 0x22, 0xb5, 0xb7, 0x96, 0x50, 0x79, 0xd7, 0xa1, 0x14, 0x04, 0xe3, 0x66, 0x4e, 0x10, 0x7e, - 0xa7, 0xed, 0x0d, 0x64, 0x18, 0xda, 0x1e, 0x7b, 0x77, 0xbe, 0x6f, 0xb9, 0x8e, 0x4c, 0xa3, 0xbf, - 0x03, 0x35, 0xb9, 0x7b, 0x1a, 0x33, 0xf3, 0x03, 0xd7, 0x95, 0x94, 0xd9, 0x55, 0x61, 0x69, 0xff, - 0xae, 0x08, 0x8d, 0xd0, 0x4c, 0x72, 0x8a, 0xf4, 0xf4, 0x2e, 0x34, 0xc2, 0x80, 0xc6, 0xdc, 0x17, - 0xcc, 0x46, 0x71, 0x76, 0x7c, 0x67, 0x1f, 0xfe, 0xc5, 0x48, 0x46, 0x3c, 0x58, 0xb2, 0x94, 0x23, - 0x58, 0xb2, 0x0f, 0xb5, 0xc0, 0xb3, 0xba, 0x5d, 0xb9, 0x4b, 0xc8, 0x13, 0x2d, 0x19, 0x76, 0xd7, - 0xb6, 0x00, 0x94, 0x3d, 0x2b, 0xfe, 0xa0, 0x12, 0xa3, 0xbd, 0x0f, 0x73, 0x69, 0x4e, 0xae, 0x42, - 0x1b, 0xfb, 0xd4, 0x1c, 0xd8, 0xaa, 0x8f, 0x23, 0x15, 0x5a, 0x96, 0x63, 0xc8, 0x41, 0x6e, 0x40, - 0x9d, 0xbd, 0xa6, 0x0f, 0x5c, 0x47, 0xa9, 0xb1, 0x7c, 0x37, 0xb2, 0x2d, 0xcb, 0x30, 0xa4, 0x6a, - 0xff, 0xa5, 0x04, 0x97, 0x23, 0x63, 0xd7, 0xa6, 0xee, 0xe8, 0xdd, 0x53, 0xdc, 0x2a, 0xfa, 0xe9, - 0x59, 0xba, 0xb3, 0xde, 0x31, 0x52, 0x7a, 0x0e, 0xee, 0x18, 0xf9, 0xbf, 0x45, 0xe0, 0xc1, 0xd7, - 0xe4, 0xdb, 0x30, 0xa5, 0xc7, 0x2e, 0x94, 0x96, 0xaf, 0xf3, 0x66, 0xee, 0xd7, 0xc9, 0x63, 0xbc, - 0xc3, 0x00, 0xb8, 0x78, 0x29, 0x26, 0x04, 0x12, 0x17, 0xea, 0x7b, 0xba, 0x6d, 0x33, 0x5d, 0x28, - 0xb7, 0xf3, 0x2e, 0x21, 0x9c, 0x0f, 0xf3, 0x35, 0x09, 0x8d, 0xa1, 0x10, 0xf2, 0xdd, 0x02, 0x4c, - 0x7b, 0xf1, 0xed, 0x9a, 0x7c, 0x21, 0x79, 0x42, 0x3b, 0x62, 0x68, 0xf1, 0x70, 0xbb, 0xf8, 0x9e, - 0x30, 0x29, 0x53, 0xfb, 0xcf, 0x05, 0x98, 0xee, 0xd8, 0x96, 0x69, 0x39, 0xdd, 0x73, 0xbc, 0xe2, - 0xe4, 0x3e, 0x54, 0x7c, 0xdb, 0x32, 0xe9, 0x98, 0xab, 0x89, 0x58, 0xc7, 0x18, 0x00, 0x0a, 0x9c, - 0xe4, 0x9d, 0x29, 0xa5, 0x53, 0xdc, 0x99, 0xf2, 0x07, 0x55, 0x90, 0xc7, 0x08, 0xc8, 0x00, 0x1a, - 0x5d, 0x75, 0x15, 0x83, 0x7c, 0xc6, 0xdb, 
0x39, 0xd2, 0x78, 0x26, 0x2e, 0x75, 0x10, 0x73, 0x7f, - 0x58, 0x88, 0x91, 0x24, 0x42, 0x93, 0x37, 0x99, 0xaf, 0xe6, 0xbc, 0xc9, 0x5c, 0x88, 0x1b, 0xbe, - 0xcb, 0x5c, 0x87, 0xf2, 0x7e, 0x10, 0xf4, 0xe5, 0x60, 0x1a, 0xff, 0x9c, 0x48, 0x94, 0x49, 0x4a, - 0xe8, 0x44, 0xec, 0x3f, 0x72, 0x68, 0x26, 0xc2, 0xd1, 0xc3, 0xfb, 0x22, 0x57, 0x72, 0x85, 0x91, - 0xc4, 0x45, 0xb0, 0xff, 0xc8, 0xa1, 0xc9, 0x2f, 0x42, 0x33, 0xf0, 0x74, 0xc7, 0xdf, 0x73, 0xbd, - 0x1e, 0xf5, 0xe4, 0x1e, 0x75, 0x2d, 0xc7, 0x65, 0xde, 0xdb, 0x11, 0x9a, 0x30, 0xc9, 0x26, 0x8a, - 0x30, 0x2e, 0x8d, 0x1c, 0x40, 0x7d, 0x60, 0x8a, 0x86, 0x49, 0x33, 0xd8, 0x72, 0x9e, 0xfb, 0xd9, - 0x63, 0x41, 0x22, 0xea, 0x1f, 0x86, 0x02, 0x92, 0x57, 0xa3, 0xd6, 0x26, 0x75, 0x35, 0x6a, 0x7c, - 0x34, 0x66, 0xa5, 0xb9, 0x21, 0x3d, 0xa9, 0xd7, 0x3a, 0x5d, 0x19, 0xe3, 0xb6, 0x96, 0x5b, 0xe5, - 0x14, 0x22, 0x9b, 0xa1, 0x6e, 0xec, 0x74, 0x51, 0xc9, 0xd0, 0x7a, 0x20, 0x7d, 0x47, 0xc4, 0x48, - 0x5c, 0x07, 0x25, 0x4e, 0x46, 0x2e, 0x9e, 0x6e, 0x3e, 0x08, 0xef, 0x25, 0x8a, 0xa5, 0xa3, 0xcf, - 0xbc, 0xf7, 0x49, 0xfb, 0xf7, 0x45, 0x28, 0x6d, 0x6f, 0x74, 0x44, 0x8a, 0x59, 0x7e, 0xc1, 0x1c, - 0xed, 0x1c, 0x58, 0xfd, 0x07, 0xd4, 0xb3, 0xf6, 0x8e, 0xe4, 0xd6, 0x3b, 0x96, 0x62, 0x36, 0xcd, - 0x81, 0x19, 0xb5, 0xc8, 0xbb, 0x30, 0x65, 0xe8, 0x2b, 0xd4, 0x0b, 0xc6, 0x31, 0x2c, 0xf0, 0x43, - 0xe6, 0x2b, 0xcb, 0x51, 0x75, 0x4c, 0x80, 0x91, 0x1d, 0x00, 0x23, 0x82, 0x2e, 0x9d, 0xd9, 0x1c, - 0x12, 0x03, 0x8e, 0x01, 0x11, 0x84, 0xc6, 0x01, 0x63, 0xe5, 0xa8, 0xe5, 0xb3, 0xa0, 0xf2, 0x91, - 0x73, 0x57, 0xd5, 0xc5, 0x08, 0x46, 0x73, 0x60, 0x3a, 0x71, 0x47, 0x14, 0xf9, 0x0a, 0xd4, 0xdd, - 0x7e, 0x6c, 0x3a, 0x6d, 0xf0, 0x68, 0xda, 0xfa, 0x7d, 0x59, 0xf6, 0xf8, 0xb8, 0x35, 0xbd, 0xe1, - 0x76, 0x2d, 0x43, 0x15, 0x60, 0xc8, 0x4e, 0x34, 0xa8, 0xf2, 0x73, 0x9b, 0xea, 0x86, 0x28, 0xbe, - 0x76, 0xf0, 0x4b, 0x5c, 0x7c, 0x94, 0x14, 0xed, 0x97, 0xcb, 0x10, 0x79, 0x5c, 0x89, 0x0f, 0x55, - 0x71, 0x66, 0x44, 0xce, 0xdc, 0xe7, 0x7a, 0x3c, 0x45, 0x8a, 0x22, 0x5d, 0x28, 
0xbd, 0xef, 0xee, - 0xe6, 0x9e, 0xb8, 0x63, 0x29, 0x21, 0x84, 0xad, 0x2c, 0x56, 0x80, 0x4c, 0x02, 0xf9, 0xab, 0x05, - 0x78, 0xd1, 0x4f, 0xab, 0xbe, 0x72, 0x38, 0x60, 0x7e, 0x1d, 0x3f, 0xad, 0x4c, 0xcb, 0xb0, 0xe7, - 0x51, 0x64, 0x1c, 0x6e, 0x0b, 0xeb, 0x7f, 0xe1, 0x0a, 0x95, 0xc3, 0xe9, 0x56, 0xce, 0x9b, 0x70, - 0x93, 0xfd, 0x9f, 0x2c, 0x43, 0x29, 0x4a, 0xfb, 0x4e, 0x11, 0x9a, 0xb1, 0xd9, 0x3a, 0xf7, 0xc5, - 0x63, 0x8f, 0x52, 0x17, 0x8f, 0x6d, 0x8d, 0x1f, 0x19, 0x10, 0xb5, 0xea, 0xbc, 0xef, 0x1e, 0xfb, - 0xa7, 0x45, 0x28, 0xed, 0xac, 0xae, 0x25, 0x37, 0xad, 0x85, 0x67, 0xb0, 0x69, 0xdd, 0x87, 0xda, - 0xee, 0xc0, 0xb2, 0x03, 0xcb, 0xc9, 0x9d, 0xb4, 0x46, 0xdd, 0xd3, 0x26, 0x7d, 0x1d, 0x02, 0x15, - 0x15, 0x3c, 0xe9, 0x42, 0xad, 0x2b, 0xb2, 0x86, 0xe6, 0x8e, 0x97, 0x94, 0xd9, 0x47, 0x85, 0x20, - 0xf9, 0x07, 0x15, 0xba, 0x76, 0x04, 0xd5, 0x9d, 0x55, 0xa9, 0xf6, 0x3f, 0xdb, 0xde, 0xd4, 0x7e, - 0x11, 0x42, 0x2d, 0xe0, 0xd9, 0x0b, 0xff, 0x6f, 0x05, 0x48, 0x2a, 0x3e, 0xcf, 0x7e, 0x34, 0x1d, - 0xa4, 0x47, 0xd3, 0xea, 0x24, 0x3e, 0xbe, 0xec, 0x01, 0xa5, 0xfd, 0x9b, 0x02, 0xa4, 0x0e, 0xfa, - 0x91, 0xd7, 0x65, 0x02, 0xba, 0x64, 0x60, 0x9a, 0x4a, 0x40, 0x47, 0x92, 0xdc, 0xb1, 0x44, 0x74, - 0x1f, 0xb2, 0xed, 0x5a, 0xdc, 0x81, 0x26, 0x9b, 0x7f, 0x6f, 0xfc, 0xed, 0x5a, 0x96, 0x3b, 0x4e, - 0x06, 0x4f, 0xc6, 0x49, 0x98, 0x94, 0xab, 0xfd, 0x83, 0x22, 0x54, 0x9f, 0x59, 0x6e, 0x03, 0x9a, - 0x88, 0x67, 0x5d, 0xc9, 0x39, 0xdb, 0x8f, 0x8c, 0x66, 0xed, 0xa5, 0xa2, 0x59, 0xf3, 0x5e, 0xb0, - 0xfe, 0x94, 0x58, 0xd6, 0x7f, 0x55, 0x00, 0xb9, 0xd6, 0xac, 0x3b, 0x7e, 0xa0, 0x3b, 0x06, 0x25, - 0x46, 0xb8, 0xb0, 0xe5, 0x0d, 0x9a, 0x92, 0x81, 0x85, 0x42, 0x97, 0xe1, 0xbf, 0xd5, 0x42, 0x46, - 0x7e, 0x12, 0xea, 0xfb, 0xae, 0x1f, 0xf0, 0xc5, 0xab, 0x98, 0x34, 0x99, 0xdd, 0x96, 0xe5, 0x18, - 0x72, 0xa4, 0xdd, 0xd9, 0x95, 0xd1, 0xee, 0x6c, 0xed, 0x37, 0x8b, 0x30, 0xf5, 0x49, 0x49, 0x9e, - 0x90, 0x15, 0xfd, 0x5b, 0xca, 0x19, 0xfd, 0x5b, 0x3e, 0x4b, 0xf4, 0xaf, 0xf6, 0xc3, 0x02, 0xc0, - 0x33, 0xcb, 0xdc, 
0x60, 0x26, 0x03, 0x73, 0x73, 0x8f, 0xab, 0xec, 0xb0, 0xdc, 0xbf, 0x5b, 0x51, - 0x8f, 0xc4, 0x83, 0x72, 0x3f, 0x2c, 0xc0, 0x8c, 0x9e, 0x08, 0x74, 0xcd, 0xad, 0x2f, 0xa7, 0xe2, - 0x66, 0xc3, 0x38, 0xad, 0x64, 0x39, 0xa6, 0xc4, 0x92, 0x37, 0xa2, 0xdc, 0xe7, 0xf7, 0xa2, 0x61, - 0x3f, 0x94, 0xb4, 0x9c, 0xeb, 0x6e, 0x09, 0xce, 0xa7, 0x04, 0x16, 0x97, 0x26, 0x12, 0x58, 0x1c, - 0x3f, 0x32, 0x59, 0x7e, 0xe2, 0x91, 0xc9, 0x43, 0x68, 0xec, 0x79, 0x6e, 0x8f, 0xc7, 0xee, 0xca, - 0xdb, 0xc5, 0x6f, 0xe6, 0x58, 0x28, 0x7b, 0xbb, 0x96, 0x43, 0x4d, 0x1e, 0x17, 0x1c, 0x1a, 0xae, - 0xd6, 0x14, 0x3e, 0x46, 0xa2, 0xb8, 0xad, 0xdf, 0x15, 0x52, 0xab, 0x93, 0x94, 0x1a, 0xce, 0x25, - 0xdb, 0x02, 0x1d, 0x95, 0x98, 0x64, 0xbc, 0x6e, 0xed, 0xd9, 0xc4, 0xeb, 0x6a, 0x7f, 0xbe, 0xa6, - 0x26, 0xb0, 0xe7, 0x2e, 0xcd, 0xee, 0xa7, 0x07, 0xdd, 0xbb, 0x74, 0xe8, 0x14, 0x7a, 0xfd, 0x19, - 0x9e, 0x42, 0x6f, 0x4c, 0xe6, 0x14, 0x3a, 0xe4, 0x3b, 0x85, 0xde, 0x9c, 0xd0, 0x29, 0xf4, 0xa9, - 0x49, 0x9d, 0x42, 0x9f, 0x1e, 0xeb, 0x14, 0xfa, 0xcc, 0xa9, 0x4e, 0xa1, 0x1f, 0x97, 0x20, 0xb5, - 0x19, 0xff, 0xd4, 0xf1, 0xf6, 0x47, 0xca, 0xf1, 0xf6, 0xbd, 0x22, 0x44, 0x13, 0xf1, 0x19, 0x03, - 0x93, 0xde, 0x81, 0x7a, 0x4f, 0x7f, 0xc4, 0x03, 0xa7, 0xf3, 0xdc, 0x4e, 0xbd, 0x29, 0x31, 0x30, - 0x44, 0x23, 0x3e, 0x80, 0x15, 0xde, 0x10, 0x91, 0xdb, 0x85, 0x11, 0x5d, 0x36, 0x21, 0x8c, 0xa4, - 0xd1, 0x7f, 0x8c, 0x89, 0xd1, 0xfe, 0x65, 0x11, 0xe4, 0x55, 0x22, 0x84, 0x42, 0x65, 0xcf, 0x7a, - 0x44, 0xcd, 0xdc, 0xe1, 0xce, 0x6b, 0x0c, 0x45, 0xde, 0x57, 0xc2, 0x7d, 0x34, 0xbc, 0x00, 0x05, - 0x3a, 0x37, 0xbe, 0x0b, 0x9f, 0x9b, 0xec, 0xbf, 0x1c, 0xc6, 0xf7, 0xb8, 0xef, 0x4e, 0x1a, 0xdf, - 0x45, 0x11, 0x2a, 0x19, 0xc2, 0xd6, 0xcf, 0xc3, 0x2f, 0x72, 0xbb, 0x18, 0x13, 0x61, 0x1c, 0xca, - 0xd6, 0xef, 0x8b, 0x34, 0x14, 0x52, 0x46, 0xfb, 0x17, 0x7e, 0xf0, 0xa3, 0x6b, 0x2f, 0xfc, 0xf0, - 0x47, 0xd7, 0x5e, 0xf8, 0xe8, 0x47, 0xd7, 0x5e, 0xf8, 0xe5, 0x93, 0x6b, 0x85, 0x1f, 0x9c, 0x5c, - 0x2b, 0xfc, 0xf0, 0xe4, 0x5a, 0xe1, 0xa3, 0x93, 0x6b, 
0x85, 0xff, 0x78, 0x72, 0xad, 0xf0, 0x97, - 0xfe, 0xd3, 0xb5, 0x17, 0x7e, 0xfe, 0xcb, 0x51, 0x13, 0x16, 0x55, 0x13, 0x16, 0x95, 0xc0, 0xc5, - 0xfe, 0x41, 0x77, 0x91, 0x35, 0x21, 0x2a, 0x51, 0x4d, 0xf8, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, - 0xae, 0x9f, 0x3b, 0x27, 0x9f, 0x9f, 0x00, 0x00, + // 8167 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x5d, 0x6c, 0x24, 0x57, + 0x76, 0x9e, 0xfa, 0x8f, 0xdd, 0x7d, 0x9a, 0x7f, 0xba, 0x33, 0x1a, 0x71, 0xb8, 0xa3, 0xe9, 0xd9, + 0x5a, 0xaf, 0x76, 0x1c, 0xdb, 0x64, 0x44, 0xaf, 0xb4, 0x5a, 0xdb, 0xbb, 0x12, 0x9b, 0x1c, 0x72, + 0xa8, 0x21, 0x67, 0xb8, 0xa7, 0xc9, 0x91, 0xd6, 0x8a, 0x57, 0x29, 0x56, 0x5d, 0x36, 0x4b, 0xac, + 0xae, 0xea, 0xad, 0xaa, 0xe6, 0x0c, 0xe5, 0x18, 0x6b, 0xef, 0x26, 0xd0, 0x06, 0x49, 0x90, 0xc0, + 0x4f, 0x06, 0x02, 0x27, 0x48, 0x10, 0xc0, 0x0f, 0x86, 0xf3, 0x60, 0x64, 0xf3, 0x10, 0x20, 0x3f, + 0x0e, 0x82, 0x64, 0xf3, 0xbf, 0x08, 0x02, 0x44, 0x79, 0x08, 0x91, 0x65, 0x90, 0x87, 0x04, 0x48, + 0xe0, 0xc4, 0x48, 0x9c, 0x0c, 0x8c, 0x38, 0xb8, 0x7f, 0xf5, 0xd7, 0xd5, 0x33, 0x64, 0x57, 0x73, + 0x34, 0x5a, 0xeb, 0xad, 0xea, 0x9e, 0x73, 0xbf, 0x73, 0xeb, 0xd6, 0xfd, 0x39, 0xf7, 0x9c, 0x73, + 0xef, 0x85, 0xf5, 0x8e, 0x15, 0x1c, 0xf4, 0xf7, 0x16, 0x0c, 0xb7, 0xbb, 0xe8, 0xf4, 0xbb, 0x7a, + 0xcf, 0x73, 0xdf, 0xe7, 0x0f, 0xfb, 0xb6, 0xfb, 0x60, 0xb1, 0x77, 0xd8, 0x59, 0xd4, 0x7b, 0x96, + 0x1f, 0xa5, 0x1c, 0xbd, 0xa2, 0xdb, 0xbd, 0x03, 0xfd, 0x95, 0xc5, 0x0e, 0x75, 0xa8, 0xa7, 0x07, + 0xd4, 0x5c, 0xe8, 0x79, 0x6e, 0xe0, 0x92, 0x2f, 0x45, 0x40, 0x0b, 0x0a, 0x68, 0x41, 0x65, 0x5b, + 0xe8, 0x1d, 0x76, 0x16, 0x18, 0x50, 0x94, 0xa2, 0x80, 0xe6, 0x7f, 0x2a, 0x56, 0x82, 0x8e, 0xdb, + 0x71, 0x17, 0x39, 0xde, 0x5e, 0x7f, 0x9f, 0xbf, 0xf1, 0x17, 0xfe, 0x24, 0xe4, 0xcc, 0x6b, 0x87, + 0xaf, 0xfb, 0x0b, 0x96, 0xcb, 0x8a, 0xb5, 0x68, 0xb8, 0x1e, 0x5d, 0x3c, 0x1a, 0x28, 0xcb, 0xfc, + 0x17, 0x23, 0x9e, 0xae, 0x6e, 0x1c, 0x58, 0x0e, 0xf5, 0x8e, 0xd5, 0xb7, 0x2c, 0x7a, 0xd4, 
0x77, + 0xfb, 0x9e, 0x41, 0xcf, 0x95, 0xcb, 0x5f, 0xec, 0xd2, 0x40, 0xcf, 0x92, 0xb5, 0x38, 0x2c, 0x97, + 0xd7, 0x77, 0x02, 0xab, 0x3b, 0x28, 0xe6, 0xb5, 0x27, 0x65, 0xf0, 0x8d, 0x03, 0xda, 0xd5, 0x07, + 0xf2, 0xfd, 0xf4, 0xb0, 0x7c, 0xfd, 0xc0, 0xb2, 0x17, 0x2d, 0x27, 0xf0, 0x03, 0x2f, 0x9d, 0x49, + 0xfb, 0x1d, 0x80, 0x4b, 0xcb, 0x7b, 0x7e, 0xe0, 0xe9, 0x46, 0xb0, 0xed, 0x9a, 0x3b, 0xb4, 0xdb, + 0xb3, 0xf5, 0x80, 0x92, 0x43, 0xa8, 0xb1, 0x0f, 0x32, 0xf5, 0x40, 0x9f, 0x2b, 0xdc, 0x28, 0xdc, + 0x6c, 0x2c, 0x2d, 0x2f, 0x8c, 0xf8, 0x03, 0x17, 0xb6, 0x24, 0x50, 0x6b, 0xf2, 0xf4, 0xa4, 0x59, + 0x53, 0x6f, 0x18, 0x0a, 0x20, 0xbf, 0x56, 0x80, 0x49, 0xc7, 0x35, 0x69, 0x9b, 0xda, 0xd4, 0x08, + 0x5c, 0x6f, 0xae, 0x78, 0xa3, 0x74, 0xb3, 0xb1, 0xf4, 0x8d, 0x91, 0x25, 0x66, 0x7c, 0xd1, 0xc2, + 0xdd, 0x98, 0x80, 0x5b, 0x4e, 0xe0, 0x1d, 0xb7, 0x2e, 0x7f, 0xff, 0xa4, 0xf9, 0xdc, 0xe9, 0x49, + 0x73, 0x32, 0x4e, 0xc2, 0x44, 0x49, 0xc8, 0x2e, 0x34, 0x02, 0xd7, 0x66, 0x55, 0x66, 0xb9, 0x8e, + 0x3f, 0x57, 0xe2, 0x05, 0xbb, 0xbe, 0x20, 0xaa, 0x9a, 0x89, 0x5f, 0x60, 0x6d, 0x6c, 0xe1, 0xe8, + 0x95, 0x85, 0x9d, 0x90, 0xad, 0x75, 0x49, 0x02, 0x37, 0xa2, 0x34, 0x1f, 0xe3, 0x38, 0x84, 0xc2, + 0x8c, 0x4f, 0x8d, 0xbe, 0x67, 0x05, 0xc7, 0x2b, 0xae, 0x13, 0xd0, 0x87, 0xc1, 0x5c, 0x99, 0xd7, + 0xf2, 0xcb, 0x59, 0xd0, 0xdb, 0xae, 0xd9, 0x4e, 0x72, 0xb7, 0x2e, 0x9d, 0x9e, 0x34, 0x67, 0x52, + 0x89, 0x98, 0xc6, 0x24, 0x0e, 0xcc, 0x5a, 0x5d, 0xbd, 0x43, 0xb7, 0xfb, 0xb6, 0xdd, 0xa6, 0x86, + 0x47, 0x03, 0x7f, 0xae, 0xc2, 0x3f, 0xe1, 0x66, 0x96, 0x9c, 0x4d, 0xd7, 0xd0, 0xed, 0x7b, 0x7b, + 0xef, 0x53, 0x23, 0x40, 0xba, 0x4f, 0x3d, 0xea, 0x18, 0xb4, 0x35, 0x27, 0x3f, 0x66, 0x76, 0x23, + 0x85, 0x84, 0x03, 0xd8, 0x64, 0x1d, 0x9e, 0xef, 0x79, 0x96, 0xcb, 0x8b, 0x60, 0xeb, 0xbe, 0x7f, + 0x57, 0xef, 0xd2, 0xb9, 0x89, 0x1b, 0x85, 0x9b, 0xf5, 0xd6, 0x55, 0x09, 0xf3, 0xfc, 0x76, 0x9a, + 0x01, 0x07, 0xf3, 0x90, 0x9b, 0x50, 0x53, 0x89, 0x73, 0xd5, 0x1b, 0x85, 0x9b, 0x15, 0xd1, 0x76, + 0x54, 0x5e, 0x0c, 0xa9, 0x64, 
0x0d, 0x6a, 0xfa, 0xfe, 0xbe, 0xe5, 0x30, 0xce, 0x1a, 0xaf, 0xc2, + 0x6b, 0x59, 0x9f, 0xb6, 0x2c, 0x79, 0x04, 0x8e, 0x7a, 0xc3, 0x30, 0x2f, 0x79, 0x0b, 0x88, 0x4f, + 0xbd, 0x23, 0xcb, 0xa0, 0xcb, 0x86, 0xe1, 0xf6, 0x9d, 0x80, 0x97, 0xbd, 0xce, 0xcb, 0x3e, 0x2f, + 0xcb, 0x4e, 0xda, 0x03, 0x1c, 0x98, 0x91, 0x8b, 0xbc, 0x09, 0xb3, 0xb2, 0xaf, 0x46, 0xb5, 0x00, + 0x1c, 0xe9, 0x32, 0xab, 0x48, 0x4c, 0xd1, 0x70, 0x80, 0x9b, 0x98, 0x70, 0x4d, 0xef, 0x07, 0x6e, + 0x97, 0x41, 0x26, 0x85, 0xee, 0xb8, 0x87, 0xd4, 0x99, 0x6b, 0xdc, 0x28, 0xdc, 0xac, 0xb5, 0x6e, + 0x9c, 0x9e, 0x34, 0xaf, 0x2d, 0x3f, 0x86, 0x0f, 0x1f, 0x8b, 0x42, 0xee, 0x41, 0xdd, 0x74, 0xfc, + 0x6d, 0xd7, 0xb6, 0x8c, 0xe3, 0xb9, 0x49, 0x5e, 0xc0, 0x57, 0xe4, 0xa7, 0xd6, 0x57, 0xef, 0xb6, + 0x05, 0xe1, 0xd1, 0x49, 0xf3, 0xda, 0xe0, 0x90, 0xba, 0x10, 0xd2, 0x31, 0xc2, 0x20, 0x5b, 0x1c, + 0x70, 0xc5, 0x75, 0xf6, 0xad, 0xce, 0xdc, 0x14, 0xff, 0x1b, 0x37, 0x86, 0x34, 0xe8, 0xd5, 0xbb, + 0x6d, 0xc1, 0xd7, 0x9a, 0x92, 0xe2, 0xc4, 0x2b, 0x46, 0x08, 0xc4, 0x84, 0x69, 0x35, 0x18, 0xaf, + 0xd8, 0xba, 0xd5, 0xf5, 0xe7, 0xa6, 0x79, 0xe3, 0xfd, 0xb1, 0x21, 0x98, 0x18, 0x67, 0x6e, 0x5d, + 0x91, 0x9f, 0x32, 0x9d, 0x48, 0xf6, 0x31, 0x85, 0x39, 0xff, 0x06, 0x3c, 0x3f, 0x30, 0x36, 0x90, + 0x59, 0x28, 0x1d, 0xd2, 0x63, 0x3e, 0xf4, 0xd5, 0x91, 0x3d, 0x92, 0xcb, 0x50, 0x39, 0xd2, 0xed, + 0x3e, 0x9d, 0x2b, 0xf2, 0x34, 0xf1, 0xf2, 0x33, 0xc5, 0xd7, 0x0b, 0xda, 0x5f, 0x2f, 0xc1, 0xa4, + 0x1a, 0x71, 0xda, 0x96, 0x73, 0x48, 0xde, 0x86, 0x92, 0xed, 0x76, 0xe4, 0xb8, 0xf9, 0x73, 0x23, + 0x8f, 0x62, 0x9b, 0x6e, 0xa7, 0x55, 0x3d, 0x3d, 0x69, 0x96, 0x36, 0xdd, 0x0e, 0x32, 0x44, 0x62, + 0x40, 0xe5, 0x50, 0xdf, 0x3f, 0xd4, 0x79, 0x19, 0x1a, 0x4b, 0xad, 0x91, 0xa1, 0xef, 0x30, 0x14, + 0x56, 0xd6, 0x56, 0xfd, 0xf4, 0xa4, 0x59, 0xe1, 0xaf, 0x28, 0xb0, 0x89, 0x0b, 0xf5, 0x3d, 0x5b, + 0x37, 0x0e, 0x0f, 0x5c, 0x9b, 0xce, 0x95, 0x72, 0x0a, 0x6a, 0x29, 0x24, 0xf1, 0x9b, 0xc3, 0x57, + 0x8c, 0x64, 0x10, 0x03, 0x26, 0xfa, 0xa6, 0x6f, 0x39, 0x87, 0x72, 
0x0c, 0x7c, 0x63, 0x64, 0x69, + 0xbb, 0xab, 0xfc, 0x9b, 0xe0, 0xf4, 0xa4, 0x39, 0x21, 0x9e, 0x51, 0x42, 0x6b, 0xff, 0x67, 0x12, + 0xa6, 0xd5, 0x4f, 0xba, 0x4f, 0xbd, 0x80, 0x3e, 0x24, 0x37, 0xa0, 0xec, 0xb0, 0xae, 0xc9, 0x7f, + 0x72, 0x6b, 0x52, 0x36, 0x97, 0x32, 0xef, 0x92, 0x9c, 0xc2, 0x4a, 0x26, 0x9a, 0x8a, 0xac, 0xf0, + 0xd1, 0x4b, 0xd6, 0xe6, 0x30, 0xa2, 0x64, 0xe2, 0x19, 0x25, 0x34, 0x79, 0x17, 0xca, 0xfc, 0xe3, + 0x45, 0x55, 0x7f, 0x65, 0x74, 0x11, 0xec, 0xd3, 0x6b, 0xec, 0x0b, 0xf8, 0x87, 0x73, 0x50, 0xd6, + 0x14, 0xfb, 0xe6, 0xbe, 0xac, 0xd8, 0x9f, 0xcb, 0x51, 0xb1, 0x6b, 0xa2, 0x29, 0xee, 0xae, 0xae, + 0x21, 0x43, 0x24, 0x7f, 0xb1, 0x00, 0xcf, 0x1b, 0xae, 0x13, 0xe8, 0x4c, 0xcf, 0x50, 0x93, 0xec, + 0x5c, 0x85, 0xcb, 0x79, 0x6b, 0x64, 0x39, 0x2b, 0x69, 0xc4, 0xd6, 0x0b, 0x6c, 0xce, 0x18, 0x48, + 0xc6, 0x41, 0xd9, 0xe4, 0x2f, 0x17, 0xe0, 0x05, 0x36, 0x96, 0x0f, 0x30, 0xf3, 0x19, 0x68, 0xbc, + 0xa5, 0xba, 0x7a, 0x7a, 0xd2, 0x7c, 0x61, 0x23, 0x4b, 0x18, 0x66, 0x97, 0x81, 0x95, 0xee, 0x92, + 0x3e, 0xa8, 0x96, 0xf0, 0xd9, 0xad, 0xb1, 0xb4, 0x39, 0x4e, 0x55, 0xa7, 0xf5, 0x19, 0xd9, 0x94, + 0xb3, 0x34, 0x3b, 0xcc, 0x2a, 0x05, 0xb9, 0x05, 0xd5, 0x23, 0xd7, 0xee, 0x77, 0xa9, 0x3f, 0x57, + 0xe3, 0x43, 0xec, 0x7c, 0xd6, 0x10, 0x7b, 0x9f, 0xb3, 0xb4, 0x66, 0x24, 0x7c, 0x55, 0xbc, 0xfb, + 0xa8, 0xf2, 0x12, 0x0b, 0x26, 0x6c, 0xab, 0x6b, 0x05, 0x3e, 0x9f, 0x38, 0x1b, 0x4b, 0xb7, 0x46, + 0xfe, 0x2c, 0xd1, 0x45, 0x37, 0x39, 0x98, 0xe8, 0x35, 0xe2, 0x19, 0xa5, 0x00, 0x36, 0x14, 0xfa, + 0x86, 0x6e, 0x8b, 0x89, 0xb5, 0xb1, 0xf4, 0xd5, 0xd1, 0xbb, 0x0d, 0x43, 0x69, 0x4d, 0xc9, 0x6f, + 0xaa, 0xf0, 0x57, 0x14, 0xd8, 0xe4, 0x17, 0x60, 0x3a, 0xf1, 0x37, 0xfd, 0xb9, 0x06, 0xaf, 0x9d, + 0x97, 0xb2, 0x6a, 0x27, 0xe4, 0x8a, 0x66, 0x9e, 0x44, 0x0b, 0xf1, 0x31, 0x05, 0x46, 0xee, 0x40, + 0xcd, 0xb7, 0x4c, 0x6a, 0xe8, 0x9e, 0x3f, 0x37, 0x79, 0x16, 0xe0, 0x59, 0x09, 0x5c, 0x6b, 0xcb, + 0x6c, 0x18, 0x02, 0x90, 0x05, 0x80, 0x9e, 0xee, 0x05, 0x96, 0x50, 0x54, 0xa7, 0xb8, 0xd2, 0x34, + 0x7d, 
0x7a, 0xd2, 0x84, 0xed, 0x30, 0x15, 0x63, 0x1c, 0x8c, 0x9f, 0xe5, 0xdd, 0x70, 0x7a, 0xfd, + 0x40, 0x4c, 0xac, 0x75, 0xc1, 0xdf, 0x0e, 0x53, 0x31, 0xc6, 0x41, 0x7e, 0xab, 0x00, 0x9f, 0x89, + 0x5e, 0x07, 0x3b, 0xd9, 0xcc, 0xd8, 0x3b, 0x59, 0xf3, 0xf4, 0xa4, 0xf9, 0x99, 0xf6, 0x70, 0x91, + 0xf8, 0xb8, 0xf2, 0x90, 0x0f, 0x0b, 0x30, 0xdd, 0xef, 0x99, 0x7a, 0x40, 0xdb, 0x01, 0x5b, 0xf1, + 0x74, 0x8e, 0xe7, 0x66, 0x79, 0x11, 0xd7, 0x47, 0x1f, 0x05, 0x13, 0x70, 0xd1, 0x6f, 0x4e, 0xa6, + 0x63, 0x4a, 0xac, 0xf6, 0x36, 0x4c, 0x2d, 0xf7, 0x83, 0x03, 0xd7, 0xb3, 0x3e, 0xe0, 0xea, 0x3f, + 0x59, 0x83, 0x4a, 0xc0, 0xd5, 0x38, 0xa1, 0x21, 0x7c, 0x3e, 0xeb, 0xa7, 0x0b, 0x95, 0xfa, 0x0e, + 0x3d, 0x56, 0x7a, 0x89, 0x98, 0xa9, 0x85, 0x5a, 0x27, 0xb2, 0x6b, 0x7f, 0xba, 0x00, 0xd5, 0x96, + 0x6e, 0x1c, 0xba, 0xfb, 0xfb, 0xe4, 0x1d, 0xa8, 0x59, 0x4e, 0x40, 0xbd, 0x23, 0xdd, 0x96, 0xb0, + 0x0b, 0x31, 0xd8, 0x70, 0x41, 0x18, 0x7d, 0x1e, 0x5b, 0x7d, 0x31, 0x41, 0xab, 0x7d, 0xb9, 0x6a, + 0xe1, 0x9a, 0xf1, 0x86, 0xc4, 0xc0, 0x10, 0x8d, 0x34, 0xa1, 0xe2, 0x07, 0xb4, 0xe7, 0xf3, 0x39, + 0x70, 0x4a, 0x14, 0xa3, 0xcd, 0x12, 0x50, 0xa4, 0x6b, 0x7f, 0xad, 0x00, 0xf5, 0x96, 0xee, 0x5b, + 0x06, 0xfb, 0x4a, 0xb2, 0x02, 0xe5, 0xbe, 0x4f, 0xbd, 0xf3, 0x7d, 0x1b, 0x9f, 0xb6, 0x76, 0x7d, + 0xea, 0x21, 0xcf, 0x4c, 0xee, 0x41, 0xad, 0xa7, 0xfb, 0xfe, 0x03, 0xd7, 0x33, 0xe5, 0xd4, 0x7b, + 0x46, 0x20, 0xb1, 0x4c, 0x90, 0x59, 0x31, 0x04, 0xd1, 0x1a, 0x10, 0xe9, 0x1e, 0xda, 0xef, 0x15, + 0xe0, 0x52, 0xab, 0xbf, 0xbf, 0x4f, 0x3d, 0xa9, 0x15, 0x4b, 0x7d, 0x93, 0x42, 0xc5, 0xa3, 0xa6, + 0xe5, 0xcb, 0xb2, 0xaf, 0x8e, 0xdc, 0x50, 0x90, 0xa1, 0x48, 0xf5, 0x96, 0xd7, 0x17, 0x4f, 0x40, + 0x81, 0x4e, 0xfa, 0x50, 0x7f, 0x9f, 0xb2, 0xd5, 0x38, 0xd5, 0xbb, 0xf2, 0xeb, 0x6e, 0x8f, 0x2c, + 0xea, 0x2d, 0x1a, 0xb4, 0x39, 0x52, 0x5c, 0x9b, 0x0e, 0x13, 0x31, 0x92, 0xa4, 0xfd, 0x4e, 0x05, + 0x26, 0x57, 0xdc, 0xee, 0x9e, 0xe5, 0x50, 0xf3, 0x96, 0xd9, 0xa1, 0xe4, 0x3d, 0x28, 0x53, 0xb3, + 0x43, 0xe5, 0xd7, 0x8e, 0xae, 0x78, 0x30, 
0xb0, 0x48, 0x7d, 0x62, 0x6f, 0xc8, 0x81, 0xc9, 0x26, + 0x4c, 0xef, 0x7b, 0x6e, 0x57, 0x8c, 0xe5, 0x3b, 0xc7, 0x3d, 0xa9, 0x3b, 0xb7, 0x7e, 0x4c, 0x75, + 0x9c, 0xb5, 0x04, 0xf5, 0xd1, 0x49, 0x13, 0xa2, 0x37, 0x4c, 0xe5, 0x25, 0xef, 0xc0, 0x5c, 0x94, + 0x12, 0x0e, 0x6a, 0x2b, 0x6c, 0x39, 0xc3, 0x75, 0xa7, 0x4a, 0xeb, 0xda, 0xe9, 0x49, 0x73, 0x6e, + 0x6d, 0x08, 0x0f, 0x0e, 0xcd, 0xcd, 0x86, 0x8a, 0xd9, 0x88, 0x28, 0x26, 0x1a, 0xa9, 0x32, 0x8d, + 0x69, 0x06, 0xe3, 0xeb, 0xbe, 0xb5, 0x94, 0x08, 0x1c, 0x10, 0x4a, 0xd6, 0x60, 0x32, 0x70, 0x63, + 0xf5, 0x55, 0xe1, 0xf5, 0xa5, 0x29, 0x43, 0xc5, 0x8e, 0x3b, 0xb4, 0xb6, 0x12, 0xf9, 0x08, 0xc2, + 0x15, 0xf5, 0x9e, 0xaa, 0xa9, 0x09, 0x5e, 0x53, 0xf3, 0xa7, 0x27, 0xcd, 0x2b, 0x3b, 0x99, 0x1c, + 0x38, 0x24, 0x27, 0xf9, 0x95, 0x02, 0x4c, 0x2b, 0x92, 0xac, 0xa3, 0xea, 0x38, 0xeb, 0x88, 0xb0, + 0x16, 0xb1, 0x93, 0x10, 0x80, 0x29, 0x81, 0xda, 0xf7, 0xaa, 0x50, 0x0f, 0x87, 0x7a, 0xf2, 0x39, + 0xa8, 0x70, 0x13, 0x84, 0xd4, 0xe0, 0xc3, 0x39, 0x9c, 0x5b, 0x2a, 0x50, 0xd0, 0xc8, 0xe7, 0xa1, + 0x6a, 0xb8, 0xdd, 0xae, 0xee, 0x98, 0xdc, 0xac, 0x54, 0x6f, 0x35, 0x98, 0xea, 0xb2, 0x22, 0x92, + 0x50, 0xd1, 0xc8, 0x35, 0x28, 0xeb, 0x5e, 0x47, 0x58, 0x78, 0xea, 0x62, 0x3c, 0x5a, 0xf6, 0x3a, + 0x3e, 0xf2, 0x54, 0xf2, 0x65, 0x28, 0x51, 0xe7, 0x68, 0xae, 0x3c, 0x5c, 0x37, 0xba, 0xe5, 0x1c, + 0xdd, 0xd7, 0xbd, 0x56, 0x43, 0x96, 0xa1, 0x74, 0xcb, 0x39, 0x42, 0x96, 0x87, 0x6c, 0x42, 0x95, + 0x3a, 0x47, 0xec, 0xdf, 0x4b, 0xd3, 0xcb, 0x67, 0x87, 0x64, 0x67, 0x2c, 0x72, 0x99, 0x10, 0x6a, + 0x58, 0x32, 0x19, 0x15, 0x04, 0xf9, 0x3a, 0x4c, 0x0a, 0x65, 0x6b, 0x8b, 0xfd, 0x13, 0x7f, 0x6e, + 0x82, 0x43, 0x36, 0x87, 0x6b, 0x6b, 0x9c, 0x2f, 0x32, 0x75, 0xc5, 0x12, 0x7d, 0x4c, 0x40, 0x91, + 0xaf, 0x43, 0x5d, 0xad, 0x8c, 0xd5, 0x9f, 0xcd, 0xb4, 0x12, 0xa9, 0xe5, 0x34, 0xd2, 0x6f, 0xf6, + 0x2d, 0x8f, 0x76, 0xa9, 0x13, 0xf8, 0xad, 0xe7, 0x95, 0xdd, 0x40, 0x51, 0x7d, 0x8c, 0xd0, 0xc8, + 0xde, 0xa0, 0xb9, 0x4b, 0xd8, 0x6a, 0x3e, 0x37, 0x64, 0x54, 0x1f, 0xc1, 0xd6, 
0xf5, 0x0d, 0x98, + 0x09, 0xed, 0x51, 0xd2, 0xa4, 0x21, 0xac, 0x37, 0x5f, 0x64, 0xd9, 0x37, 0x92, 0xa4, 0x47, 0x27, + 0xcd, 0x97, 0x32, 0x8c, 0x1a, 0x11, 0x03, 0xa6, 0xc1, 0xc8, 0x07, 0x30, 0xed, 0x51, 0xdd, 0xb4, + 0x1c, 0xea, 0xfb, 0xdb, 0x9e, 0xbb, 0x97, 0x5f, 0xf3, 0xe4, 0x28, 0xa2, 0xd9, 0x63, 0x02, 0x19, + 0x53, 0x92, 0xc8, 0x03, 0x98, 0xb2, 0xad, 0x23, 0x1a, 0x89, 0x6e, 0x8c, 0x45, 0xf4, 0xf3, 0xa7, + 0x27, 0xcd, 0xa9, 0xcd, 0x38, 0x30, 0x26, 0xe5, 0x30, 0x4d, 0xa5, 0xe7, 0x7a, 0x81, 0x52, 0x4f, + 0x3f, 0xfb, 0x58, 0xf5, 0x74, 0xdb, 0xf5, 0x82, 0xa8, 0x13, 0xb2, 0x37, 0x1f, 0x45, 0x76, 0xed, + 0x6f, 0x55, 0x60, 0x70, 0x11, 0x97, 0x6c, 0x71, 0x85, 0x71, 0xb7, 0xb8, 0x74, 0x6b, 0x10, 0x73, + 0xcf, 0xeb, 0x32, 0xdb, 0x18, 0x5a, 0x44, 0x46, 0xab, 0x2e, 0x8d, 0xbb, 0x55, 0x3f, 0x33, 0x03, + 0xcf, 0x60, 0xf3, 0x9f, 0xf8, 0xf8, 0x9a, 0x7f, 0xf5, 0xe9, 0x34, 0x7f, 0xed, 0xbb, 0x65, 0x98, + 0x5e, 0xd5, 0x69, 0xd7, 0x75, 0x9e, 0xb8, 0x8e, 0x2f, 0x3c, 0x13, 0xeb, 0xf8, 0x9b, 0x50, 0xf3, + 0x68, 0xcf, 0xb6, 0x0c, 0x5d, 0xa8, 0xeb, 0xd2, 0x6e, 0x8e, 0x32, 0x0d, 0x43, 0xea, 0x10, 0xfb, + 0x4d, 0xe9, 0x99, 0xb4, 0xdf, 0x94, 0x3f, 0x7e, 0xfb, 0x8d, 0xf6, 0x2b, 0x45, 0xe0, 0xaa, 0x2d, + 0xb9, 0x01, 0x65, 0xa6, 0xb6, 0xa5, 0xad, 0x86, 0xbc, 0xb7, 0x70, 0x0a, 0x99, 0x87, 0x62, 0xe0, + 0xca, 0xe1, 0x06, 0x24, 0xbd, 0xb8, 0xe3, 0x62, 0x31, 0x70, 0xc9, 0x07, 0x00, 0x86, 0xeb, 0x98, + 0x96, 0x72, 0x27, 0xe5, 0xfb, 0xb0, 0x35, 0xd7, 0x7b, 0xa0, 0x7b, 0xe6, 0x4a, 0x88, 0x28, 0x56, + 0xf0, 0xd1, 0x3b, 0xc6, 0xa4, 0x91, 0x37, 0x60, 0xc2, 0x75, 0xd6, 0xfa, 0xb6, 0xcd, 0x2b, 0xb4, + 0xde, 0xfa, 0xc2, 0xe9, 0x49, 0x73, 0xe2, 0x1e, 0x4f, 0x79, 0x74, 0xd2, 0xbc, 0x2a, 0x56, 0x44, + 0xec, 0xed, 0x6d, 0xcf, 0x0a, 0x2c, 0xa7, 0x13, 0x2e, 0x68, 0x65, 0x36, 0xed, 0x57, 0x0b, 0xd0, + 0x58, 0xb3, 0x1e, 0x52, 0xf3, 0x6d, 0xcb, 0x31, 0xdd, 0x07, 0x04, 0x61, 0xc2, 0xa6, 0x4e, 0x27, + 0x38, 0x18, 0x71, 0xc5, 0x29, 0xec, 0x3a, 0x1c, 0x01, 0x25, 0x12, 0x59, 0x84, 0xba, 0x58, 0xaf, + 0x58, 0x4e, 0x87, 
0xd7, 0x61, 0x2d, 0x1a, 0xe9, 0xdb, 0x8a, 0x80, 0x11, 0x8f, 0x76, 0x0c, 0xcf, + 0x0f, 0x54, 0x03, 0x31, 0xa1, 0x1c, 0xe8, 0x1d, 0x35, 0xa9, 0xac, 0x8d, 0x5c, 0xc1, 0x3b, 0x7a, + 0x27, 0x56, 0xb9, 0x5c, 0x2b, 0xdc, 0xd1, 0x99, 0x56, 0xc8, 0xd0, 0xb5, 0x3f, 0x28, 0x40, 0x6d, + 0xad, 0xef, 0x18, 0x7c, 0x51, 0xff, 0x64, 0x6b, 0xb2, 0x52, 0x31, 0x8b, 0x99, 0x2a, 0x66, 0x1f, + 0x26, 0x0e, 0x1f, 0x84, 0x2a, 0x68, 0x63, 0x69, 0x6b, 0xf4, 0x56, 0x21, 0x8b, 0xb4, 0x70, 0x87, + 0xe3, 0x09, 0x67, 0xe7, 0xb4, 0x2c, 0xd0, 0xc4, 0x9d, 0xb7, 0xb9, 0x50, 0x29, 0x6c, 0xfe, 0xcb, + 0xd0, 0x88, 0xb1, 0x9d, 0xcb, 0xef, 0xf1, 0xb7, 0xcb, 0x30, 0xb1, 0xde, 0x6e, 0x2f, 0x6f, 0x6f, + 0x90, 0x57, 0xa1, 0x21, 0xfd, 0x60, 0x77, 0xa3, 0x3a, 0x08, 0xdd, 0xa0, 0xed, 0x88, 0x84, 0x71, + 0x3e, 0xa6, 0xc0, 0x7b, 0x54, 0xb7, 0xbb, 0xb2, 0xb3, 0x84, 0xba, 0x03, 0xb2, 0x44, 0x14, 0x34, + 0xa2, 0xc3, 0x74, 0xdf, 0xa7, 0x1e, 0xab, 0x42, 0xb1, 0xde, 0x97, 0xdd, 0xe6, 0x8c, 0x16, 0x01, + 0x3e, 0xc1, 0xec, 0x26, 0x00, 0x30, 0x05, 0x48, 0x5e, 0x87, 0x9a, 0xde, 0x0f, 0x0e, 0xf8, 0x92, + 0x4b, 0xf4, 0x8d, 0x6b, 0xdc, 0x4d, 0x28, 0xd3, 0x1e, 0x9d, 0x34, 0x27, 0xef, 0x60, 0xeb, 0x55, + 0xf5, 0x8e, 0x21, 0x37, 0x2b, 0x9c, 0xb2, 0x31, 0xc8, 0xc2, 0x55, 0xce, 0x5d, 0xb8, 0xed, 0x04, + 0x00, 0xa6, 0x00, 0xc9, 0xbb, 0x30, 0x79, 0x48, 0x8f, 0x03, 0x7d, 0x4f, 0x0a, 0x98, 0x38, 0x8f, + 0x80, 0x59, 0xa6, 0xf4, 0xdf, 0x89, 0x65, 0xc7, 0x04, 0x18, 0xf1, 0xe1, 0xf2, 0x21, 0xf5, 0xf6, + 0xa8, 0xe7, 0x4a, 0x7b, 0x85, 0x14, 0x52, 0x3d, 0x8f, 0x90, 0xb9, 0xd3, 0x93, 0xe6, 0xe5, 0x3b, + 0x19, 0x30, 0x98, 0x09, 0xae, 0xfd, 0xdf, 0x22, 0xcc, 0xac, 0x8b, 0x40, 0x04, 0xd7, 0x13, 0x9a, + 0x07, 0xb9, 0x0a, 0x25, 0xaf, 0xd7, 0xe7, 0x2d, 0xa7, 0x24, 0x5c, 0x0d, 0xb8, 0xbd, 0x8b, 0x2c, + 0x8d, 0xbc, 0x03, 0x35, 0x53, 0x0e, 0x19, 0xd2, 0x5c, 0x32, 0x92, 0x69, 0x4b, 0xbd, 0x61, 0x88, + 0xc6, 0xd6, 0x86, 0x5d, 0xbf, 0xd3, 0xb6, 0x3e, 0xa0, 0xd2, 0x82, 0xc0, 0xd7, 0x86, 0x5b, 0x22, + 0x09, 0x15, 0x8d, 0xcd, 0xaa, 0x87, 0xf4, 0x58, 0xac, 
0x9f, 0xcb, 0xd1, 0xac, 0x7a, 0x47, 0xa6, + 0x61, 0x48, 0x25, 0x4d, 0xd5, 0x59, 0x58, 0x2b, 0x28, 0x0b, 0xdb, 0xcf, 0x7d, 0x96, 0x20, 0xfb, + 0x0d, 0x1b, 0x32, 0xdf, 0xb7, 0x82, 0x80, 0x7a, 0xf2, 0x37, 0x8e, 0x34, 0x64, 0xbe, 0xc5, 0x11, + 0x50, 0x22, 0x91, 0x9f, 0x80, 0x3a, 0x07, 0x6f, 0xd9, 0xee, 0x1e, 0xff, 0x71, 0x75, 0x61, 0x05, + 0xba, 0xaf, 0x12, 0x31, 0xa2, 0x6b, 0x7f, 0x58, 0x84, 0x2b, 0xeb, 0x34, 0x10, 0x5a, 0xcd, 0x2a, + 0xed, 0xd9, 0xee, 0x31, 0xd3, 0xa7, 0x91, 0x7e, 0x93, 0xbc, 0x09, 0x60, 0xf9, 0x7b, 0xed, 0x23, + 0x83, 0xf7, 0x03, 0xd1, 0x87, 0x6f, 0xc8, 0x2e, 0x09, 0x1b, 0xed, 0x96, 0xa4, 0x3c, 0x4a, 0xbc, + 0x61, 0x2c, 0x4f, 0xb4, 0x20, 0x2f, 0x3e, 0x66, 0x41, 0xde, 0x06, 0xe8, 0x45, 0x5a, 0x79, 0x89, + 0x73, 0xfe, 0xb4, 0x12, 0x73, 0x1e, 0x85, 0x3c, 0x06, 0x93, 0x47, 0x4f, 0x76, 0x60, 0xd6, 0xa4, + 0xfb, 0x7a, 0xdf, 0x0e, 0xc2, 0x95, 0x84, 0xec, 0xc4, 0x67, 0x5f, 0x8c, 0x84, 0x41, 0x12, 0xab, + 0x29, 0x24, 0x1c, 0xc0, 0xd6, 0xfe, 0x4e, 0x09, 0xe6, 0xd7, 0x69, 0x10, 0xda, 0xe8, 0xe4, 0xe8, + 0xd8, 0xee, 0x51, 0x83, 0xfd, 0x85, 0x0f, 0x0b, 0x30, 0x61, 0xeb, 0x7b, 0xd4, 0x66, 0xb3, 0x17, + 0xfb, 0x9a, 0xf7, 0x46, 0x9e, 0x08, 0x86, 0x4b, 0x59, 0xd8, 0xe4, 0x12, 0x52, 0x53, 0x83, 0x48, + 0x44, 0x29, 0x9e, 0x0d, 0xea, 0x86, 0xdd, 0xf7, 0x03, 0xb1, 0xb2, 0x93, 0xfa, 0x64, 0x38, 0xa8, + 0xaf, 0x44, 0x24, 0x8c, 0xf3, 0x91, 0x25, 0x00, 0xc3, 0xb6, 0xa8, 0x13, 0xf0, 0x5c, 0xa2, 0x5f, + 0x11, 0xf5, 0x7f, 0x57, 0x42, 0x0a, 0xc6, 0xb8, 0x98, 0xa8, 0xae, 0xeb, 0x58, 0x81, 0x2b, 0x44, + 0x95, 0x93, 0xa2, 0xb6, 0x22, 0x12, 0xc6, 0xf9, 0x78, 0x36, 0x1a, 0x78, 0x96, 0xe1, 0xf3, 0x6c, + 0x95, 0x54, 0xb6, 0x88, 0x84, 0x71, 0x3e, 0x36, 0xe7, 0xc5, 0xbe, 0xff, 0x5c, 0x73, 0xde, 0x6f, + 0xd6, 0xe1, 0x7a, 0xa2, 0x5a, 0x03, 0x3d, 0xa0, 0xfb, 0x7d, 0xbb, 0x4d, 0x03, 0xf5, 0x03, 0x47, + 0x9c, 0x0b, 0xff, 0x5c, 0xf4, 0xdf, 0x45, 0xf8, 0x93, 0x31, 0x9e, 0xff, 0x3e, 0x50, 0xc0, 0x33, + 0xfd, 0xfb, 0x45, 0xa8, 0x3b, 0x7a, 0xe0, 0xf3, 0x8e, 0x2b, 0xfb, 0x68, 0xa8, 0x86, 0xdd, 
0x55, + 0x04, 0x8c, 0x78, 0xc8, 0x36, 0x5c, 0x96, 0x55, 0x7c, 0xeb, 0x21, 0x5b, 0xf3, 0x53, 0x4f, 0xe4, + 0x95, 0xd3, 0xa9, 0xcc, 0x7b, 0x79, 0x2b, 0x83, 0x07, 0x33, 0x73, 0x92, 0x2d, 0xb8, 0x64, 0x88, + 0x90, 0x10, 0x6a, 0xbb, 0xba, 0xa9, 0x00, 0x85, 0x49, 0x34, 0x5c, 0x1a, 0xad, 0x0c, 0xb2, 0x60, + 0x56, 0xbe, 0x74, 0x6b, 0x9e, 0x18, 0xa9, 0x35, 0x57, 0x47, 0x69, 0xcd, 0xb5, 0xd1, 0x5a, 0x73, + 0xfd, 0x6c, 0xad, 0x99, 0xd5, 0x3c, 0x6b, 0x47, 0xd4, 0x63, 0xea, 0x89, 0x98, 0x61, 0x63, 0x11, + 0x47, 0x61, 0xcd, 0xb7, 0x33, 0x78, 0x30, 0x33, 0x27, 0xd9, 0x83, 0x79, 0x91, 0x7e, 0xcb, 0x31, + 0xbc, 0xe3, 0x1e, 0x9b, 0x78, 0x62, 0xb8, 0x8d, 0x84, 0x4d, 0x7a, 0xbe, 0x3d, 0x94, 0x13, 0x1f, + 0x83, 0x42, 0x7e, 0x16, 0xa6, 0xc4, 0x5f, 0xda, 0xd2, 0x7b, 0x1c, 0x56, 0xc4, 0x1f, 0xbd, 0x20, + 0x61, 0xa7, 0x56, 0xe2, 0x44, 0x4c, 0xf2, 0x92, 0x65, 0x98, 0xe9, 0x1d, 0x19, 0xec, 0x71, 0x63, + 0xff, 0x2e, 0xa5, 0x26, 0x35, 0xb9, 0xc3, 0xb3, 0xde, 0x7a, 0x51, 0x59, 0x77, 0xb6, 0x93, 0x64, + 0x4c, 0xf3, 0x93, 0xd7, 0x61, 0xd2, 0x0f, 0x74, 0x2f, 0x90, 0x86, 0xe0, 0xb9, 0x69, 0x11, 0x9f, + 0xa5, 0xec, 0xa4, 0xed, 0x18, 0x0d, 0x13, 0x9c, 0x99, 0xf3, 0xc5, 0xcc, 0xc5, 0xcd, 0x17, 0x79, + 0x46, 0xab, 0x7f, 0x52, 0x84, 0x1b, 0xeb, 0x34, 0xd8, 0x72, 0x1d, 0x69, 0x46, 0xcf, 0x9a, 0xf6, + 0xcf, 0x64, 0x45, 0x4f, 0x4e, 0xda, 0xc5, 0xb1, 0x4e, 0xda, 0xa5, 0x31, 0x4d, 0xda, 0xe5, 0x0b, + 0x9c, 0xb4, 0xff, 0x5e, 0x11, 0x5e, 0x4c, 0xd4, 0xe4, 0xb6, 0x6b, 0xaa, 0x01, 0xff, 0xd3, 0x0a, + 0x3c, 0x43, 0x05, 0x3e, 0x12, 0x7a, 0x27, 0x77, 0x84, 0xa6, 0x34, 0x9e, 0xef, 0xa4, 0x35, 0x9e, + 0x77, 0xf3, 0xcc, 0x7c, 0x19, 0x12, 0xce, 0x34, 0xe3, 0xbd, 0x05, 0xc4, 0x93, 0x6e, 0xdb, 0xc8, + 0x9c, 0x2d, 0x95, 0x9e, 0x30, 0x00, 0x14, 0x07, 0x38, 0x30, 0x23, 0x17, 0x69, 0xc3, 0x0b, 0x3e, + 0x75, 0x02, 0xcb, 0xa1, 0x76, 0x12, 0x4e, 0x68, 0x43, 0x2f, 0x49, 0xb8, 0x17, 0xda, 0x59, 0x4c, + 0x98, 0x9d, 0x37, 0xcf, 0x38, 0xf0, 0x2f, 0x80, 0xab, 0x9c, 0xa2, 0x6a, 0xc6, 0xa6, 0xb1, 0x7c, + 0x98, 0xd6, 0x58, 0xde, 0xcb, 
0xff, 0xdf, 0x46, 0xd3, 0x56, 0x96, 0x00, 0xf8, 0x5f, 0x88, 0xab, + 0x2b, 0xe1, 0x24, 0x8d, 0x21, 0x05, 0x63, 0x5c, 0x6c, 0x02, 0x52, 0xf5, 0x1c, 0xd7, 0x54, 0xc2, + 0x09, 0xa8, 0x1d, 0x27, 0x62, 0x92, 0x77, 0xa8, 0xb6, 0x53, 0x19, 0x59, 0xdb, 0x79, 0x0b, 0x48, + 0xc2, 0xf0, 0x28, 0xf0, 0x26, 0x92, 0xf1, 0xc7, 0x1b, 0x03, 0x1c, 0x98, 0x91, 0x6b, 0x48, 0x53, + 0xae, 0x8e, 0xb7, 0x29, 0xd7, 0x46, 0x6f, 0xca, 0xe4, 0x3d, 0xb8, 0xca, 0x45, 0xc9, 0xfa, 0x49, + 0x02, 0x0b, 0xbd, 0xe7, 0xb3, 0x12, 0xf8, 0x2a, 0x0e, 0x63, 0xc4, 0xe1, 0x18, 0xec, 0xff, 0x18, + 0x1e, 0x35, 0x99, 0x70, 0xdd, 0x1e, 0xae, 0x13, 0xad, 0x64, 0xf0, 0x60, 0x66, 0x4e, 0xd6, 0xc4, + 0x02, 0xd6, 0x0c, 0xf5, 0x3d, 0x9b, 0x9a, 0x32, 0xfe, 0x3a, 0x6c, 0x62, 0x3b, 0x9b, 0x6d, 0x49, + 0xc1, 0x18, 0x57, 0x96, 0x9a, 0x32, 0x79, 0x4e, 0x35, 0x65, 0x9d, 0x5b, 0xe9, 0xf7, 0x13, 0xda, + 0x90, 0xd4, 0x75, 0xc2, 0x88, 0xfa, 0x95, 0x34, 0x03, 0x0e, 0xe6, 0xe1, 0x5a, 0xa2, 0xe1, 0x59, + 0xbd, 0xc0, 0x4f, 0x62, 0x4d, 0xa7, 0xb4, 0xc4, 0x0c, 0x1e, 0xcc, 0xcc, 0xc9, 0xf4, 0xf3, 0x03, + 0xaa, 0xdb, 0xc1, 0x41, 0x12, 0x70, 0x26, 0xa9, 0x9f, 0xdf, 0x1e, 0x64, 0xc1, 0xac, 0x7c, 0x99, + 0x13, 0xd2, 0xec, 0xb3, 0xa9, 0x56, 0x7d, 0xbb, 0x04, 0x57, 0xd7, 0x69, 0x10, 0x86, 0xa6, 0x7d, + 0x6a, 0x46, 0xf9, 0x18, 0xcc, 0x28, 0xbf, 0x51, 0x81, 0x4b, 0xeb, 0x34, 0x18, 0xd0, 0xc6, 0xfe, + 0x88, 0x56, 0xff, 0x16, 0x5c, 0x8a, 0xa2, 0x21, 0xdb, 0x81, 0xeb, 0x89, 0xb9, 0x3c, 0xb5, 0x5a, + 0x6e, 0x0f, 0xb2, 0x60, 0x56, 0x3e, 0xf2, 0x75, 0x78, 0x91, 0x4f, 0xf5, 0x4e, 0x47, 0xd8, 0x67, + 0x85, 0x31, 0x21, 0xb6, 0x9f, 0xa7, 0x29, 0x21, 0x5f, 0x6c, 0x67, 0xb3, 0xe1, 0xb0, 0xfc, 0xe4, + 0x5b, 0x30, 0xd9, 0xb3, 0x7a, 0xd4, 0xb6, 0x1c, 0xae, 0x9f, 0xe5, 0x0e, 0x22, 0xda, 0x8e, 0x81, + 0x45, 0x0b, 0xb8, 0x78, 0x2a, 0x26, 0x04, 0x66, 0xb6, 0xd4, 0xda, 0x05, 0xb6, 0xd4, 0xff, 0x59, + 0x84, 0xea, 0xba, 0xe7, 0xf6, 0x7b, 0xad, 0x63, 0xd2, 0x81, 0x89, 0x07, 0xdc, 0x79, 0x26, 0x5d, + 0x53, 0xa3, 0xef, 0x28, 0x10, 0x3e, 0xb8, 0x48, 0x25, 0x12, 0xef, 
0x28, 0xe1, 0x59, 0x23, 0x3e, + 0xa4, 0xc7, 0xd4, 0x94, 0x3e, 0xb4, 0xb0, 0x11, 0xdf, 0x61, 0x89, 0x28, 0x68, 0xa4, 0x0b, 0x33, + 0xba, 0x6d, 0xbb, 0x0f, 0xa8, 0xb9, 0xa9, 0x07, 0xdc, 0xef, 0x2d, 0x7d, 0x2b, 0xe7, 0x35, 0x4b, + 0xf3, 0x60, 0x86, 0xe5, 0x24, 0x14, 0xa6, 0xb1, 0xc9, 0xfb, 0x50, 0xf5, 0x03, 0xd7, 0x53, 0xca, + 0x56, 0x63, 0x69, 0x65, 0xf4, 0x9f, 0xde, 0xfa, 0x5a, 0x5b, 0x40, 0x09, 0x9b, 0xbd, 0x7c, 0x41, + 0x25, 0x40, 0xfb, 0xf5, 0x02, 0xc0, 0xed, 0x9d, 0x9d, 0x6d, 0xe9, 0x5e, 0x30, 0xa1, 0xac, 0xf7, + 0x43, 0x47, 0xe5, 0xe8, 0x0e, 0xc1, 0x44, 0x20, 0xaf, 0xf4, 0xe1, 0xf5, 0x83, 0x03, 0xe4, 0xe8, + 0xe4, 0xc7, 0xa1, 0x2a, 0x15, 0x64, 0x59, 0xed, 0x61, 0x3c, 0x85, 0x54, 0xa2, 0x51, 0xd1, 0xb5, + 0xdf, 0x2e, 0x02, 0x6c, 0x98, 0x36, 0x6d, 0xab, 0x4d, 0x20, 0xf5, 0xe0, 0xc0, 0xa3, 0xfe, 0x81, + 0x6b, 0x9b, 0x23, 0x7a, 0x53, 0xb9, 0xcd, 0x7f, 0x47, 0x81, 0x60, 0x84, 0x47, 0x4c, 0x98, 0xf4, + 0x03, 0xda, 0x53, 0xb1, 0xbd, 0x23, 0x3a, 0x51, 0x66, 0x85, 0x5d, 0x24, 0xc2, 0xc1, 0x04, 0x2a, + 0xd1, 0xa1, 0x61, 0x39, 0x86, 0xe8, 0x20, 0xad, 0xe3, 0x11, 0x1b, 0xd2, 0x0c, 0x5b, 0x71, 0x6c, + 0x44, 0x30, 0x18, 0xc7, 0xd4, 0x7e, 0xb7, 0x08, 0x57, 0xb8, 0x3c, 0x56, 0x8c, 0x44, 0x04, 0x2f, + 0xf9, 0x93, 0x03, 0x1b, 0x56, 0xff, 0xf8, 0xd9, 0x44, 0x8b, 0xfd, 0x8e, 0x5b, 0x34, 0xd0, 0x23, + 0x7d, 0x2e, 0x4a, 0x8b, 0xed, 0x52, 0xed, 0x43, 0xd9, 0x67, 0xe3, 0x95, 0xa8, 0xbd, 0xf6, 0xc8, + 0x4d, 0x28, 0xfb, 0x03, 0xf8, 0xe8, 0x15, 0x7a, 0x8d, 0xf9, 0xa8, 0xc5, 0xc5, 0x91, 0x5f, 0x82, + 0x09, 0x3f, 0xd0, 0x83, 0xbe, 0xea, 0x9a, 0xbb, 0xe3, 0x16, 0xcc, 0xc1, 0xa3, 0x71, 0x44, 0xbc, + 0xa3, 0x14, 0xaa, 0xfd, 0x6e, 0x01, 0xe6, 0xb3, 0x33, 0x6e, 0x5a, 0x7e, 0x40, 0xfe, 0xc4, 0x40, + 0xb5, 0x9f, 0xf1, 0x8f, 0xb3, 0xdc, 0xbc, 0xd2, 0xc3, 0x3d, 0x0d, 0x2a, 0x25, 0x56, 0xe5, 0x01, + 0x54, 0xac, 0x80, 0x76, 0xd5, 0xfa, 0xf2, 0xde, 0x98, 0x3f, 0x3d, 0x36, 0xb5, 0x33, 0x29, 0x28, + 0x84, 0x69, 0xdf, 0x2d, 0x0e, 0xfb, 0x64, 0x3e, 0x7d, 0xd8, 0xc9, 0x28, 0xf1, 0x3b, 0xf9, 0xa2, + 0xc4, 
0x93, 0x05, 0x1a, 0x0c, 0x16, 0xff, 0x53, 0x83, 0xc1, 0xe2, 0xf7, 0xf2, 0x07, 0x8b, 0xa7, + 0xaa, 0x61, 0x68, 0xcc, 0xf8, 0x47, 0x25, 0xb8, 0xf6, 0xb8, 0x66, 0xc3, 0xe6, 0x33, 0xd9, 0x3a, + 0xf3, 0xce, 0x67, 0x8f, 0x6f, 0x87, 0x64, 0x09, 0x2a, 0xbd, 0x03, 0xdd, 0x57, 0x4a, 0xd9, 0xb5, + 0x30, 0xcc, 0x90, 0x25, 0x3e, 0x62, 0x83, 0x06, 0x57, 0xe6, 0xf8, 0x2b, 0x0a, 0x56, 0x36, 0x1c, + 0x77, 0xa9, 0xef, 0x47, 0x36, 0x81, 0x70, 0x38, 0xde, 0x12, 0xc9, 0xa8, 0xe8, 0x24, 0x80, 0x09, + 0x61, 0x62, 0x96, 0x33, 0xd3, 0xe8, 0x81, 0x5c, 0x19, 0x1b, 0x0b, 0xa2, 0x8f, 0x92, 0xde, 0x0a, + 0x29, 0x8b, 0x2c, 0x40, 0x39, 0x88, 0xc2, 0xbc, 0xd5, 0xd2, 0xbc, 0x9c, 0xa1, 0x9f, 0x72, 0x3e, + 0xb6, 0xb0, 0x77, 0xf7, 0xb8, 0x51, 0xdd, 0x94, 0xfe, 0x73, 0xcb, 0x75, 0xb8, 0x42, 0x56, 0x8a, + 0x16, 0xf6, 0xf7, 0x06, 0x38, 0x30, 0x23, 0x97, 0xf6, 0xaf, 0x6b, 0x70, 0x25, 0xbb, 0x3d, 0xb0, + 0x7a, 0x3b, 0xa2, 0x9e, 0xcf, 0xb0, 0x0b, 0xc9, 0x7a, 0xbb, 0x2f, 0x92, 0x51, 0xd1, 0x3f, 0xd1, + 0x01, 0x67, 0xbf, 0x51, 0x80, 0xab, 0x9e, 0xf4, 0x11, 0x3d, 0x8d, 0xa0, 0xb3, 0x97, 0x84, 0x39, + 0x63, 0x88, 0x40, 0x1c, 0x5e, 0x16, 0xf2, 0x37, 0x0a, 0x30, 0xd7, 0x4d, 0xd9, 0x39, 0x2e, 0x70, + 0xcf, 0x25, 0xdf, 0x47, 0xb1, 0x35, 0x44, 0x1e, 0x0e, 0x2d, 0x09, 0xf9, 0x16, 0x34, 0x7a, 0xac, + 0x5d, 0xf8, 0x01, 0x75, 0x0c, 0x15, 0x20, 0x3a, 0x7a, 0x4f, 0xda, 0x8e, 0xb0, 0xc2, 0x3d, 0x57, + 0x5c, 0x3f, 0x88, 0x11, 0x30, 0x2e, 0xf1, 0x19, 0xdf, 0x64, 0x79, 0x13, 0x6a, 0x3e, 0x0d, 0x02, + 0xcb, 0xe9, 0x88, 0xf5, 0x46, 0x5d, 0xf4, 0x95, 0xb6, 0x4c, 0xc3, 0x90, 0x4a, 0x7e, 0x02, 0xea, + 0xdc, 0xe5, 0xb4, 0xec, 0x75, 0xfc, 0xb9, 0x3a, 0x0f, 0x17, 0x9b, 0x12, 0x01, 0x70, 0x32, 0x11, + 0x23, 0x3a, 0xf9, 0x22, 0x4c, 0xee, 0xf1, 0xee, 0x2b, 0xf7, 0xdd, 0x0b, 0x1b, 0x17, 0xd7, 0xd6, + 0x5a, 0xb1, 0x74, 0x4c, 0x70, 0x91, 0x25, 0x00, 0x1a, 0xfa, 0xe5, 0xd2, 0xf6, 0xac, 0xc8, 0x63, + 0x87, 0x31, 0x2e, 0xf2, 0x12, 0x94, 0x02, 0xdb, 0xe7, 0x36, 0xac, 0x5a, 0xb4, 0x04, 0xdd, 0xd9, + 0x6c, 0x23, 0x4b, 0xd7, 0xfe, 0xb0, 0x00, 
0x33, 0xa9, 0xed, 0x48, 0x2c, 0x4b, 0xdf, 0xb3, 0xe5, + 0x30, 0x12, 0x66, 0xd9, 0xc5, 0x4d, 0x64, 0xe9, 0xe4, 0x3d, 0xa9, 0x96, 0x17, 0x73, 0x1e, 0x31, + 0x72, 0x57, 0x0f, 0x7c, 0xa6, 0x87, 0x0f, 0x68, 0xe4, 0xdc, 0xcd, 0x17, 0x95, 0x47, 0xce, 0x03, + 0x31, 0x37, 0x5f, 0x44, 0xc3, 0x04, 0x67, 0xca, 0xe0, 0x57, 0x3e, 0x8b, 0xc1, 0x4f, 0xfb, 0xd5, + 0x62, 0xac, 0x06, 0xa4, 0x66, 0xff, 0x84, 0x1a, 0x78, 0x99, 0x4d, 0xa0, 0xe1, 0xe4, 0x5e, 0x8f, + 0xcf, 0x7f, 0x7c, 0x32, 0x96, 0x54, 0xf2, 0xb6, 0xa8, 0xfb, 0x52, 0xce, 0x8d, 0xdc, 0x3b, 0x9b, + 0x6d, 0x11, 0x5d, 0xa5, 0xfe, 0x5a, 0xf8, 0x0b, 0xca, 0x17, 0xf4, 0x0b, 0xb4, 0x7f, 0x56, 0x82, + 0xc6, 0x5b, 0xee, 0xde, 0x27, 0x24, 0x82, 0x3a, 0x7b, 0x9a, 0x2a, 0x7e, 0x8c, 0xd3, 0xd4, 0x2e, + 0xbc, 0x18, 0x04, 0x76, 0x9b, 0x1a, 0xae, 0x63, 0xfa, 0xcb, 0xfb, 0x01, 0xf5, 0xd6, 0x2c, 0xc7, + 0xf2, 0x0f, 0xa8, 0x29, 0xdd, 0x49, 0x9f, 0x39, 0x3d, 0x69, 0xbe, 0xb8, 0xb3, 0xb3, 0x99, 0xc5, + 0x82, 0xc3, 0xf2, 0xf2, 0x61, 0x43, 0xec, 0x1d, 0xe5, 0x7b, 0xab, 0x64, 0xcc, 0x8d, 0x18, 0x36, + 0x62, 0xe9, 0x98, 0xe0, 0xd2, 0xfe, 0x43, 0x11, 0xea, 0xe1, 0xe1, 0x11, 0xe4, 0xf3, 0x50, 0xdd, + 0xf3, 0xdc, 0x43, 0xea, 0x09, 0xcf, 0x9d, 0xdc, 0x5b, 0xd5, 0x12, 0x49, 0xa8, 0x68, 0xe4, 0x73, + 0x50, 0x09, 0xdc, 0x9e, 0x65, 0xa4, 0x0d, 0x6a, 0x3b, 0x2c, 0x11, 0x05, 0x8d, 0x77, 0x04, 0x1e, + 0x56, 0xc8, 0xbf, 0xaa, 0x16, 0xeb, 0x08, 0x3c, 0x15, 0x25, 0x55, 0x75, 0x84, 0xf2, 0xd8, 0x3b, + 0xc2, 0xcb, 0xa1, 0x0a, 0x58, 0x49, 0xf6, 0xc4, 0x94, 0xd2, 0xf6, 0x2e, 0x94, 0x7d, 0xdd, 0xb7, + 0xe5, 0xf4, 0x96, 0xe3, 0xbc, 0x86, 0xe5, 0xf6, 0xa6, 0x3c, 0xaf, 0x61, 0xb9, 0xbd, 0x89, 0x1c, + 0x54, 0xfb, 0xed, 0x12, 0x34, 0x44, 0xfd, 0x8a, 0xd1, 0x63, 0x9c, 0x35, 0xfc, 0x06, 0x0f, 0xb9, + 0xf0, 0xfb, 0x5d, 0xea, 0x71, 0x73, 0x94, 0x1c, 0x0c, 0xe3, 0x7e, 0x84, 0x88, 0x18, 0x86, 0x5d, + 0x44, 0x49, 0x3f, 0xda, 0x55, 0xcf, 0xa6, 0x0a, 0x7e, 0x00, 0x8a, 0xd4, 0x71, 0x65, 0x24, 0x65, + 0x38, 0x55, 0xdc, 0x89, 0xd1, 0x30, 0xc1, 0xa9, 0xfd, 0x8f, 0x22, 0xd4, 0x37, 
0xad, 0x7d, 0x6a, + 0x1c, 0x1b, 0x36, 0x25, 0xdf, 0x80, 0x79, 0x93, 0xda, 0x94, 0xcd, 0x98, 0xeb, 0x9e, 0x6e, 0xd0, + 0x6d, 0xea, 0x59, 0xfc, 0x00, 0x27, 0xd6, 0x07, 0x65, 0x80, 0xeb, 0xf5, 0xd3, 0x93, 0xe6, 0xfc, + 0xea, 0x50, 0x2e, 0x7c, 0x0c, 0x02, 0xd9, 0x80, 0x49, 0x93, 0xfa, 0x96, 0x47, 0xcd, 0xed, 0xd8, + 0x82, 0xe8, 0xf3, 0xaa, 0x9c, 0xab, 0x31, 0xda, 0xa3, 0x93, 0xe6, 0x94, 0x32, 0x84, 0x8a, 0x95, + 0x51, 0x22, 0x2b, 0x1b, 0x5a, 0x7a, 0x7a, 0xdf, 0xa7, 0x19, 0xe5, 0x2c, 0xf1, 0x72, 0xf2, 0xa1, + 0x65, 0x3b, 0x9b, 0x05, 0x87, 0xe5, 0x25, 0x7b, 0x30, 0xc7, 0xcb, 0x9f, 0x85, 0x5b, 0xe6, 0xb8, + 0x2f, 0x9f, 0x9e, 0x34, 0xb5, 0x55, 0xda, 0xf3, 0xa8, 0xa1, 0x07, 0xd4, 0x5c, 0x1d, 0xc2, 0x8d, + 0x43, 0x71, 0xb4, 0x0a, 0x94, 0x36, 0xdd, 0x8e, 0xf6, 0xdd, 0x12, 0x84, 0x27, 0x8a, 0x91, 0x3f, + 0x5b, 0x80, 0x86, 0xee, 0x38, 0x6e, 0x20, 0x4f, 0xeb, 0x12, 0xd1, 0x04, 0x98, 0xfb, 0xe0, 0xb2, + 0x85, 0xe5, 0x08, 0x54, 0x38, 0xa2, 0x43, 0xe7, 0x78, 0x8c, 0x82, 0x71, 0xd9, 0xa4, 0x9f, 0xf2, + 0x8d, 0x6f, 0xe5, 0x2f, 0xc5, 0x19, 0x3c, 0xe1, 0xf3, 0x5f, 0x85, 0xd9, 0x74, 0x61, 0xcf, 0xe3, + 0xda, 0xca, 0x15, 0x64, 0x50, 0x04, 0x88, 0xe2, 0x63, 0x9e, 0x82, 0x41, 0xce, 0x4a, 0x18, 0xe4, + 0x46, 0x3f, 0xd6, 0x21, 0x2a, 0xf4, 0x50, 0x23, 0xdc, 0x37, 0x53, 0x46, 0xb8, 0x8d, 0x71, 0x08, + 0x7b, 0xbc, 0xe1, 0x6d, 0x0f, 0x2e, 0x45, 0xbc, 0xd1, 0xe8, 0x72, 0x27, 0xd5, 0xfb, 0x85, 0x5e, + 0xf9, 0x85, 0x21, 0xbd, 0x7f, 0x26, 0x16, 0xb0, 0x34, 0xd8, 0xff, 0xb5, 0xbf, 0x59, 0x80, 0xd9, + 0xb8, 0x10, 0xbe, 0x07, 0xfd, 0x4b, 0x30, 0xe5, 0x51, 0xdd, 0x6c, 0xe9, 0x81, 0x71, 0xc0, 0x43, + 0xe3, 0x0b, 0x3c, 0x96, 0x9d, 0xef, 0x96, 0xc3, 0x38, 0x01, 0x93, 0x7c, 0x44, 0x87, 0x06, 0x4b, + 0xd8, 0xb1, 0xba, 0xd4, 0xed, 0x07, 0x23, 0x5a, 0x99, 0xf9, 0x02, 0x0f, 0x23, 0x18, 0x8c, 0x63, + 0x6a, 0x1f, 0x15, 0x60, 0x3a, 0x5e, 0xe0, 0x0b, 0xb7, 0x40, 0x1e, 0x24, 0x2d, 0x90, 0x2b, 0x63, + 0xf8, 0xef, 0x43, 0xac, 0x8e, 0xdf, 0x6e, 0xc4, 0x3f, 0x8d, 0x5b, 0x1a, 0xe3, 0xc6, 0x95, 0xc2, + 0x63, 0x8d, 0x2b, 
0x9f, 0xfc, 0x83, 0xaa, 0x86, 0xad, 0x0a, 0xca, 0xcf, 0xf0, 0xaa, 0xe0, 0xe3, + 0x3c, 0xed, 0x2a, 0x76, 0x62, 0xd3, 0x44, 0x8e, 0x13, 0x9b, 0xba, 0xe1, 0x89, 0x4d, 0xd5, 0xb1, + 0x0d, 0x6c, 0x67, 0x39, 0xb5, 0xa9, 0xf6, 0x54, 0x4f, 0x6d, 0xaa, 0x5f, 0xd4, 0xa9, 0x4d, 0x90, + 0xf7, 0xd4, 0xa6, 0xef, 0x14, 0x60, 0xda, 0x4c, 0xec, 0x30, 0x96, 0x7b, 0xfb, 0x47, 0x9f, 0xce, + 0x92, 0x1b, 0x96, 0xc5, 0x16, 0xb3, 0x64, 0x1a, 0xa6, 0x44, 0x66, 0x9d, 0x95, 0x34, 0xf9, 0xb1, + 0x9c, 0x95, 0x44, 0x7e, 0x09, 0xea, 0xb6, 0x9a, 0xeb, 0xe4, 0x09, 0x92, 0x9b, 0x63, 0x69, 0x92, + 0x12, 0x33, 0xda, 0xc5, 0x10, 0x26, 0x61, 0x24, 0x51, 0xfb, 0xfd, 0x6a, 0x7c, 0x42, 0x7c, 0xda, + 0x3e, 0x8e, 0xd7, 0x92, 0x3e, 0x8e, 0x1b, 0x69, 0x1f, 0xc7, 0xc0, 0x6c, 0x2e, 0xfd, 0x1c, 0x3f, + 0x19, 0x9b, 0x27, 0x4a, 0xfc, 0x90, 0xa6, 0xb0, 0xc9, 0x65, 0xcc, 0x15, 0xcb, 0x30, 0x23, 0x95, + 0x00, 0x45, 0xe4, 0x83, 0xec, 0x54, 0x14, 0x95, 0xb6, 0x9a, 0x24, 0x63, 0x9a, 0x9f, 0x09, 0xf4, + 0xd5, 0x59, 0xbd, 0x62, 0xc5, 0x16, 0xb5, 0x71, 0x75, 0x8e, 0x6e, 0xc8, 0xc1, 0x56, 0x77, 0x1e, + 0xd5, 0x7d, 0xe9, 0xa9, 0x88, 0xad, 0xee, 0x90, 0xa7, 0xa2, 0xa4, 0xc6, 0xdd, 0x35, 0xd5, 0x27, + 0xb8, 0x6b, 0x74, 0x68, 0xd8, 0xba, 0x1f, 0x88, 0xc6, 0x64, 0xca, 0xd1, 0xe4, 0x8f, 0x9d, 0x6d, + 0xde, 0x67, 0xba, 0x44, 0xa4, 0xc0, 0x6f, 0x46, 0x30, 0x18, 0xc7, 0x24, 0x26, 0x4c, 0xb2, 0x57, + 0x3e, 0xb2, 0x98, 0xcb, 0x81, 0x3c, 0xd1, 0xee, 0x3c, 0x32, 0xc2, 0xa5, 0xe3, 0x66, 0x0c, 0x07, + 0x13, 0xa8, 0x43, 0x3c, 0x3a, 0x30, 0x8a, 0x47, 0x87, 0xfc, 0xac, 0x50, 0xdc, 0x8e, 0xc3, 0xdf, + 0xda, 0xe0, 0xbf, 0x35, 0x8c, 0x68, 0xc5, 0x38, 0x11, 0x93, 0xbc, 0xac, 0x55, 0xf4, 0x65, 0x35, + 0xa8, 0xec, 0x93, 0xc9, 0x56, 0xb1, 0x9b, 0x24, 0x63, 0x9a, 0x9f, 0x6c, 0xc3, 0xe5, 0x30, 0x29, + 0x5e, 0x8c, 0x29, 0x8e, 0x13, 0x86, 0x18, 0xee, 0x66, 0xf0, 0x60, 0x66, 0x4e, 0xbe, 0x67, 0xa7, + 0xef, 0x79, 0xd4, 0x09, 0x6e, 0xeb, 0xfe, 0x81, 0x8c, 0x55, 0x8c, 0xf6, 0xec, 0x44, 0x24, 0x8c, + 0xf3, 0x91, 0x25, 0x00, 0x01, 0xc7, 0x73, 0xcd, 0x24, 
0xc3, 0x81, 0x77, 0x43, 0x0a, 0xc6, 0xb8, + 0xb4, 0xef, 0xd4, 0xa1, 0x71, 0x57, 0x0f, 0xac, 0x23, 0xca, 0xdd, 0xaf, 0x17, 0xe3, 0x03, 0xfb, + 0x2b, 0x05, 0xb8, 0x92, 0x8c, 0xb1, 0xbd, 0x40, 0x47, 0x18, 0x3f, 0xe3, 0x09, 0x33, 0xa5, 0xe1, + 0x90, 0x52, 0x70, 0x97, 0xd8, 0x40, 0xc8, 0xee, 0x45, 0xbb, 0xc4, 0xda, 0xc3, 0x04, 0xe2, 0xf0, + 0xb2, 0x7c, 0x52, 0x5c, 0x62, 0xcf, 0xf6, 0xa1, 0xa4, 0x29, 0x87, 0x5d, 0xf5, 0x99, 0x71, 0xd8, + 0xd5, 0x9e, 0x09, 0xad, 0xbf, 0x17, 0x73, 0xd8, 0xd5, 0x73, 0x06, 0x8e, 0xc9, 0x6d, 0x29, 0x02, + 0x6d, 0x98, 0xe3, 0x8f, 0x9f, 0x28, 0xa1, 0x1c, 0x29, 0x4c, 0x59, 0xde, 0xd3, 0x7d, 0xcb, 0x90, + 0x6a, 0x47, 0x8e, 0x43, 0x98, 0xd5, 0xe1, 0x8c, 0x22, 0xbe, 0x84, 0xbf, 0xa2, 0xc0, 0x8e, 0xce, + 0xa2, 0x2c, 0xe6, 0x3a, 0x8b, 0x92, 0xac, 0x40, 0xd9, 0x39, 0xa4, 0xc7, 0xe7, 0x3b, 0x9b, 0x81, + 0x2f, 0x02, 0xef, 0xde, 0xa1, 0xc7, 0xc8, 0x33, 0x6b, 0xdf, 0x2b, 0x02, 0xb0, 0xcf, 0x3f, 0x9b, + 0xeb, 0xec, 0xc7, 0xa1, 0xea, 0xf7, 0xb9, 0x61, 0x48, 0x2a, 0x4c, 0x51, 0xb4, 0x9d, 0x48, 0x46, + 0x45, 0x27, 0x9f, 0x83, 0xca, 0x37, 0xfb, 0xb4, 0xaf, 0xe2, 0x40, 0xc2, 0x75, 0xc3, 0xd7, 0x58, + 0x22, 0x0a, 0xda, 0xc5, 0x99, 0xb7, 0x95, 0x8b, 0xad, 0x72, 0x51, 0x2e, 0xb6, 0x3a, 0x54, 0xef, + 0xba, 0x3c, 0x78, 0x57, 0xfb, 0xaf, 0x45, 0x80, 0x28, 0x38, 0x92, 0xfc, 0x7a, 0x01, 0x5e, 0x08, + 0x3b, 0x5c, 0x20, 0x96, 0x7f, 0xfc, 0xdc, 0xf3, 0xdc, 0xee, 0xb6, 0xac, 0xce, 0xce, 0x47, 0xa0, + 0xed, 0x2c, 0x71, 0x98, 0x5d, 0x0a, 0x82, 0x50, 0xa3, 0xdd, 0x5e, 0x70, 0xbc, 0x6a, 0x79, 0xb2, + 0x05, 0x66, 0xc6, 0xe0, 0xde, 0x92, 0x3c, 0x22, 0xab, 0xb4, 0x51, 0xf0, 0x4e, 0xa4, 0x28, 0x18, + 0xe2, 0x90, 0x03, 0xa8, 0x39, 0xee, 0x7b, 0x3e, 0xab, 0x0e, 0xd9, 0x1c, 0xdf, 0x1c, 0xbd, 0xca, + 0x45, 0xb5, 0x0a, 0xb7, 0x8b, 0x7c, 0xc1, 0xaa, 0x23, 0x2b, 0xfb, 0xd7, 0x8a, 0x70, 0x29, 0xa3, + 0x1e, 0xc8, 0x9b, 0x30, 0x2b, 0xe3, 0x50, 0xa3, 0x0b, 0x00, 0x0a, 0xd1, 0x05, 0x00, 0xed, 0x14, + 0x0d, 0x07, 0xb8, 0xc9, 0x7b, 0x00, 0xba, 0x61, 0x50, 0xdf, 0xdf, 0x72, 0x4d, 0xb5, 0x1e, 
0x78, + 0x83, 0xa9, 0x2f, 0xcb, 0x61, 0xea, 0xa3, 0x93, 0xe6, 0x4f, 0x65, 0x85, 0x96, 0xa7, 0xea, 0x39, + 0xca, 0x80, 0x31, 0x48, 0xf2, 0x0d, 0x00, 0x61, 0x03, 0x08, 0x4f, 0xbf, 0x78, 0x82, 0xe1, 0x6c, + 0x41, 0x1d, 0xae, 0xb6, 0xf0, 0xb5, 0xbe, 0xee, 0x04, 0x56, 0x70, 0x2c, 0x0e, 0x1b, 0xba, 0x1f, + 0xa2, 0x60, 0x0c, 0x51, 0xfb, 0xc7, 0x45, 0xa8, 0x29, 0xd7, 0xc3, 0x53, 0xb0, 0x05, 0x77, 0x12, + 0xb6, 0xe0, 0x31, 0x05, 0x93, 0x67, 0x59, 0x82, 0xdd, 0x94, 0x25, 0x78, 0x3d, 0xbf, 0xa8, 0xc7, + 0xdb, 0x81, 0x7f, 0xab, 0x08, 0xd3, 0x8a, 0x35, 0xaf, 0x85, 0xf6, 0x2b, 0x30, 0x23, 0x82, 0x40, + 0xb6, 0xf4, 0x87, 0xe2, 0xdc, 0x25, 0x5e, 0x61, 0x65, 0x11, 0xbf, 0xdd, 0x4a, 0x92, 0x30, 0xcd, + 0xcb, 0x9a, 0xb5, 0x48, 0xda, 0x65, 0x8b, 0x30, 0xe1, 0x36, 0x16, 0xeb, 0x4d, 0xde, 0xac, 0x5b, + 0x29, 0x1a, 0x0e, 0x70, 0xa7, 0x4d, 0xc4, 0xe5, 0x0b, 0x30, 0x11, 0xff, 0xdb, 0x02, 0x4c, 0x46, + 0xf5, 0x75, 0xe1, 0x06, 0xe2, 0xfd, 0xa4, 0x81, 0x78, 0x39, 0x77, 0x73, 0x18, 0x62, 0x1e, 0xfe, + 0x0b, 0x55, 0x48, 0xec, 0x69, 0x20, 0x7b, 0x30, 0x6f, 0x65, 0x46, 0x66, 0xc6, 0x46, 0x9b, 0x70, + 0x93, 0xfe, 0xc6, 0x50, 0x4e, 0x7c, 0x0c, 0x0a, 0xe9, 0x43, 0xed, 0x88, 0x7a, 0x81, 0x65, 0x50, + 0xf5, 0x7d, 0xeb, 0xb9, 0x55, 0x32, 0x69, 0x04, 0x0f, 0xeb, 0xf4, 0xbe, 0x14, 0x80, 0xa1, 0x28, + 0xb2, 0x07, 0x15, 0x6a, 0x76, 0xa8, 0x3a, 0x09, 0x2b, 0xe7, 0xc9, 0xc4, 0x61, 0x7d, 0xb2, 0x37, + 0x1f, 0x05, 0x34, 0xf1, 0xe3, 0x86, 0xa6, 0x72, 0x4e, 0x05, 0xeb, 0x8c, 0xe6, 0x25, 0x72, 0x18, + 0x5a, 0x5b, 0x2b, 0x63, 0x1a, 0x3c, 0x1e, 0x63, 0x6b, 0xf5, 0xa1, 0xfe, 0x40, 0x0f, 0xa8, 0xd7, + 0xd5, 0xbd, 0x43, 0xb9, 0xda, 0x18, 0xfd, 0x0b, 0xdf, 0x56, 0x48, 0xd1, 0x17, 0x86, 0x49, 0x18, + 0xc9, 0x21, 0x2e, 0xd4, 0x03, 0xa9, 0x3e, 0x2b, 0x93, 0xf2, 0xe8, 0x42, 0x95, 0x22, 0xee, 0xcb, + 0xbd, 0x0d, 0xea, 0x15, 0x23, 0x19, 0xe4, 0x28, 0x71, 0x8c, 0xbd, 0xb8, 0xbc, 0xa0, 0x95, 0xc3, + 0x35, 0x21, 0xa1, 0xa2, 0xe9, 0x26, 0xfb, 0x38, 0x7c, 0xed, 0x7f, 0x55, 0xa2, 0x61, 0xf9, 0x69, + 0xdb, 0x09, 0xbf, 0x98, 0xb4, 
0x13, 0x5e, 0x4f, 0xdb, 0x09, 0x53, 0x3e, 0xff, 0xf3, 0x47, 0x43, + 0xa7, 0xcc, 0x6b, 0xe5, 0x0b, 0x30, 0xaf, 0xbd, 0x02, 0x8d, 0x23, 0x3e, 0x12, 0x88, 0x63, 0xb5, + 0x2a, 0x7c, 0x1a, 0xe1, 0x23, 0xfb, 0xfd, 0x28, 0x19, 0xe3, 0x3c, 0x2c, 0x8b, 0xbc, 0xb8, 0x27, + 0x3c, 0xc9, 0x5a, 0x66, 0x69, 0x47, 0xc9, 0x18, 0xe7, 0xe1, 0x81, 0x94, 0x96, 0x73, 0x28, 0x32, + 0x54, 0x79, 0x06, 0x11, 0x48, 0xa9, 0x12, 0x31, 0xa2, 0x93, 0x9b, 0x50, 0xeb, 0x9b, 0xfb, 0x82, + 0xb7, 0xc6, 0x79, 0xb9, 0x86, 0xb9, 0xbb, 0xba, 0x26, 0x8f, 0xf9, 0x52, 0x54, 0x56, 0x92, 0xae, + 0xde, 0x53, 0x04, 0xbe, 0x36, 0x94, 0x25, 0xd9, 0x8a, 0x92, 0x31, 0xce, 0x43, 0x7e, 0x06, 0xa6, + 0x3d, 0x6a, 0xf6, 0x0d, 0x1a, 0xe6, 0x02, 0x9e, 0x4b, 0x9e, 0x7f, 0x1a, 0xa7, 0x60, 0x8a, 0x73, + 0x88, 0x91, 0xb0, 0x31, 0x92, 0x91, 0xf0, 0xab, 0x30, 0x6d, 0x7a, 0xba, 0xe5, 0x50, 0xf3, 0x9e, + 0xc3, 0x03, 0x3b, 0x64, 0x38, 0x67, 0x68, 0xa0, 0x5f, 0x4d, 0x50, 0x31, 0xc5, 0xad, 0xfd, 0xf3, + 0x22, 0x54, 0xc4, 0xa9, 0xac, 0x1b, 0x70, 0xc9, 0x72, 0xac, 0xc0, 0xd2, 0xed, 0x55, 0x6a, 0xeb, + 0xc7, 0xf1, 0x00, 0x97, 0x4a, 0xeb, 0x45, 0xb6, 0xd0, 0xde, 0x18, 0x24, 0x63, 0x56, 0x1e, 0x56, + 0x39, 0x81, 0x98, 0xbe, 0x15, 0x8a, 0xb0, 0xa3, 0x89, 0x23, 0xc1, 0x13, 0x14, 0x4c, 0x71, 0x32, + 0x65, 0xa8, 0x37, 0x10, 0xb9, 0x52, 0x11, 0xca, 0x50, 0x32, 0x98, 0x24, 0xc9, 0xc7, 0x95, 0xf4, + 0x3e, 0x57, 0x88, 0xc3, 0x4d, 0x53, 0x32, 0x08, 0x4e, 0x28, 0xe9, 0x29, 0x1a, 0x0e, 0x70, 0x33, + 0x84, 0x7d, 0xdd, 0xb2, 0xfb, 0x1e, 0x8d, 0x10, 0x2a, 0x11, 0xc2, 0x5a, 0x8a, 0x86, 0x03, 0xdc, + 0xda, 0x7f, 0x2f, 0x00, 0x19, 0xdc, 0x06, 0x42, 0x0e, 0x60, 0xc2, 0xe1, 0xb6, 0xc8, 0xdc, 0x37, + 0x11, 0xc4, 0x4c, 0x9a, 0x62, 0x92, 0x90, 0x09, 0x12, 0x9f, 0x38, 0x50, 0xa3, 0x0f, 0x03, 0xea, + 0x39, 0xe1, 0xb6, 0xb0, 0xf1, 0xdc, 0x7a, 0x20, 0xd6, 0x66, 0x12, 0x19, 0x43, 0x19, 0xda, 0xef, + 0x15, 0xa1, 0x11, 0xe3, 0x7b, 0xd2, 0x12, 0x9f, 0x9f, 0x4c, 0x21, 0x4c, 0x80, 0xbb, 0x9e, 0x2d, + 0xc7, 0xbb, 0xd8, 0xc9, 0x14, 0x92, 0x84, 0x9b, 0x18, 0xe7, 0x23, 
0x4b, 0x00, 0x5d, 0xdd, 0x0f, + 0xa8, 0xc7, 0x75, 0xa1, 0xd4, 0x79, 0x10, 0x5b, 0x21, 0x05, 0x63, 0x5c, 0xe4, 0x86, 0xbc, 0xb7, + 0xa2, 0x9c, 0x3c, 0xbf, 0x73, 0xc8, 0xa5, 0x14, 0x95, 0x31, 0x5c, 0x4a, 0x41, 0x3a, 0x30, 0xab, + 0x4a, 0xad, 0xa8, 0xe7, 0x3b, 0xdd, 0x51, 0x34, 0xd4, 0x14, 0x04, 0x0e, 0x80, 0x6a, 0xdf, 0x2b, + 0xc0, 0x54, 0xc2, 0x00, 0x25, 0x4e, 0xde, 0x54, 0x9b, 0x98, 0x12, 0x27, 0x6f, 0xc6, 0xf6, 0x1e, + 0xbd, 0x0c, 0x13, 0xa2, 0x82, 0xd2, 0xb1, 0xc9, 0xa2, 0x0a, 0x51, 0x52, 0xd9, 0xcc, 0x22, 0x4d, + 0xdc, 0xe9, 0x99, 0x45, 0xda, 0xc0, 0x51, 0xd1, 0x85, 0xe7, 0x48, 0x94, 0x4e, 0xd6, 0x74, 0xcc, + 0x73, 0x24, 0xd2, 0x31, 0xe4, 0xd0, 0xfe, 0x3e, 0x2f, 0x77, 0xe0, 0x1d, 0x87, 0x2b, 0xeb, 0x0e, + 0x54, 0x65, 0x3c, 0xaa, 0xec, 0x1a, 0x6f, 0xe6, 0xb0, 0x8a, 0x71, 0x1c, 0x19, 0x51, 0xa9, 0x1b, + 0x87, 0xf7, 0xf6, 0xf7, 0x51, 0xa1, 0x93, 0x5b, 0x50, 0x77, 0x1d, 0xd9, 0x83, 0xe5, 0xe7, 0x7f, + 0x81, 0xcd, 0x1c, 0xf7, 0x54, 0xe2, 0xa3, 0x93, 0xe6, 0x95, 0xf0, 0x25, 0x51, 0x48, 0x8c, 0x72, + 0x6a, 0x7f, 0xa6, 0x00, 0x2f, 0xa0, 0x6b, 0xdb, 0x96, 0xd3, 0x49, 0x7a, 0x3e, 0x89, 0x0d, 0xd3, + 0x5d, 0xfd, 0xe1, 0xae, 0xa3, 0x1f, 0xe9, 0x96, 0xad, 0xef, 0xd9, 0xf4, 0x89, 0x2b, 0xe3, 0x7e, + 0x60, 0xd9, 0x0b, 0xe2, 0x1e, 0xcf, 0x85, 0x0d, 0x27, 0xb8, 0xe7, 0xb5, 0x03, 0xcf, 0x72, 0x3a, + 0x62, 0x94, 0xdc, 0x4a, 0x60, 0x61, 0x0a, 0x5b, 0xfb, 0xfd, 0x12, 0xf0, 0x58, 0x47, 0xf2, 0x25, + 0xa8, 0x77, 0xa9, 0x71, 0xa0, 0x3b, 0x96, 0xaf, 0xce, 0x30, 0xbe, 0xca, 0xbe, 0x6b, 0x4b, 0x25, + 0x3e, 0x62, 0xbf, 0x62, 0xb9, 0xbd, 0xc9, 0xb7, 0x1d, 0x45, 0xbc, 0xc4, 0x80, 0x89, 0x8e, 0xef, + 0xeb, 0x3d, 0x2b, 0x77, 0x88, 0x89, 0x38, 0x33, 0x56, 0x0c, 0x47, 0xe2, 0x19, 0x25, 0x34, 0x31, + 0xa0, 0xd2, 0xb3, 0x75, 0xcb, 0xc9, 0x7d, 0xef, 0x1c, 0xfb, 0x82, 0x6d, 0x86, 0x24, 0x4c, 0x95, + 0xfc, 0x11, 0x05, 0x36, 0xe9, 0x43, 0xc3, 0x37, 0x3c, 0xbd, 0xeb, 0x1f, 0xe8, 0x4b, 0xaf, 0xbe, + 0x96, 0x5b, 0xf9, 0x8f, 0x44, 0x09, 0x5d, 0x64, 0x05, 0x97, 0xb7, 0xda, 0xb7, 0x97, 0x97, 0x5e, + 0x7d, 
0x0d, 0xe3, 0x72, 0xe2, 0x62, 0x5f, 0x7d, 0x65, 0x49, 0x8e, 0x20, 0x63, 0x17, 0xfb, 0xea, + 0x2b, 0x4b, 0x18, 0x97, 0xa3, 0xfd, 0xef, 0x02, 0xd4, 0x43, 0x5e, 0xb2, 0x0b, 0xc0, 0xc6, 0x32, + 0x79, 0xca, 0xeb, 0xb9, 0xee, 0xe8, 0xe1, 0xd6, 0x9e, 0xdd, 0x30, 0x33, 0xc6, 0x80, 0x32, 0x8e, + 0xc1, 0x2d, 0x8e, 0xfb, 0x18, 0xdc, 0x45, 0xa8, 0x1f, 0xe8, 0x8e, 0xe9, 0x1f, 0xe8, 0x87, 0x54, + 0x86, 0x88, 0x87, 0x4b, 0x91, 0xdb, 0x8a, 0x80, 0x11, 0x8f, 0xf6, 0x0f, 0x27, 0x40, 0xc4, 0x85, + 0xb0, 0x41, 0xc7, 0xb4, 0x7c, 0xb1, 0x91, 0xa3, 0xc0, 0x73, 0x86, 0x83, 0xce, 0xaa, 0x4c, 0xc7, + 0x90, 0x83, 0x5c, 0x85, 0x52, 0xd7, 0x72, 0xa4, 0x06, 0xc2, 0x0d, 0xb9, 0x5b, 0x96, 0x83, 0x2c, + 0x8d, 0x93, 0xf4, 0x87, 0x52, 0xc3, 0x10, 0x24, 0xfd, 0x21, 0xb2, 0x34, 0xf2, 0x15, 0x98, 0xb1, + 0x5d, 0xf7, 0x90, 0x0d, 0x1f, 0xf1, 0x50, 0xd7, 0x29, 0x61, 0x5a, 0xd9, 0x4c, 0x92, 0x30, 0xcd, + 0x4b, 0x76, 0xe1, 0xc5, 0x0f, 0xa8, 0xe7, 0xca, 0xf1, 0xb2, 0x6d, 0x53, 0xda, 0x53, 0x30, 0x42, + 0x35, 0xe6, 0x91, 0xb8, 0x3f, 0x9f, 0xcd, 0x82, 0xc3, 0xf2, 0xf2, 0xbd, 0x03, 0xba, 0xd7, 0xa1, + 0xc1, 0xb6, 0xe7, 0x32, 0xdd, 0xc5, 0x72, 0x3a, 0x0a, 0x76, 0x22, 0x82, 0xdd, 0xc9, 0x66, 0xc1, + 0x61, 0x79, 0xc9, 0x3b, 0x30, 0x27, 0x48, 0x42, 0x6d, 0x59, 0x16, 0xc3, 0x8c, 0x65, 0xab, 0xeb, + 0x5a, 0xa7, 0x84, 0xbf, 0x6c, 0x67, 0x08, 0x0f, 0x0e, 0xcd, 0x4d, 0xde, 0x82, 0x59, 0xe5, 0x2d, + 0xdd, 0xa6, 0x5e, 0x3b, 0x8c, 0x15, 0x9a, 0x52, 0x21, 0xd3, 0x2a, 0x64, 0x18, 0x53, 0x5c, 0x38, + 0x90, 0x8f, 0x20, 0x5c, 0xe1, 0x01, 0x41, 0xbb, 0xbd, 0x15, 0xd7, 0xb5, 0x4d, 0xf7, 0x81, 0xa3, + 0xbe, 0x5d, 0x28, 0xec, 0xdc, 0x41, 0xda, 0xce, 0xe4, 0xc0, 0x21, 0x39, 0xd9, 0x97, 0x73, 0xca, + 0xaa, 0xfb, 0xc0, 0x49, 0xa3, 0x42, 0xf4, 0xe5, 0xed, 0x21, 0x3c, 0x38, 0x34, 0x37, 0x59, 0x03, + 0x92, 0xfe, 0x82, 0xdd, 0x9e, 0x74, 0xe1, 0x5f, 0x11, 0x07, 0x36, 0xa5, 0xa9, 0x98, 0x91, 0x83, + 0x6c, 0xc2, 0xe5, 0x74, 0x2a, 0x13, 0x27, 0xbd, 0xf9, 0xfc, 0xa8, 0x66, 0xcc, 0xa0, 0x63, 0x66, + 0x2e, 0xed, 0x1f, 0x15, 0x61, 0x2a, 0x71, 
0xc2, 0xc7, 0x33, 0x77, 0x92, 0x02, 0x5b, 0x3c, 0x74, + 0xfd, 0xce, 0xc6, 0xea, 0x6d, 0xaa, 0x9b, 0xd4, 0x53, 0x1b, 0x48, 0xea, 0x72, 0x5a, 0x4c, 0x50, + 0x30, 0xc5, 0x49, 0xf6, 0xa1, 0x22, 0xfc, 0x04, 0x79, 0x6f, 0x7b, 0x52, 0x75, 0xc4, 0x9d, 0x05, + 0xf2, 0x8a, 0x34, 0xd7, 0xa3, 0x28, 0xe0, 0xb5, 0x00, 0x26, 0xe3, 0x1c, 0x6c, 0x20, 0x89, 0xd4, + 0xde, 0x6a, 0x42, 0xe5, 0xdd, 0x80, 0x52, 0x10, 0x8c, 0x7a, 0x46, 0x83, 0xf0, 0x3b, 0xed, 0x6c, + 0x22, 0xc3, 0xd0, 0xf6, 0xd9, 0xbf, 0xf3, 0x7d, 0xcb, 0x75, 0xe4, 0x81, 0xfd, 0xbb, 0x50, 0x95, + 0xab, 0xa7, 0x11, 0xcf, 0x98, 0xe0, 0xba, 0x92, 0x32, 0xbb, 0x2a, 0x2c, 0xed, 0xdf, 0x15, 0xa1, + 0x1e, 0x9a, 0x49, 0xce, 0x70, 0x10, 0xbe, 0x0b, 0xf5, 0x30, 0xa0, 0x31, 0xf7, 0x55, 0xb6, 0x51, + 0x9c, 0x1d, 0x5f, 0xd9, 0x87, 0xaf, 0x18, 0xc9, 0x88, 0x07, 0x4b, 0x96, 0x72, 0x04, 0x4b, 0xf6, + 0xa0, 0x1a, 0x78, 0x56, 0xa7, 0x23, 0x57, 0x09, 0x79, 0xa2, 0x25, 0xc3, 0xea, 0xda, 0x11, 0x80, + 0xb2, 0x66, 0xc5, 0x0b, 0x2a, 0x31, 0xda, 0xfb, 0x30, 0x9b, 0xe6, 0xe4, 0x2a, 0xb4, 0x71, 0x40, + 0xcd, 0xbe, 0xad, 0xea, 0x38, 0x52, 0xa1, 0x65, 0x3a, 0x86, 0x1c, 0xe4, 0x26, 0xd4, 0xd8, 0x6f, + 0xfa, 0xc0, 0x75, 0x94, 0x1a, 0xcb, 0x57, 0x23, 0x3b, 0x32, 0x0d, 0x43, 0xaa, 0xf6, 0x5f, 0x4a, + 0x70, 0x35, 0x32, 0x76, 0x6d, 0xe9, 0x8e, 0xde, 0x39, 0xc3, 0xfd, 0xa5, 0x9f, 0xee, 0xda, 0x3b, + 0xef, 0x6d, 0x26, 0xa5, 0x67, 0xe0, 0x36, 0x93, 0xff, 0x57, 0x04, 0x1e, 0x7c, 0x4d, 0xbe, 0x05, + 0x93, 0x7a, 0xec, 0xea, 0x6a, 0xf9, 0x3b, 0x6f, 0xe5, 0xfe, 0x9d, 0x3c, 0xc6, 0x3b, 0x0c, 0x80, + 0x8b, 0xa7, 0x62, 0x42, 0x20, 0x71, 0xa1, 0xb6, 0xaf, 0xdb, 0x36, 0xd3, 0x85, 0x72, 0x3b, 0xef, + 0x12, 0xc2, 0x79, 0x33, 0x5f, 0x93, 0xd0, 0x18, 0x0a, 0x21, 0xdf, 0x29, 0xc0, 0x94, 0x17, 0x5f, + 0xae, 0xc9, 0x1f, 0x92, 0x27, 0xb4, 0x23, 0x86, 0x16, 0x0f, 0xb7, 0x8b, 0xaf, 0x09, 0x93, 0x32, + 0xb5, 0xff, 0x5c, 0x80, 0xa9, 0xb6, 0x6d, 0x99, 0x96, 0xd3, 0xb9, 0xc0, 0xcb, 0x54, 0xee, 0x41, + 0xc5, 0xb7, 0x2d, 0x93, 0x8e, 0x38, 0x9b, 0x88, 0x79, 0x8c, 0x01, 0xa0, 0xc0, 
0x49, 0xde, 0xce, + 0x52, 0x3a, 0xc3, 0xed, 0x2c, 0x7f, 0x30, 0x01, 0x72, 0x1b, 0x01, 0xe9, 0x43, 0xbd, 0xa3, 0x2e, + 0x7d, 0x90, 0xdf, 0x78, 0x3b, 0xc7, 0x81, 0xa1, 0x89, 0xeb, 0x23, 0xc4, 0xd8, 0x1f, 0x26, 0x62, + 0x24, 0x89, 0xd0, 0xe4, 0x9d, 0xe9, 0xab, 0x39, 0xef, 0x4c, 0x17, 0xe2, 0x06, 0x6f, 0x4d, 0xd7, + 0xa1, 0x7c, 0x10, 0x04, 0x3d, 0xd9, 0x98, 0x46, 0xdf, 0x27, 0x12, 0x9d, 0x59, 0x25, 0x74, 0x22, + 0xf6, 0x8e, 0x1c, 0x9a, 0x89, 0x70, 0xf4, 0xf0, 0x66, 0xca, 0x95, 0x5c, 0x61, 0x24, 0x71, 0x11, + 0xec, 0x1d, 0x39, 0x34, 0xf9, 0x45, 0x68, 0x04, 0x9e, 0xee, 0xf8, 0xfb, 0xae, 0xd7, 0xa5, 0x9e, + 0x5c, 0xa3, 0xae, 0xe5, 0xb8, 0x36, 0x7c, 0x27, 0x42, 0x13, 0x26, 0xd9, 0x44, 0x12, 0xc6, 0xa5, + 0x91, 0x43, 0xa8, 0xf5, 0x4d, 0x51, 0x30, 0x69, 0x06, 0x5b, 0xce, 0x73, 0x13, 0x7c, 0x2c, 0x48, + 0x44, 0xbd, 0x61, 0x28, 0x20, 0x79, 0x09, 0x6b, 0x75, 0x5c, 0x97, 0xb0, 0xc6, 0x5b, 0x63, 0xd6, + 0x81, 0x3a, 0xa4, 0x2b, 0xf5, 0x5a, 0xa7, 0x23, 0x63, 0xdc, 0xd6, 0x72, 0xab, 0x9c, 0x42, 0x64, + 0x23, 0xd4, 0x8d, 0x9d, 0x0e, 0x2a, 0x19, 0x5a, 0x17, 0xa4, 0xef, 0x88, 0x18, 0x89, 0x8b, 0xa7, + 0xc4, 0xce, 0xc8, 0xc5, 0xb3, 0x8d, 0x07, 0xe1, 0x0d, 0x48, 0xb1, 0x83, 0xef, 0x33, 0x6f, 0x98, + 0xd2, 0xfe, 0x7d, 0x11, 0x4a, 0x3b, 0x9b, 0x6d, 0x71, 0x98, 0x2d, 0xbf, 0xca, 0x8e, 0xb6, 0x0f, + 0xad, 0xde, 0x7d, 0xea, 0x59, 0xfb, 0xc7, 0x72, 0xe9, 0x1d, 0x3b, 0xcc, 0x36, 0xcd, 0x81, 0x19, + 0xb9, 0xc8, 0xbb, 0x30, 0x69, 0xe8, 0x2b, 0xd4, 0x0b, 0x46, 0x31, 0x2c, 0xf0, 0xed, 0xec, 0x2b, + 0xcb, 0x51, 0x76, 0x4c, 0x80, 0x91, 0x5d, 0x00, 0x23, 0x82, 0x2e, 0x9d, 0xdb, 0x1c, 0x12, 0x03, + 0x8e, 0x01, 0x11, 0x84, 0xfa, 0x21, 0x63, 0xe5, 0xa8, 0xe5, 0xf3, 0xa0, 0xf2, 0x96, 0x73, 0x47, + 0xe5, 0xc5, 0x08, 0x46, 0x73, 0x60, 0x2a, 0x71, 0x1b, 0x15, 0xf9, 0x32, 0xd4, 0xdc, 0x5e, 0x6c, + 0x38, 0xad, 0xf3, 0x68, 0xda, 0xda, 0x3d, 0x99, 0xf6, 0xe8, 0xa4, 0x39, 0xb5, 0xe9, 0x76, 0x2c, + 0x43, 0x25, 0x60, 0xc8, 0x4e, 0x34, 0x98, 0xe0, 0xfb, 0x36, 0xd5, 0x5d, 0x54, 0x7c, 0xee, 0xe0, + 0xd7, 0xc5, 0xf8, 
0x28, 0x29, 0xda, 0x2f, 0x97, 0x21, 0xf2, 0xb8, 0x12, 0x1f, 0x26, 0xc4, 0x9e, + 0x11, 0x39, 0x72, 0x5f, 0xe8, 0xf6, 0x14, 0x29, 0x8a, 0x74, 0xa0, 0xf4, 0xbe, 0xbb, 0x97, 0x7b, + 0xe0, 0x8e, 0x1d, 0x3e, 0x21, 0x6c, 0x65, 0xb1, 0x04, 0x64, 0x12, 0xc8, 0x5f, 0x2d, 0xc0, 0xf3, + 0x7e, 0x5a, 0xf5, 0x95, 0xcd, 0x01, 0xf3, 0xeb, 0xf8, 0x69, 0x65, 0x5a, 0x86, 0x3d, 0x0f, 0x23, + 0xe3, 0x60, 0x59, 0x58, 0xfd, 0x0b, 0x57, 0xa8, 0x6c, 0x4e, 0xeb, 0x39, 0xef, 0xdc, 0x4d, 0xd6, + 0x7f, 0x32, 0x0d, 0xa5, 0x28, 0xed, 0xdb, 0x45, 0x68, 0xc4, 0x46, 0xeb, 0xdc, 0x57, 0x9c, 0x3d, + 0x4c, 0x5d, 0x71, 0xb6, 0x3d, 0x7a, 0x64, 0x40, 0x54, 0xaa, 0x8b, 0xbe, 0xe5, 0xec, 0x9f, 0x16, + 0xa1, 0xb4, 0xbb, 0xba, 0x96, 0x5c, 0xb4, 0x16, 0x9e, 0xc2, 0xa2, 0xf5, 0x00, 0xaa, 0x7b, 0x7d, + 0xcb, 0x0e, 0x2c, 0x27, 0xf7, 0xf1, 0x38, 0xea, 0x46, 0x38, 0xe9, 0xeb, 0x10, 0xa8, 0xa8, 0xe0, + 0x49, 0x07, 0xaa, 0x1d, 0x71, 0x3e, 0x69, 0xee, 0x78, 0x49, 0x79, 0xce, 0xa9, 0x10, 0x24, 0x5f, + 0x50, 0xa1, 0x6b, 0xc7, 0x30, 0xb1, 0xbb, 0x2a, 0xd5, 0xfe, 0xa7, 0x5b, 0x9b, 0xda, 0x2f, 0x42, + 0xa8, 0x05, 0x3c, 0x7d, 0xe1, 0xff, 0xad, 0x00, 0x49, 0xc5, 0xe7, 0xe9, 0xb7, 0xa6, 0xc3, 0x74, + 0x6b, 0x5a, 0x1d, 0x47, 0xe7, 0xcb, 0x6e, 0x50, 0xda, 0xbf, 0x29, 0x40, 0x6a, 0xa3, 0x1f, 0x79, + 0x4d, 0x1e, 0x75, 0x97, 0x0c, 0x4c, 0x53, 0x47, 0xdd, 0x91, 0x24, 0x77, 0xec, 0xc8, 0xbb, 0x0f, + 0xd9, 0x72, 0x2d, 0xee, 0x40, 0x93, 0xc5, 0xbf, 0x3b, 0xfa, 0x72, 0x2d, 0xcb, 0x1d, 0x27, 0x83, + 0x27, 0xe3, 0x24, 0x4c, 0xca, 0xd5, 0xfe, 0x41, 0x11, 0x26, 0x9e, 0xda, 0xd9, 0x06, 0x34, 0x11, + 0xcf, 0xba, 0x92, 0x73, 0xb4, 0x1f, 0x1a, 0xcd, 0xda, 0x4d, 0x45, 0xb3, 0xe6, 0xbd, 0xca, 0xfd, + 0x09, 0xb1, 0xac, 0xff, 0xaa, 0x00, 0x72, 0xae, 0xd9, 0x70, 0xfc, 0x40, 0x77, 0x0c, 0x4a, 0x8c, + 0x70, 0x62, 0xcb, 0x1b, 0x34, 0x25, 0x03, 0x0b, 0x85, 0x2e, 0xc3, 0x9f, 0xd5, 0x44, 0x46, 0x7e, + 0x12, 0x6a, 0x07, 0xae, 0x1f, 0xf0, 0xc9, 0xab, 0x98, 0x34, 0x99, 0xdd, 0x96, 0xe9, 0x18, 0x72, + 0xa4, 0xdd, 0xd9, 0x95, 0xe1, 0xee, 0x6c, 0xed, 0x37, 
0x8b, 0x30, 0xf9, 0x49, 0x39, 0x3c, 0x21, + 0x2b, 0xfa, 0xb7, 0x94, 0x33, 0xfa, 0xb7, 0x7c, 0x9e, 0xe8, 0x5f, 0xed, 0x07, 0x05, 0x80, 0xa7, + 0x76, 0x72, 0x83, 0x99, 0x0c, 0xcc, 0xcd, 0xdd, 0xae, 0xb2, 0xc3, 0x72, 0xff, 0x6e, 0x45, 0x7d, + 0x12, 0x0f, 0xca, 0xfd, 0xb0, 0x00, 0xd3, 0x7a, 0x22, 0xd0, 0x35, 0xb7, 0xbe, 0x9c, 0x8a, 0x9b, + 0x0d, 0xe3, 0xb4, 0x92, 0xe9, 0x98, 0x12, 0x4b, 0x5e, 0x8f, 0x4e, 0x59, 0xbf, 0x1b, 0x35, 0xfb, + 0x81, 0xe3, 0xd1, 0xb9, 0xee, 0x96, 0xe0, 0x7c, 0x42, 0x60, 0x71, 0x69, 0x2c, 0x81, 0xc5, 0xf1, + 0x2d, 0x93, 0xe5, 0xc7, 0x6e, 0x99, 0x3c, 0x82, 0xfa, 0xbe, 0xe7, 0x76, 0x79, 0xec, 0xae, 0xbc, + 0xc7, 0xfc, 0x56, 0x8e, 0x89, 0xb2, 0xbb, 0x67, 0x39, 0xd4, 0xe4, 0x71, 0xc1, 0xa1, 0xe1, 0x6a, + 0x4d, 0xe1, 0x63, 0x24, 0x8a, 0xdb, 0xfa, 0x5d, 0x21, 0x75, 0x62, 0x9c, 0x52, 0xc3, 0xb1, 0x64, + 0x47, 0xa0, 0xa3, 0x12, 0x93, 0x8c, 0xd7, 0xad, 0x3e, 0x9d, 0x78, 0x5d, 0xed, 0xcf, 0x57, 0xd5, + 0x00, 0xf6, 0xcc, 0x1d, 0xe8, 0xfb, 0xe9, 0x46, 0xf7, 0x0e, 0x1d, 0xd8, 0x85, 0x5e, 0x7b, 0x8a, + 0xbb, 0xd0, 0xeb, 0xe3, 0xd9, 0x85, 0x0e, 0xf9, 0x76, 0xa1, 0x37, 0xc6, 0xb4, 0x0b, 0x7d, 0x72, + 0x5c, 0xbb, 0xd0, 0xa7, 0x46, 0xda, 0x85, 0x3e, 0x7d, 0xa6, 0x5d, 0xe8, 0x27, 0x25, 0x48, 0x2d, + 0xc6, 0x3f, 0x75, 0xbc, 0xfd, 0x48, 0x39, 0xde, 0xbe, 0x5b, 0x84, 0x68, 0x20, 0x3e, 0x67, 0x60, + 0xd2, 0x3b, 0x50, 0xeb, 0xea, 0x0f, 0x79, 0xe0, 0x74, 0x9e, 0x7b, 0xb0, 0xb7, 0x24, 0x06, 0x86, + 0x68, 0xc4, 0x07, 0xb0, 0xc2, 0xbb, 0x28, 0x72, 0xbb, 0x30, 0xa2, 0x6b, 0x2d, 0x84, 0x91, 0x34, + 0x7a, 0xc7, 0x98, 0x18, 0xed, 0x5f, 0x16, 0x41, 0x5e, 0x5a, 0x42, 0x28, 0x54, 0xf6, 0xad, 0x87, + 0xd4, 0xcc, 0x1d, 0xee, 0xbc, 0xc6, 0x50, 0xe4, 0xcd, 0x28, 0xdc, 0x47, 0xc3, 0x13, 0x50, 0xa0, + 0x73, 0xe3, 0xbb, 0xf0, 0xb9, 0xc9, 0xfa, 0xcb, 0x61, 0x7c, 0x8f, 0xfb, 0xee, 0xa4, 0xf1, 0x5d, + 0x24, 0xa1, 0x92, 0x21, 0x6c, 0xfd, 0x3c, 0xfc, 0x22, 0xb7, 0x8b, 0x31, 0x11, 0xc6, 0xa1, 0x6c, + 0xfd, 0xbe, 0x38, 0x86, 0x42, 0xca, 0x68, 0xfd, 0xc2, 0xf7, 0x7f, 0x78, 0xfd, 0xb9, 0x1f, 
0xfc, + 0xf0, 0xfa, 0x73, 0x1f, 0xfd, 0xf0, 0xfa, 0x73, 0xbf, 0x7c, 0x7a, 0xbd, 0xf0, 0xfd, 0xd3, 0xeb, + 0x85, 0x1f, 0x9c, 0x5e, 0x2f, 0x7c, 0x74, 0x7a, 0xbd, 0xf0, 0x1f, 0x4f, 0xaf, 0x17, 0xfe, 0xd2, + 0x7f, 0xba, 0xfe, 0xdc, 0xcf, 0x7f, 0x29, 0x2a, 0xc2, 0xa2, 0x2a, 0xc2, 0xa2, 0x12, 0xb8, 0xd8, + 0x3b, 0xec, 0x2c, 0xb2, 0x22, 0x44, 0x29, 0xaa, 0x08, 0xff, 0x3f, 0x00, 0x00, 0xff, 0xff, 0xa1, + 0x3f, 0x0d, 0xca, 0x09, 0xa0, 0x00, 0x00, } func (m *AbstractPodTemplate) Marshal() (dAtA []byte, err error) { @@ -6290,6 +6292,11 @@ func (m *Lifecycle) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.DeprecatedDeleteGracePeriodSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.DeprecatedDeleteGracePeriodSeconds)) + i-- + dAtA[i] = 0x20 + } if m.PauseGracePeriodSeconds != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.PauseGracePeriodSeconds)) i-- @@ -6300,8 +6307,8 @@ func (m *Lifecycle) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(len(m.DesiredPhase))) i-- dAtA[i] = 0x12 - if m.DeleteGracePeriodSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.DeleteGracePeriodSeconds)) + if m.DeletionGracePeriodSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.DeletionGracePeriodSeconds)) i-- dAtA[i] = 0x8 } @@ -10702,14 +10709,17 @@ func (m *Lifecycle) Size() (n int) { } var l int _ = l - if m.DeleteGracePeriodSeconds != nil { - n += 1 + sovGenerated(uint64(*m.DeleteGracePeriodSeconds)) + if m.DeletionGracePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.DeletionGracePeriodSeconds)) } l = len(m.DesiredPhase) n += 1 + l + sovGenerated(uint64(l)) if m.PauseGracePeriodSeconds != nil { n += 1 + sovGenerated(uint64(*m.PauseGracePeriodSeconds)) } + if m.DeprecatedDeleteGracePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.DeprecatedDeleteGracePeriodSeconds)) + } return n } @@ -12670,9 +12680,10 @@ func (this *Lifecycle) String() string { return "nil" } s 
:= strings.Join([]string{`&Lifecycle{`, - `DeleteGracePeriodSeconds:` + valueToStringGenerated(this.DeleteGracePeriodSeconds) + `,`, + `DeletionGracePeriodSeconds:` + valueToStringGenerated(this.DeletionGracePeriodSeconds) + `,`, `DesiredPhase:` + fmt.Sprintf("%v", this.DesiredPhase) + `,`, `PauseGracePeriodSeconds:` + valueToStringGenerated(this.PauseGracePeriodSeconds) + `,`, + `DeprecatedDeleteGracePeriodSeconds:` + valueToStringGenerated(this.DeprecatedDeleteGracePeriodSeconds) + `,`, `}`, }, "") return s @@ -22980,9 +22991,9 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DeleteGracePeriodSeconds", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DeletionGracePeriodSeconds", wireType) } - var v int32 + var v int64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22992,12 +23003,12 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } } - m.DeleteGracePeriodSeconds = &v + m.DeletionGracePeriodSeconds = &v case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field DesiredPhase", wireType) @@ -23034,7 +23045,7 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field PauseGracePeriodSeconds", wireType) } - var v int32 + var v int64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23044,12 +23055,32 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } } m.PauseGracePeriodSeconds = &v + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedDeleteGracePeriodSeconds", wireType) + } + var v int64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DeprecatedDeleteGracePeriodSeconds = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/pkg/apis/numaflow/v1alpha1/generated.proto b/pkg/apis/numaflow/v1alpha1/generated.proto index bafd54085c..dfe8339b6b 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.proto +++ b/pkg/apis/numaflow/v1alpha1/generated.proto @@ -868,10 +868,10 @@ message KafkaSource { } message Lifecycle { - // DeleteGracePeriodSeconds used to delete pipeline gracefully + // DeletionGracePeriodSeconds used to delete pipeline gracefully // +kubebuilder:default=30 // +optional - optional int32 deleteGracePeriodSeconds = 1; + optional int64 deletionGracePeriodSeconds = 1; // DesiredPhase used to bring the pipeline from current phase to desired phase // +kubebuilder:default=Running @@ -881,7 +881,13 @@ message Lifecycle { // PauseGracePeriodSeconds used to pause pipeline gracefully // +kubebuilder:default=30 // +optional - optional int32 pauseGracePeriodSeconds = 3; + optional int64 pauseGracePeriodSeconds = 3; + + // DeleteGracePeriodSeconds used to delete pipeline gracefully + // +kubebuilder:default=30 + // Deprecated: Use DeletionGracePeriodSeconds instead + // +optional + optional int64 deleteGracePeriodSeconds = 4; } message Log { diff --git a/pkg/apis/numaflow/v1alpha1/pipeline_types.go b/pkg/apis/numaflow/v1alpha1/pipeline_types.go index 3307adf743..9d44901573 100644 --- a/pkg/apis/numaflow/v1alpha1/pipeline_types.go +++ b/pkg/apis/numaflow/v1alpha1/pipeline_types.go @@ -419,10 +419,10 @@ func (p Pipeline) GetPipelineLimits() PipelineLimits { } type Lifecycle struct { - // DeleteGracePeriodSeconds used to delete pipeline gracefully + // DeletionGracePeriodSeconds used to delete pipeline gracefully // +kubebuilder:default=30 // +optional - 
DeleteGracePeriodSeconds *int32 `json:"deleteGracePeriodSeconds,omitempty" protobuf:"varint,1,opt,name=deleteGracePeriodSeconds"` + DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty" protobuf:"varint,1,opt,name=deletionGracePeriodSeconds"` // DesiredPhase used to bring the pipeline from current phase to desired phase // +kubebuilder:default=Running // +optional @@ -430,28 +430,39 @@ type Lifecycle struct { // PauseGracePeriodSeconds used to pause pipeline gracefully // +kubebuilder:default=30 // +optional - PauseGracePeriodSeconds *int32 `json:"pauseGracePeriodSeconds,omitempty" protobuf:"varint,3,opt,name=pauseGracePeriodSeconds"` + PauseGracePeriodSeconds *int64 `json:"pauseGracePeriodSeconds,omitempty" protobuf:"varint,3,opt,name=pauseGracePeriodSeconds"` + // DeleteGracePeriodSeconds used to delete pipeline gracefully + // +kubebuilder:default=30 + // Deprecated: Use DeletionGracePeriodSeconds instead + // +optional + DeprecatedDeleteGracePeriodSeconds *int64 `json:"deleteGracePeriodSeconds,omitempty" protobuf:"varint,4,opt,name=deleteGracePeriodSeconds"` } -// GetDeleteGracePeriodSeconds returns the value DeleteGracePeriodSeconds. -func (lc Lifecycle) GetDeleteGracePeriodSeconds() int32 { - if lc.DeleteGracePeriodSeconds != nil { - return *lc.DeleteGracePeriodSeconds +// GetTerminationGracePeriodSeconds returns the value DeleteGracePeriodSeconds. 
+func (p Pipeline) GetTerminationGracePeriodSeconds() int64 { + if p.Spec.Lifecycle.DeletionGracePeriodSeconds != nil { + return *p.Spec.Lifecycle.DeletionGracePeriodSeconds + } + if p.Spec.Lifecycle.DeprecatedDeleteGracePeriodSeconds != nil { + return *p.Spec.Lifecycle.DeprecatedDeleteGracePeriodSeconds + } + if p.DeletionGracePeriodSeconds != nil { + return *p.DeletionGracePeriodSeconds } return 30 } -func (lc Lifecycle) GetDesiredPhase() PipelinePhase { - if string(lc.DesiredPhase) != "" { - return lc.DesiredPhase +func (p Pipeline) GetDesiredPhase() PipelinePhase { + if string(p.Spec.Lifecycle.DesiredPhase) != "" { + return p.Spec.Lifecycle.DesiredPhase } return PipelinePhaseRunning } // return PauseGracePeriodSeconds if set -func (lc Lifecycle) GetPauseGracePeriodSeconds() int32 { - if lc.PauseGracePeriodSeconds != nil { - return *lc.PauseGracePeriodSeconds +func (p Pipeline) GetPauseGracePeriodSeconds() int64 { + if p.Spec.Lifecycle.PauseGracePeriodSeconds != nil { + return *p.Spec.Lifecycle.PauseGracePeriodSeconds } return 30 } diff --git a/pkg/apis/numaflow/v1alpha1/pipeline_types_test.go b/pkg/apis/numaflow/v1alpha1/pipeline_types_test.go index 34835e7b07..ef3884fe18 100644 --- a/pkg/apis/numaflow/v1alpha1/pipeline_types_test.go +++ b/pkg/apis/numaflow/v1alpha1/pipeline_types_test.go @@ -400,18 +400,35 @@ func Test_GetWatermarkMaxDelay(t *testing.T) { assert.Equal(t, "2s", wm.GetMaxDelay().String()) } -func Test_GetDeleteGracePeriodSeconds(t *testing.T) { - lc := Lifecycle{} - assert.Equal(t, int32(30), lc.GetDeleteGracePeriodSeconds()) - lc.DeleteGracePeriodSeconds = ptr.To[int32](50) - assert.Equal(t, int32(50), lc.GetDeleteGracePeriodSeconds()) +func Test_GetTerminationGracePeriodSeconds(t *testing.T) { + p := Pipeline{} + assert.Equal(t, int64(30), p.GetTerminationGracePeriodSeconds()) + p.DeletionGracePeriodSeconds = ptr.To[int64](35) + assert.Equal(t, int64(35), p.GetTerminationGracePeriodSeconds()) + p.Spec.Lifecycle.DeletionGracePeriodSeconds = 
ptr.To[int64](40) + assert.Equal(t, int64(40), p.GetTerminationGracePeriodSeconds()) } func Test_GetDesiredPhase(t *testing.T) { - lc := Lifecycle{} - assert.Equal(t, PipelinePhaseRunning, lc.GetDesiredPhase()) - lc.DesiredPhase = PipelinePhasePaused - assert.Equal(t, PipelinePhasePaused, lc.GetDesiredPhase()) + p := Pipeline{ + Spec: PipelineSpec{ + Lifecycle: Lifecycle{}, + }, + } + assert.Equal(t, PipelinePhaseRunning, p.GetDesiredPhase()) + p.Spec.Lifecycle.DesiredPhase = PipelinePhasePaused + assert.Equal(t, PipelinePhasePaused, p.GetDesiredPhase()) +} + +func Test_GetPauseGracePeriodSeconds(t *testing.T) { + p := Pipeline{ + Spec: PipelineSpec{ + Lifecycle: Lifecycle{}, + }, + } + assert.Equal(t, int64(30), p.GetPauseGracePeriodSeconds()) + p.Spec.Lifecycle.PauseGracePeriodSeconds = ptr.To[int64](40) + assert.Equal(t, int64(40), p.GetPauseGracePeriodSeconds()) } func Test_GetPipelineLimits(t *testing.T) { diff --git a/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go index 912aabc5c2..2576898f49 100644 --- a/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go @@ -1317,14 +1317,19 @@ func (in *KafkaSource) DeepCopy() *KafkaSource { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Lifecycle) DeepCopyInto(out *Lifecycle) { *out = *in - if in.DeleteGracePeriodSeconds != nil { - in, out := &in.DeleteGracePeriodSeconds, &out.DeleteGracePeriodSeconds - *out = new(int32) + if in.DeletionGracePeriodSeconds != nil { + in, out := &in.DeletionGracePeriodSeconds, &out.DeletionGracePeriodSeconds + *out = new(int64) **out = **in } if in.PauseGracePeriodSeconds != nil { in, out := &in.PauseGracePeriodSeconds, &out.PauseGracePeriodSeconds - *out = new(int32) + *out = new(int64) + **out = **in + } + if in.DeprecatedDeleteGracePeriodSeconds != nil { + in, out := &in.DeprecatedDeleteGracePeriodSeconds, &out.DeprecatedDeleteGracePeriodSeconds + *out = new(int64) **out = **in } return diff --git a/pkg/apis/numaflow/v1alpha1/zz_generated.openapi.go b/pkg/apis/numaflow/v1alpha1/zz_generated.openapi.go index 314e17e867..1cd2d83b7f 100644 --- a/pkg/apis/numaflow/v1alpha1/zz_generated.openapi.go +++ b/pkg/apis/numaflow/v1alpha1/zz_generated.openapi.go @@ -2968,11 +2968,11 @@ func schema_pkg_apis_numaflow_v1alpha1_Lifecycle(ref common.ReferenceCallback) c SchemaProps: spec.SchemaProps{ Type: []string{"object"}, Properties: map[string]spec.Schema{ - "deleteGracePeriodSeconds": { + "deletionGracePeriodSeconds": { SchemaProps: spec.SchemaProps{ - Description: "DeleteGracePeriodSeconds used to delete pipeline gracefully", + Description: "DeletionGracePeriodSeconds used to delete pipeline gracefully", Type: []string{"integer"}, - Format: "int32", + Format: "int64", }, }, "desiredPhase": { @@ -2986,7 +2986,14 @@ func schema_pkg_apis_numaflow_v1alpha1_Lifecycle(ref common.ReferenceCallback) c SchemaProps: spec.SchemaProps{ Description: "PauseGracePeriodSeconds used to pause pipeline gracefully", Type: []string{"integer"}, - Format: "int32", + Format: "int64", + }, + }, + "deleteGracePeriodSeconds": { + SchemaProps: spec.SchemaProps{ + Description: "DeleteGracePeriodSeconds used to delete pipeline gracefully Deprecated: Use DeletionGracePeriodSeconds instead", + 
Type: []string{"integer"}, + Format: "int64", }, }, }, diff --git a/pkg/reconciler/pipeline/controller.go b/pkg/reconciler/pipeline/controller.go index 05183a27c1..cb11078c7f 100644 --- a/pkg/reconciler/pipeline/controller.go +++ b/pkg/reconciler/pipeline/controller.go @@ -112,7 +112,7 @@ func (r *pipelineReconciler) reconcile(ctx context.Context, pl *dfv1.Pipeline) ( if !pl.DeletionTimestamp.IsZero() { log.Info("Deleting pipeline") if controllerutil.ContainsFinalizer(pl, finalizerName) { - if time.Now().Before(pl.DeletionTimestamp.Add(time.Duration(pl.Spec.Lifecycle.GetDeleteGracePeriodSeconds()) * time.Second)) { + if time.Now().Before(pl.DeletionTimestamp.Add(time.Duration(pl.GetTerminationGracePeriodSeconds()) * time.Second)) { safeToDelete, err := r.safeToDelete(ctx, pl) if err != nil { logMsg := fmt.Sprintf("Failed to check if it's safe to delete pipeline %s: %v", pl.Name, err.Error()) @@ -180,7 +180,7 @@ func (r *pipelineReconciler) reconcile(ctx context.Context, pl *dfv1.Pipeline) ( // this should happen only after the required configs for the lifecycle changes // have been applied. 
if !isLifecycleChange(pl) { - pl.Status.SetPhase(pl.Spec.Lifecycle.GetDesiredPhase(), "") + pl.Status.SetPhase(pl.GetDesiredPhase(), "") } if err := r.checkChildrenResourceStatus(ctx, pl); err != nil { return ctrl.Result{}, fmt.Errorf("failed to check pipeline children resource status, %w", err) @@ -188,7 +188,7 @@ func (r *pipelineReconciler) reconcile(ctx context.Context, pl *dfv1.Pipeline) ( // check if any changes related to pause/resume lifecycle for the pipeline oldPhase := pl.Status.Phase - if isLifecycleChange(pl) && oldPhase != pl.Spec.Lifecycle.GetDesiredPhase() { + if isLifecycleChange(pl) && oldPhase != pl.GetDesiredPhase() { requeue, err := r.updateDesiredState(ctx, pl) if err != nil { logMsg := fmt.Sprintf("Updated desired pipeline phase failed: %v", zap.Error(err)) @@ -215,7 +215,7 @@ func isLifecycleChange(pl *dfv1.Pipeline) bool { // Check if the desired phase of the pipeline is 'Paused', or if the current phase of the // pipeline is either 'Paused' or 'Pausing'. This indicates a transition into or out of // a paused state which is a lifecycle phase change - if oldPhase := pl.Status.Phase; pl.Spec.Lifecycle.GetDesiredPhase() == dfv1.PipelinePhasePaused || + if oldPhase := pl.Status.Phase; pl.GetDesiredPhase() == dfv1.PipelinePhasePaused || oldPhase == dfv1.PipelinePhasePaused || oldPhase == dfv1.PipelinePhasePausing { return true } @@ -810,7 +810,7 @@ var allVertexFilter vertexFilterFunc = func(v dfv1.Vertex) bool { return true } var sourceVertexFilter vertexFilterFunc = func(v dfv1.Vertex) bool { return v.IsASource() } func (r *pipelineReconciler) updateDesiredState(ctx context.Context, pl *dfv1.Pipeline) (bool, error) { - switch pl.Spec.Lifecycle.GetDesiredPhase() { + switch pl.GetDesiredPhase() { case dfv1.PipelinePhasePaused: return r.pausePipeline(ctx, pl) case dfv1.PipelinePhaseRunning, dfv1.PipelinePhaseUnknown: @@ -892,7 +892,7 @@ func (r *pipelineReconciler) pausePipeline(ctx context.Context, pl *dfv1.Pipelin } // if drain is completed, or 
we have exceeded the pause deadline, mark pl as paused and scale down - if time.Now().After(pauseTimestamp.Add(time.Duration(pl.Spec.Lifecycle.GetPauseGracePeriodSeconds())*time.Second)) || drainCompleted { + if time.Now().After(pauseTimestamp.Add(time.Duration(pl.GetPauseGracePeriodSeconds())*time.Second)) || drainCompleted { _, err = r.scaleDownAllVertices(ctx, pl) if err != nil { return true, err diff --git a/pkg/reconciler/vertex/scaling/scaling.go b/pkg/reconciler/vertex/scaling/scaling.go index 139e189f5f..c1fb96eb47 100644 --- a/pkg/reconciler/vertex/scaling/scaling.go +++ b/pkg/reconciler/vertex/scaling/scaling.go @@ -198,7 +198,7 @@ func (s *Scaler) scaleOneVertex(ctx context.Context, key string, worker int) err log.Debug("Corresponding Pipeline being deleted.") return nil } - if pl.Spec.Lifecycle.GetDesiredPhase() != dfv1.PipelinePhaseRunning { + if pl.GetDesiredPhase() != dfv1.PipelinePhaseRunning { log.Info("Corresponding Pipeline not in Running state, skip scaling.") return nil } diff --git a/rust/numaflow-models/src/models/lifecycle.rs b/rust/numaflow-models/src/models/lifecycle.rs index 9223e0c377..b6ee6dd5ff 100644 --- a/rust/numaflow-models/src/models/lifecycle.rs +++ b/rust/numaflow-models/src/models/lifecycle.rs @@ -18,12 +18,18 @@ limitations under the License. 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Lifecycle { - /// DeleteGracePeriodSeconds used to delete pipeline gracefully + /// DeleteGracePeriodSeconds used to delete pipeline gracefully Deprecated: Use DeletionGracePeriodSeconds instead #[serde( rename = "deleteGracePeriodSeconds", skip_serializing_if = "Option::is_none" )] - pub delete_grace_period_seconds: Option, + pub delete_grace_period_seconds: Option, + /// DeletionGracePeriodSeconds used to delete pipeline gracefully + #[serde( + rename = "deletionGracePeriodSeconds", + skip_serializing_if = "Option::is_none" + )] + pub deletion_grace_period_seconds: Option, /// DesiredPhase used to bring the pipeline from current phase to desired phase #[serde(rename = "desiredPhase", skip_serializing_if = "Option::is_none")] pub desired_phase: Option, @@ -32,13 +38,14 @@ pub struct Lifecycle { rename = "pauseGracePeriodSeconds", skip_serializing_if = "Option::is_none" )] - pub pause_grace_period_seconds: Option, + pub pause_grace_period_seconds: Option, } impl Lifecycle { pub fn new() -> Lifecycle { Lifecycle { delete_grace_period_seconds: None, + deletion_grace_period_seconds: None, desired_phase: None, pause_grace_period_seconds: None, } diff --git a/server/apis/v1/handler.go b/server/apis/v1/handler.go index 20b77f9153..9e588daed8 100644 --- a/server/apis/v1/handler.go +++ b/server/apis/v1/handler.go @@ -1407,11 +1407,11 @@ func getMonoVertices(h *handler, namespace string) (MonoVertices, error) { func getPipelineStatus(pipeline *dfv1.Pipeline) (string, error) { retStatus := dfv1.PipelineStatusHealthy // Check if the pipeline is paused, if so, return inactive status - if pipeline.Spec.Lifecycle.GetDesiredPhase() == dfv1.PipelinePhasePaused { + if pipeline.GetDesiredPhase() == dfv1.PipelinePhasePaused { retStatus = dfv1.PipelineStatusInactive - } else if pipeline.Spec.Lifecycle.GetDesiredPhase() == dfv1.PipelinePhaseRunning { + } else if pipeline.GetDesiredPhase() == dfv1.PipelinePhaseRunning 
{ retStatus = dfv1.PipelineStatusHealthy - } else if pipeline.Spec.Lifecycle.GetDesiredPhase() == dfv1.PipelinePhaseFailed { + } else if pipeline.GetDesiredPhase() == dfv1.PipelinePhaseFailed { retStatus = dfv1.PipelineStatusCritical } return retStatus, nil diff --git a/server/apis/v1/health.go b/server/apis/v1/health.go index 5973a8865d..264436d7e5 100644 --- a/server/apis/v1/health.go +++ b/server/apis/v1/health.go @@ -103,7 +103,7 @@ func checkVertexLevelHealth(h *handler, ns string, // if the pipeline is paused, return inactive status // this cannot be checked at individual vertex level, hence needs to be checked here - if pl.Spec.Lifecycle.GetDesiredPhase() == dfv1.PipelinePhasePaused { + if pl.GetDesiredPhase() == dfv1.PipelinePhasePaused { return &resourceHealthResponse{ Status: dfv1.PipelineStatusInactive, Message: fmt.Sprintf("Pipeline %q is paused", pipeline), @@ -113,7 +113,7 @@ func checkVertexLevelHealth(h *handler, ns string, // if the pipeline is killed, return killed status // this cannot be checked at individual vertex level, hence needs to be checked here - if pl.Spec.Lifecycle.GetDesiredPhase() == dfv1.PipelinePhaseDeleting { + if pl.GetDesiredPhase() == dfv1.PipelinePhaseDeleting { return &resourceHealthResponse{ Status: dfv1.PipelineStatusDeleting, Message: fmt.Sprintf("Pipeline %q is killed", pipeline), From c5afc906995c6cf2b4f0fa8f4b9c7d8c54577f5f Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Wed, 20 Nov 2024 08:24:07 -0800 Subject: [PATCH 146/188] feat: use sidecar for ud containers [Breaking k8s < v1.29] (#2230) Signed-off-by: Derek Wang --- .../numaflow/v1alpha1/container_builder.go | 10 ++- .../v1alpha1/container_builder_test.go | 3 + .../numaflow/v1alpha1/container_supplier.go | 3 +- .../numaflow/v1alpha1/mono_vertex_types.go | 34 ++++++---- .../v1alpha1/mono_vertex_types_test.go | 12 ++-- pkg/apis/numaflow/v1alpha1/sink.go | 13 ++-- pkg/apis/numaflow/v1alpha1/sink_test.go | 5 +- pkg/apis/numaflow/v1alpha1/source.go | 13 ++-- 
pkg/apis/numaflow/v1alpha1/source_test.go | 68 ++++++++++--------- pkg/apis/numaflow/v1alpha1/udf.go | 6 +- pkg/apis/numaflow/v1alpha1/udf_test.go | 38 ++++++----- pkg/apis/numaflow/v1alpha1/vertex_types.go | 25 +++---- .../numaflow/v1alpha1/vertex_types_test.go | 61 ++++++++++------- pkg/reconciler/monovertex/controller_test.go | 20 +++--- pkg/reconciler/vertex/controller_test.go | 43 ++++++------ 15 files changed, 200 insertions(+), 154 deletions(-) diff --git a/pkg/apis/numaflow/v1alpha1/container_builder.go b/pkg/apis/numaflow/v1alpha1/container_builder.go index df09d250cd..0751239d66 100644 --- a/pkg/apis/numaflow/v1alpha1/container_builder.go +++ b/pkg/apis/numaflow/v1alpha1/container_builder.go @@ -16,7 +16,10 @@ limitations under the License. package v1alpha1 -import corev1 "k8s.io/api/core/v1" +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/utils/ptr" +) type containerBuilder corev1.Container @@ -85,6 +88,11 @@ func (b containerBuilder) resources(x corev1.ResourceRequirements) containerBuil return b } +func (b containerBuilder) asSidecar() containerBuilder { + b.RestartPolicy = ptr.To[corev1.ContainerRestartPolicy](corev1.ContainerRestartPolicyAlways) + return b +} + func (b containerBuilder) build() corev1.Container { return corev1.Container(b) } diff --git a/pkg/apis/numaflow/v1alpha1/container_builder_test.go b/pkg/apis/numaflow/v1alpha1/container_builder_test.go index b641918b6b..22a04b4ec3 100644 --- a/pkg/apis/numaflow/v1alpha1/container_builder_test.go +++ b/pkg/apis/numaflow/v1alpha1/container_builder_test.go @@ -22,6 +22,7 @@ import ( "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" resource "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/utils/ptr" ) var ( @@ -49,6 +50,7 @@ func Test_containerBuilder(t *testing.T) { appendEnv(corev1.EnvVar{ Name: "env", Value: "value"}). appendPorts(corev1.ContainerPort{Name: "port", ContainerPort: 8080}). + asSidecar(). 
build() assert.Equal(t, "numa", c.Name) assert.Len(t, c.VolumeMounts, 1) @@ -58,4 +60,5 @@ func Test_containerBuilder(t *testing.T) { assert.Equal(t, corev1.PullIfNotPresent, c.ImagePullPolicy) assert.Equal(t, []corev1.EnvVar{{Name: "env", Value: "value"}}, c.Env) assert.Equal(t, []corev1.ContainerPort{{Name: "port", ContainerPort: 8080}}, c.Ports) + assert.Equal(t, ptr.To[corev1.ContainerRestartPolicy](corev1.ContainerRestartPolicyAlways), c.RestartPolicy) } diff --git a/pkg/apis/numaflow/v1alpha1/container_supplier.go b/pkg/apis/numaflow/v1alpha1/container_supplier.go index 3d090c62fd..a15aee8ec7 100644 --- a/pkg/apis/numaflow/v1alpha1/container_supplier.go +++ b/pkg/apis/numaflow/v1alpha1/container_supplier.go @@ -29,5 +29,6 @@ type getContainerReq struct { } type containerSupplier interface { - getContainers(req getContainerReq) ([]corev1.Container, error) + // getContainers returns the sidecar containers and main containers for the vertex. + getContainers(req getContainerReq) ([]corev1.Container, []corev1.Container, error) } diff --git a/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go index 0ed0730506..a167b56cc1 100644 --- a/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go +++ b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go @@ -341,7 +341,7 @@ func (mv MonoVertex) GetPodSpec(req GetMonoVertexPodSpecReq) (*corev1.PodSpec, e } volumeMounts := []corev1.VolumeMount{{Name: varVolumeName, MountPath: PathVarRun}} - containers := mv.Spec.buildContainers(getContainerReq{ + sidecarContainers, containers := mv.Spec.buildContainers(getContainerReq{ env: envVars, image: req.Image, imagePullPolicy: req.PullPolicy, @@ -391,17 +391,19 @@ func (mv MonoVertex) GetPodSpec(req GetMonoVertexPodSpecReq) (*corev1.PodSpec, e {Name: MonoVertexMetricsPortName, ContainerPort: MonoVertexMetricsPort}, } - if len(containers) > 1 { // udf, udsink, udsource, or source vertex specifies a udtransformer - for i := 1; i < len(containers); i++ { 
- containers[i].Env = append(containers[i].Env, mv.commonEnvs()...) - containers[i].Env = append(containers[i].Env, mv.sidecarEnvs()...) - } + for i := 0; i < len(sidecarContainers); i++ { // udsink, udsource, udtransformer ... + sidecarContainers[i].Env = append(sidecarContainers[i].Env, mv.commonEnvs()...) + sidecarContainers[i].Env = append(sidecarContainers[i].Env, mv.sidecarEnvs()...) } + initContainers := []corev1.Container{} + initContainers = append(initContainers, mv.Spec.InitContainers...) + initContainers = append(initContainers, sidecarContainers...) + spec := &corev1.PodSpec{ Subdomain: mv.GetHeadlessServiceName(), Volumes: append(volumes, mv.Spec.Volumes...), - InitContainers: mv.Spec.InitContainers, + InitContainers: initContainers, Containers: append(containers, mv.Spec.Sidecars...), } mv.Spec.AbstractPodTemplate.ApplyToPodSpec(spec) @@ -458,26 +460,28 @@ func (mvspec MonoVertexSpec) DeepCopyWithoutReplicas() MonoVertexSpec { return x } -func (mvspec MonoVertexSpec) buildContainers(req getContainerReq) []corev1.Container { +// buildContainers builds the sidecar containers and main containers for the mono vertex. +func (mvspec MonoVertexSpec) buildContainers(req getContainerReq) ([]corev1.Container, []corev1.Container) { mainContainer := containerBuilder{}. init(req).command(NumaflowRustBinary).args("--rust").build() - containers := []corev1.Container{mainContainer} + + sidecarContainers := []corev1.Container{} if mvspec.Source.UDSource != nil { // Only support UDSource for now. - containers = append(containers, mvspec.Source.getUDSourceContainer(req)) + sidecarContainers = append(sidecarContainers, mvspec.Source.getUDSourceContainer(req)) } if mvspec.Source.UDTransformer != nil { - containers = append(containers, mvspec.Source.getUDTransformerContainer(req)) + sidecarContainers = append(sidecarContainers, mvspec.Source.getUDTransformerContainer(req)) } if mvspec.Sink.UDSink != nil { // Only support UDSink for now. 
- containers = append(containers, mvspec.Sink.getUDSinkContainer(req)) + sidecarContainers = append(sidecarContainers, mvspec.Sink.getUDSinkContainer(req)) } if mvspec.Sink.Fallback != nil { - containers = append(containers, mvspec.Sink.getFallbackUDSinkContainer(req)) + sidecarContainers = append(sidecarContainers, mvspec.Sink.getFallbackUDSinkContainer(req)) } // Fallback sink is not supported. - containers = append(containers, mvspec.Sidecars...) - return containers + sidecarContainers = append(sidecarContainers, mvspec.Sidecars...) + return sidecarContainers, containers } type MonoVertexLimits struct { diff --git a/pkg/apis/numaflow/v1alpha1/mono_vertex_types_test.go b/pkg/apis/numaflow/v1alpha1/mono_vertex_types_test.go index 98bb801003..db7bed1fb9 100644 --- a/pkg/apis/numaflow/v1alpha1/mono_vertex_types_test.go +++ b/pkg/apis/numaflow/v1alpha1/mono_vertex_types_test.go @@ -205,7 +205,8 @@ func TestMonoVertexGetPodSpec(t *testing.T) { } podSpec, err := testMvtx.GetPodSpec(req) assert.NoError(t, err) - assert.Equal(t, 4, len(podSpec.Containers)) + assert.Equal(t, 1, len(podSpec.Containers)) + assert.Equal(t, 3, len(podSpec.InitContainers)) assert.Equal(t, 1, len(podSpec.Volumes)) assert.Equal(t, "my-image", podSpec.Containers[0].Image) assert.Equal(t, corev1.PullIfNotPresent, podSpec.Containers[0].ImagePullPolicy) @@ -213,12 +214,15 @@ func TestMonoVertexGetPodSpec(t *testing.T) { assert.Equal(t, "200m", podSpec.Containers[0].Resources.Limits.Cpu().String()) assert.Equal(t, "100Mi", podSpec.Containers[0].Resources.Requests.Memory().String()) assert.Equal(t, "200Mi", podSpec.Containers[0].Resources.Limits.Memory().String()) - assert.Equal(t, "test-image1", podSpec.Containers[1].Image) - assert.Equal(t, "test-image2", podSpec.Containers[2].Image) - assert.Equal(t, "test-image3", podSpec.Containers[3].Image) + assert.Equal(t, "test-image1", podSpec.InitContainers[0].Image) + assert.Equal(t, "test-image2", podSpec.InitContainers[1].Image) + assert.Equal(t, 
"test-image3", podSpec.InitContainers[2].Image) for _, c := range podSpec.Containers { assert.Equal(t, 1, len(c.VolumeMounts)) } + for _, c := range podSpec.InitContainers { + assert.Equal(t, 1, len(c.VolumeMounts)) + } envNames := []string{} for _, env := range podSpec.Containers[0].Env { envNames = append(envNames, env.Name) diff --git a/pkg/apis/numaflow/v1alpha1/sink.go b/pkg/apis/numaflow/v1alpha1/sink.go index 1b37ef5dcf..dc66593e01 100644 --- a/pkg/apis/numaflow/v1alpha1/sink.go +++ b/pkg/apis/numaflow/v1alpha1/sink.go @@ -48,17 +48,18 @@ type AbstractSink struct { UDSink *UDSink `json:"udsink,omitempty" protobuf:"bytes,4,opt,name=udsink"` } -func (s Sink) getContainers(req getContainerReq) ([]corev1.Container, error) { +func (s Sink) getContainers(req getContainerReq) ([]corev1.Container, []corev1.Container, error) { containers := []corev1.Container{ s.getMainContainer(req), } + sidecarContainers := []corev1.Container{} if s.UDSink != nil { - containers = append(containers, s.getUDSinkContainer(req)) + sidecarContainers = append(sidecarContainers, s.getUDSinkContainer(req)) } if s.Fallback != nil && s.Fallback.UDSink != nil { - containers = append(containers, s.getFallbackUDSinkContainer(req)) + sidecarContainers = append(sidecarContainers, s.getFallbackUDSinkContainer(req)) } - return containers, nil + return sidecarContainers, containers, nil } func (s Sink) getMainContainer(req getContainerReq) corev1.Container { @@ -72,7 +73,7 @@ func (s Sink) getUDSinkContainer(mainContainerReq getContainerReq) corev1.Contai c := containerBuilder{}. name(CtrUdsink). imagePullPolicy(mainContainerReq.imagePullPolicy). // Use the same image pull policy as the main container - appendVolumeMounts(mainContainerReq.volumeMounts...) 
+ appendVolumeMounts(mainContainerReq.volumeMounts...).asSidecar() x := s.UDSink.Container c = c.image(x.Image) if len(x.Command) > 0 { @@ -107,7 +108,7 @@ func (s Sink) getFallbackUDSinkContainer(mainContainerReq getContainerReq) corev c := containerBuilder{}. name(CtrFallbackUdsink). imagePullPolicy(mainContainerReq.imagePullPolicy). // Use the same image pull policy as the main container - appendVolumeMounts(mainContainerReq.volumeMounts...) + appendVolumeMounts(mainContainerReq.volumeMounts...).asSidecar() x := s.Fallback.UDSink.Container c = c.image(x.Image) if len(x.Command) > 0 { diff --git a/pkg/apis/numaflow/v1alpha1/sink_test.go b/pkg/apis/numaflow/v1alpha1/sink_test.go index f63cb751bd..b15f9a166b 100644 --- a/pkg/apis/numaflow/v1alpha1/sink_test.go +++ b/pkg/apis/numaflow/v1alpha1/sink_test.go @@ -27,7 +27,7 @@ import ( func Test_Sink_getContainers(t *testing.T) { s := Sink{} - c, err := s.getContainers(getContainerReq{ + sc, c, err := s.getContainers(getContainerReq{ env: []corev1.EnvVar{ {Name: "test-env", Value: "test-val"}, }, @@ -37,6 +37,7 @@ func Test_Sink_getContainers(t *testing.T) { resources: corev1.ResourceRequirements{Requests: map[corev1.ResourceName]resource.Quantity{"cpu": resource.MustParse("2")}}, }) assert.NoError(t, err) + assert.Equal(t, 0, len(sc)) assert.Equal(t, 1, len(c)) assert.Equal(t, testFlowImage, c[0].Image) assert.Equal(t, corev1.ResourceRequirements{Requests: map[corev1.ResourceName]resource.Quantity{"cpu": resource.MustParse("2")}}, c[0].Resources) @@ -89,6 +90,7 @@ func Test_Sink_getUDSinkContainer(t *testing.T) { assert.Equal(t, int32(15), c.LivenessProbe.TimeoutSeconds) assert.Equal(t, int32(14), c.LivenessProbe.PeriodSeconds) assert.Equal(t, int32(5), c.LivenessProbe.FailureThreshold) + assert.Equal(t, ptr.To[corev1.ContainerRestartPolicy](corev1.ContainerRestartPolicyAlways), c.RestartPolicy) } func Test_Sink_getFallbackUDSinkContainer(t *testing.T) { @@ -151,4 +153,5 @@ func Test_Sink_getFallbackUDSinkContainer(t 
*testing.T) { }) assert.Equal(t, testImagePullPolicy, c.ImagePullPolicy) assert.True(t, c.LivenessProbe != nil) + assert.Equal(t, ptr.To[corev1.ContainerRestartPolicy](corev1.ContainerRestartPolicyAlways), c.RestartPolicy) } diff --git a/pkg/apis/numaflow/v1alpha1/source.go b/pkg/apis/numaflow/v1alpha1/source.go index c59d4c3c21..e1308cf28e 100644 --- a/pkg/apis/numaflow/v1alpha1/source.go +++ b/pkg/apis/numaflow/v1alpha1/source.go @@ -45,17 +45,18 @@ type Source struct { Serving *ServingSource `json:"serving,omitempty" protobuf:"bytes,8,opt,name=serving"` } -func (s Source) getContainers(req getContainerReq) ([]corev1.Container, error) { +func (s Source) getContainers(req getContainerReq) ([]corev1.Container, []corev1.Container, error) { containers := []corev1.Container{ s.getMainContainer(req), } + sidecarContainers := []corev1.Container{} if s.UDTransformer != nil { - containers = append(containers, s.getUDTransformerContainer(req)) + sidecarContainers = append(sidecarContainers, s.getUDTransformerContainer(req)) } if s.UDSource != nil { - containers = append(containers, s.getUDSourceContainer(req)) + sidecarContainers = append(sidecarContainers, s.getUDSourceContainer(req)) } - return containers, nil + return sidecarContainers, containers, nil } func (s Source) getMainContainer(req getContainerReq) corev1.Container { @@ -69,7 +70,7 @@ func (s Source) getUDTransformerContainer(mainContainerReq getContainerReq) core c := containerBuilder{}. name(CtrUdtransformer). imagePullPolicy(mainContainerReq.imagePullPolicy). // Use the same image pull policy as the main container - appendVolumeMounts(mainContainerReq.volumeMounts...) 
+ appendVolumeMounts(mainContainerReq.volumeMounts...).asSidecar() c = c.appendEnv(corev1.EnvVar{Name: EnvUDContainerType, Value: UDContainerTransformer}) if x := s.UDTransformer.Container; x != nil && x.Image != "" { // customized image c = c.image(x.Image) @@ -133,7 +134,7 @@ func (s Source) getUDSourceContainer(mainContainerReq getContainerReq) corev1.Co c := containerBuilder{}. name(CtrUdsource). imagePullPolicy(mainContainerReq.imagePullPolicy). // Use the same image pull policy as the main container - appendVolumeMounts(mainContainerReq.volumeMounts...) + appendVolumeMounts(mainContainerReq.volumeMounts...).asSidecar() c = c.appendEnv(corev1.EnvVar{Name: EnvUDContainerType, Value: UDContainerSource}) if x := s.UDSource.Container; x != nil && x.Image != "" { // customized image c = c.image(x.Image) diff --git a/pkg/apis/numaflow/v1alpha1/source_test.go b/pkg/apis/numaflow/v1alpha1/source_test.go index 01f26f2cd6..9e816896a0 100644 --- a/pkg/apis/numaflow/v1alpha1/source_test.go +++ b/pkg/apis/numaflow/v1alpha1/source_test.go @@ -77,60 +77,64 @@ func TestSource_getContainers(t *testing.T) { }, }, } - c, err := x.getContainers(getContainerReq{ + sc, c, err := x.getContainers(getContainerReq{ image: "main-image", }) assert.NoError(t, err) - assert.Equal(t, 3, len(c)) + assert.Equal(t, 2, len(sc)) + assert.Equal(t, 1, len(c)) assert.Equal(t, "main-image", c[0].Image) - assert.Equal(t, x.UDSource.Container.Image, c[2].Image) - assert.Contains(t, c[2].VolumeMounts, c[2].VolumeMounts[0]) - assert.Equal(t, x.UDSource.Container.Command, c[2].Command) - assert.Equal(t, x.UDSource.Container.Args, c[2].Args) + assert.Equal(t, x.UDSource.Container.Image, sc[1].Image) + assert.Contains(t, sc[1].VolumeMounts, sc[1].VolumeMounts[0]) + assert.Equal(t, x.UDSource.Container.Command, sc[1].Command) + assert.Equal(t, x.UDSource.Container.Args, sc[1].Args) envsUDSource := map[string]string{} - for _, e := range c[2].Env { + for _, e := range sc[1].Env { envsUDSource[e.Name] = 
e.Value } assert.Equal(t, envsUDSource[EnvUDContainerType], UDContainerSource) - assert.Equal(t, x.UDSource.Container.EnvFrom, c[2].EnvFrom) - assert.Equal(t, corev1.ResourceRequirements{Requests: map[corev1.ResourceName]resource.Quantity{"cpu": resource.MustParse("2")}}, c[2].Resources) - assert.Equal(t, c[0].ImagePullPolicy, c[2].ImagePullPolicy) - assert.NotNil(t, c[1].LivenessProbe) - assert.Equal(t, int32(10), c[2].LivenessProbe.InitialDelaySeconds) - assert.Equal(t, int32(15), c[2].LivenessProbe.TimeoutSeconds) - assert.Equal(t, int32(14), c[2].LivenessProbe.PeriodSeconds) - assert.Equal(t, int32(5), c[2].LivenessProbe.FailureThreshold) + assert.Equal(t, x.UDSource.Container.EnvFrom, sc[1].EnvFrom) + assert.Equal(t, corev1.ResourceRequirements{Requests: map[corev1.ResourceName]resource.Quantity{"cpu": resource.MustParse("2")}}, sc[1].Resources) + assert.Equal(t, c[0].ImagePullPolicy, sc[1].ImagePullPolicy) + assert.NotNil(t, sc[0].LivenessProbe) + assert.Equal(t, int32(10), sc[1].LivenessProbe.InitialDelaySeconds) + assert.Equal(t, int32(15), sc[1].LivenessProbe.TimeoutSeconds) + assert.Equal(t, int32(14), sc[1].LivenessProbe.PeriodSeconds) + assert.Equal(t, int32(5), sc[1].LivenessProbe.FailureThreshold) x.UDSource.Container.ImagePullPolicy = &testImagePullPolicy - c, _ = x.getContainers(getContainerReq{ + assert.Equal(t, ptr.To[corev1.ContainerRestartPolicy](corev1.ContainerRestartPolicyAlways), sc[0].RestartPolicy) + assert.Equal(t, ptr.To[corev1.ContainerRestartPolicy](corev1.ContainerRestartPolicyAlways), sc[1].RestartPolicy) + sc, c, _ = x.getContainers(getContainerReq{ image: "main-image", imagePullPolicy: corev1.PullAlways, }) - assert.Equal(t, testImagePullPolicy, c[2].ImagePullPolicy) + assert.Equal(t, testImagePullPolicy, sc[1].ImagePullPolicy) - assert.Equal(t, x.UDTransformer.Container.Image, c[1].Image) - assert.Contains(t, c[1].VolumeMounts, c[1].VolumeMounts[0]) - assert.Equal(t, x.UDTransformer.Container.Command, c[1].Command) - 
assert.Equal(t, x.UDTransformer.Container.Args, c[1].Args) + assert.Equal(t, x.UDTransformer.Container.Image, sc[0].Image) + assert.Contains(t, sc[0].VolumeMounts, sc[0].VolumeMounts[0]) + assert.Equal(t, x.UDTransformer.Container.Command, sc[0].Command) + assert.Equal(t, x.UDTransformer.Container.Args, sc[0].Args) envs := map[string]string{} - for _, e := range c[1].Env { + for _, e := range sc[0].Env { envs[e.Name] = e.Value } assert.Equal(t, envs[EnvUDContainerType], UDContainerTransformer) - assert.Equal(t, x.UDTransformer.Container.EnvFrom, c[1].EnvFrom) - assert.Equal(t, corev1.ResourceRequirements{Requests: map[corev1.ResourceName]resource.Quantity{"cpu": resource.MustParse("2")}}, c[1].Resources) - assert.Equal(t, c[0].ImagePullPolicy, c[1].ImagePullPolicy) - assert.NotNil(t, c[1].LivenessProbe) - assert.Equal(t, int32(20), c[1].LivenessProbe.InitialDelaySeconds) - assert.Equal(t, int32(25), c[1].LivenessProbe.TimeoutSeconds) - assert.Equal(t, int32(24), c[1].LivenessProbe.PeriodSeconds) - assert.Equal(t, int32(5), c[1].LivenessProbe.FailureThreshold) + assert.Equal(t, x.UDTransformer.Container.EnvFrom, sc[0].EnvFrom) + assert.Equal(t, corev1.ResourceRequirements{Requests: map[corev1.ResourceName]resource.Quantity{"cpu": resource.MustParse("2")}}, sc[0].Resources) + assert.Equal(t, c[0].ImagePullPolicy, sc[0].ImagePullPolicy) + assert.NotNil(t, sc[0].LivenessProbe) + assert.Equal(t, int32(20), sc[0].LivenessProbe.InitialDelaySeconds) + assert.Equal(t, int32(25), sc[0].LivenessProbe.TimeoutSeconds) + assert.Equal(t, int32(24), sc[0].LivenessProbe.PeriodSeconds) + assert.Equal(t, int32(5), sc[0].LivenessProbe.FailureThreshold) x.UDTransformer.Container.ImagePullPolicy = &testImagePullPolicy - c, _ = x.getContainers(getContainerReq{ + sc, c, _ = x.getContainers(getContainerReq{ image: "main-image", imagePullPolicy: corev1.PullAlways, }) - assert.Equal(t, testImagePullPolicy, c[1].ImagePullPolicy) + assert.Equal(t, corev1.PullAlways, c[0].ImagePullPolicy) + 
assert.Equal(t, testImagePullPolicy, sc[0].ImagePullPolicy) } func Test_getTransformerContainer(t *testing.T) { diff --git a/pkg/apis/numaflow/v1alpha1/udf.go b/pkg/apis/numaflow/v1alpha1/udf.go index 6f5f18ff49..573ddcbca1 100644 --- a/pkg/apis/numaflow/v1alpha1/udf.go +++ b/pkg/apis/numaflow/v1alpha1/udf.go @@ -45,8 +45,8 @@ type UDF struct { GroupBy *GroupBy `json:"groupBy" protobuf:"bytes,3,opt,name=groupBy"` } -func (in UDF) getContainers(req getContainerReq) ([]corev1.Container, error) { - return []corev1.Container{in.getMainContainer(req), in.getUDFContainer(req)}, nil +func (in UDF) getContainers(req getContainerReq) ([]corev1.Container, []corev1.Container, error) { + return []corev1.Container{in.getUDFContainer(req)}, []corev1.Container{in.getMainContainer(req)}, nil } func (in UDF) getMainContainer(req getContainerReq) corev1.Container { @@ -63,7 +63,7 @@ func (in UDF) getUDFContainer(mainContainerReq getContainerReq) corev1.Container c := containerBuilder{}. name(CtrUdf). imagePullPolicy(mainContainerReq.imagePullPolicy). // Use the same image pull policy as main container - appendVolumeMounts(mainContainerReq.volumeMounts...) 
+ appendVolumeMounts(mainContainerReq.volumeMounts...).asSidecar() if x := in.Container; x != nil && x.Image != "" { // customized image c = c.image(x.Image) if len(x.Command) > 0 { diff --git a/pkg/apis/numaflow/v1alpha1/udf_test.go b/pkg/apis/numaflow/v1alpha1/udf_test.go index 0bb8e3862a..79eea01fc8 100644 --- a/pkg/apis/numaflow/v1alpha1/udf_test.go +++ b/pkg/apis/numaflow/v1alpha1/udf_test.go @@ -49,36 +49,39 @@ func TestUDF_getContainers(t *testing.T) { }, }, } - c, err := x.getContainers(getContainerReq{ + sc, c, err := x.getContainers(getContainerReq{ image: "main-image", imagePullPolicy: corev1.PullAlways, }) assert.NoError(t, err) - assert.Equal(t, 2, len(c)) + assert.Equal(t, 1, len(c)) + assert.Equal(t, 1, len(sc)) assert.Equal(t, "main-image", c[0].Image) - assert.Equal(t, x.Container.Image, c[1].Image) - assert.Contains(t, c[1].VolumeMounts, c[1].VolumeMounts[0]) - assert.Equal(t, x.Container.Command, c[1].Command) - assert.Equal(t, x.Container.Args, c[1].Args) + assert.Equal(t, x.Container.Image, sc[0].Image) + assert.Contains(t, sc[0].VolumeMounts, sc[0].VolumeMounts[0]) + assert.Equal(t, x.Container.Command, sc[0].Command) + assert.Equal(t, x.Container.Args, sc[0].Args) envs := map[string]string{} - for _, e := range c[1].Env { + for _, e := range sc[0].Env { envs[e.Name] = e.Value } assert.Equal(t, envs[EnvUDContainerType], UDContainerFunction) - assert.Equal(t, 1, len(c[1].EnvFrom)) - assert.Equal(t, corev1.ResourceRequirements{Requests: map[corev1.ResourceName]resource.Quantity{"cpu": resource.MustParse("2")}}, c[1].Resources) - assert.Equal(t, corev1.PullAlways, c[1].ImagePullPolicy) + assert.Equal(t, 1, len(sc[0].EnvFrom)) + assert.Equal(t, corev1.ResourceRequirements{Requests: map[corev1.ResourceName]resource.Quantity{"cpu": resource.MustParse("2")}}, sc[0].Resources) + assert.Equal(t, corev1.PullAlways, sc[0].ImagePullPolicy) x.Container.ImagePullPolicy = &testImagePullPolicy - c, _ = x.getContainers(getContainerReq{ + sc, c, _ = 
x.getContainers(getContainerReq{ image: "main-image", imagePullPolicy: corev1.PullAlways, }) - assert.Equal(t, testImagePullPolicy, c[1].ImagePullPolicy) - assert.True(t, c[1].LivenessProbe != nil) - assert.Equal(t, int32(10), c[1].LivenessProbe.InitialDelaySeconds) - assert.Equal(t, int32(15), c[1].LivenessProbe.TimeoutSeconds) - assert.Equal(t, int32(14), c[1].LivenessProbe.PeriodSeconds) - assert.Equal(t, int32(5), c[1].LivenessProbe.FailureThreshold) + assert.Equal(t, 1, len(c)) + assert.Equal(t, 1, len(sc)) + assert.Equal(t, testImagePullPolicy, sc[0].ImagePullPolicy) + assert.True(t, sc[0].LivenessProbe != nil) + assert.Equal(t, int32(10), sc[0].LivenessProbe.InitialDelaySeconds) + assert.Equal(t, int32(15), sc[0].LivenessProbe.TimeoutSeconds) + assert.Equal(t, int32(14), sc[0].LivenessProbe.PeriodSeconds) + assert.Equal(t, int32(5), sc[0].LivenessProbe.FailureThreshold) } func Test_getUDFContainer(t *testing.T) { @@ -110,6 +113,7 @@ func Test_getUDFContainer(t *testing.T) { envs[e.Name] = e.Value } assert.Equal(t, envs[EnvUDContainerType], UDContainerFunction) + assert.Equal(t, ptr.To[corev1.ContainerRestartPolicy](corev1.ContainerRestartPolicyAlways), c.RestartPolicy) }) t.Run("with built-in functions", func(t *testing.T) { diff --git a/pkg/apis/numaflow/v1alpha1/vertex_types.go b/pkg/apis/numaflow/v1alpha1/vertex_types.go index e45c168bdf..ccd7ecd5d6 100644 --- a/pkg/apis/numaflow/v1alpha1/vertex_types.go +++ b/pkg/apis/numaflow/v1alpha1/vertex_types.go @@ -242,7 +242,7 @@ func (v Vertex) GetPodSpec(req GetVertexPodSpecReq) (*corev1.PodSpec, error) { } volumeMounts := []corev1.VolumeMount{{Name: varVolumeName, MountPath: PathVarRun}} executeRustBinary, _ := env.GetBool(EnvExecuteRustBinary, false) - containers, err := v.Spec.getType().getContainers(getContainerReq{ + sidecarContainers, containers, err := v.Spec.getType().getContainers(getContainerReq{ isbSvcType: req.ISBSvcType, env: envVars, image: req.Image, @@ -251,7 +251,6 @@ func (v Vertex) 
GetPodSpec(req GetVertexPodSpecReq) (*corev1.PodSpec, error) { volumeMounts: volumeMounts, executeRustBinary: executeRustBinary, }) - if err != nil { return nil, err } @@ -299,11 +298,9 @@ func (v Vertex) GetPodSpec(req GetVertexPodSpecReq) (*corev1.PodSpec, error) { {Name: VertexMetricsPortName, ContainerPort: VertexMetricsPort}, } - if len(containers) > 1 { // udf, udsink, udsource, or source vertex specifies a udtransformer - for i := 1; i < len(containers); i++ { - containers[i].Env = append(containers[i].Env, v.commonEnvs()...) - containers[i].Env = append(containers[i].Env, v.sidecarEnvs()...) - } + for i := 0; i < len(sidecarContainers); i++ { // udf, udsink, udsource, or source vertex specifies a udtransformer + sidecarContainers[i].Env = append(sidecarContainers[i].Env, v.commonEnvs()...) + sidecarContainers[i].Env = append(sidecarContainers[i].Env, v.sidecarEnvs()...) } initContainers := v.getInitContainers(req) @@ -327,19 +324,19 @@ func (v Vertex) GetPodSpec(req GetVertexPodSpecReq) (*corev1.PodSpec, error) { if x := v.Spec.SideInputsContainerTemplate; x != nil { x.ApplyToContainer(&sideInputsWatcher) } + sideInputsWatcher.VolumeMounts = append(sideInputsWatcher.VolumeMounts, corev1.VolumeMount{Name: sideInputsVolName, MountPath: PathSideInputsMount}) containers = append(containers, sideInputsWatcher) - for i := 1; i < len(containers); i++ { - if containers[i].Name == CtrSideInputsWatcher { - containers[i].VolumeMounts = append(containers[i].VolumeMounts, corev1.VolumeMount{Name: sideInputsVolName, MountPath: PathSideInputsMount}) - } else { - // Readonly mount for user-defined containers - containers[i].VolumeMounts = append(containers[i].VolumeMounts, corev1.VolumeMount{Name: sideInputsVolName, MountPath: PathSideInputsMount, ReadOnly: true}) - } + for i := 0; i < len(sidecarContainers); i++ { + // Readonly mount for user-defined containers + sidecarContainers[i].VolumeMounts = append(sidecarContainers[i].VolumeMounts, corev1.VolumeMount{Name: 
sideInputsVolName, MountPath: PathSideInputsMount, ReadOnly: true}) } // Side Inputs init container initContainers[1].VolumeMounts = append(initContainers[1].VolumeMounts, corev1.VolumeMount{Name: sideInputsVolName, MountPath: PathSideInputsMount}) } + // Add the sidecar containers + initContainers = append(initContainers, sidecarContainers...) + if v.IsASource() && v.Spec.Source.Serving != nil { servingContainer, err := v.getServingContainer(req) if err != nil { diff --git a/pkg/apis/numaflow/v1alpha1/vertex_types_test.go b/pkg/apis/numaflow/v1alpha1/vertex_types_test.go index 83384cf624..fde6914253 100644 --- a/pkg/apis/numaflow/v1alpha1/vertex_types_test.go +++ b/pkg/apis/numaflow/v1alpha1/vertex_types_test.go @@ -358,14 +358,15 @@ func TestGetPodSpec(t *testing.T) { } s, err := testObj.GetPodSpec(req) assert.NoError(t, err) - assert.Equal(t, 2, len(s.Containers)) - assert.Equal(t, "image", s.Containers[1].Image) - assert.Equal(t, 1, len(s.Containers[1].Command)) - assert.Equal(t, "cmd", s.Containers[1].Command[0]) - assert.Equal(t, 1, len(s.Containers[1].Args)) - assert.Equal(t, "arg0", s.Containers[1].Args[0]) + assert.Equal(t, 1, len(s.Containers)) + assert.Equal(t, 2, len(s.InitContainers)) + assert.Equal(t, "image", s.InitContainers[1].Image) + assert.Equal(t, 1, len(s.InitContainers[1].Command)) + assert.Equal(t, "cmd", s.InitContainers[1].Command[0]) + assert.Equal(t, 1, len(s.InitContainers[1].Args)) + assert.Equal(t, "arg0", s.InitContainers[1].Args[0]) var sidecarEnvNames []string - for _, env := range s.Containers[1].Env { + for _, env := range s.InitContainers[1].Env { sidecarEnvNames = append(sidecarEnvNames, env.Name) } assert.Contains(t, sidecarEnvNames, EnvCPULimit) @@ -394,16 +395,17 @@ func TestGetPodSpec(t *testing.T) { } s, err := testObj.GetPodSpec(req) assert.NoError(t, err) - assert.Equal(t, 3, len(s.Containers)) - - for i := 1; i < len(s.Containers); i++ { - assert.Equal(t, "image", s.Containers[i].Image) - assert.Equal(t, 1, 
len(s.Containers[i].Command)) - assert.Equal(t, "cmd", s.Containers[i].Command[0]) - assert.Equal(t, 1, len(s.Containers[i].Args)) - assert.Equal(t, "arg0", s.Containers[i].Args[0]) + assert.Equal(t, 1, len(s.Containers)) + assert.Equal(t, 3, len(s.InitContainers)) + + for i := 1; i < len(s.InitContainers); i++ { + assert.Equal(t, "image", s.InitContainers[i].Image) + assert.Equal(t, 1, len(s.InitContainers[i].Command)) + assert.Equal(t, "cmd", s.InitContainers[i].Command[0]) + assert.Equal(t, 1, len(s.InitContainers[i].Args)) + assert.Equal(t, "arg0", s.InitContainers[i].Args[0]) var sidecarEnvNames []string - for _, env := range s.Containers[i].Env { + for _, env := range s.InitContainers[i].Env { sidecarEnvNames = append(sidecarEnvNames, env.Name) } assert.Contains(t, sidecarEnvNames, EnvCPULimit) @@ -422,9 +424,10 @@ func TestGetPodSpec(t *testing.T) { } s, err := testObj.GetPodSpec(req) assert.NoError(t, err) - assert.Equal(t, 2, len(s.Containers)) + assert.Equal(t, 1, len(s.Containers)) + assert.Equal(t, 2, len(s.InitContainers)) assert.Equal(t, CtrMain, s.Containers[0].Name) - assert.Equal(t, CtrUdf, s.Containers[1].Name) + assert.Equal(t, CtrUdf, s.InitContainers[1].Name) assert.Equal(t, testFlowImage, s.Containers[0].Image) assert.Equal(t, corev1.PullIfNotPresent, s.Containers[0].ImagePullPolicy) var envNames []string @@ -440,10 +443,11 @@ func TestGetPodSpec(t *testing.T) { assert.Contains(t, envNames, EnvReplica) assert.Contains(t, s.Containers[0].Args, "processor") assert.Contains(t, s.Containers[0].Args, "--type="+string(VertexTypeMapUDF)) - assert.Equal(t, 1, len(s.InitContainers)) + assert.Equal(t, 2, len(s.InitContainers)) assert.Equal(t, CtrInit, s.InitContainers[0].Name) + assert.Equal(t, CtrUdf, s.InitContainers[1].Name) var sidecarEnvNames []string - for _, env := range s.Containers[1].Env { + for _, env := range s.InitContainers[1].Env { sidecarEnvNames = append(sidecarEnvNames, env.Name) } assert.Contains(t, sidecarEnvNames, EnvCPULimit) @@ 
-462,13 +466,22 @@ func TestGetPodSpec(t *testing.T) { } s, err := testObj.GetPodSpec(req) assert.NoError(t, err) - assert.Equal(t, 3, len(s.Containers)) + assert.Equal(t, 2, len(s.Containers)) assert.Equal(t, CtrMain, s.Containers[0].Name) - assert.Equal(t, CtrUdf, s.Containers[1].Name) - assert.Equal(t, CtrSideInputsWatcher, s.Containers[2].Name) - assert.Equal(t, 2, len(s.InitContainers)) + assert.Equal(t, CtrSideInputsWatcher, s.Containers[1].Name) + assert.Equal(t, 3, len(s.InitContainers)) assert.Equal(t, CtrInit, s.InitContainers[0].Name) assert.Equal(t, CtrInitSideInputs, s.InitContainers[1].Name) + assert.Equal(t, 1, len(s.InitContainers[1].VolumeMounts)) + assert.Equal(t, "var-run-side-inputs", s.InitContainers[1].VolumeMounts[0].Name) + assert.False(t, s.InitContainers[1].VolumeMounts[0].ReadOnly) + assert.Equal(t, CtrUdf, s.InitContainers[2].Name) + assert.Equal(t, 2, len(s.InitContainers[2].VolumeMounts)) + assert.Equal(t, "var-run-side-inputs", s.InitContainers[2].VolumeMounts[1].Name) + assert.True(t, s.InitContainers[2].VolumeMounts[1].ReadOnly) + assert.Equal(t, 1, len(s.Containers[1].VolumeMounts)) + assert.Equal(t, "var-run-side-inputs", s.Containers[1].VolumeMounts[0].Name) + assert.False(t, s.Containers[1].VolumeMounts[0].ReadOnly) }) t.Run("test serving source", func(t *testing.T) { diff --git a/pkg/reconciler/monovertex/controller_test.go b/pkg/reconciler/monovertex/controller_test.go index c9c5c003c2..6cbc986c13 100644 --- a/pkg/reconciler/monovertex/controller_test.go +++ b/pkg/reconciler/monovertex/controller_test.go @@ -164,13 +164,13 @@ func Test_BuildPodSpec(t *testing.T) { testObj := testMonoVtx.DeepCopy() spec, err := r.buildPodSpec(testObj) assert.NoError(t, err) - assert.Equal(t, 5, len(spec.Containers)) + assert.Equal(t, 1, len(spec.Containers)) assert.Equal(t, dfv1.CtrMain, spec.Containers[0].Name) - assert.Equal(t, dfv1.CtrUdsource, spec.Containers[1].Name) - assert.Equal(t, dfv1.CtrUdtransformer, spec.Containers[2].Name) - 
assert.Equal(t, dfv1.CtrUdsink, spec.Containers[3].Name) - assert.Equal(t, dfv1.CtrFallbackUdsink, spec.Containers[4].Name) - assert.Equal(t, 0, len(spec.InitContainers)) + assert.Equal(t, 4, len(spec.InitContainers)) + assert.Equal(t, dfv1.CtrUdsource, spec.InitContainers[0].Name) + assert.Equal(t, dfv1.CtrUdtransformer, spec.InitContainers[1].Name) + assert.Equal(t, dfv1.CtrUdsink, spec.InitContainers[2].Name) + assert.Equal(t, dfv1.CtrFallbackUdsink, spec.InitContainers[3].Name) }) t.Run("test no transformer, no fallback sink", func(t *testing.T) { @@ -179,11 +179,11 @@ func Test_BuildPodSpec(t *testing.T) { testObj.Spec.Sink.Fallback = nil spec, err := r.buildPodSpec(testObj) assert.NoError(t, err) - assert.Equal(t, 3, len(spec.Containers)) + assert.Equal(t, 1, len(spec.Containers)) assert.Equal(t, dfv1.CtrMain, spec.Containers[0].Name) - assert.Equal(t, dfv1.CtrUdsource, spec.Containers[1].Name) - assert.Equal(t, dfv1.CtrUdsink, spec.Containers[2].Name) - assert.Equal(t, 0, len(spec.InitContainers)) + assert.Equal(t, 2, len(spec.InitContainers)) + assert.Equal(t, dfv1.CtrUdsource, spec.InitContainers[0].Name) + assert.Equal(t, dfv1.CtrUdsink, spec.InitContainers[1].Name) }) } diff --git a/pkg/reconciler/vertex/controller_test.go b/pkg/reconciler/vertex/controller_test.go index 4a2faa56b0..51a2540c32 100644 --- a/pkg/reconciler/vertex/controller_test.go +++ b/pkg/reconciler/vertex/controller_test.go @@ -273,8 +273,8 @@ func Test_BuildPodSpec(t *testing.T) { } spec, err := r.buildPodSpec(testObj, testPipeline, fakeIsbSvcConfig, 2) assert.NoError(t, err) - assert.Equal(t, 1, len(spec.InitContainers)) - assert.Equal(t, 2, len(spec.Containers)) + assert.Equal(t, 2, len(spec.InitContainers)) + assert.Equal(t, 1, len(spec.Containers)) }) t.Run("test user-defined source with transformer", func(t *testing.T) { @@ -295,8 +295,8 @@ func Test_BuildPodSpec(t *testing.T) { } spec, err := r.buildPodSpec(testObj, testPipeline, fakeIsbSvcConfig, 2) assert.NoError(t, err) - 
assert.Equal(t, 1, len(spec.InitContainers)) - assert.Equal(t, 3, len(spec.Containers)) + assert.Equal(t, 3, len(spec.InitContainers)) + assert.Equal(t, 1, len(spec.Containers)) }) t.Run("test sink", func(t *testing.T) { @@ -351,13 +351,13 @@ func Test_BuildPodSpec(t *testing.T) { testObj.Spec.ToEdges = []dfv1.CombinedEdge{} spec, err := r.buildPodSpec(testObj, testPipeline, fakeIsbSvcConfig, 0) assert.NoError(t, err) - assert.Equal(t, 1, len(spec.InitContainers)) - assert.Equal(t, 2, len(spec.Containers)) - assert.Equal(t, "image", spec.Containers[1].Image) - assert.Equal(t, 1, len(spec.Containers[1].Command)) - assert.Equal(t, "cmd", spec.Containers[1].Command[0]) - assert.Equal(t, 1, len(spec.Containers[1].Args)) - assert.Equal(t, "arg0", spec.Containers[1].Args[0]) + assert.Equal(t, 2, len(spec.InitContainers)) + assert.Equal(t, 1, len(spec.Containers)) + assert.Equal(t, "image", spec.InitContainers[1].Image) + assert.Equal(t, 1, len(spec.InitContainers[1].Command)) + assert.Equal(t, "cmd", spec.InitContainers[1].Command[0]) + assert.Equal(t, 1, len(spec.InitContainers[1].Args)) + assert.Equal(t, "arg0", spec.InitContainers[1].Args[0]) }) t.Run("test map udf", func(t *testing.T) { @@ -371,8 +371,8 @@ func Test_BuildPodSpec(t *testing.T) { } spec, err := r.buildPodSpec(testObj, testPipeline, fakeIsbSvcConfig, 0) assert.NoError(t, err) - assert.Equal(t, 1, len(spec.InitContainers)) - assert.Equal(t, 2, len(spec.Containers)) + assert.Equal(t, 2, len(spec.InitContainers)) + assert.Equal(t, 1, len(spec.Containers)) var envNames []string for _, e := range spec.Containers[0].Env { envNames = append(envNames, e.Name) @@ -410,8 +410,8 @@ func Test_BuildPodSpec(t *testing.T) { } spec, err := r.buildPodSpec(testObj, testPipeline, fakeIsbSvcConfig, 2) assert.NoError(t, err) - assert.Equal(t, 1, len(spec.InitContainers)) - assert.Equal(t, 2, len(spec.Containers)) + assert.Equal(t, 2, len(spec.InitContainers)) + assert.Equal(t, 1, len(spec.Containers)) containsPVC := false 
containsPVCMount := false for _, v := range spec.Volumes { @@ -509,7 +509,8 @@ func Test_reconcile(t *testing.T) { assert.NoError(t, err) assert.Equal(t, 1, len(pods.Items)) assert.True(t, strings.HasPrefix(pods.Items[0].Name, testVertexName+"-0-")) - assert.Equal(t, 2, len(pods.Items[0].Spec.Containers)) + assert.Equal(t, 1, len(pods.Items[0].Spec.Containers)) + assert.Equal(t, 2, len(pods.Items[0].Spec.InitContainers)) svcs := &corev1.ServiceList{} err = r.client.List(ctx, svcs, &client.ListOptions{Namespace: testNamespace, LabelSelector: selector}) assert.NoError(t, err) @@ -573,7 +574,8 @@ func Test_reconcile(t *testing.T) { assert.NoError(t, err) assert.Equal(t, 1, len(pods.Items)) assert.True(t, strings.HasPrefix(pods.Items[0].Name, testVertexName+"-0-")) - assert.Equal(t, 2, len(pods.Items[0].Spec.Containers)) + assert.Equal(t, 1, len(pods.Items[0].Spec.Containers)) + assert.Equal(t, 2, len(pods.Items[0].Spec.InitContainers)) }) t.Run("test reconcile reduce udf", func(t *testing.T) { @@ -616,7 +618,8 @@ func Test_reconcile(t *testing.T) { assert.NoError(t, err) assert.Equal(t, 1, len(pods.Items)) assert.True(t, strings.HasPrefix(pods.Items[0].Name, testVertexName+"-0-")) - assert.Equal(t, 2, len(pods.Items[0].Spec.Containers)) + assert.Equal(t, 1, len(pods.Items[0].Spec.Containers)) + assert.Equal(t, 2, len(pods.Items[0].Spec.InitContainers)) pvc := &corev1.PersistentVolumeClaim{} err = r.client.Get(ctx, types.NamespacedName{Name: dfv1.GeneratePBQStoragePVCName(testPl.Name, testObj.Spec.Name, 0), Namespace: testNamespace}, pvc) assert.NoError(t, err) @@ -715,8 +718,8 @@ func Test_reconcile(t *testing.T) { assert.NoError(t, err) assert.Equal(t, 1, len(pods.Items)) assert.True(t, strings.HasPrefix(pods.Items[0].Name, testVertexName+"-0-")) - assert.Equal(t, 3, len(pods.Items[0].Spec.Containers)) - assert.Equal(t, 2, len(pods.Items[0].Spec.InitContainers)) + assert.Equal(t, 2, len(pods.Items[0].Spec.Containers)) + assert.Equal(t, 3, 
len(pods.Items[0].Spec.InitContainers)) }) t.Run("test reconcile rolling update", func(t *testing.T) { From ae05a7cb5cb3a1b37aeb4a63f25a67a19890463d Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Fri, 22 Nov 2024 10:07:04 -0800 Subject: [PATCH 147/188] chore: make sidecar container change backward compatible (#2236) Signed-off-by: Derek Wang --- pkg/apis/numaflow/v1alpha1/const.go | 2 ++ .../numaflow/v1alpha1/container_builder.go | 4 +++ pkg/apis/numaflow/v1alpha1/deprecated.go | 33 +++++++++++++++++++ .../numaflow/v1alpha1/mono_vertex_types.go | 7 +++- .../numaflow/v1alpha1/pipeline_types_test.go | 3 +- pkg/apis/numaflow/v1alpha1/side_inputs.go | 21 ++++++++---- .../numaflow/v1alpha1/side_inputs_test.go | 6 ++-- pkg/apis/numaflow/v1alpha1/vertex_types.go | 7 +++- pkg/reconciler/cmd/start.go | 11 +++++++ pkg/reconciler/isbsvc/controller.go | 2 +- pkg/reconciler/pipeline/controller.go | 2 +- .../testdata/map-sideinput-pipeline.yaml | 2 ++ 12 files changed, 87 insertions(+), 13 deletions(-) create mode 100644 pkg/apis/numaflow/v1alpha1/deprecated.go diff --git a/pkg/apis/numaflow/v1alpha1/const.go b/pkg/apis/numaflow/v1alpha1/const.go index 2deddd477c..8f747f91ef 100644 --- a/pkg/apis/numaflow/v1alpha1/const.go +++ b/pkg/apis/numaflow/v1alpha1/const.go @@ -153,6 +153,8 @@ const ( EnvServingStoreTTL = "NUMAFLOW_SERVING_STORE_TTL" EnvExecuteRustBinary = "NUMAFLOW_EXECUTE_RUST_BINARY" + EnvK8sServerVersion = "K8S_SERVER_VERSION" + PathVarRun = "/var/run/numaflow" VertexMetricsPort = 2469 VertexMetricsPortName = "metrics" diff --git a/pkg/apis/numaflow/v1alpha1/container_builder.go b/pkg/apis/numaflow/v1alpha1/container_builder.go index 0751239d66..cc386eb210 100644 --- a/pkg/apis/numaflow/v1alpha1/container_builder.go +++ b/pkg/apis/numaflow/v1alpha1/container_builder.go @@ -89,6 +89,10 @@ func (b containerBuilder) resources(x corev1.ResourceRequirements) containerBuil } func (b containerBuilder) asSidecar() containerBuilder { + // TODO: (k8s 1.29) clean this up once 
we deprecate the support for k8s < 1.29 + if !isSidecarSupported() { + return b + } b.RestartPolicy = ptr.To[corev1.ContainerRestartPolicy](corev1.ContainerRestartPolicyAlways) return b } diff --git a/pkg/apis/numaflow/v1alpha1/deprecated.go b/pkg/apis/numaflow/v1alpha1/deprecated.go new file mode 100644 index 0000000000..9fd3152115 --- /dev/null +++ b/pkg/apis/numaflow/v1alpha1/deprecated.go @@ -0,0 +1,33 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "os" + "strconv" +) + +// TODO: (k8s 1.29) Remove this once we deprecate the support for k8s < 1.29 +func isSidecarSupported() bool { + v := os.Getenv(EnvK8sServerVersion) + if v == "" { + return true // default to true if the env var is not found + } + // e.g. 1.31 + k8sVersion, _ := strconv.ParseFloat(v, 32) + return k8sVersion >= 1.29 +} diff --git a/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go index a167b56cc1..2fee030203 100644 --- a/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go +++ b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go @@ -398,7 +398,12 @@ func (mv MonoVertex) GetPodSpec(req GetMonoVertexPodSpecReq) (*corev1.PodSpec, e initContainers := []corev1.Container{} initContainers = append(initContainers, mv.Spec.InitContainers...) - initContainers = append(initContainers, sidecarContainers...) 
+ // TODO: (k8s 1.29) clean this up once we deprecate the support for k8s < 1.29 + if isSidecarSupported() { + initContainers = append(initContainers, sidecarContainers...) + } else { + containers = append(containers, sidecarContainers...) + } spec := &corev1.PodSpec{ Subdomain: mv.GetHeadlessServiceName(), diff --git a/pkg/apis/numaflow/v1alpha1/pipeline_types_test.go b/pkg/apis/numaflow/v1alpha1/pipeline_types_test.go index ef3884fe18..6a1c90566b 100644 --- a/pkg/apis/numaflow/v1alpha1/pipeline_types_test.go +++ b/pkg/apis/numaflow/v1alpha1/pipeline_types_test.go @@ -507,7 +507,8 @@ func Test_GetSideInputManagerDeployments(t *testing.T) { deployments, err := testObj.GetSideInputsManagerDeployments(testGetSideInputDeploymentReq) assert.Nil(t, err) assert.Equal(t, 1, len(deployments)) - assert.Equal(t, 2, len(deployments[0].Spec.Template.Spec.Containers)) + assert.Equal(t, 1, len(deployments[0].Spec.Template.Spec.Containers)) + assert.Equal(t, 2, len(deployments[0].Spec.Template.Spec.InitContainers)) }) } diff --git a/pkg/apis/numaflow/v1alpha1/side_inputs.go b/pkg/apis/numaflow/v1alpha1/side_inputs.go index f3eea9bef4..f9606dac5c 100644 --- a/pkg/apis/numaflow/v1alpha1/side_inputs.go +++ b/pkg/apis/numaflow/v1alpha1/side_inputs.go @@ -72,6 +72,18 @@ func (si SideInput) getManagerDeploymentObj(pipeline Pipeline, req GetSideInputD volumes = append(volumes, si.Volumes...) } volumeMounts := []corev1.VolumeMount{{Name: varVolumeName, MountPath: PathVarRun}} + numaContainer.VolumeMounts = append(numaContainer.VolumeMounts, volumeMounts...) + sidecarContainer := si.getUDContainer(req) + sidecarContainer.VolumeMounts = append(sidecarContainer.VolumeMounts, volumeMounts...) 
+ containers := []corev1.Container{*numaContainer} + initContainers := []corev1.Container{si.getInitContainer(pipeline, req), sidecarContainer} + + // TODO: (k8s 1.29) clean this up once we deprecate the support for k8s < 1.29 + if !isSidecarSupported() { + initContainers = []corev1.Container{si.getInitContainer(pipeline, req)} + containers = []corev1.Container{*numaContainer, sidecarContainer} + } + deployment := &appv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: pipeline.GetSideInputsManagerDeploymentName(si.Name), @@ -93,8 +105,8 @@ func (si SideInput) getManagerDeploymentObj(pipeline Pipeline, req GetSideInputD }, }, Spec: corev1.PodSpec{ - Containers: []corev1.Container{*numaContainer, si.getUDContainer(req)}, - InitContainers: []corev1.Container{si.getInitContainer(pipeline, req)}, + Containers: containers, + InitContainers: initContainers, Volumes: volumes, }, }, @@ -103,9 +115,6 @@ func (si SideInput) getManagerDeploymentObj(pipeline Pipeline, req GetSideInputD if x := pipeline.Spec.Templates; x != nil && x.SideInputsManagerTemplate != nil { x.SideInputsManagerTemplate.ApplyToPodTemplateSpec(&deployment.Spec.Template) } - for i := range deployment.Spec.Template.Spec.Containers { - deployment.Spec.Template.Spec.Containers[i].VolumeMounts = append(deployment.Spec.Template.Spec.Containers[i].VolumeMounts, volumeMounts...) - } return deployment, nil } @@ -157,7 +166,7 @@ func (si SideInput) getUDContainer(req GetSideInputDeploymentReq) corev1.Contain cb := containerBuilder{}. name(CtrUdSideInput). image(si.Container.Image). 
- imagePullPolicy(req.PullPolicy) + imagePullPolicy(req.PullPolicy).asSidecar() if si.Container.ImagePullPolicy != nil { cb = cb.imagePullPolicy(*si.Container.ImagePullPolicy) } diff --git a/pkg/apis/numaflow/v1alpha1/side_inputs_test.go b/pkg/apis/numaflow/v1alpha1/side_inputs_test.go index e3949cbbe8..3287bdbcb6 100644 --- a/pkg/apis/numaflow/v1alpha1/side_inputs_test.go +++ b/pkg/apis/numaflow/v1alpha1/side_inputs_test.go @@ -21,6 +21,7 @@ import ( "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" + "k8s.io/utils/ptr" ) var ( @@ -65,6 +66,7 @@ func Test_getUDContainer(t *testing.T) { } assert.Equal(t, envs[EnvUDContainerType], UDContainerSideInputs) assert.Equal(t, imagePullNever, c.ImagePullPolicy) + assert.Equal(t, ptr.To[corev1.ContainerRestartPolicy](corev1.ContainerRestartPolicyAlways), c.RestartPolicy) } func Test_getNumaContainer(t *testing.T) { @@ -99,7 +101,7 @@ func Test_getManagerDeploymentObj(t *testing.T) { deploy, err := newObj.getManagerDeploymentObj(*testPipeline, testGetSideInputDeploymentReq) assert.NoError(t, err) assert.NotNil(t, deploy) - assert.Equal(t, 1, len(deploy.Spec.Template.Spec.InitContainers)) - assert.Equal(t, 2, len(deploy.Spec.Template.Spec.Containers)) + assert.Equal(t, 2, len(deploy.Spec.Template.Spec.InitContainers)) + assert.Equal(t, 1, len(deploy.Spec.Template.Spec.Containers)) assert.Equal(t, 2, len(deploy.Spec.Template.Spec.Volumes)) } diff --git a/pkg/apis/numaflow/v1alpha1/vertex_types.go b/pkg/apis/numaflow/v1alpha1/vertex_types.go index ccd7ecd5d6..90474d4580 100644 --- a/pkg/apis/numaflow/v1alpha1/vertex_types.go +++ b/pkg/apis/numaflow/v1alpha1/vertex_types.go @@ -335,7 +335,12 @@ func (v Vertex) GetPodSpec(req GetVertexPodSpecReq) (*corev1.PodSpec, error) { } // Add the sidecar containers - initContainers = append(initContainers, sidecarContainers...) 
+ // TODO: (k8s 1.29) clean this up once we deprecate the support for k8s <1.29 + if isSidecarSupported() { + initContainers = append(initContainers, sidecarContainers...) + } else { + containers = append(containers, sidecarContainers...) + } if v.IsASource() && v.Spec.Source.Serving != nil { servingContainer, err := v.getServingContainer(req) diff --git a/pkg/reconciler/cmd/start.go b/pkg/reconciler/cmd/start.go index c4da13a525..cae8052b37 100644 --- a/pkg/reconciler/cmd/start.go +++ b/pkg/reconciler/cmd/start.go @@ -18,6 +18,8 @@ package cmd import ( "context" + "fmt" + "os" "time" "go.uber.org/zap" @@ -121,6 +123,15 @@ func Start(namespaced bool, managedNamespace string) { kubeClient := kubernetes.NewForConfigOrDie(restConfig) + // TODO: clean up? + if svrVersion, err := kubeClient.ServerVersion(); err != nil { + logger.Fatalw("Failed to get k8s cluster server version", zap.Error(err)) + } else { + k8sVersion := fmt.Sprintf("%s.%s", svrVersion.Major, svrVersion.Minor) + os.Setenv(dfv1.EnvK8sServerVersion, k8sVersion) + logger.Infof("Kubernetes server version: %s", k8sVersion) + } + // Readiness probe if err := mgr.AddReadyzCheck("readiness", healthz.Ping); err != nil { logger.Fatalw("Unable add a readiness check", zap.Error(err)) diff --git a/pkg/reconciler/isbsvc/controller.go b/pkg/reconciler/isbsvc/controller.go index 6f3cd212f9..7b04913d17 100644 --- a/pkg/reconciler/isbsvc/controller.go +++ b/pkg/reconciler/isbsvc/controller.go @@ -82,7 +82,7 @@ func (r *interStepBufferServiceReconciler) Reconcile(ctx context.Context, req ct if !equality.Semantic.DeepEqual(isbSvc.Finalizers, isbSvcCopy.Finalizers) { patchYaml := "metadata:\n finalizers: [" + strings.Join(isbSvcCopy.Finalizers, ",") + "]" patchJson, _ := yaml.YAMLToJSON([]byte(patchYaml)) - if err := r.client.Patch(ctx, isbSvc, client.RawPatch(types.MergePatchType, []byte(patchJson))); err != nil { + if err := r.client.Patch(ctx, isbSvc, client.RawPatch(types.MergePatchType, patchJson)); err != nil { return 
ctrl.Result{}, err } } diff --git a/pkg/reconciler/pipeline/controller.go b/pkg/reconciler/pipeline/controller.go index cb11078c7f..cb7e7a2bfd 100644 --- a/pkg/reconciler/pipeline/controller.go +++ b/pkg/reconciler/pipeline/controller.go @@ -96,7 +96,7 @@ func (r *pipelineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c if !equality.Semantic.DeepEqual(pl.Finalizers, plCopy.Finalizers) { patchYaml := "metadata:\n finalizers: [" + strings.Join(plCopy.Finalizers, ",") + "]" patchJson, _ := yaml.YAMLToJSON([]byte(patchYaml)) - if err := r.client.Patch(ctx, pl, client.RawPatch(types.MergePatchType, []byte(patchJson))); err != nil { + if err := r.client.Patch(ctx, pl, client.RawPatch(types.MergePatchType, patchJson)); err != nil { return result, err } } diff --git a/test/sideinputs-e2e/testdata/map-sideinput-pipeline.yaml b/test/sideinputs-e2e/testdata/map-sideinput-pipeline.yaml index 7ecff1e3e6..9468a99538 100644 --- a/test/sideinputs-e2e/testdata/map-sideinput-pipeline.yaml +++ b/test/sideinputs-e2e/testdata/map-sideinput-pipeline.yaml @@ -16,6 +16,8 @@ spec: source: http: {} - name: si-e2e + scale: + min: 1 udf: container: # A map side input udf, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sideinput/examples/map_sideinput/udf From f5a79bf9e808a32d4895a04ccdd048c377fd4549 Mon Sep 17 00:00:00 2001 From: Adarsh Jain Date: Sat, 23 Nov 2024 08:09:45 +0530 Subject: [PATCH 148/188] feat: unify metrics ( cleanup and add missing metrics ) (#2207) --- .codecov.yml | 2 +- docs/operations/metrics/metrics.md | 116 +++++++--------- pkg/metrics/metrics.go | 56 ++++++-- pkg/reduce/data_forward.go | 98 +++++-------- pkg/reduce/pnf/pnf.go | 47 +++---- pkg/sinks/blackhole/blackhole.go | 3 - pkg/sinks/blackhole/metrics.go | 31 ----- pkg/sinks/forward/forward.go | 59 +++++--- pkg/sinks/kafka/kafka.go | 7 - pkg/sinks/kafka/metrics.go | 14 -- pkg/sinks/logger/log.go | 2 - pkg/sinks/logger/metrics.go | 31 ----- pkg/sources/forward/data_forward.go | 170 
++++++++--------------- pkg/sources/forward/data_forward_test.go | 1 - pkg/sources/generator/metrics.go | 38 ----- pkg/sources/generator/tickgen.go | 3 - pkg/sources/http/http.go | 2 - pkg/sources/http/metrics.go | 31 ----- pkg/sources/jetstream/jetstream.go | 2 - pkg/sources/jetstream/metrics.go | 31 ----- pkg/sources/kafka/metrics.go | 14 -- pkg/sources/kafka/reader.go | 3 - pkg/sources/nats/metrics.go | 31 ----- pkg/sources/nats/nats.go | 2 - pkg/udf/forward/forward.go | 125 +++++++++-------- 25 files changed, 307 insertions(+), 612 deletions(-) delete mode 100644 pkg/sinks/blackhole/metrics.go delete mode 100644 pkg/sinks/logger/metrics.go delete mode 100644 pkg/sources/generator/metrics.go delete mode 100644 pkg/sources/http/metrics.go delete mode 100644 pkg/sources/jetstream/metrics.go delete mode 100644 pkg/sources/nats/metrics.go diff --git a/.codecov.yml b/.codecov.yml index 9939d697da..206690e926 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -17,4 +17,4 @@ coverage: project: default: # allow test coverage to drop by 2%, assume that it's typically due to CI problems - threshold: 2 + threshold: "2" diff --git a/docs/operations/metrics/metrics.md b/docs/operations/metrics/metrics.md index 3b64f46924..35194f3e59 100644 --- a/docs/operations/metrics/metrics.md +++ b/docs/operations/metrics/metrics.md @@ -10,84 +10,64 @@ These metrics in combination can be used to determine the overall health of your These metrics can be used to determine throughput of your pipeline. -#### Data-forward - -| Metric name | Metric type | Labels | Description | -| ----------------------------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------- | -| `forwarder_data_read_total` | Counter | `pipeline=`
`vertex=`
`vertex_type=`
`replica=`
`partition_name=` | Provides the total number of messages read by a given Vertex from an Inter-Step Buffer Partition | -| `forwarder_read_bytes_total` | Counter | `pipeline=`
`vertex=`
`vertex_type=`
`replica=`
`partition_name=` | Provides the total number of bytes read by a given Vertex from an Inter-Step Buffer Partition | -| `forwarder_write_total` | Counter | `pipeline=`
`vertex=`
`vertex_type=`
`replica=`
`partition_name=` | Provides the total number of messages written to Inter-Step Buffer by a given Vertex | -| `forwarder_write_bytes_total` | Counter | `pipeline=`
`vertex=`
`vertex_type=`
`replica=`
`partition_name=` | Provides the total number of bytes written to Inter-Step Buffer by a given Vertex | -| `forwarder_ack_total` | Counter | `pipeline=`
`vertex=`
`vertex_type=`
`replica=`
`partition_name=` | Provides the total number of messages acknowledged by a given Vertex from an Inter-Step Buffer Partition | -| `forwarder_drop_total` | Counter | `pipeline=`
`vertex=`
`vertex_type=`
`replica=`
`partition_name=` | Provides the total number of messages dropped by a given Vertex due to a full Inter-Step Buffer Partition | -| `forwarder_drop_bytes_total` | Counter | `pipeline=`
`vertex=`
`vertex_type=`
`replica=`
`partition_name=` | Provides the total number of bytes dropped by a given Vertex due to a full Inter-Step Buffer Partition | - -#### Kafka Source - -| Metric name | Metric type | Labels | Description | -| ------------------------- | ----------- | ------------------------------------------------------ | --------------------------------------------------------------------------------- | -| `kafka_source_read_total` | Counter | `pipeline=`
`vertex=` | Provides the number of messages read by the Kafka Source Vertex/Processor. | -| `kafka_source_ack_total` | Counter | `pipeline=`
`vertex=` | Provides the number of messages acknowledged by the Kafka Source Vertex/Processor | - -#### Generator Source - -| Metric name | Metric type | Labels | Description | -| --------------------------- | ----------- | ------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `tickgen_source_read_total` | Counter | `pipeline=`
`vertex=` | Provides the number of messages read by the Generator Source Vertex/Processor. | - -#### Http Source - -| Metric name | Metric type | Labels | Description | -| ------------------------ | ----------- | ------------------------------------------------------ | ------------------------------------------------------------------------- | -| `http_source_read_total` | Counter | `pipeline=`
`vertex=` | Provides the number of messages read by the HTTP Source Vertex/Processor. | - -#### Kafka Sink - -| Metric name | Metric type | Labels | Description | -| ------------------------ | ----------- | ------------------------------------------------------ | -------------------------------------------------------------------------- | -| `kafka_sink_write_total` | Counter | `pipeline=`
`vertex=` | Provides the number of messages written by the Kafka Sink Vertex/Processor | - -#### Log Sink - -| Metric name | Metric type | Labels | Description | -| ---------------------- | ----------- | ------------------------------------------------------ | ------------------------------------------------------------------------ | -| `log_sink_write_total` | Counter | `pipeline=`
`vertex=` | Provides the number of messages written by the Log Sink Vertex/Processor | +| Metric name | Metric type | Labels | Description | +| ------------------------------------------ | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------- | +| `forwarder_read_total` | Counter | `pipeline=`
`vertex=`
`vertex_type=`
`replica=`
`partition_name=` | Provides the total number of messages read by a given Vertex from an Inter-Step Buffer Partition or Source | +| `forwarder_data_read_total` | Counter | `pipeline=`
`vertex=`
`vertex_type=`
`replica=`
`partition_name=` | Provides the total number of data messages read by a given Vertex from an Inter-Step Buffer Partition or Source | +| `forwarder_read_bytes_total` | Counter | `pipeline=`
`vertex=`
`vertex_type=`
`replica=`
`partition_name=` | Provides the total number of bytes read by a given Vertex from an Inter-Step Buffer Partition or Source | +| `forwarder_data_read_bytes_total` | Counter | `pipeline=`
`vertex=`
`vertex_type=`
`replica=`
`partition_name=` | Provides the total number of data bytes read by a given Vertex from an Inter-Step Buffer Partition or Source | +| `source_forwarder_transformer_read_total` | Counter | `pipeline=`
`vertex=`
`replica=`
`partition_name=` | Provides the total number of messages read by source transformer | +| `forwarder_write_total` | Counter | `pipeline=`
`vertex=`
`vertex_type=`
`replica=`
`partition_name=` | Provides the total number of messages written to Inter-Step Buffer by a given Vertex | +| `forwarder_write_bytes_total` | Counter | `pipeline=`
`vertex=`
`vertex_type=`
`replica=`
`partition_name=` | Provides the total number of bytes written to Inter-Step Buffer by a given Vertex | +| `source_forwarder_transformer_write_total` | Counter | `pipeline=`
`vertex=`
`replica=`
`partition_name=` | Provides the total number of messages written by source transformer | +| `forwarder_fbsink_write_total` | Counter | `pipeline=`
`vertex=`
`vertex_type=`
`replica=`
`partition_name=` | Provides the total number of messages written to a fallback sink | +| `forwarder_fbsink_write_bytes_total` | Counter | `pipeline=`
`vertex=`
`vertex_type=`
`replica=`
`partition_name=` | Provides the total number of bytes written to a fallback sink | +| `forwarder_ack_total` | Counter | `pipeline=`
`vertex=`
`vertex_type=`
`replica=`
`partition_name=` | Provides the total number of messages acknowledged by a given Vertex from an Inter-Step Buffer Partition | +| `forwarder_drop_total` | Counter | `pipeline=`
`vertex=`
`vertex_type=`
`replica=`
`partition_name=` | Provides the total number of messages dropped by a given Vertex due to a full Inter-Step Buffer Partition | +| `forwarder_drop_bytes_total` | Counter | `pipeline=`
`vertex=`
`vertex_type=`
`replica=`
`partition_name=` | Provides the total number of bytes dropped by a given Vertex due to a full Inter-Step Buffer Partition | +| `forwarder_udf_read_total` | Counter | `pipeline=`
`vertex=`
`vertex_type=`
`replica=`
`partition_name=` | Provides the total number of messages read by UDF | +| `forwarder_udf_write_total` | Counter | `pipeline=`
`vertex=`
`vertex_type=`
`replica=`
`partition_name=` | Provides the total number of messages written by UDF | ### Latency These metrics can be used to determine the latency of your pipeline. -| Metric name | Metric type | Labels | Description | -| ---------------------------------------------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | -| `pipeline_processing_lag` | Gauge | `pipeline=` | Pipeline processing lag in milliseconds (max watermark - min watermark) | -| `pipeline_watermark_cmp_now` | Gauge | `pipeline=` | Max watermark of source compared with current time in milliseconds | -| `source_forwarder_transformer_processing_time` | Histogram | `pipeline=`
`vertex=`
`vertex_type=`
`replica=`
`partition_name=` | Provides a histogram distribution of the processing times of User-defined Source Transformer | -| `forwarder_udf_processing_time` | Histogram | `pipeline=`
`vertex=`
`vertex_type=`
`replica=` | Provides a histogram distribution of the processing times of User-defined Functions. (UDF's) | -| `forwarder_forward_chunk_processing_time` | Histogram | `pipeline=`
`vertex=`
`vertex_type=`
`replica=` | Provides a histogram distribution of the processing times of the forwarder function as a whole | -| `reduce_pnf_process_time` | Histogram | `pipeline=`
`vertex=`
`replica=` | Provides a histogram distribution of the processing times of the reducer | -| `reduce_pnf_forward_time` | Histogram | `pipeline=`
`vertex=`
`replica=` | Provides a histogram distribution of the forwarding times of the reducer | +| Metric name | Metric type | Labels | Description | +| ---------------------------------------------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------- | +| `pipeline_processing_lag` | Gauge | `pipeline=` | Pipeline processing lag in milliseconds (max watermark - min watermark) | +| `pipeline_watermark_cmp_now` | Gauge | `pipeline=` | Max watermark of source compared with current time in milliseconds | +| `forwarder_read_processing_time` | Histogram | `pipeline=`
`vertex=`
`vertex_type=`
`replica=`
`partition_name=` | Provides the histogram distribution of the processing times of read operations | +| `forwarder_write_processing_time` | Histogram | `pipeline=`
`vertex=`
`vertex_type=`
`replica=`
`partition_name=` | Provides the histogram distribution of the processing times of write operations | +| `forwarder_ack_processing_time` | Histogram | `pipeline=`
`vertex=`
`vertex_type=`
`replica=`
`partition_name=` | Provides the histogram distribution of the processing times of ack operations | +| `forwarder_fbsink_write_processing_time` | Histogram | `pipeline=`
`vertex=`
`vertex_type=`
`replica=`
`partition_name=` | Provides the histogram distribution of the processing times of write operations to a fallback sink | +| `source_forwarder_transformer_processing_time` | Histogram | `pipeline=`
`vertex=`
`replica=`
`partition_name=` | Provides a histogram distribution of the processing times of source transformer | +| `forwarder_udf_processing_time` | Histogram | `pipeline=`
`vertex=`
`vertex_type=`
`replica=` | Provides a histogram distribution of the processing times of User-defined Functions. (UDF's) | +| `forwarder_forward_chunk_processing_time` | Histogram | `pipeline=`
`vertex=`
`vertex_type=`
`replica=` | Provides a histogram distribution of the processing times of the forwarder function as a whole | +| `reduce_pnf_process_time` | Histogram | `pipeline=`
`vertex=`
`replica=` | Provides a histogram distribution of the processing times of the reducer | +| `reduce_pnf_forward_time` | Histogram | `pipeline=`
`vertex=`
`replica=` | Provides a histogram distribution of the forwarding times of the reducer | +| `vertex_pending_messages` | Gauge | `pipeline=`
`vertex=`
`period=`
`partition_name=` | Provides the average pending messages in the last period of seconds. It is the pending messages of a vertex | ### Errors These metrics can be used to determine if there are any errors in the pipeline. -| Metric name | Metric type | Labels | Description | -| --------------------------------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- | -| `pipeline_data_processing_health` | Gauge | `pipeline=` | Pipeline data processing health status. 1: Healthy, 0: Unknown, -1: Warning, -2: Critical | -| `controller_isbsvc_health` | Gauge | `ns=`
`isbsvc=` | A metric to indicate whether the ISB Service is healthy. '1' means healthy, '0' means unhealthy | -| `controller_pipeline_health` | Gauge | `ns=`
`pipeline=` | A metric to indicate whether the Pipeline is healthy. '1' means healthy, '0' means unhealthy | -| `controller_monovtx_health` | Gauge | `ns=`
`mvtx_name=` | A metric to indicate whether the MonoVertex is healthy. '1' means healthy, '0' means unhealthy | -| `forwarder_platform_error_total` | Counter | `pipeline=`
`vertex=`
`vertex_type=`
`replica=` | Indicates any internal errors which could stop pipeline processing | -| `forwarder_read_error_total` | Counter | `pipeline=`
`vertex=`
`vertex_type=`
`replica=`
`partition_name=` | Indicates any errors while reading messages by the forwarder | -| `forwarder_write_error_total` | Counter | `pipeline=`
`vertex=` `vertex_type=`

`replica=`
`partition_name=` | Indicates any errors while writing messages by the forwarder | -| `forwarder_ack_error_total` | Counter | `pipeline=`
`vertex=`
`vertex_type=`
`replica=`
`partition_name=` | Indicates any errors while acknowledging messages by the forwarder | -| `kafka_source_offset_ack_errors` | Counter | `pipeline=`
`vertex=` | Indicates any kafka acknowledgement errors | -| `kafka_sink_write_error_total` | Counter | `pipeline=`
`vertex=` | Provides the number of errors while writing to the Kafka sink | -| `kafka_sink_write_timeout_total` | Counter | `pipeline=`
`vertex=` | Provides the write timeouts while writing to the Kafka sink | -| `isb_jetstream_read_error_total` | Counter | `partition_name=` | Indicates any read errors with NATS Jetstream ISB | -| `isb_jetstream_write_error_total` | Counter | `partition_name=` | Indicates any write errors with NATS Jetstream ISB | -| `isb_redis_read_error_total` | Counter | `partition_name=` | Indicates any read errors with Redis ISB | -| `isb_redis_write_error_total` | Counter | `partition_name=` | Indicates any write errors with Redis ISB | +| Metric name | Metric type | Labels | Description | +| ------------------------------------------ | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- | +| `pipeline_data_processing_health` | Gauge | `pipeline=` | Pipeline data processing health status. 1: Healthy, 0: Unknown, -1: Warning, -2: Critical | +| `controller_isbsvc_health` | Gauge | `ns=`
`isbsvc=` | A metric to indicate whether the ISB Service is healthy. '1' means healthy, '0' means unhealthy | +| `controller_pipeline_health` | Gauge | `ns=`
`pipeline=` | A metric to indicate whether the Pipeline is healthy. '1' means healthy, '0' means unhealthy | +| `controller_monovtx_health` | Gauge | `ns=`
`mvtx_name=` | A metric to indicate whether the MonoVertex is healthy. '1' means healthy, '0' means unhealthy | +| `forwarder_platform_error_total` | Counter | `pipeline=`
`vertex=`
`vertex_type=`
`replica=` | Indicates any internal errors which could stop pipeline processing | +| `forwarder_read_error_total` | Counter | `pipeline=`
`vertex=`
`vertex_type=`
`replica=`
`partition_name=` | Indicates any errors while reading messages by the forwarder | +| `source_forwarder_transformer_error_total` | Counter | `pipeline=`
`vertex=`
`replica=`
`partition_name=` | Indicates source transformer errors | +| `forwarder_write_error_total` | Counter | `pipeline=`
`vertex=` `vertex_type=`

`replica=`
`partition_name=` | Indicates any errors while writing messages by the forwarder | +| `forwarder_fbsink_write_error_total` | Counter | `pipeline=`
`vertex=` `vertex_type=`

`replica=`
`partition_name=` | Indicates any errors while writing to a fallback sink | +| `forwarder_ack_error_total` | Counter | `pipeline=`
`vertex=`
`vertex_type=`
`replica=`
`partition_name=` | Indicates any errors while acknowledging messages by the forwarder | +| `kafka_sink_write_timeout_total` | Counter | `pipeline=`
`vertex=` | Provides the write timeouts while writing to the Kafka sink | +| `isb_jetstream_read_error_total` | Counter | `partition_name=` | Indicates any read errors with NATS Jetstream ISB | +| `isb_jetstream_write_error_total` | Counter | `partition_name=` | Indicates any write errors with NATS Jetstream ISB | +| `isb_redis_read_error_total` | Counter | `partition_name=` | Indicates any read errors with Redis ISB | +| `isb_redis_write_error_total` | Counter | `partition_name=` | Indicates any write errors with Redis ISB | ### Saturation diff --git a/pkg/metrics/metrics.go b/pkg/metrics/metrics.go index 4f8bfe1aac..2754983481 100644 --- a/pkg/metrics/metrics.go +++ b/pkg/metrics/metrics.go @@ -32,14 +32,12 @@ const ( LabelVertexType = "vertex_type" LabelPartitionName = "partition_name" LabelMonoVertexName = "mvtx_name" - - LabelComponent = "component" - LabelComponentName = "component_name" - LabelSDKLanguage = "language" - LabelSDKVersion = "version" - LabelSDKType = "type" // container type, e.g sourcer, sourcetransformer, sinker, etc. see serverinfo.ContainerType - - LabelReason = "reason" + LabelComponent = "component" + LabelComponentName = "component_name" + LabelSDKLanguage = "language" + LabelSDKVersion = "version" + LabelSDKType = "type" // container type, e.g sourcer, sourcetransformer, sinker, etc. 
see serverinfo.ContainerType + LabelReason = "reason" ) var ( @@ -85,6 +83,14 @@ var ( Help: "Total number of Read Errors", }, []string{LabelVertex, LabelPipeline, LabelVertexType, LabelVertexReplicaIndex, LabelPartitionName}) + // ReadProcessingTime is a histogram to observe read operation latency + ReadProcessingTime = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Subsystem: "forwarder", + Name: "read_processing_time", + Help: "Processing times of read operations (100 microseconds to 10 minutes)", + Buckets: prometheus.ExponentialBucketsRange(100, 60000000*10, 10), + }, []string{LabelVertex, LabelPipeline, LabelVertexType, LabelVertexReplicaIndex, LabelPartitionName}) + // WriteMessagesCount is used to indicate the number of messages written WriteMessagesCount = promauto.NewCounterVec(prometheus.CounterOpts{ Subsystem: "forwarder", @@ -106,6 +112,14 @@ var ( Help: "Total number of Write Errors", }, []string{LabelVertex, LabelPipeline, LabelVertexType, LabelVertexReplicaIndex, LabelPartitionName}) + // WriteProcessingTime is a histogram to observe write operation latency + WriteProcessingTime = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Subsystem: "forwarder", + Name: "write_processing_time", + Help: "Processing times of write operations (100 microseconds to 20 minutes)", + Buckets: prometheus.ExponentialBucketsRange(100, 60000000*20, 10), + }, []string{LabelVertex, LabelPipeline, LabelVertexType, LabelVertexReplicaIndex, LabelPartitionName}) + // DropMessagesCount is used to indicate the number of messages dropped DropMessagesCount = promauto.NewCounterVec(prometheus.CounterOpts{ Subsystem: "forwarder", @@ -127,6 +141,14 @@ var ( Help: "Total number of Messages Acknowledged", }, []string{LabelVertex, LabelPipeline, LabelVertexType, LabelVertexReplicaIndex, LabelPartitionName}) + // AckProcessingTime is a histogram to observe acknowledgment operation latency + AckProcessingTime = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Subsystem: 
"forwarder", + Name: "ack_processing_time", + Help: "Processing times of acknowledgment operations (100 microseconds to 10 minutes)", + Buckets: prometheus.ExponentialBucketsRange(100, 60000000*10, 10), + }, []string{LabelVertex, LabelPipeline, LabelVertexType, LabelVertexReplicaIndex, LabelPartitionName}) + // AckMessageError is used to indicate the errors in the number of messages acknowledged AckMessageError = promauto.NewCounterVec(prometheus.CounterOpts{ Subsystem: "forwarder", @@ -200,7 +222,7 @@ var ( Subsystem: "source_forwarder", Name: "transformer_error_total", Help: "Total number of source transformer Errors", - }, []string{LabelVertex, LabelPipeline, LabelVertexReplicaIndex, LabelPartitionName}) + }, []string{LabelVertex, LabelPipeline, LabelVertexType, LabelVertexReplicaIndex, LabelPartitionName}) // SourceTransformerProcessingTime is a histogram to Observe Source Transformer Processing times as a whole SourceTransformerProcessingTime = promauto.NewHistogramVec(prometheus.HistogramOpts{ @@ -208,7 +230,7 @@ var ( Name: "transformer_processing_time", Help: "Processing times of source transformer (100 microseconds to 15 minutes)", Buckets: prometheus.ExponentialBucketsRange(100, 60000000*15, 10), - }, []string{LabelVertex, LabelPipeline, LabelVertexReplicaIndex, LabelPartitionName}) + }, []string{LabelVertex, LabelPipeline, LabelVertexType, LabelVertexReplicaIndex, LabelPartitionName}) // SourceTransformerConcurrentProcessingTime is a histogram to Observe Source Transformer Processing times as a whole SourceTransformerConcurrentProcessingTime = promauto.NewHistogramVec(prometheus.HistogramOpts{ @@ -216,21 +238,21 @@ var ( Name: "concurrent_transformer_processing_time", Help: "Processing times of Concurrent source transformer (100 microseconds to 20 minutes)", Buckets: prometheus.ExponentialBucketsRange(100, 60000000*20, 10), - }, []string{LabelVertex, LabelPipeline, LabelVertexReplicaIndex, LabelPartitionName}) + }, []string{LabelVertex, LabelPipeline, 
LabelVertexType, LabelVertexReplicaIndex, LabelPartitionName}) // SourceTransformerReadMessagesCount is used to indicate the number of messages read by source transformer SourceTransformerReadMessagesCount = promauto.NewCounterVec(prometheus.CounterOpts{ Subsystem: "source_forwarder", Name: "transformer_read_total", Help: "Total number of Messages Read by source transformer", - }, []string{LabelVertex, LabelPipeline, LabelVertexReplicaIndex, LabelPartitionName}) + }, []string{LabelVertex, LabelPipeline, LabelVertexType, LabelVertexReplicaIndex, LabelPartitionName}) // SourceTransformerWriteMessagesCount is used to indicate the number of messages written by source transformer SourceTransformerWriteMessagesCount = promauto.NewCounterVec(prometheus.CounterOpts{ Subsystem: "source_forwarder", Name: "transformer_write_total", Help: "Total number of Messages Written by source transformer", - }, []string{LabelVertex, LabelPipeline, LabelVertexReplicaIndex, LabelPartitionName}) + }, []string{LabelVertex, LabelPipeline, LabelVertexType, LabelVertexReplicaIndex, LabelPartitionName}) ) // Reduce forwarder specific metrics @@ -328,6 +350,14 @@ var ( Help: "Total number of bytes written to a fallback sink", }, []string{LabelVertex, LabelPipeline, LabelVertexType, LabelVertexReplicaIndex, LabelPartitionName}) + // FbSinkWriteProcessingTime is a histogram to observe write operation latency to a fallback sink + FbSinkWriteProcessingTime = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Subsystem: "forwarder", + Name: "fbsink_write_processing_time", + Help: "Processing times of write operations to a fallback sink (100 microseconds to 20 minutes)", + Buckets: prometheus.ExponentialBucketsRange(100, 60000000*20, 10), + }, []string{LabelVertex, LabelPipeline, LabelVertexType, LabelVertexReplicaIndex, LabelPartitionName}) + // FbSinkWriteMessagesError is used to indicate the number of errors while writing to a fallback sink FbSinkWriteMessagesError = 
promauto.NewCounterVec(prometheus.CounterOpts{ Subsystem: "forwarder", diff --git a/pkg/reduce/data_forward.go b/pkg/reduce/data_forward.go index 2ca50f232c..1a96facc5c 100644 --- a/pkg/reduce/data_forward.go +++ b/pkg/reduce/data_forward.go @@ -283,17 +283,15 @@ func (df *DataForward) replayForAlignedWindows(ctx context.Context, discoveredWA // forwardAChunk reads a chunk of messages from isb and assigns watermark to messages // and writes the windowRequests to pbq func (df *DataForward) forwardAChunk(ctx context.Context) { + // Initialize metric labels + metricLabelsWithPartition := map[string]string{metrics.LabelVertex: df.vertexName, metrics.LabelPipeline: df.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeReduceUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), metrics.LabelPartitionName: df.fromBufferPartition.GetName()} + readMessages, err := df.fromBufferPartition.Read(ctx, df.opts.readBatchSize) totalBytes := 0 dataBytes := 0 if err != nil { df.log.Errorw("Failed to read from isb", zap.Error(err)) - metrics.ReadMessagesError.With(map[string]string{ - metrics.LabelVertex: df.vertexName, - metrics.LabelPipeline: df.pipelineName, - metrics.LabelVertexType: string(dfv1.VertexTypeReduceUDF), - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), - metrics.LabelPartitionName: df.fromBufferPartition.GetName()}).Inc() + metrics.ReadMessagesError.With(metricLabelsWithPartition).Inc() } // idle watermark @@ -373,21 +371,9 @@ func (df *DataForward) forwardAChunk(ctx context.Context) { } } - metrics.ReadBytesCount.With(map[string]string{ - metrics.LabelVertex: df.vertexName, - metrics.LabelPipeline: df.pipelineName, - metrics.LabelVertexType: string(dfv1.VertexTypeReduceUDF), - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), - metrics.LabelPartitionName: df.fromBufferPartition.GetName(), - }).Add(float64(totalBytes)) + 
metrics.ReadBytesCount.With(metricLabelsWithPartition).Add(float64(totalBytes)) - metrics.ReadDataBytesCount.With(map[string]string{ - metrics.LabelVertex: df.vertexName, - metrics.LabelPipeline: df.pipelineName, - metrics.LabelVertexType: string(dfv1.VertexTypeReduceUDF), - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), - metrics.LabelPartitionName: df.fromBufferPartition.GetName(), - }).Add(float64(dataBytes)) + metrics.ReadDataBytesCount.With(metricLabelsWithPartition).Add(float64(dataBytes)) // readMessages has to be written to PBQ, acked, etc. df.process(ctx, readMessages) @@ -448,6 +434,15 @@ func (df *DataForward) process(ctx context.Context, messages []*isb.ReadMessage) var dataMessages = make([]*isb.ReadMessage, 0, len(messages)) var ctrlMessages = make([]*isb.ReadMessage, 0) // for a high TPS pipeline, 0 is the most optimal value + // Initialize metric labels + metricLabelsWithPartition := map[string]string{ + metrics.LabelVertex: df.vertexName, + metrics.LabelPipeline: df.pipelineName, + metrics.LabelVertexType: string(dfv1.VertexTypeReduceUDF), + metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), + metrics.LabelPartitionName: df.fromBufferPartition.GetName(), + } + for _, message := range messages { if message.Kind == isb.Data { dataMessages = append(dataMessages, message) @@ -456,20 +451,8 @@ func (df *DataForward) process(ctx context.Context, messages []*isb.ReadMessage) } } - metrics.ReadDataMessagesCount.With(map[string]string{ - metrics.LabelVertex: df.vertexName, - metrics.LabelPipeline: df.pipelineName, - metrics.LabelVertexType: string(dfv1.VertexTypeReduceUDF), - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), - metrics.LabelPartitionName: df.fromBufferPartition.GetName(), - }).Add(float64(len(dataMessages))) - metrics.ReadMessagesCount.With(map[string]string{ - metrics.LabelVertex: df.vertexName, - metrics.LabelPipeline: df.pipelineName, - metrics.LabelVertexType: 
string(dfv1.VertexTypeReduceUDF), - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), - metrics.LabelPartitionName: df.fromBufferPartition.GetName(), - }).Add(float64(len(messages))) + metrics.ReadDataMessagesCount.With(metricLabelsWithPartition).Add(float64(len(dataMessages))) + metrics.ReadMessagesCount.With(metricLabelsWithPartition).Add(float64(len(messages))) // write messages to windows based by PBQs. successfullyWrittenMessages, failedMessages, err := df.writeMessagesToWindows(ctx, dataMessages) @@ -635,12 +618,14 @@ func (df *DataForward) handleOnTimeMessage(message *isb.ReadMessage) []*window.T // writeToPBQ writes to the PBQ. It will return error only if it is not failing to write to PBQ and is in a continuous // error loop, and we have received ctx.Done() via SIGTERM. func (df *DataForward) writeToPBQ(ctx context.Context, winOp *window.TimedWindowRequest, persist bool) error { + //initialize metric labels + metricLabels := map[string]string{ + metrics.LabelVertex: df.vertexName, + metrics.LabelPipeline: df.pipelineName, + metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), + } defer func(t time.Time) { - metrics.PBQWriteTime.With(map[string]string{ - metrics.LabelVertex: df.vertexName, - metrics.LabelPipeline: df.pipelineName, - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), - }).Observe(float64(time.Since(t).Milliseconds())) + metrics.PBQWriteTime.With(metricLabels).Observe(float64(time.Since(t).Milliseconds())) }(time.Now()) var pbqWriteBackoff = wait.Backoff{ @@ -659,11 +644,7 @@ func (df *DataForward) writeToPBQ(ctx context.Context, winOp *window.TimedWindow rErr := q.Write(ctx, winOp, persist) if rErr != nil { df.log.Errorw("Failed to write message to pbq", zap.String("partitionID", winOp.ID.String()), zap.Error(rErr)) - metrics.PBQWriteErrorCount.With(map[string]string{ - metrics.LabelVertex: df.vertexName, - metrics.LabelPipeline: df.pipelineName, - metrics.LabelVertexReplicaIndex: 
strconv.Itoa(int(df.vertexReplica)), - }).Inc() + metrics.PBQWriteErrorCount.With(metricLabels).Inc() // no point retrying if ctx.Done has been invoked select { case <-ctx.Done(): @@ -676,11 +657,7 @@ func (df *DataForward) writeToPBQ(ctx context.Context, winOp *window.TimedWindow } // happy path - metrics.PBQWriteMessagesCount.With(map[string]string{ - metrics.LabelVertex: df.vertexName, - metrics.LabelPipeline: df.pipelineName, - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), - }).Inc() + metrics.PBQWriteMessagesCount.With(metricLabels).Inc() return true, nil }) @@ -696,6 +673,13 @@ func (df *DataForward) ackMessages(ctx context.Context, messages []*isb.ReadMess Jitter: 0.1, } + metricLabelsWithPartition := map[string]string{ + metrics.LabelVertex: df.vertexName, + metrics.LabelPipeline: df.pipelineName, + metrics.LabelVertexType: string(dfv1.VertexTypeReduceUDF), + metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), + metrics.LabelPartitionName: df.fromBufferPartition.GetName(), + } var wg sync.WaitGroup // Ack the message to ISB for _, m := range messages { @@ -707,13 +691,7 @@ func (df *DataForward) ackMessages(ctx context.Context, messages []*isb.ReadMess rErr := o.AckIt() attempt += 1 if rErr != nil { - metrics.AckMessageError.With(map[string]string{ - metrics.LabelVertex: df.vertexName, - metrics.LabelPipeline: df.pipelineName, - metrics.LabelVertexType: string(dfv1.VertexTypeReduceUDF), - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), - metrics.LabelPartitionName: df.fromBufferPartition.GetName(), - }).Inc() + metrics.AckMessageError.With(metricLabelsWithPartition).Inc() df.log.Errorw("Failed to ack message, retrying", zap.String("msgOffSet", o.String()), zap.Error(rErr), zap.Int("attempt", attempt)) // no point retrying if ctx.Done has been invoked @@ -727,13 +705,7 @@ func (df *DataForward) ackMessages(ctx context.Context, messages []*isb.ReadMess } } df.log.Debugw("Successfully acked 
message", zap.String("msgOffSet", o.String())) - metrics.AckMessagesCount.With(map[string]string{ - metrics.LabelVertex: df.vertexName, - metrics.LabelPipeline: df.pipelineName, - metrics.LabelVertexType: string(dfv1.VertexTypeReduceUDF), - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), - metrics.LabelPartitionName: df.fromBufferPartition.GetName(), - }).Inc() + metrics.AckMessagesCount.With(metricLabelsWithPartition).Inc() return true, nil }) // no point trying the rest of the message, most likely that will also fail diff --git a/pkg/reduce/pnf/pnf.go b/pkg/reduce/pnf/pnf.go index e09a6e1bc8..6da0026e14 100644 --- a/pkg/reduce/pnf/pnf.go +++ b/pkg/reduce/pnf/pnf.go @@ -391,6 +391,15 @@ func (pf *ProcessAndForward) writeToBuffer(ctx context.Context, edgeName string, Jitter: 0.1, } + // initialize metric label + metricLabelsWithPartition := map[string]string{ + metrics.LabelVertex: pf.vertexName, + metrics.LabelPipeline: pf.pipelineName, + metrics.LabelVertexType: string(dfv1.VertexTypeReduceUDF), + metrics.LabelVertexReplicaIndex: strconv.Itoa(int(pf.vertexReplica)), + metrics.LabelPartitionName: pf.toBuffers[edgeName][partition].GetName(), + } + writeMessages := resultMessages // write to isb with infinite exponential backoff (until shutdown is triggered) @@ -406,24 +415,16 @@ func (pf *ProcessAndForward) writeToBuffer(ctx context.Context, edgeName string, // when the buffer is full and the user has set the buffer full strategy to // DiscardLatest or when the message is duplicate. 
if errors.As(writeErr, &isb.NonRetryableBufferWriteErr{}) { - metrics.DropMessagesCount.With(map[string]string{ - metrics.LabelVertex: pf.vertexName, - metrics.LabelPipeline: pf.pipelineName, - metrics.LabelVertexType: string(dfv1.VertexTypeReduceUDF), - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(pf.vertexReplica)), - metrics.LabelPartitionName: pf.toBuffers[edgeName][partition].GetName(), - metrics.LabelReason: writeErr.Error(), - }).Inc() - - metrics.DropBytesCount.With(map[string]string{ + metricLabelWithReason := map[string]string{ metrics.LabelVertex: pf.vertexName, metrics.LabelPipeline: pf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeReduceUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(pf.vertexReplica)), metrics.LabelPartitionName: pf.toBuffers[edgeName][partition].GetName(), metrics.LabelReason: writeErr.Error(), - }).Add(float64(len(message.Payload))) - + } + metrics.DropMessagesCount.With(metricLabelWithReason).Inc() + metrics.DropBytesCount.With(metricLabelWithReason).Add(float64(len(message.Payload))) pf.log.Infow("Dropped message", zap.String("reason", writeErr.Error()), zap.String("vertex", pf.vertexName), zap.String("pipeline", pf.pipelineName), zap.String("msg_id", message.ID.String())) } else { failedMessages = append(failedMessages, message) @@ -437,12 +438,7 @@ func (pf *ProcessAndForward) writeToBuffer(ctx context.Context, edgeName string, if len(failedMessages) > 0 { pf.log.Warnw("Failed to write messages to isb inside pnf", zap.Errors("errors", writeErrs)) writeMessages = failedMessages - metrics.WriteMessagesError.With(map[string]string{ - metrics.LabelVertex: pf.vertexName, - metrics.LabelPipeline: pf.pipelineName, - metrics.LabelVertexType: string(dfv1.VertexTypeReduceUDF), - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(pf.vertexReplica)), - metrics.LabelPartitionName: pf.toBuffers[edgeName][partition].GetName()}).Add(float64(len(failedMessages))) + 
metrics.WriteMessagesError.With(metricLabelsWithPartition).Add(float64(len(failedMessages))) if ctx.Err() != nil { // no need to retry if the context is closed @@ -459,19 +455,8 @@ func (pf *ProcessAndForward) writeToBuffer(ctx context.Context, edgeName string, return nil, ctxClosedErr } - metrics.WriteMessagesCount.With(map[string]string{ - metrics.LabelVertex: pf.vertexName, - metrics.LabelPipeline: pf.pipelineName, - metrics.LabelVertexType: string(dfv1.VertexTypeReduceUDF), - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(pf.vertexReplica)), - metrics.LabelPartitionName: pf.toBuffers[edgeName][partition].GetName()}).Add(float64(writeCount)) - - metrics.WriteBytesCount.With(map[string]string{ - metrics.LabelVertex: pf.vertexName, - metrics.LabelPipeline: pf.pipelineName, - metrics.LabelVertexType: string(dfv1.VertexTypeReduceUDF), - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(pf.vertexReplica)), - metrics.LabelPartitionName: pf.toBuffers[edgeName][partition].GetName()}).Add(writeBytes) + metrics.WriteMessagesCount.With(metricLabelsWithPartition).Add(float64(writeCount)) + metrics.WriteBytesCount.With(metricLabelsWithPartition).Add(writeBytes) return offsets, nil } diff --git a/pkg/sinks/blackhole/blackhole.go b/pkg/sinks/blackhole/blackhole.go index 3ef0332123..b849615f49 100644 --- a/pkg/sinks/blackhole/blackhole.go +++ b/pkg/sinks/blackhole/blackhole.go @@ -23,7 +23,6 @@ import ( dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" "github.com/numaproj/numaflow/pkg/isb" - "github.com/numaproj/numaflow/pkg/metrics" "github.com/numaproj/numaflow/pkg/shared/logging" ) @@ -62,8 +61,6 @@ func (b *Blackhole) IsFull() bool { // Write writes to the blackhole. 
func (b *Blackhole) Write(_ context.Context, messages []isb.Message) ([]isb.Offset, []error) { - sinkWriteCount.With(map[string]string{metrics.LabelVertex: b.name, metrics.LabelPipeline: b.pipelineName}).Add(float64(len(messages))) - return nil, make([]error, len(messages)) } diff --git a/pkg/sinks/blackhole/metrics.go b/pkg/sinks/blackhole/metrics.go deleted file mode 100644 index 0afcbb28b9..0000000000 --- a/pkg/sinks/blackhole/metrics.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright 2022 The Numaproj Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package blackhole - -import ( - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - - "github.com/numaproj/numaflow/pkg/metrics" -) - -// sinkWriteCount is used to indicate the number of messages written to the sink -var sinkWriteCount = promauto.NewCounterVec(prometheus.CounterOpts{ - Subsystem: "blackhole_sink", - Name: "write_total", - Help: "Total number of messages written to blackhole sink", -}, []string{metrics.LabelVertex, metrics.LabelPipeline}) diff --git a/pkg/sinks/forward/forward.go b/pkg/sinks/forward/forward.go index 832611c6dc..ac60ecddf7 100644 --- a/pkg/sinks/forward/forward.go +++ b/pkg/sinks/forward/forward.go @@ -181,9 +181,25 @@ func (df *DataForward) Start() <-chan error { // the message after forwarding, barring any platform errors. 
The platform errors include buffer-full, // buffer-not-reachable, etc., but does not include errors due to WhereTo, etc. func (df *DataForward) forwardAChunk(ctx context.Context) error { + // Initialize forwardAChunk and read start times start := time.Now() + readStart := time.Now() totalBytes := 0 dataBytes := 0 + // Initialize metric labels + metricLabels := map[string]string{ + metrics.LabelVertex: df.vertexName, + metrics.LabelPipeline: df.pipelineName, + metrics.LabelVertexType: string(dfv1.VertexTypeSink), + metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), + } + metricLabelsWithPartition := map[string]string{ + metrics.LabelVertex: df.vertexName, + metrics.LabelPipeline: df.pipelineName, + metrics.LabelVertexType: string(dfv1.VertexTypeSink), + metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), + metrics.LabelPartitionName: df.fromBufferPartition.GetName(), + } // There is a chance that we have read the message and the container got forcefully terminated before processing. To provide // at-least-once semantics for reading, during restart we will have to reprocess all unacknowledged messages. It is the // responsibility of the Read function to do that. @@ -191,7 +207,7 @@ func (df *DataForward) forwardAChunk(ctx context.Context) error { df.opts.logger.Debugw("Read from buffer", zap.String("bufferFrom", df.fromBufferPartition.GetName()), zap.Int64("length", int64(len(readMessages)))) if err != nil { df.opts.logger.Warnw("failed to read fromBufferPartition", zap.Error(err)) - metrics.ReadMessagesError.With(map[string]string{metrics.LabelVertex: df.vertexName, metrics.LabelPipeline: df.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeSink), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), metrics.LabelPartitionName: df.fromBufferPartition.GetName()}).Inc() + metrics.ReadMessagesError.With(metricLabelsWithPartition).Inc() } // process only if we have any read messages. 
There is a natural looping here if there is an internal error while @@ -219,6 +235,8 @@ func (df *DataForward) forwardAChunk(ctx context.Context) error { return nil } + metrics.ReadProcessingTime.With(metricLabelsWithPartition).Observe(float64(time.Since(readStart).Microseconds())) + var dataMessages = make([]*isb.ReadMessage, 0, len(readMessages)) // store the offsets of the messages we read from ISB @@ -232,24 +250,12 @@ func (df *DataForward) forwardAChunk(ctx context.Context) error { } } - metrics.ReadDataMessagesCount.With(map[string]string{metrics.LabelVertex: df.vertexName, metrics.LabelPipeline: df.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeSink), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), metrics.LabelPartitionName: df.fromBufferPartition.GetName()}).Add(float64(len(dataMessages))) - metrics.ReadMessagesCount.With(map[string]string{metrics.LabelVertex: df.vertexName, metrics.LabelPipeline: df.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeSink), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), metrics.LabelPartitionName: df.fromBufferPartition.GetName()}).Add(float64(len(readMessages))) + metrics.ReadDataMessagesCount.With(metricLabelsWithPartition).Add(float64(len(dataMessages))) + metrics.ReadMessagesCount.With(metricLabelsWithPartition).Add(float64(len(readMessages))) - metrics.ReadBytesCount.With(map[string]string{ - metrics.LabelVertex: df.vertexName, - metrics.LabelPipeline: df.pipelineName, - metrics.LabelVertexType: string(dfv1.VertexTypeSink), - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), - metrics.LabelPartitionName: df.fromBufferPartition.GetName(), - }).Add(float64(totalBytes)) + metrics.ReadBytesCount.With(metricLabelsWithPartition).Add(float64(totalBytes)) - metrics.ReadDataBytesCount.With(map[string]string{ - metrics.LabelVertex: df.vertexName, - metrics.LabelPipeline: df.pipelineName, - metrics.LabelVertexType: 
string(dfv1.VertexTypeSink), - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), - metrics.LabelPartitionName: df.fromBufferPartition.GetName(), - }).Add(float64(dataBytes)) + metrics.ReadDataBytesCount.With(metricLabelsWithPartition).Add(float64(dataBytes)) // fetch watermark if available // TODO: make it async (concurrent and wait later) @@ -300,14 +306,18 @@ func (df *DataForward) forwardAChunk(ctx context.Context) error { df.opts.logger.Debugw("write to sink completed") + ackStart := time.Now() err = df.ackFromBuffer(ctx, readOffsets) // implicit return for posterity :-) if err != nil { df.opts.logger.Errorw("Failed to ack from buffer", zap.Error(err)) - metrics.AckMessageError.With(map[string]string{metrics.LabelVertex: df.vertexName, metrics.LabelPipeline: df.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeSink), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), metrics.LabelPartitionName: df.fromBufferPartition.GetName()}).Add(float64(len(readOffsets))) + metrics.AckMessageError.With(metricLabelsWithPartition).Add(float64(len(readOffsets))) return nil } - metrics.AckMessagesCount.With(map[string]string{metrics.LabelVertex: df.vertexName, metrics.LabelPipeline: df.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeSink), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), metrics.LabelPartitionName: df.fromBufferPartition.GetName()}).Add(float64(len(readOffsets))) + + // Ack processing time + metrics.AckProcessingTime.With(metricLabelsWithPartition).Observe(float64(time.Since(ackStart).Microseconds())) + metrics.AckMessagesCount.With(metricLabelsWithPartition).Add(float64(len(readOffsets))) if df.opts.cbPublisher != nil { if err = df.opts.cbPublisher.SinkVertexCallback(ctx, writeMessages); err != nil { @@ -315,7 +325,7 @@ func (df *DataForward) forwardAChunk(ctx context.Context) error { } } // ProcessingTimes of the entire forwardAChunk - 
metrics.ForwardAChunkProcessingTime.With(map[string]string{metrics.LabelVertex: df.vertexName, metrics.LabelPipeline: df.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeSink), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica))}).Observe(float64(time.Since(start).Microseconds())) + metrics.ForwardAChunkProcessingTime.With(metricLabels).Observe(float64(time.Since(start).Microseconds())) return nil } @@ -378,6 +388,7 @@ func (df *DataForward) writeToSink(ctx context.Context, sinkWriter sinker.SinkWr writeBytes float64 fallbackMessages []isb.Message ) + writeStart := time.Now() // slice to store the successful offsets returned by the sink writeOffsets := make([]isb.Offset, 0, len(messagesToTry)) @@ -467,7 +478,7 @@ func (df *DataForward) writeToSink(ctx context.Context, sinkWriter sinker.SinkWr } } // update the write metrics for sink - df.updateSinkWriteMetrics(writeCount, writeBytes, sinkWriter.GetName(), isFbSinkWriter) + df.updateSinkWriteMetrics(writeCount, writeBytes, sinkWriter.GetName(), isFbSinkWriter, writeStart) return writeOffsets, fallbackMessages, nil } @@ -510,7 +521,7 @@ func (df *DataForward) handlePostRetryFailures(messagesToTry *[]isb.Message, fai // updateSinkWriteMetrics updates metrics related to data writes to a sink. // Metrics are updated based on whether the operation involves the primary or fallback sink. 
-func (df *DataForward) updateSinkWriteMetrics(writeCount int, writeBytes float64, sinkWriterName string, isFallback bool) { +func (df *DataForward) updateSinkWriteMetrics(writeCount int, writeBytes float64, sinkWriterName string, isFallback bool, writeStart time.Time) { // Define labels to keep track of the data related to the specific operation labels := map[string]string{ metrics.LabelVertex: df.vertexName, @@ -524,10 +535,14 @@ func (df *DataForward) updateSinkWriteMetrics(writeCount int, writeBytes float64 metrics.WriteMessagesCount.With(labels).Add(float64(writeCount)) metrics.WriteBytesCount.With(labels).Add(writeBytes) + // Add write processing time metric + metrics.WriteProcessingTime.With(labels).Observe(float64(time.Since(writeStart).Microseconds())) + // if this is for Fallback Sink, increment specific metrics as well if isFallback { metrics.FbSinkWriteMessagesCount.With(labels).Add(float64(writeCount)) metrics.FbSinkWriteBytesCount.With(labels).Add(writeBytes) + metrics.FbSinkWriteProcessingTime.With(labels).Observe(float64(time.Since(writeStart).Microseconds())) } } diff --git a/pkg/sinks/kafka/kafka.go b/pkg/sinks/kafka/kafka.go index f9a7ef8bcc..e7b3acd610 100644 --- a/pkg/sinks/kafka/kafka.go +++ b/pkg/sinks/kafka/kafka.go @@ -196,13 +196,6 @@ func (tk *ToKafka) Write(_ context.Context, messages []isb.Message) ([]isb.Offse tk.producer.Input() <- message } <-done - for _, err := range errs { - if err != nil { - kafkaSinkWriteErrors.With(map[string]string{metrics.LabelVertex: tk.name, metrics.LabelPipeline: tk.pipelineName}).Inc() - } else { - kafkaSinkWriteCount.With(map[string]string{metrics.LabelVertex: tk.name, metrics.LabelPipeline: tk.pipelineName}).Inc() - } - } return nil, errs } diff --git a/pkg/sinks/kafka/metrics.go b/pkg/sinks/kafka/metrics.go index 4770931940..200c9f2c80 100644 --- a/pkg/sinks/kafka/metrics.go +++ b/pkg/sinks/kafka/metrics.go @@ -23,20 +23,6 @@ import ( "github.com/numaproj/numaflow/pkg/metrics" ) -// 
kafkaSinkWriteErrors is used to indicate the number of errors while while writing to kafka sink -var kafkaSinkWriteErrors = promauto.NewCounterVec(prometheus.CounterOpts{ - Subsystem: "kafka_sink", - Name: "write_error_total", - Help: "Total number of Write Errors", -}, []string{metrics.LabelVertex, metrics.LabelPipeline}) - -// kafkaSinkWriteCount is used to indicate the number of messages written to kafka -var kafkaSinkWriteCount = promauto.NewCounterVec(prometheus.CounterOpts{ - Subsystem: "kafka_sink", - Name: "write_total", - Help: "Total number of errors on NewToKafka", -}, []string{metrics.LabelVertex, metrics.LabelPipeline}) - var kafkaSinkWriteTimeouts = promauto.NewCounterVec(prometheus.CounterOpts{ Subsystem: "kafka_sink", Name: "write_timeout_total", diff --git a/pkg/sinks/logger/log.go b/pkg/sinks/logger/log.go index 776048b05e..877d59894c 100644 --- a/pkg/sinks/logger/log.go +++ b/pkg/sinks/logger/log.go @@ -26,7 +26,6 @@ import ( dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" "github.com/numaproj/numaflow/pkg/isb" - "github.com/numaproj/numaflow/pkg/metrics" "github.com/numaproj/numaflow/pkg/shared/logging" ) @@ -71,7 +70,6 @@ func (t *ToLog) Write(_ context.Context, messages []isb.Message) ([]isb.Offset, for k, v := range message.Headers { hStr.WriteString(fmt.Sprintf("%s: %s, ", k, v)) } - logSinkWriteCount.With(map[string]string{metrics.LabelVertex: t.name, metrics.LabelPipeline: t.pipelineName}).Inc() log.Println(prefix, " Payload - ", string(message.Payload), " Keys - ", message.Keys, " EventTime - ", message.EventTime.UnixMilli(), " Headers - ", hStr.String(), " ID - ", message.ID.String()) } return nil, make([]error, len(messages)) diff --git a/pkg/sinks/logger/metrics.go b/pkg/sinks/logger/metrics.go deleted file mode 100644 index 6a02c3a0e9..0000000000 --- a/pkg/sinks/logger/metrics.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright 2022 The Numaproj Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package logger - -import ( - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - - "github.com/numaproj/numaflow/pkg/metrics" -) - -// logSinkWriteCount is used to indicate the number of messages written to log sink -var logSinkWriteCount = promauto.NewCounterVec(prometheus.CounterOpts{ - Subsystem: "log_sink", - Name: "write_total", - Help: "Total number of messages written to log sink", -}, []string{metrics.LabelVertex, metrics.LabelPipeline}) diff --git a/pkg/sources/forward/data_forward.go b/pkg/sources/forward/data_forward.go index 63d230652c..facbff8e2b 100644 --- a/pkg/sources/forward/data_forward.go +++ b/pkg/sources/forward/data_forward.go @@ -193,22 +193,33 @@ func (df *DataForward) Start() <-chan error { // the message after forwarding, barring any platform errors. The platform errors include buffer-full, // buffer-not-reachable, etc., but do not include errors due to user code transformer, WhereTo, etc. 
func (df *DataForward) forwardAChunk(ctx context.Context) error { - start := time.Now() + // Initialize metric labels + replicaIndex := strconv.Itoa(int(df.vertexReplica)) + vertexType := string(dfv1.VertexTypeSource) totalBytes := 0 + metricLabels := map[string]string{ + metrics.LabelVertex: df.vertexName, + metrics.LabelPipeline: df.pipelineName, + metrics.LabelVertexType: vertexType, + metrics.LabelVertexReplicaIndex: replicaIndex, + } + metricLabelsWithPartition := map[string]string{ + metrics.LabelVertex: df.vertexName, + metrics.LabelPipeline: df.pipelineName, + metrics.LabelVertexType: vertexType, + metrics.LabelVertexReplicaIndex: replicaIndex, + metrics.LabelPartitionName: df.reader.GetName(), + } + // Initialize forwardAChunk and read start times + start := time.Now() + readStart := time.Now() // There is a chance that we have read the message and the container got forcefully terminated before processing. To provide // at-least-once semantics for reading, during the restart we will have to reprocess all unacknowledged messages. It is the // responsibility of the Read function to do that. readMessages, err := df.reader.Read(ctx, df.opts.readBatchSize) if err != nil { df.opts.logger.Warnw("failed to read from source", zap.Error(err)) - metrics.ReadMessagesError.With(map[string]string{ - metrics.LabelVertex: df.vertexName, - metrics.LabelPipeline: df.pipelineName, - metrics.LabelVertexType: string(dfv1.VertexTypeSource), - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), - metrics.LabelPartitionName: df.reader.GetName(), - }).Inc() - + metrics.ReadMessagesError.With(metricLabelsWithPartition).Inc() // if the error is not retryable, we should return the error. 
var readErr = new(errors2.SourceReadErr) if errors.As(err, &readErr) { @@ -258,48 +269,20 @@ func (df *DataForward) forwardAChunk(ctx context.Context) error { return nil } + // Only expose read processing time metric when there are messages and no error + metrics.ReadProcessingTime.With(metricLabelsWithPartition).Observe(float64(time.Since(readStart).Microseconds())) + metrics.ReadDataMessagesCount.With(metricLabelsWithPartition).Add(float64(len(readMessages))) + metrics.ReadMessagesCount.With(metricLabelsWithPartition).Add(float64(len(readMessages))) // reset the idle handler because we have read messages df.srcIdleHandler.Reset() - metrics.ReadDataMessagesCount.With(map[string]string{ - metrics.LabelVertex: df.vertexName, - metrics.LabelPipeline: df.pipelineName, - metrics.LabelVertexType: string(dfv1.VertexTypeSource), - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), - metrics.LabelPartitionName: df.reader.GetName()}, - ).Add(float64(len(readMessages))) - - metrics.ReadMessagesCount.With(map[string]string{ - metrics.LabelVertex: df.vertexName, - metrics.LabelPipeline: df.pipelineName, - metrics.LabelVertexType: string(dfv1.VertexTypeSource), - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), - metrics.LabelPartitionName: df.reader.GetName(), - }).Add(float64(len(readMessages))) - // store the offsets of the messages we read from source - - // store the offsets of the messages we read from ISB var readOffsets = make([]isb.Offset, len(readMessages)) for idx, m := range readMessages { totalBytes += len(m.Payload) - readOffsets[idx] = m.ReadOffset } - - metrics.ReadBytesCount.With(map[string]string{ - metrics.LabelVertex: df.vertexName, - metrics.LabelPipeline: df.pipelineName, - metrics.LabelVertexType: string(dfv1.VertexTypeSource), - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), - metrics.LabelPartitionName: df.reader.GetName(), - }).Add(float64(totalBytes)) - 
metrics.ReadDataBytesCount.With(map[string]string{ - metrics.LabelVertex: df.vertexName, - metrics.LabelPipeline: df.pipelineName, - metrics.LabelVertexType: string(dfv1.VertexTypeSource), - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), - metrics.LabelPartitionName: df.reader.GetName(), - }).Add(float64(totalBytes)) + metrics.ReadBytesCount.With(metricLabelsWithPartition).Add(float64(totalBytes)) + metrics.ReadDataBytesCount.With(metricLabelsWithPartition).Add(float64(totalBytes)) // source data transformer applies filtering and assigns event time to source data, which doesn't require watermarks. // hence we assign time.UnixMilli(-1) to processorWM. @@ -333,15 +316,8 @@ func (df *DataForward) forwardAChunk(ctx context.Context) error { zap.Int("concurrency", df.opts.transformerConcurrency), zap.Duration("took", time.Since(transformerProcessingStart)), ) - - metrics.SourceTransformerProcessingTime.With(map[string]string{ - metrics.LabelVertex: df.vertexName, - metrics.LabelPipeline: df.pipelineName, - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), - metrics.LabelPartitionName: df.reader.GetName(), - }).Observe(float64(time.Since(transformerProcessingStart).Microseconds())) + metrics.SourceTransformerProcessingTime.With(metricLabelsWithPartition).Observe(float64(time.Since(transformerProcessingStart).Microseconds())) } else { - for idx, m := range readMessages { // assign watermark to the message m.Watermark = time.Time(processorWM) @@ -401,11 +377,13 @@ func (df *DataForward) forwardAChunk(ctx context.Context) error { } // forward the messages to the edge buffer (could be multiple edges) + writeStart := time.Now() writeOffsets, err = df.writeToBuffers(ctx, messageToStep) if err != nil { df.opts.logger.Errorw("failed to write to toBuffers", zap.Error(err)) return err } + metrics.WriteProcessingTime.With(metricLabelsWithPartition).Observe(float64(time.Since(writeStart).Microseconds())) // activeWatermarkBuffers records the 
buffers that the publisher has published // a watermark in this batch processing cycle. @@ -464,17 +442,13 @@ func (df *DataForward) forwardAChunk(ctx context.Context) error { // when we apply transformer, we don't handle partial errors (it's either non or all, non will return early), // so we should be able to ack all the readOffsets including data messages and control messages + // Measure ack processing time + ackStart := time.Now() err = df.ackFromSource(ctx, readOffsets) // implicit return for posterity :-) if err != nil { df.opts.logger.Errorw("failed to ack from source", zap.Error(err)) - metrics.AckMessageError.With(map[string]string{ - metrics.LabelVertex: df.vertexName, - metrics.LabelPipeline: df.pipelineName, - metrics.LabelVertexType: string(dfv1.VertexTypeSource), - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), - metrics.LabelPartitionName: df.reader.GetName(), - }).Add(float64(len(readOffsets))) + metrics.AckMessageError.With(metricLabelsWithPartition).Add(float64(len(readOffsets))) // if the error is not retryable, we should return the error. 
var ackErr = new(errors2.SourceAckErr) if errors.As(err, &ackErr) { @@ -484,13 +458,9 @@ func (df *DataForward) forwardAChunk(ctx context.Context) error { } return nil } - metrics.AckMessagesCount.With(map[string]string{ - metrics.LabelVertex: df.vertexName, - metrics.LabelPipeline: df.pipelineName, - metrics.LabelVertexType: string(dfv1.VertexTypeSource), - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), - metrics.LabelPartitionName: df.reader.GetName(), - }).Add(float64(len(readOffsets))) + // Only expose ack processing time metric when there is no error + metrics.AckProcessingTime.With(metricLabelsWithPartition).Observe(float64(time.Since(ackStart).Microseconds())) + metrics.AckMessagesCount.With(metricLabelsWithPartition).Add(float64(len(readOffsets))) if df.opts.cbPublisher != nil { if err := df.opts.cbPublisher.NonSinkVertexCallback(ctx, readWriteMessagePairs); err != nil { @@ -499,12 +469,7 @@ func (df *DataForward) forwardAChunk(ctx context.Context) error { } // ProcessingTimes of the entire forwardAChunk - metrics.ForwardAChunkProcessingTime.With(map[string]string{ - metrics.LabelVertex: df.vertexName, - metrics.LabelPipeline: df.pipelineName, - metrics.LabelVertexType: string(dfv1.VertexTypeSource), - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), - }).Observe(float64(time.Since(start).Microseconds())) + metrics.ForwardAChunkProcessingTime.With(metricLabels).Observe(float64(time.Since(start).Microseconds())) return nil } @@ -548,6 +513,20 @@ func (df *DataForward) writeToBuffer(ctx context.Context, toBufferPartition isb. 
writeCount int writeBytes float64 ) + // initialize metric labels + metricLabels := map[string]string{ + metrics.LabelVertex: df.vertexName, + metrics.LabelPipeline: df.pipelineName, + metrics.LabelVertexType: string(dfv1.VertexTypeSource), + metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), + } + metricLabelsWithPartition := map[string]string{ + metrics.LabelVertex: df.vertexName, + metrics.LabelPipeline: df.pipelineName, + metrics.LabelVertexType: string(dfv1.VertexTypeSource), + metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), + metrics.LabelPartitionName: toBufferPartition.GetName(), + } totalCount = len(messages) writeOffsets = make([]isb.Offset, 0, totalCount) @@ -563,24 +542,16 @@ func (df *DataForward) writeToBuffer(ctx context.Context, toBufferPartition isb. // when the buffer is full and the user has set the buffer full strategy to // DiscardLatest or when the message is duplicate. if errors.As(err, &isb.NonRetryableBufferWriteErr{}) { - metrics.DropMessagesCount.With(map[string]string{ + metricLabelWithReason := map[string]string{ metrics.LabelVertex: df.vertexName, metrics.LabelPipeline: df.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeSource), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), metrics.LabelPartitionName: toBufferPartition.GetName(), metrics.LabelReason: err.Error(), - }).Inc() - - metrics.DropBytesCount.With(map[string]string{ - metrics.LabelVertex: df.vertexName, - metrics.LabelPipeline: df.pipelineName, - metrics.LabelVertexType: string(dfv1.VertexTypeSource), - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), - metrics.LabelPartitionName: toBufferPartition.GetName(), - metrics.LabelReason: err.Error(), - }).Add(float64(len(msg.Payload))) - + } + metrics.DropMessagesCount.With(metricLabelWithReason).Inc() + metrics.DropBytesCount.With(metricLabelWithReason).Add(float64(len(msg.Payload))) df.opts.logger.Infow("Dropped message", 
zap.String("reason", err.Error()), zap.String("partition", toBufferPartition.GetName()), @@ -591,22 +562,11 @@ func (df *DataForward) writeToBuffer(ctx context.Context, toBufferPartition isb. needRetry = true // we retry only failed messages failedMessages = append(failedMessages, msg) - metrics.WriteMessagesError.With(map[string]string{ - metrics.LabelVertex: df.vertexName, - metrics.LabelPipeline: df.pipelineName, - metrics.LabelVertexType: string(dfv1.VertexTypeSource), - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), - metrics.LabelPartitionName: toBufferPartition.GetName(), - }).Inc() + metrics.WriteMessagesError.With(metricLabelsWithPartition).Inc() // a shutdown can break the blocking loop caused due to InternalErr if ok, _ := df.IsShuttingDown(); ok { - metrics.PlatformError.With(map[string]string{ - metrics.LabelVertex: df.vertexName, - metrics.LabelPipeline: df.pipelineName, - metrics.LabelVertexType: string(dfv1.VertexTypeSource), - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), - }).Inc() + metrics.PlatformError.With(metricLabels).Inc() return writeOffsets, fmt.Errorf("writeToBuffer failed, Stop called while stuck on an internal error with failed messages:%d, %v", len(failedMessages), errs) } @@ -637,22 +597,8 @@ func (df *DataForward) writeToBuffer(ctx context.Context, toBufferPartition isb. 
} } - metrics.WriteMessagesCount.With(map[string]string{ - metrics.LabelVertex: df.vertexName, - metrics.LabelPipeline: df.pipelineName, - metrics.LabelVertexType: string(dfv1.VertexTypeSource), - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), - metrics.LabelPartitionName: toBufferPartition.GetName(), - }).Add(float64(writeCount)) - - metrics.WriteBytesCount.With(map[string]string{ - metrics.LabelVertex: df.vertexName, - metrics.LabelPipeline: df.pipelineName, - metrics.LabelVertexType: string(dfv1.VertexTypeSource), - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(df.vertexReplica)), - metrics.LabelPartitionName: toBufferPartition.GetName(), - }).Add(writeBytes) - + metrics.WriteMessagesCount.With(metricLabelsWithPartition).Add(float64(writeCount)) + metrics.WriteBytesCount.With(metricLabelsWithPartition).Add(writeBytes) return writeOffsets, nil } diff --git a/pkg/sources/forward/data_forward_test.go b/pkg/sources/forward/data_forward_test.go index 59797dec2b..3eac08698e 100644 --- a/pkg/sources/forward/data_forward_test.go +++ b/pkg/sources/forward/data_forward_test.go @@ -1230,7 +1230,6 @@ func validateMetrics(batchSize int64) (err error) { expected := ` forwarder_read_total{partition_name="from",pipeline="testPipeline",replica="0",vertex="testVertex",vertex_type="Source"} ` + fmt.Sprintf("%f", float64(batchSize)) + ` ` - err = testutil.CollectAndCompare(metrics.ReadMessagesCount, strings.NewReader(metadata+expected), "forwarder_read_total") if err != nil { return err diff --git a/pkg/sources/generator/metrics.go b/pkg/sources/generator/metrics.go deleted file mode 100644 index b9a4bbe0cc..0000000000 --- a/pkg/sources/generator/metrics.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2022 The Numaproj Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package generator - -import ( - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - - "github.com/numaproj/numaflow/pkg/metrics" -) - -// tickgenSourceReadCount is used to indicate the number of messages read by tick generator -var tickgenSourceReadCount = promauto.NewCounterVec(prometheus.CounterOpts{ - Subsystem: "tickgen_source", - Name: "read_total", - Help: "Total number of messages Read", -}, []string{metrics.LabelVertex, metrics.LabelPipeline}) - -// tickgenSourceCount is used to indicate the number of times tickgen has ticked -var tickgenSourceCount = promauto.NewCounterVec(prometheus.CounterOpts{ - Subsystem: "tickgen_source", - Name: "total", - Help: "Total number of times tickgen source has ticked", -}, []string{metrics.LabelVertex, metrics.LabelPipeline}) diff --git a/pkg/sources/generator/tickgen.go b/pkg/sources/generator/tickgen.go index c0cdb9dcf1..e2e96fec9c 100644 --- a/pkg/sources/generator/tickgen.go +++ b/pkg/sources/generator/tickgen.go @@ -31,7 +31,6 @@ import ( dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" "github.com/numaproj/numaflow/pkg/isb" - "github.com/numaproj/numaflow/pkg/metrics" "github.com/numaproj/numaflow/pkg/shared/logging" "github.com/numaproj/numaflow/pkg/sources/sourcer" ) @@ -199,7 +198,6 @@ loop: // we implement Read With Wait semantics select { case r := <-mg.srcChan: - tickgenSourceReadCount.With(map[string]string{metrics.LabelVertex: mg.vertexName, metrics.LabelPipeline: mg.pipelineName}).Inc() msgs = append(msgs, mg.newReadMessage(r.key, 
r.data, r.offset, r.ts)) case <-timeout: break loop @@ -240,7 +238,6 @@ func (mg *memGen) newWorker(ctx context.Context, rate int) func(chan time.Time, case <-ctx.Done(): return case ts := <-tickChan: - tickgenSourceCount.With(map[string]string{metrics.LabelVertex: mg.vertexName, metrics.LabelPipeline: mg.pipelineName}).Inc() // we would generate all the keys in a round robin fashion // even if there are multiple pods, all the pods will generate same keys in the same order. // TODO: alternatively, we could also think about generating a subset of keys per pod. diff --git a/pkg/sources/http/http.go b/pkg/sources/http/http.go index 2637764c93..b09f7f79ad 100644 --- a/pkg/sources/http/http.go +++ b/pkg/sources/http/http.go @@ -33,7 +33,6 @@ import ( dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" "github.com/numaproj/numaflow/pkg/isb" - "github.com/numaproj/numaflow/pkg/metrics" "github.com/numaproj/numaflow/pkg/shared/logging" sharedtls "github.com/numaproj/numaflow/pkg/shared/tls" sharedutil "github.com/numaproj/numaflow/pkg/shared/util" @@ -211,7 +210,6 @@ loop: for i := int64(0); i < count; i++ { select { case m := <-h.messages: - httpSourceReadCount.With(map[string]string{metrics.LabelVertex: h.vertexName, metrics.LabelPipeline: h.pipelineName}).Inc() msgs = append(msgs, m) case <-timeout: h.logger.Debugw("Timed out waiting for messages to read.", zap.Duration("waited", h.readTimeout), zap.Int("read", len(msgs))) diff --git a/pkg/sources/http/metrics.go b/pkg/sources/http/metrics.go deleted file mode 100644 index 4512525a7f..0000000000 --- a/pkg/sources/http/metrics.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright 2022 The Numaproj Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package http - -import ( - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - - "github.com/numaproj/numaflow/pkg/metrics" -) - -// httpSourceReadCount is used to indicate the number of messages read by the http source vertex -var httpSourceReadCount = promauto.NewCounterVec(prometheus.CounterOpts{ - Subsystem: "http_source", - Name: "read_total", - Help: "Total number of messages Read", -}, []string{metrics.LabelVertex, metrics.LabelPipeline}) diff --git a/pkg/sources/jetstream/jetstream.go b/pkg/sources/jetstream/jetstream.go index 139b02e56f..191017c665 100644 --- a/pkg/sources/jetstream/jetstream.go +++ b/pkg/sources/jetstream/jetstream.go @@ -30,7 +30,6 @@ import ( dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" "github.com/numaproj/numaflow/pkg/isb" - "github.com/numaproj/numaflow/pkg/metrics" "github.com/numaproj/numaflow/pkg/shared/logging" sharedutil "github.com/numaproj/numaflow/pkg/shared/util" "github.com/numaproj/numaflow/pkg/sources/sourcer" @@ -401,7 +400,6 @@ loop: for i := int64(0); i < count; i++ { select { case m := <-ns.messages: - jetstreamSourceReadCount.With(map[string]string{metrics.LabelVertex: ns.vertexName, metrics.LabelPipeline: ns.pipelineName}).Inc() msgs = append(msgs, m) case <-timeout: ns.logger.Debugw("Timed out waiting for messages to read.", zap.Duration("waited", ns.readTimeout), zap.Int("read", len(msgs))) diff --git a/pkg/sources/jetstream/metrics.go b/pkg/sources/jetstream/metrics.go deleted file mode 100644 index 385b5be497..0000000000 --- 
a/pkg/sources/jetstream/metrics.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright 2022 The Numaproj Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package jetstream - -import ( - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - - "github.com/numaproj/numaflow/pkg/metrics" -) - -// jetstreamSourceReadCount is used to indicate the number of messages read by the nats source vertex -var jetstreamSourceReadCount = promauto.NewCounterVec(prometheus.CounterOpts{ - Subsystem: "jetstream_source", - Name: "read_total", - Help: "Total number of messages read", -}, []string{metrics.LabelVertex, metrics.LabelPipeline}) diff --git a/pkg/sources/kafka/metrics.go b/pkg/sources/kafka/metrics.go index 7420204055..50556adc48 100644 --- a/pkg/sources/kafka/metrics.go +++ b/pkg/sources/kafka/metrics.go @@ -30,20 +30,6 @@ var kafkaSourceReadCount = promauto.NewCounterVec(prometheus.CounterOpts{ Help: "Total number of messages Read", }, []string{metrics.LabelVertex, metrics.LabelPipeline, metrics.LabelPartitionName}) -// kafkaSourceAckCount is used to indicate the number of messages Acknowledged -var kafkaSourceAckCount = promauto.NewCounterVec(prometheus.CounterOpts{ - Subsystem: "kafka_source", - Name: "ack_total", - Help: "Total number of messages Acknowledged", -}, []string{metrics.LabelVertex, metrics.LabelPipeline}) - -// kafkaSourceOffsetAckErrors is used to indicate the number of errors while reading from kafka source with 
offsets -var kafkaSourceOffsetAckErrors = promauto.NewCounterVec(prometheus.CounterOpts{ - Subsystem: "kafka_source", - Name: "ack_error_total", - Help: "Total number of Kafka ID Errors", -}, []string{metrics.LabelVertex, metrics.LabelPipeline}) - // kafkaPending is used to indicate the number of messages pending in the kafka source var kafkaPending = promauto.NewGaugeVec(prometheus.GaugeOpts{ Subsystem: "kafka_source", diff --git a/pkg/sources/kafka/reader.go b/pkg/sources/kafka/reader.go index 9926c94888..a16edf04a8 100644 --- a/pkg/sources/kafka/reader.go +++ b/pkg/sources/kafka/reader.go @@ -188,7 +188,6 @@ func (ks *kafkaSource) Ack(_ context.Context, offsets []isb.Offset) []error { partitionIdx := offset.PartitionIdx() sequence, err := offset.Sequence() if err != nil { - kafkaSourceOffsetAckErrors.With(map[string]string{metrics.LabelVertex: ks.vertexName, metrics.LabelPipeline: ks.pipelineName}).Inc() ks.logger.Errorw("Unable to extract partition offset of type int64 from the supplied offset. skipping and continuing", zap.String("supplied-offset", offset.String()), zap.Error(err)) continue } @@ -204,8 +203,6 @@ func (ks *kafkaSource) Ack(_ context.Context, offsets []isb.Offset) []error { // we need to mark the offset of the next message to read ks.handler.sess.MarkOffset(ks.topic, partitionIdx, offset+1, "") } - - kafkaSourceAckCount.With(map[string]string{metrics.LabelVertex: ks.vertexName, metrics.LabelPipeline: ks.pipelineName}).Add(float64(len(offsets))) // How come it does not return errors at all? return make([]error, len(offsets)) } diff --git a/pkg/sources/nats/metrics.go b/pkg/sources/nats/metrics.go deleted file mode 100644 index 6369e457fa..0000000000 --- a/pkg/sources/nats/metrics.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright 2022 The Numaproj Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nats - -import ( - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - - "github.com/numaproj/numaflow/pkg/metrics" -) - -// natsSourceReadCount is used to indicate the number of messages read by the nats source vertex -var natsSourceReadCount = promauto.NewCounterVec(prometheus.CounterOpts{ - Subsystem: "nats_source", - Name: "read_total", - Help: "Total number of messages read", -}, []string{metrics.LabelVertex, metrics.LabelPipeline}) diff --git a/pkg/sources/nats/nats.go b/pkg/sources/nats/nats.go index 46348f3635..14976d1277 100644 --- a/pkg/sources/nats/nats.go +++ b/pkg/sources/nats/nats.go @@ -27,7 +27,6 @@ import ( dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" "github.com/numaproj/numaflow/pkg/isb" - "github.com/numaproj/numaflow/pkg/metrics" "github.com/numaproj/numaflow/pkg/shared/logging" sharedutil "github.com/numaproj/numaflow/pkg/shared/util" "github.com/numaproj/numaflow/pkg/sources/sourcer" @@ -192,7 +191,6 @@ loop: for i := int64(0); i < count; i++ { select { case m := <-ns.messages: - natsSourceReadCount.With(map[string]string{metrics.LabelVertex: ns.vertexName, metrics.LabelPipeline: ns.pipelineName}).Inc() msgs = append(msgs, m) case <-timeout: ns.logger.Debugw("Timed out waiting for messages to read.", zap.Duration("waited", ns.readTimeout), zap.Int("read", len(msgs))) diff --git a/pkg/udf/forward/forward.go b/pkg/udf/forward/forward.go index 2d61e32408..1966d25250 100644 --- a/pkg/udf/forward/forward.go +++ b/pkg/udf/forward/forward.go @@ -178,14 +178,29 @@ 
func (isdf *InterStepDataForward) forwardAChunk(ctx context.Context) error { start := time.Now() totalBytes := 0 dataBytes := 0 + // Initialize metric labels + metricLabels := map[string]string{ + metrics.LabelVertex: isdf.vertexName, + metrics.LabelPipeline: isdf.pipelineName, + metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), + metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), + } + metricLabelsWithPartition := map[string]string{ + metrics.LabelVertex: isdf.vertexName, + metrics.LabelPipeline: isdf.pipelineName, + metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), + metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), + metrics.LabelPartitionName: isdf.fromBufferPartition.GetName(), + } // There is a chance that we have read the message and the container got forcefully terminated before processing. To provide // at-least-once semantics for reading, during restart we will have to reprocess all unacknowledged messages. It is the // responsibility of the Read function to do that. + readStart := time.Now() readMessages, err := isdf.fromBufferPartition.Read(ctx, isdf.opts.readBatchSize) isdf.opts.logger.Debugw("Read from buffer", zap.String("bufferFrom", isdf.fromBufferPartition.GetName()), zap.Int64("length", int64(len(readMessages)))) if err != nil { isdf.opts.logger.Warnw("failed to read fromBufferPartition", zap.Error(err)) - metrics.ReadMessagesError.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), metrics.LabelPartitionName: isdf.fromBufferPartition.GetName()}).Inc() + metrics.ReadMessagesError.With(metricLabelsWithPartition).Inc() } // process only if we have any read messages. 
There is a natural looping here if there is an internal error while @@ -242,10 +257,11 @@ func (isdf *InterStepDataForward) forwardAChunk(ctx context.Context) error { return nil } - metrics.ReadDataMessagesCount.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), metrics.LabelPartitionName: isdf.fromBufferPartition.GetName()}).Add(float64(len(dataMessages))) - metrics.ReadMessagesCount.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), metrics.LabelPartitionName: isdf.fromBufferPartition.GetName()}).Add(float64(len(readMessages))) - metrics.ReadBytesCount.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), metrics.LabelPartitionName: isdf.fromBufferPartition.GetName()}).Add(float64(totalBytes)) - metrics.ReadDataBytesCount.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), metrics.LabelPartitionName: isdf.fromBufferPartition.GetName()}).Add(float64(dataBytes)) + metrics.ReadProcessingTime.With(metricLabelsWithPartition).Observe(float64(time.Since(readStart).Microseconds())) + metrics.ReadDataMessagesCount.With(metricLabelsWithPartition).Add(float64(len(dataMessages))) + metrics.ReadMessagesCount.With(metricLabelsWithPartition).Add(float64(len(readMessages))) + metrics.ReadBytesCount.With(metricLabelsWithPartition).Add(float64(totalBytes)) + 
metrics.ReadDataBytesCount.With(metricLabelsWithPartition).Add(float64(dataBytes)) // fetch watermark if available // TODO: make it async (concurrent and wait later) @@ -358,14 +374,16 @@ func (isdf *InterStepDataForward) forwardAChunk(ctx context.Context) error { // when we apply udf, we don't handle partial errors (it's either non or all, non will return early), // so we should be able to ack all the readOffsets including data messages and control messages + ackStart := time.Now() err = isdf.ackFromBuffer(ctx, readOffsets) // implicit return for posterity :-) if err != nil { isdf.opts.logger.Errorw("Failed to ack from buffer", zap.Error(err)) - metrics.AckMessageError.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), metrics.LabelPartitionName: isdf.fromBufferPartition.GetName()}).Add(float64(len(readOffsets))) + metrics.AckMessageError.With(metricLabelsWithPartition).Add(float64(len(readOffsets))) return err } - metrics.AckMessagesCount.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), metrics.LabelPartitionName: isdf.fromBufferPartition.GetName()}).Add(float64(len(readOffsets))) + metrics.AckProcessingTime.With(metricLabelsWithPartition).Observe(float64(time.Since(ackStart).Microseconds())) + metrics.AckMessagesCount.With(metricLabelsWithPartition).Add(float64(len(readOffsets))) if isdf.opts.cbPublisher != nil { // Publish the callback for the vertex @@ -374,7 +392,7 @@ func (isdf *InterStepDataForward) forwardAChunk(ctx context.Context) error { } } // ProcessingTimes of the entire forwardAChunk - metrics.ForwardAChunkProcessingTime.With(map[string]string{metrics.LabelVertex: isdf.vertexName, 
metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica))}).Observe(float64(time.Since(start).Microseconds())) + metrics.ForwardAChunkProcessingTime.With(metricLabels).Observe(float64(time.Since(start).Microseconds())) return nil } @@ -384,6 +402,20 @@ func (isdf *InterStepDataForward) streamMessage(ctx context.Context, dataMessage messageToStep := make(map[string][][]isb.Message) writeOffsets := make(map[string][][]isb.Offset) + metricLabels := map[string]string{ + metrics.LabelVertex: isdf.vertexName, + metrics.LabelPipeline: isdf.pipelineName, + metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), + metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), + } + metricLabelsWithPartition := map[string]string{ + metrics.LabelVertex: isdf.vertexName, + metrics.LabelPipeline: isdf.pipelineName, + metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), + metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), + metrics.LabelPartitionName: isdf.fromBufferPartition.GetName(), + } + for toVertex := range isdf.toBuffers { messageToStep[toVertex] = make([][]isb.Message, len(isdf.toBuffers[toVertex])) writeOffsets[toVertex] = make([][]isb.Offset, len(isdf.toBuffers[toVertex])) @@ -398,13 +430,7 @@ func (isdf *InterStepDataForward) streamMessage(ctx context.Context, dataMessage // Process the single data message start := time.Now() - metrics.UDFReadMessagesCount.With(map[string]string{ - metrics.LabelVertex: isdf.vertexName, - metrics.LabelPipeline: isdf.pipelineName, - metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), - metrics.LabelPartitionName: isdf.fromBufferPartition.GetName(), - }).Inc() + metrics.UDFReadMessagesCount.With(metricLabelsWithPartition).Inc() writeMessageCh := make(chan isb.WriteMessage) errs, ctx := errgroup.WithContext(ctx) @@ 
-415,13 +441,7 @@ func (isdf *InterStepDataForward) streamMessage(ctx context.Context, dataMessage // Stream the message to the next vertex for writeMessage := range writeMessageCh { writeMessage.Headers = dataMessages[0].Headers - metrics.UDFWriteMessagesCount.With(map[string]string{ - metrics.LabelVertex: isdf.vertexName, - metrics.LabelPipeline: isdf.pipelineName, - metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), - metrics.LabelPartitionName: isdf.fromBufferPartition.GetName(), - }).Add(1) + metrics.UDFWriteMessagesCount.With(metricLabelsWithPartition).Add(1) // Determine where to step and write to buffers if err := isdf.whereToStep(&writeMessage, messageToStep, dataMessages[0]); err != nil { @@ -448,30 +468,15 @@ func (isdf *InterStepDataForward) streamMessage(ctx context.Context, dataMessage // Handle errors in UDF processing if err := errs.Wait(); err != nil { - metrics.UDFError.With(map[string]string{ - metrics.LabelVertex: isdf.vertexName, - metrics.LabelPipeline: isdf.pipelineName, - metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), - }).Inc() + metrics.UDFError.With(metricLabels).Inc() if ok, _ := isdf.IsShuttingDown(); ok { isdf.opts.logger.Errorw("mapUDF.Apply, Stop called while stuck on an internal error", zap.Error(err)) - metrics.PlatformError.With(map[string]string{ - metrics.LabelVertex: isdf.vertexName, - metrics.LabelPipeline: isdf.pipelineName, - metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), - }).Inc() + metrics.PlatformError.With(metricLabels).Inc() } return nil, fmt.Errorf("failed to applyUDF, error: %w", err) } - metrics.UDFProcessingTime.With(map[string]string{ - metrics.LabelVertex: isdf.vertexName, - metrics.LabelPipeline: isdf.pipelineName, - metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), 
- metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), - }).Observe(float64(time.Since(start).Microseconds())) + metrics.UDFProcessingTime.With(metricLabels).Observe(float64(time.Since(start).Microseconds())) return writeOffsets, nil } @@ -554,8 +559,23 @@ func (isdf *InterStepDataForward) writeToBuffer(ctx context.Context, toBufferPar writeCount int writeBytes float64 ) + // initialize metric labels + metricLabels := map[string]string{ + metrics.LabelVertex: isdf.vertexName, + metrics.LabelPipeline: isdf.pipelineName, + metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), + metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), + } + metricLabelsWithPartition := map[string]string{ + metrics.LabelVertex: isdf.vertexName, + metrics.LabelPipeline: isdf.pipelineName, + metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), + metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), + metrics.LabelPartitionName: toBufferPartition.GetName(), + } totalCount = len(messages) writeOffsets = make([]isb.Offset, 0, totalCount) + writeStart := time.Now() for { _writeOffsets, errs := toBufferPartition.Write(ctx, messages) @@ -569,33 +589,25 @@ func (isdf *InterStepDataForward) writeToBuffer(ctx context.Context, toBufferPar // when the buffer is full and the user has set the buffer full strategy to // DiscardLatest or when the message is duplicate. 
if errors.As(err, &isb.NonRetryableBufferWriteErr{}) { - metrics.DropMessagesCount.With(map[string]string{ - metrics.LabelVertex: isdf.vertexName, - metrics.LabelPipeline: isdf.pipelineName, - metrics.LabelVertexType: string(dfv1.VertexTypeSink), - metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), - metrics.LabelPartitionName: toBufferPartition.GetName(), - metrics.LabelReason: err.Error(), - }).Inc() - - metrics.DropBytesCount.With(map[string]string{ + metricLabelWithReason := map[string]string{ metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeSink), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), metrics.LabelPartitionName: toBufferPartition.GetName(), metrics.LabelReason: err.Error(), - }).Add(float64(len(msg.Payload))) - + } + metrics.DropMessagesCount.With(metricLabelWithReason).Inc() + metrics.DropBytesCount.With(metricLabelWithReason).Add(float64(len(msg.Payload))) isdf.opts.logger.Infow("Dropped message", zap.String("reason", err.Error()), zap.String("partition", toBufferPartition.GetName()), zap.String("vertex", isdf.vertexName), zap.String("pipeline", isdf.pipelineName), zap.String("msg_id", msg.ID.String())) } else { needRetry = true // we retry only failed messages failedMessages = append(failedMessages, msg) - metrics.WriteMessagesError.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), metrics.LabelPartitionName: toBufferPartition.GetName()}).Inc() + metrics.WriteMessagesError.With(metricLabelsWithPartition).Inc() // a shutdown can break the blocking loop caused due to InternalErr if ok, _ := isdf.IsShuttingDown(); ok { - metrics.PlatformError.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, 
metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica))}).Inc() + metrics.PlatformError.With(metricLabels).Inc() return writeOffsets, fmt.Errorf("writeToBuffer failed, Stop called while stuck on an internal error with failed messages:%d, %v", len(failedMessages), errs) } } @@ -625,8 +637,9 @@ func (isdf *InterStepDataForward) writeToBuffer(ctx context.Context, toBufferPar } } - metrics.WriteMessagesCount.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), metrics.LabelPartitionName: toBufferPartition.GetName()}).Add(float64(writeCount)) - metrics.WriteBytesCount.With(map[string]string{metrics.LabelVertex: isdf.vertexName, metrics.LabelPipeline: isdf.pipelineName, metrics.LabelVertexType: string(dfv1.VertexTypeMapUDF), metrics.LabelVertexReplicaIndex: strconv.Itoa(int(isdf.vertexReplica)), metrics.LabelPartitionName: toBufferPartition.GetName()}).Add(writeBytes) + metrics.WriteProcessingTime.With(metricLabelsWithPartition).Observe(float64(time.Since(writeStart).Microseconds())) + metrics.WriteMessagesCount.With(metricLabelsWithPartition).Add(float64(writeCount)) + metrics.WriteBytesCount.With(metricLabelsWithPartition).Add(writeBytes) return writeOffsets, nil } From e4bb2f726a633fde7b12c6ef9b6874de5af1e572 Mon Sep 17 00:00:00 2001 From: Vedant Gupta <49195734+veds-g@users.noreply.github.com> Date: Sun, 1 Dec 2024 13:21:07 +0530 Subject: [PATCH 149/188] fix: init container logs (#2245) Signed-off-by: veds-g --- ui/src/utils/fetcherHooks/podsViewFetch.ts | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/ui/src/utils/fetcherHooks/podsViewFetch.ts b/ui/src/utils/fetcherHooks/podsViewFetch.ts index e893381cf6..d678da4705 100644 --- a/ui/src/utils/fetcherHooks/podsViewFetch.ts +++ 
b/ui/src/utils/fetcherHooks/podsViewFetch.ts @@ -56,7 +56,15 @@ export const usePodsViewFetch = ( const pList = data?.map((pod: any) => { const containers: string[] = []; const containerSpecMap = new Map(); - pod?.spec?.containers?.forEach((container: any) => { + + const containersList = JSON.parse( + JSON.stringify(pod?.spec?.containers) + ); + pod?.spec?.initContainers + ?.filter((initContainer: any) => initContainer?.name !== "init") + ?.forEach((container: any) => containersList.push(container)); + + containersList?.forEach((container: any) => { const cpu = container?.resources?.requests?.cpu; let cpuParsed: undefined | number; if (cpu) { From b31380b1445669fa91888fb2e3feec382773590e Mon Sep 17 00:00:00 2001 From: Sreekanth Date: Tue, 3 Dec 2024 09:08:31 +0530 Subject: [PATCH 150/188] feat: Built-in Pulsar source (#2237) Signed-off-by: Vigith Maurice Signed-off-by: Sreekanth Co-authored-by: Vigith Maurice --- .github/workflows/ci.yaml | 4 + Dockerfile | 2 +- Makefile | 4 +- api/json-schema/schema.json | 45 + api/openapi-spec/swagger.json | 45 + .../numaflow.numaproj.io_monovertices.yaml | 35 + .../full/numaflow.numaproj.io_pipelines.yaml | 35 + .../full/numaflow.numaproj.io_vertices.yaml | 35 + config/install.yaml | 105 + config/namespace-install.yaml | 105 + docs/APIs.md | 216 +++ docs/user-guide/sources/kafka.md | 2 +- docs/user-guide/sources/nats.md | 2 +- docs/user-guide/sources/pulsar.md | 39 + mkdocs.yml | 1 + pkg/apis/numaflow/v1alpha1/generated.pb.go | 1715 +++++++++++------ pkg/apis/numaflow/v1alpha1/generated.proto | 27 + pkg/apis/numaflow/v1alpha1/pulsar_auth.go | 26 + pkg/apis/numaflow/v1alpha1/pulsar_source.go | 29 + pkg/apis/numaflow/v1alpha1/source.go | 2 + .../v1alpha1/zz_generated.deepcopy.go | 47 + .../numaflow/v1alpha1/zz_generated.openapi.go | 86 +- rust/Cargo.lock | 334 +++- rust/Cargo.toml | 2 +- rust/numaflow-core/Cargo.toml | 7 +- rust/numaflow-core/src/config/components.rs | 126 +- rust/numaflow-core/src/config/pipeline.rs | 54 + 
rust/numaflow-core/src/error.rs | 9 + rust/numaflow-core/src/message.rs | 4 +- rust/numaflow-core/src/monovertex.rs | 49 +- rust/numaflow-core/src/pipeline.rs | 32 +- rust/numaflow-core/src/shared/utils.rs | 9 + rust/numaflow-core/src/source.rs | 23 +- rust/numaflow-core/src/source/pulsar.rs | 157 ++ rust/numaflow-extns/pulsar/Cargo.toml | 24 + rust/numaflow-extns/pulsar/src/lib.rs | 35 + rust/numaflow-extns/pulsar/src/source.rs | 298 +++ rust/numaflow-models/src/models/mod.rs | 4 + .../numaflow-models/src/models/pulsar_auth.rs | 32 + .../src/models/pulsar_source.rs | 52 + rust/numaflow-models/src/models/source.rs | 3 + rust/rust-toolchain.toml | 2 +- 42 files changed, 3143 insertions(+), 720 deletions(-) create mode 100644 docs/user-guide/sources/pulsar.md create mode 100644 pkg/apis/numaflow/v1alpha1/pulsar_auth.go create mode 100644 pkg/apis/numaflow/v1alpha1/pulsar_source.go create mode 100644 rust/numaflow-core/src/source/pulsar.rs create mode 100644 rust/numaflow-extns/pulsar/Cargo.toml create mode 100644 rust/numaflow-extns/pulsar/src/lib.rs create mode 100644 rust/numaflow-extns/pulsar/src/source.rs create mode 100644 rust/numaflow-models/src/models/pulsar_auth.rs create mode 100644 rust/numaflow-models/src/models/pulsar_source.rs diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index a5a35fba4c..b7f59c2314 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -77,7 +77,11 @@ jobs: - 4222:4222 env: NATS_EXTRA_ARGS: -js + steps: + - name: Start Pulsar standalone container + run: docker run -d -p 6650:6650 -p 8080:8080 apachepulsar/pulsar:4.0.0 bin/pulsar standalone + - name: Set up Go 1.x uses: actions/setup-go@v5 with: diff --git a/Dockerfile b/Dockerfile index 57feea2da7..bded35d1f9 100644 --- a/Dockerfile +++ b/Dockerfile @@ -18,7 +18,7 @@ RUN chmod +x /bin/numaflow-rs #################################################################################################### # Rust binary 
#################################################################################################### -FROM lukemathwalker/cargo-chef:latest-rust-1.81 AS chef +FROM lukemathwalker/cargo-chef:latest-rust-1.82 AS chef ARG TARGETPLATFORM WORKDIR /numaflow RUN apt-get update && apt-get install -y protobuf-compiler diff --git a/Makefile b/Makefile index 450f9dc491..aab918c933 100644 --- a/Makefile +++ b/Makefile @@ -296,11 +296,11 @@ endif .PHONY: docs docs: /usr/local/bin/mkdocs docs-linkcheck - mkdocs build + $(PYTHON) -m mkdocs build .PHONY: docs-serve docs-serve: docs - mkdocs serve + $(PYTHON) -m mkdocs serve .PHONY: docs-linkcheck docs-linkcheck: /usr/local/bin/lychee diff --git a/api/json-schema/schema.json b/api/json-schema/schema.json index ffc4bbcfee..84ed58ed90 100644 --- a/api/json-schema/schema.json +++ b/api/json-schema/schema.json @@ -21651,6 +21651,48 @@ }, "type": "object" }, + "io.numaproj.numaflow.v1alpha1.PulsarAuth": { + "description": "PulsarAuth defines how to authenticate with Pulsar", + "properties": { + "token": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "JWT Token auth" + } + }, + "type": "object" + }, + "io.numaproj.numaflow.v1alpha1.PulsarSource": { + "properties": { + "auth": { + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.PulsarAuth", + "description": "Auth information" + }, + "consumerName": { + "type": "string" + }, + "maxUnack": { + "description": "Maximum number of messages that are in not yet acked state. 
Once this limit is crossed, futher read requests will return empty list.", + "format": "int64", + "type": "integer" + }, + "serverAddr": { + "type": "string" + }, + "subscriptionName": { + "type": "string" + }, + "topic": { + "type": "string" + } + }, + "required": [ + "serverAddr", + "topic", + "consumerName", + "subscriptionName" + ], + "type": "object" + }, "io.numaproj.numaflow.v1alpha1.RedisBufferService": { "properties": { "external": { @@ -22096,6 +22138,9 @@ "nats": { "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.NatsSource" }, + "pulsar": { + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.PulsarSource" + }, "serving": { "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.ServingSource" }, diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index afb3bb8b07..19c65fe5cf 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -21637,6 +21637,48 @@ } } }, + "io.numaproj.numaflow.v1alpha1.PulsarAuth": { + "description": "PulsarAuth defines how to authenticate with Pulsar", + "type": "object", + "properties": { + "token": { + "description": "JWT Token auth", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + } + } + }, + "io.numaproj.numaflow.v1alpha1.PulsarSource": { + "type": "object", + "required": [ + "serverAddr", + "topic", + "consumerName", + "subscriptionName" + ], + "properties": { + "auth": { + "description": "Auth information", + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.PulsarAuth" + }, + "consumerName": { + "type": "string" + }, + "maxUnack": { + "description": "Maximum number of messages that are in not yet acked state. 
Once this limit is crossed, futher read requests will return empty list.", + "type": "integer", + "format": "int64" + }, + "serverAddr": { + "type": "string" + }, + "subscriptionName": { + "type": "string" + }, + "topic": { + "type": "string" + } + } + }, "io.numaproj.numaflow.v1alpha1.RedisBufferService": { "type": "object", "properties": { @@ -22083,6 +22125,9 @@ "nats": { "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.NatsSource" }, + "pulsar": { + "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.PulsarSource" + }, "serving": { "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.ServingSource" }, diff --git a/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml b/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml index 79ffec3919..a9ff56ca3e 100644 --- a/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_monovertices.yaml @@ -4992,6 +4992,41 @@ spec: - subject - url type: object + pulsar: + properties: + auth: + properties: + token: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + consumerName: + type: string + maxUnack: + format: int32 + type: integer + serverAddr: + type: string + subscriptionName: + type: string + topic: + type: string + required: + - consumerName + - serverAddr + - subscriptionName + - topic + type: object serving: properties: auth: diff --git a/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml b/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml index d035799a96..3e06abecea 100644 --- a/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_pipelines.yaml @@ -9676,6 +9676,41 @@ spec: - subject - url type: object + pulsar: + properties: + auth: + properties: + token: + properties: + key: + type: string + name: + default: "" + type: string + optional: + 
type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + consumerName: + type: string + maxUnack: + format: int32 + type: integer + serverAddr: + type: string + subscriptionName: + type: string + topic: + type: string + required: + - consumerName + - serverAddr + - subscriptionName + - topic + type: object serving: properties: auth: diff --git a/config/base/crds/full/numaflow.numaproj.io_vertices.yaml b/config/base/crds/full/numaflow.numaproj.io_vertices.yaml index 0dfad151d6..5f80efaf57 100644 --- a/config/base/crds/full/numaflow.numaproj.io_vertices.yaml +++ b/config/base/crds/full/numaflow.numaproj.io_vertices.yaml @@ -4460,6 +4460,41 @@ spec: - subject - url type: object + pulsar: + properties: + auth: + properties: + token: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + consumerName: + type: string + maxUnack: + format: int32 + type: integer + serverAddr: + type: string + subscriptionName: + type: string + topic: + type: string + required: + - consumerName + - serverAddr + - subscriptionName + - topic + type: object serving: properties: auth: diff --git a/config/install.yaml b/config/install.yaml index d6332dc53f..6e3551c059 100644 --- a/config/install.yaml +++ b/config/install.yaml @@ -8179,6 +8179,41 @@ spec: - subject - url type: object + pulsar: + properties: + auth: + properties: + token: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + consumerName: + type: string + maxUnack: + format: int32 + type: integer + serverAddr: + type: string + subscriptionName: + type: string + topic: + type: string + required: + - consumerName + - serverAddr + - subscriptionName + - topic + type: object serving: properties: auth: @@ -19414,6 +19449,41 @@ spec: - 
subject - url type: object + pulsar: + properties: + auth: + properties: + token: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + consumerName: + type: string + maxUnack: + format: int32 + type: integer + serverAddr: + type: string + subscriptionName: + type: string + topic: + type: string + required: + - consumerName + - serverAddr + - subscriptionName + - topic + type: object serving: properties: auth: @@ -25824,6 +25894,41 @@ spec: - subject - url type: object + pulsar: + properties: + auth: + properties: + token: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + consumerName: + type: string + maxUnack: + format: int32 + type: integer + serverAddr: + type: string + subscriptionName: + type: string + topic: + type: string + required: + - consumerName + - serverAddr + - subscriptionName + - topic + type: object serving: properties: auth: diff --git a/config/namespace-install.yaml b/config/namespace-install.yaml index e2769284de..12821ec38d 100644 --- a/config/namespace-install.yaml +++ b/config/namespace-install.yaml @@ -8179,6 +8179,41 @@ spec: - subject - url type: object + pulsar: + properties: + auth: + properties: + token: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + consumerName: + type: string + maxUnack: + format: int32 + type: integer + serverAddr: + type: string + subscriptionName: + type: string + topic: + type: string + required: + - consumerName + - serverAddr + - subscriptionName + - topic + type: object serving: properties: auth: @@ -19414,6 +19449,41 @@ spec: - subject - url type: object + pulsar: + properties: + auth: + properties: + token: + 
properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + consumerName: + type: string + maxUnack: + format: int32 + type: integer + serverAddr: + type: string + subscriptionName: + type: string + topic: + type: string + required: + - consumerName + - serverAddr + - subscriptionName + - topic + type: object serving: properties: auth: @@ -25824,6 +25894,41 @@ spec: - subject - url type: object + pulsar: + properties: + auth: + properties: + token: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + consumerName: + type: string + maxUnack: + format: int32 + type: integer + serverAddr: + type: string + subscriptionName: + type: string + topic: + type: string + required: + - consumerName + - serverAddr + - subscriptionName + - topic + type: object serving: properties: auth: diff --git a/docs/APIs.md b/docs/APIs.md index 57c594c67b..bcda2eb8d4 100644 --- a/docs/APIs.md +++ b/docs/APIs.md @@ -8436,6 +8436,206 @@ having succeeded. Defaults to 3. Minimum value is 1. +

+ +PulsarAuth +

+ +

+ +(Appears on: +PulsarSource) +

+ +

+ +

+ +PulsarAuth defines how to authenticate with Pulsar +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +token
+ +Kubernetes core/v1.SecretKeySelector +
+ +(Optional) +

+ +JWT Token auth +

+ +
+ +

+ +PulsarSource +

+ +

+ +(Appears on: +Source) +

+ +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +serverAddr
string +
+ +
+ +topic
string +
+ +
+ +consumerName
string +
+ +
+ +subscriptionName
string +
+ +
+ +maxUnack
uint32 +
+ +

+ +Maximum number of messages that are in not yet acked state. Once this +limit is crossed, futher read requests will return empty list. +

+ +
+ +auth
+ PulsarAuth + +
+ +(Optional) +

+ +Auth information +

+ +
+

RedisBufferService @@ -10453,6 +10653,22 @@ JetStreamSource + + + + +pulsar
+ PulsarSource + + + + + +(Optional) + + + + diff --git a/docs/user-guide/sources/kafka.md b/docs/user-guide/sources/kafka.md index 3fdd9b2c4f..f0e9f61c3a 100644 --- a/docs/user-guide/sources/kafka.md +++ b/docs/user-guide/sources/kafka.md @@ -18,7 +18,7 @@ spec: offsets: initial: -2 # -2 for sarama.OffsetOldest, -1 for sarama.OffsetNewest. Default to sarama.OffsetNewest. tls: # Optional. - insecureSkipVerify: # Optional, where to skip TLS verification. Default to false. + insecureSkipVerify: # Optional, whether to skip TLS verification. Default to false. caCertSecret: # Optional, a secret reference, which contains the CA Cert. name: my-ca-cert key: my-ca-cert-key diff --git a/docs/user-guide/sources/nats.md b/docs/user-guide/sources/nats.md index 17ef120ffc..c57e26fb54 100644 --- a/docs/user-guide/sources/nats.md +++ b/docs/user-guide/sources/nats.md @@ -12,7 +12,7 @@ spec: subject: my-subject queue: my-queue # Queue subscription, see https://docs.nats.io/using-nats/developer/receiving/queues tls: # Optional. - insecureSkipVerify: # Optional, where to skip TLS verification. Default to false. + insecureSkipVerify: # Optional, whether to skip TLS verification. Default to false. caCertSecret: # Optional, a secret reference, which contains the CA Cert. name: my-ca-cert key: my-ca-cert-key diff --git a/docs/user-guide/sources/pulsar.md b/docs/user-guide/sources/pulsar.md new file mode 100644 index 0000000000..ca932920c9 --- /dev/null +++ b/docs/user-guide/sources/pulsar.md @@ -0,0 +1,39 @@ +# Pulsar Source + +#### NOTE: 1.5 Feature, not available Numaflow version < 1.5 + +A `Pulsar` source is used to ingest the messages from a Pulsar topic. 
+ +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: pulsar +type: Opaque +data: + token: ZXlKaGJHY2lPaUpJVXpJMU5pSjkuZXlKemRXSWlPaUowWlhOMExYVnpaWElpZlEuZkRTWFFOcEdBWUN4anN1QlZzSDRTM2VLOVlZdHpwejhfdkFZcUxwVHAybwo= + +--- +apiVersion: numaflow.numaproj.io/v1alpha1 +kind: Pipeline +metadata: + name: simple-pipeline +spec: + vertices: + - name: in + source: + pulsar: + serverAddr: "pulsar+ssl://borker.example.com:6651" + consumerName: my_consumer + topic: my_topic + subscriptionName: my_subscription + auth: # Optional + token: # Optional, pointing to a secret reference which contains the JWT Token. + name: pulsar + key: token +``` + +We have only tested the 4.0.x LTS version of Pulsar. Currently, the implementation only supports [JWT token](https://pulsar.apache.org/docs/4.0.x/security-jwt/) based authentication. If the `auth` field is not specified, Numaflow will connect to the Pulsar servers without authentication. + +More authentication mechanisms and the ability to customize Pulsar consumer will be added in the future. 
+ diff --git a/mkdocs.yml b/mkdocs.yml index 68e1c6411c..205c6f705a 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -51,6 +51,7 @@ nav: - user-guide/sources/generator.md - user-guide/sources/http.md - user-guide/sources/kafka.md + - user-guide/sources/pulsar.md - user-guide/sources/nats.md - user-guide/sources/user-defined-sources.md - Data Transformer: diff --git a/pkg/apis/numaflow/v1alpha1/generated.pb.go b/pkg/apis/numaflow/v1alpha1/generated.pb.go index cc34329198..0a90cd05d9 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.pb.go +++ b/pkg/apis/numaflow/v1alpha1/generated.pb.go @@ -1758,10 +1758,66 @@ func (m *Probe) XXX_DiscardUnknown() { var xxx_messageInfo_Probe proto.InternalMessageInfo +func (m *PulsarAuth) Reset() { *m = PulsarAuth{} } +func (*PulsarAuth) ProtoMessage() {} +func (*PulsarAuth) Descriptor() ([]byte, []int) { + return fileDescriptor_9d0d1b17d3865563, []int{61} +} +func (m *PulsarAuth) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PulsarAuth) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PulsarAuth) XXX_Merge(src proto.Message) { + xxx_messageInfo_PulsarAuth.Merge(m, src) +} +func (m *PulsarAuth) XXX_Size() int { + return m.Size() +} +func (m *PulsarAuth) XXX_DiscardUnknown() { + xxx_messageInfo_PulsarAuth.DiscardUnknown(m) +} + +var xxx_messageInfo_PulsarAuth proto.InternalMessageInfo + +func (m *PulsarSource) Reset() { *m = PulsarSource{} } +func (*PulsarSource) ProtoMessage() {} +func (*PulsarSource) Descriptor() ([]byte, []int) { + return fileDescriptor_9d0d1b17d3865563, []int{62} +} +func (m *PulsarSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PulsarSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m 
*PulsarSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PulsarSource.Merge(m, src) +} +func (m *PulsarSource) XXX_Size() int { + return m.Size() +} +func (m *PulsarSource) XXX_DiscardUnknown() { + xxx_messageInfo_PulsarSource.DiscardUnknown(m) +} + +var xxx_messageInfo_PulsarSource proto.InternalMessageInfo + func (m *RedisBufferService) Reset() { *m = RedisBufferService{} } func (*RedisBufferService) ProtoMessage() {} func (*RedisBufferService) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{61} + return fileDescriptor_9d0d1b17d3865563, []int{63} } func (m *RedisBufferService) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1789,7 +1845,7 @@ var xxx_messageInfo_RedisBufferService proto.InternalMessageInfo func (m *RedisConfig) Reset() { *m = RedisConfig{} } func (*RedisConfig) ProtoMessage() {} func (*RedisConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{62} + return fileDescriptor_9d0d1b17d3865563, []int{64} } func (m *RedisConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1817,7 +1873,7 @@ var xxx_messageInfo_RedisConfig proto.InternalMessageInfo func (m *RedisSettings) Reset() { *m = RedisSettings{} } func (*RedisSettings) ProtoMessage() {} func (*RedisSettings) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{63} + return fileDescriptor_9d0d1b17d3865563, []int{65} } func (m *RedisSettings) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1845,7 +1901,7 @@ var xxx_messageInfo_RedisSettings proto.InternalMessageInfo func (m *RetryStrategy) Reset() { *m = RetryStrategy{} } func (*RetryStrategy) ProtoMessage() {} func (*RetryStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{64} + return fileDescriptor_9d0d1b17d3865563, []int{66} } func (m *RetryStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1873,7 +1929,7 @@ var xxx_messageInfo_RetryStrategy 
proto.InternalMessageInfo func (m *RollingUpdateStrategy) Reset() { *m = RollingUpdateStrategy{} } func (*RollingUpdateStrategy) ProtoMessage() {} func (*RollingUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{65} + return fileDescriptor_9d0d1b17d3865563, []int{67} } func (m *RollingUpdateStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1901,7 +1957,7 @@ var xxx_messageInfo_RollingUpdateStrategy proto.InternalMessageInfo func (m *SASL) Reset() { *m = SASL{} } func (*SASL) ProtoMessage() {} func (*SASL) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{66} + return fileDescriptor_9d0d1b17d3865563, []int{68} } func (m *SASL) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1929,7 +1985,7 @@ var xxx_messageInfo_SASL proto.InternalMessageInfo func (m *SASLPlain) Reset() { *m = SASLPlain{} } func (*SASLPlain) ProtoMessage() {} func (*SASLPlain) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{67} + return fileDescriptor_9d0d1b17d3865563, []int{69} } func (m *SASLPlain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1957,7 +2013,7 @@ var xxx_messageInfo_SASLPlain proto.InternalMessageInfo func (m *Scale) Reset() { *m = Scale{} } func (*Scale) ProtoMessage() {} func (*Scale) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{68} + return fileDescriptor_9d0d1b17d3865563, []int{70} } func (m *Scale) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1985,7 +2041,7 @@ var xxx_messageInfo_Scale proto.InternalMessageInfo func (m *ServingSource) Reset() { *m = ServingSource{} } func (*ServingSource) ProtoMessage() {} func (*ServingSource) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{69} + return fileDescriptor_9d0d1b17d3865563, []int{71} } func (m *ServingSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2013,7 +2069,7 @@ var 
xxx_messageInfo_ServingSource proto.InternalMessageInfo func (m *ServingStore) Reset() { *m = ServingStore{} } func (*ServingStore) ProtoMessage() {} func (*ServingStore) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{70} + return fileDescriptor_9d0d1b17d3865563, []int{72} } func (m *ServingStore) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2041,7 +2097,7 @@ var xxx_messageInfo_ServingStore proto.InternalMessageInfo func (m *SessionWindow) Reset() { *m = SessionWindow{} } func (*SessionWindow) ProtoMessage() {} func (*SessionWindow) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{71} + return fileDescriptor_9d0d1b17d3865563, []int{73} } func (m *SessionWindow) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2069,7 +2125,7 @@ var xxx_messageInfo_SessionWindow proto.InternalMessageInfo func (m *SideInput) Reset() { *m = SideInput{} } func (*SideInput) ProtoMessage() {} func (*SideInput) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{72} + return fileDescriptor_9d0d1b17d3865563, []int{74} } func (m *SideInput) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2097,7 +2153,7 @@ var xxx_messageInfo_SideInput proto.InternalMessageInfo func (m *SideInputTrigger) Reset() { *m = SideInputTrigger{} } func (*SideInputTrigger) ProtoMessage() {} func (*SideInputTrigger) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{73} + return fileDescriptor_9d0d1b17d3865563, []int{75} } func (m *SideInputTrigger) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2125,7 +2181,7 @@ var xxx_messageInfo_SideInputTrigger proto.InternalMessageInfo func (m *SideInputsManagerTemplate) Reset() { *m = SideInputsManagerTemplate{} } func (*SideInputsManagerTemplate) ProtoMessage() {} func (*SideInputsManagerTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{74} + return fileDescriptor_9d0d1b17d3865563, 
[]int{76} } func (m *SideInputsManagerTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2153,7 +2209,7 @@ var xxx_messageInfo_SideInputsManagerTemplate proto.InternalMessageInfo func (m *Sink) Reset() { *m = Sink{} } func (*Sink) ProtoMessage() {} func (*Sink) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{75} + return fileDescriptor_9d0d1b17d3865563, []int{77} } func (m *Sink) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2181,7 +2237,7 @@ var xxx_messageInfo_Sink proto.InternalMessageInfo func (m *SlidingWindow) Reset() { *m = SlidingWindow{} } func (*SlidingWindow) ProtoMessage() {} func (*SlidingWindow) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{76} + return fileDescriptor_9d0d1b17d3865563, []int{78} } func (m *SlidingWindow) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2209,7 +2265,7 @@ var xxx_messageInfo_SlidingWindow proto.InternalMessageInfo func (m *Source) Reset() { *m = Source{} } func (*Source) ProtoMessage() {} func (*Source) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{77} + return fileDescriptor_9d0d1b17d3865563, []int{79} } func (m *Source) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2237,7 +2293,7 @@ var xxx_messageInfo_Source proto.InternalMessageInfo func (m *Status) Reset() { *m = Status{} } func (*Status) ProtoMessage() {} func (*Status) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{78} + return fileDescriptor_9d0d1b17d3865563, []int{80} } func (m *Status) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2265,7 +2321,7 @@ var xxx_messageInfo_Status proto.InternalMessageInfo func (m *TLS) Reset() { *m = TLS{} } func (*TLS) ProtoMessage() {} func (*TLS) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{79} + return fileDescriptor_9d0d1b17d3865563, []int{81} } func (m *TLS) XXX_Unmarshal(b []byte) error { return 
m.Unmarshal(b) @@ -2293,7 +2349,7 @@ var xxx_messageInfo_TLS proto.InternalMessageInfo func (m *TagConditions) Reset() { *m = TagConditions{} } func (*TagConditions) ProtoMessage() {} func (*TagConditions) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{80} + return fileDescriptor_9d0d1b17d3865563, []int{82} } func (m *TagConditions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2321,7 +2377,7 @@ var xxx_messageInfo_TagConditions proto.InternalMessageInfo func (m *Templates) Reset() { *m = Templates{} } func (*Templates) ProtoMessage() {} func (*Templates) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{81} + return fileDescriptor_9d0d1b17d3865563, []int{83} } func (m *Templates) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2349,7 +2405,7 @@ var xxx_messageInfo_Templates proto.InternalMessageInfo func (m *Transformer) Reset() { *m = Transformer{} } func (*Transformer) ProtoMessage() {} func (*Transformer) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{82} + return fileDescriptor_9d0d1b17d3865563, []int{84} } func (m *Transformer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2377,7 +2433,7 @@ var xxx_messageInfo_Transformer proto.InternalMessageInfo func (m *UDF) Reset() { *m = UDF{} } func (*UDF) ProtoMessage() {} func (*UDF) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{83} + return fileDescriptor_9d0d1b17d3865563, []int{85} } func (m *UDF) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2405,7 +2461,7 @@ var xxx_messageInfo_UDF proto.InternalMessageInfo func (m *UDSink) Reset() { *m = UDSink{} } func (*UDSink) ProtoMessage() {} func (*UDSink) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{84} + return fileDescriptor_9d0d1b17d3865563, []int{86} } func (m *UDSink) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2433,7 +2489,7 @@ var 
xxx_messageInfo_UDSink proto.InternalMessageInfo func (m *UDSource) Reset() { *m = UDSource{} } func (*UDSource) ProtoMessage() {} func (*UDSource) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{85} + return fileDescriptor_9d0d1b17d3865563, []int{87} } func (m *UDSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2461,7 +2517,7 @@ var xxx_messageInfo_UDSource proto.InternalMessageInfo func (m *UDTransformer) Reset() { *m = UDTransformer{} } func (*UDTransformer) ProtoMessage() {} func (*UDTransformer) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{86} + return fileDescriptor_9d0d1b17d3865563, []int{88} } func (m *UDTransformer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2489,7 +2545,7 @@ var xxx_messageInfo_UDTransformer proto.InternalMessageInfo func (m *UpdateStrategy) Reset() { *m = UpdateStrategy{} } func (*UpdateStrategy) ProtoMessage() {} func (*UpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{87} + return fileDescriptor_9d0d1b17d3865563, []int{89} } func (m *UpdateStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2517,7 +2573,7 @@ var xxx_messageInfo_UpdateStrategy proto.InternalMessageInfo func (m *Vertex) Reset() { *m = Vertex{} } func (*Vertex) ProtoMessage() {} func (*Vertex) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{88} + return fileDescriptor_9d0d1b17d3865563, []int{90} } func (m *Vertex) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2545,7 +2601,7 @@ var xxx_messageInfo_Vertex proto.InternalMessageInfo func (m *VertexInstance) Reset() { *m = VertexInstance{} } func (*VertexInstance) ProtoMessage() {} func (*VertexInstance) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{89} + return fileDescriptor_9d0d1b17d3865563, []int{91} } func (m *VertexInstance) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2573,7 
+2629,7 @@ var xxx_messageInfo_VertexInstance proto.InternalMessageInfo func (m *VertexLimits) Reset() { *m = VertexLimits{} } func (*VertexLimits) ProtoMessage() {} func (*VertexLimits) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{90} + return fileDescriptor_9d0d1b17d3865563, []int{92} } func (m *VertexLimits) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2601,7 +2657,7 @@ var xxx_messageInfo_VertexLimits proto.InternalMessageInfo func (m *VertexList) Reset() { *m = VertexList{} } func (*VertexList) ProtoMessage() {} func (*VertexList) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{91} + return fileDescriptor_9d0d1b17d3865563, []int{93} } func (m *VertexList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2629,7 +2685,7 @@ var xxx_messageInfo_VertexList proto.InternalMessageInfo func (m *VertexSpec) Reset() { *m = VertexSpec{} } func (*VertexSpec) ProtoMessage() {} func (*VertexSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{92} + return fileDescriptor_9d0d1b17d3865563, []int{94} } func (m *VertexSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2657,7 +2713,7 @@ var xxx_messageInfo_VertexSpec proto.InternalMessageInfo func (m *VertexStatus) Reset() { *m = VertexStatus{} } func (*VertexStatus) ProtoMessage() {} func (*VertexStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{93} + return fileDescriptor_9d0d1b17d3865563, []int{95} } func (m *VertexStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2685,7 +2741,7 @@ var xxx_messageInfo_VertexStatus proto.InternalMessageInfo func (m *VertexTemplate) Reset() { *m = VertexTemplate{} } func (*VertexTemplate) ProtoMessage() {} func (*VertexTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{94} + return fileDescriptor_9d0d1b17d3865563, []int{96} } func (m *VertexTemplate) XXX_Unmarshal(b []byte) 
error { return m.Unmarshal(b) @@ -2713,7 +2769,7 @@ var xxx_messageInfo_VertexTemplate proto.InternalMessageInfo func (m *Watermark) Reset() { *m = Watermark{} } func (*Watermark) ProtoMessage() {} func (*Watermark) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{95} + return fileDescriptor_9d0d1b17d3865563, []int{97} } func (m *Watermark) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2741,7 +2797,7 @@ var xxx_messageInfo_Watermark proto.InternalMessageInfo func (m *Window) Reset() { *m = Window{} } func (*Window) ProtoMessage() {} func (*Window) Descriptor() ([]byte, []int) { - return fileDescriptor_9d0d1b17d3865563, []int{96} + return fileDescriptor_9d0d1b17d3865563, []int{98} } func (m *Window) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2836,6 +2892,8 @@ func init() { proto.RegisterType((*PipelineSpec)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.PipelineSpec") proto.RegisterType((*PipelineStatus)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.PipelineStatus") proto.RegisterType((*Probe)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.Probe") + proto.RegisterType((*PulsarAuth)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.PulsarAuth") + proto.RegisterType((*PulsarSource)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.PulsarSource") proto.RegisterType((*RedisBufferService)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.RedisBufferService") proto.RegisterType((*RedisConfig)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.RedisConfig") proto.RegisterType((*RedisSettings)(nil), "github.com.numaproj.numaflow.pkg.apis.numaflow.v1alpha1.RedisSettings") @@ -2880,518 +2938,526 @@ func init() { } var fileDescriptor_9d0d1b17d3865563 = []byte{ - // 8167 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x5d, 0x6c, 0x24, 0x57, - 0x76, 0x9e, 
0xfa, 0x8f, 0xdd, 0x7d, 0x9a, 0x7f, 0xba, 0x33, 0x1a, 0x71, 0xb8, 0xa3, 0xe9, 0xd9, - 0x5a, 0xaf, 0x76, 0x1c, 0xdb, 0x64, 0x44, 0xaf, 0xb4, 0x5a, 0xdb, 0xbb, 0x12, 0x9b, 0x1c, 0x72, - 0xa8, 0x21, 0x67, 0xb8, 0xa7, 0xc9, 0x91, 0xd6, 0x8a, 0x57, 0x29, 0x56, 0x5d, 0x36, 0x4b, 0xac, - 0xae, 0xea, 0xad, 0xaa, 0xe6, 0x0c, 0xe5, 0x18, 0x6b, 0xef, 0x26, 0xd0, 0x06, 0x49, 0x90, 0xc0, - 0x4f, 0x06, 0x02, 0x27, 0x48, 0x10, 0xc0, 0x0f, 0x86, 0xf3, 0x60, 0x64, 0xf3, 0x10, 0x20, 0x3f, - 0x0e, 0x82, 0x64, 0xf3, 0xbf, 0x08, 0x02, 0x44, 0x79, 0x08, 0x91, 0x65, 0x90, 0x87, 0x04, 0x48, - 0xe0, 0xc4, 0x48, 0x9c, 0x0c, 0x8c, 0x38, 0xb8, 0x7f, 0xf5, 0xd7, 0xd5, 0x33, 0x64, 0x57, 0x73, - 0x34, 0x5a, 0xeb, 0xad, 0xea, 0x9e, 0x73, 0xbf, 0x73, 0xeb, 0xd6, 0xfd, 0x39, 0xf7, 0x9c, 0x73, - 0xef, 0x85, 0xf5, 0x8e, 0x15, 0x1c, 0xf4, 0xf7, 0x16, 0x0c, 0xb7, 0xbb, 0xe8, 0xf4, 0xbb, 0x7a, - 0xcf, 0x73, 0xdf, 0xe7, 0x0f, 0xfb, 0xb6, 0xfb, 0x60, 0xb1, 0x77, 0xd8, 0x59, 0xd4, 0x7b, 0x96, - 0x1f, 0xa5, 0x1c, 0xbd, 0xa2, 0xdb, 0xbd, 0x03, 0xfd, 0x95, 0xc5, 0x0e, 0x75, 0xa8, 0xa7, 0x07, - 0xd4, 0x5c, 0xe8, 0x79, 0x6e, 0xe0, 0x92, 0x2f, 0x45, 0x40, 0x0b, 0x0a, 0x68, 0x41, 0x65, 0x5b, - 0xe8, 0x1d, 0x76, 0x16, 0x18, 0x50, 0x94, 0xa2, 0x80, 0xe6, 0x7f, 0x2a, 0x56, 0x82, 0x8e, 0xdb, - 0x71, 0x17, 0x39, 0xde, 0x5e, 0x7f, 0x9f, 0xbf, 0xf1, 0x17, 0xfe, 0x24, 0xe4, 0xcc, 0x6b, 0x87, - 0xaf, 0xfb, 0x0b, 0x96, 0xcb, 0x8a, 0xb5, 0x68, 0xb8, 0x1e, 0x5d, 0x3c, 0x1a, 0x28, 0xcb, 0xfc, - 0x17, 0x23, 0x9e, 0xae, 0x6e, 0x1c, 0x58, 0x0e, 0xf5, 0x8e, 0xd5, 0xb7, 0x2c, 0x7a, 0xd4, 0x77, - 0xfb, 0x9e, 0x41, 0xcf, 0x95, 0xcb, 0x5f, 0xec, 0xd2, 0x40, 0xcf, 0x92, 0xb5, 0x38, 0x2c, 0x97, - 0xd7, 0x77, 0x02, 0xab, 0x3b, 0x28, 0xe6, 0xb5, 0x27, 0x65, 0xf0, 0x8d, 0x03, 0xda, 0xd5, 0x07, - 0xf2, 0xfd, 0xf4, 0xb0, 0x7c, 0xfd, 0xc0, 0xb2, 0x17, 0x2d, 0x27, 0xf0, 0x03, 0x2f, 0x9d, 0x49, - 0xfb, 0x1d, 0x80, 0x4b, 0xcb, 0x7b, 0x7e, 0xe0, 0xe9, 0x46, 0xb0, 0xed, 0x9a, 0x3b, 0xb4, 0xdb, - 0xb3, 0xf5, 0x80, 0x92, 0x43, 0xa8, 0xb1, 0x0f, 
0x32, 0xf5, 0x40, 0x9f, 0x2b, 0xdc, 0x28, 0xdc, - 0x6c, 0x2c, 0x2d, 0x2f, 0x8c, 0xf8, 0x03, 0x17, 0xb6, 0x24, 0x50, 0x6b, 0xf2, 0xf4, 0xa4, 0x59, - 0x53, 0x6f, 0x18, 0x0a, 0x20, 0xbf, 0x56, 0x80, 0x49, 0xc7, 0x35, 0x69, 0x9b, 0xda, 0xd4, 0x08, - 0x5c, 0x6f, 0xae, 0x78, 0xa3, 0x74, 0xb3, 0xb1, 0xf4, 0x8d, 0x91, 0x25, 0x66, 0x7c, 0xd1, 0xc2, - 0xdd, 0x98, 0x80, 0x5b, 0x4e, 0xe0, 0x1d, 0xb7, 0x2e, 0x7f, 0xff, 0xa4, 0xf9, 0xdc, 0xe9, 0x49, - 0x73, 0x32, 0x4e, 0xc2, 0x44, 0x49, 0xc8, 0x2e, 0x34, 0x02, 0xd7, 0x66, 0x55, 0x66, 0xb9, 0x8e, - 0x3f, 0x57, 0xe2, 0x05, 0xbb, 0xbe, 0x20, 0xaa, 0x9a, 0x89, 0x5f, 0x60, 0x6d, 0x6c, 0xe1, 0xe8, - 0x95, 0x85, 0x9d, 0x90, 0xad, 0x75, 0x49, 0x02, 0x37, 0xa2, 0x34, 0x1f, 0xe3, 0x38, 0x84, 0xc2, - 0x8c, 0x4f, 0x8d, 0xbe, 0x67, 0x05, 0xc7, 0x2b, 0xae, 0x13, 0xd0, 0x87, 0xc1, 0x5c, 0x99, 0xd7, - 0xf2, 0xcb, 0x59, 0xd0, 0xdb, 0xae, 0xd9, 0x4e, 0x72, 0xb7, 0x2e, 0x9d, 0x9e, 0x34, 0x67, 0x52, - 0x89, 0x98, 0xc6, 0x24, 0x0e, 0xcc, 0x5a, 0x5d, 0xbd, 0x43, 0xb7, 0xfb, 0xb6, 0xdd, 0xa6, 0x86, - 0x47, 0x03, 0x7f, 0xae, 0xc2, 0x3f, 0xe1, 0x66, 0x96, 0x9c, 0x4d, 0xd7, 0xd0, 0xed, 0x7b, 0x7b, - 0xef, 0x53, 0x23, 0x40, 0xba, 0x4f, 0x3d, 0xea, 0x18, 0xb4, 0x35, 0x27, 0x3f, 0x66, 0x76, 0x23, - 0x85, 0x84, 0x03, 0xd8, 0x64, 0x1d, 0x9e, 0xef, 0x79, 0x96, 0xcb, 0x8b, 0x60, 0xeb, 0xbe, 0x7f, - 0x57, 0xef, 0xd2, 0xb9, 0x89, 0x1b, 0x85, 0x9b, 0xf5, 0xd6, 0x55, 0x09, 0xf3, 0xfc, 0x76, 0x9a, - 0x01, 0x07, 0xf3, 0x90, 0x9b, 0x50, 0x53, 0x89, 0x73, 0xd5, 0x1b, 0x85, 0x9b, 0x15, 0xd1, 0x76, - 0x54, 0x5e, 0x0c, 0xa9, 0x64, 0x0d, 0x6a, 0xfa, 0xfe, 0xbe, 0xe5, 0x30, 0xce, 0x1a, 0xaf, 0xc2, - 0x6b, 0x59, 0x9f, 0xb6, 0x2c, 0x79, 0x04, 0x8e, 0x7a, 0xc3, 0x30, 0x2f, 0x79, 0x0b, 0x88, 0x4f, - 0xbd, 0x23, 0xcb, 0xa0, 0xcb, 0x86, 0xe1, 0xf6, 0x9d, 0x80, 0x97, 0xbd, 0xce, 0xcb, 0x3e, 0x2f, - 0xcb, 0x4e, 0xda, 0x03, 0x1c, 0x98, 0x91, 0x8b, 0xbc, 0x09, 0xb3, 0xb2, 0xaf, 0x46, 0xb5, 0x00, - 0x1c, 0xe9, 0x32, 0xab, 0x48, 0x4c, 0xd1, 0x70, 0x80, 0x9b, 0x98, 0x70, 0x4d, 0xef, 
0x07, 0x6e, - 0x97, 0x41, 0x26, 0x85, 0xee, 0xb8, 0x87, 0xd4, 0x99, 0x6b, 0xdc, 0x28, 0xdc, 0xac, 0xb5, 0x6e, - 0x9c, 0x9e, 0x34, 0xaf, 0x2d, 0x3f, 0x86, 0x0f, 0x1f, 0x8b, 0x42, 0xee, 0x41, 0xdd, 0x74, 0xfc, - 0x6d, 0xd7, 0xb6, 0x8c, 0xe3, 0xb9, 0x49, 0x5e, 0xc0, 0x57, 0xe4, 0xa7, 0xd6, 0x57, 0xef, 0xb6, - 0x05, 0xe1, 0xd1, 0x49, 0xf3, 0xda, 0xe0, 0x90, 0xba, 0x10, 0xd2, 0x31, 0xc2, 0x20, 0x5b, 0x1c, - 0x70, 0xc5, 0x75, 0xf6, 0xad, 0xce, 0xdc, 0x14, 0xff, 0x1b, 0x37, 0x86, 0x34, 0xe8, 0xd5, 0xbb, - 0x6d, 0xc1, 0xd7, 0x9a, 0x92, 0xe2, 0xc4, 0x2b, 0x46, 0x08, 0xc4, 0x84, 0x69, 0x35, 0x18, 0xaf, - 0xd8, 0xba, 0xd5, 0xf5, 0xe7, 0xa6, 0x79, 0xe3, 0xfd, 0xb1, 0x21, 0x98, 0x18, 0x67, 0x6e, 0x5d, - 0x91, 0x9f, 0x32, 0x9d, 0x48, 0xf6, 0x31, 0x85, 0x39, 0xff, 0x06, 0x3c, 0x3f, 0x30, 0x36, 0x90, - 0x59, 0x28, 0x1d, 0xd2, 0x63, 0x3e, 0xf4, 0xd5, 0x91, 0x3d, 0x92, 0xcb, 0x50, 0x39, 0xd2, 0xed, - 0x3e, 0x9d, 0x2b, 0xf2, 0x34, 0xf1, 0xf2, 0x33, 0xc5, 0xd7, 0x0b, 0xda, 0x5f, 0x2f, 0xc1, 0xa4, - 0x1a, 0x71, 0xda, 0x96, 0x73, 0x48, 0xde, 0x86, 0x92, 0xed, 0x76, 0xe4, 0xb8, 0xf9, 0x73, 0x23, - 0x8f, 0x62, 0x9b, 0x6e, 0xa7, 0x55, 0x3d, 0x3d, 0x69, 0x96, 0x36, 0xdd, 0x0e, 0x32, 0x44, 0x62, - 0x40, 0xe5, 0x50, 0xdf, 0x3f, 0xd4, 0x79, 0x19, 0x1a, 0x4b, 0xad, 0x91, 0xa1, 0xef, 0x30, 0x14, - 0x56, 0xd6, 0x56, 0xfd, 0xf4, 0xa4, 0x59, 0xe1, 0xaf, 0x28, 0xb0, 0x89, 0x0b, 0xf5, 0x3d, 0x5b, - 0x37, 0x0e, 0x0f, 0x5c, 0x9b, 0xce, 0x95, 0x72, 0x0a, 0x6a, 0x29, 0x24, 0xf1, 0x9b, 0xc3, 0x57, - 0x8c, 0x64, 0x10, 0x03, 0x26, 0xfa, 0xa6, 0x6f, 0x39, 0x87, 0x72, 0x0c, 0x7c, 0x63, 0x64, 0x69, - 0xbb, 0xab, 0xfc, 0x9b, 0xe0, 0xf4, 0xa4, 0x39, 0x21, 0x9e, 0x51, 0x42, 0x6b, 0xff, 0x67, 0x12, - 0xa6, 0xd5, 0x4f, 0xba, 0x4f, 0xbd, 0x80, 0x3e, 0x24, 0x37, 0xa0, 0xec, 0xb0, 0xae, 0xc9, 0x7f, - 0x72, 0x6b, 0x52, 0x36, 0x97, 0x32, 0xef, 0x92, 0x9c, 0xc2, 0x4a, 0x26, 0x9a, 0x8a, 0xac, 0xf0, - 0xd1, 0x4b, 0xd6, 0xe6, 0x30, 0xa2, 0x64, 0xe2, 0x19, 0x25, 0x34, 0x79, 0x17, 0xca, 0xfc, 0xe3, - 0x45, 0x55, 0x7f, 0x65, 
0x74, 0x11, 0xec, 0xd3, 0x6b, 0xec, 0x0b, 0xf8, 0x87, 0x73, 0x50, 0xd6, - 0x14, 0xfb, 0xe6, 0xbe, 0xac, 0xd8, 0x9f, 0xcb, 0x51, 0xb1, 0x6b, 0xa2, 0x29, 0xee, 0xae, 0xae, - 0x21, 0x43, 0x24, 0x7f, 0xb1, 0x00, 0xcf, 0x1b, 0xae, 0x13, 0xe8, 0x4c, 0xcf, 0x50, 0x93, 0xec, - 0x5c, 0x85, 0xcb, 0x79, 0x6b, 0x64, 0x39, 0x2b, 0x69, 0xc4, 0xd6, 0x0b, 0x6c, 0xce, 0x18, 0x48, - 0xc6, 0x41, 0xd9, 0xe4, 0x2f, 0x17, 0xe0, 0x05, 0x36, 0x96, 0x0f, 0x30, 0xf3, 0x19, 0x68, 0xbc, - 0xa5, 0xba, 0x7a, 0x7a, 0xd2, 0x7c, 0x61, 0x23, 0x4b, 0x18, 0x66, 0x97, 0x81, 0x95, 0xee, 0x92, - 0x3e, 0xa8, 0x96, 0xf0, 0xd9, 0xad, 0xb1, 0xb4, 0x39, 0x4e, 0x55, 0xa7, 0xf5, 0x19, 0xd9, 0x94, - 0xb3, 0x34, 0x3b, 0xcc, 0x2a, 0x05, 0xb9, 0x05, 0xd5, 0x23, 0xd7, 0xee, 0x77, 0xa9, 0x3f, 0x57, - 0xe3, 0x43, 0xec, 0x7c, 0xd6, 0x10, 0x7b, 0x9f, 0xb3, 0xb4, 0x66, 0x24, 0x7c, 0x55, 0xbc, 0xfb, - 0xa8, 0xf2, 0x12, 0x0b, 0x26, 0x6c, 0xab, 0x6b, 0x05, 0x3e, 0x9f, 0x38, 0x1b, 0x4b, 0xb7, 0x46, - 0xfe, 0x2c, 0xd1, 0x45, 0x37, 0x39, 0x98, 0xe8, 0x35, 0xe2, 0x19, 0xa5, 0x00, 0x36, 0x14, 0xfa, - 0x86, 0x6e, 0x8b, 0x89, 0xb5, 0xb1, 0xf4, 0xd5, 0xd1, 0xbb, 0x0d, 0x43, 0x69, 0x4d, 0xc9, 0x6f, - 0xaa, 0xf0, 0x57, 0x14, 0xd8, 0xe4, 0x17, 0x60, 0x3a, 0xf1, 0x37, 0xfd, 0xb9, 0x06, 0xaf, 0x9d, - 0x97, 0xb2, 0x6a, 0x27, 0xe4, 0x8a, 0x66, 0x9e, 0x44, 0x0b, 0xf1, 0x31, 0x05, 0x46, 0xee, 0x40, - 0xcd, 0xb7, 0x4c, 0x6a, 0xe8, 0x9e, 0x3f, 0x37, 0x79, 0x16, 0xe0, 0x59, 0x09, 0x5c, 0x6b, 0xcb, - 0x6c, 0x18, 0x02, 0x90, 0x05, 0x80, 0x9e, 0xee, 0x05, 0x96, 0x50, 0x54, 0xa7, 0xb8, 0xd2, 0x34, - 0x7d, 0x7a, 0xd2, 0x84, 0xed, 0x30, 0x15, 0x63, 0x1c, 0x8c, 0x9f, 0xe5, 0xdd, 0x70, 0x7a, 0xfd, - 0x40, 0x4c, 0xac, 0x75, 0xc1, 0xdf, 0x0e, 0x53, 0x31, 0xc6, 0x41, 0x7e, 0xab, 0x00, 0x9f, 0x89, - 0x5e, 0x07, 0x3b, 0xd9, 0xcc, 0xd8, 0x3b, 0x59, 0xf3, 0xf4, 0xa4, 0xf9, 0x99, 0xf6, 0x70, 0x91, - 0xf8, 0xb8, 0xf2, 0x90, 0x0f, 0x0b, 0x30, 0xdd, 0xef, 0x99, 0x7a, 0x40, 0xdb, 0x01, 0x5b, 0xf1, - 0x74, 0x8e, 0xe7, 0x66, 0x79, 0x11, 0xd7, 0x47, 0x1f, 0x05, 
0x13, 0x70, 0xd1, 0x6f, 0x4e, 0xa6, - 0x63, 0x4a, 0xac, 0xf6, 0x36, 0x4c, 0x2d, 0xf7, 0x83, 0x03, 0xd7, 0xb3, 0x3e, 0xe0, 0xea, 0x3f, - 0x59, 0x83, 0x4a, 0xc0, 0xd5, 0x38, 0xa1, 0x21, 0x7c, 0x3e, 0xeb, 0xa7, 0x0b, 0x95, 0xfa, 0x0e, - 0x3d, 0x56, 0x7a, 0x89, 0x98, 0xa9, 0x85, 0x5a, 0x27, 0xb2, 0x6b, 0x7f, 0xba, 0x00, 0xd5, 0x96, - 0x6e, 0x1c, 0xba, 0xfb, 0xfb, 0xe4, 0x1d, 0xa8, 0x59, 0x4e, 0x40, 0xbd, 0x23, 0xdd, 0x96, 0xb0, - 0x0b, 0x31, 0xd8, 0x70, 0x41, 0x18, 0x7d, 0x1e, 0x5b, 0x7d, 0x31, 0x41, 0xab, 0x7d, 0xb9, 0x6a, - 0xe1, 0x9a, 0xf1, 0x86, 0xc4, 0xc0, 0x10, 0x8d, 0x34, 0xa1, 0xe2, 0x07, 0xb4, 0xe7, 0xf3, 0x39, - 0x70, 0x4a, 0x14, 0xa3, 0xcd, 0x12, 0x50, 0xa4, 0x6b, 0x7f, 0xad, 0x00, 0xf5, 0x96, 0xee, 0x5b, - 0x06, 0xfb, 0x4a, 0xb2, 0x02, 0xe5, 0xbe, 0x4f, 0xbd, 0xf3, 0x7d, 0x1b, 0x9f, 0xb6, 0x76, 0x7d, - 0xea, 0x21, 0xcf, 0x4c, 0xee, 0x41, 0xad, 0xa7, 0xfb, 0xfe, 0x03, 0xd7, 0x33, 0xe5, 0xd4, 0x7b, - 0x46, 0x20, 0xb1, 0x4c, 0x90, 0x59, 0x31, 0x04, 0xd1, 0x1a, 0x10, 0xe9, 0x1e, 0xda, 0xef, 0x15, - 0xe0, 0x52, 0xab, 0xbf, 0xbf, 0x4f, 0x3d, 0xa9, 0x15, 0x4b, 0x7d, 0x93, 0x42, 0xc5, 0xa3, 0xa6, - 0xe5, 0xcb, 0xb2, 0xaf, 0x8e, 0xdc, 0x50, 0x90, 0xa1, 0x48, 0xf5, 0x96, 0xd7, 0x17, 0x4f, 0x40, - 0x81, 0x4e, 0xfa, 0x50, 0x7f, 0x9f, 0xb2, 0xd5, 0x38, 0xd5, 0xbb, 0xf2, 0xeb, 0x6e, 0x8f, 0x2c, - 0xea, 0x2d, 0x1a, 0xb4, 0x39, 0x52, 0x5c, 0x9b, 0x0e, 0x13, 0x31, 0x92, 0xa4, 0xfd, 0x4e, 0x05, - 0x26, 0x57, 0xdc, 0xee, 0x9e, 0xe5, 0x50, 0xf3, 0x96, 0xd9, 0xa1, 0xe4, 0x3d, 0x28, 0x53, 0xb3, - 0x43, 0xe5, 0xd7, 0x8e, 0xae, 0x78, 0x30, 0xb0, 0x48, 0x7d, 0x62, 0x6f, 0xc8, 0x81, 0xc9, 0x26, - 0x4c, 0xef, 0x7b, 0x6e, 0x57, 0x8c, 0xe5, 0x3b, 0xc7, 0x3d, 0xa9, 0x3b, 0xb7, 0x7e, 0x4c, 0x75, - 0x9c, 0xb5, 0x04, 0xf5, 0xd1, 0x49, 0x13, 0xa2, 0x37, 0x4c, 0xe5, 0x25, 0xef, 0xc0, 0x5c, 0x94, - 0x12, 0x0e, 0x6a, 0x2b, 0x6c, 0x39, 0xc3, 0x75, 0xa7, 0x4a, 0xeb, 0xda, 0xe9, 0x49, 0x73, 0x6e, - 0x6d, 0x08, 0x0f, 0x0e, 0xcd, 0xcd, 0x86, 0x8a, 0xd9, 0x88, 0x28, 0x26, 0x1a, 0xa9, 0x32, 0x8d, - 
0x69, 0x06, 0xe3, 0xeb, 0xbe, 0xb5, 0x94, 0x08, 0x1c, 0x10, 0x4a, 0xd6, 0x60, 0x32, 0x70, 0x63, - 0xf5, 0x55, 0xe1, 0xf5, 0xa5, 0x29, 0x43, 0xc5, 0x8e, 0x3b, 0xb4, 0xb6, 0x12, 0xf9, 0x08, 0xc2, - 0x15, 0xf5, 0x9e, 0xaa, 0xa9, 0x09, 0x5e, 0x53, 0xf3, 0xa7, 0x27, 0xcd, 0x2b, 0x3b, 0x99, 0x1c, - 0x38, 0x24, 0x27, 0xf9, 0x95, 0x02, 0x4c, 0x2b, 0x92, 0xac, 0xa3, 0xea, 0x38, 0xeb, 0x88, 0xb0, - 0x16, 0xb1, 0x93, 0x10, 0x80, 0x29, 0x81, 0xda, 0xf7, 0xaa, 0x50, 0x0f, 0x87, 0x7a, 0xf2, 0x39, - 0xa8, 0x70, 0x13, 0x84, 0xd4, 0xe0, 0xc3, 0x39, 0x9c, 0x5b, 0x2a, 0x50, 0xd0, 0xc8, 0xe7, 0xa1, - 0x6a, 0xb8, 0xdd, 0xae, 0xee, 0x98, 0xdc, 0xac, 0x54, 0x6f, 0x35, 0x98, 0xea, 0xb2, 0x22, 0x92, - 0x50, 0xd1, 0xc8, 0x35, 0x28, 0xeb, 0x5e, 0x47, 0x58, 0x78, 0xea, 0x62, 0x3c, 0x5a, 0xf6, 0x3a, - 0x3e, 0xf2, 0x54, 0xf2, 0x65, 0x28, 0x51, 0xe7, 0x68, 0xae, 0x3c, 0x5c, 0x37, 0xba, 0xe5, 0x1c, - 0xdd, 0xd7, 0xbd, 0x56, 0x43, 0x96, 0xa1, 0x74, 0xcb, 0x39, 0x42, 0x96, 0x87, 0x6c, 0x42, 0x95, - 0x3a, 0x47, 0xec, 0xdf, 0x4b, 0xd3, 0xcb, 0x67, 0x87, 0x64, 0x67, 0x2c, 0x72, 0x99, 0x10, 0x6a, - 0x58, 0x32, 0x19, 0x15, 0x04, 0xf9, 0x3a, 0x4c, 0x0a, 0x65, 0x6b, 0x8b, 0xfd, 0x13, 0x7f, 0x6e, - 0x82, 0x43, 0x36, 0x87, 0x6b, 0x6b, 0x9c, 0x2f, 0x32, 0x75, 0xc5, 0x12, 0x7d, 0x4c, 0x40, 0x91, - 0xaf, 0x43, 0x5d, 0xad, 0x8c, 0xd5, 0x9f, 0xcd, 0xb4, 0x12, 0xa9, 0xe5, 0x34, 0xd2, 0x6f, 0xf6, - 0x2d, 0x8f, 0x76, 0xa9, 0x13, 0xf8, 0xad, 0xe7, 0x95, 0xdd, 0x40, 0x51, 0x7d, 0x8c, 0xd0, 0xc8, - 0xde, 0xa0, 0xb9, 0x4b, 0xd8, 0x6a, 0x3e, 0x37, 0x64, 0x54, 0x1f, 0xc1, 0xd6, 0xf5, 0x0d, 0x98, - 0x09, 0xed, 0x51, 0xd2, 0xa4, 0x21, 0xac, 0x37, 0x5f, 0x64, 0xd9, 0x37, 0x92, 0xa4, 0x47, 0x27, - 0xcd, 0x97, 0x32, 0x8c, 0x1a, 0x11, 0x03, 0xa6, 0xc1, 0xc8, 0x07, 0x30, 0xed, 0x51, 0xdd, 0xb4, - 0x1c, 0xea, 0xfb, 0xdb, 0x9e, 0xbb, 0x97, 0x5f, 0xf3, 0xe4, 0x28, 0xa2, 0xd9, 0x63, 0x02, 0x19, - 0x53, 0x92, 0xc8, 0x03, 0x98, 0xb2, 0xad, 0x23, 0x1a, 0x89, 0x6e, 0x8c, 0x45, 0xf4, 0xf3, 0xa7, - 0x27, 0xcd, 0xa9, 0xcd, 0x38, 0x30, 
0x26, 0xe5, 0x30, 0x4d, 0xa5, 0xe7, 0x7a, 0x81, 0x52, 0x4f, - 0x3f, 0xfb, 0x58, 0xf5, 0x74, 0xdb, 0xf5, 0x82, 0xa8, 0x13, 0xb2, 0x37, 0x1f, 0x45, 0x76, 0xed, - 0x6f, 0x55, 0x60, 0x70, 0x11, 0x97, 0x6c, 0x71, 0x85, 0x71, 0xb7, 0xb8, 0x74, 0x6b, 0x10, 0x73, - 0xcf, 0xeb, 0x32, 0xdb, 0x18, 0x5a, 0x44, 0x46, 0xab, 0x2e, 0x8d, 0xbb, 0x55, 0x3f, 0x33, 0x03, - 0xcf, 0x60, 0xf3, 0x9f, 0xf8, 0xf8, 0x9a, 0x7f, 0xf5, 0xe9, 0x34, 0x7f, 0xed, 0xbb, 0x65, 0x98, - 0x5e, 0xd5, 0x69, 0xd7, 0x75, 0x9e, 0xb8, 0x8e, 0x2f, 0x3c, 0x13, 0xeb, 0xf8, 0x9b, 0x50, 0xf3, - 0x68, 0xcf, 0xb6, 0x0c, 0x5d, 0xa8, 0xeb, 0xd2, 0x6e, 0x8e, 0x32, 0x0d, 0x43, 0xea, 0x10, 0xfb, - 0x4d, 0xe9, 0x99, 0xb4, 0xdf, 0x94, 0x3f, 0x7e, 0xfb, 0x8d, 0xf6, 0x2b, 0x45, 0xe0, 0xaa, 0x2d, - 0xb9, 0x01, 0x65, 0xa6, 0xb6, 0xa5, 0xad, 0x86, 0xbc, 0xb7, 0x70, 0x0a, 0x99, 0x87, 0x62, 0xe0, - 0xca, 0xe1, 0x06, 0x24, 0xbd, 0xb8, 0xe3, 0x62, 0x31, 0x70, 0xc9, 0x07, 0x00, 0x86, 0xeb, 0x98, - 0x96, 0x72, 0x27, 0xe5, 0xfb, 0xb0, 0x35, 0xd7, 0x7b, 0xa0, 0x7b, 0xe6, 0x4a, 0x88, 0x28, 0x56, - 0xf0, 0xd1, 0x3b, 0xc6, 0xa4, 0x91, 0x37, 0x60, 0xc2, 0x75, 0xd6, 0xfa, 0xb6, 0xcd, 0x2b, 0xb4, - 0xde, 0xfa, 0xc2, 0xe9, 0x49, 0x73, 0xe2, 0x1e, 0x4f, 0x79, 0x74, 0xd2, 0xbc, 0x2a, 0x56, 0x44, - 0xec, 0xed, 0x6d, 0xcf, 0x0a, 0x2c, 0xa7, 0x13, 0x2e, 0x68, 0x65, 0x36, 0xed, 0x57, 0x0b, 0xd0, - 0x58, 0xb3, 0x1e, 0x52, 0xf3, 0x6d, 0xcb, 0x31, 0xdd, 0x07, 0x04, 0x61, 0xc2, 0xa6, 0x4e, 0x27, - 0x38, 0x18, 0x71, 0xc5, 0x29, 0xec, 0x3a, 0x1c, 0x01, 0x25, 0x12, 0x59, 0x84, 0xba, 0x58, 0xaf, - 0x58, 0x4e, 0x87, 0xd7, 0x61, 0x2d, 0x1a, 0xe9, 0xdb, 0x8a, 0x80, 0x11, 0x8f, 0x76, 0x0c, 0xcf, - 0x0f, 0x54, 0x03, 0x31, 0xa1, 0x1c, 0xe8, 0x1d, 0x35, 0xa9, 0xac, 0x8d, 0x5c, 0xc1, 0x3b, 0x7a, - 0x27, 0x56, 0xb9, 0x5c, 0x2b, 0xdc, 0xd1, 0x99, 0x56, 0xc8, 0xd0, 0xb5, 0x3f, 0x28, 0x40, 0x6d, - 0xad, 0xef, 0x18, 0x7c, 0x51, 0xff, 0x64, 0x6b, 0xb2, 0x52, 0x31, 0x8b, 0x99, 0x2a, 0x66, 0x1f, - 0x26, 0x0e, 0x1f, 0x84, 0x2a, 0x68, 0x63, 0x69, 0x6b, 0xf4, 0x56, 0x21, 
0x8b, 0xb4, 0x70, 0x87, - 0xe3, 0x09, 0x67, 0xe7, 0xb4, 0x2c, 0xd0, 0xc4, 0x9d, 0xb7, 0xb9, 0x50, 0x29, 0x6c, 0xfe, 0xcb, - 0xd0, 0x88, 0xb1, 0x9d, 0xcb, 0xef, 0xf1, 0xb7, 0xcb, 0x30, 0xb1, 0xde, 0x6e, 0x2f, 0x6f, 0x6f, - 0x90, 0x57, 0xa1, 0x21, 0xfd, 0x60, 0x77, 0xa3, 0x3a, 0x08, 0xdd, 0xa0, 0xed, 0x88, 0x84, 0x71, - 0x3e, 0xa6, 0xc0, 0x7b, 0x54, 0xb7, 0xbb, 0xb2, 0xb3, 0x84, 0xba, 0x03, 0xb2, 0x44, 0x14, 0x34, - 0xa2, 0xc3, 0x74, 0xdf, 0xa7, 0x1e, 0xab, 0x42, 0xb1, 0xde, 0x97, 0xdd, 0xe6, 0x8c, 0x16, 0x01, - 0x3e, 0xc1, 0xec, 0x26, 0x00, 0x30, 0x05, 0x48, 0x5e, 0x87, 0x9a, 0xde, 0x0f, 0x0e, 0xf8, 0x92, - 0x4b, 0xf4, 0x8d, 0x6b, 0xdc, 0x4d, 0x28, 0xd3, 0x1e, 0x9d, 0x34, 0x27, 0xef, 0x60, 0xeb, 0x55, - 0xf5, 0x8e, 0x21, 0x37, 0x2b, 0x9c, 0xb2, 0x31, 0xc8, 0xc2, 0x55, 0xce, 0x5d, 0xb8, 0xed, 0x04, - 0x00, 0xa6, 0x00, 0xc9, 0xbb, 0x30, 0x79, 0x48, 0x8f, 0x03, 0x7d, 0x4f, 0x0a, 0x98, 0x38, 0x8f, - 0x80, 0x59, 0xa6, 0xf4, 0xdf, 0x89, 0x65, 0xc7, 0x04, 0x18, 0xf1, 0xe1, 0xf2, 0x21, 0xf5, 0xf6, - 0xa8, 0xe7, 0x4a, 0x7b, 0x85, 0x14, 0x52, 0x3d, 0x8f, 0x90, 0xb9, 0xd3, 0x93, 0xe6, 0xe5, 0x3b, - 0x19, 0x30, 0x98, 0x09, 0xae, 0xfd, 0xdf, 0x22, 0xcc, 0xac, 0x8b, 0x40, 0x04, 0xd7, 0x13, 0x9a, - 0x07, 0xb9, 0x0a, 0x25, 0xaf, 0xd7, 0xe7, 0x2d, 0xa7, 0x24, 0x5c, 0x0d, 0xb8, 0xbd, 0x8b, 0x2c, - 0x8d, 0xbc, 0x03, 0x35, 0x53, 0x0e, 0x19, 0xd2, 0x5c, 0x32, 0x92, 0x69, 0x4b, 0xbd, 0x61, 0x88, - 0xc6, 0xd6, 0x86, 0x5d, 0xbf, 0xd3, 0xb6, 0x3e, 0xa0, 0xd2, 0x82, 0xc0, 0xd7, 0x86, 0x5b, 0x22, - 0x09, 0x15, 0x8d, 0xcd, 0xaa, 0x87, 0xf4, 0x58, 0xac, 0x9f, 0xcb, 0xd1, 0xac, 0x7a, 0x47, 0xa6, - 0x61, 0x48, 0x25, 0x4d, 0xd5, 0x59, 0x58, 0x2b, 0x28, 0x0b, 0xdb, 0xcf, 0x7d, 0x96, 0x20, 0xfb, - 0x0d, 0x1b, 0x32, 0xdf, 0xb7, 0x82, 0x80, 0x7a, 0xf2, 0x37, 0x8e, 0x34, 0x64, 0xbe, 0xc5, 0x11, - 0x50, 0x22, 0x91, 0x9f, 0x80, 0x3a, 0x07, 0x6f, 0xd9, 0xee, 0x1e, 0xff, 0x71, 0x75, 0x61, 0x05, - 0xba, 0xaf, 0x12, 0x31, 0xa2, 0x6b, 0x7f, 0x58, 0x84, 0x2b, 0xeb, 0x34, 0x10, 0x5a, 0xcd, 0x2a, - 0xed, 0xd9, 
0xee, 0x31, 0xd3, 0xa7, 0x91, 0x7e, 0x93, 0xbc, 0x09, 0x60, 0xf9, 0x7b, 0xed, 0x23, - 0x83, 0xf7, 0x03, 0xd1, 0x87, 0x6f, 0xc8, 0x2e, 0x09, 0x1b, 0xed, 0x96, 0xa4, 0x3c, 0x4a, 0xbc, - 0x61, 0x2c, 0x4f, 0xb4, 0x20, 0x2f, 0x3e, 0x66, 0x41, 0xde, 0x06, 0xe8, 0x45, 0x5a, 0x79, 0x89, - 0x73, 0xfe, 0xb4, 0x12, 0x73, 0x1e, 0x85, 0x3c, 0x06, 0x93, 0x47, 0x4f, 0x76, 0x60, 0xd6, 0xa4, - 0xfb, 0x7a, 0xdf, 0x0e, 0xc2, 0x95, 0x84, 0xec, 0xc4, 0x67, 0x5f, 0x8c, 0x84, 0x41, 0x12, 0xab, - 0x29, 0x24, 0x1c, 0xc0, 0xd6, 0xfe, 0x4e, 0x09, 0xe6, 0xd7, 0x69, 0x10, 0xda, 0xe8, 0xe4, 0xe8, - 0xd8, 0xee, 0x51, 0x83, 0xfd, 0x85, 0x0f, 0x0b, 0x30, 0x61, 0xeb, 0x7b, 0xd4, 0x66, 0xb3, 0x17, - 0xfb, 0x9a, 0xf7, 0x46, 0x9e, 0x08, 0x86, 0x4b, 0x59, 0xd8, 0xe4, 0x12, 0x52, 0x53, 0x83, 0x48, - 0x44, 0x29, 0x9e, 0x0d, 0xea, 0x86, 0xdd, 0xf7, 0x03, 0xb1, 0xb2, 0x93, 0xfa, 0x64, 0x38, 0xa8, - 0xaf, 0x44, 0x24, 0x8c, 0xf3, 0x91, 0x25, 0x00, 0xc3, 0xb6, 0xa8, 0x13, 0xf0, 0x5c, 0xa2, 0x5f, - 0x11, 0xf5, 0x7f, 0x57, 0x42, 0x0a, 0xc6, 0xb8, 0x98, 0xa8, 0xae, 0xeb, 0x58, 0x81, 0x2b, 0x44, - 0x95, 0x93, 0xa2, 0xb6, 0x22, 0x12, 0xc6, 0xf9, 0x78, 0x36, 0x1a, 0x78, 0x96, 0xe1, 0xf3, 0x6c, - 0x95, 0x54, 0xb6, 0x88, 0x84, 0x71, 0x3e, 0x36, 0xe7, 0xc5, 0xbe, 0xff, 0x5c, 0x73, 0xde, 0x6f, - 0xd6, 0xe1, 0x7a, 0xa2, 0x5a, 0x03, 0x3d, 0xa0, 0xfb, 0x7d, 0xbb, 0x4d, 0x03, 0xf5, 0x03, 0x47, - 0x9c, 0x0b, 0xff, 0x5c, 0xf4, 0xdf, 0x45, 0xf8, 0x93, 0x31, 0x9e, 0xff, 0x3e, 0x50, 0xc0, 0x33, - 0xfd, 0xfb, 0x45, 0xa8, 0x3b, 0x7a, 0xe0, 0xf3, 0x8e, 0x2b, 0xfb, 0x68, 0xa8, 0x86, 0xdd, 0x55, - 0x04, 0x8c, 0x78, 0xc8, 0x36, 0x5c, 0x96, 0x55, 0x7c, 0xeb, 0x21, 0x5b, 0xf3, 0x53, 0x4f, 0xe4, - 0x95, 0xd3, 0xa9, 0xcc, 0x7b, 0x79, 0x2b, 0x83, 0x07, 0x33, 0x73, 0x92, 0x2d, 0xb8, 0x64, 0x88, - 0x90, 0x10, 0x6a, 0xbb, 0xba, 0xa9, 0x00, 0x85, 0x49, 0x34, 0x5c, 0x1a, 0xad, 0x0c, 0xb2, 0x60, - 0x56, 0xbe, 0x74, 0x6b, 0x9e, 0x18, 0xa9, 0x35, 0x57, 0x47, 0x69, 0xcd, 0xb5, 0xd1, 0x5a, 0x73, - 0xfd, 0x6c, 0xad, 0x99, 0xd5, 0x3c, 0x6b, 0x47, 
0xd4, 0x63, 0xea, 0x89, 0x98, 0x61, 0x63, 0x11, - 0x47, 0x61, 0xcd, 0xb7, 0x33, 0x78, 0x30, 0x33, 0x27, 0xd9, 0x83, 0x79, 0x91, 0x7e, 0xcb, 0x31, - 0xbc, 0xe3, 0x1e, 0x9b, 0x78, 0x62, 0xb8, 0x8d, 0x84, 0x4d, 0x7a, 0xbe, 0x3d, 0x94, 0x13, 0x1f, - 0x83, 0x42, 0x7e, 0x16, 0xa6, 0xc4, 0x5f, 0xda, 0xd2, 0x7b, 0x1c, 0x56, 0xc4, 0x1f, 0xbd, 0x20, - 0x61, 0xa7, 0x56, 0xe2, 0x44, 0x4c, 0xf2, 0x92, 0x65, 0x98, 0xe9, 0x1d, 0x19, 0xec, 0x71, 0x63, - 0xff, 0x2e, 0xa5, 0x26, 0x35, 0xb9, 0xc3, 0xb3, 0xde, 0x7a, 0x51, 0x59, 0x77, 0xb6, 0x93, 0x64, - 0x4c, 0xf3, 0x93, 0xd7, 0x61, 0xd2, 0x0f, 0x74, 0x2f, 0x90, 0x86, 0xe0, 0xb9, 0x69, 0x11, 0x9f, - 0xa5, 0xec, 0xa4, 0xed, 0x18, 0x0d, 0x13, 0x9c, 0x99, 0xf3, 0xc5, 0xcc, 0xc5, 0xcd, 0x17, 0x79, - 0x46, 0xab, 0x7f, 0x52, 0x84, 0x1b, 0xeb, 0x34, 0xd8, 0x72, 0x1d, 0x69, 0x46, 0xcf, 0x9a, 0xf6, - 0xcf, 0x64, 0x45, 0x4f, 0x4e, 0xda, 0xc5, 0xb1, 0x4e, 0xda, 0xa5, 0x31, 0x4d, 0xda, 0xe5, 0x0b, - 0x9c, 0xb4, 0xff, 0x5e, 0x11, 0x5e, 0x4c, 0xd4, 0xe4, 0xb6, 0x6b, 0xaa, 0x01, 0xff, 0xd3, 0x0a, - 0x3c, 0x43, 0x05, 0x3e, 0x12, 0x7a, 0x27, 0x77, 0x84, 0xa6, 0x34, 0x9e, 0xef, 0xa4, 0x35, 0x9e, - 0x77, 0xf3, 0xcc, 0x7c, 0x19, 0x12, 0xce, 0x34, 0xe3, 0xbd, 0x05, 0xc4, 0x93, 0x6e, 0xdb, 0xc8, - 0x9c, 0x2d, 0x95, 0x9e, 0x30, 0x00, 0x14, 0x07, 0x38, 0x30, 0x23, 0x17, 0x69, 0xc3, 0x0b, 0x3e, - 0x75, 0x02, 0xcb, 0xa1, 0x76, 0x12, 0x4e, 0x68, 0x43, 0x2f, 0x49, 0xb8, 0x17, 0xda, 0x59, 0x4c, - 0x98, 0x9d, 0x37, 0xcf, 0x38, 0xf0, 0x2f, 0x80, 0xab, 0x9c, 0xa2, 0x6a, 0xc6, 0xa6, 0xb1, 0x7c, - 0x98, 0xd6, 0x58, 0xde, 0xcb, 0xff, 0xdf, 0x46, 0xd3, 0x56, 0x96, 0x00, 0xf8, 0x5f, 0x88, 0xab, - 0x2b, 0xe1, 0x24, 0x8d, 0x21, 0x05, 0x63, 0x5c, 0x6c, 0x02, 0x52, 0xf5, 0x1c, 0xd7, 0x54, 0xc2, - 0x09, 0xa8, 0x1d, 0x27, 0x62, 0x92, 0x77, 0xa8, 0xb6, 0x53, 0x19, 0x59, 0xdb, 0x79, 0x0b, 0x48, - 0xc2, 0xf0, 0x28, 0xf0, 0x26, 0x92, 0xf1, 0xc7, 0x1b, 0x03, 0x1c, 0x98, 0x91, 0x6b, 0x48, 0x53, - 0xae, 0x8e, 0xb7, 0x29, 0xd7, 0x46, 0x6f, 0xca, 0xe4, 0x3d, 0xb8, 0xca, 0x45, 0xc9, 
0xfa, 0x49, - 0x02, 0x0b, 0xbd, 0xe7, 0xb3, 0x12, 0xf8, 0x2a, 0x0e, 0x63, 0xc4, 0xe1, 0x18, 0xec, 0xff, 0x18, - 0x1e, 0x35, 0x99, 0x70, 0xdd, 0x1e, 0xae, 0x13, 0xad, 0x64, 0xf0, 0x60, 0x66, 0x4e, 0xd6, 0xc4, - 0x02, 0xd6, 0x0c, 0xf5, 0x3d, 0x9b, 0x9a, 0x32, 0xfe, 0x3a, 0x6c, 0x62, 0x3b, 0x9b, 0x6d, 0x49, - 0xc1, 0x18, 0x57, 0x96, 0x9a, 0x32, 0x79, 0x4e, 0x35, 0x65, 0x9d, 0x5b, 0xe9, 0xf7, 0x13, 0xda, - 0x90, 0xd4, 0x75, 0xc2, 0x88, 0xfa, 0x95, 0x34, 0x03, 0x0e, 0xe6, 0xe1, 0x5a, 0xa2, 0xe1, 0x59, - 0xbd, 0xc0, 0x4f, 0x62, 0x4d, 0xa7, 0xb4, 0xc4, 0x0c, 0x1e, 0xcc, 0xcc, 0xc9, 0xf4, 0xf3, 0x03, - 0xaa, 0xdb, 0xc1, 0x41, 0x12, 0x70, 0x26, 0xa9, 0x9f, 0xdf, 0x1e, 0x64, 0xc1, 0xac, 0x7c, 0x99, - 0x13, 0xd2, 0xec, 0xb3, 0xa9, 0x56, 0x7d, 0xbb, 0x04, 0x57, 0xd7, 0x69, 0x10, 0x86, 0xa6, 0x7d, - 0x6a, 0x46, 0xf9, 0x18, 0xcc, 0x28, 0xbf, 0x51, 0x81, 0x4b, 0xeb, 0x34, 0x18, 0xd0, 0xc6, 0xfe, - 0x88, 0x56, 0xff, 0x16, 0x5c, 0x8a, 0xa2, 0x21, 0xdb, 0x81, 0xeb, 0x89, 0xb9, 0x3c, 0xb5, 0x5a, - 0x6e, 0x0f, 0xb2, 0x60, 0x56, 0x3e, 0xf2, 0x75, 0x78, 0x91, 0x4f, 0xf5, 0x4e, 0x47, 0xd8, 0x67, - 0x85, 0x31, 0x21, 0xb6, 0x9f, 0xa7, 0x29, 0x21, 0x5f, 0x6c, 0x67, 0xb3, 0xe1, 0xb0, 0xfc, 0xe4, - 0x5b, 0x30, 0xd9, 0xb3, 0x7a, 0xd4, 0xb6, 0x1c, 0xae, 0x9f, 0xe5, 0x0e, 0x22, 0xda, 0x8e, 0x81, - 0x45, 0x0b, 0xb8, 0x78, 0x2a, 0x26, 0x04, 0x66, 0xb6, 0xd4, 0xda, 0x05, 0xb6, 0xd4, 0xff, 0x59, - 0x84, 0xea, 0xba, 0xe7, 0xf6, 0x7b, 0xad, 0x63, 0xd2, 0x81, 0x89, 0x07, 0xdc, 0x79, 0x26, 0x5d, - 0x53, 0xa3, 0xef, 0x28, 0x10, 0x3e, 0xb8, 0x48, 0x25, 0x12, 0xef, 0x28, 0xe1, 0x59, 0x23, 0x3e, - 0xa4, 0xc7, 0xd4, 0x94, 0x3e, 0xb4, 0xb0, 0x11, 0xdf, 0x61, 0x89, 0x28, 0x68, 0xa4, 0x0b, 0x33, - 0xba, 0x6d, 0xbb, 0x0f, 0xa8, 0xb9, 0xa9, 0x07, 0xdc, 0xef, 0x2d, 0x7d, 0x2b, 0xe7, 0x35, 0x4b, - 0xf3, 0x60, 0x86, 0xe5, 0x24, 0x14, 0xa6, 0xb1, 0xc9, 0xfb, 0x50, 0xf5, 0x03, 0xd7, 0x53, 0xca, - 0x56, 0x63, 0x69, 0x65, 0xf4, 0x9f, 0xde, 0xfa, 0x5a, 0x5b, 0x40, 0x09, 0x9b, 0xbd, 0x7c, 0x41, - 0x25, 0x40, 0xfb, 0xf5, 
0x02, 0xc0, 0xed, 0x9d, 0x9d, 0x6d, 0xe9, 0x5e, 0x30, 0xa1, 0xac, 0xf7, - 0x43, 0x47, 0xe5, 0xe8, 0x0e, 0xc1, 0x44, 0x20, 0xaf, 0xf4, 0xe1, 0xf5, 0x83, 0x03, 0xe4, 0xe8, - 0xe4, 0xc7, 0xa1, 0x2a, 0x15, 0x64, 0x59, 0xed, 0x61, 0x3c, 0x85, 0x54, 0xa2, 0x51, 0xd1, 0xb5, - 0xdf, 0x2e, 0x02, 0x6c, 0x98, 0x36, 0x6d, 0xab, 0x4d, 0x20, 0xf5, 0xe0, 0xc0, 0xa3, 0xfe, 0x81, - 0x6b, 0x9b, 0x23, 0x7a, 0x53, 0xb9, 0xcd, 0x7f, 0x47, 0x81, 0x60, 0x84, 0x47, 0x4c, 0x98, 0xf4, - 0x03, 0xda, 0x53, 0xb1, 0xbd, 0x23, 0x3a, 0x51, 0x66, 0x85, 0x5d, 0x24, 0xc2, 0xc1, 0x04, 0x2a, - 0xd1, 0xa1, 0x61, 0x39, 0x86, 0xe8, 0x20, 0xad, 0xe3, 0x11, 0x1b, 0xd2, 0x0c, 0x5b, 0x71, 0x6c, - 0x44, 0x30, 0x18, 0xc7, 0xd4, 0x7e, 0xb7, 0x08, 0x57, 0xb8, 0x3c, 0x56, 0x8c, 0x44, 0x04, 0x2f, - 0xf9, 0x93, 0x03, 0x1b, 0x56, 0xff, 0xf8, 0xd9, 0x44, 0x8b, 0xfd, 0x8e, 0x5b, 0x34, 0xd0, 0x23, - 0x7d, 0x2e, 0x4a, 0x8b, 0xed, 0x52, 0xed, 0x43, 0xd9, 0x67, 0xe3, 0x95, 0xa8, 0xbd, 0xf6, 0xc8, - 0x4d, 0x28, 0xfb, 0x03, 0xf8, 0xe8, 0x15, 0x7a, 0x8d, 0xf9, 0xa8, 0xc5, 0xc5, 0x91, 0x5f, 0x82, - 0x09, 0x3f, 0xd0, 0x83, 0xbe, 0xea, 0x9a, 0xbb, 0xe3, 0x16, 0xcc, 0xc1, 0xa3, 0x71, 0x44, 0xbc, - 0xa3, 0x14, 0xaa, 0xfd, 0x6e, 0x01, 0xe6, 0xb3, 0x33, 0x6e, 0x5a, 0x7e, 0x40, 0xfe, 0xc4, 0x40, - 0xb5, 0x9f, 0xf1, 0x8f, 0xb3, 0xdc, 0xbc, 0xd2, 0xc3, 0x3d, 0x0d, 0x2a, 0x25, 0x56, 0xe5, 0x01, - 0x54, 0xac, 0x80, 0x76, 0xd5, 0xfa, 0xf2, 0xde, 0x98, 0x3f, 0x3d, 0x36, 0xb5, 0x33, 0x29, 0x28, - 0x84, 0x69, 0xdf, 0x2d, 0x0e, 0xfb, 0x64, 0x3e, 0x7d, 0xd8, 0xc9, 0x28, 0xf1, 0x3b, 0xf9, 0xa2, - 0xc4, 0x93, 0x05, 0x1a, 0x0c, 0x16, 0xff, 0x53, 0x83, 0xc1, 0xe2, 0xf7, 0xf2, 0x07, 0x8b, 0xa7, - 0xaa, 0x61, 0x68, 0xcc, 0xf8, 0x47, 0x25, 0xb8, 0xf6, 0xb8, 0x66, 0xc3, 0xe6, 0x33, 0xd9, 0x3a, - 0xf3, 0xce, 0x67, 0x8f, 0x6f, 0x87, 0x64, 0x09, 0x2a, 0xbd, 0x03, 0xdd, 0x57, 0x4a, 0xd9, 0xb5, - 0x30, 0xcc, 0x90, 0x25, 0x3e, 0x62, 0x83, 0x06, 0x57, 0xe6, 0xf8, 0x2b, 0x0a, 0x56, 0x36, 0x1c, - 0x77, 0xa9, 0xef, 0x47, 0x36, 0x81, 0x70, 0x38, 0xde, 0x12, 
0xc9, 0xa8, 0xe8, 0x24, 0x80, 0x09, - 0x61, 0x62, 0x96, 0x33, 0xd3, 0xe8, 0x81, 0x5c, 0x19, 0x1b, 0x0b, 0xa2, 0x8f, 0x92, 0xde, 0x0a, - 0x29, 0x8b, 0x2c, 0x40, 0x39, 0x88, 0xc2, 0xbc, 0xd5, 0xd2, 0xbc, 0x9c, 0xa1, 0x9f, 0x72, 0x3e, - 0xb6, 0xb0, 0x77, 0xf7, 0xb8, 0x51, 0xdd, 0x94, 0xfe, 0x73, 0xcb, 0x75, 0xb8, 0x42, 0x56, 0x8a, - 0x16, 0xf6, 0xf7, 0x06, 0x38, 0x30, 0x23, 0x97, 0xf6, 0xaf, 0x6b, 0x70, 0x25, 0xbb, 0x3d, 0xb0, - 0x7a, 0x3b, 0xa2, 0x9e, 0xcf, 0xb0, 0x0b, 0xc9, 0x7a, 0xbb, 0x2f, 0x92, 0x51, 0xd1, 0x3f, 0xd1, - 0x01, 0x67, 0xbf, 0x51, 0x80, 0xab, 0x9e, 0xf4, 0x11, 0x3d, 0x8d, 0xa0, 0xb3, 0x97, 0x84, 0x39, - 0x63, 0x88, 0x40, 0x1c, 0x5e, 0x16, 0xf2, 0x37, 0x0a, 0x30, 0xd7, 0x4d, 0xd9, 0x39, 0x2e, 0x70, - 0xcf, 0x25, 0xdf, 0x47, 0xb1, 0x35, 0x44, 0x1e, 0x0e, 0x2d, 0x09, 0xf9, 0x16, 0x34, 0x7a, 0xac, - 0x5d, 0xf8, 0x01, 0x75, 0x0c, 0x15, 0x20, 0x3a, 0x7a, 0x4f, 0xda, 0x8e, 0xb0, 0xc2, 0x3d, 0x57, - 0x5c, 0x3f, 0x88, 0x11, 0x30, 0x2e, 0xf1, 0x19, 0xdf, 0x64, 0x79, 0x13, 0x6a, 0x3e, 0x0d, 0x02, - 0xcb, 0xe9, 0x88, 0xf5, 0x46, 0x5d, 0xf4, 0x95, 0xb6, 0x4c, 0xc3, 0x90, 0x4a, 0x7e, 0x02, 0xea, - 0xdc, 0xe5, 0xb4, 0xec, 0x75, 0xfc, 0xb9, 0x3a, 0x0f, 0x17, 0x9b, 0x12, 0x01, 0x70, 0x32, 0x11, - 0x23, 0x3a, 0xf9, 0x22, 0x4c, 0xee, 0xf1, 0xee, 0x2b, 0xf7, 0xdd, 0x0b, 0x1b, 0x17, 0xd7, 0xd6, - 0x5a, 0xb1, 0x74, 0x4c, 0x70, 0x91, 0x25, 0x00, 0x1a, 0xfa, 0xe5, 0xd2, 0xf6, 0xac, 0xc8, 0x63, - 0x87, 0x31, 0x2e, 0xf2, 0x12, 0x94, 0x02, 0xdb, 0xe7, 0x36, 0xac, 0x5a, 0xb4, 0x04, 0xdd, 0xd9, - 0x6c, 0x23, 0x4b, 0xd7, 0xfe, 0xb0, 0x00, 0x33, 0xa9, 0xed, 0x48, 0x2c, 0x4b, 0xdf, 0xb3, 0xe5, - 0x30, 0x12, 0x66, 0xd9, 0xc5, 0x4d, 0x64, 0xe9, 0xe4, 0x3d, 0xa9, 0x96, 0x17, 0x73, 0x1e, 0x31, - 0x72, 0x57, 0x0f, 0x7c, 0xa6, 0x87, 0x0f, 0x68, 0xe4, 0xdc, 0xcd, 0x17, 0x95, 0x47, 0xce, 0x03, - 0x31, 0x37, 0x5f, 0x44, 0xc3, 0x04, 0x67, 0xca, 0xe0, 0x57, 0x3e, 0x8b, 0xc1, 0x4f, 0xfb, 0xd5, - 0x62, 0xac, 0x06, 0xa4, 0x66, 0xff, 0x84, 0x1a, 0x78, 0x99, 0x4d, 0xa0, 0xe1, 0xe4, 0x5e, 0x8f, - 
0xcf, 0x7f, 0x7c, 0x32, 0x96, 0x54, 0xf2, 0xb6, 0xa8, 0xfb, 0x52, 0xce, 0x8d, 0xdc, 0x3b, 0x9b, - 0x6d, 0x11, 0x5d, 0xa5, 0xfe, 0x5a, 0xf8, 0x0b, 0xca, 0x17, 0xf4, 0x0b, 0xb4, 0x7f, 0x56, 0x82, - 0xc6, 0x5b, 0xee, 0xde, 0x27, 0x24, 0x82, 0x3a, 0x7b, 0x9a, 0x2a, 0x7e, 0x8c, 0xd3, 0xd4, 0x2e, - 0xbc, 0x18, 0x04, 0x76, 0x9b, 0x1a, 0xae, 0x63, 0xfa, 0xcb, 0xfb, 0x01, 0xf5, 0xd6, 0x2c, 0xc7, - 0xf2, 0x0f, 0xa8, 0x29, 0xdd, 0x49, 0x9f, 0x39, 0x3d, 0x69, 0xbe, 0xb8, 0xb3, 0xb3, 0x99, 0xc5, - 0x82, 0xc3, 0xf2, 0xf2, 0x61, 0x43, 0xec, 0x1d, 0xe5, 0x7b, 0xab, 0x64, 0xcc, 0x8d, 0x18, 0x36, - 0x62, 0xe9, 0x98, 0xe0, 0xd2, 0xfe, 0x43, 0x11, 0xea, 0xe1, 0xe1, 0x11, 0xe4, 0xf3, 0x50, 0xdd, - 0xf3, 0xdc, 0x43, 0xea, 0x09, 0xcf, 0x9d, 0xdc, 0x5b, 0xd5, 0x12, 0x49, 0xa8, 0x68, 0xe4, 0x73, - 0x50, 0x09, 0xdc, 0x9e, 0x65, 0xa4, 0x0d, 0x6a, 0x3b, 0x2c, 0x11, 0x05, 0x8d, 0x77, 0x04, 0x1e, - 0x56, 0xc8, 0xbf, 0xaa, 0x16, 0xeb, 0x08, 0x3c, 0x15, 0x25, 0x55, 0x75, 0x84, 0xf2, 0xd8, 0x3b, - 0xc2, 0xcb, 0xa1, 0x0a, 0x58, 0x49, 0xf6, 0xc4, 0x94, 0xd2, 0xf6, 0x2e, 0x94, 0x7d, 0xdd, 0xb7, - 0xe5, 0xf4, 0x96, 0xe3, 0xbc, 0x86, 0xe5, 0xf6, 0xa6, 0x3c, 0xaf, 0x61, 0xb9, 0xbd, 0x89, 0x1c, - 0x54, 0xfb, 0xed, 0x12, 0x34, 0x44, 0xfd, 0x8a, 0xd1, 0x63, 0x9c, 0x35, 0xfc, 0x06, 0x0f, 0xb9, - 0xf0, 0xfb, 0x5d, 0xea, 0x71, 0x73, 0x94, 0x1c, 0x0c, 0xe3, 0x7e, 0x84, 0x88, 0x18, 0x86, 0x5d, - 0x44, 0x49, 0x3f, 0xda, 0x55, 0xcf, 0xa6, 0x0a, 0x7e, 0x00, 0x8a, 0xd4, 0x71, 0x65, 0x24, 0x65, - 0x38, 0x55, 0xdc, 0x89, 0xd1, 0x30, 0xc1, 0xa9, 0xfd, 0x8f, 0x22, 0xd4, 0x37, 0xad, 0x7d, 0x6a, - 0x1c, 0x1b, 0x36, 0x25, 0xdf, 0x80, 0x79, 0x93, 0xda, 0x94, 0xcd, 0x98, 0xeb, 0x9e, 0x6e, 0xd0, - 0x6d, 0xea, 0x59, 0xfc, 0x00, 0x27, 0xd6, 0x07, 0x65, 0x80, 0xeb, 0xf5, 0xd3, 0x93, 0xe6, 0xfc, - 0xea, 0x50, 0x2e, 0x7c, 0x0c, 0x02, 0xd9, 0x80, 0x49, 0x93, 0xfa, 0x96, 0x47, 0xcd, 0xed, 0xd8, - 0x82, 0xe8, 0xf3, 0xaa, 0x9c, 0xab, 0x31, 0xda, 0xa3, 0x93, 0xe6, 0x94, 0x32, 0x84, 0x8a, 0x95, - 0x51, 0x22, 0x2b, 0x1b, 0x5a, 0x7a, 
0x7a, 0xdf, 0xa7, 0x19, 0xe5, 0x2c, 0xf1, 0x72, 0xf2, 0xa1, - 0x65, 0x3b, 0x9b, 0x05, 0x87, 0xe5, 0x25, 0x7b, 0x30, 0xc7, 0xcb, 0x9f, 0x85, 0x5b, 0xe6, 0xb8, - 0x2f, 0x9f, 0x9e, 0x34, 0xb5, 0x55, 0xda, 0xf3, 0xa8, 0xa1, 0x07, 0xd4, 0x5c, 0x1d, 0xc2, 0x8d, - 0x43, 0x71, 0xb4, 0x0a, 0x94, 0x36, 0xdd, 0x8e, 0xf6, 0xdd, 0x12, 0x84, 0x27, 0x8a, 0x91, 0x3f, - 0x5b, 0x80, 0x86, 0xee, 0x38, 0x6e, 0x20, 0x4f, 0xeb, 0x12, 0xd1, 0x04, 0x98, 0xfb, 0xe0, 0xb2, - 0x85, 0xe5, 0x08, 0x54, 0x38, 0xa2, 0x43, 0xe7, 0x78, 0x8c, 0x82, 0x71, 0xd9, 0xa4, 0x9f, 0xf2, - 0x8d, 0x6f, 0xe5, 0x2f, 0xc5, 0x19, 0x3c, 0xe1, 0xf3, 0x5f, 0x85, 0xd9, 0x74, 0x61, 0xcf, 0xe3, - 0xda, 0xca, 0x15, 0x64, 0x50, 0x04, 0x88, 0xe2, 0x63, 0x9e, 0x82, 0x41, 0xce, 0x4a, 0x18, 0xe4, - 0x46, 0x3f, 0xd6, 0x21, 0x2a, 0xf4, 0x50, 0x23, 0xdc, 0x37, 0x53, 0x46, 0xb8, 0x8d, 0x71, 0x08, - 0x7b, 0xbc, 0xe1, 0x6d, 0x0f, 0x2e, 0x45, 0xbc, 0xd1, 0xe8, 0x72, 0x27, 0xd5, 0xfb, 0x85, 0x5e, - 0xf9, 0x85, 0x21, 0xbd, 0x7f, 0x26, 0x16, 0xb0, 0x34, 0xd8, 0xff, 0xb5, 0xbf, 0x59, 0x80, 0xd9, - 0xb8, 0x10, 0xbe, 0x07, 0xfd, 0x4b, 0x30, 0xe5, 0x51, 0xdd, 0x6c, 0xe9, 0x81, 0x71, 0xc0, 0x43, - 0xe3, 0x0b, 0x3c, 0x96, 0x9d, 0xef, 0x96, 0xc3, 0x38, 0x01, 0x93, 0x7c, 0x44, 0x87, 0x06, 0x4b, - 0xd8, 0xb1, 0xba, 0xd4, 0xed, 0x07, 0x23, 0x5a, 0x99, 0xf9, 0x02, 0x0f, 0x23, 0x18, 0x8c, 0x63, - 0x6a, 0x1f, 0x15, 0x60, 0x3a, 0x5e, 0xe0, 0x0b, 0xb7, 0x40, 0x1e, 0x24, 0x2d, 0x90, 0x2b, 0x63, - 0xf8, 0xef, 0x43, 0xac, 0x8e, 0xdf, 0x6e, 0xc4, 0x3f, 0x8d, 0x5b, 0x1a, 0xe3, 0xc6, 0x95, 0xc2, - 0x63, 0x8d, 0x2b, 0x9f, 0xfc, 0x83, 0xaa, 0x86, 0xad, 0x0a, 0xca, 0xcf, 0xf0, 0xaa, 0xe0, 0xe3, - 0x3c, 0xed, 0x2a, 0x76, 0x62, 0xd3, 0x44, 0x8e, 0x13, 0x9b, 0xba, 0xe1, 0x89, 0x4d, 0xd5, 0xb1, - 0x0d, 0x6c, 0x67, 0x39, 0xb5, 0xa9, 0xf6, 0x54, 0x4f, 0x6d, 0xaa, 0x5f, 0xd4, 0xa9, 0x4d, 0x90, - 0xf7, 0xd4, 0xa6, 0xef, 0x14, 0x60, 0xda, 0x4c, 0xec, 0x30, 0x96, 0x7b, 0xfb, 0x47, 0x9f, 0xce, - 0x92, 0x1b, 0x96, 0xc5, 0x16, 0xb3, 0x64, 0x1a, 0xa6, 0x44, 0x66, 0x9d, 
0x95, 0x34, 0xf9, 0xb1, - 0x9c, 0x95, 0x44, 0x7e, 0x09, 0xea, 0xb6, 0x9a, 0xeb, 0xe4, 0x09, 0x92, 0x9b, 0x63, 0x69, 0x92, - 0x12, 0x33, 0xda, 0xc5, 0x10, 0x26, 0x61, 0x24, 0x51, 0xfb, 0xfd, 0x6a, 0x7c, 0x42, 0x7c, 0xda, - 0x3e, 0x8e, 0xd7, 0x92, 0x3e, 0x8e, 0x1b, 0x69, 0x1f, 0xc7, 0xc0, 0x6c, 0x2e, 0xfd, 0x1c, 0x3f, - 0x19, 0x9b, 0x27, 0x4a, 0xfc, 0x90, 0xa6, 0xb0, 0xc9, 0x65, 0xcc, 0x15, 0xcb, 0x30, 0x23, 0x95, - 0x00, 0x45, 0xe4, 0x83, 0xec, 0x54, 0x14, 0x95, 0xb6, 0x9a, 0x24, 0x63, 0x9a, 0x9f, 0x09, 0xf4, - 0xd5, 0x59, 0xbd, 0x62, 0xc5, 0x16, 0xb5, 0x71, 0x75, 0x8e, 0x6e, 0xc8, 0xc1, 0x56, 0x77, 0x1e, - 0xd5, 0x7d, 0xe9, 0xa9, 0x88, 0xad, 0xee, 0x90, 0xa7, 0xa2, 0xa4, 0xc6, 0xdd, 0x35, 0xd5, 0x27, - 0xb8, 0x6b, 0x74, 0x68, 0xd8, 0xba, 0x1f, 0x88, 0xc6, 0x64, 0xca, 0xd1, 0xe4, 0x8f, 0x9d, 0x6d, - 0xde, 0x67, 0xba, 0x44, 0xa4, 0xc0, 0x6f, 0x46, 0x30, 0x18, 0xc7, 0x24, 0x26, 0x4c, 0xb2, 0x57, - 0x3e, 0xb2, 0x98, 0xcb, 0x81, 0x3c, 0xd1, 0xee, 0x3c, 0x32, 0xc2, 0xa5, 0xe3, 0x66, 0x0c, 0x07, - 0x13, 0xa8, 0x43, 0x3c, 0x3a, 0x30, 0x8a, 0x47, 0x87, 0xfc, 0xac, 0x50, 0xdc, 0x8e, 0xc3, 0xdf, - 0xda, 0xe0, 0xbf, 0x35, 0x8c, 0x68, 0xc5, 0x38, 0x11, 0x93, 0xbc, 0xac, 0x55, 0xf4, 0x65, 0x35, - 0xa8, 0xec, 0x93, 0xc9, 0x56, 0xb1, 0x9b, 0x24, 0x63, 0x9a, 0x9f, 0x6c, 0xc3, 0xe5, 0x30, 0x29, - 0x5e, 0x8c, 0x29, 0x8e, 0x13, 0x86, 0x18, 0xee, 0x66, 0xf0, 0x60, 0x66, 0x4e, 0xbe, 0x67, 0xa7, - 0xef, 0x79, 0xd4, 0x09, 0x6e, 0xeb, 0xfe, 0x81, 0x8c, 0x55, 0x8c, 0xf6, 0xec, 0x44, 0x24, 0x8c, - 0xf3, 0x91, 0x25, 0x00, 0x01, 0xc7, 0x73, 0xcd, 0x24, 0xc3, 0x81, 0x77, 0x43, 0x0a, 0xc6, 0xb8, - 0xb4, 0xef, 0xd4, 0xa1, 0x71, 0x57, 0x0f, 0xac, 0x23, 0xca, 0xdd, 0xaf, 0x17, 0xe3, 0x03, 0xfb, - 0x2b, 0x05, 0xb8, 0x92, 0x8c, 0xb1, 0xbd, 0x40, 0x47, 0x18, 0x3f, 0xe3, 0x09, 0x33, 0xa5, 0xe1, - 0x90, 0x52, 0x70, 0x97, 0xd8, 0x40, 0xc8, 0xee, 0x45, 0xbb, 0xc4, 0xda, 0xc3, 0x04, 0xe2, 0xf0, - 0xb2, 0x7c, 0x52, 0x5c, 0x62, 0xcf, 0xf6, 0xa1, 0xa4, 0x29, 0x87, 0x5d, 0xf5, 0x99, 0x71, 0xd8, - 0xd5, 0x9e, 
0x09, 0xad, 0xbf, 0x17, 0x73, 0xd8, 0xd5, 0x73, 0x06, 0x8e, 0xc9, 0x6d, 0x29, 0x02, - 0x6d, 0x98, 0xe3, 0x8f, 0x9f, 0x28, 0xa1, 0x1c, 0x29, 0x4c, 0x59, 0xde, 0xd3, 0x7d, 0xcb, 0x90, - 0x6a, 0x47, 0x8e, 0x43, 0x98, 0xd5, 0xe1, 0x8c, 0x22, 0xbe, 0x84, 0xbf, 0xa2, 0xc0, 0x8e, 0xce, - 0xa2, 0x2c, 0xe6, 0x3a, 0x8b, 0x92, 0xac, 0x40, 0xd9, 0x39, 0xa4, 0xc7, 0xe7, 0x3b, 0x9b, 0x81, - 0x2f, 0x02, 0xef, 0xde, 0xa1, 0xc7, 0xc8, 0x33, 0x6b, 0xdf, 0x2b, 0x02, 0xb0, 0xcf, 0x3f, 0x9b, - 0xeb, 0xec, 0xc7, 0xa1, 0xea, 0xf7, 0xb9, 0x61, 0x48, 0x2a, 0x4c, 0x51, 0xb4, 0x9d, 0x48, 0x46, - 0x45, 0x27, 0x9f, 0x83, 0xca, 0x37, 0xfb, 0xb4, 0xaf, 0xe2, 0x40, 0xc2, 0x75, 0xc3, 0xd7, 0x58, - 0x22, 0x0a, 0xda, 0xc5, 0x99, 0xb7, 0x95, 0x8b, 0xad, 0x72, 0x51, 0x2e, 0xb6, 0x3a, 0x54, 0xef, - 0xba, 0x3c, 0x78, 0x57, 0xfb, 0xaf, 0x45, 0x80, 0x28, 0x38, 0x92, 0xfc, 0x7a, 0x01, 0x5e, 0x08, - 0x3b, 0x5c, 0x20, 0x96, 0x7f, 0xfc, 0xdc, 0xf3, 0xdc, 0xee, 0xb6, 0xac, 0xce, 0xce, 0x47, 0xa0, - 0xed, 0x2c, 0x71, 0x98, 0x5d, 0x0a, 0x82, 0x50, 0xa3, 0xdd, 0x5e, 0x70, 0xbc, 0x6a, 0x79, 0xb2, - 0x05, 0x66, 0xc6, 0xe0, 0xde, 0x92, 0x3c, 0x22, 0xab, 0xb4, 0x51, 0xf0, 0x4e, 0xa4, 0x28, 0x18, - 0xe2, 0x90, 0x03, 0xa8, 0x39, 0xee, 0x7b, 0x3e, 0xab, 0x0e, 0xd9, 0x1c, 0xdf, 0x1c, 0xbd, 0xca, - 0x45, 0xb5, 0x0a, 0xb7, 0x8b, 0x7c, 0xc1, 0xaa, 0x23, 0x2b, 0xfb, 0xd7, 0x8a, 0x70, 0x29, 0xa3, - 0x1e, 0xc8, 0x9b, 0x30, 0x2b, 0xe3, 0x50, 0xa3, 0x0b, 0x00, 0x0a, 0xd1, 0x05, 0x00, 0xed, 0x14, - 0x0d, 0x07, 0xb8, 0xc9, 0x7b, 0x00, 0xba, 0x61, 0x50, 0xdf, 0xdf, 0x72, 0x4d, 0xb5, 0x1e, 0x78, - 0x83, 0xa9, 0x2f, 0xcb, 0x61, 0xea, 0xa3, 0x93, 0xe6, 0x4f, 0x65, 0x85, 0x96, 0xa7, 0xea, 0x39, - 0xca, 0x80, 0x31, 0x48, 0xf2, 0x0d, 0x00, 0x61, 0x03, 0x08, 0x4f, 0xbf, 0x78, 0x82, 0xe1, 0x6c, - 0x41, 0x1d, 0xae, 0xb6, 0xf0, 0xb5, 0xbe, 0xee, 0x04, 0x56, 0x70, 0x2c, 0x0e, 0x1b, 0xba, 0x1f, - 0xa2, 0x60, 0x0c, 0x51, 0xfb, 0xc7, 0x45, 0xa8, 0x29, 0xd7, 0xc3, 0x53, 0xb0, 0x05, 0x77, 0x12, - 0xb6, 0xe0, 0x31, 0x05, 0x93, 0x67, 0x59, 0x82, 
0xdd, 0x94, 0x25, 0x78, 0x3d, 0xbf, 0xa8, 0xc7, - 0xdb, 0x81, 0x7f, 0xab, 0x08, 0xd3, 0x8a, 0x35, 0xaf, 0x85, 0xf6, 0x2b, 0x30, 0x23, 0x82, 0x40, - 0xb6, 0xf4, 0x87, 0xe2, 0xdc, 0x25, 0x5e, 0x61, 0x65, 0x11, 0xbf, 0xdd, 0x4a, 0x92, 0x30, 0xcd, - 0xcb, 0x9a, 0xb5, 0x48, 0xda, 0x65, 0x8b, 0x30, 0xe1, 0x36, 0x16, 0xeb, 0x4d, 0xde, 0xac, 0x5b, - 0x29, 0x1a, 0x0e, 0x70, 0xa7, 0x4d, 0xc4, 0xe5, 0x0b, 0x30, 0x11, 0xff, 0xdb, 0x02, 0x4c, 0x46, - 0xf5, 0x75, 0xe1, 0x06, 0xe2, 0xfd, 0xa4, 0x81, 0x78, 0x39, 0x77, 0x73, 0x18, 0x62, 0x1e, 0xfe, - 0x0b, 0x55, 0x48, 0xec, 0x69, 0x20, 0x7b, 0x30, 0x6f, 0x65, 0x46, 0x66, 0xc6, 0x46, 0x9b, 0x70, - 0x93, 0xfe, 0xc6, 0x50, 0x4e, 0x7c, 0x0c, 0x0a, 0xe9, 0x43, 0xed, 0x88, 0x7a, 0x81, 0x65, 0x50, - 0xf5, 0x7d, 0xeb, 0xb9, 0x55, 0x32, 0x69, 0x04, 0x0f, 0xeb, 0xf4, 0xbe, 0x14, 0x80, 0xa1, 0x28, - 0xb2, 0x07, 0x15, 0x6a, 0x76, 0xa8, 0x3a, 0x09, 0x2b, 0xe7, 0xc9, 0xc4, 0x61, 0x7d, 0xb2, 0x37, - 0x1f, 0x05, 0x34, 0xf1, 0xe3, 0x86, 0xa6, 0x72, 0x4e, 0x05, 0xeb, 0x8c, 0xe6, 0x25, 0x72, 0x18, - 0x5a, 0x5b, 0x2b, 0x63, 0x1a, 0x3c, 0x1e, 0x63, 0x6b, 0xf5, 0xa1, 0xfe, 0x40, 0x0f, 0xa8, 0xd7, - 0xd5, 0xbd, 0x43, 0xb9, 0xda, 0x18, 0xfd, 0x0b, 0xdf, 0x56, 0x48, 0xd1, 0x17, 0x86, 0x49, 0x18, - 0xc9, 0x21, 0x2e, 0xd4, 0x03, 0xa9, 0x3e, 0x2b, 0x93, 0xf2, 0xe8, 0x42, 0x95, 0x22, 0xee, 0xcb, - 0xbd, 0x0d, 0xea, 0x15, 0x23, 0x19, 0xe4, 0x28, 0x71, 0x8c, 0xbd, 0xb8, 0xbc, 0xa0, 0x95, 0xc3, - 0x35, 0x21, 0xa1, 0xa2, 0xe9, 0x26, 0xfb, 0x38, 0x7c, 0xed, 0x7f, 0x55, 0xa2, 0x61, 0xf9, 0x69, - 0xdb, 0x09, 0xbf, 0x98, 0xb4, 0x13, 0x5e, 0x4f, 0xdb, 0x09, 0x53, 0x3e, 0xff, 0xf3, 0x47, 0x43, - 0xa7, 0xcc, 0x6b, 0xe5, 0x0b, 0x30, 0xaf, 0xbd, 0x02, 0x8d, 0x23, 0x3e, 0x12, 0x88, 0x63, 0xb5, - 0x2a, 0x7c, 0x1a, 0xe1, 0x23, 0xfb, 0xfd, 0x28, 0x19, 0xe3, 0x3c, 0x2c, 0x8b, 0xbc, 0xb8, 0x27, - 0x3c, 0xc9, 0x5a, 0x66, 0x69, 0x47, 0xc9, 0x18, 0xe7, 0xe1, 0x81, 0x94, 0x96, 0x73, 0x28, 0x32, - 0x54, 0x79, 0x06, 0x11, 0x48, 0xa9, 0x12, 0x31, 0xa2, 0x93, 0x9b, 0x50, 0xeb, 0x9b, 
0xfb, 0x82, - 0xb7, 0xc6, 0x79, 0xb9, 0x86, 0xb9, 0xbb, 0xba, 0x26, 0x8f, 0xf9, 0x52, 0x54, 0x56, 0x92, 0xae, - 0xde, 0x53, 0x04, 0xbe, 0x36, 0x94, 0x25, 0xd9, 0x8a, 0x92, 0x31, 0xce, 0x43, 0x7e, 0x06, 0xa6, - 0x3d, 0x6a, 0xf6, 0x0d, 0x1a, 0xe6, 0x02, 0x9e, 0x4b, 0x9e, 0x7f, 0x1a, 0xa7, 0x60, 0x8a, 0x73, - 0x88, 0x91, 0xb0, 0x31, 0x92, 0x91, 0xf0, 0xab, 0x30, 0x6d, 0x7a, 0xba, 0xe5, 0x50, 0xf3, 0x9e, - 0xc3, 0x03, 0x3b, 0x64, 0x38, 0x67, 0x68, 0xa0, 0x5f, 0x4d, 0x50, 0x31, 0xc5, 0xad, 0xfd, 0xf3, - 0x22, 0x54, 0xc4, 0xa9, 0xac, 0x1b, 0x70, 0xc9, 0x72, 0xac, 0xc0, 0xd2, 0xed, 0x55, 0x6a, 0xeb, - 0xc7, 0xf1, 0x00, 0x97, 0x4a, 0xeb, 0x45, 0xb6, 0xd0, 0xde, 0x18, 0x24, 0x63, 0x56, 0x1e, 0x56, - 0x39, 0x81, 0x98, 0xbe, 0x15, 0x8a, 0xb0, 0xa3, 0x89, 0x23, 0xc1, 0x13, 0x14, 0x4c, 0x71, 0x32, - 0x65, 0xa8, 0x37, 0x10, 0xb9, 0x52, 0x11, 0xca, 0x50, 0x32, 0x98, 0x24, 0xc9, 0xc7, 0x95, 0xf4, - 0x3e, 0x57, 0x88, 0xc3, 0x4d, 0x53, 0x32, 0x08, 0x4e, 0x28, 0xe9, 0x29, 0x1a, 0x0e, 0x70, 0x33, - 0x84, 0x7d, 0xdd, 0xb2, 0xfb, 0x1e, 0x8d, 0x10, 0x2a, 0x11, 0xc2, 0x5a, 0x8a, 0x86, 0x03, 0xdc, - 0xda, 0x7f, 0x2f, 0x00, 0x19, 0xdc, 0x06, 0x42, 0x0e, 0x60, 0xc2, 0xe1, 0xb6, 0xc8, 0xdc, 0x37, - 0x11, 0xc4, 0x4c, 0x9a, 0x62, 0x92, 0x90, 0x09, 0x12, 0x9f, 0x38, 0x50, 0xa3, 0x0f, 0x03, 0xea, - 0x39, 0xe1, 0xb6, 0xb0, 0xf1, 0xdc, 0x7a, 0x20, 0xd6, 0x66, 0x12, 0x19, 0x43, 0x19, 0xda, 0xef, - 0x15, 0xa1, 0x11, 0xe3, 0x7b, 0xd2, 0x12, 0x9f, 0x9f, 0x4c, 0x21, 0x4c, 0x80, 0xbb, 0x9e, 0x2d, - 0xc7, 0xbb, 0xd8, 0xc9, 0x14, 0x92, 0x84, 0x9b, 0x18, 0xe7, 0x23, 0x4b, 0x00, 0x5d, 0xdd, 0x0f, - 0xa8, 0xc7, 0x75, 0xa1, 0xd4, 0x79, 0x10, 0x5b, 0x21, 0x05, 0x63, 0x5c, 0xe4, 0x86, 0xbc, 0xb7, - 0xa2, 0x9c, 0x3c, 0xbf, 0x73, 0xc8, 0xa5, 0x14, 0x95, 0x31, 0x5c, 0x4a, 0x41, 0x3a, 0x30, 0xab, - 0x4a, 0xad, 0xa8, 0xe7, 0x3b, 0xdd, 0x51, 0x34, 0xd4, 0x14, 0x04, 0x0e, 0x80, 0x6a, 0xdf, 0x2b, - 0xc0, 0x54, 0xc2, 0x00, 0x25, 0x4e, 0xde, 0x54, 0x9b, 0x98, 0x12, 0x27, 0x6f, 0xc6, 0xf6, 0x1e, - 0xbd, 0x0c, 0x13, 0xa2, 
0x82, 0xd2, 0xb1, 0xc9, 0xa2, 0x0a, 0x51, 0x52, 0xd9, 0xcc, 0x22, 0x4d, - 0xdc, 0xe9, 0x99, 0x45, 0xda, 0xc0, 0x51, 0xd1, 0x85, 0xe7, 0x48, 0x94, 0x4e, 0xd6, 0x74, 0xcc, - 0x73, 0x24, 0xd2, 0x31, 0xe4, 0xd0, 0xfe, 0x3e, 0x2f, 0x77, 0xe0, 0x1d, 0x87, 0x2b, 0xeb, 0x0e, - 0x54, 0x65, 0x3c, 0xaa, 0xec, 0x1a, 0x6f, 0xe6, 0xb0, 0x8a, 0x71, 0x1c, 0x19, 0x51, 0xa9, 0x1b, - 0x87, 0xf7, 0xf6, 0xf7, 0x51, 0xa1, 0x93, 0x5b, 0x50, 0x77, 0x1d, 0xd9, 0x83, 0xe5, 0xe7, 0x7f, - 0x81, 0xcd, 0x1c, 0xf7, 0x54, 0xe2, 0xa3, 0x93, 0xe6, 0x95, 0xf0, 0x25, 0x51, 0x48, 0x8c, 0x72, - 0x6a, 0x7f, 0xa6, 0x00, 0x2f, 0xa0, 0x6b, 0xdb, 0x96, 0xd3, 0x49, 0x7a, 0x3e, 0x89, 0x0d, 0xd3, - 0x5d, 0xfd, 0xe1, 0xae, 0xa3, 0x1f, 0xe9, 0x96, 0xad, 0xef, 0xd9, 0xf4, 0x89, 0x2b, 0xe3, 0x7e, - 0x60, 0xd9, 0x0b, 0xe2, 0x1e, 0xcf, 0x85, 0x0d, 0x27, 0xb8, 0xe7, 0xb5, 0x03, 0xcf, 0x72, 0x3a, - 0x62, 0x94, 0xdc, 0x4a, 0x60, 0x61, 0x0a, 0x5b, 0xfb, 0xfd, 0x12, 0xf0, 0x58, 0x47, 0xf2, 0x25, - 0xa8, 0x77, 0xa9, 0x71, 0xa0, 0x3b, 0x96, 0xaf, 0xce, 0x30, 0xbe, 0xca, 0xbe, 0x6b, 0x4b, 0x25, - 0x3e, 0x62, 0xbf, 0x62, 0xb9, 0xbd, 0xc9, 0xb7, 0x1d, 0x45, 0xbc, 0xc4, 0x80, 0x89, 0x8e, 0xef, - 0xeb, 0x3d, 0x2b, 0x77, 0x88, 0x89, 0x38, 0x33, 0x56, 0x0c, 0x47, 0xe2, 0x19, 0x25, 0x34, 0x31, - 0xa0, 0xd2, 0xb3, 0x75, 0xcb, 0xc9, 0x7d, 0xef, 0x1c, 0xfb, 0x82, 0x6d, 0x86, 0x24, 0x4c, 0x95, - 0xfc, 0x11, 0x05, 0x36, 0xe9, 0x43, 0xc3, 0x37, 0x3c, 0xbd, 0xeb, 0x1f, 0xe8, 0x4b, 0xaf, 0xbe, - 0x96, 0x5b, 0xf9, 0x8f, 0x44, 0x09, 0x5d, 0x64, 0x05, 0x97, 0xb7, 0xda, 0xb7, 0x97, 0x97, 0x5e, - 0x7d, 0x0d, 0xe3, 0x72, 0xe2, 0x62, 0x5f, 0x7d, 0x65, 0x49, 0x8e, 0x20, 0x63, 0x17, 0xfb, 0xea, - 0x2b, 0x4b, 0x18, 0x97, 0xa3, 0xfd, 0xef, 0x02, 0xd4, 0x43, 0x5e, 0xb2, 0x0b, 0xc0, 0xc6, 0x32, - 0x79, 0xca, 0xeb, 0xb9, 0xee, 0xe8, 0xe1, 0xd6, 0x9e, 0xdd, 0x30, 0x33, 0xc6, 0x80, 0x32, 0x8e, - 0xc1, 0x2d, 0x8e, 0xfb, 0x18, 0xdc, 0x45, 0xa8, 0x1f, 0xe8, 0x8e, 0xe9, 0x1f, 0xe8, 0x87, 0x54, - 0x86, 0x88, 0x87, 0x4b, 0x91, 0xdb, 0x8a, 0x80, 0x11, 0x8f, 
0xf6, 0x0f, 0x27, 0x40, 0xc4, 0x85, - 0xb0, 0x41, 0xc7, 0xb4, 0x7c, 0xb1, 0x91, 0xa3, 0xc0, 0x73, 0x86, 0x83, 0xce, 0xaa, 0x4c, 0xc7, - 0x90, 0x83, 0x5c, 0x85, 0x52, 0xd7, 0x72, 0xa4, 0x06, 0xc2, 0x0d, 0xb9, 0x5b, 0x96, 0x83, 0x2c, - 0x8d, 0x93, 0xf4, 0x87, 0x52, 0xc3, 0x10, 0x24, 0xfd, 0x21, 0xb2, 0x34, 0xf2, 0x15, 0x98, 0xb1, - 0x5d, 0xf7, 0x90, 0x0d, 0x1f, 0xf1, 0x50, 0xd7, 0x29, 0x61, 0x5a, 0xd9, 0x4c, 0x92, 0x30, 0xcd, - 0x4b, 0x76, 0xe1, 0xc5, 0x0f, 0xa8, 0xe7, 0xca, 0xf1, 0xb2, 0x6d, 0x53, 0xda, 0x53, 0x30, 0x42, - 0x35, 0xe6, 0x91, 0xb8, 0x3f, 0x9f, 0xcd, 0x82, 0xc3, 0xf2, 0xf2, 0xbd, 0x03, 0xba, 0xd7, 0xa1, - 0xc1, 0xb6, 0xe7, 0x32, 0xdd, 0xc5, 0x72, 0x3a, 0x0a, 0x76, 0x22, 0x82, 0xdd, 0xc9, 0x66, 0xc1, - 0x61, 0x79, 0xc9, 0x3b, 0x30, 0x27, 0x48, 0x42, 0x6d, 0x59, 0x16, 0xc3, 0x8c, 0x65, 0xab, 0xeb, - 0x5a, 0xa7, 0x84, 0xbf, 0x6c, 0x67, 0x08, 0x0f, 0x0e, 0xcd, 0x4d, 0xde, 0x82, 0x59, 0xe5, 0x2d, - 0xdd, 0xa6, 0x5e, 0x3b, 0x8c, 0x15, 0x9a, 0x52, 0x21, 0xd3, 0x2a, 0x64, 0x18, 0x53, 0x5c, 0x38, - 0x90, 0x8f, 0x20, 0x5c, 0xe1, 0x01, 0x41, 0xbb, 0xbd, 0x15, 0xd7, 0xb5, 0x4d, 0xf7, 0x81, 0xa3, - 0xbe, 0x5d, 0x28, 0xec, 0xdc, 0x41, 0xda, 0xce, 0xe4, 0xc0, 0x21, 0x39, 0xd9, 0x97, 0x73, 0xca, - 0xaa, 0xfb, 0xc0, 0x49, 0xa3, 0x42, 0xf4, 0xe5, 0xed, 0x21, 0x3c, 0x38, 0x34, 0x37, 0x59, 0x03, - 0x92, 0xfe, 0x82, 0xdd, 0x9e, 0x74, 0xe1, 0x5f, 0x11, 0x07, 0x36, 0xa5, 0xa9, 0x98, 0x91, 0x83, - 0x6c, 0xc2, 0xe5, 0x74, 0x2a, 0x13, 0x27, 0xbd, 0xf9, 0xfc, 0xa8, 0x66, 0xcc, 0xa0, 0x63, 0x66, - 0x2e, 0xed, 0x1f, 0x15, 0x61, 0x2a, 0x71, 0xc2, 0xc7, 0x33, 0x77, 0x92, 0x02, 0x5b, 0x3c, 0x74, - 0xfd, 0xce, 0xc6, 0xea, 0x6d, 0xaa, 0x9b, 0xd4, 0x53, 0x1b, 0x48, 0xea, 0x72, 0x5a, 0x4c, 0x50, - 0x30, 0xc5, 0x49, 0xf6, 0xa1, 0x22, 0xfc, 0x04, 0x79, 0x6f, 0x7b, 0x52, 0x75, 0xc4, 0x9d, 0x05, - 0xf2, 0x8a, 0x34, 0xd7, 0xa3, 0x28, 0xe0, 0xb5, 0x00, 0x26, 0xe3, 0x1c, 0x6c, 0x20, 0x89, 0xd4, - 0xde, 0x6a, 0x42, 0xe5, 0xdd, 0x80, 0x52, 0x10, 0x8c, 0x7a, 0x46, 0x83, 0xf0, 0x3b, 0xed, 0x6c, - 
0x22, 0xc3, 0xd0, 0xf6, 0xd9, 0xbf, 0xf3, 0x7d, 0xcb, 0x75, 0xe4, 0x81, 0xfd, 0xbb, 0x50, 0x95, - 0xab, 0xa7, 0x11, 0xcf, 0x98, 0xe0, 0xba, 0x92, 0x32, 0xbb, 0x2a, 0x2c, 0xed, 0xdf, 0x15, 0xa1, - 0x1e, 0x9a, 0x49, 0xce, 0x70, 0x10, 0xbe, 0x0b, 0xf5, 0x30, 0xa0, 0x31, 0xf7, 0x55, 0xb6, 0x51, - 0x9c, 0x1d, 0x5f, 0xd9, 0x87, 0xaf, 0x18, 0xc9, 0x88, 0x07, 0x4b, 0x96, 0x72, 0x04, 0x4b, 0xf6, - 0xa0, 0x1a, 0x78, 0x56, 0xa7, 0x23, 0x57, 0x09, 0x79, 0xa2, 0x25, 0xc3, 0xea, 0xda, 0x11, 0x80, - 0xb2, 0x66, 0xc5, 0x0b, 0x2a, 0x31, 0xda, 0xfb, 0x30, 0x9b, 0xe6, 0xe4, 0x2a, 0xb4, 0x71, 0x40, - 0xcd, 0xbe, 0xad, 0xea, 0x38, 0x52, 0xa1, 0x65, 0x3a, 0x86, 0x1c, 0xe4, 0x26, 0xd4, 0xd8, 0x6f, - 0xfa, 0xc0, 0x75, 0x94, 0x1a, 0xcb, 0x57, 0x23, 0x3b, 0x32, 0x0d, 0x43, 0xaa, 0xf6, 0x5f, 0x4a, - 0x70, 0x35, 0x32, 0x76, 0x6d, 0xe9, 0x8e, 0xde, 0x39, 0xc3, 0xfd, 0xa5, 0x9f, 0xee, 0xda, 0x3b, - 0xef, 0x6d, 0x26, 0xa5, 0x67, 0xe0, 0x36, 0x93, 0xff, 0x57, 0x04, 0x1e, 0x7c, 0x4d, 0xbe, 0x05, - 0x93, 0x7a, 0xec, 0xea, 0x6a, 0xf9, 0x3b, 0x6f, 0xe5, 0xfe, 0x9d, 0x3c, 0xc6, 0x3b, 0x0c, 0x80, - 0x8b, 0xa7, 0x62, 0x42, 0x20, 0x71, 0xa1, 0xb6, 0xaf, 0xdb, 0x36, 0xd3, 0x85, 0x72, 0x3b, 0xef, - 0x12, 0xc2, 0x79, 0x33, 0x5f, 0x93, 0xd0, 0x18, 0x0a, 0x21, 0xdf, 0x29, 0xc0, 0x94, 0x17, 0x5f, - 0xae, 0xc9, 0x1f, 0x92, 0x27, 0xb4, 0x23, 0x86, 0x16, 0x0f, 0xb7, 0x8b, 0xaf, 0x09, 0x93, 0x32, - 0xb5, 0xff, 0x5c, 0x80, 0xa9, 0xb6, 0x6d, 0x99, 0x96, 0xd3, 0xb9, 0xc0, 0xcb, 0x54, 0xee, 0x41, - 0xc5, 0xb7, 0x2d, 0x93, 0x8e, 0x38, 0x9b, 0x88, 0x79, 0x8c, 0x01, 0xa0, 0xc0, 0x49, 0xde, 0xce, - 0x52, 0x3a, 0xc3, 0xed, 0x2c, 0x7f, 0x30, 0x01, 0x72, 0x1b, 0x01, 0xe9, 0x43, 0xbd, 0xa3, 0x2e, - 0x7d, 0x90, 0xdf, 0x78, 0x3b, 0xc7, 0x81, 0xa1, 0x89, 0xeb, 0x23, 0xc4, 0xd8, 0x1f, 0x26, 0x62, - 0x24, 0x89, 0xd0, 0xe4, 0x9d, 0xe9, 0xab, 0x39, 0xef, 0x4c, 0x17, 0xe2, 0x06, 0x6f, 0x4d, 0xd7, - 0xa1, 0x7c, 0x10, 0x04, 0x3d, 0xd9, 0x98, 0x46, 0xdf, 0x27, 0x12, 0x9d, 0x59, 0x25, 0x74, 0x22, - 0xf6, 0x8e, 0x1c, 0x9a, 0x89, 0x70, 
0xf4, 0xf0, 0x66, 0xca, 0x95, 0x5c, 0x61, 0x24, 0x71, 0x11, - 0xec, 0x1d, 0x39, 0x34, 0xf9, 0x45, 0x68, 0x04, 0x9e, 0xee, 0xf8, 0xfb, 0xae, 0xd7, 0xa5, 0x9e, - 0x5c, 0xa3, 0xae, 0xe5, 0xb8, 0x36, 0x7c, 0x27, 0x42, 0x13, 0x26, 0xd9, 0x44, 0x12, 0xc6, 0xa5, - 0x91, 0x43, 0xa8, 0xf5, 0x4d, 0x51, 0x30, 0x69, 0x06, 0x5b, 0xce, 0x73, 0x13, 0x7c, 0x2c, 0x48, - 0x44, 0xbd, 0x61, 0x28, 0x20, 0x79, 0x09, 0x6b, 0x75, 0x5c, 0x97, 0xb0, 0xc6, 0x5b, 0x63, 0xd6, - 0x81, 0x3a, 0xa4, 0x2b, 0xf5, 0x5a, 0xa7, 0x23, 0x63, 0xdc, 0xd6, 0x72, 0xab, 0x9c, 0x42, 0x64, - 0x23, 0xd4, 0x8d, 0x9d, 0x0e, 0x2a, 0x19, 0x5a, 0x17, 0xa4, 0xef, 0x88, 0x18, 0x89, 0x8b, 0xa7, - 0xc4, 0xce, 0xc8, 0xc5, 0xb3, 0x8d, 0x07, 0xe1, 0x0d, 0x48, 0xb1, 0x83, 0xef, 0x33, 0x6f, 0x98, - 0xd2, 0xfe, 0x7d, 0x11, 0x4a, 0x3b, 0x9b, 0x6d, 0x71, 0x98, 0x2d, 0xbf, 0xca, 0x8e, 0xb6, 0x0f, - 0xad, 0xde, 0x7d, 0xea, 0x59, 0xfb, 0xc7, 0x72, 0xe9, 0x1d, 0x3b, 0xcc, 0x36, 0xcd, 0x81, 0x19, - 0xb9, 0xc8, 0xbb, 0x30, 0x69, 0xe8, 0x2b, 0xd4, 0x0b, 0x46, 0x31, 0x2c, 0xf0, 0xed, 0xec, 0x2b, - 0xcb, 0x51, 0x76, 0x4c, 0x80, 0x91, 0x5d, 0x00, 0x23, 0x82, 0x2e, 0x9d, 0xdb, 0x1c, 0x12, 0x03, - 0x8e, 0x01, 0x11, 0x84, 0xfa, 0x21, 0x63, 0xe5, 0xa8, 0xe5, 0xf3, 0xa0, 0xf2, 0x96, 0x73, 0x47, - 0xe5, 0xc5, 0x08, 0x46, 0x73, 0x60, 0x2a, 0x71, 0x1b, 0x15, 0xf9, 0x32, 0xd4, 0xdc, 0x5e, 0x6c, - 0x38, 0xad, 0xf3, 0x68, 0xda, 0xda, 0x3d, 0x99, 0xf6, 0xe8, 0xa4, 0x39, 0xb5, 0xe9, 0x76, 0x2c, - 0x43, 0x25, 0x60, 0xc8, 0x4e, 0x34, 0x98, 0xe0, 0xfb, 0x36, 0xd5, 0x5d, 0x54, 0x7c, 0xee, 0xe0, - 0xd7, 0xc5, 0xf8, 0x28, 0x29, 0xda, 0x2f, 0x97, 0x21, 0xf2, 0xb8, 0x12, 0x1f, 0x26, 0xc4, 0x9e, - 0x11, 0x39, 0x72, 0x5f, 0xe8, 0xf6, 0x14, 0x29, 0x8a, 0x74, 0xa0, 0xf4, 0xbe, 0xbb, 0x97, 0x7b, - 0xe0, 0x8e, 0x1d, 0x3e, 0x21, 0x6c, 0x65, 0xb1, 0x04, 0x64, 0x12, 0xc8, 0x5f, 0x2d, 0xc0, 0xf3, - 0x7e, 0x5a, 0xf5, 0x95, 0xcd, 0x01, 0xf3, 0xeb, 0xf8, 0x69, 0x65, 0x5a, 0x86, 0x3d, 0x0f, 0x23, - 0xe3, 0x60, 0x59, 0x58, 0xfd, 0x0b, 0x57, 0xa8, 0x6c, 0x4e, 0xeb, 0x39, 
0xef, 0xdc, 0x4d, 0xd6, - 0x7f, 0x32, 0x0d, 0xa5, 0x28, 0xed, 0xdb, 0x45, 0x68, 0xc4, 0x46, 0xeb, 0xdc, 0x57, 0x9c, 0x3d, - 0x4c, 0x5d, 0x71, 0xb6, 0x3d, 0x7a, 0x64, 0x40, 0x54, 0xaa, 0x8b, 0xbe, 0xe5, 0xec, 0x9f, 0x16, - 0xa1, 0xb4, 0xbb, 0xba, 0x96, 0x5c, 0xb4, 0x16, 0x9e, 0xc2, 0xa2, 0xf5, 0x00, 0xaa, 0x7b, 0x7d, - 0xcb, 0x0e, 0x2c, 0x27, 0xf7, 0xf1, 0x38, 0xea, 0x46, 0x38, 0xe9, 0xeb, 0x10, 0xa8, 0xa8, 0xe0, - 0x49, 0x07, 0xaa, 0x1d, 0x71, 0x3e, 0x69, 0xee, 0x78, 0x49, 0x79, 0xce, 0xa9, 0x10, 0x24, 0x5f, - 0x50, 0xa1, 0x6b, 0xc7, 0x30, 0xb1, 0xbb, 0x2a, 0xd5, 0xfe, 0xa7, 0x5b, 0x9b, 0xda, 0x2f, 0x42, - 0xa8, 0x05, 0x3c, 0x7d, 0xe1, 0xff, 0xad, 0x00, 0x49, 0xc5, 0xe7, 0xe9, 0xb7, 0xa6, 0xc3, 0x74, - 0x6b, 0x5a, 0x1d, 0x47, 0xe7, 0xcb, 0x6e, 0x50, 0xda, 0xbf, 0x29, 0x40, 0x6a, 0xa3, 0x1f, 0x79, - 0x4d, 0x1e, 0x75, 0x97, 0x0c, 0x4c, 0x53, 0x47, 0xdd, 0x91, 0x24, 0x77, 0xec, 0xc8, 0xbb, 0x0f, - 0xd9, 0x72, 0x2d, 0xee, 0x40, 0x93, 0xc5, 0xbf, 0x3b, 0xfa, 0x72, 0x2d, 0xcb, 0x1d, 0x27, 0x83, - 0x27, 0xe3, 0x24, 0x4c, 0xca, 0xd5, 0xfe, 0x41, 0x11, 0x26, 0x9e, 0xda, 0xd9, 0x06, 0x34, 0x11, - 0xcf, 0xba, 0x92, 0x73, 0xb4, 0x1f, 0x1a, 0xcd, 0xda, 0x4d, 0x45, 0xb3, 0xe6, 0xbd, 0xca, 0xfd, - 0x09, 0xb1, 0xac, 0xff, 0xaa, 0x00, 0x72, 0xae, 0xd9, 0x70, 0xfc, 0x40, 0x77, 0x0c, 0x4a, 0x8c, - 0x70, 0x62, 0xcb, 0x1b, 0x34, 0x25, 0x03, 0x0b, 0x85, 0x2e, 0xc3, 0x9f, 0xd5, 0x44, 0x46, 0x7e, - 0x12, 0x6a, 0x07, 0xae, 0x1f, 0xf0, 0xc9, 0xab, 0x98, 0x34, 0x99, 0xdd, 0x96, 0xe9, 0x18, 0x72, - 0xa4, 0xdd, 0xd9, 0x95, 0xe1, 0xee, 0x6c, 0xed, 0x37, 0x8b, 0x30, 0xf9, 0x49, 0x39, 0x3c, 0x21, - 0x2b, 0xfa, 0xb7, 0x94, 0x33, 0xfa, 0xb7, 0x7c, 0x9e, 0xe8, 0x5f, 0xed, 0x07, 0x05, 0x80, 0xa7, - 0x76, 0x72, 0x83, 0x99, 0x0c, 0xcc, 0xcd, 0xdd, 0xae, 0xb2, 0xc3, 0x72, 0xff, 0x6e, 0x45, 0x7d, - 0x12, 0x0f, 0xca, 0xfd, 0xb0, 0x00, 0xd3, 0x7a, 0x22, 0xd0, 0x35, 0xb7, 0xbe, 0x9c, 0x8a, 0x9b, - 0x0d, 0xe3, 0xb4, 0x92, 0xe9, 0x98, 0x12, 0x4b, 0x5e, 0x8f, 0x4e, 0x59, 0xbf, 0x1b, 0x35, 0xfb, - 0x81, 0xe3, 
0xd1, 0xb9, 0xee, 0x96, 0xe0, 0x7c, 0x42, 0x60, 0x71, 0x69, 0x2c, 0x81, 0xc5, 0xf1, - 0x2d, 0x93, 0xe5, 0xc7, 0x6e, 0x99, 0x3c, 0x82, 0xfa, 0xbe, 0xe7, 0x76, 0x79, 0xec, 0xae, 0xbc, - 0xc7, 0xfc, 0x56, 0x8e, 0x89, 0xb2, 0xbb, 0x67, 0x39, 0xd4, 0xe4, 0x71, 0xc1, 0xa1, 0xe1, 0x6a, - 0x4d, 0xe1, 0x63, 0x24, 0x8a, 0xdb, 0xfa, 0x5d, 0x21, 0x75, 0x62, 0x9c, 0x52, 0xc3, 0xb1, 0x64, - 0x47, 0xa0, 0xa3, 0x12, 0x93, 0x8c, 0xd7, 0xad, 0x3e, 0x9d, 0x78, 0x5d, 0xed, 0xcf, 0x57, 0xd5, - 0x00, 0xf6, 0xcc, 0x1d, 0xe8, 0xfb, 0xe9, 0x46, 0xf7, 0x0e, 0x1d, 0xd8, 0x85, 0x5e, 0x7b, 0x8a, - 0xbb, 0xd0, 0xeb, 0xe3, 0xd9, 0x85, 0x0e, 0xf9, 0x76, 0xa1, 0x37, 0xc6, 0xb4, 0x0b, 0x7d, 0x72, - 0x5c, 0xbb, 0xd0, 0xa7, 0x46, 0xda, 0x85, 0x3e, 0x7d, 0xa6, 0x5d, 0xe8, 0x27, 0x25, 0x48, 0x2d, - 0xc6, 0x3f, 0x75, 0xbc, 0xfd, 0x48, 0x39, 0xde, 0xbe, 0x5b, 0x84, 0x68, 0x20, 0x3e, 0x67, 0x60, - 0xd2, 0x3b, 0x50, 0xeb, 0xea, 0x0f, 0x79, 0xe0, 0x74, 0x9e, 0x7b, 0xb0, 0xb7, 0x24, 0x06, 0x86, - 0x68, 0xc4, 0x07, 0xb0, 0xc2, 0xbb, 0x28, 0x72, 0xbb, 0x30, 0xa2, 0x6b, 0x2d, 0x84, 0x91, 0x34, - 0x7a, 0xc7, 0x98, 0x18, 0xed, 0x5f, 0x16, 0x41, 0x5e, 0x5a, 0x42, 0x28, 0x54, 0xf6, 0xad, 0x87, - 0xd4, 0xcc, 0x1d, 0xee, 0xbc, 0xc6, 0x50, 0xe4, 0xcd, 0x28, 0xdc, 0x47, 0xc3, 0x13, 0x50, 0xa0, - 0x73, 0xe3, 0xbb, 0xf0, 0xb9, 0xc9, 0xfa, 0xcb, 0x61, 0x7c, 0x8f, 0xfb, 0xee, 0xa4, 0xf1, 0x5d, - 0x24, 0xa1, 0x92, 0x21, 0x6c, 0xfd, 0x3c, 0xfc, 0x22, 0xb7, 0x8b, 0x31, 0x11, 0xc6, 0xa1, 0x6c, - 0xfd, 0xbe, 0x38, 0x86, 0x42, 0xca, 0x68, 0xfd, 0xc2, 0xf7, 0x7f, 0x78, 0xfd, 0xb9, 0x1f, 0xfc, - 0xf0, 0xfa, 0x73, 0x1f, 0xfd, 0xf0, 0xfa, 0x73, 0xbf, 0x7c, 0x7a, 0xbd, 0xf0, 0xfd, 0xd3, 0xeb, - 0x85, 0x1f, 0x9c, 0x5e, 0x2f, 0x7c, 0x74, 0x7a, 0xbd, 0xf0, 0x1f, 0x4f, 0xaf, 0x17, 0xfe, 0xd2, - 0x7f, 0xba, 0xfe, 0xdc, 0xcf, 0x7f, 0x29, 0x2a, 0xc2, 0xa2, 0x2a, 0xc2, 0xa2, 0x12, 0xb8, 0xd8, - 0x3b, 0xec, 0x2c, 0xb2, 0x22, 0x44, 0x29, 0xaa, 0x08, 0xff, 0x3f, 0x00, 0x00, 0xff, 0xff, 0xa1, - 0x3f, 0x0d, 0xca, 0x09, 0xa0, 0x00, 0x00, + // 8292 
bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x6f, 0x6c, 0x24, 0x47, + 0x76, 0x9f, 0xe6, 0xff, 0xcc, 0x1b, 0x92, 0x4b, 0xd5, 0xae, 0x56, 0x5c, 0xde, 0x6a, 0x67, 0xaf, + 0xcf, 0xa7, 0x5b, 0xc7, 0x67, 0x32, 0xa2, 0x4f, 0x3a, 0x9d, 0xed, 0x3b, 0x89, 0x43, 0x2e, 0x77, + 0xa9, 0x25, 0x77, 0x79, 0x6f, 0xc8, 0x95, 0xce, 0x8a, 0x4f, 0x69, 0x76, 0x17, 0x87, 0x2d, 0xf6, + 0x74, 0xcf, 0x75, 0xf7, 0x70, 0x97, 0x72, 0x8c, 0xb3, 0xef, 0x12, 0xe8, 0x82, 0x24, 0x48, 0xe0, + 0x4f, 0x06, 0x02, 0x27, 0x48, 0x10, 0xc0, 0x1f, 0x0c, 0xe7, 0x83, 0x91, 0xcb, 0x87, 0x00, 0xf9, + 0xe3, 0x20, 0x48, 0x2e, 0xff, 0x0f, 0x41, 0x80, 0x28, 0x1f, 0x42, 0xe4, 0x18, 0x04, 0x41, 0x02, + 0x24, 0x70, 0x62, 0x24, 0x76, 0x16, 0x41, 0x1c, 0xd4, 0xbf, 0xfe, 0x37, 0x3d, 0xbb, 0xe4, 0xf4, + 0x70, 0xb5, 0x72, 0xf4, 0x6d, 0xa6, 0xde, 0xab, 0xdf, 0xab, 0xae, 0xae, 0xae, 0x7a, 0xf5, 0xde, + 0xab, 0x57, 0x70, 0xab, 0x6b, 0x05, 0xfb, 0x83, 0xdd, 0x05, 0xc3, 0xed, 0x2d, 0x3a, 0x83, 0x9e, + 0xde, 0xf7, 0xdc, 0xf7, 0xf9, 0x8f, 0x3d, 0xdb, 0x7d, 0xb0, 0xd8, 0x3f, 0xe8, 0x2e, 0xea, 0x7d, + 0xcb, 0x8f, 0x4a, 0x0e, 0x5f, 0xd1, 0xed, 0xfe, 0xbe, 0xfe, 0xca, 0x62, 0x97, 0x3a, 0xd4, 0xd3, + 0x03, 0x6a, 0x2e, 0xf4, 0x3d, 0x37, 0x70, 0xc9, 0x97, 0x23, 0xa0, 0x05, 0x05, 0xb4, 0xa0, 0xaa, + 0x2d, 0xf4, 0x0f, 0xba, 0x0b, 0x0c, 0x28, 0x2a, 0x51, 0x40, 0xf3, 0x3f, 0x19, 0x6b, 0x41, 0xd7, + 0xed, 0xba, 0x8b, 0x1c, 0x6f, 0x77, 0xb0, 0xc7, 0xff, 0xf1, 0x3f, 0xfc, 0x97, 0x90, 0x33, 0xaf, + 0x1d, 0xbc, 0xee, 0x2f, 0x58, 0x2e, 0x6b, 0xd6, 0xa2, 0xe1, 0x7a, 0x74, 0xf1, 0x70, 0xa8, 0x2d, + 0xf3, 0x5f, 0x8a, 0x78, 0x7a, 0xba, 0xb1, 0x6f, 0x39, 0xd4, 0x3b, 0x52, 0xcf, 0xb2, 0xe8, 0x51, + 0xdf, 0x1d, 0x78, 0x06, 0x3d, 0x53, 0x2d, 0x7f, 0xb1, 0x47, 0x03, 0x3d, 0x4b, 0xd6, 0xe2, 0xa8, + 0x5a, 0xde, 0xc0, 0x09, 0xac, 0xde, 0xb0, 0x98, 0xd7, 0x9e, 0x54, 0xc1, 0x37, 0xf6, 0x69, 0x4f, + 0x1f, 0xaa, 0xf7, 0x53, 0xa3, 0xea, 0x0d, 0x02, 0xcb, 0x5e, 0xb4, 0x9c, 0xc0, 0x0f, 0xbc, 0x74, 
+ 0x25, 0xed, 0xb7, 0x01, 0x2e, 0x2e, 0xef, 0xfa, 0x81, 0xa7, 0x1b, 0xc1, 0x96, 0x6b, 0x6e, 0xd3, + 0x5e, 0xdf, 0xd6, 0x03, 0x4a, 0x0e, 0xa0, 0xce, 0x1e, 0xc8, 0xd4, 0x03, 0x7d, 0xae, 0x70, 0xbd, + 0x70, 0xa3, 0xb9, 0xb4, 0xbc, 0x30, 0xe6, 0x0b, 0x5c, 0xd8, 0x94, 0x40, 0xed, 0xa9, 0x93, 0xe3, + 0x56, 0x5d, 0xfd, 0xc3, 0x50, 0x00, 0xf9, 0xd5, 0x02, 0x4c, 0x39, 0xae, 0x49, 0x3b, 0xd4, 0xa6, + 0x46, 0xe0, 0x7a, 0x73, 0xc5, 0xeb, 0xa5, 0x1b, 0xcd, 0xa5, 0x6f, 0x8e, 0x2d, 0x31, 0xe3, 0x89, + 0x16, 0xee, 0xc6, 0x04, 0xdc, 0x74, 0x02, 0xef, 0xa8, 0x7d, 0xe9, 0x07, 0xc7, 0xad, 0xe7, 0x4e, + 0x8e, 0x5b, 0x53, 0x71, 0x12, 0x26, 0x5a, 0x42, 0x76, 0xa0, 0x19, 0xb8, 0x36, 0xeb, 0x32, 0xcb, + 0x75, 0xfc, 0xb9, 0x12, 0x6f, 0xd8, 0xb5, 0x05, 0xd1, 0xd5, 0x4c, 0xfc, 0x02, 0x1b, 0x63, 0x0b, + 0x87, 0xaf, 0x2c, 0x6c, 0x87, 0x6c, 0xed, 0x8b, 0x12, 0xb8, 0x19, 0x95, 0xf9, 0x18, 0xc7, 0x21, + 0x14, 0x2e, 0xf8, 0xd4, 0x18, 0x78, 0x56, 0x70, 0xb4, 0xe2, 0x3a, 0x01, 0x7d, 0x18, 0xcc, 0x95, + 0x79, 0x2f, 0xbf, 0x9c, 0x05, 0xbd, 0xe5, 0x9a, 0x9d, 0x24, 0x77, 0xfb, 0xe2, 0xc9, 0x71, 0xeb, + 0x42, 0xaa, 0x10, 0xd3, 0x98, 0xc4, 0x81, 0x59, 0xab, 0xa7, 0x77, 0xe9, 0xd6, 0xc0, 0xb6, 0x3b, + 0xd4, 0xf0, 0x68, 0xe0, 0xcf, 0x55, 0xf8, 0x23, 0xdc, 0xc8, 0x92, 0xb3, 0xe1, 0x1a, 0xba, 0x7d, + 0x6f, 0xf7, 0x7d, 0x6a, 0x04, 0x48, 0xf7, 0xa8, 0x47, 0x1d, 0x83, 0xb6, 0xe7, 0xe4, 0xc3, 0xcc, + 0xae, 0xa7, 0x90, 0x70, 0x08, 0x9b, 0xdc, 0x82, 0xe7, 0xfb, 0x9e, 0xe5, 0xf2, 0x26, 0xd8, 0xba, + 0xef, 0xdf, 0xd5, 0x7b, 0x74, 0xae, 0x7a, 0xbd, 0x70, 0xa3, 0xd1, 0xbe, 0x22, 0x61, 0x9e, 0xdf, + 0x4a, 0x33, 0xe0, 0x70, 0x1d, 0x72, 0x03, 0xea, 0xaa, 0x70, 0xae, 0x76, 0xbd, 0x70, 0xa3, 0x22, + 0xc6, 0x8e, 0xaa, 0x8b, 0x21, 0x95, 0xac, 0x41, 0x5d, 0xdf, 0xdb, 0xb3, 0x1c, 0xc6, 0x59, 0xe7, + 0x5d, 0x78, 0x35, 0xeb, 0xd1, 0x96, 0x25, 0x8f, 0xc0, 0x51, 0xff, 0x30, 0xac, 0x4b, 0xde, 0x02, + 0xe2, 0x53, 0xef, 0xd0, 0x32, 0xe8, 0xb2, 0x61, 0xb8, 0x03, 0x27, 0xe0, 0x6d, 0x6f, 0xf0, 0xb6, + 0xcf, 0xcb, 0xb6, 0x93, 0xce, 0x10, 
0x07, 0x66, 0xd4, 0x22, 0x6f, 0xc2, 0xac, 0xfc, 0x56, 0xa3, + 0x5e, 0x00, 0x8e, 0x74, 0x89, 0x75, 0x24, 0xa6, 0x68, 0x38, 0xc4, 0x4d, 0x4c, 0xb8, 0xaa, 0x0f, + 0x02, 0xb7, 0xc7, 0x20, 0x93, 0x42, 0xb7, 0xdd, 0x03, 0xea, 0xcc, 0x35, 0xaf, 0x17, 0x6e, 0xd4, + 0xdb, 0xd7, 0x4f, 0x8e, 0x5b, 0x57, 0x97, 0x1f, 0xc3, 0x87, 0x8f, 0x45, 0x21, 0xf7, 0xa0, 0x61, + 0x3a, 0xfe, 0x96, 0x6b, 0x5b, 0xc6, 0xd1, 0xdc, 0x14, 0x6f, 0xe0, 0x2b, 0xf2, 0x51, 0x1b, 0xab, + 0x77, 0x3b, 0x82, 0xf0, 0xe8, 0xb8, 0x75, 0x75, 0x78, 0x4a, 0x5d, 0x08, 0xe9, 0x18, 0x61, 0x90, + 0x4d, 0x0e, 0xb8, 0xe2, 0x3a, 0x7b, 0x56, 0x77, 0x6e, 0x9a, 0xbf, 0x8d, 0xeb, 0x23, 0x06, 0xf4, + 0xea, 0xdd, 0x8e, 0xe0, 0x6b, 0x4f, 0x4b, 0x71, 0xe2, 0x2f, 0x46, 0x08, 0xc4, 0x84, 0x19, 0x35, + 0x19, 0xaf, 0xd8, 0xba, 0xd5, 0xf3, 0xe7, 0x66, 0xf8, 0xe0, 0xfd, 0xb1, 0x11, 0x98, 0x18, 0x67, + 0x6e, 0x5f, 0x96, 0x8f, 0x32, 0x93, 0x28, 0xf6, 0x31, 0x85, 0x39, 0xff, 0x06, 0x3c, 0x3f, 0x34, + 0x37, 0x90, 0x59, 0x28, 0x1d, 0xd0, 0x23, 0x3e, 0xf5, 0x35, 0x90, 0xfd, 0x24, 0x97, 0xa0, 0x72, + 0xa8, 0xdb, 0x03, 0x3a, 0x57, 0xe4, 0x65, 0xe2, 0xcf, 0x4f, 0x17, 0x5f, 0x2f, 0x68, 0x7f, 0xb5, + 0x04, 0x53, 0x6a, 0xc6, 0xe9, 0x58, 0xce, 0x01, 0x79, 0x1b, 0x4a, 0xb6, 0xdb, 0x95, 0xf3, 0xe6, + 0xcf, 0x8e, 0x3d, 0x8b, 0x6d, 0xb8, 0xdd, 0x76, 0xed, 0xe4, 0xb8, 0x55, 0xda, 0x70, 0xbb, 0xc8, + 0x10, 0x89, 0x01, 0x95, 0x03, 0x7d, 0xef, 0x40, 0xe7, 0x6d, 0x68, 0x2e, 0xb5, 0xc7, 0x86, 0xbe, + 0xc3, 0x50, 0x58, 0x5b, 0xdb, 0x8d, 0x93, 0xe3, 0x56, 0x85, 0xff, 0x45, 0x81, 0x4d, 0x5c, 0x68, + 0xec, 0xda, 0xba, 0x71, 0xb0, 0xef, 0xda, 0x74, 0xae, 0x94, 0x53, 0x50, 0x5b, 0x21, 0x89, 0xd7, + 0x1c, 0xfe, 0xc5, 0x48, 0x06, 0x31, 0xa0, 0x3a, 0x30, 0x7d, 0xcb, 0x39, 0x90, 0x73, 0xe0, 0x1b, + 0x63, 0x4b, 0xdb, 0x59, 0xe5, 0xcf, 0x04, 0x27, 0xc7, 0xad, 0xaa, 0xf8, 0x8d, 0x12, 0x5a, 0xfb, + 0xfd, 0x29, 0x98, 0x51, 0x2f, 0xe9, 0x3e, 0xf5, 0x02, 0xfa, 0x90, 0x5c, 0x87, 0xb2, 0xc3, 0x3e, + 0x4d, 0xfe, 0x92, 0xdb, 0x53, 0x72, 0xb8, 0x94, 0xf9, 0x27, 0xc9, 0x29, 
0xac, 0x65, 0x62, 0xa8, + 0xc8, 0x0e, 0x1f, 0xbf, 0x65, 0x1d, 0x0e, 0x23, 0x5a, 0x26, 0x7e, 0xa3, 0x84, 0x26, 0xef, 0x42, + 0x99, 0x3f, 0xbc, 0xe8, 0xea, 0xaf, 0x8e, 0x2f, 0x82, 0x3d, 0x7a, 0x9d, 0x3d, 0x01, 0x7f, 0x70, + 0x0e, 0xca, 0x86, 0xe2, 0xc0, 0xdc, 0x93, 0x1d, 0xfb, 0xb3, 0x39, 0x3a, 0x76, 0x4d, 0x0c, 0xc5, + 0x9d, 0xd5, 0x35, 0x64, 0x88, 0xe4, 0xcf, 0x17, 0xe0, 0x79, 0xc3, 0x75, 0x02, 0x9d, 0xe9, 0x19, + 0x6a, 0x91, 0x9d, 0xab, 0x70, 0x39, 0x6f, 0x8d, 0x2d, 0x67, 0x25, 0x8d, 0xd8, 0x7e, 0x81, 0xad, + 0x19, 0x43, 0xc5, 0x38, 0x2c, 0x9b, 0xfc, 0xc5, 0x02, 0xbc, 0xc0, 0xe6, 0xf2, 0x21, 0x66, 0xbe, + 0x02, 0x4d, 0xb6, 0x55, 0x57, 0x4e, 0x8e, 0x5b, 0x2f, 0xac, 0x67, 0x09, 0xc3, 0xec, 0x36, 0xb0, + 0xd6, 0x5d, 0xd4, 0x87, 0xd5, 0x12, 0xbe, 0xba, 0x35, 0x97, 0x36, 0x26, 0xa9, 0xea, 0xb4, 0x3f, + 0x23, 0x87, 0x72, 0x96, 0x66, 0x87, 0x59, 0xad, 0x20, 0x37, 0xa1, 0x76, 0xe8, 0xda, 0x83, 0x1e, + 0xf5, 0xe7, 0xea, 0x7c, 0x8a, 0x9d, 0xcf, 0x9a, 0x62, 0xef, 0x73, 0x96, 0xf6, 0x05, 0x09, 0x5f, + 0x13, 0xff, 0x7d, 0x54, 0x75, 0x89, 0x05, 0x55, 0xdb, 0xea, 0x59, 0x81, 0xcf, 0x17, 0xce, 0xe6, + 0xd2, 0xcd, 0xb1, 0x1f, 0x4b, 0x7c, 0xa2, 0x1b, 0x1c, 0x4c, 0x7c, 0x35, 0xe2, 0x37, 0x4a, 0x01, + 0x6c, 0x2a, 0xf4, 0x0d, 0xdd, 0x16, 0x0b, 0x6b, 0x73, 0xe9, 0x6b, 0xe3, 0x7f, 0x36, 0x0c, 0xa5, + 0x3d, 0x2d, 0x9f, 0xa9, 0xc2, 0xff, 0xa2, 0xc0, 0x26, 0x3f, 0x0f, 0x33, 0x89, 0xb7, 0xe9, 0xcf, + 0x35, 0x79, 0xef, 0xbc, 0x94, 0xd5, 0x3b, 0x21, 0x57, 0xb4, 0xf2, 0x24, 0x46, 0x88, 0x8f, 0x29, + 0x30, 0x72, 0x07, 0xea, 0xbe, 0x65, 0x52, 0x43, 0xf7, 0xfc, 0xb9, 0xa9, 0xd3, 0x00, 0xcf, 0x4a, + 0xe0, 0x7a, 0x47, 0x56, 0xc3, 0x10, 0x80, 0x2c, 0x00, 0xf4, 0x75, 0x2f, 0xb0, 0x84, 0xa2, 0x3a, + 0xcd, 0x95, 0xa6, 0x99, 0x93, 0xe3, 0x16, 0x6c, 0x85, 0xa5, 0x18, 0xe3, 0x60, 0xfc, 0xac, 0xee, + 0xba, 0xd3, 0x1f, 0x04, 0x62, 0x61, 0x6d, 0x08, 0xfe, 0x4e, 0x58, 0x8a, 0x31, 0x0e, 0xf2, 0x9b, + 0x05, 0xf8, 0x4c, 0xf4, 0x77, 0xf8, 0x23, 0xbb, 0x30, 0xf1, 0x8f, 0xac, 0x75, 0x72, 0xdc, 0xfa, + 0x4c, 0x67, 
0xb4, 0x48, 0x7c, 0x5c, 0x7b, 0xc8, 0x87, 0x05, 0x98, 0x19, 0xf4, 0x4d, 0x3d, 0xa0, + 0x9d, 0x80, 0xed, 0x78, 0xba, 0x47, 0x73, 0xb3, 0xbc, 0x89, 0xb7, 0xc6, 0x9f, 0x05, 0x13, 0x70, + 0xd1, 0x6b, 0x4e, 0x96, 0x63, 0x4a, 0xac, 0xf6, 0x36, 0x4c, 0x2f, 0x0f, 0x82, 0x7d, 0xd7, 0xb3, + 0x3e, 0xe0, 0xea, 0x3f, 0x59, 0x83, 0x4a, 0xc0, 0xd5, 0x38, 0xa1, 0x21, 0x7c, 0x3e, 0xeb, 0xa5, + 0x0b, 0x95, 0xfa, 0x0e, 0x3d, 0x52, 0x7a, 0x89, 0x58, 0xa9, 0x85, 0x5a, 0x27, 0xaa, 0x6b, 0x7f, + 0xb2, 0x00, 0xb5, 0xb6, 0x6e, 0x1c, 0xb8, 0x7b, 0x7b, 0xe4, 0x1d, 0xa8, 0x5b, 0x4e, 0x40, 0xbd, + 0x43, 0xdd, 0x96, 0xb0, 0x0b, 0x31, 0xd8, 0x70, 0x43, 0x18, 0x3d, 0x1e, 0xdb, 0x7d, 0x31, 0x41, + 0xab, 0x03, 0xb9, 0x6b, 0xe1, 0x9a, 0xf1, 0xba, 0xc4, 0xc0, 0x10, 0x8d, 0xb4, 0xa0, 0xe2, 0x07, + 0xb4, 0xef, 0xf3, 0x35, 0x70, 0x5a, 0x34, 0xa3, 0xc3, 0x0a, 0x50, 0x94, 0x6b, 0x7f, 0xa5, 0x00, + 0x8d, 0xb6, 0xee, 0x5b, 0x06, 0x7b, 0x4a, 0xb2, 0x02, 0xe5, 0x81, 0x4f, 0xbd, 0xb3, 0x3d, 0x1b, + 0x5f, 0xb6, 0x76, 0x7c, 0xea, 0x21, 0xaf, 0x4c, 0xee, 0x41, 0xbd, 0xaf, 0xfb, 0xfe, 0x03, 0xd7, + 0x33, 0xe5, 0xd2, 0x7b, 0x4a, 0x20, 0xb1, 0x4d, 0x90, 0x55, 0x31, 0x04, 0xd1, 0x9a, 0x10, 0xe9, + 0x1e, 0xda, 0xef, 0x16, 0xe0, 0x62, 0x7b, 0xb0, 0xb7, 0x47, 0x3d, 0xa9, 0x15, 0x4b, 0x7d, 0x93, + 0x42, 0xc5, 0xa3, 0xa6, 0xe5, 0xcb, 0xb6, 0xaf, 0x8e, 0x3d, 0x50, 0x90, 0xa1, 0x48, 0xf5, 0x96, + 0xf7, 0x17, 0x2f, 0x40, 0x81, 0x4e, 0x06, 0xd0, 0x78, 0x9f, 0xb2, 0xdd, 0x38, 0xd5, 0x7b, 0xf2, + 0xe9, 0x6e, 0x8f, 0x2d, 0xea, 0x2d, 0x1a, 0x74, 0x38, 0x52, 0x5c, 0x9b, 0x0e, 0x0b, 0x31, 0x92, + 0xa4, 0xfd, 0x76, 0x05, 0xa6, 0x56, 0xdc, 0xde, 0xae, 0xe5, 0x50, 0xf3, 0xa6, 0xd9, 0xa5, 0xe4, + 0x3d, 0x28, 0x53, 0xb3, 0x4b, 0xe5, 0xd3, 0x8e, 0xaf, 0x78, 0x30, 0xb0, 0x48, 0x7d, 0x62, 0xff, + 0x90, 0x03, 0x93, 0x0d, 0x98, 0xd9, 0xf3, 0xdc, 0x9e, 0x98, 0xcb, 0xb7, 0x8f, 0xfa, 0x52, 0x77, + 0x6e, 0xff, 0x98, 0xfa, 0x70, 0xd6, 0x12, 0xd4, 0x47, 0xc7, 0x2d, 0x88, 0xfe, 0x61, 0xaa, 0x2e, + 0x79, 0x07, 0xe6, 0xa2, 0x92, 0x70, 0x52, 0x5b, 
0x61, 0xdb, 0x19, 0xae, 0x3b, 0x55, 0xda, 0x57, + 0x4f, 0x8e, 0x5b, 0x73, 0x6b, 0x23, 0x78, 0x70, 0x64, 0x6d, 0x36, 0x55, 0xcc, 0x46, 0x44, 0xb1, + 0xd0, 0x48, 0x95, 0x69, 0x42, 0x2b, 0x18, 0xdf, 0xf7, 0xad, 0xa5, 0x44, 0xe0, 0x90, 0x50, 0xb2, + 0x06, 0x53, 0x81, 0x1b, 0xeb, 0xaf, 0x0a, 0xef, 0x2f, 0x4d, 0x19, 0x2a, 0xb6, 0xdd, 0x91, 0xbd, + 0x95, 0xa8, 0x47, 0x10, 0x2e, 0xab, 0xff, 0xa9, 0x9e, 0xaa, 0xf2, 0x9e, 0x9a, 0x3f, 0x39, 0x6e, + 0x5d, 0xde, 0xce, 0xe4, 0xc0, 0x11, 0x35, 0xc9, 0x2f, 0x17, 0x60, 0x46, 0x91, 0x64, 0x1f, 0xd5, + 0x26, 0xd9, 0x47, 0x84, 0x8d, 0x88, 0xed, 0x84, 0x00, 0x4c, 0x09, 0xd4, 0xbe, 0x5f, 0x83, 0x46, + 0x38, 0xd5, 0x93, 0xcf, 0x41, 0x85, 0x9b, 0x20, 0xa4, 0x06, 0x1f, 0xae, 0xe1, 0xdc, 0x52, 0x81, + 0x82, 0x46, 0x3e, 0x0f, 0x35, 0xc3, 0xed, 0xf5, 0x74, 0xc7, 0xe4, 0x66, 0xa5, 0x46, 0xbb, 0xc9, + 0x54, 0x97, 0x15, 0x51, 0x84, 0x8a, 0x46, 0xae, 0x42, 0x59, 0xf7, 0xba, 0xc2, 0xc2, 0xd3, 0x10, + 0xf3, 0xd1, 0xb2, 0xd7, 0xf5, 0x91, 0x97, 0x92, 0xaf, 0x40, 0x89, 0x3a, 0x87, 0x73, 0xe5, 0xd1, + 0xba, 0xd1, 0x4d, 0xe7, 0xf0, 0xbe, 0xee, 0xb5, 0x9b, 0xb2, 0x0d, 0xa5, 0x9b, 0xce, 0x21, 0xb2, + 0x3a, 0x64, 0x03, 0x6a, 0xd4, 0x39, 0x64, 0xef, 0x5e, 0x9a, 0x5e, 0x3e, 0x3b, 0xa2, 0x3a, 0x63, + 0x91, 0xdb, 0x84, 0x50, 0xc3, 0x92, 0xc5, 0xa8, 0x20, 0xc8, 0x37, 0x60, 0x4a, 0x28, 0x5b, 0x9b, + 0xec, 0x9d, 0xf8, 0x73, 0x55, 0x0e, 0xd9, 0x1a, 0xad, 0xad, 0x71, 0xbe, 0xc8, 0xd4, 0x15, 0x2b, + 0xf4, 0x31, 0x01, 0x45, 0xbe, 0x01, 0x0d, 0xb5, 0x33, 0x56, 0x6f, 0x36, 0xd3, 0x4a, 0xa4, 0xb6, + 0xd3, 0x48, 0xbf, 0x35, 0xb0, 0x3c, 0xda, 0xa3, 0x4e, 0xe0, 0xb7, 0x9f, 0x57, 0x76, 0x03, 0x45, + 0xf5, 0x31, 0x42, 0x23, 0xbb, 0xc3, 0xe6, 0x2e, 0x61, 0xab, 0xf9, 0xdc, 0x88, 0x59, 0x7d, 0x0c, + 0x5b, 0xd7, 0x37, 0xe1, 0x42, 0x68, 0x8f, 0x92, 0x26, 0x0d, 0x61, 0xbd, 0xf9, 0x12, 0xab, 0xbe, + 0x9e, 0x24, 0x3d, 0x3a, 0x6e, 0xbd, 0x94, 0x61, 0xd4, 0x88, 0x18, 0x30, 0x0d, 0x46, 0x3e, 0x80, + 0x19, 0x8f, 0xea, 0xa6, 0xe5, 0x50, 0xdf, 0xdf, 0xf2, 0xdc, 0xdd, 0xfc, 0x9a, 0x27, 
0x47, 0x11, + 0xc3, 0x1e, 0x13, 0xc8, 0x98, 0x92, 0x44, 0x1e, 0xc0, 0xb4, 0x6d, 0x1d, 0xd2, 0x48, 0x74, 0x73, + 0x22, 0xa2, 0x9f, 0x3f, 0x39, 0x6e, 0x4d, 0x6f, 0xc4, 0x81, 0x31, 0x29, 0x87, 0x69, 0x2a, 0x7d, + 0xd7, 0x0b, 0x94, 0x7a, 0xfa, 0xd9, 0xc7, 0xaa, 0xa7, 0x5b, 0xae, 0x17, 0x44, 0x1f, 0x21, 0xfb, + 0xe7, 0xa3, 0xa8, 0xae, 0xfd, 0x8d, 0x0a, 0x0c, 0x6f, 0xe2, 0x92, 0x23, 0xae, 0x30, 0xe9, 0x11, + 0x97, 0x1e, 0x0d, 0x62, 0xed, 0x79, 0x5d, 0x56, 0x9b, 0xc0, 0x88, 0xc8, 0x18, 0xd5, 0xa5, 0x49, + 0x8f, 0xea, 0x67, 0x66, 0xe2, 0x19, 0x1e, 0xfe, 0xd5, 0x8f, 0x6f, 0xf8, 0xd7, 0x9e, 0xce, 0xf0, + 0xd7, 0xbe, 0x57, 0x86, 0x99, 0x55, 0x9d, 0xf6, 0x5c, 0xe7, 0x89, 0xfb, 0xf8, 0xc2, 0x33, 0xb1, + 0x8f, 0xbf, 0x01, 0x75, 0x8f, 0xf6, 0x6d, 0xcb, 0xd0, 0x85, 0xba, 0x2e, 0xed, 0xe6, 0x28, 0xcb, + 0x30, 0xa4, 0x8e, 0xb0, 0xdf, 0x94, 0x9e, 0x49, 0xfb, 0x4d, 0xf9, 0xe3, 0xb7, 0xdf, 0x68, 0xbf, + 0x5c, 0x04, 0xae, 0xda, 0x92, 0xeb, 0x50, 0x66, 0x6a, 0x5b, 0xda, 0x6a, 0xc8, 0xbf, 0x16, 0x4e, + 0x21, 0xf3, 0x50, 0x0c, 0x5c, 0x39, 0xdd, 0x80, 0xa4, 0x17, 0xb7, 0x5d, 0x2c, 0x06, 0x2e, 0xf9, + 0x00, 0xc0, 0x70, 0x1d, 0xd3, 0x52, 0xee, 0xa4, 0x7c, 0x0f, 0xb6, 0xe6, 0x7a, 0x0f, 0x74, 0xcf, + 0x5c, 0x09, 0x11, 0xc5, 0x0e, 0x3e, 0xfa, 0x8f, 0x31, 0x69, 0xe4, 0x0d, 0xa8, 0xba, 0xce, 0xda, + 0xc0, 0xb6, 0x79, 0x87, 0x36, 0xda, 0x5f, 0x38, 0x39, 0x6e, 0x55, 0xef, 0xf1, 0x92, 0x47, 0xc7, + 0xad, 0x2b, 0x62, 0x47, 0xc4, 0xfe, 0xbd, 0xed, 0x59, 0x81, 0xe5, 0x74, 0xc3, 0x0d, 0xad, 0xac, + 0xa6, 0xfd, 0x4a, 0x01, 0x9a, 0x6b, 0xd6, 0x43, 0x6a, 0xbe, 0x6d, 0x39, 0xa6, 0xfb, 0x80, 0x20, + 0x54, 0x6d, 0xea, 0x74, 0x83, 0xfd, 0x31, 0x77, 0x9c, 0xc2, 0xae, 0xc3, 0x11, 0x50, 0x22, 0x91, + 0x45, 0x68, 0x88, 0xfd, 0x8a, 0xe5, 0x74, 0x79, 0x1f, 0xd6, 0xa3, 0x99, 0xbe, 0xa3, 0x08, 0x18, + 0xf1, 0x68, 0x47, 0xf0, 0xfc, 0x50, 0x37, 0x10, 0x13, 0xca, 0x81, 0xde, 0x55, 0x8b, 0xca, 0xda, + 0xd8, 0x1d, 0xbc, 0xad, 0x77, 0x63, 0x9d, 0xcb, 0xb5, 0xc2, 0x6d, 0x9d, 0x69, 0x85, 0x0c, 0x5d, + 0xfb, 0x3f, 0x05, 0xa8, 
0xaf, 0x0d, 0x1c, 0x83, 0x6f, 0xea, 0x9f, 0x6c, 0x4d, 0x56, 0x2a, 0x66, + 0x31, 0x53, 0xc5, 0x1c, 0x40, 0xf5, 0xe0, 0x41, 0xa8, 0x82, 0x36, 0x97, 0x36, 0xc7, 0x1f, 0x15, + 0xb2, 0x49, 0x0b, 0x77, 0x38, 0x9e, 0x70, 0x76, 0xce, 0xc8, 0x06, 0x55, 0xef, 0xbc, 0xcd, 0x85, + 0x4a, 0x61, 0xf3, 0x5f, 0x81, 0x66, 0x8c, 0xed, 0x4c, 0x7e, 0x8f, 0xbf, 0x59, 0x86, 0xea, 0xad, + 0x4e, 0x67, 0x79, 0x6b, 0x9d, 0xbc, 0x0a, 0x4d, 0xe9, 0x07, 0xbb, 0x1b, 0xf5, 0x41, 0xe8, 0x06, + 0xed, 0x44, 0x24, 0x8c, 0xf3, 0x31, 0x05, 0xde, 0xa3, 0xba, 0xdd, 0x93, 0x1f, 0x4b, 0xa8, 0x3b, + 0x20, 0x2b, 0x44, 0x41, 0x23, 0x3a, 0xcc, 0x0c, 0x7c, 0xea, 0xb1, 0x2e, 0x14, 0xfb, 0x7d, 0xf9, + 0xd9, 0x9c, 0xd2, 0x22, 0xc0, 0x17, 0x98, 0x9d, 0x04, 0x00, 0xa6, 0x00, 0xc9, 0xeb, 0x50, 0xd7, + 0x07, 0xc1, 0x3e, 0xdf, 0x72, 0x89, 0x6f, 0xe3, 0x2a, 0x77, 0x13, 0xca, 0xb2, 0x47, 0xc7, 0xad, + 0xa9, 0x3b, 0xd8, 0x7e, 0x55, 0xfd, 0xc7, 0x90, 0x9b, 0x35, 0x4e, 0xd9, 0x18, 0x64, 0xe3, 0x2a, + 0x67, 0x6e, 0xdc, 0x56, 0x02, 0x00, 0x53, 0x80, 0xe4, 0x5d, 0x98, 0x3a, 0xa0, 0x47, 0x81, 0xbe, + 0x2b, 0x05, 0x54, 0xcf, 0x22, 0x60, 0x96, 0x29, 0xfd, 0x77, 0x62, 0xd5, 0x31, 0x01, 0x46, 0x7c, + 0xb8, 0x74, 0x40, 0xbd, 0x5d, 0xea, 0xb9, 0xd2, 0x5e, 0x21, 0x85, 0xd4, 0xce, 0x22, 0x64, 0xee, + 0xe4, 0xb8, 0x75, 0xe9, 0x4e, 0x06, 0x0c, 0x66, 0x82, 0x6b, 0xff, 0xbb, 0x08, 0x17, 0x6e, 0x89, + 0x40, 0x04, 0xd7, 0x13, 0x9a, 0x07, 0xb9, 0x02, 0x25, 0xaf, 0x3f, 0xe0, 0x23, 0xa7, 0x24, 0x5c, + 0x0d, 0xb8, 0xb5, 0x83, 0xac, 0x8c, 0xbc, 0x03, 0x75, 0x53, 0x4e, 0x19, 0xd2, 0x5c, 0x32, 0x96, + 0x69, 0x4b, 0xfd, 0xc3, 0x10, 0x8d, 0xed, 0x0d, 0x7b, 0x7e, 0xb7, 0x63, 0x7d, 0x40, 0xa5, 0x05, + 0x81, 0xef, 0x0d, 0x37, 0x45, 0x11, 0x2a, 0x1a, 0x5b, 0x55, 0x0f, 0xe8, 0x91, 0xd8, 0x3f, 0x97, + 0xa3, 0x55, 0xf5, 0x8e, 0x2c, 0xc3, 0x90, 0x4a, 0x5a, 0xea, 0x63, 0x61, 0xa3, 0xa0, 0x2c, 0x6c, + 0x3f, 0xf7, 0x59, 0x81, 0xfc, 0x6e, 0xd8, 0x94, 0xf9, 0xbe, 0x15, 0x04, 0xd4, 0x93, 0xaf, 0x71, + 0xac, 0x29, 0xf3, 0x2d, 0x8e, 0x80, 0x12, 0x89, 0xfc, 0x04, 
0x34, 0x38, 0x78, 0xdb, 0x76, 0x77, + 0xf9, 0x8b, 0x6b, 0x08, 0x2b, 0xd0, 0x7d, 0x55, 0x88, 0x11, 0x5d, 0xfb, 0x83, 0x22, 0x5c, 0xbe, + 0x45, 0x03, 0xa1, 0xd5, 0xac, 0xd2, 0xbe, 0xed, 0x1e, 0x31, 0x7d, 0x1a, 0xe9, 0xb7, 0xc8, 0x9b, + 0x00, 0x96, 0xbf, 0xdb, 0x39, 0x34, 0xf8, 0x77, 0x20, 0xbe, 0xe1, 0xeb, 0xf2, 0x93, 0x84, 0xf5, + 0x4e, 0x5b, 0x52, 0x1e, 0x25, 0xfe, 0x61, 0xac, 0x4e, 0xb4, 0x21, 0x2f, 0x3e, 0x66, 0x43, 0xde, + 0x01, 0xe8, 0x47, 0x5a, 0x79, 0x89, 0x73, 0xfe, 0x94, 0x12, 0x73, 0x16, 0x85, 0x3c, 0x06, 0x93, + 0x47, 0x4f, 0x76, 0x60, 0xd6, 0xa4, 0x7b, 0xfa, 0xc0, 0x0e, 0xc2, 0x9d, 0x84, 0xfc, 0x88, 0x4f, + 0xbf, 0x19, 0x09, 0x83, 0x24, 0x56, 0x53, 0x48, 0x38, 0x84, 0xad, 0xfd, 0xad, 0x12, 0xcc, 0xdf, + 0xa2, 0x41, 0x68, 0xa3, 0x93, 0xb3, 0x63, 0xa7, 0x4f, 0x0d, 0xf6, 0x16, 0x3e, 0x2c, 0x40, 0xd5, + 0xd6, 0x77, 0xa9, 0xcd, 0x56, 0x2f, 0xf6, 0x34, 0xef, 0x8d, 0xbd, 0x10, 0x8c, 0x96, 0xb2, 0xb0, + 0xc1, 0x25, 0xa4, 0x96, 0x06, 0x51, 0x88, 0x52, 0x3c, 0x9b, 0xd4, 0x0d, 0x7b, 0xe0, 0x07, 0x62, + 0x67, 0x27, 0xf5, 0xc9, 0x70, 0x52, 0x5f, 0x89, 0x48, 0x18, 0xe7, 0x23, 0x4b, 0x00, 0x86, 0x6d, + 0x51, 0x27, 0xe0, 0xb5, 0xc4, 0x77, 0x45, 0xd4, 0xfb, 0x5d, 0x09, 0x29, 0x18, 0xe3, 0x62, 0xa2, + 0x7a, 0xae, 0x63, 0x05, 0xae, 0x10, 0x55, 0x4e, 0x8a, 0xda, 0x8c, 0x48, 0x18, 0xe7, 0xe3, 0xd5, + 0x68, 0xe0, 0x59, 0x86, 0xcf, 0xab, 0x55, 0x52, 0xd5, 0x22, 0x12, 0xc6, 0xf9, 0xd8, 0x9a, 0x17, + 0x7b, 0xfe, 0x33, 0xad, 0x79, 0xbf, 0xd1, 0x80, 0x6b, 0x89, 0x6e, 0x0d, 0xf4, 0x80, 0xee, 0x0d, + 0xec, 0x0e, 0x0d, 0xd4, 0x0b, 0x1c, 0x73, 0x2d, 0xfc, 0x33, 0xd1, 0x7b, 0x17, 0xe1, 0x4f, 0xc6, + 0x64, 0xde, 0xfb, 0x50, 0x03, 0x4f, 0xf5, 0xee, 0x17, 0xa1, 0xe1, 0xe8, 0x81, 0xcf, 0x3f, 0x5c, + 0xf9, 0x8d, 0x86, 0x6a, 0xd8, 0x5d, 0x45, 0xc0, 0x88, 0x87, 0x6c, 0xc1, 0x25, 0xd9, 0xc5, 0x37, + 0x1f, 0xb2, 0x3d, 0x3f, 0xf5, 0x44, 0x5d, 0xb9, 0x9c, 0xca, 0xba, 0x97, 0x36, 0x33, 0x78, 0x30, + 0xb3, 0x26, 0xd9, 0x84, 0x8b, 0x86, 0x08, 0x09, 0xa1, 0xb6, 0xab, 0x9b, 0x0a, 0x50, 0x98, 0x44, + 
0xc3, 0xad, 0xd1, 0xca, 0x30, 0x0b, 0x66, 0xd5, 0x4b, 0x8f, 0xe6, 0xea, 0x58, 0xa3, 0xb9, 0x36, + 0xce, 0x68, 0xae, 0x8f, 0x37, 0x9a, 0x1b, 0xa7, 0x1b, 0xcd, 0xac, 0xe7, 0xd9, 0x38, 0xa2, 0x1e, + 0x53, 0x4f, 0xc4, 0x0a, 0x1b, 0x8b, 0x38, 0x0a, 0x7b, 0xbe, 0x93, 0xc1, 0x83, 0x99, 0x35, 0xc9, + 0x2e, 0xcc, 0x8b, 0xf2, 0x9b, 0x8e, 0xe1, 0x1d, 0xf5, 0xd9, 0xc2, 0x13, 0xc3, 0x6d, 0x26, 0x6c, + 0xd2, 0xf3, 0x9d, 0x91, 0x9c, 0xf8, 0x18, 0x14, 0xf2, 0x33, 0x30, 0x2d, 0xde, 0xd2, 0xa6, 0xde, + 0xe7, 0xb0, 0x22, 0xfe, 0xe8, 0x05, 0x09, 0x3b, 0xbd, 0x12, 0x27, 0x62, 0x92, 0x97, 0x2c, 0xc3, + 0x85, 0xfe, 0xa1, 0xc1, 0x7e, 0xae, 0xef, 0xdd, 0xa5, 0xd4, 0xa4, 0x26, 0x77, 0x78, 0x36, 0xda, + 0x2f, 0x2a, 0xeb, 0xce, 0x56, 0x92, 0x8c, 0x69, 0x7e, 0xf2, 0x3a, 0x4c, 0xf9, 0x81, 0xee, 0x05, + 0xd2, 0x10, 0x3c, 0x37, 0x23, 0xe2, 0xb3, 0x94, 0x9d, 0xb4, 0x13, 0xa3, 0x61, 0x82, 0x33, 0x73, + 0xbd, 0xb8, 0x70, 0x7e, 0xeb, 0x45, 0x9e, 0xd9, 0xea, 0x1f, 0x15, 0xe1, 0xfa, 0x2d, 0x1a, 0x6c, + 0xba, 0x8e, 0x34, 0xa3, 0x67, 0x2d, 0xfb, 0xa7, 0xb2, 0xa2, 0x27, 0x17, 0xed, 0xe2, 0x44, 0x17, + 0xed, 0xd2, 0x84, 0x16, 0xed, 0xf2, 0x39, 0x2e, 0xda, 0x7f, 0xa7, 0x08, 0x2f, 0x26, 0x7a, 0x72, + 0xcb, 0x35, 0xd5, 0x84, 0xff, 0x69, 0x07, 0x9e, 0xa2, 0x03, 0x1f, 0x09, 0xbd, 0x93, 0x3b, 0x42, + 0x53, 0x1a, 0xcf, 0x77, 0xd3, 0x1a, 0xcf, 0xbb, 0x79, 0x56, 0xbe, 0x0c, 0x09, 0xa7, 0x5a, 0xf1, + 0xde, 0x02, 0xe2, 0x49, 0xb7, 0x6d, 0x64, 0xce, 0x96, 0x4a, 0x4f, 0x18, 0x00, 0x8a, 0x43, 0x1c, + 0x98, 0x51, 0x8b, 0x74, 0xe0, 0x05, 0x9f, 0x3a, 0x81, 0xe5, 0x50, 0x3b, 0x09, 0x27, 0xb4, 0xa1, + 0x97, 0x24, 0xdc, 0x0b, 0x9d, 0x2c, 0x26, 0xcc, 0xae, 0x9b, 0x67, 0x1e, 0xf8, 0x67, 0xc0, 0x55, + 0x4e, 0xd1, 0x35, 0x13, 0xd3, 0x58, 0x3e, 0x4c, 0x6b, 0x2c, 0xef, 0xe5, 0x7f, 0x6f, 0xe3, 0x69, + 0x2b, 0x4b, 0x00, 0xfc, 0x2d, 0xc4, 0xd5, 0x95, 0x70, 0x91, 0xc6, 0x90, 0x82, 0x31, 0x2e, 0xb6, + 0x00, 0xa9, 0x7e, 0x8e, 0x6b, 0x2a, 0xe1, 0x02, 0xd4, 0x89, 0x13, 0x31, 0xc9, 0x3b, 0x52, 0xdb, + 0xa9, 0x8c, 0xad, 0xed, 0xbc, 0x05, 
0x24, 0x61, 0x78, 0x14, 0x78, 0xd5, 0x64, 0xfc, 0xf1, 0xfa, + 0x10, 0x07, 0x66, 0xd4, 0x1a, 0x31, 0x94, 0x6b, 0x93, 0x1d, 0xca, 0xf5, 0xf1, 0x87, 0x32, 0x79, + 0x0f, 0xae, 0x70, 0x51, 0xb2, 0x7f, 0x92, 0xc0, 0x42, 0xef, 0xf9, 0xac, 0x04, 0xbe, 0x82, 0xa3, + 0x18, 0x71, 0x34, 0x06, 0x7b, 0x3f, 0x86, 0x47, 0x4d, 0x26, 0x5c, 0xb7, 0x47, 0xeb, 0x44, 0x2b, + 0x19, 0x3c, 0x98, 0x59, 0x93, 0x0d, 0xb1, 0x80, 0x0d, 0x43, 0x7d, 0xd7, 0xa6, 0xa6, 0x8c, 0xbf, + 0x0e, 0x87, 0xd8, 0xf6, 0x46, 0x47, 0x52, 0x30, 0xc6, 0x95, 0xa5, 0xa6, 0x4c, 0x9d, 0x51, 0x4d, + 0xb9, 0xc5, 0xad, 0xf4, 0x7b, 0x09, 0x6d, 0x48, 0xea, 0x3a, 0x61, 0x44, 0xfd, 0x4a, 0x9a, 0x01, + 0x87, 0xeb, 0x70, 0x2d, 0xd1, 0xf0, 0xac, 0x7e, 0xe0, 0x27, 0xb1, 0x66, 0x52, 0x5a, 0x62, 0x06, + 0x0f, 0x66, 0xd6, 0x64, 0xfa, 0xf9, 0x3e, 0xd5, 0xed, 0x60, 0x3f, 0x09, 0x78, 0x21, 0xa9, 0x9f, + 0xdf, 0x1e, 0x66, 0xc1, 0xac, 0x7a, 0x99, 0x0b, 0xd2, 0xec, 0xb3, 0xa9, 0x56, 0x7d, 0xa7, 0x04, + 0x57, 0x6e, 0xd1, 0x20, 0x0c, 0x4d, 0xfb, 0xd4, 0x8c, 0xf2, 0x31, 0x98, 0x51, 0x7e, 0xbd, 0x02, + 0x17, 0x6f, 0xd1, 0x60, 0x48, 0x1b, 0xfb, 0xff, 0xb4, 0xfb, 0x37, 0xe1, 0x62, 0x14, 0x0d, 0xd9, + 0x09, 0x5c, 0x4f, 0xac, 0xe5, 0xa9, 0xdd, 0x72, 0x67, 0x98, 0x05, 0xb3, 0xea, 0x91, 0x6f, 0xc0, + 0x8b, 0x7c, 0xa9, 0x77, 0xba, 0xc2, 0x3e, 0x2b, 0x8c, 0x09, 0xb1, 0xf3, 0x3c, 0x2d, 0x09, 0xf9, + 0x62, 0x27, 0x9b, 0x0d, 0x47, 0xd5, 0x27, 0xdf, 0x86, 0xa9, 0xbe, 0xd5, 0xa7, 0xb6, 0xe5, 0x70, + 0xfd, 0x2c, 0x77, 0x10, 0xd1, 0x56, 0x0c, 0x2c, 0xda, 0xc0, 0xc5, 0x4b, 0x31, 0x21, 0x30, 0x73, + 0xa4, 0xd6, 0xcf, 0x71, 0xa4, 0xfe, 0x8f, 0x22, 0xd4, 0x6e, 0x79, 0xee, 0xa0, 0xdf, 0x3e, 0x22, + 0x5d, 0xa8, 0x3e, 0xe0, 0xce, 0x33, 0xe9, 0x9a, 0x1a, 0xff, 0x44, 0x81, 0xf0, 0xc1, 0x45, 0x2a, + 0x91, 0xf8, 0x8f, 0x12, 0x9e, 0x0d, 0xe2, 0x03, 0x7a, 0x44, 0x4d, 0xe9, 0x43, 0x0b, 0x07, 0xf1, + 0x1d, 0x56, 0x88, 0x82, 0x46, 0x7a, 0x70, 0x41, 0xb7, 0x6d, 0xf7, 0x01, 0x35, 0x37, 0xf4, 0x80, + 0xfb, 0xbd, 0xa5, 0x6f, 0xe5, 0xac, 0x66, 0x69, 0x1e, 0xcc, 0xb0, 0x9c, 
0x84, 0xc2, 0x34, 0x36, + 0x79, 0x1f, 0x6a, 0x7e, 0xe0, 0x7a, 0x4a, 0xd9, 0x6a, 0x2e, 0xad, 0x8c, 0xff, 0xd2, 0xdb, 0x5f, + 0xef, 0x08, 0x28, 0x61, 0xb3, 0x97, 0x7f, 0x50, 0x09, 0xd0, 0x7e, 0xad, 0x00, 0x70, 0x7b, 0x7b, + 0x7b, 0x4b, 0xba, 0x17, 0x4c, 0x28, 0xeb, 0x83, 0xd0, 0x51, 0x39, 0xbe, 0x43, 0x30, 0x11, 0xc8, + 0x2b, 0x7d, 0x78, 0x83, 0x60, 0x1f, 0x39, 0x3a, 0xf9, 0x71, 0xa8, 0x49, 0x05, 0x59, 0x76, 0x7b, + 0x18, 0x4f, 0x21, 0x95, 0x68, 0x54, 0x74, 0xed, 0xb7, 0x8a, 0x00, 0xeb, 0xa6, 0x4d, 0x3b, 0xea, + 0x10, 0x48, 0x23, 0xd8, 0xf7, 0xa8, 0xbf, 0xef, 0xda, 0xe6, 0x98, 0xde, 0x54, 0x6e, 0xf3, 0xdf, + 0x56, 0x20, 0x18, 0xe1, 0x11, 0x13, 0xa6, 0xfc, 0x80, 0xf6, 0x55, 0x6c, 0xef, 0x98, 0x4e, 0x94, + 0x59, 0x61, 0x17, 0x89, 0x70, 0x30, 0x81, 0x4a, 0x74, 0x68, 0x5a, 0x8e, 0x21, 0x3e, 0x90, 0xf6, + 0xd1, 0x98, 0x03, 0xe9, 0x02, 0xdb, 0x71, 0xac, 0x47, 0x30, 0x18, 0xc7, 0xd4, 0x7e, 0xa7, 0x08, + 0x97, 0xb9, 0x3c, 0xd6, 0x8c, 0x44, 0x04, 0x2f, 0xf9, 0xe3, 0x43, 0x07, 0x56, 0xff, 0xe8, 0xe9, + 0x44, 0x8b, 0xf3, 0x8e, 0x9b, 0x34, 0xd0, 0x23, 0x7d, 0x2e, 0x2a, 0x8b, 0x9d, 0x52, 0x1d, 0x40, + 0xd9, 0x67, 0xf3, 0x95, 0xe8, 0xbd, 0xce, 0xd8, 0x43, 0x28, 0xfb, 0x01, 0xf8, 0xec, 0x15, 0x7a, + 0x8d, 0xf9, 0xac, 0xc5, 0xc5, 0x91, 0x5f, 0x84, 0xaa, 0x1f, 0xe8, 0xc1, 0x40, 0x7d, 0x9a, 0x3b, + 0x93, 0x16, 0xcc, 0xc1, 0xa3, 0x79, 0x44, 0xfc, 0x47, 0x29, 0x54, 0xfb, 0x9d, 0x02, 0xcc, 0x67, + 0x57, 0xdc, 0xb0, 0xfc, 0x80, 0xfc, 0xb1, 0xa1, 0x6e, 0x3f, 0xe5, 0x1b, 0x67, 0xb5, 0x79, 0xa7, + 0x87, 0x67, 0x1a, 0x54, 0x49, 0xac, 0xcb, 0x03, 0xa8, 0x58, 0x01, 0xed, 0xa9, 0xfd, 0xe5, 0xbd, + 0x09, 0x3f, 0x7a, 0x6c, 0x69, 0x67, 0x52, 0x50, 0x08, 0xd3, 0xbe, 0x57, 0x1c, 0xf5, 0xc8, 0x7c, + 0xf9, 0xb0, 0x93, 0x51, 0xe2, 0x77, 0xf2, 0x45, 0x89, 0x27, 0x1b, 0x34, 0x1c, 0x2c, 0xfe, 0x27, + 0x86, 0x83, 0xc5, 0xef, 0xe5, 0x0f, 0x16, 0x4f, 0x75, 0xc3, 0xc8, 0x98, 0xf1, 0x8f, 0x4a, 0x70, + 0xf5, 0x71, 0xc3, 0x86, 0xad, 0x67, 0x72, 0x74, 0xe6, 0x5d, 0xcf, 0x1e, 0x3f, 0x0e, 0xc9, 0x12, + 0x54, 0xfa, 
0xfb, 0xba, 0xaf, 0x94, 0xb2, 0xab, 0x61, 0x98, 0x21, 0x2b, 0x7c, 0xc4, 0x26, 0x0d, + 0xae, 0xcc, 0xf1, 0xbf, 0x28, 0x58, 0xd9, 0x74, 0xdc, 0xa3, 0xbe, 0x1f, 0xd9, 0x04, 0xc2, 0xe9, + 0x78, 0x53, 0x14, 0xa3, 0xa2, 0x93, 0x00, 0xaa, 0xc2, 0xc4, 0x2c, 0x57, 0xa6, 0xf1, 0x03, 0xb9, + 0x32, 0x0e, 0x16, 0x44, 0x0f, 0x25, 0xbd, 0x15, 0x52, 0x16, 0x59, 0x80, 0x72, 0x10, 0x85, 0x79, + 0xab, 0xad, 0x79, 0x39, 0x43, 0x3f, 0xe5, 0x7c, 0x6c, 0x63, 0xef, 0xee, 0x72, 0xa3, 0xba, 0x29, + 0xfd, 0xe7, 0x96, 0xeb, 0x70, 0x85, 0xac, 0x14, 0x6d, 0xec, 0xef, 0x0d, 0x71, 0x60, 0x46, 0x2d, + 0xed, 0x5f, 0xd6, 0xe1, 0x72, 0xf6, 0x78, 0x60, 0xfd, 0x76, 0x48, 0x3d, 0x9f, 0x61, 0x17, 0x92, + 0xfd, 0x76, 0x5f, 0x14, 0xa3, 0xa2, 0x7f, 0xa2, 0x03, 0xce, 0x7e, 0xbd, 0x00, 0x57, 0x3c, 0xe9, + 0x23, 0x7a, 0x1a, 0x41, 0x67, 0x2f, 0x09, 0x73, 0xc6, 0x08, 0x81, 0x38, 0xba, 0x2d, 0xe4, 0xaf, + 0x15, 0x60, 0xae, 0x97, 0xb2, 0x73, 0x9c, 0xe3, 0x99, 0x4b, 0x7e, 0x8e, 0x62, 0x73, 0x84, 0x3c, + 0x1c, 0xd9, 0x12, 0xf2, 0x6d, 0x68, 0xf6, 0xd9, 0xb8, 0xf0, 0x03, 0xea, 0x18, 0x2a, 0x40, 0x74, + 0xfc, 0x2f, 0x69, 0x2b, 0xc2, 0x0a, 0xcf, 0x5c, 0x71, 0xfd, 0x20, 0x46, 0xc0, 0xb8, 0xc4, 0x67, + 0xfc, 0x90, 0xe5, 0x0d, 0xa8, 0xfb, 0x34, 0x08, 0x2c, 0xa7, 0x2b, 0xf6, 0x1b, 0x0d, 0xf1, 0xad, + 0x74, 0x64, 0x19, 0x86, 0x54, 0xf2, 0x13, 0xd0, 0xe0, 0x2e, 0xa7, 0x65, 0xaf, 0xeb, 0xcf, 0x35, + 0x78, 0xb8, 0xd8, 0xb4, 0x08, 0x80, 0x93, 0x85, 0x18, 0xd1, 0xc9, 0x97, 0x60, 0x6a, 0x97, 0x7f, + 0xbe, 0xf2, 0xdc, 0xbd, 0xb0, 0x71, 0x71, 0x6d, 0xad, 0x1d, 0x2b, 0xc7, 0x04, 0x17, 0x59, 0x02, + 0xa0, 0xa1, 0x5f, 0x2e, 0x6d, 0xcf, 0x8a, 0x3c, 0x76, 0x18, 0xe3, 0x22, 0x2f, 0x41, 0x29, 0xb0, + 0x7d, 0x6e, 0xc3, 0xaa, 0x47, 0x5b, 0xd0, 0xed, 0x8d, 0x0e, 0xb2, 0x72, 0xed, 0x0f, 0x0a, 0x70, + 0x21, 0x75, 0x1c, 0x89, 0x55, 0x19, 0x78, 0xb6, 0x9c, 0x46, 0xc2, 0x2a, 0x3b, 0xb8, 0x81, 0xac, + 0x9c, 0xbc, 0x27, 0xd5, 0xf2, 0x62, 0xce, 0x14, 0x23, 0x77, 0xf5, 0xc0, 0x67, 0x7a, 0xf8, 0x90, + 0x46, 0xce, 0xdd, 0x7c, 0x51, 0x7b, 0xe4, 0x3a, 
0x10, 0x73, 0xf3, 0x45, 0x34, 0x4c, 0x70, 0xa6, + 0x0c, 0x7e, 0xe5, 0xd3, 0x18, 0xfc, 0xb4, 0x5f, 0x29, 0xc6, 0x7a, 0x40, 0x6a, 0xf6, 0x4f, 0xe8, + 0x81, 0x97, 0xd9, 0x02, 0x1a, 0x2e, 0xee, 0x8d, 0xf8, 0xfa, 0xc7, 0x17, 0x63, 0x49, 0x25, 0x6f, + 0x8b, 0xbe, 0x2f, 0xe5, 0x3c, 0xc8, 0xbd, 0xbd, 0xd1, 0x11, 0xd1, 0x55, 0xea, 0xad, 0x85, 0xaf, + 0xa0, 0x7c, 0x4e, 0xaf, 0x40, 0xfb, 0x27, 0x25, 0x68, 0xbe, 0xe5, 0xee, 0x7e, 0x42, 0x22, 0xa8, + 0xb3, 0x97, 0xa9, 0xe2, 0xc7, 0xb8, 0x4c, 0xed, 0xc0, 0x8b, 0x41, 0x60, 0x77, 0xa8, 0xe1, 0x3a, + 0xa6, 0xbf, 0xbc, 0x17, 0x50, 0x6f, 0xcd, 0x72, 0x2c, 0x7f, 0x9f, 0x9a, 0xd2, 0x9d, 0xf4, 0x99, + 0x93, 0xe3, 0xd6, 0x8b, 0xdb, 0xdb, 0x1b, 0x59, 0x2c, 0x38, 0xaa, 0x2e, 0x9f, 0x36, 0xc4, 0xd9, + 0x51, 0x7e, 0xb6, 0x4a, 0xc6, 0xdc, 0x88, 0x69, 0x23, 0x56, 0x8e, 0x09, 0x2e, 0xed, 0xdf, 0x15, + 0xa1, 0x11, 0x26, 0x8f, 0x20, 0x9f, 0x87, 0xda, 0xae, 0xe7, 0x1e, 0x50, 0x4f, 0x78, 0xee, 0xe4, + 0xd9, 0xaa, 0xb6, 0x28, 0x42, 0x45, 0x23, 0x9f, 0x83, 0x4a, 0xe0, 0xf6, 0x2d, 0x23, 0x6d, 0x50, + 0xdb, 0x66, 0x85, 0x28, 0x68, 0xfc, 0x43, 0xe0, 0x61, 0x85, 0xfc, 0xa9, 0xea, 0xb1, 0x0f, 0x81, + 0x97, 0xa2, 0xa4, 0xaa, 0x0f, 0xa1, 0x3c, 0xf1, 0x0f, 0xe1, 0xe5, 0x50, 0x05, 0xac, 0x24, 0xbf, + 0xc4, 0x94, 0xd2, 0xf6, 0x2e, 0x94, 0x7d, 0xdd, 0xb7, 0xe5, 0xf2, 0x96, 0x23, 0x5f, 0xc3, 0x72, + 0x67, 0x43, 0xe6, 0x6b, 0x58, 0xee, 0x6c, 0x20, 0x07, 0xd5, 0x7e, 0xab, 0x04, 0x4d, 0xd1, 0xbf, + 0x62, 0xf6, 0x98, 0x64, 0x0f, 0xbf, 0xc1, 0x43, 0x2e, 0xfc, 0x41, 0x8f, 0x7a, 0xdc, 0x1c, 0x25, + 0x27, 0xc3, 0xb8, 0x1f, 0x21, 0x22, 0x86, 0x61, 0x17, 0x51, 0xd1, 0x1f, 0xee, 0xae, 0x67, 0x4b, + 0x05, 0x4f, 0x80, 0x22, 0x75, 0x5c, 0x19, 0x49, 0x19, 0x2e, 0x15, 0x77, 0x62, 0x34, 0x4c, 0x70, + 0x6a, 0xff, 0xbd, 0x08, 0x8d, 0x0d, 0x6b, 0x8f, 0x1a, 0x47, 0x86, 0x4d, 0xc9, 0x37, 0x61, 0xde, + 0xa4, 0x36, 0x65, 0x2b, 0xe6, 0x2d, 0x4f, 0x37, 0xe8, 0x16, 0xf5, 0x2c, 0x9e, 0xc0, 0x89, 0x7d, + 0x83, 0x32, 0xc0, 0xf5, 0xda, 0xc9, 0x71, 0x6b, 0x7e, 0x75, 0x24, 0x17, 0x3e, 0x06, 
0x81, 0xac, + 0xc3, 0x94, 0x49, 0x7d, 0xcb, 0xa3, 0xe6, 0x56, 0x6c, 0x43, 0xf4, 0x79, 0xd5, 0xce, 0xd5, 0x18, + 0xed, 0xd1, 0x71, 0x6b, 0x5a, 0x19, 0x42, 0xc5, 0xce, 0x28, 0x51, 0x95, 0x4d, 0x2d, 0x7d, 0x7d, + 0xe0, 0xd3, 0x8c, 0x76, 0x96, 0x78, 0x3b, 0xf9, 0xd4, 0xb2, 0x95, 0xcd, 0x82, 0xa3, 0xea, 0x92, + 0x5d, 0x98, 0xe3, 0xed, 0xcf, 0xc2, 0x2d, 0x73, 0xdc, 0x97, 0x4f, 0x8e, 0x5b, 0xda, 0x2a, 0xed, + 0x7b, 0xd4, 0xd0, 0x03, 0x6a, 0xae, 0x8e, 0xe0, 0xc6, 0x91, 0x38, 0x5a, 0x05, 0x4a, 0x1b, 0x6e, + 0x57, 0xfb, 0x5e, 0x09, 0xc2, 0x8c, 0x62, 0xe4, 0x4f, 0x17, 0xa0, 0xa9, 0x3b, 0x8e, 0x1b, 0xc8, + 0x6c, 0x5d, 0x22, 0x9a, 0x00, 0x73, 0x27, 0x2e, 0x5b, 0x58, 0x8e, 0x40, 0x85, 0x23, 0x3a, 0x74, + 0x8e, 0xc7, 0x28, 0x18, 0x97, 0x4d, 0x06, 0x29, 0xdf, 0xf8, 0x66, 0xfe, 0x56, 0x9c, 0xc2, 0x13, + 0x3e, 0xff, 0x35, 0x98, 0x4d, 0x37, 0xf6, 0x2c, 0xae, 0xad, 0x5c, 0x41, 0x06, 0x45, 0x80, 0x28, + 0x3e, 0xe6, 0x29, 0x18, 0xe4, 0xac, 0x84, 0x41, 0x6e, 0xfc, 0xb4, 0x0e, 0x51, 0xa3, 0x47, 0x1a, + 0xe1, 0xbe, 0x95, 0x32, 0xc2, 0xad, 0x4f, 0x42, 0xd8, 0xe3, 0x0d, 0x6f, 0xbb, 0x70, 0x31, 0xe2, + 0x8d, 0x66, 0x97, 0x3b, 0xa9, 0xaf, 0x5f, 0xe8, 0x95, 0x5f, 0x18, 0xf1, 0xf5, 0x5f, 0x88, 0x05, + 0x2c, 0x0d, 0x7f, 0xff, 0xda, 0x5f, 0x2f, 0xc0, 0x6c, 0x5c, 0x08, 0x3f, 0x83, 0xfe, 0x65, 0x98, + 0xf6, 0xa8, 0x6e, 0xb6, 0xf5, 0xc0, 0xd8, 0xe7, 0xa1, 0xf1, 0x05, 0x1e, 0xcb, 0xce, 0x4f, 0xcb, + 0x61, 0x9c, 0x80, 0x49, 0x3e, 0xa2, 0x43, 0x93, 0x15, 0x6c, 0x5b, 0x3d, 0xea, 0x0e, 0x82, 0x31, + 0xad, 0xcc, 0x7c, 0x83, 0x87, 0x11, 0x0c, 0xc6, 0x31, 0xb5, 0x8f, 0x0a, 0x30, 0x13, 0x6f, 0xf0, + 0xb9, 0x5b, 0x20, 0xf7, 0x93, 0x16, 0xc8, 0x95, 0x09, 0xbc, 0xf7, 0x11, 0x56, 0xc7, 0xef, 0x34, + 0xe3, 0x8f, 0xc6, 0x2d, 0x8d, 0x71, 0xe3, 0x4a, 0xe1, 0xb1, 0xc6, 0x95, 0x4f, 0x7e, 0xa2, 0xaa, + 0x51, 0xbb, 0x82, 0xf2, 0x33, 0xbc, 0x2b, 0xf8, 0x38, 0xb3, 0x5d, 0xc5, 0x32, 0x36, 0x55, 0x73, + 0x64, 0x6c, 0xea, 0x85, 0x19, 0x9b, 0x6a, 0x13, 0x9b, 0xd8, 0x4e, 0x93, 0xb5, 0xa9, 0xfe, 0x54, + 0xb3, 0x36, 0x35, 0xce, 
0x2b, 0x6b, 0x13, 0xe4, 0xcd, 0xda, 0xf4, 0xdd, 0x02, 0xcc, 0x98, 0x89, + 0x13, 0xc6, 0xf2, 0x6c, 0xff, 0xf8, 0xcb, 0x59, 0xf2, 0xc0, 0xb2, 0x38, 0x62, 0x96, 0x2c, 0xc3, + 0x94, 0xc8, 0xac, 0x5c, 0x49, 0x53, 0x1f, 0x4b, 0xae, 0x24, 0xf2, 0x8b, 0xd0, 0xb0, 0xd5, 0x5a, + 0x27, 0x33, 0x48, 0x6e, 0x4c, 0x64, 0x48, 0x4a, 0xcc, 0xe8, 0x14, 0x43, 0x58, 0x84, 0x91, 0x44, + 0xed, 0xf7, 0x6a, 0xf1, 0x05, 0xf1, 0x69, 0xfb, 0x38, 0x5e, 0x4b, 0xfa, 0x38, 0xae, 0xa7, 0x7d, + 0x1c, 0x43, 0xab, 0xb9, 0xf4, 0x73, 0x7c, 0x31, 0xb6, 0x4e, 0x94, 0x78, 0x92, 0xa6, 0x70, 0xc8, + 0x65, 0xac, 0x15, 0xcb, 0x70, 0x41, 0x2a, 0x01, 0x8a, 0xc8, 0x27, 0xd9, 0xe9, 0x28, 0x2a, 0x6d, + 0x35, 0x49, 0xc6, 0x34, 0x3f, 0x13, 0xe8, 0xab, 0x5c, 0xbd, 0x62, 0xc7, 0x16, 0x8d, 0x71, 0x95, + 0x47, 0x37, 0xe4, 0x60, 0xbb, 0x3b, 0x8f, 0xea, 0xbe, 0xf4, 0x54, 0xc4, 0x76, 0x77, 0xc8, 0x4b, + 0x51, 0x52, 0xe3, 0xee, 0x9a, 0xda, 0x13, 0xdc, 0x35, 0x3a, 0x34, 0x6d, 0xdd, 0x0f, 0xc4, 0x60, + 0x32, 0xe5, 0x6c, 0xf2, 0x47, 0x4e, 0xb7, 0xee, 0x33, 0x5d, 0x22, 0x52, 0xe0, 0x37, 0x22, 0x18, + 0x8c, 0x63, 0x12, 0x13, 0xa6, 0xd8, 0x5f, 0x3e, 0xb3, 0x98, 0xcb, 0x81, 0xcc, 0x68, 0x77, 0x16, + 0x19, 0xe1, 0xd6, 0x71, 0x23, 0x86, 0x83, 0x09, 0xd4, 0x11, 0x1e, 0x1d, 0x18, 0xc7, 0xa3, 0x43, + 0x7e, 0x46, 0x28, 0x6e, 0x47, 0xe1, 0x6b, 0x6d, 0xf2, 0xd7, 0x1a, 0x46, 0xb4, 0x62, 0x9c, 0x88, + 0x49, 0x5e, 0x36, 0x2a, 0x06, 0xb2, 0x1b, 0x54, 0xf5, 0xa9, 0xe4, 0xa8, 0xd8, 0x49, 0x92, 0x31, + 0xcd, 0x4f, 0xb6, 0xe0, 0x52, 0x58, 0x14, 0x6f, 0xc6, 0x34, 0xc7, 0x09, 0x43, 0x0c, 0x77, 0x32, + 0x78, 0x30, 0xb3, 0x26, 0x3f, 0xb3, 0x33, 0xf0, 0x3c, 0xea, 0x04, 0xb7, 0x75, 0x7f, 0x5f, 0xc6, + 0x2a, 0x46, 0x67, 0x76, 0x22, 0x12, 0xc6, 0xf9, 0xc8, 0x12, 0x80, 0x80, 0xe3, 0xb5, 0x2e, 0x24, + 0xc3, 0x81, 0x77, 0x42, 0x0a, 0xc6, 0xb8, 0xb4, 0xef, 0x36, 0xa0, 0x79, 0x57, 0x0f, 0xac, 0x43, + 0xca, 0xdd, 0xaf, 0xe7, 0xe3, 0x03, 0xfb, 0x4b, 0x05, 0xb8, 0x9c, 0x8c, 0xb1, 0x3d, 0x47, 0x47, + 0x18, 0xcf, 0xf1, 0x84, 0x99, 0xd2, 0x70, 0x44, 0x2b, 0xb8, 
0x4b, 0x6c, 0x28, 0x64, 0xf7, 0xbc, + 0x5d, 0x62, 0x9d, 0x51, 0x02, 0x71, 0x74, 0x5b, 0x3e, 0x29, 0x2e, 0xb1, 0x67, 0x3b, 0x29, 0x69, + 0xca, 0x61, 0x57, 0x7b, 0x66, 0x1c, 0x76, 0xf5, 0x67, 0x42, 0xeb, 0xef, 0xc7, 0x1c, 0x76, 0x8d, + 0x9c, 0x81, 0x63, 0xf2, 0x58, 0x8a, 0x40, 0x1b, 0xe5, 0xf8, 0xe3, 0x19, 0x25, 0x94, 0x23, 0x85, + 0x29, 0xcb, 0xbb, 0xba, 0x6f, 0x19, 0x52, 0xed, 0xc8, 0x91, 0x84, 0x59, 0x25, 0x67, 0x14, 0xf1, + 0x25, 0xfc, 0x2f, 0x0a, 0xec, 0x28, 0x17, 0x65, 0x31, 0x57, 0x2e, 0x4a, 0xb2, 0x02, 0x65, 0xe7, + 0x80, 0x1e, 0x9d, 0x2d, 0x37, 0x03, 0xdf, 0x04, 0xde, 0xbd, 0x43, 0x8f, 0x90, 0x57, 0xd6, 0xbe, + 0x5f, 0x04, 0x60, 0x8f, 0x7f, 0x3a, 0xd7, 0xd9, 0x8f, 0x43, 0xcd, 0x1f, 0x70, 0xc3, 0x90, 0x54, + 0x98, 0xa2, 0x68, 0x3b, 0x51, 0x8c, 0x8a, 0x4e, 0x3e, 0x07, 0x95, 0x6f, 0x0d, 0xe8, 0x40, 0xc5, + 0x81, 0x84, 0xfb, 0x86, 0xaf, 0xb3, 0x42, 0x14, 0xb4, 0xf3, 0x33, 0x6f, 0x2b, 0x17, 0x5b, 0xe5, + 0xbc, 0x5c, 0x6c, 0x0d, 0xa8, 0xdd, 0x75, 0x79, 0xf0, 0xae, 0xf6, 0x5f, 0x8a, 0x00, 0x51, 0x70, + 0x24, 0xf9, 0xb5, 0x02, 0xbc, 0x10, 0x7e, 0x70, 0x81, 0xd8, 0xfe, 0xf1, 0xbc, 0xe7, 0xb9, 0xdd, + 0x6d, 0x59, 0x1f, 0x3b, 0x9f, 0x81, 0xb6, 0xb2, 0xc4, 0x61, 0x76, 0x2b, 0x08, 0x42, 0x9d, 0xf6, + 0xfa, 0xc1, 0xd1, 0xaa, 0xe5, 0xc9, 0x11, 0x98, 0x19, 0x83, 0x7b, 0x53, 0xf2, 0x88, 0xaa, 0xd2, + 0x46, 0xc1, 0x3f, 0x22, 0x45, 0xc1, 0x10, 0x87, 0xec, 0x43, 0xdd, 0x71, 0xdf, 0xf3, 0x59, 0x77, + 0xc8, 0xe1, 0xf8, 0xe6, 0xf8, 0x5d, 0x2e, 0xba, 0x55, 0xb8, 0x5d, 0xe4, 0x1f, 0xac, 0x39, 0xb2, + 0xb3, 0x7f, 0xb5, 0x08, 0x17, 0x33, 0xfa, 0x81, 0xbc, 0x09, 0xb3, 0x32, 0x0e, 0x35, 0xba, 0x00, + 0xa0, 0x10, 0x5d, 0x00, 0xd0, 0x49, 0xd1, 0x70, 0x88, 0x9b, 0xbc, 0x07, 0xa0, 0x1b, 0x06, 0xf5, + 0xfd, 0x4d, 0xd7, 0x54, 0xfb, 0x81, 0x37, 0x98, 0xfa, 0xb2, 0x1c, 0x96, 0x3e, 0x3a, 0x6e, 0xfd, + 0x64, 0x56, 0x68, 0x79, 0xaa, 0x9f, 0xa3, 0x0a, 0x18, 0x83, 0x24, 0xdf, 0x04, 0x10, 0x36, 0x80, + 0x30, 0xfb, 0xc5, 0x13, 0x0c, 0x67, 0x0b, 0x2a, 0xb9, 0xda, 0xc2, 0xd7, 0x07, 0xba, 0x13, 0x58, + 
0xc1, 0x91, 0x48, 0x36, 0x74, 0x3f, 0x44, 0xc1, 0x18, 0xa2, 0xf6, 0x0f, 0x8b, 0x50, 0x57, 0xae, + 0x87, 0xa7, 0x60, 0x0b, 0xee, 0x26, 0x6c, 0xc1, 0x13, 0x0a, 0x26, 0xcf, 0xb2, 0x04, 0xbb, 0x29, + 0x4b, 0xf0, 0xad, 0xfc, 0xa2, 0x1e, 0x6f, 0x07, 0xfe, 0xcd, 0x22, 0xcc, 0x28, 0xd6, 0xbc, 0x16, + 0xda, 0xaf, 0xc2, 0x05, 0x11, 0x04, 0xb2, 0xa9, 0x3f, 0x14, 0x79, 0x97, 0x78, 0x87, 0x95, 0x45, + 0xfc, 0x76, 0x3b, 0x49, 0xc2, 0x34, 0x2f, 0x1b, 0xd6, 0xa2, 0x68, 0x87, 0x6d, 0xc2, 0x84, 0xdb, + 0x58, 0xec, 0x37, 0xf9, 0xb0, 0x6e, 0xa7, 0x68, 0x38, 0xc4, 0x9d, 0x36, 0x11, 0x97, 0xcf, 0xc1, + 0x44, 0xfc, 0xaf, 0x0b, 0x30, 0x15, 0xf5, 0xd7, 0xb9, 0x1b, 0x88, 0xf7, 0x92, 0x06, 0xe2, 0xe5, + 0xdc, 0xc3, 0x61, 0x84, 0x79, 0xf8, 0xcf, 0xd5, 0x20, 0x71, 0xa6, 0x81, 0xec, 0xc2, 0xbc, 0x95, + 0x19, 0x99, 0x19, 0x9b, 0x6d, 0xc2, 0x43, 0xfa, 0xeb, 0x23, 0x39, 0xf1, 0x31, 0x28, 0x64, 0x00, + 0xf5, 0x43, 0xea, 0x05, 0x96, 0x41, 0xd5, 0xf3, 0xdd, 0xca, 0xad, 0x92, 0x49, 0x23, 0x78, 0xd8, + 0xa7, 0xf7, 0xa5, 0x00, 0x0c, 0x45, 0x91, 0x5d, 0xa8, 0x50, 0xb3, 0x4b, 0x55, 0x26, 0xac, 0x9c, + 0x99, 0x89, 0xc3, 0xfe, 0x64, 0xff, 0x7c, 0x14, 0xd0, 0xc4, 0x8f, 0x1b, 0x9a, 0xca, 0x39, 0x15, + 0xac, 0x53, 0x9a, 0x97, 0xc8, 0x41, 0x68, 0x6d, 0xad, 0x4c, 0x68, 0xf2, 0x78, 0x8c, 0xad, 0xd5, + 0x87, 0xc6, 0x03, 0x3d, 0xa0, 0x5e, 0x4f, 0xf7, 0x0e, 0xe4, 0x6e, 0x63, 0xfc, 0x27, 0x7c, 0x5b, + 0x21, 0x45, 0x4f, 0x18, 0x16, 0x61, 0x24, 0x87, 0xb8, 0xd0, 0x08, 0xa4, 0xfa, 0xac, 0x4c, 0xca, + 0xe3, 0x0b, 0x55, 0x8a, 0xb8, 0x2f, 0xcf, 0x36, 0xa8, 0xbf, 0x18, 0xc9, 0x20, 0x87, 0x89, 0x34, + 0xf6, 0xe2, 0xf2, 0x82, 0x76, 0x0e, 0xd7, 0x84, 0x84, 0x8a, 0x96, 0x9b, 0xec, 0x74, 0xf8, 0xda, + 0xff, 0xac, 0x44, 0xd3, 0xf2, 0xd3, 0xb6, 0x13, 0x7e, 0x29, 0x69, 0x27, 0xbc, 0x96, 0xb6, 0x13, + 0xa6, 0x7c, 0xfe, 0x67, 0x8f, 0x86, 0x4e, 0x99, 0xd7, 0xca, 0xe7, 0x60, 0x5e, 0x7b, 0x05, 0x9a, + 0x87, 0x7c, 0x26, 0x10, 0x69, 0xb5, 0x2a, 0x7c, 0x19, 0xe1, 0x33, 0xfb, 0xfd, 0xa8, 0x18, 0xe3, + 0x3c, 0xac, 0x8a, 0xbc, 0xb8, 0x27, 
0xcc, 0x64, 0x2d, 0xab, 0x74, 0xa2, 0x62, 0x8c, 0xf3, 0xf0, + 0x40, 0x4a, 0xcb, 0x39, 0x10, 0x15, 0x6a, 0xbc, 0x82, 0x08, 0xa4, 0x54, 0x85, 0x18, 0xd1, 0xc9, + 0x0d, 0xa8, 0x0f, 0xcc, 0x3d, 0xc1, 0x5b, 0xe7, 0xbc, 0x5c, 0xc3, 0xdc, 0x59, 0x5d, 0x93, 0x69, + 0xbe, 0x14, 0x95, 0xb5, 0xa4, 0xa7, 0xf7, 0x15, 0x81, 0xef, 0x0d, 0x65, 0x4b, 0x36, 0xa3, 0x62, + 0x8c, 0xf3, 0x90, 0x9f, 0x86, 0x19, 0x8f, 0x9a, 0x03, 0x83, 0x86, 0xb5, 0x80, 0xd7, 0x92, 0xf9, + 0x4f, 0xe3, 0x14, 0x4c, 0x71, 0x8e, 0x30, 0x12, 0x36, 0xc7, 0x32, 0x12, 0x7e, 0x0d, 0x66, 0x4c, + 0x4f, 0xb7, 0x1c, 0x6a, 0xde, 0x73, 0x78, 0x60, 0x87, 0x0c, 0xe7, 0x0c, 0x0d, 0xf4, 0xab, 0x09, + 0x2a, 0xa6, 0xb8, 0xb5, 0x7f, 0x5a, 0x84, 0x8a, 0xc8, 0xca, 0xba, 0x0e, 0x17, 0x2d, 0xc7, 0x0a, + 0x2c, 0xdd, 0x5e, 0xa5, 0xb6, 0x7e, 0x14, 0x0f, 0x70, 0xa9, 0xb4, 0x5f, 0x64, 0x1b, 0xed, 0xf5, + 0x61, 0x32, 0x66, 0xd5, 0x61, 0x9d, 0x13, 0x88, 0xe5, 0x5b, 0xa1, 0x08, 0x3b, 0x9a, 0x48, 0x09, + 0x9e, 0xa0, 0x60, 0x8a, 0x93, 0x29, 0x43, 0xfd, 0xa1, 0xc8, 0x95, 0x8a, 0x50, 0x86, 0x92, 0xc1, + 0x24, 0x49, 0x3e, 0xae, 0xa4, 0x0f, 0xb8, 0x42, 0x1c, 0x1e, 0x9a, 0x92, 0x41, 0x70, 0x42, 0x49, + 0x4f, 0xd1, 0x70, 0x88, 0x9b, 0x21, 0xec, 0xe9, 0x96, 0x3d, 0xf0, 0x68, 0x84, 0x50, 0x89, 0x10, + 0xd6, 0x52, 0x34, 0x1c, 0xe2, 0xd6, 0xb6, 0x01, 0xb6, 0x06, 0xb6, 0xaf, 0xf3, 0x0c, 0x3c, 0x13, + 0xbb, 0x17, 0xe2, 0xf7, 0x8b, 0x30, 0x25, 0x60, 0xe5, 0x46, 0x7a, 0x09, 0x40, 0x26, 0xfa, 0x31, + 0x4d, 0x4f, 0xea, 0x06, 0xd1, 0x04, 0x17, 0x52, 0x30, 0xc6, 0x75, 0xba, 0x90, 0xb2, 0xd7, 0x61, + 0x4a, 0x85, 0x88, 0x71, 0xb5, 0x23, 0x15, 0x5e, 0xbb, 0x12, 0xa3, 0x61, 0x82, 0x93, 0xac, 0xb2, + 0xde, 0xdf, 0x15, 0x07, 0xcb, 0x2d, 0xd7, 0xe1, 0xb5, 0x45, 0x06, 0x86, 0xf0, 0x68, 0x65, 0x27, + 0x45, 0xc7, 0xa1, 0x1a, 0xe4, 0x8b, 0x50, 0xef, 0xe9, 0x0f, 0x77, 0x1c, 0xdd, 0x38, 0x90, 0x53, + 0x48, 0xa8, 0x57, 0x6c, 0xca, 0x72, 0x0c, 0x39, 0x88, 0x2e, 0xf7, 0xe1, 0xd5, 0xbc, 0x87, 0x0f, + 0xc3, 0x57, 0x36, 0xb4, 0x13, 0xff, 0x6f, 0x05, 0x20, 0xc3, 0xe7, 0x7a, 
0xc8, 0x3e, 0x54, 0x1d, + 0x6e, 0x5c, 0xce, 0x7d, 0xb5, 0x44, 0xcc, 0x46, 0x2d, 0x56, 0x7d, 0x59, 0x20, 0xf1, 0x89, 0x03, + 0x75, 0xfa, 0x30, 0xa0, 0x9e, 0x13, 0x9e, 0xf3, 0x9b, 0xcc, 0x35, 0x16, 0x62, 0xb3, 0x2d, 0x91, + 0x31, 0x94, 0xa1, 0xfd, 0x6e, 0x11, 0x9a, 0x31, 0xbe, 0x27, 0xd9, 0x6c, 0x78, 0xaa, 0x11, 0x61, + 0xd3, 0xdd, 0xf1, 0x6c, 0x39, 0xb6, 0x62, 0xa9, 0x46, 0x24, 0x09, 0x37, 0x30, 0xce, 0xc7, 0x06, + 0x70, 0x4f, 0xf7, 0x83, 0xc4, 0x28, 0x0b, 0x07, 0xf0, 0x66, 0x48, 0xc1, 0x18, 0x17, 0xb9, 0x2e, + 0x2f, 0x22, 0x29, 0x27, 0x13, 0xb2, 0x8e, 0xb8, 0x65, 0xa4, 0x32, 0x81, 0x5b, 0x46, 0x48, 0x17, + 0x66, 0x55, 0xab, 0x15, 0xf5, 0x6c, 0xe9, 0x3a, 0xc5, 0xcc, 0x93, 0x82, 0xc0, 0x21, 0x50, 0xed, + 0xfb, 0x05, 0x98, 0x4e, 0x58, 0x14, 0x45, 0x2a, 0x55, 0x75, 0x2a, 0x2d, 0x91, 0x4a, 0x35, 0x76, + 0x98, 0xec, 0x65, 0xa8, 0x8a, 0x0e, 0x4a, 0x07, 0x9b, 0x8b, 0x2e, 0x44, 0x49, 0x65, 0xaa, 0x82, + 0xf4, 0x59, 0xa4, 0x55, 0x05, 0xe9, 0xd4, 0x40, 0x45, 0x17, 0xae, 0x40, 0xd1, 0x3a, 0xd9, 0xd3, + 0x31, 0x57, 0xa0, 0x28, 0xc7, 0x90, 0x43, 0xfb, 0xbb, 0xbc, 0xdd, 0x81, 0x77, 0x14, 0x9a, 0x4a, + 0xba, 0x50, 0x93, 0x01, 0xc6, 0xf2, 0xd3, 0x78, 0x33, 0x87, 0x99, 0x93, 0xe3, 0xc8, 0x10, 0x59, + 0xdd, 0x38, 0xb8, 0xb7, 0xb7, 0x87, 0x0a, 0x9d, 0xdc, 0x84, 0x86, 0xeb, 0xc8, 0x29, 0x59, 0x3e, + 0xfe, 0x17, 0x98, 0x2a, 0x70, 0x4f, 0x15, 0x3e, 0x3a, 0x6e, 0x5d, 0x0e, 0xff, 0x24, 0x1a, 0x89, + 0x51, 0x4d, 0xed, 0x4f, 0x15, 0xe0, 0x05, 0x74, 0x6d, 0xdb, 0x72, 0xba, 0x49, 0x57, 0x36, 0xb1, + 0x61, 0x46, 0xcc, 0x34, 0x87, 0xba, 0x65, 0xeb, 0xbb, 0x36, 0x7d, 0xa2, 0xa9, 0x63, 0x10, 0x58, + 0xf6, 0x82, 0xb8, 0x98, 0x75, 0x61, 0xdd, 0x09, 0xee, 0x79, 0x9d, 0xc0, 0xb3, 0x9c, 0xae, 0x58, + 0xf6, 0x36, 0x13, 0x58, 0x98, 0xc2, 0xd6, 0x7e, 0xaf, 0x04, 0x3c, 0x78, 0x95, 0x7c, 0x19, 0x1a, + 0x3d, 0x6a, 0xec, 0xeb, 0x8e, 0xe5, 0xab, 0xa4, 0xd4, 0x57, 0xd8, 0x73, 0x6d, 0xaa, 0xc2, 0x47, + 0xec, 0x55, 0x2c, 0x77, 0x36, 0xf8, 0x39, 0xb2, 0x88, 0x97, 0x18, 0x50, 0xed, 0xfa, 0xbe, 0xde, + 0xb7, 0x72, 
0xc7, 0x0c, 0x89, 0x24, 0xc0, 0x62, 0x3a, 0x12, 0xbf, 0x51, 0x42, 0x13, 0x03, 0x2a, + 0x7d, 0x5b, 0xb7, 0x9c, 0xdc, 0x17, 0x09, 0xb2, 0x27, 0xd8, 0x62, 0x48, 0x62, 0xbd, 0xe3, 0x3f, + 0x51, 0x60, 0x93, 0x01, 0x34, 0x7d, 0xc3, 0xd3, 0x7b, 0xfe, 0xbe, 0xbe, 0xf4, 0xea, 0x6b, 0xb9, + 0x77, 0x73, 0x91, 0x28, 0xa1, 0x5c, 0xae, 0xe0, 0xf2, 0x66, 0xe7, 0xf6, 0xf2, 0xd2, 0xab, 0xaf, + 0x61, 0x5c, 0x4e, 0x5c, 0xec, 0xab, 0xaf, 0x2c, 0xc9, 0x19, 0x64, 0xe2, 0x62, 0x5f, 0x7d, 0x65, + 0x09, 0xe3, 0x72, 0xb4, 0xff, 0x55, 0x80, 0x46, 0xc8, 0x4b, 0x76, 0x00, 0xd8, 0x5c, 0x26, 0xd3, + 0xf6, 0x9e, 0x49, 0x71, 0xe0, 0xe6, 0xbb, 0x9d, 0xb0, 0x32, 0xc6, 0x80, 0x32, 0xf2, 0x1a, 0x17, + 0x27, 0x9d, 0xd7, 0x78, 0x11, 0x1a, 0xfb, 0xba, 0x63, 0xfa, 0xfb, 0xfa, 0x01, 0x95, 0x31, 0xff, + 0xe1, 0xde, 0xf2, 0xb6, 0x22, 0x60, 0xc4, 0xa3, 0xfd, 0xfd, 0x2a, 0x88, 0x40, 0x1f, 0x36, 0xe9, + 0x98, 0x96, 0x2f, 0x4e, 0xe6, 0x14, 0x78, 0xcd, 0x70, 0xd2, 0x59, 0x95, 0xe5, 0x18, 0x72, 0x90, + 0x2b, 0x50, 0xea, 0x59, 0x8e, 0x54, 0x29, 0xb9, 0x65, 0x7e, 0xd3, 0x72, 0x90, 0x95, 0x71, 0x92, + 0xfe, 0x50, 0xaa, 0x8c, 0x82, 0xa4, 0x3f, 0x44, 0x56, 0x46, 0xbe, 0x0a, 0x17, 0x6c, 0xd7, 0x3d, + 0x60, 0xd3, 0x47, 0x3c, 0x76, 0x79, 0x5a, 0xd8, 0xca, 0x36, 0x92, 0x24, 0x4c, 0xf3, 0x92, 0x1d, + 0x78, 0xf1, 0x03, 0xea, 0xb9, 0x72, 0xbe, 0xec, 0xd8, 0x94, 0xf6, 0x15, 0x8c, 0x50, 0x54, 0x78, + 0x68, 0xf5, 0xcf, 0x65, 0xb3, 0xe0, 0xa8, 0xba, 0xfc, 0x30, 0x88, 0xee, 0x75, 0x69, 0xb0, 0xe5, + 0xb9, 0x4c, 0x19, 0xb5, 0x9c, 0xae, 0x82, 0xad, 0x46, 0xb0, 0xdb, 0xd9, 0x2c, 0x38, 0xaa, 0x2e, + 0x79, 0x07, 0xe6, 0x04, 0x49, 0xa8, 0x2d, 0xcb, 0x62, 0x9a, 0xb1, 0x6c, 0x75, 0xff, 0xee, 0xb4, + 0x70, 0x80, 0x6e, 0x8f, 0xe0, 0xc1, 0x91, 0xb5, 0xc9, 0x5b, 0x30, 0xab, 0xdc, 0xdf, 0x5b, 0xd4, + 0xeb, 0x84, 0xc1, 0x5f, 0xd3, 0x2a, 0x06, 0x5e, 0xc5, 0x80, 0x63, 0x8a, 0x0b, 0x87, 0xea, 0x11, + 0x84, 0xcb, 0x3c, 0xc2, 0x6b, 0xa7, 0xbf, 0xe2, 0xba, 0xb6, 0xe9, 0x3e, 0x70, 0xd4, 0xb3, 0x8b, + 0x1d, 0x18, 0xf7, 0x78, 0x77, 0x32, 0x39, 0x70, 
0x44, 0x4d, 0xf6, 0xe4, 0x9c, 0xb2, 0xea, 0x3e, + 0x70, 0xd2, 0xa8, 0x10, 0x3d, 0x79, 0x67, 0x04, 0x0f, 0x8e, 0xac, 0x4d, 0xd6, 0x80, 0xa4, 0x9f, + 0x60, 0xa7, 0x2f, 0x63, 0x32, 0x2e, 0x8b, 0x0c, 0x5c, 0x69, 0x2a, 0x66, 0xd4, 0x20, 0x1b, 0x70, + 0x29, 0x5d, 0xca, 0xc4, 0xc9, 0xf0, 0x0c, 0x9e, 0x7b, 0x1b, 0x33, 0xe8, 0x98, 0x59, 0x4b, 0xfb, + 0x07, 0x45, 0x98, 0x4e, 0xa4, 0x6c, 0x79, 0xe6, 0x52, 0x63, 0xb0, 0xdd, 0x60, 0xcf, 0xef, 0xae, + 0xaf, 0xde, 0xa6, 0xba, 0x49, 0x3d, 0x75, 0x22, 0xa8, 0x21, 0x97, 0xc5, 0x04, 0x05, 0x53, 0x9c, + 0x64, 0x0f, 0x2a, 0xc2, 0xf1, 0x93, 0xf7, 0xfa, 0x2e, 0xd5, 0x47, 0xdc, 0xfb, 0x23, 0xef, 0xbc, + 0x73, 0x3d, 0x8a, 0x02, 0x5e, 0x0b, 0x60, 0x2a, 0xce, 0xc1, 0x26, 0x92, 0x48, 0xed, 0xad, 0x25, + 0x54, 0xde, 0x75, 0x28, 0x05, 0xc1, 0xb8, 0x49, 0x37, 0x84, 0x23, 0x71, 0x7b, 0x03, 0x19, 0x86, + 0xb6, 0xc7, 0xde, 0x9d, 0xef, 0x5b, 0xae, 0x23, 0x6f, 0x60, 0xd8, 0x81, 0x9a, 0xdc, 0x0e, 0x8f, + 0x99, 0x34, 0x84, 0xeb, 0x4a, 0xca, 0x8e, 0xae, 0xb0, 0xb4, 0x7f, 0x53, 0x84, 0x46, 0x68, 0xf7, + 0x3a, 0xc5, 0xcd, 0x06, 0x2e, 0x34, 0xc2, 0x08, 0xd5, 0xdc, 0x77, 0x13, 0x47, 0x81, 0x93, 0xdc, + 0x54, 0x13, 0xfe, 0xc5, 0x48, 0x46, 0x3c, 0xfa, 0xb5, 0x94, 0x23, 0xfa, 0xb5, 0x0f, 0xb5, 0xc0, + 0xb3, 0xba, 0x5d, 0xb9, 0x4b, 0xc8, 0x13, 0xfe, 0x1a, 0x76, 0xd7, 0xb6, 0x00, 0x94, 0x3d, 0x2b, + 0xfe, 0xa0, 0x12, 0xa3, 0xbd, 0x0f, 0xb3, 0x69, 0x4e, 0xae, 0x42, 0x1b, 0xfb, 0xd4, 0x1c, 0xd8, + 0xaa, 0x8f, 0x23, 0x15, 0x5a, 0x96, 0x63, 0xc8, 0x41, 0x6e, 0x40, 0x9d, 0xbd, 0xa6, 0x0f, 0x5c, + 0x47, 0xa9, 0xb1, 0x7c, 0x37, 0xb2, 0x2d, 0xcb, 0x30, 0xa4, 0x6a, 0xff, 0xb9, 0x04, 0x57, 0x22, + 0xeb, 0xe5, 0xa6, 0xee, 0xe8, 0xdd, 0x53, 0x5c, 0x48, 0xfb, 0xe9, 0x31, 0xcc, 0xb3, 0x5e, 0x4f, + 0x53, 0x7a, 0x06, 0xae, 0xa7, 0xf9, 0xbf, 0x45, 0xe0, 0xd1, 0xf4, 0xe4, 0xdb, 0x30, 0xa5, 0xc7, + 0xee, 0x22, 0x97, 0xaf, 0xf3, 0x66, 0xee, 0xd7, 0xc9, 0x83, 0xf6, 0x43, 0xc3, 0x4e, 0xbc, 0x14, + 0x13, 0x02, 0x89, 0x0b, 0xf5, 0x3d, 0xdd, 0xb6, 0x99, 0x2e, 0x94, 0xdb, 0x1b, 0x9b, 
0x10, 0xce, + 0x87, 0xf9, 0x9a, 0x84, 0xc6, 0x50, 0x08, 0xf9, 0x6e, 0x01, 0xa6, 0xbd, 0xf8, 0x76, 0x4d, 0xbe, + 0x90, 0x3c, 0xb1, 0x3a, 0x31, 0xb4, 0x78, 0xfc, 0x64, 0x7c, 0x4f, 0x98, 0x94, 0xa9, 0xfd, 0xc7, + 0x02, 0x4c, 0x77, 0x6c, 0xcb, 0xb4, 0x9c, 0xee, 0x39, 0xde, 0x8e, 0x73, 0x0f, 0x2a, 0xbe, 0x6d, + 0x99, 0x74, 0xcc, 0xd5, 0x44, 0xac, 0x63, 0x0c, 0x00, 0x05, 0x4e, 0xf2, 0xba, 0x9d, 0xd2, 0x29, + 0xae, 0xdb, 0xf9, 0x4f, 0x35, 0x90, 0xe7, 0x42, 0xc8, 0x00, 0x1a, 0x5d, 0x75, 0x8b, 0x87, 0x7c, + 0xc6, 0xdb, 0x39, 0x32, 0xc0, 0x26, 0xee, 0x03, 0x11, 0x73, 0x7f, 0x58, 0x88, 0x91, 0x24, 0x42, + 0x93, 0x97, 0xe0, 0xaf, 0xe6, 0xbc, 0x04, 0x5f, 0x88, 0x1b, 0xbe, 0x06, 0x5f, 0x87, 0xf2, 0x7e, + 0x10, 0xf4, 0xe5, 0x60, 0x1a, 0xdf, 0x58, 0x18, 0x25, 0x21, 0x13, 0x3a, 0x11, 0xfb, 0x8f, 0x1c, + 0x9a, 0x89, 0x70, 0xf4, 0xf0, 0xaa, 0xd1, 0x95, 0x5c, 0x71, 0x41, 0x71, 0x11, 0xec, 0x3f, 0x72, + 0x68, 0xf2, 0x0b, 0xd0, 0x0c, 0x3c, 0xdd, 0xf1, 0xf7, 0x5c, 0xaf, 0x47, 0x3d, 0xb9, 0x47, 0x5d, + 0xcb, 0x71, 0x0f, 0xfc, 0x76, 0x84, 0x26, 0x6c, 0xec, 0x89, 0x22, 0x8c, 0x4b, 0x23, 0x07, 0x50, + 0x1f, 0x98, 0xa2, 0x61, 0xd2, 0x0c, 0xb6, 0x9c, 0xe7, 0x6a, 0xff, 0x58, 0xd4, 0x8f, 0xfa, 0x87, + 0xa1, 0x80, 0xe4, 0xad, 0xba, 0xb5, 0x49, 0xdd, 0xaa, 0x1b, 0x1f, 0x8d, 0x59, 0x19, 0x92, 0x48, + 0x4f, 0xea, 0xb5, 0x4e, 0x57, 0x06, 0x2d, 0xae, 0xe5, 0x56, 0x39, 0x85, 0xc8, 0x66, 0xa8, 0x1b, + 0x3b, 0x5d, 0x54, 0x32, 0x88, 0x05, 0xd5, 0x3e, 0xb7, 0x3e, 0xe7, 0xbe, 0x61, 0x3d, 0xee, 0x20, + 0x10, 0x73, 0x8d, 0x28, 0x41, 0x29, 0x40, 0xeb, 0x81, 0xf4, 0x3b, 0x12, 0x23, 0x71, 0x69, 0x99, + 0x38, 0x55, 0xbb, 0x78, 0xba, 0xa9, 0x27, 0xbc, 0x3d, 0x2b, 0x76, 0x69, 0x42, 0xe6, 0xed, 0x64, + 0xda, 0xbf, 0x2d, 0x42, 0x69, 0x7b, 0xa3, 0x23, 0x12, 0x21, 0xf3, 0x6b, 0x10, 0x69, 0xe7, 0xc0, + 0xea, 0xdf, 0xa7, 0x9e, 0xb5, 0x77, 0x24, 0x77, 0xf9, 0xb1, 0x44, 0xc8, 0x69, 0x0e, 0xcc, 0xa8, + 0x45, 0xde, 0x85, 0x29, 0x43, 0x5f, 0xa1, 0x5e, 0x30, 0x8e, 0x0d, 0x83, 0xa7, 0x42, 0x58, 0x59, + 0x8e, 0xaa, 0x63, 0x02, 
0x8c, 0xec, 0x00, 0x18, 0x11, 0x74, 0xe9, 0xcc, 0x96, 0x97, 0x18, 0x70, + 0x0c, 0x88, 0x20, 0x34, 0x0e, 0x18, 0x2b, 0x47, 0x2d, 0x9f, 0x05, 0x95, 0x0f, 0xd2, 0x3b, 0xaa, + 0x2e, 0x46, 0x30, 0x9a, 0x03, 0xd3, 0x89, 0x9b, 0xcc, 0xc8, 0x57, 0xa0, 0xee, 0xf6, 0x63, 0x33, + 0x77, 0x83, 0x47, 0x62, 0xd7, 0xef, 0xc9, 0xb2, 0x47, 0xc7, 0xad, 0xe9, 0x0d, 0xb7, 0x6b, 0x19, + 0xaa, 0x00, 0x43, 0x76, 0xa2, 0x41, 0x95, 0x9f, 0xf9, 0x55, 0xf7, 0x98, 0xf1, 0xa1, 0xc3, 0xaf, + 0x1a, 0xf2, 0x51, 0x52, 0xb4, 0x5f, 0x2a, 0x43, 0xe4, 0xad, 0x27, 0x3e, 0x54, 0xc5, 0x79, 0x23, + 0xb9, 0x48, 0x9c, 0xeb, 0xd1, 0x26, 0x29, 0x8a, 0x74, 0xa1, 0xf4, 0xbe, 0xbb, 0x9b, 0x7b, 0x8d, + 0x88, 0x25, 0x2e, 0x11, 0x66, 0xb9, 0x58, 0x01, 0x32, 0x09, 0xe4, 0x2f, 0x17, 0xe0, 0x79, 0x3f, + 0xad, 0x65, 0xcb, 0xe1, 0x80, 0xf9, 0xb7, 0x13, 0x69, 0xbd, 0x5d, 0x86, 0xcc, 0x8f, 0x22, 0xe3, + 0x70, 0x5b, 0x58, 0xff, 0x0b, 0x37, 0xba, 0x1c, 0x4e, 0xb7, 0x72, 0xde, 0xd7, 0x9c, 0xec, 0xff, + 0x64, 0x19, 0x4a, 0x51, 0xda, 0x77, 0x8a, 0xd0, 0x8c, 0x2d, 0x0c, 0xb9, 0xaf, 0xc7, 0x7b, 0x98, + 0xba, 0x1e, 0x6f, 0x6b, 0xfc, 0xa8, 0x92, 0xa8, 0x55, 0xe7, 0x7d, 0x43, 0xde, 0x3f, 0x2e, 0x42, + 0x69, 0x67, 0x75, 0x2d, 0xb9, 0x3f, 0x2e, 0x3c, 0x85, 0xfd, 0xf1, 0x3e, 0xd4, 0x76, 0x07, 0x96, + 0x1d, 0x58, 0x4e, 0xee, 0xd4, 0x4a, 0xea, 0x36, 0x41, 0xe9, 0x56, 0x11, 0xa8, 0xa8, 0xe0, 0x49, + 0x17, 0x6a, 0x5d, 0x91, 0xdb, 0x36, 0x77, 0xac, 0xad, 0xcc, 0x91, 0x2b, 0x04, 0xc9, 0x3f, 0xa8, + 0xd0, 0xb5, 0x23, 0xa8, 0xee, 0xac, 0xca, 0x1d, 0xc6, 0xd3, 0xed, 0x4d, 0xed, 0x17, 0x20, 0x54, + 0x38, 0x9e, 0xbe, 0xf0, 0xff, 0x5a, 0x80, 0xa4, 0x8e, 0xf5, 0xf4, 0x47, 0xd3, 0x41, 0x7a, 0x34, + 0xad, 0x4e, 0xe2, 0xe3, 0xcb, 0x1e, 0x50, 0xda, 0xbf, 0x2a, 0x40, 0xea, 0x90, 0x28, 0x79, 0x4d, + 0xa6, 0x49, 0x4c, 0x06, 0x35, 0xaa, 0x34, 0x89, 0x24, 0xc9, 0x1d, 0x4b, 0x97, 0xf8, 0x21, 0xdb, + 0x19, 0xc6, 0x7d, 0x75, 0xb2, 0xf9, 0x77, 0xc7, 0xdf, 0x19, 0x66, 0x79, 0xfe, 0x64, 0xe0, 0x6d, + 0x9c, 0x84, 0x49, 0xb9, 0xda, 0xdf, 0x2b, 0x42, 0xf5, 0xa9, 
0xe5, 0xc5, 0xa0, 0x89, 0x58, 0xe8, + 0x95, 0x9c, 0xb3, 0xfd, 0xc8, 0x48, 0xe8, 0x5e, 0x2a, 0x12, 0xfa, 0x66, 0x5e, 0x41, 0x8f, 0x8f, + 0x83, 0xfe, 0x17, 0x05, 0x90, 0x6b, 0xcd, 0xba, 0xe3, 0x07, 0xba, 0x63, 0x50, 0x62, 0x84, 0x0b, + 0x5b, 0xde, 0x80, 0x3b, 0x19, 0x94, 0x2a, 0x74, 0x19, 0xfe, 0x5b, 0x2d, 0x64, 0xe4, 0x8b, 0x50, + 0xdf, 0x77, 0xfd, 0x80, 0x2f, 0x5e, 0xc5, 0xa4, 0x75, 0xee, 0xb6, 0x2c, 0xc7, 0x90, 0x23, 0xed, + 0x39, 0xaf, 0x8c, 0xf6, 0x9c, 0x6b, 0xbf, 0x51, 0x84, 0xa9, 0x4f, 0x4a, 0xe2, 0x8d, 0xac, 0xc8, + 0xf1, 0x52, 0xce, 0xc8, 0xf1, 0xf2, 0x59, 0x22, 0xc7, 0xb5, 0x1f, 0x16, 0x00, 0x9e, 0x5a, 0xd6, + 0x0f, 0x33, 0x19, 0xd4, 0x9d, 0x7b, 0x5c, 0x65, 0x87, 0x74, 0xff, 0xed, 0x8a, 0x7a, 0x24, 0x1e, + 0xd0, 0xfd, 0x61, 0x01, 0x66, 0xf4, 0x44, 0x90, 0x74, 0x6e, 0x7d, 0x39, 0x15, 0x73, 0x1d, 0xc6, + 0xf8, 0x25, 0xcb, 0x31, 0x25, 0x96, 0xbc, 0x1e, 0x65, 0xe8, 0xbf, 0x1b, 0x0d, 0xfb, 0xa1, 0xd4, + 0xfa, 0x22, 0xaa, 0x2b, 0xce, 0xf9, 0x84, 0xa0, 0xf4, 0xd2, 0x44, 0x82, 0xd2, 0xe3, 0xc7, 0x6d, + 0xcb, 0x8f, 0x3d, 0x6e, 0x7b, 0x08, 0x8d, 0x3d, 0xcf, 0xed, 0xf1, 0xb8, 0x6f, 0x79, 0x07, 0xfe, + 0xcd, 0x1c, 0x0b, 0x65, 0x6f, 0xd7, 0x72, 0xa8, 0xc9, 0x63, 0xca, 0x43, 0x1b, 0xd9, 0x9a, 0xc2, + 0xc7, 0x48, 0x14, 0x77, 0x2b, 0xb8, 0x42, 0x6a, 0x75, 0x92, 0x52, 0xc3, 0xb9, 0x64, 0x5b, 0xa0, + 0xa3, 0x12, 0x93, 0x8c, 0xf5, 0xae, 0x3d, 0x9d, 0x58, 0x6f, 0xed, 0xcf, 0xd6, 0xd4, 0x04, 0xf6, + 0xcc, 0x25, 0x83, 0xfe, 0x34, 0x49, 0x42, 0x97, 0x0e, 0x65, 0x30, 0xa8, 0x3f, 0xc5, 0x0c, 0x06, + 0x8d, 0xc9, 0x64, 0x30, 0x80, 0x7c, 0x19, 0x0c, 0x9a, 0x13, 0xca, 0x60, 0x30, 0x35, 0xa9, 0x0c, + 0x06, 0xd3, 0x63, 0x65, 0x30, 0x98, 0x39, 0x55, 0x06, 0x83, 0xe3, 0x12, 0xa4, 0x36, 0xe3, 0x9f, + 0xfa, 0xf8, 0xfe, 0x50, 0xf9, 0xf8, 0xbe, 0x57, 0x84, 0x68, 0x22, 0x3e, 0x63, 0x0c, 0xd4, 0x3b, + 0x3c, 0x50, 0x9a, 0x07, 0xdd, 0xe7, 0xb9, 0x43, 0x7d, 0x53, 0x62, 0x60, 0x88, 0x46, 0x7c, 0x00, + 0x2b, 0xbc, 0xc7, 0x24, 0xb7, 0xb7, 0x24, 0xba, 0x12, 0x45, 0x18, 0x49, 0xa3, 0xff, 0x18, 0x13, + 
0xa3, 0xfd, 0xf3, 0x22, 0xc8, 0x0b, 0x6f, 0x08, 0x85, 0xca, 0x9e, 0xf5, 0x90, 0x9a, 0xb9, 0x23, + 0xab, 0xd7, 0x18, 0x8a, 0xbc, 0x55, 0x87, 0xbb, 0x83, 0x78, 0x01, 0x0a, 0x74, 0x6e, 0xe7, 0x17, + 0xee, 0x3d, 0xd9, 0x7f, 0x39, 0xec, 0xfc, 0x71, 0x37, 0xa1, 0xb4, 0xf3, 0x8b, 0x22, 0x54, 0x32, + 0x84, 0x5b, 0x81, 0x47, 0x7a, 0xe4, 0xf6, 0x66, 0x26, 0x22, 0x46, 0x94, 0x5b, 0xc1, 0x17, 0x29, + 0x4c, 0xa4, 0x8c, 0xf6, 0xcf, 0xff, 0xe0, 0x47, 0xd7, 0x9e, 0xfb, 0xe1, 0x8f, 0xae, 0x3d, 0xf7, + 0xd1, 0x8f, 0xae, 0x3d, 0xf7, 0x4b, 0x27, 0xd7, 0x0a, 0x3f, 0x38, 0xb9, 0x56, 0xf8, 0xe1, 0xc9, + 0xb5, 0xc2, 0x47, 0x27, 0xd7, 0x0a, 0xff, 0xfe, 0xe4, 0x5a, 0xe1, 0x2f, 0xfc, 0x87, 0x6b, 0xcf, + 0xfd, 0xdc, 0x97, 0xa3, 0x26, 0x2c, 0xaa, 0x26, 0x2c, 0x2a, 0x81, 0x8b, 0xfd, 0x83, 0xee, 0x22, + 0x6b, 0x42, 0x54, 0xa2, 0x9a, 0xf0, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x94, 0x34, 0x30, 0x66, + 0x45, 0xa2, 0x00, 0x00, } func (m *AbstractPodTemplate) Marshal() (dAtA []byte, err error) { @@ -7621,6 +7687,99 @@ func (m *Probe) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *PulsarAuth) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PulsarAuth) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PulsarAuth) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Token != nil { + { + size, err := m.Token.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PulsarSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + 
return dAtA[:n], nil +} + +func (m *PulsarSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PulsarSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Auth != nil { + { + size, err := m.Auth.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnack)) + i-- + dAtA[i] = 0x28 + i -= len(m.SubscriptionName) + copy(dAtA[i:], m.SubscriptionName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubscriptionName))) + i-- + dAtA[i] = 0x22 + i -= len(m.ConsumerName) + copy(dAtA[i:], m.ConsumerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConsumerName))) + i-- + dAtA[i] = 0x1a + i -= len(m.Topic) + copy(dAtA[i:], m.Topic) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Topic))) + i-- + dAtA[i] = 0x12 + i -= len(m.ServerAddr) + copy(dAtA[i:], m.ServerAddr) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServerAddr))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *RedisBufferService) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -8501,6 +8660,18 @@ func (m *Source) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Pulsar != nil { + { + size, err := m.Pulsar.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } if m.Serving != nil { { size, err := m.Serving.MarshalToSizedBuffer(dAtA[:i]) @@ -11198,6 +11369,41 @@ func (m *Probe) Size() (n int) { return n } +func (m *PulsarAuth) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Token != nil { + l = m.Token.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PulsarSource) Size() (n int) { + if m 
== nil { + return 0 + } + var l int + _ = l + l = len(m.ServerAddr) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Topic) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ConsumerName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SubscriptionName) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MaxUnack)) + if m.Auth != nil { + l = m.Auth.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *RedisBufferService) Size() (n int) { if m == nil { return 0 @@ -11561,6 +11767,10 @@ func (m *Source) Size() (n int) { l = m.Serving.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.Pulsar != nil { + l = m.Pulsar.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -13023,6 +13233,31 @@ func (this *Probe) String() string { }, "") return s } +func (this *PulsarAuth) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PulsarAuth{`, + `Token:` + strings.Replace(fmt.Sprintf("%v", this.Token), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PulsarSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PulsarSource{`, + `ServerAddr:` + fmt.Sprintf("%v", this.ServerAddr) + `,`, + `Topic:` + fmt.Sprintf("%v", this.Topic) + `,`, + `ConsumerName:` + fmt.Sprintf("%v", this.ConsumerName) + `,`, + `SubscriptionName:` + fmt.Sprintf("%v", this.SubscriptionName) + `,`, + `MaxUnack:` + fmt.Sprintf("%v", this.MaxUnack) + `,`, + `Auth:` + strings.Replace(this.Auth.String(), "PulsarAuth", "PulsarAuth", 1) + `,`, + `}`, + }, "") + return s +} func (this *RedisBufferService) String() string { if this == nil { return "nil" @@ -13242,6 +13477,7 @@ func (this *Source) String() string { `UDSource:` + strings.Replace(this.UDSource.String(), "UDSource", "UDSource", 1) + `,`, `JetStream:` + strings.Replace(this.JetStream.String(), "JetStreamSource", "JetStreamSource", 1) + `,`, `Serving:` + 
strings.Replace(this.Serving.String(), "ServingSource", "ServingSource", 1) + `,`, + `Pulsar:` + strings.Replace(this.Pulsar.String(), "PulsarSource", "PulsarSource", 1) + `,`, `}`, }, "") return s @@ -27159,7 +27395,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { } return nil } -func (m *RedisBufferService) Unmarshal(dAtA []byte) error { +func (m *PulsarAuth) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -27182,15 +27418,15 @@ func (m *RedisBufferService) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RedisBufferService: wiretype end group for non-group") + return fmt.Errorf("proto: PulsarAuth: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RedisBufferService: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PulsarAuth: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Native", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -27217,20 +27453,339 @@ func (m *RedisBufferService) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Native == nil { - m.Native = &NativeRedis{} + if m.Token == nil { + m.Token = &v1.SecretKeySelector{} } - if err := m.Native.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Token.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field External", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PulsarSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PulsarSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PulsarSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerAddr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServerAddr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Topic", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { 
+ return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Topic = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsumerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConsumerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SubscriptionName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SubscriptionName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnack", wireType) + } + m.MaxUnack = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxUnack |= uint32(b&0x7F) 
<< shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Auth", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Auth == nil { + m.Auth = &PulsarAuth{} + } + if err := m.Auth.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RedisBufferService) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RedisBufferService: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RedisBufferService: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Native", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Native == nil { + m.Native = &NativeRedis{} + } + if err := m.Native.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field External", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { @@ -30022,6 +30577,42 @@ func (m *Source) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pulsar", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pulsar == nil { + m.Pulsar = &PulsarSource{} + } + if err := m.Pulsar.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/pkg/apis/numaflow/v1alpha1/generated.proto b/pkg/apis/numaflow/v1alpha1/generated.proto index dfe8339b6b..7b81e2235e 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.proto +++ b/pkg/apis/numaflow/v1alpha1/generated.proto @@ -1314,6 +1314,30 @@ message Probe { optional int32 failureThreshold = 5; } +// 
PulsarAuth defines how to authenticate with Pulsar +message PulsarAuth { + // JWT Token auth + // +optional + optional .k8s.io.api.core.v1.SecretKeySelector token = 1; +} + +message PulsarSource { + optional string serverAddr = 1; + + optional string topic = 2; + + optional string consumerName = 3; + + optional string subscriptionName = 4; + + // Maximum number of messages that are in not yet acked state. Once this limit is crossed, futher read requests will return empty list. + optional uint32 maxUnack = 5; + + // Auth information + // +optional + optional PulsarAuth auth = 6; +} + message RedisBufferService { // Native brings up a native Redis service optional NativeRedis native = 1; @@ -1607,6 +1631,9 @@ message Source { // +optional optional ServingSource serving = 8; + + // +optional + optional PulsarSource pulsar = 9; } // Status is a common structure which can be used for Status field. diff --git a/pkg/apis/numaflow/v1alpha1/pulsar_auth.go b/pkg/apis/numaflow/v1alpha1/pulsar_auth.go new file mode 100644 index 0000000000..9e6d719e08 --- /dev/null +++ b/pkg/apis/numaflow/v1alpha1/pulsar_auth.go @@ -0,0 +1,26 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import corev1 "k8s.io/api/core/v1" + +// PulsarAuth defines how to authenticate with Pulsar +type PulsarAuth struct { + // JWT Token auth + // +optional + Token *corev1.SecretKeySelector `json:"token,omitempty" protobuf:"bytes,1,opt,name=token"` +} diff --git a/pkg/apis/numaflow/v1alpha1/pulsar_source.go b/pkg/apis/numaflow/v1alpha1/pulsar_source.go new file mode 100644 index 0000000000..1b4d5adb21 --- /dev/null +++ b/pkg/apis/numaflow/v1alpha1/pulsar_source.go @@ -0,0 +1,29 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +type PulsarSource struct { + ServerAddr string `json:"serverAddr" protobuf:"bytes,1,name=server_addr"` + Topic string `json:"topic" protobuf:"bytes,2,name=topic"` + ConsumerName string `json:"consumerName" protobuf:"bytes,3,name=consumerName"` + SubscriptionName string `json:"subscriptionName" protobuf:"bytes,4,name=subscriptionName"` + // Maximum number of messages that are in not yet acked state. Once this limit is crossed, futher read requests will return empty list. 
+ MaxUnack uint32 `json:"maxUnack,omitempty" protobuf:"bytes,5,opt,name=maxUnack"` + // Auth information + // +optional + Auth *PulsarAuth `json:"auth,omitempty" protobuf:"bytes,6,opt,name=auth"` +} diff --git a/pkg/apis/numaflow/v1alpha1/source.go b/pkg/apis/numaflow/v1alpha1/source.go index e1308cf28e..0873076102 100644 --- a/pkg/apis/numaflow/v1alpha1/source.go +++ b/pkg/apis/numaflow/v1alpha1/source.go @@ -43,6 +43,8 @@ type Source struct { JetStream *JetStreamSource `json:"jetstream,omitempty" protobuf:"bytes,7,opt,name=jetstream"` // +optional Serving *ServingSource `json:"serving,omitempty" protobuf:"bytes,8,opt,name=serving"` + // +optional + Pulsar *PulsarSource `json:"pulsar,omitempty" protobuf:"bytes,9,opt,name=pulsar"` } func (s Source) getContainers(req getContainerReq) ([]corev1.Container, []corev1.Container, error) { diff --git a/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go index 2576898f49..ee979ab2bc 100644 --- a/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/numaflow/v1alpha1/zz_generated.deepcopy.go @@ -2006,6 +2006,48 @@ func (in *Probe) DeepCopy() *Probe { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PulsarAuth) DeepCopyInto(out *PulsarAuth) { + *out = *in + if in.Token != nil { + in, out := &in.Token, &out.Token + *out = new(v1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PulsarAuth. +func (in *PulsarAuth) DeepCopy() *PulsarAuth { + if in == nil { + return nil + } + out := new(PulsarAuth) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PulsarSource) DeepCopyInto(out *PulsarSource) { + *out = *in + if in.Auth != nil { + in, out := &in.Auth, &out.Auth + *out = new(PulsarAuth) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PulsarSource. +func (in *PulsarSource) DeepCopy() *PulsarSource { + if in == nil { + return nil + } + out := new(PulsarSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RedisBufferService) DeepCopyInto(out *RedisBufferService) { *out = *in @@ -2510,6 +2552,11 @@ func (in *Source) DeepCopyInto(out *Source) { *out = new(ServingSource) (*in).DeepCopyInto(*out) } + if in.Pulsar != nil { + in, out := &in.Pulsar, &out.Pulsar + *out = new(PulsarSource) + (*in).DeepCopyInto(*out) + } return } diff --git a/pkg/apis/numaflow/v1alpha1/zz_generated.openapi.go b/pkg/apis/numaflow/v1alpha1/zz_generated.openapi.go index 1cd2d83b7f..b5cff624f8 100644 --- a/pkg/apis/numaflow/v1alpha1/zz_generated.openapi.go +++ b/pkg/apis/numaflow/v1alpha1/zz_generated.openapi.go @@ -89,6 +89,8 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.PipelineSpec": schema_pkg_apis_numaflow_v1alpha1_PipelineSpec(ref), "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.PipelineStatus": schema_pkg_apis_numaflow_v1alpha1_PipelineStatus(ref), "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.Probe": schema_pkg_apis_numaflow_v1alpha1_Probe(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.PulsarAuth": schema_pkg_apis_numaflow_v1alpha1_PulsarAuth(ref), + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.PulsarSource": schema_pkg_apis_numaflow_v1alpha1_PulsarSource(ref), "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.RedisBufferService": 
schema_pkg_apis_numaflow_v1alpha1_RedisBufferService(ref), "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.RedisConfig": schema_pkg_apis_numaflow_v1alpha1_RedisConfig(ref), "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.RedisSettings": schema_pkg_apis_numaflow_v1alpha1_RedisSettings(ref), @@ -4310,6 +4312,83 @@ func schema_pkg_apis_numaflow_v1alpha1_Probe(ref common.ReferenceCallback) commo } } +func schema_pkg_apis_numaflow_v1alpha1_PulsarAuth(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PulsarAuth defines how to authenticate with Pulsar", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "token": { + SchemaProps: spec.SchemaProps{ + Description: "JWT Token auth", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_pkg_apis_numaflow_v1alpha1_PulsarSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "serverAddr": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "topic": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "consumerName": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "subscriptionName": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "maxUnack": { + SchemaProps: spec.SchemaProps{ + Description: "Maximum number of messages that are in not yet acked state. 
Once this limit is crossed, futher read requests will return empty list.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "auth": { + SchemaProps: spec.SchemaProps{ + Description: "Auth information", + Ref: ref("github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.PulsarAuth"), + }, + }, + }, + Required: []string{"serverAddr", "topic", "consumerName", "subscriptionName"}, + }, + }, + Dependencies: []string{ + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.PulsarAuth"}, + } +} + func schema_pkg_apis_numaflow_v1alpha1_RedisBufferService(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -5120,11 +5199,16 @@ func schema_pkg_apis_numaflow_v1alpha1_Source(ref common.ReferenceCallback) comm Ref: ref("github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ServingSource"), }, }, + "pulsar": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.PulsarSource"), + }, + }, }, }, }, Dependencies: []string{ - "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.GeneratorSource", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.HTTPSource", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.JetStreamSource", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.KafkaSource", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.NatsSource", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ServingSource", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.UDSource", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.UDTransformer"}, + "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.GeneratorSource", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.HTTPSource", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.JetStreamSource", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.KafkaSource", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.NatsSource", 
"github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.PulsarSource", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.ServingSource", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.UDSource", "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1.UDTransformer"}, } } diff --git a/rust/Cargo.lock b/rust/Cargo.lock index 48a609ce9e..a6493870a6 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -77,7 +77,7 @@ dependencies = [ "serde_json", "serde_nanos", "serde_repr", - "thiserror", + "thiserror 1.0.64", "time", "tokio", "tokio-rustls 0.26.0", @@ -110,7 +110,7 @@ dependencies = [ "serde_json", "serde_nanos", "serde_repr", - "thiserror", + "thiserror 1.0.64", "time", "tokio", "tokio-rustls 0.26.0", @@ -139,7 +139,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.89", ] [[package]] @@ -150,7 +150,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.89", ] [[package]] @@ -255,7 +255,7 @@ checksum = "57d123550fa8d071b7255cb0cc04dc302baa6c8c4a79f55701552684d8399bce" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.89", ] [[package]] @@ -323,6 +323,15 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" +[[package]] +name = "bincode" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +dependencies = [ + "serde", +] + [[package]] name = "bindgen" version = "0.69.4" @@ -336,16 +345,22 @@ dependencies = [ "lazy_static", "lazycell", "log", - "prettyplease", + "prettyplease 0.2.22", "proc-macro2", "quote", "regex", "rustc-hash 1.1.0", "shlex", - "syn", + "syn 2.0.89", "which", ] +[[package]] +name = "bit-vec" +version = "0.6.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" + [[package]] name = "bitflags" version = "1.3.2" @@ -384,9 +399,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.7.2" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" +checksum = "9ac0150caa2ae65ca5bd83f25c7de183dea78d4d366469f148435e2acfbad0da" dependencies = [ "serde", ] @@ -546,6 +561,21 @@ dependencies = [ "libc", ] +[[package]] +name = "crc" +version = "3.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69e6e4d7b33a94f0991c26729976b10ebde1d34c3ee82408fb536164fa10d636" +dependencies = [ + "crc-catalog", +] + +[[package]] +name = "crc-catalog" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" + [[package]] name = "crunchy" version = "0.2.2" @@ -585,7 +615,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.89", ] [[package]] @@ -794,7 +824,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.89", ] [[package]] @@ -809,6 +839,12 @@ version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" +[[package]] +name = "futures-timer" +version = "3.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" + [[package]] name = "futures-util" version = "0.3.31" @@ -940,6 +976,12 @@ dependencies = [ "http 1.1.0", ] +[[package]] +name = 
"heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + [[package]] name = "heck" version = "0.5.0" @@ -1219,6 +1261,15 @@ version = "2.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + [[package]] name = "itertools" version = "0.12.1" @@ -1284,7 +1335,7 @@ dependencies = [ "pest_derive", "regex", "serde_json", - "thiserror", + "thiserror 1.0.64", ] [[package]] @@ -1341,7 +1392,7 @@ dependencies = [ "serde", "serde_json", "serde_yaml", - "thiserror", + "thiserror 1.0.64", "tokio", "tokio-util", "tower 0.4.13", @@ -1362,7 +1413,7 @@ dependencies = [ "serde", "serde-value", "serde_json", - "thiserror", + "thiserror 1.0.64", ] [[package]] @@ -1491,6 +1542,12 @@ version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9be0862c1b3f26a88803c4a49de6889c10e608b3ee9344e6ef5b45fb37ad3d1" +[[package]] +name = "multimap" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" + [[package]] name = "multimap" version = "0.10.0" @@ -1597,11 +1654,11 @@ dependencies = [ "chrono", "futures-util", "hyper-util", - "prost", - "prost-types", + "prost 0.13.3", + "prost-types 0.13.3", "serde", "serde_json", - "thiserror", + "thiserror 1.0.64", "tokio", "tokio-stream", "tokio-util", @@ -1629,12 +1686,14 @@ dependencies = [ "numaflow 0.1.1", "numaflow-models", "numaflow-pb", + "numaflow-pulsar", "parking_lot", "pep440_rs", "pin-project", "prometheus-client", - "prost", - "prost-types", + "prost 0.13.3", 
+ "prost-types 0.13.3", + "pulsar", "rand", "rcgen", "rustls 0.23.14", @@ -1642,16 +1701,14 @@ dependencies = [ "serde", "serde_json", "tempfile", - "thiserror", + "thiserror 1.0.64", "tokio", "tokio-stream", "tokio-util", "tonic", "tower 0.4.13", "tracing", - "tracing-subscriber", "trait-variant", - "uuid", ] [[package]] @@ -1672,13 +1729,29 @@ dependencies = [ name = "numaflow-pb" version = "0.1.0" dependencies = [ - "prost", - "prost-build", - "prost-types", + "prost 0.13.3", + "prost-build 0.13.3", + "prost-types 0.13.3", "tonic", "tonic-build", ] +[[package]] +name = "numaflow-pulsar" +version = "0.1.0" +dependencies = [ + "bincode", + "bytes", + "chrono", + "prost 0.11.9", + "pulsar", + "serde", + "thiserror 2.0.3", + "tokio", + "tonic", + "tracing", +] + [[package]] name = "object" version = "0.36.5" @@ -1803,7 +1876,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdbef9d1d47087a895abd220ed25eb4ad973a5e26f6a4367b038c25e28dfc2d9" dependencies = [ "memchr", - "thiserror", + "thiserror 1.0.64", "ucd-trie", ] @@ -1827,7 +1900,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn", + "syn 2.0.89", ] [[package]] @@ -1868,7 +1941,7 @@ checksum = "a4502d8515ca9f32f1fb543d987f63d95a14934883db45bdb48060b6b69257f8" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.89", ] [[package]] @@ -1914,6 +1987,16 @@ dependencies = [ "zerocopy", ] +[[package]] +name = "prettyplease" +version = "0.1.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" +dependencies = [ + "proc-macro2", + "syn 1.0.109", +] + [[package]] name = "prettyplease" version = "0.2.22" @@ -1921,14 +2004,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba" dependencies = [ "proc-macro2", - "syn", + "syn 2.0.89", ] [[package]] name = "proc-macro2" -version = 
"1.0.86" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" dependencies = [ "unicode-ident", ] @@ -1953,7 +2036,17 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.89", +] + +[[package]] +name = "prost" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" +dependencies = [ + "bytes", + "prost-derive 0.11.9", ] [[package]] @@ -1963,7 +2056,29 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b0487d90e047de87f984913713b85c601c05609aad5b0df4b4573fbf69aa13f" dependencies = [ "bytes", - "prost-derive", + "prost-derive 0.13.3", +] + +[[package]] +name = "prost-build" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" +dependencies = [ + "bytes", + "heck 0.4.1", + "itertools 0.10.5", + "lazy_static", + "log", + "multimap 0.8.3", + "petgraph", + "prettyplease 0.1.25", + "prost 0.11.9", + "prost-types 0.11.9", + "regex", + "syn 1.0.109", + "tempfile", + "which", ] [[package]] @@ -1973,20 +2088,33 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c1318b19085f08681016926435853bbf7858f9c082d0999b80550ff5d9abe15" dependencies = [ "bytes", - "heck", + "heck 0.5.0", "itertools 0.13.0", "log", - "multimap", + "multimap 0.10.0", "once_cell", "petgraph", - "prettyplease", - "prost", - "prost-types", + "prettyplease 0.2.22", + "prost 0.13.3", + "prost-types 0.13.3", "regex", - "syn", + "syn 2.0.89", "tempfile", ] +[[package]] +name = "prost-derive" +version = "0.11.9" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" +dependencies = [ + "anyhow", + "itertools 0.10.5", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "prost-derive" version = "0.13.3" @@ -1997,7 +2125,16 @@ dependencies = [ "itertools 0.13.0", "proc-macro2", "quote", - "syn", + "syn 2.0.89", +] + +[[package]] +name = "prost-types" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "213622a1460818959ac1181aaeb2dc9c7f63df720db7d788b3e24eacd1983e13" +dependencies = [ + "prost 0.11.9", ] [[package]] @@ -2006,7 +2143,38 @@ version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4759aa0d3a6232fb8dbdb97b61de2c20047c68aca932c7ed76da9d788508d670" dependencies = [ - "prost", + "prost 0.13.3", +] + +[[package]] +name = "pulsar" +version = "6.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7f3541ff84e39da334979ac4bf171e0f277f4f782603aeae65bf5795dc7275a" +dependencies = [ + "async-trait", + "bit-vec", + "bytes", + "chrono", + "crc", + "futures", + "futures-io", + "futures-timer", + "log", + "nom", + "pem", + "prost 0.11.9", + "prost-build 0.11.9", + "prost-derive 0.11.9", + "rand", + "regex", + "rustls 0.21.12", + "tokio", + "tokio-rustls 0.24.1", + "tokio-util", + "url", + "uuid", + "webpki-roots 0.25.4", ] [[package]] @@ -2022,7 +2190,7 @@ dependencies = [ "rustc-hash 2.0.0", "rustls 0.23.14", "socket2", - "thiserror", + "thiserror 1.0.64", "tokio", "tracing", ] @@ -2039,7 +2207,7 @@ dependencies = [ "rustc-hash 2.0.0", "rustls 0.23.14", "slab", - "thiserror", + "thiserror 1.0.64", "tinyvec", "tracing", ] @@ -2526,9 +2694,9 @@ checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" [[package]] name = "serde" -version = "1.0.210" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" +checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f" dependencies = [ "serde_derive", ] @@ -2545,13 +2713,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.210" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" +checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.89", ] [[package]] @@ -2593,7 +2761,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.89", ] [[package]] @@ -2663,7 +2831,7 @@ dependencies = [ "serde", "serde_json", "tempfile", - "thiserror", + "thiserror 1.0.64", "tokio", "tower 0.4.13", "tower-http", @@ -2796,9 +2964,20 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "syn" -version = "2.0.79" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590" +checksum = "44d46482f1c1c87acd84dea20c1bf5ebff4c757009ed6bf19cfd36fb10e92c4e" dependencies = [ "proc-macro2", "quote", @@ -2860,7 +3039,16 @@ version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" dependencies = [ - "thiserror-impl", + "thiserror-impl 1.0.64", +] + +[[package]] +name = "thiserror" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "c006c85c7651b3cf2ada4584faa36773bd07bac24acfb39f3c431b36d7e667aa" +dependencies = [ + "thiserror-impl 2.0.3", ] [[package]] @@ -2871,7 +3059,18 @@ checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.89", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", ] [[package]] @@ -2941,9 +3140,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.40.0" +version = "1.41.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" +checksum = "22cfb5bee7a6a52939ca9224d6ac897bb669134078daa8735560897f69de4d33" dependencies = [ "backtrace", "bytes", @@ -2965,7 +3164,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.89", ] [[package]] @@ -3078,7 +3277,7 @@ dependencies = [ "hyper-util", "percent-encoding", "pin-project", - "prost", + "prost 0.13.3", "socket2", "tokio", "tokio-stream", @@ -3094,12 +3293,12 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9557ce109ea773b399c9b9e5dca39294110b74f1f342cb347a80d1fce8c26a11" dependencies = [ - "prettyplease", + "prettyplease 0.2.22", "proc-macro2", - "prost-build", - "prost-types", + "prost-build 0.13.3", + "prost-types 0.13.3", "quote", - "syn", + "syn 2.0.89", ] [[package]] @@ -3190,7 +3389,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.89", ] [[package]] @@ -3240,7 +3439,7 @@ checksum = 
"70977707304198400eb4835a78f6a9f928bf41bba420deb8fdb175cd965d77a7" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.89", ] [[package]] @@ -3350,6 +3549,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" dependencies = [ "getrandom", + "rand", "serde", ] @@ -3402,7 +3602,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn", + "syn 2.0.89", "wasm-bindgen-shared", ] @@ -3436,7 +3636,7 @@ checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.89", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -3748,7 +3948,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.89", ] [[package]] diff --git a/rust/Cargo.toml b/rust/Cargo.toml index 99df3be31d..b9a11c653f 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -5,6 +5,7 @@ workspace = { members = [ "serving", "numaflow-core", "numaflow-pb", + "numaflow-extns/pulsar", ] } [[bin]] @@ -16,7 +17,6 @@ name = "numaflow" version = "0.1.0" edition = "2021" - [dependencies] tokio = "1.39.2" backoff = { path = "backoff" } diff --git a/rust/numaflow-core/Cargo.toml b/rust/numaflow-core/Cargo.toml index 179da28dd8..6df96fb14d 100644 --- a/rust/numaflow-core/Cargo.toml +++ b/rust/numaflow-core/Cargo.toml @@ -5,7 +5,8 @@ edition = "2021" [features] nats-tests = [] -all-tests = ["nats-tests"] +pulsar-tests = [] +all-tests = ["nats-tests", "pulsar-tests"] [dependencies] axum = "0.7.5" @@ -21,10 +22,8 @@ prost = "0.13.2" prost-types = "0.13.1" chrono = "0.4.31" base64 = "0.22.1" -tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } hyper-util = "0.1.6" tower = "0.4.13" -uuid = { version = "1.10.0", features = ["v4"] } serde_json = "1.0.122" numaflow-models = { path = "../numaflow-models" } numaflow-pb = { path = 
"../numaflow-pb" } @@ -43,10 +42,12 @@ futures = "0.3.30" pin-project = "1.1.5" rand = "0.8.5" async-nats = "0.37.0" +numaflow-pulsar = {path = "../numaflow-extns/pulsar"} [dev-dependencies] tempfile = "3.11.0" numaflow = { git = "https://github.com/numaproj/numaflow-rs.git", rev = "ddd879588e11455921f1ca958ea2b3c076689293" } +pulsar = {version = "6.3.0", default-features = false, features = ["tokio-rustls-runtime"]} [build-dependencies] diff --git a/rust/numaflow-core/src/config/components.rs b/rust/numaflow-core/src/config/components.rs index ce62407f11..840ad39e59 100644 --- a/rust/numaflow-core/src/config/components.rs +++ b/rust/numaflow-core/src/config/components.rs @@ -3,14 +3,14 @@ pub(crate) mod source { const DEFAULT_SOURCE_SOCKET: &str = "/var/run/numaflow/source.sock"; const DEFAULT_SOURCE_SERVER_INFO_FILE: &str = "/var/run/numaflow/sourcer-server-info"; - use std::time::Duration; - - use bytes::Bytes; - use numaflow_models::models::Source; - use tracing::warn; + use std::{fmt::Debug, time::Duration}; use crate::error::Error; use crate::Result; + use bytes::Bytes; + use numaflow_models::models::{GeneratorSource, PulsarSource, Source}; + use numaflow_pulsar::source::{PulsarAuth, PulsarSourceConfig}; + use tracing::warn; #[derive(Debug, Clone, PartialEq)] pub(crate) struct SourceConfig { @@ -29,51 +29,87 @@ pub(crate) mod source { pub(crate) enum SourceType { Generator(GeneratorConfig), UserDefined(UserDefinedConfig), + Pulsar(PulsarSourceConfig), + } + + impl From> for SourceType { + fn from(generator: Box) -> Self { + let mut generator_config = GeneratorConfig::default(); + + if let Some(value_blob) = &generator.value_blob { + generator_config.content = Bytes::from(value_blob.clone()); + } + + if let Some(msg_size) = generator.msg_size { + if msg_size >= 0 { + generator_config.msg_size_bytes = msg_size as u32; + } else { + warn!("'msgSize' cannot be negative, using default value (8 bytes)"); + } + } + + generator_config.value = generator.value; + 
generator_config.rpu = generator.rpu.unwrap_or(1) as usize; + generator_config.duration = + generator.duration.map_or(Duration::from_millis(1000), |d| { + std::time::Duration::from(d) + }); + generator_config.key_count = generator + .key_count + .map_or(0, |kc| std::cmp::min(kc, u8::MAX as i32) as u8); + generator_config.jitter = generator + .jitter + .map_or(Duration::from_secs(0), std::time::Duration::from); + + SourceType::Generator(generator_config) + } + } + + impl TryFrom> for SourceType { + type Error = Error; + fn try_from(value: Box) -> Result { + let auth: Option = match value.auth { + Some(auth) => 'out: { + let Some(token) = auth.token else { + tracing::warn!("JWT Token authentication is specified, but token is empty"); + break 'out None; + }; + let secret = + crate::shared::utils::get_secret_from_volume(&token.name, &token.key) + .unwrap(); + Some(PulsarAuth::JWT(secret)) + } + None => None, + }; + let pulsar_config = PulsarSourceConfig { + pulsar_server_addr: value.server_addr, + topic: value.topic, + consumer_name: value.consumer_name, + subscription: value.subscription_name, + max_unack: value.max_unack.unwrap_or(1000) as usize, + auth, + }; + Ok(SourceType::Pulsar(pulsar_config)) + } } impl TryFrom> for SourceType { type Error = Error; - fn try_from(source: Box) -> Result { - source - .udsource - .as_ref() - .map(|_| Ok(SourceType::UserDefined(UserDefinedConfig::default()))) - .or_else(|| { - source.generator.as_ref().map(|generator| { - let mut generator_config = GeneratorConfig::default(); - - if let Some(value_blob) = &generator.value_blob { - generator_config.content = Bytes::from(value_blob.clone()); - } - - if let Some(msg_size) = generator.msg_size { - if msg_size >= 0 { - generator_config.msg_size_bytes = msg_size as u32; - } else { - warn!( - "'msgSize' cannot be negative, using default value (8 bytes)" - ); - } - } - - generator_config.value = generator.value; - generator_config.rpu = generator.rpu.unwrap_or(1) as usize; - 
generator_config.duration = - generator.duration.map_or(Duration::from_millis(1000), |d| { - std::time::Duration::from(d) - }); - generator_config.key_count = generator - .key_count - .map_or(0, |kc| std::cmp::min(kc, u8::MAX as i32) as u8); - generator_config.jitter = generator - .jitter - .map_or(Duration::from_secs(0), std::time::Duration::from); - - Ok(SourceType::Generator(generator_config)) - }) - }) - .ok_or_else(|| Error::Config("Source type not found".to_string()))? + fn try_from(mut source: Box) -> Result { + if let Some(generator) = source.generator.take() { + return Ok(generator.into()); + } + + if source.udsource.is_some() { + return Ok(SourceType::UserDefined(UserDefinedConfig::default())); + } + + if let Some(pulsar) = source.pulsar.take() { + return pulsar.try_into(); + } + + Err(Error::Config(format!("Invalid source type: {source:?}"))) } } diff --git a/rust/numaflow-core/src/config/pipeline.rs b/rust/numaflow-core/src/config/pipeline.rs index 45d1f59c66..4767c0aa70 100644 --- a/rust/numaflow-core/src/config/pipeline.rs +++ b/rust/numaflow-core/src/config/pipeline.rs @@ -284,6 +284,8 @@ impl PipelineConfig { #[cfg(test)] mod tests { + use numaflow_pulsar::source::PulsarSourceConfig; + use super::*; use crate::config::components::sink::{BlackholeConfig, LogConfig, SinkType}; use crate::config::components::source::{GeneratorConfig, SourceType}; @@ -426,4 +428,56 @@ mod tests { assert_eq!(pipeline_config, expected); } + + #[test] + fn test_pipeline_config_pulsar_source() { + let pipeline_cfg_base64 = 
"eyJtZXRhZGF0YSI6eyJuYW1lIjoic2ltcGxlLXBpcGVsaW5lLWluIiwibmFtZXNwYWNlIjoiZGVmYXVsdCIsImNyZWF0aW9uVGltZXN0YW1wIjpudWxsfSwic3BlYyI6eyJuYW1lIjoiaW4iLCJzb3VyY2UiOnsicHVsc2FyIjp7InNlcnZlckFkZHIiOiJwdWxzYXI6Ly9wdWxzYXItc2VydmljZTo2NjUwIiwidG9waWMiOiJ0ZXN0X3BlcnNpc3RlbnQiLCJjb25zdW1lck5hbWUiOiJteV9wZXJzaXN0ZW50X2NvbnN1bWVyIiwic3Vic2NyaXB0aW9uTmFtZSI6Im15X3BlcnNpc3RlbnRfc3Vic2NyaXB0aW9uIn19LCJsaW1pdHMiOnsicmVhZEJhdGNoU2l6ZSI6NTAsInJlYWRUaW1lb3V0IjoiMXMiLCJidWZmZXJNYXhMZW5ndGgiOjMwMDAwLCJidWZmZXJVc2FnZUxpbWl0Ijo4MH0sInNjYWxlIjp7Im1pbiI6MSwibWF4IjoxfSwidXBkYXRlU3RyYXRlZ3kiOnsidHlwZSI6IlJvbGxpbmdVcGRhdGUiLCJyb2xsaW5nVXBkYXRlIjp7Im1heFVuYXZhaWxhYmxlIjoiMjUlIn19LCJwaXBlbGluZU5hbWUiOiJzaW1wbGUtcGlwZWxpbmUiLCJpbnRlclN0ZXBCdWZmZXJTZXJ2aWNlTmFtZSI6IiIsInJlcGxpY2FzIjowLCJ0b0VkZ2VzIjpbeyJmcm9tIjoiaW4iLCJ0byI6Im91dCIsImNvbmRpdGlvbnMiOm51bGwsImZyb21WZXJ0ZXhUeXBlIjoiU291cmNlIiwiZnJvbVZlcnRleFBhcnRpdGlvbkNvdW50IjoxLCJmcm9tVmVydGV4TGltaXRzIjp7InJlYWRCYXRjaFNpemUiOjUwLCJyZWFkVGltZW91dCI6IjFzIiwiYnVmZmVyTWF4TGVuZ3RoIjozMDAwMCwiYnVmZmVyVXNhZ2VMaW1pdCI6ODB9LCJ0b1ZlcnRleFR5cGUiOiJTaW5rIiwidG9WZXJ0ZXhQYXJ0aXRpb25Db3VudCI6MSwidG9WZXJ0ZXhMaW1pdHMiOnsicmVhZEJhdGNoU2l6ZSI6NTAsInJlYWRUaW1lb3V0IjoiMXMiLCJidWZmZXJNYXhMZW5ndGgiOjMwMDAwLCJidWZmZXJVc2FnZUxpbWl0Ijo4MH19XSwid2F0ZXJtYXJrIjp7Im1heERlbGF5IjoiMHMifX0sInN0YXR1cyI6eyJwaGFzZSI6IiIsInJlcGxpY2FzIjowLCJkZXNpcmVkUmVwbGljYXMiOjAsImxhc3RTY2FsZWRBdCI6bnVsbH19"; + + let env_vars = [("NUMAFLOW_ISBSVC_JETSTREAM_URL", "localhost:4222")]; + let pipeline_config = + PipelineConfig::load(pipeline_cfg_base64.to_string(), env_vars).unwrap(); + + let expected = PipelineConfig { + pipeline_name: "simple-pipeline".to_string(), + vertex_name: "in".to_string(), + replica: 0, + batch_size: 50, + paf_batch_size: 30000, + read_timeout: Duration::from_secs(1), + js_client_config: isb::jetstream::ClientConfig { + url: "localhost:4222".to_string(), + user: None, + password: None, + }, + from_vertex_config: vec![], + to_vertex_config: vec![ToVertexConfig { + name: 
"out".to_string(), + writer_config: BufferWriterConfig { + streams: vec![("default-simple-pipeline-out-0".to_string(), 0)], + partitions: 1, + max_length: 30000, + usage_limit: 0.8, + ..Default::default() + }, + partitions: 1, + conditions: None, + }], + vertex_config: VertexType::Source(SourceVtxConfig { + source_config: SourceConfig { + source_type: SourceType::Pulsar(PulsarSourceConfig { + pulsar_server_addr: "pulsar://pulsar-service:6650".to_string(), + topic: "test_persistent".to_string(), + consumer_name: "my_persistent_consumer".to_string(), + subscription: "my_persistent_subscription".to_string(), + max_unack: 1000, + auth: None, + }), + }, + transformer_config: None, + }), + metrics_config: Default::default(), + }; + + assert_eq!(pipeline_config, expected); + } } diff --git a/rust/numaflow-core/src/error.rs b/rust/numaflow-core/src/error.rs index b64896f26b..27cbcb6fc9 100644 --- a/rust/numaflow-core/src/error.rs +++ b/rust/numaflow-core/src/error.rs @@ -40,6 +40,15 @@ pub enum Error { #[error("OneShot Receiver Error - {0}")] ActorPatternRecv(String), + + #[error("Ack Pending Exceeded, pending={0}")] + AckPendingExceeded(usize), + + #[error("Offset (id={0}) not found to Ack")] + AckOffsetNotFound(String), + + #[error("Lag cannot be fetched, {0}")] + Lag(String), } impl From for Error { diff --git a/rust/numaflow-core/src/message.rs b/rust/numaflow-core/src/message.rs index f24212967f..3d8407ec59 100644 --- a/rust/numaflow-core/src/message.rs +++ b/rust/numaflow-core/src/message.rs @@ -122,8 +122,8 @@ impl TryFrom for Message { /// IntOffset is integer based offset enum type. 
#[derive(Debug, Clone, Serialize, Deserialize)] pub struct IntOffset { - offset: u64, - partition_idx: u16, + pub(crate) offset: u64, + pub(crate) partition_idx: u16, } impl IntOffset { diff --git a/rust/numaflow-core/src/monovertex.rs b/rust/numaflow-core/src/monovertex.rs index 185c96bfe9..8e6d9cad19 100644 --- a/rust/numaflow-core/src/monovertex.rs +++ b/rust/numaflow-core/src/monovertex.rs @@ -18,6 +18,7 @@ use crate::shared::utils::{ }; use crate::sink::{SinkClientType, SinkHandle}; use crate::source::generator::new_generator; +use crate::source::pulsar::new_pulsar_source; use crate::source::user_defined::new_source; use crate::source::{SourceHandle, SourceType}; use crate::transformer::user_defined::SourceTransformHandle; @@ -219,25 +220,35 @@ async fn fetch_source( config: &MonovertexConfig, source_grpc_client: &mut Option>, ) -> crate::Result { - // check whether the source grpc client is provided, this happens only of the source is a - // user defined source - if let Some(source_grpc_client) = source_grpc_client.clone() { - let (source_read, source_ack, lag_reader) = - new_source(source_grpc_client, config.batch_size, config.read_timeout).await?; - return Ok(SourceType::UserDefinedSource( - source_read, - source_ack, - lag_reader, - )); - } - - // now that we know it is not a user-defined source, it has to be a built-in - if let source::SourceType::Generator(generator_config) = &config.source_config.source_type { - let (source_read, source_ack, lag_reader) = - new_generator(generator_config.clone(), config.batch_size)?; - Ok(SourceType::Generator(source_read, source_ack, lag_reader)) - } else { - Err(Error::Config("No valid source configuration found".into())) + match &config.source_config.source_type { + source::SourceType::Generator(generator_config) => { + let (source_read, source_ack, lag_reader) = + new_generator(generator_config.clone(), config.batch_size)?; + Ok(SourceType::Generator(source_read, source_ack, lag_reader)) + } + 
source::SourceType::UserDefined(_) => { + let Some(source_grpc_client) = source_grpc_client.clone() else { + return Err(Error::Config( + "Configuration type is user-defined, however no grpc client is provided".into(), + )); + }; + let (source_read, source_ack, lag_reader) = + new_source(source_grpc_client, config.batch_size, config.read_timeout).await?; + Ok(SourceType::UserDefinedSource( + source_read, + source_ack, + lag_reader, + )) + } + source::SourceType::Pulsar(pulsar_config) => { + let pulsar = new_pulsar_source( + pulsar_config.clone(), + config.batch_size, + config.read_timeout, + ) + .await?; + Ok(SourceType::Pulsar(pulsar)) + } } } diff --git a/rust/numaflow-core/src/pipeline.rs b/rust/numaflow-core/src/pipeline.rs index 52719c8827..ef49bdc759 100644 --- a/rust/numaflow-core/src/pipeline.rs +++ b/rust/numaflow-core/src/pipeline.rs @@ -1,14 +1,3 @@ -use async_nats::jetstream::Context; -use async_nats::{jetstream, ConnectOptions}; -use futures::future::try_join_all; -use numaflow_pb::clients::sink::sink_client::SinkClient; -use numaflow_pb::clients::source::source_client::SourceClient; -use numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; -use std::collections::HashMap; -use std::time::Duration; -use tokio_util::sync::CancellationToken; -use tonic::transport::Channel; - use crate::config::components::source::SourceType; use crate::config::pipeline; use crate::config::pipeline::PipelineConfig; @@ -22,9 +11,20 @@ use crate::shared::utils::{ }; use crate::sink::SinkWriter; use crate::source::generator::new_generator; +use crate::source::pulsar::new_pulsar_source; use crate::source::user_defined::new_source; use crate::transformer::user_defined::SourceTransformHandle; use crate::{config, error, source, Result}; +use async_nats::jetstream::Context; +use async_nats::{jetstream, ConnectOptions}; +use futures::future::try_join_all; +use numaflow_pb::clients::sink::sink_client::SinkClient; +use 
numaflow_pb::clients::source::source_client::SourceClient; +use numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; +use std::collections::HashMap; +use std::time::Duration; +use tokio_util::sync::CancellationToken; +use tonic::transport::Channel; mod forwarder; mod isb; @@ -256,6 +256,15 @@ async fn create_source_type( Some(source_grpc_client), )) } + SourceType::Pulsar(pulsar_config) => { + let pulsar_source = new_pulsar_source( + pulsar_config.clone(), + config.batch_size, + config.read_timeout, + ) + .await?; + Ok((source::SourceType::Pulsar(pulsar_source), None)) + } } } /// Creates a transformer if it is configured in the pipeline @@ -296,7 +305,6 @@ async fn create_js_context(config: pipeline::isb::jetstream::ClientConfig) -> Re .max_reconnects(None) // -1 for unlimited reconnects .ping_interval(Duration::from_secs(3)) .max_reconnects(None) - .ping_interval(Duration::from_secs(3)) .retry_on_initial_connect(); if let (Some(user), Some(password)) = (config.user, config.password) { diff --git a/rust/numaflow-core/src/shared/utils.rs b/rust/numaflow-core/src/shared/utils.rs index a6cea7eaed..c035c67fce 100644 --- a/rust/numaflow-core/src/shared/utils.rs +++ b/rust/numaflow-core/src/shared/utils.rs @@ -207,6 +207,15 @@ pub(crate) async fn create_sink_handle( } } +// Retrieve value from mounted secret volume +// "/var/numaflow/secrets/${secretRef.name}/${secretRef.key}" is expected to be the file path +pub(crate) fn get_secret_from_volume(name: &str, key: &str) -> std::result::Result { + let path = format!("/var/numaflow/secrets/{name}/{key}"); + let val = std::fs::read_to_string(path.clone()) + .map_err(|e| format!("Reading secret from file {path}: {e:?}"))?; + Ok(val.trim().into()) +} + #[cfg(test)] mod tests { use numaflow::source::{Message, Offset, SourceReadRequest}; diff --git a/rust/numaflow-core/src/source.rs b/rust/numaflow-core/src/source.rs index 2c7bc0b83c..f851268b72 100644 --- a/rust/numaflow-core/src/source.rs 
+++ b/rust/numaflow-core/src/source.rs @@ -1,9 +1,9 @@ -use tokio::sync::{mpsc, oneshot}; - use crate::{ message::{Message, Offset}, reader::LagReader, }; +use numaflow_pulsar::source::PulsarSource; +use tokio::sync::{mpsc, oneshot}; /// [User-Defined Source] extends Numaflow to add custom sources supported outside the builtins. /// @@ -15,6 +15,11 @@ pub(crate) mod user_defined; /// [Generator]: https://numaflow.numaproj.io/user-guide/sources/generator/ pub(crate) mod generator; +/// [Pulsar] is a builtin to ingest data from a Pulsar topic +/// +/// [Pulsar]: https://numaflow.numaproj.io/user-guide/sources/pulsar/ +pub(crate) mod pulsar; + /// Set of Read related items that has to be implemented to become a Source. pub(crate) trait SourceReader { #[allow(dead_code)] @@ -123,6 +128,19 @@ impl SourceHandle { } }); } + SourceType::Pulsar(pulsar_source) => { + tokio::spawn(async move { + let mut actor = SourceActor::new( + receiver, + pulsar_source.clone(), + pulsar_source.clone(), + pulsar_source, + ); + while let Some(msg) = actor.receiver.recv().await { + actor.handle_message(msg).await; + } + }); + } }; Self { sender } } @@ -175,4 +193,5 @@ pub(crate) enum SourceType { generator::GeneratorAck, generator::GeneratorLagReader, ), + Pulsar(PulsarSource), } diff --git a/rust/numaflow-core/src/source/pulsar.rs b/rust/numaflow-core/src/source/pulsar.rs new file mode 100644 index 0000000000..6d8d1d33f3 --- /dev/null +++ b/rust/numaflow-core/src/source/pulsar.rs @@ -0,0 +1,157 @@ +use std::time::Duration; + +use crate::error::Error; +use crate::message::{get_vertex_name, IntOffset, Message, MessageID, Offset}; +use crate::source; +use numaflow_pulsar::source::{PulsarMessage, PulsarSource, PulsarSourceConfig}; + +impl TryFrom for Message { + type Error = Error; + + fn try_from(message: PulsarMessage) -> crate::Result { + let offset = Offset::Int(IntOffset::new(message.offset, 1)); // FIXME: partition id + + Ok(Message { + keys: vec![message.key], + value: message.payload, + 
offset: Some(offset.clone()), + event_time: message.event_time, + id: MessageID { + vertex_name: get_vertex_name().to_string(), + offset: offset.to_string(), + index: 0, + }, + headers: message.headers, + }) + } +} + +impl From for Error { + fn from(value: numaflow_pulsar::Error) -> Self { + match value { + numaflow_pulsar::Error::Pulsar(e) => Error::Source(e.to_string()), + numaflow_pulsar::Error::UnknownOffset(_) => Error::Source(value.to_string()), + numaflow_pulsar::Error::AckPendingExceeded(pending) => { + Error::AckPendingExceeded(pending) + } + numaflow_pulsar::Error::ActorTaskTerminated(_) => { + Error::ActorPatternRecv(value.to_string()) + } + numaflow_pulsar::Error::Other(e) => Error::Source(e), + } + } +} + +pub(crate) async fn new_pulsar_source( + cfg: PulsarSourceConfig, + batch_size: usize, + timeout: Duration, +) -> crate::Result { + Ok(PulsarSource::new(cfg, batch_size, timeout).await?) +} + +impl source::SourceReader for PulsarSource { + fn name(&self) -> &'static str { + "Pulsar" + } + + async fn read(&mut self) -> crate::Result> { + self.read_messages() + .await? + .into_iter() + .map(|msg| msg.try_into()) + .collect() + } + + fn partitions(&self) -> Vec { + Self::partitions(self) + } +} + +impl source::SourceAcker for PulsarSource { + async fn ack(&mut self, offsets: Vec) -> crate::error::Result<()> { + let mut pulsar_offsets = Vec::with_capacity(offsets.len()); + for offset in offsets { + let Offset::Int(int_offset) = offset else { + return Err(Error::Source(format!( + "Expected Offset::Int type for Pulsar. 
offset={offset:?}" + ))); + }; + pulsar_offsets.push(int_offset.offset); + } + self.ack_offsets(pulsar_offsets).await.map_err(Into::into) + } +} + +impl source::LagReader for PulsarSource { + async fn pending(&mut self) -> crate::error::Result> { + Ok(self.pending_count().await) + } +} + +#[cfg(feature = "pulsar-tests")] +#[cfg(test)] +mod tests { + use pulsar::{producer, proto, Pulsar, TokioExecutor}; + use source::{LagReader, SourceAcker, SourceReader}; + + use super::*; + + type Result = std::result::Result>; + + #[tokio::test] + async fn test_pulsar_source() -> Result<()> { + let cfg = PulsarSourceConfig { + pulsar_server_addr: "pulsar://localhost:6650".into(), + topic: "persistent://public/default/test_persistent".into(), + consumer_name: "test".into(), + subscription: "test".into(), + max_unack: 100, + auth: None, + }; + let mut pulsar = new_pulsar_source(cfg, 10, Duration::from_millis(200)).await?; + assert_eq!(pulsar.name(), "Pulsar"); + + // Read should return before the timeout + let msgs = tokio::time::timeout(Duration::from_millis(400), pulsar.read_messages()).await; + assert!(msgs.is_ok()); + + assert!(pulsar.pending().await.unwrap().is_none()); + + let pulsar_producer: Pulsar<_> = Pulsar::builder("pulsar://localhost:6650", TokioExecutor) + .build() + .await + .unwrap(); + let mut pulsar_producer = pulsar_producer + .producer() + .with_topic("persistent://public/default/test_persistent") + .with_name("my producer") + .with_options(producer::ProducerOptions { + schema: Some(proto::Schema { + r#type: proto::schema::Type::String as i32, + ..Default::default() + }), + ..Default::default() + }) + .build() + .await + .unwrap(); + + let data: Vec = (0..10).map(|i| format!("test_data_{i}")).collect(); + let send_futures = pulsar_producer + .send_all(data) + .await + .map_err(|e| format!("Sending messages to Pulsar: {e:?}"))?; + for fut in send_futures { + fut.await?; + } + + let messages = pulsar.read().await?; + assert_eq!(messages.len(), 10); + + let 
offsets: Vec = messages.into_iter().map(|m| m.offset.unwrap()).collect(); + pulsar.ack(offsets).await?; + + Ok(()) + } +} diff --git a/rust/numaflow-extns/pulsar/Cargo.toml b/rust/numaflow-extns/pulsar/Cargo.toml new file mode 100644 index 0000000000..cc91a495df --- /dev/null +++ b/rust/numaflow-extns/pulsar/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "numaflow-pulsar" +version = "0.1.0" +edition = "2021" + +[lints.rust] +unsafe_code = "forbid" +unused_must_use = "forbid" + +[lints.clippy] +enum_glob_use = "deny" + +[dependencies] +prost = "0.11.9" +tokio = "1.41.1" +tonic = "0.12.3" +serde = { version = "1.0.204", features = ["derive"] } +tracing = "0.1.40" +bincode = "1.3.3" +chrono = "0.4.38" +# Rustls doesn't allow accepting self-signed certs: https://github.com/streamnative/pulsar-rs/blob/715411cb365932c379d4b5d0a8fde2ac46c54055/src/connection.rs#L912 +pulsar = {version = "6.3.0", default-features = false, features = ["tokio-rustls-runtime"]} +bytes = "1.7.1" +thiserror = "2.0.3" diff --git a/rust/numaflow-extns/pulsar/src/lib.rs b/rust/numaflow-extns/pulsar/src/lib.rs new file mode 100644 index 0000000000..f6e8306a4e --- /dev/null +++ b/rust/numaflow-extns/pulsar/src/lib.rs @@ -0,0 +1,35 @@ +use tokio::sync::oneshot; + +pub mod source; + +pub type Result = core::result::Result; + +#[derive(thiserror::Error, Debug)] +pub enum Error { + #[error("metrics Error - {0}")] + Pulsar(pulsar::Error), + + #[error("Messages to be acknowledged has reached its configured limit. Pending={0}")] + AckPendingExceeded(usize), + + #[error("Failed to receive message from channel. Actor task is terminated: {0:?}")] + ActorTaskTerminated(oneshot::error::RecvError), + + #[error("Received unknown offset for acknowledgement. 
offset={0}")] + UnknownOffset(u64), + + #[error("{0}")] + Other(String), +} + +impl From for Error { + fn from(value: pulsar::Error) -> Self { + Error::Pulsar(value) + } +} + +impl From for Error { + fn from(value: String) -> Self { + Error::Other(value) + } +} diff --git a/rust/numaflow-extns/pulsar/src/source.rs b/rust/numaflow-extns/pulsar/src/source.rs new file mode 100644 index 0000000000..7d2c13ed1d --- /dev/null +++ b/rust/numaflow-extns/pulsar/src/source.rs @@ -0,0 +1,298 @@ +use std::collections::BTreeMap; +use std::{collections::HashMap, time::Duration}; + +use bytes::Bytes; +use chrono::{DateTime, Utc}; +use pulsar::Authentication; +use pulsar::{proto::MessageIdData, Consumer, ConsumerOptions, Pulsar, SubType, TokioExecutor}; +use tokio::time::Instant; +use tokio::{ + sync::{mpsc, oneshot}, + time, +}; +use tonic::codegen::tokio_stream::StreamExt; + +use crate::{Error, Result}; + +#[derive(Debug, Clone, PartialEq)] +pub struct PulsarSourceConfig { + pub pulsar_server_addr: String, + pub topic: String, + pub consumer_name: String, + pub subscription: String, + pub max_unack: usize, + pub auth: Option, +} + +#[derive(Clone, PartialEq)] +pub enum PulsarAuth { + JWT(String), +} + +impl std::fmt::Debug for PulsarAuth { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + PulsarAuth::JWT(token) => { + write!(f, "{}****{}", &token[..6], &token[token.len() - 6..]) + } + } + } +} + +enum ConsumerActorMessage { + Read { + count: usize, + timeout_at: Instant, + respond_to: oneshot::Sender>>, + }, + Ack { + offsets: Vec, + respond_to: oneshot::Sender>, + }, +} + +pub struct PulsarMessage { + pub key: String, + pub payload: Bytes, + pub offset: u64, + pub event_time: DateTime, + pub headers: HashMap, +} + +struct ConsumerReaderActor { + consumer: Consumer, TokioExecutor>, + handler_rx: mpsc::Receiver, + message_ids: BTreeMap, + max_unack: usize, + topic: String, +} + +impl ConsumerReaderActor { + async fn start( + config: 
PulsarSourceConfig, + handler_rx: mpsc::Receiver, + ) -> Result<()> { + tracing::info!( + addr = &config.pulsar_server_addr, + "Pulsar connection details" + ); + + // Rustls doesn't allow accepting self-signed certs: https://github.com/streamnative/pulsar-rs/blob/715411cb365932c379d4b5d0a8fde2ac46c54055/src/connection.rs#L912 + // The `with_allow_insecure_connection()` option has no effect + let mut pulsar = Pulsar::builder(&config.pulsar_server_addr, TokioExecutor); + if let Some(PulsarAuth::JWT(token)) = config.auth { + let auth_token = Authentication { + name: "token".into(), + data: token.into(), + }; + pulsar = pulsar.with_auth(auth_token); + } + + let pulsar: Pulsar<_> = pulsar + .build() + .await + .map_err(|e| format!("Creating Pulsar client connection: {e:?}"))?; + + let consumer: Consumer, TokioExecutor> = pulsar + .consumer() + .with_topic(&config.topic) + .with_consumer_name(&config.consumer_name) + .with_subscription_type(SubType::Shared) + .with_subscription(&config.subscription) + .with_options(ConsumerOptions::default().durable(true)) + .build() + .await + .map_err(|e| format!("Creating a Pulsar consumer: {e:?}"))?; + + tokio::spawn(async move { + let mut consumer_actor = ConsumerReaderActor { + consumer, + handler_rx, + message_ids: BTreeMap::new(), + max_unack: config.max_unack, + topic: config.topic, + }; + consumer_actor.run().await; + }); + Ok(()) + } + + async fn run(&mut self) { + while let Some(msg) = self.handler_rx.recv().await { + self.handle_message(msg).await; + } + } + + async fn handle_message(&mut self, msg: ConsumerActorMessage) { + match msg { + ConsumerActorMessage::Read { + count, + timeout_at, + respond_to, + } => { + let messages = self.get_messages(count, timeout_at).await; + let _ = respond_to.send(messages); + } + ConsumerActorMessage::Ack { + offsets, + respond_to, + } => { + let status = self.ack_messages(offsets).await; + let _ = respond_to.send(status); + } + } + } + + async fn get_messages( + &mut self, + count: usize, 
+ timeout_at: Instant, + ) -> Result> { + if self.message_ids.len() >= self.max_unack { + return Err(Error::AckPendingExceeded(self.message_ids.len())); + } + let mut messages = vec![]; + for _ in 0..count { + let remaining_time = timeout_at - Instant::now(); + let Ok(msg) = time::timeout(remaining_time, self.consumer.try_next()).await else { + return Ok(messages); + }; + let msg = match msg { + Ok(Some(msg)) => msg, + Ok(None) => break, + Err(e) => { + tracing::error!(?e, "Fetching message from Pulsar"); + let remaining_time = timeout_at - Instant::now(); + if remaining_time.as_millis() >= 100 { + time::sleep(Duration::from_millis(50)).await; // FIXME: add error metrics. Also, respect the timeout + continue; + } + return Err(Error::Pulsar(e)); + } + }; + let offset = msg.message_id().entry_id; + let event_time = msg + .metadata() + .event_time + .unwrap_or(msg.metadata().publish_time); + let Some(event_time) = chrono::DateTime::from_timestamp_millis(event_time as i64) + else { + // This should never happen + tracing::error!( + event_time = msg.metadata().event_time, + publish_time = msg.metadata().publish_time, + parsed_event_time = event_time, + "Pulsar message contains invalid event_time/publish_time timestamp" + ); + continue; + //FIXME: NACK the message + }; + + self.message_ids.insert(offset, msg.message_id().clone()); + let headers = msg + .metadata() + .properties + .iter() + .map(|prop| (prop.key.clone(), prop.value.clone())) + .collect(); + + messages.push(PulsarMessage { + key: msg.key().unwrap_or_else(|| "".to_string()), // FIXME: This is partition key. Identify the correct option. 
Also, there is a partition_key_b64_encoded boolean option in Pulsar metadata + payload: msg.payload.data.into(), + offset, + event_time, + headers, + }); + + // stop reading as soon as we hit max_unack + if messages.len() >= self.max_unack { + return Ok(messages); + } + } + Ok(messages) + } + + // TODO: Identify the longest continuous batch and use cumulative_ack_with_id() to ack them all. + async fn ack_messages(&mut self, offsets: Vec) -> Result<()> { + for offset in offsets { + let msg_id = self.message_ids.remove(&offset); + + let Some(msg_id) = msg_id else { + return Err(Error::UnknownOffset(offset)); + }; + + let Err(e) = self.consumer.ack_with_id(&self.topic, msg_id.clone()).await else { + continue; + }; + // Insert offset back + self.message_ids.insert(offset, msg_id); + return Err(Error::Pulsar(e.into())); + } + Ok(()) + } +} + +#[derive(Clone)] +pub struct PulsarSource { + batch_size: usize, + /// timeout for each batch read request + timeout: Duration, + actor_tx: mpsc::Sender, +} + +impl PulsarSource { + pub async fn new( + config: PulsarSourceConfig, + batch_size: usize, + timeout: Duration, + ) -> Result { + let (tx, rx) = mpsc::channel(10); + ConsumerReaderActor::start(config, rx).await?; + Ok(Self { + actor_tx: tx, + batch_size, + timeout, + }) + } +} + +impl PulsarSource { + pub async fn read_messages(&self) -> Result> { + let start = Instant::now(); + let (tx, rx) = oneshot::channel(); + let msg = ConsumerActorMessage::Read { + count: self.batch_size, + timeout_at: Instant::now() + self.timeout, + respond_to: tx, + }; + let _ = self.actor_tx.send(msg).await; + let messages = rx.await.map_err(Error::ActorTaskTerminated)??; + tracing::debug!( + count = messages.len(), + requested_count = self.batch_size, + time_taken_ms = start.elapsed().as_millis(), + "Got messages from pulsar" + ); + Ok(messages) + } + + pub async fn ack_offsets(&self, offsets: Vec) -> Result<()> { + let (tx, rx) = oneshot::channel(); + let _ = self + .actor_tx + 
.send(ConsumerActorMessage::Ack { + offsets, + respond_to: tx, + }) + .await; + rx.await.map_err(Error::ActorTaskTerminated)? + } + + pub async fn pending_count(&self) -> Option { + None + } + + pub fn partitions(&self) -> Vec { + todo!() + } +} diff --git a/rust/numaflow-models/src/models/mod.rs b/rust/numaflow-models/src/models/mod.rs index bfbcd121d0..7d890089a8 100644 --- a/rust/numaflow-models/src/models/mod.rs +++ b/rust/numaflow-models/src/models/mod.rs @@ -124,6 +124,10 @@ pub mod pipeline_status; pub use self::pipeline_status::PipelineStatus; pub mod probe; pub use self::probe::Probe; +pub mod pulsar_auth; +pub use self::pulsar_auth::PulsarAuth; +pub mod pulsar_source; +pub use self::pulsar_source::PulsarSource; pub mod redis_buffer_service; pub use self::redis_buffer_service::RedisBufferService; pub mod redis_config; diff --git a/rust/numaflow-models/src/models/pulsar_auth.rs b/rust/numaflow-models/src/models/pulsar_auth.rs new file mode 100644 index 0000000000..7b5299157d --- /dev/null +++ b/rust/numaflow-models/src/models/pulsar_auth.rs @@ -0,0 +1,32 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. 
+ +/// PulsarAuth : PulsarAuth defines how to authenticate with Pulsar + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct PulsarAuth { + #[serde(rename = "token", skip_serializing_if = "Option::is_none")] + pub token: Option, +} + +impl PulsarAuth { + /// PulsarAuth defines how to authenticate with Pulsar + pub fn new() -> PulsarAuth { + PulsarAuth { token: None } + } +} diff --git a/rust/numaflow-models/src/models/pulsar_source.rs b/rust/numaflow-models/src/models/pulsar_source.rs new file mode 100644 index 0000000000..3324ebf12b --- /dev/null +++ b/rust/numaflow-models/src/models/pulsar_source.rs @@ -0,0 +1,52 @@ +/* +Copyright 2022 The Numaproj Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by Openapi Generator. DO NOT EDIT. + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct PulsarSource { + #[serde(rename = "auth", skip_serializing_if = "Option::is_none")] + pub auth: Option>, + #[serde(rename = "consumerName")] + pub consumer_name: String, + /// Maximum number of messages that are in not yet acked state. Once this limit is crossed, futher read requests will return empty list. 
+ #[serde(rename = "maxUnack", skip_serializing_if = "Option::is_none")] + pub max_unack: Option, + #[serde(rename = "serverAddr")] + pub server_addr: String, + #[serde(rename = "subscriptionName")] + pub subscription_name: String, + #[serde(rename = "topic")] + pub topic: String, +} + +impl PulsarSource { + pub fn new( + consumer_name: String, + server_addr: String, + subscription_name: String, + topic: String, + ) -> PulsarSource { + PulsarSource { + auth: None, + consumer_name, + max_unack: None, + server_addr, + subscription_name, + topic, + } + } +} diff --git a/rust/numaflow-models/src/models/source.rs b/rust/numaflow-models/src/models/source.rs index a5569b86fa..f5ede490e2 100644 --- a/rust/numaflow-models/src/models/source.rs +++ b/rust/numaflow-models/src/models/source.rs @@ -28,6 +28,8 @@ pub struct Source { pub kafka: Option>, #[serde(rename = "nats", skip_serializing_if = "Option::is_none")] pub nats: Option>, + #[serde(rename = "pulsar", skip_serializing_if = "Option::is_none")] + pub pulsar: Option>, #[serde(rename = "serving", skip_serializing_if = "Option::is_none")] pub serving: Option>, #[serde(rename = "transformer", skip_serializing_if = "Option::is_none")] @@ -44,6 +46,7 @@ impl Source { jetstream: None, kafka: None, nats: None, + pulsar: None, serving: None, transformer: None, udsource: None, diff --git a/rust/rust-toolchain.toml b/rust/rust-toolchain.toml index d298aadb69..06bea96e8f 100644 --- a/rust/rust-toolchain.toml +++ b/rust/rust-toolchain.toml @@ -1,3 +1,3 @@ [toolchain] profile = "default" -channel = "1.81" +channel = "1.82" From 2530bade00d6c2088a9e9d649e2de84ba22036a8 Mon Sep 17 00:00:00 2001 From: Yashash H L Date: Wed, 4 Dec 2024 11:36:00 +0530 Subject: [PATCH 151/188] feat: Asynchronous Streaming for Source and Sink in Numaflow Core (#2251) Signed-off-by: Yashash H L Signed-off-by: Vigith Maurice Co-authored-by: Vigith Maurice --- examples/21-simple-mono-vertex.yaml | 2 +- rust/.rustfmt.toml | 2 +- rust/Cargo.lock | 904 
++++++++----- rust/Cargo.toml | 4 +- rust/Dockerfile | 7 +- rust/backoff/Cargo.toml | 2 +- rust/numaflow-core/Cargo.toml | 7 +- rust/numaflow-core/src/config.rs | 52 + rust/numaflow-core/src/config/components.rs | 15 +- rust/numaflow-core/src/config/monovertex.rs | 3 +- rust/numaflow-core/src/config/pipeline.rs | 25 +- rust/numaflow-core/src/config/pipeline/isb.rs | 6 +- rust/numaflow-core/src/lib.rs | 3 +- rust/numaflow-core/src/message.rs | 46 +- rust/numaflow-core/src/metrics.rs | 214 +++- rust/numaflow-core/src/monovertex.rs | 330 +---- .../numaflow-core/src/monovertex/forwarder.rs | 1128 +++-------------- rust/numaflow-core/src/pipeline.rs | 392 +++--- rust/numaflow-core/src/pipeline/forwarder.rs | 35 +- .../src/pipeline/forwarder/sink_forwarder.rs | 9 +- .../pipeline/forwarder/source_forwarder.rs | 437 ++++--- rust/numaflow-core/src/pipeline/isb.rs | 2 + .../src/pipeline/isb/jetstream.rs | 389 +++--- .../src/pipeline/isb/jetstream/reader.rs | 400 +++--- .../src/pipeline/isb/jetstream/writer.rs | 327 ++--- rust/numaflow-core/src/shared.rs | 14 +- .../src/shared/create_components.rs | 402 ++++++ rust/numaflow-core/src/shared/grpc.rs | 127 ++ rust/numaflow-core/src/shared/metrics.rs | 44 + rust/numaflow-core/src/shared/server_info.rs | 2 +- rust/numaflow-core/src/shared/utils.rs | 354 ------ rust/numaflow-core/src/sink.rs | 502 ++++++-- rust/numaflow-core/src/sink/blackhole.rs | 6 +- rust/numaflow-core/src/sink/log.rs | 8 +- rust/numaflow-core/src/sink/user_defined.rs | 15 +- rust/numaflow-core/src/source.rs | 409 +++++- rust/numaflow-core/src/source/generator.rs | 6 +- rust/numaflow-core/src/source/pulsar.rs | 6 +- rust/numaflow-core/src/source/user_defined.rs | 2 +- rust/numaflow-core/src/transformer.rs | 301 +++++ .../src/transformer/user_defined.rs | 350 ++--- rust/servesink/Cargo.toml | 2 +- rust/serving/Cargo.toml | 2 +- rust/src/bin/main.rs | 2 + 44 files changed, 3997 insertions(+), 3298 deletions(-) create mode 100644 
rust/numaflow-core/src/shared/create_components.rs create mode 100644 rust/numaflow-core/src/shared/grpc.rs create mode 100644 rust/numaflow-core/src/shared/metrics.rs delete mode 100644 rust/numaflow-core/src/shared/utils.rs diff --git a/examples/21-simple-mono-vertex.yaml b/examples/21-simple-mono-vertex.yaml index 2a437b44b3..9ca99cf1bc 100644 --- a/examples/21-simple-mono-vertex.yaml +++ b/examples/21-simple-mono-vertex.yaml @@ -14,4 +14,4 @@ spec: sink: udsink: container: - image: quay.io/numaio/numaflow-rs/sink-log:stable + image: quay.io/numaio/numaflow-rs/sink-log:stable \ No newline at end of file diff --git a/rust/.rustfmt.toml b/rust/.rustfmt.toml index 3a26366d4d..36c419bb3e 100644 --- a/rust/.rustfmt.toml +++ b/rust/.rustfmt.toml @@ -1 +1 @@ -edition = "2021" +edition = "2021" \ No newline at end of file diff --git a/rust/Cargo.lock b/rust/Cargo.lock index a6493870a6..a5d40a88e8 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -17,6 +17,18 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" +[[package]] +name = "ahash" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" +dependencies = [ + "cfg-if", + "once_cell", + "version_check", + "zerocopy", +] + [[package]] name = "aho-corasick" version = "1.1.3" @@ -26,6 +38,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "allocator-api2" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" + [[package]] name = "android-tzdata" version = "0.1.1" @@ -43,9 +61,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.89" +version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" +checksum = "4c95c10ba0b00a02636238b814946408b1322d5ac4760326e6fb8ec956d85775" [[package]] name = "arc-swap" @@ -53,6 +71,12 @@ version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" +[[package]] +name = "arraydeque" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d902e3d592a523def97af8f317b08ce16b7ab854c1985a0c671e6f15cebc236" + [[package]] name = "async-nats" version = "0.35.1" @@ -77,7 +101,7 @@ dependencies = [ "serde_json", "serde_nanos", "serde_repr", - "thiserror 1.0.64", + "thiserror 1.0.69", "time", "tokio", "tokio-rustls 0.26.0", @@ -88,9 +112,9 @@ dependencies = [ [[package]] name = "async-nats" -version = "0.37.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd3bdd6ea595b2ea504500a3566071beb81125fc15d40a6f6bffa43575f64152" +checksum = "76433c4de73442daedb3a59e991d94e85c14ebfc33db53dfcd347a21cd6ef4f8" dependencies = [ "base64 0.22.1", "bytes", @@ -99,6 +123,7 @@ dependencies = [ "nkeys", "nuid", "once_cell", + "pin-project", "portable-atomic", "rand", "regex", @@ -110,11 +135,12 @@ dependencies = [ "serde_json", "serde_nanos", "serde_repr", - "thiserror 1.0.64", + "thiserror 1.0.69", "time", "tokio", "tokio-rustls 0.26.0", "tokio-util", + "tokio-websockets", "tracing", "tryhard", "url", @@ -139,7 +165,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -150,7 +176,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -167,21 +193,20 @@ checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "aws-lc-rs" 
-version = "1.10.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdd82dba44d209fddb11c190e0a94b78651f95299598e472215667417a03ff1d" +checksum = "f47bb8cc16b669d267eeccf585aea077d0882f4777b1c1f740217885d6e6e5a3" dependencies = [ "aws-lc-sys", - "mirai-annotations", "paste", "zeroize", ] [[package]] name = "aws-lc-sys" -version = "0.22.0" +version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df7a4168111d7eb622a31b214057b8509c0a7e1794f44c546d742330dc793972" +checksum = "a2101df3813227bbaaaa0b04cd61c534c7954b22bd68d399b440be937dc63ff7" dependencies = [ "bindgen", "cc", @@ -194,9 +219,9 @@ dependencies = [ [[package]] name = "axum" -version = "0.7.7" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "504e3947307ac8326a5437504c517c4b56716c9d98fac0028c2acc7ca47d70ae" +checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" dependencies = [ "async-trait", "axum-core", @@ -205,7 +230,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.1", "http-body-util", - "hyper 1.4.1", + "hyper 1.5.1", "hyper-util", "itoa", "matchit", @@ -218,7 +243,7 @@ dependencies = [ "serde_json", "serde_path_to_error", "serde_urlencoded", - "sync_wrapper 1.0.1", + "sync_wrapper 1.0.2", "tokio", "tower 0.5.1", "tower-layer", @@ -241,7 +266,7 @@ dependencies = [ "mime", "pin-project-lite", "rustversion", - "sync_wrapper 1.0.1", + "sync_wrapper 1.0.2", "tower-layer", "tower-service", "tracing", @@ -255,7 +280,7 @@ checksum = "57d123550fa8d071b7255cb0cc04dc302baa6c8c4a79f55701552684d8399bce" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -270,10 +295,10 @@ dependencies = [ "http 1.1.0", "http-body 1.0.1", "http-body-util", - "hyper 1.4.1", + "hyper 1.5.1", "hyper-util", "pin-project-lite", - "rustls 0.23.14", + "rustls 0.23.19", "rustls-pemfile 2.2.0", "rustls-pki-types", "tokio", @@ -334,9 +359,9 @@ 
dependencies = [ [[package]] name = "bindgen" -version = "0.69.4" +version = "0.69.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" +checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" dependencies = [ "bitflags 2.6.0", "cexpr", @@ -345,13 +370,13 @@ dependencies = [ "lazy_static", "lazycell", "log", - "prettyplease 0.2.22", + "prettyplease 0.2.25", "proc-macro2", "quote", "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.89", + "syn 2.0.90", "which", ] @@ -399,18 +424,18 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ac0150caa2ae65ca5bd83f25c7de183dea78d4d366469f148435e2acfbad0da" +checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b" dependencies = [ "serde", ] [[package]] name = "cc" -version = "1.1.26" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4cbd4ab9fef358caa9c599eae3105af638ead5fb47a718315d8e03c852b9f0d" +checksum = "f34d93e62b03caf570cccc334cbc6c2fceca82f39211051345108adcba3eebdc" dependencies = [ "jobserver", "libc", @@ -432,6 +457,12 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + [[package]] name = "chrono" version = "0.4.38" @@ -460,9 +491,9 @@ dependencies = [ [[package]] name = "cmake" -version = "0.1.51" +version = "0.1.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb1e43aa7fd152b1f968787f7dbcdeb306d1867ff373c69955211876c053f91a" 
+checksum = "c682c223677e0e5b6b7f63a64b9351844c3f1b1678a68b7ee617e30fb082620e" dependencies = [ "cc", ] @@ -483,14 +514,13 @@ dependencies = [ [[package]] name = "config" -version = "0.14.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7328b20597b53c2454f0b1919720c25c7339051c02b72b7e05409e00b14132be" +checksum = "68578f196d2a33ff61b27fae256c3164f65e36382648e30666dde05b8cc9dfdf" dependencies = [ "async-trait", "convert_case", "json5", - "lazy_static", "nom", "pathdiff", "ron", @@ -498,7 +528,7 @@ dependencies = [ "serde", "serde_json", "toml", - "yaml-rust", + "yaml-rust2", ] [[package]] @@ -546,6 +576,16 @@ dependencies = [ "libc", ] +[[package]] +name = "core-foundation" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b55271e5c8c478ad3f38ad24ef34923091e0548492a266d19b3c0b4d82574c63" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "core-foundation-sys" version = "0.8.7" @@ -554,9 +594,9 @@ checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "cpufeatures" -version = "0.2.14" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" +checksum = "16b80225097f2e5ae4e7179dd2266824648f3e2f49d9134d584b76389d31c4c3" dependencies = [ "libc", ] @@ -615,7 +655,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -655,6 +695,17 @@ dependencies = [ "crypto-common", ] +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + [[package]] name = "dlv-list" version = "0.5.2" @@ 
-706,9 +757,9 @@ checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" [[package]] name = "encoding_rs" -version = "0.8.34" +version = "0.8.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" dependencies = [ "cfg-if", ] @@ -721,19 +772,19 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.9" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" +checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "fastrand" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" +checksum = "486f806e73c5707928240ddc295403b1b93c96a02038563881c4a2fd84b81ac4" [[package]] name = "fiat-crypto" @@ -824,7 +875,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -880,8 +931,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ "cfg-if", + "js-sys", "libc", "wasi", + "wasm-bindgen", ] [[package]] @@ -908,7 +961,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.6.0", + "indexmap 2.7.0", "slab", "tokio", "tokio-util", @@ -917,9 +970,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" +checksum = "ccae279728d634d083c00f6099cb58f01cc99c145b84b8be2f6c74618d79922e" dependencies = [ "atomic-waker", "bytes", @@ -927,7 +980,7 @@ dependencies = [ "futures-core", "futures-sink", "http 1.1.0", - "indexmap 2.6.0", + "indexmap 2.7.0", "slab", "tokio", "tokio-util", @@ -942,15 +995,28 @@ checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "hashbrown" -version = "0.13.2" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +dependencies = [ + "ahash", + "allocator-api2", +] [[package]] name = "hashbrown" -version = "0.15.0" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" + +[[package]] +name = "hashlink" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" +checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" +dependencies = [ + "hashbrown 0.14.5", +] [[package]] name = "headers" @@ -988,12 +1054,6 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" -[[package]] -name = "hermit-abi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" - [[package]] name = "home" version = "0.5.9" @@ -1073,9 +1133,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "0.14.30" +version = "0.14.31" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9" +checksum = "8c08302e8fa335b151b788c775ff56e7a03ae64ff85c548ee820fecb70356e85" dependencies = [ "bytes", "futures-channel", @@ -1097,14 +1157,14 @@ dependencies = [ [[package]] name = "hyper" -version = "1.4.1" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" +checksum = "97818827ef4f364230e16705d4706e2897df2bb60617d6ca15d598025a3c481f" dependencies = [ "bytes", "futures-channel", "futures-util", - "h2 0.4.6", + "h2 0.4.7", "http 1.1.0", "http-body 1.0.1", "httparse", @@ -1126,7 +1186,7 @@ dependencies = [ "futures-util", "headers", "http 1.1.0", - "hyper 1.4.1", + "hyper 1.5.1", "hyper-rustls 0.27.3", "hyper-util", "pin-project-lite", @@ -1144,7 +1204,7 @@ checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", "http 0.2.12", - "hyper 0.14.30", + "hyper 0.14.31", "rustls 0.21.12", "tokio", "tokio-rustls 0.24.1", @@ -1158,25 +1218,25 @@ checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" dependencies = [ "futures-util", "http 1.1.0", - "hyper 1.4.1", + "hyper 1.5.1", "hyper-util", "log", - "rustls 0.23.14", - "rustls-native-certs 0.8.0", + "rustls 0.23.19", + "rustls-native-certs 0.8.1", "rustls-pki-types", "tokio", "tokio-rustls 0.26.0", "tower-service", - "webpki-roots 0.26.6", + "webpki-roots 0.26.7", ] [[package]] name = "hyper-timeout" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" dependencies = [ - "hyper 1.4.1", + "hyper 1.5.1", "hyper-util", "pin-project-lite", "tokio", @@ -1185,16 +1245,16 @@ dependencies = [ [[package]] 
name = "hyper-util" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b" +checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" dependencies = [ "bytes", "futures-channel", "futures-util", "http 1.1.0", "http-body 1.0.1", - "hyper 1.4.1", + "hyper 1.5.1", "pin-project-lite", "socket2", "tokio", @@ -1225,14 +1285,143 @@ dependencies = [ "cc", ] +[[package]] +name = "icu_collections" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locid" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_locid_transform" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_locid_transform_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_locid_transform_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" + +[[package]] +name = "icu_normalizer" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "utf16_iter", + "utf8_iter", + "write16", + 
"zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" + +[[package]] +name = "icu_properties" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_locid_transform", + "icu_properties_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" + +[[package]] +name = "icu_provider" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_provider_macros", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_provider_macros" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + [[package]] name = "idna" -version = "0.5.0" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" 
dependencies = [ - "unicode-bidi", - "unicode-normalization", + "icu_normalizer", + "icu_properties", ] [[package]] @@ -1247,12 +1436,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" +checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f" dependencies = [ "equivalent", - "hashbrown 0.15.0", + "hashbrown 0.15.2", ] [[package]] @@ -1290,9 +1479,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.11" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" +checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674" [[package]] name = "jobserver" @@ -1305,10 +1494,11 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.70" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" +checksum = "a865e038f7f6ed956f788f0d7d60c541fff74c7bd74272c5d4cf15c63743e705" dependencies = [ + "once_cell", "wasm-bindgen", ] @@ -1335,7 +1525,7 @@ dependencies = [ "pest_derive", "regex", "serde_json", - "thiserror 1.0.64", + "thiserror 1.0.69", ] [[package]] @@ -1377,7 +1567,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.1", "http-body-util", - "hyper 1.4.1", + "hyper 1.5.1", "hyper-http-proxy", "hyper-rustls 0.27.3", "hyper-timeout", @@ -1386,13 +1576,13 @@ dependencies = [ "k8s-openapi", "kube-core", "pem", - "rustls 0.23.14", + "rustls 0.23.19", "rustls-pemfile 2.2.0", "secrecy", "serde", "serde_json", "serde_yaml", - "thiserror 1.0.64", + "thiserror 1.0.69", "tokio", "tokio-util", "tower 0.4.13", @@ -1413,7 +1603,7 @@ dependencies = [ "serde", "serde-value", "serde_json", - "thiserror 1.0.64", + "thiserror 1.0.69", ] 
[[package]] @@ -1430,32 +1620,32 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.159" +version = "0.2.167" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5" +checksum = "09d6582e104315a817dff97f75133544b2e094ee22447d2acf4a74e189ba06fc" [[package]] name = "libloading" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" +checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" dependencies = [ "cfg-if", "windows-targets 0.52.6", ] -[[package]] -name = "linked-hash-map" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" - [[package]] name = "linux-raw-sys" version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +[[package]] +name = "litemap" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104" + [[package]] name = "lock_api" version = "0.4.12" @@ -1526,22 +1716,15 @@ dependencies = [ [[package]] name = "mio" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" +checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" dependencies = [ - "hermit-abi", "libc", "wasi", "windows-sys 0.52.0", ] -[[package]] -name = "mirai-annotations" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c9be0862c1b3f26a88803c4a49de6889c10e608b3ee9344e6ef5b45fb37ad3d1" - [[package]] name = "multimap" version = "0.8.3" @@ -1636,9 +1819,7 @@ dependencies = [ name = "numaflow" version = "0.1.0" dependencies = [ - "backoff", "numaflow-core", - "numaflow-pb", "servesink", "serving", "tokio", @@ -1658,7 +1839,7 @@ dependencies = [ "prost-types 0.13.3", "serde", "serde_json", - "thiserror 1.0.64", + "thiserror 1.0.69", "tokio", "tokio-stream", "tokio-util", @@ -1672,7 +1853,7 @@ dependencies = [ name = "numaflow-core" version = "0.1.0" dependencies = [ - "async-nats 0.37.0", + "async-nats 0.38.0", "axum", "axum-server", "backoff", @@ -1696,12 +1877,12 @@ dependencies = [ "pulsar", "rand", "rcgen", - "rustls 0.23.14", + "rustls 0.23.19", "semver", "serde", "serde_json", "tempfile", - "thiserror 1.0.64", + "thiserror 2.0.3", "tokio", "tokio-stream", "tokio-util", @@ -1784,12 +1965,12 @@ dependencies = [ [[package]] name = "ordered-multimap" -version = "0.6.0" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ed8acf08e98e744e5384c8bc63ceb0364e68a6854187221c18df61c4797690e" +checksum = "49203cdcae0030493bad186b28da2fa25645fa276a51b6fec8010d281e02ef79" dependencies = [ "dlv-list", - "hashbrown 0.13.2", + "hashbrown 0.14.5", ] [[package]] @@ -1829,9 +2010,9 @@ checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] name = "pathdiff" -version = "0.2.1" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8835116a5c179084a830efb3adc117ab007512b535bc1a21c991d3b32a6b44dd" +checksum = "df94ce210e5bc13cb6651479fa48d14f601d9858cfe0467f43ae157023b938d3" [[package]] name = "pem" @@ -1871,20 +2052,20 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.13" +version = "2.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fdbef9d1d47087a895abd220ed25eb4ad973a5e26f6a4367b038c25e28dfc2d9" +checksum = "879952a81a83930934cbf1786752d6dedc3b1f29e8f8fb2ad1d0a36f377cf442" dependencies = [ "memchr", - "thiserror 1.0.64", + "thiserror 1.0.69", "ucd-trie", ] [[package]] name = "pest_derive" -version = "2.7.13" +version = "2.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d3a6e3394ec80feb3b6393c725571754c6188490265c61aaf260810d6b95aa0" +checksum = "d214365f632b123a47fd913301e14c946c61d1c183ee245fa76eb752e59a02dd" dependencies = [ "pest", "pest_generator", @@ -1892,22 +2073,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.13" +version = "2.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94429506bde1ca69d1b5601962c73f4172ab4726571a59ea95931218cb0e930e" +checksum = "eb55586734301717aea2ac313f50b2eb8f60d2fc3dc01d190eefa2e625f60c4e" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] name = "pest_meta" -version = "2.7.13" +version = "2.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac8a071862e93690b6e34e9a5fb8e33ff3734473ac0245b27232222c4906a33f" +checksum = "b75da2a70cf4d9cb76833c990ac9cd3923c9a8905a8929789ce347c84564d03d" dependencies = [ "once_cell", "pest", @@ -1921,34 +2102,34 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", - "indexmap 2.6.0", + "indexmap 2.7.0", ] [[package]] name = "pin-project" -version = "1.1.6" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf123a161dde1e524adf36f90bc5d8d3462824a9c43553ad07a8183161189ec" +checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.6" +version = "1.1.7" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4502d8515ca9f32f1fb543d987f63d95a14934883db45bdb48060b6b69257f8" +checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] name = "pin-project-lite" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" +checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" [[package]] name = "pin-utils" @@ -1968,9 +2149,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2" +checksum = "280dc24453071f1b63954171985a0b0d30058d287960968b9b2aca264c8d4ee6" [[package]] name = "powerfmt" @@ -1999,12 +2180,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.22" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba" +checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" dependencies = [ "proc-macro2", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -2036,7 +2217,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -2094,11 +2275,11 @@ dependencies = [ "multimap 0.10.0", "once_cell", "petgraph", - "prettyplease 0.2.22", + "prettyplease 0.2.25", "prost 0.13.3", "prost-types 0.13.3", "regex", - "syn 2.0.89", + "syn 2.0.90", "tempfile", ] @@ -2125,7 +2306,7 @@ dependencies = [ "itertools 0.13.0", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -2179,45 +2360,49 @@ dependencies = [ 
[[package]] name = "quinn" -version = "0.11.5" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c7c5fdde3cdae7203427dc4f0a68fe0ed09833edc525a03456b153b79828684" +checksum = "62e96808277ec6f97351a2380e6c25114bc9e67037775464979f3037c92d05ef" dependencies = [ "bytes", "pin-project-lite", "quinn-proto", "quinn-udp", - "rustc-hash 2.0.0", - "rustls 0.23.14", + "rustc-hash 2.1.0", + "rustls 0.23.19", "socket2", - "thiserror 1.0.64", + "thiserror 2.0.3", "tokio", "tracing", ] [[package]] name = "quinn-proto" -version = "0.11.8" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6" +checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" dependencies = [ "bytes", + "getrandom", "rand", "ring", - "rustc-hash 2.0.0", - "rustls 0.23.14", + "rustc-hash 2.1.0", + "rustls 0.23.19", + "rustls-pki-types", "slab", - "thiserror 1.0.64", + "thiserror 2.0.3", "tinyvec", "tracing", + "web-time", ] [[package]] name = "quinn-udp" -version = "0.5.5" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fe68c2e9e1a1234e218683dbdf9f9dfcb094113c5ac2b938dfcb9bab4c4140b" +checksum = "7d5a626c6807713b15cac82a6acaccd6043c9a5408c24baae07611fec3f243da" dependencies = [ + "cfg_aliases", "libc", "once_cell", "socket2", @@ -2313,13 +2498,13 @@ dependencies = [ [[package]] name = "regex" -version = "1.11.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.8", + "regex-automata 0.4.9", "regex-syntax 0.8.5", ] @@ -2334,9 +2519,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.8" +version = "0.4.9" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" dependencies = [ "aho-corasick", "memchr", @@ -2369,7 +2554,7 @@ dependencies = [ "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", - "hyper 0.14.30", + "hyper 0.14.31", "hyper-rustls 0.24.2", "ipnet", "js-sys", @@ -2399,9 +2584,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.8" +version = "0.12.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f713147fbe92361e52392c73b8c9e48c04c6625bce969ef54dc901e58e042a7b" +checksum = "a77c62af46e79de0a562e1a9849205ffcb7fc1238876e9bd743357570e04046f" dependencies = [ "base64 0.22.1", "bytes", @@ -2410,7 +2595,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.1", "http-body-util", - "hyper 1.4.1", + "hyper 1.5.1", "hyper-rustls 0.27.3", "hyper-util", "ipnet", @@ -2421,13 +2606,13 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls 0.23.14", + "rustls 0.23.19", "rustls-pemfile 2.2.0", "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", - "sync_wrapper 1.0.1", + "sync_wrapper 1.0.2", "tokio", "tokio-rustls 0.26.0", "tower-service", @@ -2435,7 +2620,7 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "webpki-roots 0.26.6", + "webpki-roots 0.26.7", "windows-registry", ] @@ -2468,9 +2653,9 @@ dependencies = [ [[package]] name = "rust-ini" -version = "0.19.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e2a3bcec1f113553ef1c88aae6c020a369d03d55b58de9869a0908930385091" +checksum = "3e0698206bcb8882bf2a9ecb4c1e7785db57ff052297085a6efd4fe42302068a" dependencies = [ "cfg-if", "ordered-multimap", @@ -2490,9 +2675,9 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustc-hash" -version = "2.0.0" +version = "2.1.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" +checksum = "c7fb8039b3032c191086b10f11f319a6e99e1e82889c5cc6046f515c9db1d497" [[package]] name = "rustc_version" @@ -2505,9 +2690,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.37" +version = "0.38.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" +checksum = "d7f649912bc1495e167a6edee79151c84b1bad49748cb4f1f1167f459f6224f6" dependencies = [ "bitflags 2.6.0", "errno", @@ -2530,9 +2715,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.14" +version = "0.23.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "415d9944693cb90382053259f89fbb077ea730ad7273047ec63b19bc9b160ba8" +checksum = "934b404430bb06b3fae2cba809eb45a1ab1aecd64491213d7c3301b88393f8d1" dependencies = [ "aws-lc-rs", "log", @@ -2554,20 +2739,19 @@ dependencies = [ "rustls-pemfile 2.2.0", "rustls-pki-types", "schannel", - "security-framework", + "security-framework 2.11.1", ] [[package]] name = "rustls-native-certs" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcaf18a4f2be7326cd874a5fa579fae794320a0f388d365dca7e480e55f83f8a" +checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3" dependencies = [ "openssl-probe", - "rustls-pemfile 2.2.0", "rustls-pki-types", "schannel", - "security-framework", + "security-framework 3.0.1", ] [[package]] @@ -2590,9 +2774,12 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e696e35370c65c9c541198af4543ccd580cf17fc25d8e05c5a242b202488c55" +checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" +dependencies = [ + "web-time", +] 
[[package]] name = "rustls-webpki" @@ -2618,9 +2805,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" +checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248" [[package]] name = "ryu" @@ -2630,9 +2817,9 @@ checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "schannel" -version = "0.1.24" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9aaafd5a2b6e3d657ff009d82fbd630b6bd54dd4eb06f21693925cdf80f9b8b" +checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" dependencies = [ "windows-sys 0.59.0", ] @@ -2670,7 +2857,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ "bitflags 2.6.0", - "core-foundation", + "core-foundation 0.9.4", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework" +version = "3.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1415a607e92bec364ea2cf9264646dcce0f91e6d65281bd6f2819cca3bf39c8" +dependencies = [ + "bitflags 2.6.0", + "core-foundation 0.10.0", "core-foundation-sys", "libc", "security-framework-sys", @@ -2678,9 +2878,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.12.0" +version = "2.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" +checksum = "fa39c7303dc58b5543c94d22c1766b0d31f2ee58306363ea622b10bbc075eaa2" dependencies = [ "core-foundation-sys", "libc", @@ -2719,14 +2919,14 @@ checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] name = "serde_json" -version = "1.0.128" +version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" +checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" dependencies = [ "itoa", "memchr", @@ -2761,7 +2961,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -2791,7 +2991,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.6.0", + "indexmap 2.7.0", "itoa", "ryu", "serde", @@ -2803,7 +3003,7 @@ name = "servesink" version = "0.1.0" dependencies = [ "numaflow 0.1.1", - "reqwest 0.12.8", + "reqwest 0.12.9", "tokio", "tonic", "tracing", @@ -2831,7 +3031,7 @@ dependencies = [ "serde", "serde_json", "tempfile", - "thiserror 1.0.64", + "thiserror 1.0.69", "tokio", "tower 0.4.13", "tower-http", @@ -2932,9 +3132,9 @@ checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "socket2" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" +checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" dependencies = [ "libc", "windows-sys 0.52.0", @@ -2956,6 +3156,12 @@ dependencies = [ "der", ] +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + [[package]] name = "subtle" version = "2.6.1" @@ -2975,9 +3181,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.89" 
+version = "2.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d46482f1c1c87acd84dea20c1bf5ebff4c757009ed6bf19cfd36fb10e92c4e" +checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31" dependencies = [ "proc-macro2", "quote", @@ -2992,13 +3198,24 @@ checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] name = "sync_wrapper" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" dependencies = [ "futures-core", ] +[[package]] +name = "synstructure" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + [[package]] name = "system-configuration" version = "0.5.1" @@ -3006,7 +3223,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ "bitflags 1.3.2", - "core-foundation", + "core-foundation 0.9.4", "system-configuration-sys", ] @@ -3022,9 +3239,9 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.13.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" +checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" dependencies = [ "cfg-if", "fastrand", @@ -3035,11 +3252,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.64" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" +checksum = 
"b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" dependencies = [ - "thiserror-impl 1.0.64", + "thiserror-impl 1.0.69", ] [[package]] @@ -3053,13 +3270,13 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "1.0.64" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -3070,7 +3287,7 @@ checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -3123,6 +3340,16 @@ dependencies = [ "crunchy", ] +[[package]] +name = "tinystr" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +dependencies = [ + "displaydoc", + "zerovec", +] + [[package]] name = "tinyvec" version = "1.8.0" @@ -3164,7 +3391,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -3194,7 +3421,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.14", + "rustls 0.23.19", "rustls-pki-types", "tokio", ] @@ -3223,6 +3450,27 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-websockets" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f591660438b3038dd04d16c938271c79e7e06260ad2ea2885a4861bfb238605d" +dependencies = [ + "base64 0.22.1", + "bytes", + "futures-core", + "futures-sink", + "http 1.1.0", + "httparse", + "rand", + "ring", + "rustls-native-certs 0.8.1", + 
"rustls-pki-types", + "tokio", + "tokio-rustls 0.26.0", + "tokio-util", +] + [[package]] name = "toml" version = "0.8.19" @@ -3250,7 +3498,7 @@ version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.6.0", + "indexmap 2.7.0", "serde", "serde_spanned", "toml_datetime", @@ -3268,11 +3516,11 @@ dependencies = [ "axum", "base64 0.22.1", "bytes", - "h2 0.4.6", + "h2 0.4.7", "http 1.1.0", "http-body 1.0.1", "http-body-util", - "hyper 1.4.1", + "hyper 1.5.1", "hyper-timeout", "hyper-util", "percent-encoding", @@ -3293,12 +3541,12 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9557ce109ea773b399c9b9e5dca39294110b74f1f342cb347a80d1fce8c26a11" dependencies = [ - "prettyplease 0.2.22", + "prettyplease 0.2.25", "proc-macro2", "prost-build 0.13.3", "prost-types 0.13.3", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -3371,9 +3619,9 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.40" +version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" dependencies = [ "log", "pin-project-lite", @@ -3383,20 +3631,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.27" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] name = "tracing-core" -version = "0.1.32" +version = "0.1.33" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" dependencies = [ "once_cell", "valuable", @@ -3415,9 +3663,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.18" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" dependencies = [ "matchers", "nu-ansi-term", @@ -3439,7 +3687,7 @@ checksum = "70977707304198400eb4835a78f6a9f928bf41bba420deb8fdb175cd965d77a7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -3473,33 +3721,15 @@ checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" [[package]] name = "unicase" -version = "2.7.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89" -dependencies = [ - "version_check", -] - -[[package]] -name = "unicode-bidi" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" +checksum = "7e51b68083f157f853b6379db119d1c1be0e6e4dec98101079dec41f6f5cf6df" [[package]] name = "unicode-ident" -version = "1.0.13" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" - -[[package]] -name = "unicode-normalization" -version = "0.1.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" -dependencies = [ - "tinyvec", -] +checksum = 
"adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" [[package]] name = "unicode-segmentation" @@ -3533,20 +3763,32 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.2" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" +checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" dependencies = [ "form_urlencoded", "idna", "percent-encoding", ] +[[package]] +name = "utf16_iter" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + [[package]] name = "uuid" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" +checksum = "f8c5f0a0af699448548ad1a2fbf920fb4bee257eae39953ba95cb84891a0446a" dependencies = [ "getrandom", "rand", @@ -3582,9 +3824,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.93" +version = "0.2.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" +checksum = "d15e63b4482863c109d70a7b8706c1e364eb6ea449b201a76c5b89cedcec2d5c" dependencies = [ "cfg-if", "once_cell", @@ -3593,36 +3835,37 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.93" +version = "0.2.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" 
+checksum = "8d36ef12e3aaca16ddd3f67922bc63e48e953f126de60bd33ccc0101ef9998cd" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.43" +version = "0.4.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61e9300f63a621e96ed275155c108eb6f843b6a26d053f122ab69724559dc8ed" +checksum = "9dfaf8f50e5f293737ee323940c7d8b08a66a95a419223d9f41610ca08b0833d" dependencies = [ "cfg-if", "js-sys", + "once_cell", "wasm-bindgen", "web-sys", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.93" +version = "0.2.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" +checksum = "705440e08b42d3e4b36de7d66c944be628d579796b8090bfa3471478a2260051" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3630,28 +3873,38 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.93" +version = "0.2.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" +checksum = "98c9ae5a76e46f4deecd0f0255cc223cfa18dc9b261213b8aa0c7b36f61b3f1d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.93" +version = "0.2.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" +checksum = "6ee99da9c5ba11bd675621338ef6fa52296b76b83305e9b6e5c77d4c286d6d49" [[package]] name = "web-sys" -version = "0.3.70" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0" +checksum = 
"a98bc3c33f0fe7e59ad7cd041b89034fa82a7c2d4365ca538dda6cdaf513863c" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" dependencies = [ "js-sys", "wasm-bindgen", @@ -3665,9 +3918,9 @@ checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" [[package]] name = "webpki-roots" -version = "0.26.6" +version = "0.26.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "841c67bff177718f1d4dfefde8d8f0e78f9b6589319ba88312f567fc5841a958" +checksum = "5d642ff16b7e79272ae451b7322067cdc17cadf68c23264be9d94a32319efe7e" dependencies = [ "rustls-pki-types", ] @@ -3913,12 +4166,26 @@ dependencies = [ ] [[package]] -name = "yaml-rust" -version = "0.4.5" +name = "write16" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" +checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" + +[[package]] +name = "writeable" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" + +[[package]] +name = "yaml-rust2" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8902160c4e6f2fb145dbe9d6760a75e3c9522d8bf796ed7047c85919ac7115f8" dependencies = [ - "linked-hash-map", + "arraydeque", + "encoding_rs", + "hashlink", ] [[package]] @@ -3930,6 +4197,30 @@ dependencies = [ "time", ] +[[package]] +name = "yoke" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = 
"yoke-derive" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", + "synstructure", +] + [[package]] name = "zerocopy" version = "0.7.35" @@ -3948,7 +4239,28 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", +] + +[[package]] +name = "zerofrom" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", + "synstructure", ] [[package]] @@ -3956,3 +4268,25 @@ name = "zeroize" version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" + +[[package]] +name = "zerovec" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] diff --git a/rust/Cargo.toml b/rust/Cargo.toml index b9a11c653f..3cbd68ef23 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -18,11 +18,9 @@ version = "0.1.0" edition = "2021" [dependencies] -tokio = "1.39.2" -backoff = { path = 
"backoff" } +tokio = "1.41.1" servesink = { path = "servesink" } serving = { path = "serving" } numaflow-core = { path = "numaflow-core" } -numaflow-pb = { path = "numaflow-pb" } tracing = "0.1.40" tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } diff --git a/rust/Dockerfile b/rust/Dockerfile index 3fcd606faa..ce50094cab 100644 --- a/rust/Dockerfile +++ b/rust/Dockerfile @@ -20,6 +20,9 @@ COPY ./backoff/Cargo.toml ./backoff/Cargo.toml RUN cargo new numaflow-models COPY ./numaflow-models/Cargo.toml ./numaflow-models/Cargo.toml +RUN cargo new numaflow-pb +COPY ./numaflow-pb/Cargo.toml ./numaflow-pb/Cargo.toml + RUN cargo new numaflow-core COPY numaflow-core/Cargo.toml ./numaflow-core/Cargo.toml @@ -40,9 +43,7 @@ COPY ./backoff/src ./backoff/src COPY ./numaflow-models/src ./numaflow-models/src COPY ./serving/src ./serving/src COPY numaflow-core/src ./numaflow-core/src -COPY numaflow-core/build.rs ./numaflow-core/build.rs -COPY numaflow-core/proto ./numaflow-core/proto - +COPY ./numaflow-pb/src ./numaflow-pb/src # Build the real binaries RUN touch src/bin/main.rs && \ cargo build --workspace --all --release diff --git a/rust/backoff/Cargo.toml b/rust/backoff/Cargo.toml index 9c2904925e..c82508001a 100644 --- a/rust/backoff/Cargo.toml +++ b/rust/backoff/Cargo.toml @@ -5,4 +5,4 @@ edition = "2021" [dependencies] pin-project = "1.1.5" -tokio = { version = "1.38.0", features = ["full"] } +tokio = { version = "1.41.1", features = ["full"] } diff --git a/rust/numaflow-core/Cargo.toml b/rust/numaflow-core/Cargo.toml index 6df96fb14d..c72b20b5d9 100644 --- a/rust/numaflow-core/Cargo.toml +++ b/rust/numaflow-core/Cargo.toml @@ -13,8 +13,8 @@ axum = "0.7.5" axum-server = { version = "0.7.1", features = ["tls-rustls"] } tonic = "0.12.3" bytes = "1.7.1" -thiserror = "1.0.63" -tokio = { version = "1.39.3", features = ["full"] } +thiserror = "2.0.3" +tokio = { version = "1.41.1", features = ["full"] } tracing = "0.1.40" tokio-util = "0.7.11" tokio-stream = 
"0.1.15" @@ -41,7 +41,7 @@ log = "0.4.22" futures = "0.3.30" pin-project = "1.1.5" rand = "0.8.5" -async-nats = "0.37.0" +async-nats = "0.38.0" numaflow-pulsar = {path = "../numaflow-extns/pulsar"} [dev-dependencies] @@ -50,4 +50,3 @@ numaflow = { git = "https://github.com/numaproj/numaflow-rs.git", rev = "ddd8795 pulsar = {version = "6.3.0", default-features = false, features = ["tokio-rustls-runtime"]} [build-dependencies] - diff --git a/rust/numaflow-core/src/config.rs b/rust/numaflow-core/src/config.rs index e36ab4dc21..167c2f1cd4 100644 --- a/rust/numaflow-core/src/config.rs +++ b/rust/numaflow-core/src/config.rs @@ -18,6 +18,58 @@ pub(crate) mod monovertex; /// Pipeline specific configs. pub(crate) mod pipeline; +pub const NUMAFLOW_MONO_VERTEX_NAME: &str = "NUMAFLOW_MONO_VERTEX_NAME"; +const NUMAFLOW_VERTEX_NAME: &str = "NUMAFLOW_VERTEX_NAME"; +const NUMAFLOW_REPLICA: &str = "NUMAFLOW_REPLICA"; +static VERTEX_NAME: OnceLock = OnceLock::new(); + +/// fetch the vertex name from the environment variable +pub(crate) fn get_vertex_name() -> &'static str { + VERTEX_NAME.get_or_init(|| { + env::var(NUMAFLOW_MONO_VERTEX_NAME) + .or_else(|_| env::var(NUMAFLOW_VERTEX_NAME)) + .unwrap_or_default() + }) +} + +static IS_MONO_VERTEX: OnceLock = OnceLock::new(); + +/// returns true if the vertex is a mono vertex +pub(crate) fn is_mono_vertex() -> bool { + *IS_MONO_VERTEX.get_or_init(|| env::var(NUMAFLOW_MONO_VERTEX_NAME).is_ok()) +} + +static COMPONENT_TYPE: OnceLock = OnceLock::new(); + +/// fetch the component type from the environment variable +pub(crate) fn get_component_type() -> &'static str { + COMPONENT_TYPE.get_or_init(|| { + if is_mono_vertex() { + "mono-vertex".to_string() + } else { + "pipeline".to_string() + } + }) +} + +static PIPELINE_NAME: OnceLock = OnceLock::new(); + +pub(crate) fn get_pipeline_name() -> &'static str { + PIPELINE_NAME.get_or_init(|| env::var("NUMAFLOW_PIPELINE_NAME").unwrap_or_default()) +} + +static VERTEX_REPLICA: OnceLock = 
OnceLock::new(); + +/// fetch the vertex replica information from the environment variable +pub(crate) fn get_vertex_replica() -> &'static u16 { + VERTEX_REPLICA.get_or_init(|| { + env::var(NUMAFLOW_REPLICA) + .unwrap_or_default() + .parse() + .unwrap_or_default() + }) +} + /// Exposes the [Settings] via lazy loading. pub fn config() -> &'static Settings { static CONF: OnceLock = OnceLock::new(); diff --git a/rust/numaflow-core/src/config/components.rs b/rust/numaflow-core/src/config/components.rs index 840ad39e59..adb2784e2a 100644 --- a/rust/numaflow-core/src/config/components.rs +++ b/rust/numaflow-core/src/config/components.rs @@ -5,13 +5,14 @@ pub(crate) mod source { use std::{fmt::Debug, time::Duration}; - use crate::error::Error; - use crate::Result; use bytes::Bytes; use numaflow_models::models::{GeneratorSource, PulsarSource, Source}; use numaflow_pulsar::source::{PulsarAuth, PulsarSourceConfig}; use tracing::warn; + use crate::error::Error; + use crate::Result; + #[derive(Debug, Clone, PartialEq)] pub(crate) struct SourceConfig { pub(crate) source_type: SourceType, @@ -74,9 +75,11 @@ pub(crate) mod source { tracing::warn!("JWT Token authentication is specified, but token is empty"); break 'out None; }; - let secret = - crate::shared::utils::get_secret_from_volume(&token.name, &token.key) - .unwrap(); + let secret = crate::shared::create_components::get_secret_from_volume( + &token.name, + &token.key, + ) + .unwrap(); Some(PulsarAuth::JWT(secret)) } None => None, @@ -359,6 +362,7 @@ pub(crate) mod transformer { #[derive(Debug, Clone, PartialEq)] pub(crate) struct TransformerConfig { + pub(crate) concurrency: usize, pub(crate) transformer_type: TransformerType, } @@ -609,6 +613,7 @@ mod transformer_tests { fn test_transformer_config_user_defined() { let user_defined_config = UserDefinedConfig::default(); let transformer_config = TransformerConfig { + concurrency: 1, transformer_type: TransformerType::UserDefined(user_defined_config.clone()), }; if let 
TransformerType::UserDefined(config) = transformer_config.transformer_type { diff --git a/rust/numaflow-core/src/config/monovertex.rs b/rust/numaflow-core/src/config/monovertex.rs index 0d1b0c1a9f..356e97d827 100644 --- a/rust/numaflow-core/src/config/monovertex.rs +++ b/rust/numaflow-core/src/config/monovertex.rs @@ -12,8 +12,8 @@ use crate::config::components::transformer::{ TransformerConfig, TransformerType, UserDefinedConfig, }; use crate::config::components::{sink, source}; +use crate::config::get_vertex_replica; use crate::error::Error; -use crate::message::get_vertex_replica; use crate::Result; const DEFAULT_BATCH_SIZE: u64 = 500; @@ -94,6 +94,7 @@ impl MonovertexConfig { .as_ref() .and_then(|source| source.transformer.as_ref()) .map(|_| TransformerConfig { + concurrency: batch_size as usize, // FIXME: introduce a new config called udf concurrency in the spec transformer_type: TransformerType::UserDefined(UserDefinedConfig::default()), }); diff --git a/rust/numaflow-core/src/config/pipeline.rs b/rust/numaflow-core/src/config/pipeline.rs index 4767c0aa70..c05ca73d3c 100644 --- a/rust/numaflow-core/src/config/pipeline.rs +++ b/rust/numaflow-core/src/config/pipeline.rs @@ -11,9 +11,9 @@ use crate::config::components::metrics::MetricsConfig; use crate::config::components::sink::SinkConfig; use crate::config::components::source::SourceConfig; use crate::config::components::transformer::{TransformerConfig, TransformerType}; +use crate::config::get_vertex_replica; use crate::config::pipeline::isb::{BufferReaderConfig, BufferWriterConfig}; use crate::error::Error; -use crate::message::get_vertex_replica; use crate::Result; const DEFAULT_BATCH_SIZE: u64 = 500; @@ -31,7 +31,7 @@ pub(crate) struct PipelineConfig { pub(crate) replica: u16, pub(crate) batch_size: usize, // FIXME(cr): we cannot leak this as a paf, we need to use a different terminology. 
- pub(crate) paf_batch_size: usize, + pub(crate) paf_concurrency: usize, pub(crate) read_timeout: Duration, pub(crate) js_client_config: isb::jetstream::ClientConfig, // TODO: make it enum, since we can have different ISB implementations pub(crate) from_vertex_config: Vec, @@ -47,7 +47,7 @@ impl Default for PipelineConfig { vertex_name: "default-vtx".to_string(), replica: 0, batch_size: DEFAULT_BATCH_SIZE as usize, - paf_batch_size: (DEFAULT_BATCH_SIZE * 2) as usize, + paf_concurrency: (DEFAULT_BATCH_SIZE * 2) as usize, read_timeout: Duration::from_secs(DEFAULT_TIMEOUT_IN_MS as u64), js_client_config: isb::jetstream::ClientConfig::default(), from_vertex_config: vec![], @@ -150,6 +150,7 @@ impl PipelineConfig { let vertex: VertexType = if let Some(source) = vertex_obj.spec.source { let transformer_config = source.transformer.as_ref().map(|_| TransformerConfig { + concurrency: batch_size as usize, // FIXME: introduce a separate field in the spec transformer_type: TransformerType::UserDefined(Default::default()), }); @@ -211,8 +212,12 @@ impl PipelineConfig { let partition_count = edge.to_vertex_partition_count.unwrap_or_default() as u16; let buffer_name = format!("{}-{}-{}", namespace, pipeline_name, edge.to); - let streams: Vec<(String, u16)> = (0..partition_count) - .map(|i| (format!("{}-{}", buffer_name, i), i)) + let streams: Vec<(&'static str, u16)> = (0..partition_count) + .map(|i| { + let stream: &'static str = + Box::leak(Box::new(format!("{}-{}", buffer_name, i))); + (stream, i) + }) .collect(); from_vertex_config.push(FromVertexConfig { @@ -265,7 +270,7 @@ impl PipelineConfig { Ok(PipelineConfig { batch_size: batch_size as usize, - paf_batch_size: env::var("PAF_BATCH_SIZE") + paf_concurrency: env::var("PAF_BATCH_SIZE") .unwrap_or("30000".to_string()) .parse() .unwrap(), @@ -297,7 +302,7 @@ mod tests { vertex_name: "default-vtx".to_string(), replica: 0, batch_size: DEFAULT_BATCH_SIZE as usize, - paf_batch_size: (DEFAULT_BATCH_SIZE * 2) as usize, + 
paf_concurrency: (DEFAULT_BATCH_SIZE * 2) as usize, read_timeout: Duration::from_secs(DEFAULT_TIMEOUT_IN_MS as u64), js_client_config: isb::jetstream::ClientConfig::default(), from_vertex_config: vec![], @@ -343,7 +348,7 @@ mod tests { vertex_name: "out".to_string(), replica: 0, batch_size: 500, - paf_batch_size: 30000, + paf_concurrency: 30000, read_timeout: Duration::from_secs(1), js_client_config: isb::jetstream::ClientConfig { url: "localhost:4222".to_string(), @@ -389,7 +394,7 @@ mod tests { vertex_name: "in".to_string(), replica: 0, batch_size: 1000, - paf_batch_size: 30000, + paf_concurrency: 30000, read_timeout: Duration::from_secs(1), js_client_config: isb::jetstream::ClientConfig { url: "localhost:4222".to_string(), @@ -442,7 +447,7 @@ mod tests { vertex_name: "in".to_string(), replica: 0, batch_size: 50, - paf_batch_size: 30000, + paf_concurrency: 30000, read_timeout: Duration::from_secs(1), js_client_config: isb::jetstream::ClientConfig { url: "localhost:4222".to_string(), diff --git a/rust/numaflow-core/src/config/pipeline/isb.rs b/rust/numaflow-core/src/config/pipeline/isb.rs index c010f9a15d..30c72351c9 100644 --- a/rust/numaflow-core/src/config/pipeline/isb.rs +++ b/rust/numaflow-core/src/config/pipeline/isb.rs @@ -75,7 +75,7 @@ impl fmt::Display for BufferFullStrategy { #[derive(Debug, Clone, PartialEq)] pub(crate) struct BufferReaderConfig { pub(crate) partitions: u16, - pub(crate) streams: Vec<(String, u16)>, + pub(crate) streams: Vec<(&'static str, u16)>, pub(crate) wip_ack_interval: Duration, } @@ -83,7 +83,7 @@ impl Default for BufferReaderConfig { fn default() -> Self { BufferReaderConfig { partitions: DEFAULT_PARTITIONS, - streams: vec![("default-0".to_string(), DEFAULT_PARTITION_IDX)], + streams: vec![("default-0", DEFAULT_PARTITION_IDX)], wip_ack_interval: Duration::from_millis(DEFAULT_WIP_ACK_INTERVAL_MILLIS), } } @@ -138,7 +138,7 @@ mod tests { fn test_default_buffer_reader_config() { let expected = BufferReaderConfig { partitions: 
DEFAULT_PARTITIONS, - streams: vec![("default-0".to_string(), DEFAULT_PARTITION_IDX)], + streams: vec![("default-0", DEFAULT_PARTITION_IDX)], wip_ack_interval: Duration::from_millis(DEFAULT_WIP_ACK_INTERVAL_MILLIS), }; let config = BufferReaderConfig::default(); diff --git a/rust/numaflow-core/src/lib.rs b/rust/numaflow-core/src/lib.rs index e324c0ff33..f90633d06b 100644 --- a/rust/numaflow-core/src/lib.rs +++ b/rust/numaflow-core/src/lib.rs @@ -9,9 +9,10 @@ use crate::config::{config, CustomResourceType}; mod error; pub(crate) use crate::error::{Error, Result}; -/// MonoVertex is a simplified version of the [Pipeline] spec which is ideal for high TPS, low latency +/// [MonoVertex] is a simplified version of the [Pipeline] spec which is ideal for high TPS, low latency /// use-cases which do not require [ISB]. /// +/// [MonoVertex]: https://numaflow.numaproj.io/core-concepts/monovertex/ /// [Pipeline]: https://numaflow.numaproj.io/core-concepts/pipeline/ /// [ISB]: https://numaflow.numaproj.io/core-concepts/inter-step-buffer/ pub mod monovertex; diff --git a/rust/numaflow-core/src/message.rs b/rust/numaflow-core/src/message.rs index 3d8407ec59..10cb3063ac 100644 --- a/rust/numaflow-core/src/message.rs +++ b/rust/numaflow-core/src/message.rs @@ -1,7 +1,6 @@ use std::cmp::PartialEq; use std::collections::HashMap; -use std::sync::OnceLock; -use std::{env, fmt}; +use std::fmt; use async_nats::HeaderValue; use base64::engine::general_purpose::STANDARD as BASE64_STANDARD; @@ -17,35 +16,12 @@ use prost::Message as ProtoMessage; use serde::{Deserialize, Serialize}; use tokio::sync::oneshot; -use crate::shared::utils::{prost_timestamp_from_utc, utc_from_timestamp}; -use crate::Error; +use crate::shared::grpc::prost_timestamp_from_utc; +use crate::shared::grpc::utc_from_timestamp; use crate::Result; +use crate::{config, Error}; -const NUMAFLOW_MONO_VERTEX_NAME: &str = "NUMAFLOW_MONO_VERTEX_NAME"; -const NUMAFLOW_VERTEX_NAME: &str = "NUMAFLOW_VERTEX_NAME"; -const 
NUMAFLOW_REPLICA: &str = "NUMAFLOW_REPLICA"; - -static VERTEX_NAME: OnceLock = OnceLock::new(); - -pub(crate) fn get_vertex_name() -> &'static str { - VERTEX_NAME.get_or_init(|| { - env::var(NUMAFLOW_MONO_VERTEX_NAME) - .or_else(|_| env::var(NUMAFLOW_VERTEX_NAME)) - .unwrap_or_default() - }) -} - -static VERTEX_REPLICA: OnceLock = OnceLock::new(); - -// fetch the vertex replica information from the environment variable -pub(crate) fn get_vertex_replica() -> &'static u16 { - VERTEX_REPLICA.get_or_init(|| { - env::var(NUMAFLOW_REPLICA) - .unwrap_or_default() - .parse() - .unwrap_or_default() - }) -} +const DROP: &str = "U+005C__DROP__"; /// A message that is sent from the source to the sink. #[derive(Debug, Clone, Serialize, Deserialize)] @@ -103,7 +79,7 @@ impl TryFrom for Message { let event_time = Utc::now(); let offset = None; let id = MessageID { - vertex_name: get_vertex_name().to_string(), + vertex_name: config::get_vertex_name().to_string(), offset: "0".to_string(), index: 0, }; @@ -119,6 +95,13 @@ impl TryFrom for Message { } } +impl Message { + // Check if the message should be dropped. + pub(crate) fn dropped(&self) -> bool { + self.keys.len() == 1 && self.keys[0] == DROP + } +} + /// IntOffset is integer based offset enum type. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct IntOffset { @@ -163,6 +146,7 @@ impl fmt::Display for StringOffset { } } +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub(crate) enum ReadAck { /// Message was successfully processed. 
Ack, @@ -319,7 +303,7 @@ impl TryFrom for Message { offset: Some(source_offset.clone()), event_time: utc_from_timestamp(result.event_time), id: MessageID { - vertex_name: get_vertex_name().to_string(), + vertex_name: config::get_vertex_name().to_string(), offset: source_offset.to_string(), index: 0, }, diff --git a/rust/numaflow-core/src/metrics.rs b/rust/numaflow-core/src/metrics.rs index 0cce19f956..81c8a3ccaa 100644 --- a/rust/numaflow-core/src/metrics.rs +++ b/rust/numaflow-core/src/metrics.rs @@ -27,11 +27,10 @@ use tonic::transport::Channel; use tonic::Request; use tracing::{debug, error, info}; -use crate::source::SourceHandle; +use crate::config::{get_pipeline_name, get_vertex_name, get_vertex_replica}; +use crate::source::Source; use crate::Error; -pub const COMPONENT_MVTX: &str = "mono-vertex"; - // SDK information const SDK_INFO: &str = "sdk_info"; const COMPONENT: &str = "component"; @@ -74,7 +73,7 @@ const DROPPED_TOTAL: &str = "dropped"; const FALLBACK_SINK_WRITE_TOTAL: &str = "write"; // pending as gauge -const SOURCE_PENDING: &str = "pending"; +const PENDING: &str = "pending"; // processing times as timers const E2E_TIME: &str = "processing_time"; @@ -185,7 +184,7 @@ pub(crate) struct MonoVtxMetrics { pub(crate) dropped_total: Family, Counter>, // gauge - pub(crate) source_pending: Family, Gauge>, + pub(crate) pending: Family, Gauge>, // timers pub(crate) e2e_time: Family, Histogram>, @@ -201,6 +200,7 @@ pub(crate) struct MonoVtxMetrics { // TODO: Add the metrics for the pipeline pub(crate) struct PipelineMetrics { pub(crate) forwarder: PipelineForwarderMetrics, + pub(crate) isb: PipelineISBMetrics, } /// Family of metrics for the sink @@ -221,7 +221,18 @@ pub(crate) struct TransformerMetrics { } pub(crate) struct PipelineForwarderMetrics { - pub(crate) data_read: Family, Counter>, + pub(crate) read_total: Family, Counter>, + pub(crate) read_time: Family, Histogram>, + pub(crate) ack_total: Family, Counter>, + pub(crate) ack_time: Family, 
Histogram>, + pub(crate) write_total: Family, Counter>, + pub(crate) read_bytes_total: Family, Counter>, + pub(crate) processed_time: Family, Histogram>, + pub(crate) pending: Family, Gauge>, +} + +pub(crate) struct PipelineISBMetrics { + pub(crate) paf_resolution_time: Family, Histogram>, } /// Exponential bucket distribution with range. @@ -254,7 +265,7 @@ impl MonoVtxMetrics { ack_total: Family::, Counter>::default(), dropped_total: Family::, Counter>::default(), // gauge - source_pending: Family::, Gauge>::default(), + pending: Family::, Gauge>::default(), // timers // exponential buckets in the range 100 microseconds to 15 minutes e2e_time: Family::, Histogram>::new_with_constructor(|| { @@ -312,9 +323,9 @@ impl MonoVtxMetrics { // gauges registry.register( - SOURCE_PENDING, + PENDING, "A Gauge to keep track of the total number of pending messages for the monovtx", - metrics.source_pending.clone(), + metrics.pending.clone(), ); // timers registry.register( @@ -370,7 +381,26 @@ impl PipelineMetrics { fn new() -> Self { let metrics = Self { forwarder: PipelineForwarderMetrics { - data_read: Default::default(), + read_total: Family::, Counter>::default(), + processed_time: Family::, Histogram>::new_with_constructor( + || Histogram::new(exponential_buckets_range(100.0, 60000000.0 * 15.0, 10)), + ), + read_time: Family::, Histogram>::new_with_constructor(|| { + Histogram::new(exponential_buckets_range(100.0, 60000000.0 * 15.0, 10)) + }), + read_bytes_total: Family::, Counter>::default(), + ack_total: Family::, Counter>::default(), + ack_time: Family::, Histogram>::new_with_constructor(|| { + Histogram::new(exponential_buckets_range(100.0, 60000000.0 * 15.0, 10)) + }), + pending: Family::, Gauge>::default(), + write_total: Family::, Counter>::default(), + }, + isb: PipelineISBMetrics { + paf_resolution_time: + Family::, Histogram>::new_with_constructor(|| { + Histogram::new(exponential_buckets_range(100.0, 60000000.0 * 15.0, 10)) + }), }, }; let mut registry = 
global_registry().registry.lock(); @@ -380,7 +410,37 @@ impl PipelineMetrics { forwarder_registry.register( PIPELINE_FORWARDER_READ_TOTAL, "Total number of Data Messages Read", - metrics.forwarder.data_read.clone(), + metrics.forwarder.read_total.clone(), + ); + forwarder_registry.register( + READ_TIME, + "Time taken to read data", + metrics.forwarder.read_time.clone(), + ); + forwarder_registry.register( + READ_BYTES_TOTAL, + "Total number of bytes read", + metrics.forwarder.read_bytes_total.clone(), + ); + forwarder_registry.register( + E2E_TIME, + "Time taken to process data", + metrics.forwarder.processed_time.clone(), + ); + forwarder_registry.register( + ACK_TOTAL, + "Total number of Ack Messages", + metrics.forwarder.ack_total.clone(), + ); + forwarder_registry.register( + ACK_TIME, + "Time taken to ack data", + metrics.forwarder.ack_time.clone(), + ); + forwarder_registry.register( + PENDING, + "Number of pending messages", + metrics.forwarder.pending.clone(), ); metrics } @@ -391,16 +451,16 @@ static MONOVTX_METRICS: OnceLock = OnceLock::new(); // forward_metrics is a helper function used to fetch the // MonoVtxMetrics object -pub(crate) fn forward_mvtx_metrics() -> &'static MonoVtxMetrics { +pub(crate) fn monovertex_metrics() -> &'static MonoVtxMetrics { MONOVTX_METRICS.get_or_init(MonoVtxMetrics::new) } /// PIPELINE_METRICS is the PipelineMetrics object which stores the metrics static PIPELINE_METRICS: OnceLock = OnceLock::new(); -// forward_pipeline_metrics is a helper function used to fetch the +// pipeline_metrics is a helper function used to fetch the // PipelineMetrics object -pub(crate) fn forward_pipeline_metrics() -> &'static PipelineMetrics { +pub(crate) fn pipeline_metrics() -> &'static PipelineMetrics { PIPELINE_METRICS.get_or_init(PipelineMetrics::new) } @@ -427,14 +487,11 @@ static MONOVTX_METRICS_LABELS: OnceLock> = OnceLock::new() // forward_metrics_labels is a helper function used to fetch the // MONOVTX_METRICS_LABELS object -pub(crate) 
fn mvtx_forward_metric_labels( - mvtx_name: String, - replica: u16, -) -> &'static Vec<(String, String)> { +pub(crate) fn mvtx_forward_metric_labels() -> &'static Vec<(String, String)> { MONOVTX_METRICS_LABELS.get_or_init(|| { let common_labels = vec![ - (MVTX_NAME_LABEL.to_string(), mvtx_name), - (REPLICA_LABEL.to_string(), replica.to_string()), + (MVTX_NAME_LABEL.to_string(), get_vertex_name().to_string()), + (REPLICA_LABEL.to_string(), get_vertex_replica().to_string()), ]; common_labels }) @@ -442,26 +499,58 @@ pub(crate) fn mvtx_forward_metric_labels( static PIPELINE_READ_METRICS_LABELS: OnceLock> = OnceLock::new(); -pub(crate) fn pipeline_forward_read_metric_labels( - pipeline_name: &str, - partition_name: &str, - vertex_name: &str, +pub(crate) fn pipeline_forward_metric_labels( vertex_type: &str, - replica: u16, + partition_name: Option<&str>, ) -> &'static Vec<(String, String)> { PIPELINE_READ_METRICS_LABELS.get_or_init(|| { - vec![ - (PIPELINE_NAME_LABEL.to_string(), pipeline_name.to_string()), - (PIPELINE_REPLICA_LABEL.to_string(), replica.to_string()), + let mut labels = vec![ ( - PIPELINE_PARTITION_NAME_LABEL.to_string(), - partition_name.to_string(), + PIPELINE_NAME_LABEL.to_string(), + get_pipeline_name().to_string(), + ), + ( + PIPELINE_REPLICA_LABEL.to_string(), + get_vertex_replica().to_string(), ), ( PIPELINE_VERTEX_TYPE_LABEL.to_string(), vertex_type.to_string(), ), - (PIPELINE_VERTEX_LABEL.to_string(), vertex_name.to_string()), + ( + PIPELINE_VERTEX_LABEL.to_string(), + get_vertex_name().to_string(), + ), + ]; + + if let Some(partition) = partition_name { + labels.push(( + PIPELINE_PARTITION_NAME_LABEL.to_string(), + partition.to_string(), + )); + } + + labels + }) +} + +static PIPELINE_ISB_METRICS_LABELS: OnceLock> = OnceLock::new(); + +pub(crate) fn pipeline_isb_metric_labels() -> &'static Vec<(String, String)> { + PIPELINE_ISB_METRICS_LABELS.get_or_init(|| { + vec![ + ( + PIPELINE_NAME_LABEL.to_string(), + get_pipeline_name().to_string(), + ), 
+ ( + PIPELINE_REPLICA_LABEL.to_string(), + get_vertex_replica().to_string(), + ), + ( + PIPELINE_VERTEX_LABEL.to_string(), + get_vertex_name().to_string(), + ), ] }) } @@ -595,9 +684,7 @@ struct TimestampedPending { /// and exposing the metrics. It maintains a list of pending stats and ensures that /// only the most recent entries are kept. pub(crate) struct PendingReader { - mvtx_name: String, - replica: u16, - lag_reader: SourceHandle, + lag_reader: Source, lag_checking_interval: Duration, refresh_interval: Duration, pending_stats: Arc>>, @@ -610,18 +697,14 @@ pub(crate) struct PendingReaderTasks { /// PendingReaderBuilder is used to build a [LagReader] instance. pub(crate) struct PendingReaderBuilder { - mvtx_name: String, - replica: u16, - lag_reader: SourceHandle, + lag_reader: Source, lag_checking_interval: Option, refresh_interval: Option, } impl PendingReaderBuilder { - pub(crate) fn new(mvtx_name: String, replica: u16, lag_reader: SourceHandle) -> Self { + pub(crate) fn new(lag_reader: Source) -> Self { Self { - mvtx_name, - replica, lag_reader, lag_checking_interval: None, refresh_interval: None, @@ -640,8 +723,6 @@ impl PendingReaderBuilder { pub(crate) fn build(self) -> PendingReader { PendingReader { - mvtx_name: self.mvtx_name, - replica: self.replica, lag_reader: self.lag_reader, lag_checking_interval: self .lag_checking_interval @@ -662,7 +743,7 @@ impl PendingReader { /// - Another to periodically expose the pending metrics. /// /// Dropping the PendingReaderTasks will abort the background tasks. 
- pub async fn start(&self) -> PendingReaderTasks { + pub async fn start(&self, is_mono_vertex: bool) -> PendingReaderTasks { let pending_reader = self.lag_reader.clone(); let lag_checking_interval = self.lag_checking_interval; let refresh_interval = self.refresh_interval; @@ -673,10 +754,8 @@ impl PendingReader { }); let pending_stats = self.pending_stats.clone(); - let mvtx_name = self.mvtx_name.clone(); - let replica = self.replica; let expose_handle = tokio::spawn(async move { - expose_pending_metrics(mvtx_name, replica, refresh_interval, pending_stats).await; + expose_pending_metrics(is_mono_vertex, refresh_interval, pending_stats).await; }); PendingReaderTasks { buildup_handle, @@ -696,7 +775,7 @@ impl Drop for PendingReaderTasks { /// Periodically checks the pending messages from the source client and build the pending stats. async fn build_pending_info( - source: SourceHandle, + source: Source, lag_checking_interval: Duration, pending_stats: Arc>>, ) { @@ -725,7 +804,7 @@ async fn build_pending_info( } } -async fn fetch_pending(lag_reader: &SourceHandle) -> crate::error::Result { +async fn fetch_pending(lag_reader: &Source) -> crate::error::Result { let response: i64 = lag_reader.pending().await?.map_or(-1, |p| p as i64); // default to -1(unavailable) Ok(response) } @@ -735,8 +814,7 @@ const LOOKBACK_SECONDS_MAP: [(&str, i64); 4] = // Periodically exposes the pending metrics by calculating the average pending messages over different intervals. 
async fn expose_pending_metrics( - mvtx_name: String, - replica: u16, + is_mono_vertex: bool, refresh_interval: Duration, pending_stats: Arc>>, ) { @@ -751,14 +829,21 @@ async fn expose_pending_metrics( for (label, seconds) in LOOKBACK_SECONDS_MAP { let pending = calculate_pending(seconds, &pending_stats).await; if pending != -1 { - let mut metric_labels = - mvtx_forward_metric_labels(mvtx_name.clone(), replica).clone(); + let mut metric_labels = mvtx_forward_metric_labels().clone(); metric_labels.push((PENDING_PERIOD_LABEL.to_string(), label.to_string())); pending_info.insert(label, pending); - forward_mvtx_metrics() - .source_pending - .get_or_create(&metric_labels) - .set(pending); + if is_mono_vertex { + monovertex_metrics() + .pending + .get_or_create(&metric_labels) + .set(pending); + } else { + pipeline_metrics() + .forwarder + .pending + .get_or_create(&metric_labels) + .set(pending); + } } } // skip for those the pending is not implemented @@ -806,7 +891,7 @@ mod tests { use tokio::sync::mpsc::Sender; use super::*; - use crate::shared::utils::create_rpc_channel; + use crate::shared::grpc::create_rpc_channel; struct SimpleSource; #[tonic::async_trait] @@ -913,7 +998,7 @@ mod tests { // wait for the servers to start // FIXME: we need to have a better way, this is flaky - tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + tokio::time::sleep(Duration::from_millis(100)).await; let metrics_state = UserDefinedContainerState::Monovertex(MonovertexContainerState { source_client: Some(SourceClient::new( create_rpc_channel(src_sock_file).await.unwrap(), @@ -991,8 +1076,7 @@ mod tests { tokio::spawn({ let pending_stats = pending_stats.clone(); async move { - expose_pending_metrics("test".to_string(), 0, refresh_interval, pending_stats) - .await; + expose_pending_metrics(true, refresh_interval, pending_stats).await; } }); // We use tokio::time::interval() as the ticker in the expose_pending_metrics() function. 
@@ -1004,10 +1088,10 @@ mod tests { let mut stored_values: [i64; 4] = [0; 4]; { for (i, (label, _)) in LOOKBACK_SECONDS_MAP.iter().enumerate() { - let mut metric_labels = mvtx_forward_metric_labels("test".to_string(), 0).clone(); + let mut metric_labels = mvtx_forward_metric_labels().clone(); metric_labels.push((PENDING_PERIOD_LABEL.to_string(), label.to_string())); - let guage = forward_mvtx_metrics() - .source_pending + let guage = monovertex_metrics() + .pending .get_or_create(&metric_labels) .get(); stored_values[i] = guage; @@ -1077,7 +1161,7 @@ mod tests { ); global_metrics.sdk_info.get_or_create(&sdk_labels).set(1); - let metrics = forward_mvtx_metrics(); + let metrics = monovertex_metrics(); // Use a fixed set of labels instead of the ones from mvtx_forward_metric_labels() since other test functions may also set it. let common_labels = vec![ ( @@ -1091,7 +1175,7 @@ mod tests { metrics.read_bytes_total.get_or_create(&common_labels).inc(); metrics.ack_total.get_or_create(&common_labels).inc(); metrics.dropped_total.get_or_create(&common_labels).inc(); - metrics.source_pending.get_or_create(&common_labels).set(10); + metrics.pending.get_or_create(&common_labels).set(10); metrics.e2e_time.get_or_create(&common_labels).observe(10.0); metrics.read_time.get_or_create(&common_labels).observe(3.0); metrics.ack_time.get_or_create(&common_labels).observe(2.0); diff --git a/rust/numaflow-core/src/monovertex.rs b/rust/numaflow-core/src/monovertex.rs index 8e6d9cad19..b8016624fa 100644 --- a/rust/numaflow-core/src/monovertex.rs +++ b/rust/numaflow-core/src/monovertex.rs @@ -1,27 +1,15 @@ use forwarder::ForwarderBuilder; -use numaflow_pb::clients::sink::sink_client::SinkClient; -use numaflow_pb::clients::source::source_client::SourceClient; -use numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; use tokio_util::sync::CancellationToken; -use tonic::transport::Channel; use tracing::info; -use crate::config::components::{sink, source, 
transformer}; +use crate::config::is_mono_vertex; use crate::config::monovertex::MonovertexConfig; -use crate::error::{self, Error}; -use crate::metrics; -use crate::shared::server_info::{sdk_server_info, ContainerType}; -use crate::shared::utils; -use crate::shared::utils::{ - create_rpc_channel, wait_until_sink_ready, wait_until_source_ready, - wait_until_transformer_ready, -}; -use crate::sink::{SinkClientType, SinkHandle}; -use crate::source::generator::new_generator; -use crate::source::pulsar::new_pulsar_source; -use crate::source::user_defined::new_source; -use crate::source::{SourceHandle, SourceType}; -use crate::transformer::user_defined::SourceTransformHandle; +use crate::error::{self}; +use crate::shared::create_components; +use crate::sink::SinkWriter; +use crate::source::Source; +use crate::transformer::Transformer; +use crate::{metrics, shared}; /// [forwarder] orchestrates data movement from the Source to the Sink via the optional SourceTransformer. /// The forward-a-chunk executes the following in an infinite loop till a shutdown signal is received: @@ -35,155 +23,31 @@ pub(crate) async fn start_forwarder( cln_token: CancellationToken, config: &MonovertexConfig, ) -> error::Result<()> { - let mut source_grpc_client = if let source::SourceType::UserDefined(source_config) = - &config.source_config.source_type - { - // do server compatibility check - let server_info = sdk_server_info( - source_config.server_info_path.clone().into(), - cln_token.clone(), - ) - .await?; - - let metric_labels = metrics::sdk_info_labels( - metrics::COMPONENT_MVTX.to_string(), - config.name.clone(), - server_info.language, - server_info.version, - ContainerType::Sourcer.to_string(), - ); - metrics::global_metrics() - .sdk_info - .get_or_create(&metric_labels) - .set(1); - - let mut source_grpc_client = - SourceClient::new(create_rpc_channel(source_config.socket_path.clone().into()).await?) 
- .max_encoding_message_size(source_config.grpc_max_message_size) - .max_encoding_message_size(source_config.grpc_max_message_size); - - wait_until_source_ready(&cln_token, &mut source_grpc_client).await?; - Some(source_grpc_client) - } else { - None - }; - - let sink_grpc_client = if let sink::SinkType::UserDefined(udsink_config) = - &config.sink_config.sink_type - { - // do server compatibility check - let server_info = sdk_server_info( - udsink_config.server_info_path.clone().into(), - cln_token.clone(), - ) - .await?; + let (source, source_grpc_client) = create_components::create_source( + config.batch_size, + config.read_timeout, + &config.source_config, + cln_token.clone(), + ) + .await?; - let metric_labels = metrics::sdk_info_labels( - metrics::COMPONENT_MVTX.to_string(), - config.name.clone(), - server_info.language, - server_info.version, - ContainerType::Sinker.to_string(), - ); - metrics::global_metrics() - .sdk_info - .get_or_create(&metric_labels) - .set(1); - - let mut sink_grpc_client = - SinkClient::new(create_rpc_channel(udsink_config.socket_path.clone().into()).await?) 
- .max_encoding_message_size(udsink_config.grpc_max_message_size) - .max_encoding_message_size(udsink_config.grpc_max_message_size); - - wait_until_sink_ready(&cln_token, &mut sink_grpc_client).await?; - Some(sink_grpc_client) - } else { - None - }; - - let fb_sink_grpc_client = if let Some(fb_sink) = &config.fb_sink_config { - if let sink::SinkType::UserDefined(fb_sink_config) = &fb_sink.sink_type { - // do server compatibility check - let server_info = sdk_server_info( - fb_sink_config.server_info_path.clone().into(), - cln_token.clone(), - ) - .await?; - - let metric_labels = metrics::sdk_info_labels( - metrics::COMPONENT_MVTX.to_string(), - config.name.clone(), - server_info.language, - server_info.version, - ContainerType::FbSinker.to_string(), - ); - metrics::global_metrics() - .sdk_info - .get_or_create(&metric_labels) - .set(1); - - let mut fb_sink_grpc_client = SinkClient::new( - create_rpc_channel(fb_sink_config.socket_path.clone().into()).await?, - ) - .max_encoding_message_size(fb_sink_config.grpc_max_message_size) - .max_encoding_message_size(fb_sink_config.grpc_max_message_size); - - wait_until_sink_ready(&cln_token, &mut fb_sink_grpc_client).await?; - Some(fb_sink_grpc_client) - } else { - None - } - } else { - None - }; - - let transformer_grpc_client = if let Some(transformer) = &config.transformer_config { - if let transformer::TransformerType::UserDefined(transformer_config) = - &transformer.transformer_type - { - // do server compatibility check - let server_info = sdk_server_info( - transformer_config.server_info_path.clone().into(), - cln_token.clone(), - ) - .await?; - - let metric_labels = metrics::sdk_info_labels( - metrics::COMPONENT_MVTX.to_string(), - config.name.clone(), - server_info.language, - server_info.version, - ContainerType::SourceTransformer.to_string(), - ); - - metrics::global_metrics() - .sdk_info - .get_or_create(&metric_labels) - .set(1); - - let mut transformer_grpc_client = SourceTransformClient::new( - 
create_rpc_channel(transformer_config.socket_path.clone().into()).await?, - ) - .max_encoding_message_size(transformer_config.grpc_max_message_size) - .max_encoding_message_size(transformer_config.grpc_max_message_size); - - wait_until_transformer_ready(&cln_token, &mut transformer_grpc_client).await?; - Some(transformer_grpc_client.clone()) - } else { - None - } - } else { - None - }; - - let source_type = fetch_source(config, &mut source_grpc_client).await?; - let (sink, fb_sink) = fetch_sink( - config, - sink_grpc_client.clone(), - fb_sink_grpc_client.clone(), + let (transformer, transformer_grpc_client) = create_components::create_transformer( + config.batch_size, + config.transformer_config.clone(), + cln_token.clone(), ) .await?; + let (sink_writer, sink_grpc_client, fb_sink_grpc_client) = + create_components::create_sink_writer( + config.batch_size, + config.read_timeout, + config.sink_config.clone(), + config.fb_sink_config.clone(), + &cln_token, + ) + .await?; + // Start the metrics server in a separate background async spawn, // This should be running throughout the lifetime of the application, hence the handle is not // joined. @@ -197,145 +61,41 @@ pub(crate) async fn start_forwarder( // start the metrics server // FIXME: what to do with the handle - utils::start_metrics_server(config.metrics_config.clone(), metrics_state).await; - - let source = SourceHandle::new(source_type, config.batch_size); - start_forwarder_with_source( - config.clone(), - source, - sink, - transformer_grpc_client, - fb_sink, - cln_token, - ) - .await?; - - info!("Forwarder stopped gracefully"); - Ok(()) -} - -// fetch right the source. -// source_grpc_client can be optional because it is valid only for user-defined source. 
-async fn fetch_source( - config: &MonovertexConfig, - source_grpc_client: &mut Option>, -) -> crate::Result { - match &config.source_config.source_type { - source::SourceType::Generator(generator_config) => { - let (source_read, source_ack, lag_reader) = - new_generator(generator_config.clone(), config.batch_size)?; - Ok(SourceType::Generator(source_read, source_ack, lag_reader)) - } - source::SourceType::UserDefined(_) => { - let Some(source_grpc_client) = source_grpc_client.clone() else { - return Err(Error::Config( - "Configuration type is user-defined, however no grpc client is provided".into(), - )); - }; - let (source_read, source_ack, lag_reader) = - new_source(source_grpc_client, config.batch_size, config.read_timeout).await?; - Ok(SourceType::UserDefinedSource( - source_read, - source_ack, - lag_reader, - )) - } - source::SourceType::Pulsar(pulsar_config) => { - let pulsar = new_pulsar_source( - pulsar_config.clone(), - config.batch_size, - config.read_timeout, - ) - .await?; - Ok(SourceType::Pulsar(pulsar)) - } - } -} - -// fetch the actor handle for the sink. -// sink_grpc_client can be optional because it is valid only for user-defined sink. 
-async fn fetch_sink( - config: &MonovertexConfig, - sink_grpc_client: Option>, - fallback_sink_grpc_client: Option>, -) -> crate::Result<(SinkHandle, Option)> { - let fb_sink = match fallback_sink_grpc_client { - Some(fallback_sink) => Some( - SinkHandle::new( - SinkClientType::UserDefined(fallback_sink), - config.batch_size, - ) - .await?, - ), - None => { - if let Some(fb_sink_config) = &config.fb_sink_config { - if let sink::SinkType::Log(_) = &fb_sink_config.sink_type { - let log = SinkHandle::new(SinkClientType::Log, config.batch_size).await?; - return Ok((log, None)); - } - if let sink::SinkType::Blackhole(_) = &fb_sink_config.sink_type { - let blackhole = - SinkHandle::new(SinkClientType::Blackhole, config.batch_size).await?; - return Ok((blackhole, None)); - } - return Err(Error::Config( - "No valid Fallback Sink configuration found".to_string(), - )); - } + shared::metrics::start_metrics_server(config.metrics_config.clone(), metrics_state).await; - None - } - }; + start(config.clone(), source, sink_writer, transformer, cln_token).await?; - if let Some(sink_client) = sink_grpc_client { - let sink = - SinkHandle::new(SinkClientType::UserDefined(sink_client), config.batch_size).await?; - return Ok((sink, fb_sink)); - } - if let sink::SinkType::Log(_) = &config.sink_config.sink_type { - let log = SinkHandle::new(SinkClientType::Log, config.batch_size).await?; - return Ok((log, fb_sink)); - } - if let sink::SinkType::Blackhole(_) = &config.sink_config.sink_type { - let blackhole = SinkHandle::new(SinkClientType::Blackhole, config.batch_size).await?; - return Ok((blackhole, fb_sink)); - } - Err(Error::Config( - "No valid Sink configuration found".to_string(), - )) + Ok(()) } -async fn start_forwarder_with_source( +async fn start( mvtx_config: MonovertexConfig, - source: SourceHandle, - sink: SinkHandle, - transformer_client: Option>, - fallback_sink: Option, + source: Source, + sink: SinkWriter, + transformer: Option, cln_token: CancellationToken, ) -> 
error::Result<()> { // start the pending reader to publish pending metrics - let pending_reader = utils::create_pending_reader(&mvtx_config, source.clone()).await; - let _pending_reader_handle = pending_reader.start().await; + let pending_reader = + shared::metrics::create_pending_reader(&mvtx_config.metrics_config, source.clone()).await; + let _pending_reader_handle = pending_reader.start(is_mono_vertex()).await; - let mut forwarder_builder = ForwarderBuilder::new(source, sink, mvtx_config, cln_token); + let mut forwarder_builder = ForwarderBuilder::new(source, sink, cln_token); // add transformer if exists - if let Some(transformer_client) = transformer_client { - let transformer = SourceTransformHandle::new(transformer_client).await?; - forwarder_builder = forwarder_builder.source_transformer(transformer); + if let Some(transformer_client) = transformer { + forwarder_builder = forwarder_builder.transformer(transformer_client); } - // add fallback sink if exists - if let Some(fallback_sink) = fallback_sink { - forwarder_builder = forwarder_builder.fallback_sink_writer(fallback_sink); - } // build the final forwarder - let mut forwarder = forwarder_builder.build(); + let forwarder = forwarder_builder.build(); + + info!("Forwarder is starting..."); // start the forwarder, it will return only on Signal forwarder.start().await?; - info!("Forwarder stopped gracefully"); + info!("Forwarder stopped gracefully."); Ok(()) } diff --git a/rust/numaflow-core/src/monovertex/forwarder.rs b/rust/numaflow-core/src/monovertex/forwarder.rs index f84cade170..dc77154c6f 100644 --- a/rust/numaflow-core/src/monovertex/forwarder.rs +++ b/rust/numaflow-core/src/monovertex/forwarder.rs @@ -1,598 +1,162 @@ -use std::collections::HashMap; +//! The forwarder for [MonoVertex] at its core orchestrates message movement asynchronously using +//! [Stream] over channels between the components. The messages send over this channel using +//! [Actor Pattern]. +//! +//! ```text +//! 
(source) --[c]--> (transformer)* --[c]--> (sink) +//! +//! [c] - channel +//! * - optional +//! ``` +//! +//! Most of the data move forward except for the ack which can happen only after the Write. +//! ```text +//! (Read) +-------> (UDF) -------> (Write) + +//! | | +//! | | +//! +-------> {Ack} <----------------+ +//! +//! {} -> Listens on a OneShot +//! () -> Streaming Interface +//! ``` +//! +//! [MonoVertex]: https://numaflow.numaproj.io/core-concepts/monovertex/ +//! [Stream]: https://docs.rs/tokio-stream/latest/tokio_stream/wrappers/struct.ReceiverStream.html +//! [Actor Pattern]: https://ryhl.io/blog/actors-with-tokio/ -use chrono::Utc; -use log::warn; -use tokio::time::sleep; use tokio_util::sync::CancellationToken; -use tracing::{debug, info}; -use crate::config::components::sink::{OnFailureStrategy, RetryConfig}; -use crate::config::monovertex::MonovertexConfig; use crate::error; -use crate::message::{Message, Offset, ResponseStatusFromSink}; -use crate::metrics; -use crate::metrics::forward_mvtx_metrics; -use crate::sink::SinkHandle; +use crate::sink::SinkWriter; +use crate::source::Source; +use crate::transformer::Transformer; use crate::Error; -use crate::{source::SourceHandle, transformer::user_defined::SourceTransformHandle}; /// Forwarder is responsible for reading messages from the source, applying transformation if /// transformer is present, writing the messages to the sink, and then acknowledging the messages /// back to the source. pub(crate) struct Forwarder { - source_reader: SourceHandle, - sink_writer: SinkHandle, - source_transformer: Option, - fb_sink_writer: Option, + source: Source, + transformer: Option, + sink_writer: SinkWriter, cln_token: CancellationToken, - common_labels: Vec<(String, String)>, - mvtx_config: MonovertexConfig, } -/// ForwarderBuilder is used to build a Forwarder instance with optional fields. 
pub(crate) struct ForwarderBuilder { - source_reader: SourceHandle, - sink_writer: SinkHandle, + source: Source, + sink_writer: SinkWriter, cln_token: CancellationToken, - source_transformer: Option, - fb_sink_writer: Option, - mvtx_config: MonovertexConfig, + transformer: Option, } impl ForwarderBuilder { /// Create a new builder with mandatory fields pub(crate) fn new( - source_reader: SourceHandle, - sink_writer: SinkHandle, - mvtx_config: MonovertexConfig, + streaming_source: Source, + streaming_sink: SinkWriter, cln_token: CancellationToken, ) -> Self { Self { - source_reader, - sink_writer, + source: streaming_source, + sink_writer: streaming_sink, cln_token, - source_transformer: None, - fb_sink_writer: None, - mvtx_config, + transformer: None, } } /// Set the optional transformer client - pub(crate) fn source_transformer(mut self, transformer_client: SourceTransformHandle) -> Self { - self.source_transformer = Some(transformer_client); + pub(crate) fn transformer(mut self, transformer: Transformer) -> Self { + self.transformer = Some(transformer); self } - /// Set the optional fallback client - pub(crate) fn fallback_sink_writer(mut self, fallback_client: SinkHandle) -> Self { - self.fb_sink_writer = Some(fallback_client); - self - } - - /// Build the Forwarder instance + /// Build the StreamingForwarder instance #[must_use] pub(crate) fn build(self) -> Forwarder { - let common_labels = metrics::mvtx_forward_metric_labels( - self.mvtx_config.name.clone(), - self.mvtx_config.replica, - ) - .clone(); Forwarder { - source_reader: self.source_reader, + source: self.source, sink_writer: self.sink_writer, - source_transformer: self.source_transformer, - fb_sink_writer: self.fb_sink_writer, + transformer: self.transformer, cln_token: self.cln_token, - mvtx_config: self.mvtx_config, - common_labels, } } } impl Forwarder { - /// start starts the forward-a-chunk loop and exits only after a chunk has been forwarded and ack'ed. 
- /// this means that, in the happy path scenario a block is always completely processed. - /// this function will return on any error and will cause end up in a non-0 exit code. - pub(crate) async fn start(&mut self) -> error::Result<()> { - let mut processed_msgs_count: usize = 0; - let mut last_forwarded_at = std::time::Instant::now(); - info!("Forwarder has started"); - loop { - let start_time = tokio::time::Instant::now(); - if self.cln_token.is_cancelled() { - break; - } - - processed_msgs_count += self.read_and_process_messages().await?; - - // if the last forward was more than 1 second ago, forward a chunk print the number of messages forwarded - // TODO: add histogram details (p99, etc.) - if last_forwarded_at.elapsed().as_millis() >= 1000 { - info!( - "Forwarded {} messages at time {}", - processed_msgs_count, - Utc::now() - ); - processed_msgs_count = 0; - last_forwarded_at = std::time::Instant::now(); + pub(crate) async fn start(&self) -> error::Result<()> { + let (messages_stream, reader_handle) = + self.source.streaming_read(self.cln_token.clone())?; + + let (transformed_messages_stream, transformer_handle) = + if let Some(transformer) = &self.transformer { + let (transformed_messages_rx, transformer_handle) = + transformer.transform_stream(messages_stream)?; + (transformed_messages_rx, Some(transformer_handle)) + } else { + (messages_stream, None) + }; + + let sink_writer_handle = self + .sink_writer + .streaming_write(transformed_messages_stream, self.cln_token.clone()) + .await?; + + match tokio::try_join!( + reader_handle, + transformer_handle.unwrap_or_else(|| tokio::spawn(async { Ok(()) })), + sink_writer_handle, + ) { + Ok((reader_result, transformer_result, sink_writer_result)) => { + reader_result?; + transformer_result?; + sink_writer_result?; + Ok(()) } - - forward_mvtx_metrics() - .e2e_time - .get_or_create(&self.common_labels) - .observe(start_time.elapsed().as_micros() as f64); - } - Ok(()) - } - - /// Read messages from the source, 
apply transformation if transformer is present, - /// write the messages to the sink, if fallback messages are present write them to the fallback sink, - /// and then acknowledge the messages back to the source. - async fn read_and_process_messages(&mut self) -> error::Result { - let start_time = tokio::time::Instant::now(); - let messages = self.source_reader.read().await.map_err(|e| { - Error::Forwarder(format!("Failed to read messages from source {:?}", e)) - })?; - - debug!( - "Read batch size: {} and latency - {}ms", - messages.len(), - start_time.elapsed().as_millis() - ); - - forward_mvtx_metrics() - .read_time - .get_or_create(&self.common_labels) - .observe(start_time.elapsed().as_micros() as f64); - - // read returned 0 messages, nothing more to be done. - if messages.is_empty() { - return Ok(0); - } - - let msg_count = messages.len() as u64; - forward_mvtx_metrics() - .read_total - .get_or_create(&self.common_labels) - .inc_by(msg_count); - - let (offsets, bytes_count): (Vec, u64) = messages.iter().try_fold( - (Vec::with_capacity(messages.len()), 0), - |(mut offsets, mut bytes_count), msg| { - if let Some(offset) = &msg.offset { - offsets.push(offset.clone()); - bytes_count += msg.value.len() as u64; - Ok((offsets, bytes_count)) - } else { - Err(Error::Forwarder("Message offset is missing".to_string())) - } - }, - )?; - - forward_mvtx_metrics() - .read_bytes_total - .get_or_create(&self.common_labels) - .inc_by(bytes_count); - - // Apply transformation if transformer is present - let transformed_messages = self.apply_transformer(messages).await.map_err(|e| { - Error::Forwarder(format!( - "Failed to apply transformation to messages {:?}", - e - )) - })?; - - // Write the messages to the sink - self.write_to_sink(transformed_messages) - .await - .map_err(|e| Error::Forwarder(format!("Failed to write messages to sink {:?}", e)))?; - - // Acknowledge the messages back to the source - self.acknowledge_messages(offsets).await.map_err(|e| { - 
Error::Forwarder(format!( - "Failed to acknowledge messages back to source {:?}", + Err(e) => Err(Error::Forwarder(format!( + "Error while joining reader, transformer, and sink writer: {:?}", e - )) - })?; - - Ok(msg_count as usize) - } - - // Applies transformation to the messages if transformer is present - // we concurrently apply transformation to all the messages. - async fn apply_transformer(&mut self, messages: Vec) -> error::Result> { - let Some(client) = &mut self.source_transformer else { - // return early if there is no transformer - return Ok(messages); - }; - - let start_time = tokio::time::Instant::now(); - let results = client.transform(messages).await?; - - debug!( - "Transformer latency - {}ms", - start_time.elapsed().as_millis() - ); - forward_mvtx_metrics() - .transformer - .time - .get_or_create(&self.common_labels) - .observe(start_time.elapsed().as_micros() as f64); - - Ok(results) - } - - // Writes the messages to the sink and handles fallback messages if present - async fn write_to_sink(&mut self, messages: Vec) -> error::Result<()> { - let msg_count = messages.len() as u64; - - if messages.is_empty() { - return Ok(()); - } - - // this start time is for tracking the total time taken - let start_time_e2e = tokio::time::Instant::now(); - - let mut attempts = 0; - let mut error_map = HashMap::new(); - let mut fallback_msgs = Vec::new(); - // start with the original set of message to be sent. - // we will overwrite this vec with failed messages and will keep retrying. - let mut messages_to_send = messages; - - // only breaks out of this loop based on the retry strategy unless all the messages have been written to sink - // successfully. 
- let retry_config = &self - .mvtx_config - .sink_config - .retry_config - .clone() - .unwrap_or_default(); - - loop { - while attempts < retry_config.sink_max_retry_attempts { - let status = self - .write_to_sink_once( - &mut error_map, - &mut fallback_msgs, - &mut messages_to_send, - retry_config, - ) - .await; - match status { - Ok(true) => break, - Ok(false) => { - attempts += 1; - warn!( - "Retry attempt {} due to retryable error. Errors: {:?}", - attempts, error_map - ); - } - Err(e) => Err(e)?, - } - - // if we are shutting down, stop the retry - if self.cln_token.is_cancelled() { - return Err(Error::Sink( - "Cancellation token triggered during retry".to_string(), - )); - } - } - - // If after the retries we still have messages to process, handle the post retry failures - let need_retry = self.handle_sink_post_retry( - &mut attempts, - &mut error_map, - &mut fallback_msgs, - &mut messages_to_send, - retry_config, - ); - - match need_retry { - // if we are done with the messages, break the loop - Ok(false) => break, - // if we need to retry, reset the attempts and error_map - Ok(true) => { - attempts = 0; - error_map.clear(); - } - Err(e) => Err(e)?, - } - } - - // If there are fallback messages, write them to the fallback sink - if !fallback_msgs.is_empty() { - self.handle_fallback_messages(fallback_msgs, retry_config) - .await?; - } - - forward_mvtx_metrics() - .sink - .time - .get_or_create(&self.common_labels) - .observe(start_time_e2e.elapsed().as_micros() as f64); - - // update the metric for number of messages written to the sink - // this included primary and fallback sink - forward_mvtx_metrics() - .sink - .write_total - .get_or_create(&self.common_labels) - .inc_by(msg_count); - Ok(()) - } - - /// Handles the post retry failures based on the configured strategy, - /// returns true if we need to retry, else false. 
- fn handle_sink_post_retry( - &mut self, - attempts: &mut u16, - error_map: &mut HashMap, - fallback_msgs: &mut Vec, - messages_to_send: &mut Vec, - retry_config: &RetryConfig, - ) -> error::Result { - // if we are done with the messages, break the loop - if messages_to_send.is_empty() { - return Ok(false); - } - // check what is the failure strategy in the config - let strategy = retry_config.sink_retry_on_fail_strategy.clone(); - match strategy { - // if we need to retry, return true - OnFailureStrategy::Retry => { - warn!( - "Using onFailure Retry, Retry attempts {} completed", - attempts - ); - return Ok(true); - } - // if we need to drop the messages, log and return false - OnFailureStrategy::Drop => { - // log that we are dropping the messages as requested - warn!( - "Dropping messages after {} attempts. Errors: {:?}", - attempts, error_map - ); - // update the metrics - forward_mvtx_metrics() - .dropped_total - .get_or_create(&self.common_labels) - .inc_by(messages_to_send.len() as u64); - } - // if we need to move the messages to the fallback, return false - OnFailureStrategy::Fallback => { - // log that we are moving the messages to the fallback as requested - warn!( - "Moving messages to fallback after {} attempts. Errors: {:?}", - attempts, error_map - ); - // move the messages to the fallback messages - fallback_msgs.append(messages_to_send); - } - } - // if we are done with the messages, break the loop - Ok(false) - } - - /// Writes to sink once and will return true if successful, else false. Please note that it - /// mutates is incoming fields. 
- async fn write_to_sink_once( - &mut self, - error_map: &mut HashMap, - fallback_msgs: &mut Vec, - messages_to_send: &mut Vec, - retry_config: &RetryConfig, - ) -> error::Result { - let start_time = tokio::time::Instant::now(); - match self.sink_writer.sink(messages_to_send.clone()).await { - Ok(response) => { - debug!("Sink latency - {}ms", start_time.elapsed().as_millis()); - - // create a map of id to result, since there is no strict requirement - // for the udsink to return the results in the same order as the requests - let result_map = response - .into_iter() - .map(|resp| (resp.id, resp.status)) - .collect::>(); - - error_map.clear(); - // drain all the messages that were successfully written - // and keep only the failed messages to send again - // construct the error map for the failed messages - messages_to_send.retain(|msg| { - if let Some(result) = result_map.get(&msg.id.to_string()) { - return match result { - ResponseStatusFromSink::Success => false, - ResponseStatusFromSink::Failed(err_msg) => { - *error_map.entry(err_msg.clone()).or_insert(0) += 1; - true - } - ResponseStatusFromSink::Fallback => { - fallback_msgs.push(msg.clone()); - false - } - }; - } - false - }); - - // if all messages are successfully written, break the loop - if messages_to_send.is_empty() { - return Ok(true); - } - - sleep(tokio::time::Duration::from_millis( - retry_config.sink_retry_interval_in_ms as u64, - )) - .await; - - // we need to retry - Ok(false) - } - Err(e) => Err(e), - } - } - - // Writes the fallback messages to the fallback sink - async fn handle_fallback_messages( - &mut self, - fallback_msgs: Vec, - retry_config: &RetryConfig, - ) -> error::Result<()> { - if self.fb_sink_writer.is_none() { - return Err(Error::Sink( - "Response contains fallback messages but no fallback sink is configured" - .to_string(), - )); - } - - let fallback_client = self.fb_sink_writer.as_mut().unwrap(); - let mut attempts = 0; - let mut fallback_error_map = HashMap::new(); - // start 
with the original set of message to be sent. - // we will overwrite this vec with failed messages and will keep retrying. - let mut messages_to_send = fallback_msgs; - let fb_msg_count = messages_to_send.len() as u64; - - let default_retry = retry_config - .sink_default_retry_strategy - .clone() - .backoff - .unwrap(); - let max_attempts = default_retry.steps.unwrap(); - let sleep_interval = default_retry.interval.unwrap(); - - while attempts < max_attempts { - let start_time = tokio::time::Instant::now(); - match fallback_client.sink(messages_to_send.clone()).await { - Ok(fb_response) => { - debug!( - "Fallback sink latency - {}ms", - start_time.elapsed().as_millis() - ); - - // create a map of id to result, since there is no strict requirement - // for the udsink to return the results in the same order as the requests - let result_map = fb_response - .into_iter() - .map(|resp| (resp.id, resp.status)) - .collect::>(); - - let mut contains_fallback_status = false; - - fallback_error_map.clear(); - // drain all the messages that were successfully written - // and keep only the failed messages to send again - // construct the error map for the failed messages - messages_to_send.retain(|msg| { - if let Some(result) = result_map.get(&msg.id.to_string()) { - return match result { - ResponseStatusFromSink::Success => false, - ResponseStatusFromSink::Failed(err_msg) => { - *fallback_error_map.entry(err_msg.clone()).or_insert(0) += 1; - true - } - ResponseStatusFromSink::Fallback => { - contains_fallback_status = true; - false - } - }; - } else { - false - } - }); - - // specifying fallback status in fallback response is not allowed - if contains_fallback_status { - return Err(Error::Sink( - "Fallback response contains fallback status".to_string(), - )); - } - - attempts += 1; - - if messages_to_send.is_empty() { - break; - } - - warn!( - "Retry attempt {} due to retryable error. 
Errors: {:?}", - attempts, fallback_error_map - ); - sleep(tokio::time::Duration::from(sleep_interval)).await; - } - Err(e) => return Err(e), - } - } - if !messages_to_send.is_empty() { - return Err(Error::Sink(format!( - "Failed to write messages to fallback sink after {} attempts. Errors: {:?}", - attempts, fallback_error_map - ))); + ))), } - // increment the metric for the fallback sink write - forward_mvtx_metrics() - .fb_sink - .write_total - .get_or_create(&self.common_labels) - .inc_by(fb_msg_count); - Ok(()) - } - - // Acknowledge the messages back to the source - async fn acknowledge_messages(&mut self, offsets: Vec) -> error::Result<()> { - let n = offsets.len(); - let start_time = tokio::time::Instant::now(); - - self.source_reader.ack(offsets).await?; - - debug!("Ack latency - {}ms", start_time.elapsed().as_millis()); - - forward_mvtx_metrics() - .ack_time - .get_or_create(&self.common_labels) - .observe(start_time.elapsed().as_micros() as f64); - - forward_mvtx_metrics() - .ack_total - .get_or_create(&self.common_labels) - .inc_by(n as u64); - Ok(()) } } #[cfg(test)] mod tests { use std::collections::HashSet; + use std::sync::atomic::{AtomicUsize, Ordering}; use std::time::Duration; use chrono::Utc; use numaflow::source::{Message, Offset, SourceReadRequest}; - use numaflow::{sink, source, sourcetransform}; - use numaflow_pb::clients::sink::sink_client::SinkClient; + use numaflow::{source, sourcetransform}; use numaflow_pb::clients::source::source_client::SourceClient; use numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; - use tokio::sync::mpsc; + use tempfile::TempDir; use tokio::sync::mpsc::Sender; + use tokio::sync::oneshot; + use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; use crate::monovertex::forwarder::ForwarderBuilder; - use crate::shared::utils::create_rpc_channel; - use crate::sink::{SinkClientType, SinkHandle}; + use crate::shared::grpc::create_rpc_channel; + use 
crate::sink::{SinkClientType, SinkWriterBuilder}; use crate::source::user_defined::new_source; - use crate::source::SourceHandle; - use crate::source::SourceType; - use crate::transformer::user_defined::SourceTransformHandle; + use crate::source::{Source, SourceType}; + use crate::transformer::Transformer; + use crate::Result; struct SimpleSource { - yet_to_be_acked: std::sync::RwLock>, + num: usize, + sent_count: AtomicUsize, + yet_to_ack: std::sync::RwLock>, } impl SimpleSource { - fn new() -> Self { + fn new(num: usize) -> Self { Self { - yet_to_be_acked: std::sync::RwLock::new(HashSet::new()), + num, + sent_count: AtomicUsize::new(0), + yet_to_ack: std::sync::RwLock::new(HashSet::new()), } } } @@ -602,32 +166,35 @@ mod tests { async fn read(&self, request: SourceReadRequest, transmitter: Sender) { let event_time = Utc::now(); let mut message_offsets = Vec::with_capacity(request.count); - for i in 0..2 { + + for i in 0..request.count { + if self.sent_count.load(Ordering::SeqCst) >= self.num { + return; + } + let offset = format!("{}-{}", event_time.timestamp_nanos_opt().unwrap(), i); transmitter .send(Message { - value: "test-message".as_bytes().to_vec(), + value: b"hello".to_vec(), event_time, offset: Offset { offset: offset.clone().into_bytes(), partition_id: 0, }, - keys: vec!["test-key".to_string()], + keys: vec![], headers: Default::default(), }) .await .unwrap(); - message_offsets.push(offset) + message_offsets.push(offset); + self.sent_count.fetch_add(1, Ordering::SeqCst); } - self.yet_to_be_acked - .write() - .unwrap() - .extend(message_offsets) + self.yet_to_ack.write().unwrap().extend(message_offsets); } async fn ack(&self, offsets: Vec) { for offset in offsets { - self.yet_to_be_acked + self.yet_to_ack .write() .unwrap() .remove(&String::from_utf8(offset.offset).unwrap()); @@ -635,464 +202,127 @@ mod tests { } async fn pending(&self) -> usize { - self.yet_to_be_acked.read().unwrap().len() + self.num - self.sent_count.load(Ordering::SeqCst) + + 
self.yet_to_ack.read().unwrap().len() } async fn partitions(&self) -> Option> { - Some(vec![0]) + Some(vec![1, 2]) } } struct SimpleTransformer; + #[tonic::async_trait] impl sourcetransform::SourceTransformer for SimpleTransformer { async fn transform( &self, input: sourcetransform::SourceTransformRequest, ) -> Vec { - let keys = input - .keys - .iter() - .map(|k| k.clone() + "-transformed") - .collect(); - let message = sourcetransform::Message::new(input.value, Utc::now()) - .keys(keys) - .tags(vec![]); + let message = sourcetransform::Message::new(input.value, Utc::now()).keys(input.keys); vec![message] } } - struct InMemorySink { - sender: Sender, - } - - impl InMemorySink { - fn new(sender: Sender) -> Self { - Self { sender } - } - } - - #[tonic::async_trait] - impl sink::Sinker for InMemorySink { - async fn sink(&self, mut input: mpsc::Receiver) -> Vec { - let mut responses: Vec = Vec::new(); - while let Some(datum) = input.recv().await { - let response = match std::str::from_utf8(&datum.value) { - Ok(_) => { - self.sender - .send(Message { - value: datum.value.clone(), - event_time: datum.event_time, - offset: Offset { - offset: "test-offset".to_string().into_bytes(), - partition_id: 0, - }, - keys: datum.keys.clone(), - headers: Default::default(), - }) - .await - .unwrap(); - sink::Response::ok(datum.id) - } - Err(e) => { - sink::Response::failure(datum.id, format!("Invalid UTF-8 sequence: {}", e)) - } - }; - responses.push(response); - } - responses - } - } - #[tokio::test] - async fn test_forwarder_source_sink() { - let batch_size = 100; - let timeout_in_ms = 1000; - - let (sink_tx, mut sink_rx) = mpsc::channel(10); - - // Start the source server - let (source_shutdown_tx, source_shutdown_rx) = tokio::sync::oneshot::channel(); - let tmp_dir = tempfile::TempDir::new().unwrap(); - let source_sock_file = tmp_dir.path().join("source.sock"); - let server_info_file = tmp_dir.path().join("source-server-info"); - - let server_info = server_info_file.clone(); - 
let source_socket = source_sock_file.clone(); - let source_server_handle = tokio::spawn(async move { - source::Server::new(SimpleSource::new()) - .with_socket_file(source_socket) - .with_server_info_file(server_info) - .start_with_shutdown(source_shutdown_rx) - .await - .unwrap(); - }); - - // Start the sink server - let (sink_shutdown_tx, sink_shutdown_rx) = tokio::sync::oneshot::channel(); - let sink_tmp_dir = tempfile::TempDir::new().unwrap(); - let sink_sock_file = sink_tmp_dir.path().join("sink.sock"); - let server_info_file = sink_tmp_dir.path().join("sink-server-info"); - - let server_info = server_info_file.clone(); - let sink_socket = sink_sock_file.clone(); - let sink_server_handle = tokio::spawn(async move { - sink::Server::new(InMemorySink::new(sink_tx)) - .with_socket_file(sink_socket) - .with_server_info_file(server_info) - .start_with_shutdown(sink_shutdown_rx) - .await - .unwrap(); - }); - - // Start the transformer server - let (transformer_shutdown_tx, transformer_shutdown_rx) = tokio::sync::oneshot::channel(); - let tmp_dir = tempfile::TempDir::new().unwrap(); - let transformer_sock_file = tmp_dir.path().join("transformer.sock"); - let server_info_file = tmp_dir.path().join("transformer-server-info"); - - let server_info = server_info_file.clone(); - let transformer_socket = transformer_sock_file.clone(); - let transformer_server_handle = tokio::spawn(async move { - sourcetransform::Server::new(SimpleTransformer) - .with_socket_file(transformer_socket) - .with_server_info_file(server_info) - .start_with_shutdown(transformer_shutdown_rx) - .await - .unwrap(); - }); - - // Wait for the servers to start - tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; - + async fn test_forwarder() { + // create the source which produces x number of messages let cln_token = CancellationToken::new(); - let (source_read, source_ack, source_lag_reader) = new_source( - SourceClient::new(create_rpc_channel(source_sock_file.clone()).await.unwrap()), - 
batch_size, - Duration::from_millis(timeout_in_ms), - ) - .await - .expect("failed to connect to source server"); - - let src_reader = SourceHandle::new( - SourceType::UserDefinedSource(source_read, source_ack, source_lag_reader), - batch_size, - ); - - let sink_grpc_client = SinkClient::new(create_rpc_channel(sink_sock_file).await.unwrap()); - let sink_writer = - SinkHandle::new(SinkClientType::UserDefined(sink_grpc_client), batch_size) - .await - .expect("failed to connect to sink server"); - - let transformer_client = SourceTransformHandle::new(SourceTransformClient::new( - create_rpc_channel(transformer_sock_file).await.unwrap(), - )) - .await - .expect("failed to connect to transformer server"); - - let mut forwarder = ForwarderBuilder::new( - src_reader, - sink_writer, - Default::default(), - cln_token.clone(), - ) - .source_transformer(transformer_client) - .build(); - - // Assert the received message in a different task - let assert_handle = tokio::spawn(async move { - let received_message = sink_rx.recv().await.unwrap(); - assert_eq!(received_message.value, "test-message".as_bytes()); - assert_eq!( - received_message.keys, - vec!["test-key-transformed".to_string()] - ); - cln_token.cancel(); - }); - - forwarder.start().await.unwrap(); - - // Wait for the assertion task to complete - assert_handle.await.unwrap(); - - drop(forwarder); - // stop the servers - source_shutdown_tx - .send(()) - .expect("failed to send shutdown signal"); - source_server_handle - .await - .expect("failed to join source server task"); - - transformer_shutdown_tx - .send(()) - .expect("failed to send shutdown signal"); - transformer_server_handle - .await - .expect("failed to join transformer server task"); - - sink_shutdown_tx - .send(()) - .expect("failed to send shutdown signal"); - sink_server_handle - .await - .expect("failed to join sink server task"); - } - - struct ErrorSink {} - - #[tonic::async_trait] - impl sink::Sinker for ErrorSink { - async fn sink( - &self, - mut 
input: tokio::sync::mpsc::Receiver, - ) -> Vec { - let mut responses = vec![]; - while let Some(datum) = input.recv().await { - responses.append(&mut vec![sink::Response::failure( - datum.id, - "error".to_string(), - )]); - } - responses - } - } - - #[tokio::test] - async fn test_forwarder_sink_error() { - let batch_size = 100; - let timeout_in_ms = 1000; - - // Start the source server - let (source_shutdown_tx, source_shutdown_rx) = tokio::sync::oneshot::channel(); - let tmp_dir = tempfile::TempDir::new().unwrap(); - let source_sock_file = tmp_dir.path().join("source.sock"); + let (src_shutdown_tx, src_shutdown_rx) = oneshot::channel(); + let tmp_dir = TempDir::new().unwrap(); + let sock_file = tmp_dir.path().join("source.sock"); let server_info_file = tmp_dir.path().join("source-server-info"); let server_info = server_info_file.clone(); - let source_socket = source_sock_file.clone(); - let source_server_handle = tokio::spawn(async move { - source::Server::new(SimpleSource::new()) - .with_socket_file(source_socket) - .with_server_info_file(server_info) - .start_with_shutdown(source_shutdown_rx) - .await - .unwrap(); - }); - - // Start the sink server - let (sink_shutdown_tx, sink_shutdown_rx) = tokio::sync::oneshot::channel(); - let sink_tmp_dir = tempfile::TempDir::new().unwrap(); - let sink_sock_file = sink_tmp_dir.path().join("sink.sock"); - let server_info_file = sink_tmp_dir.path().join("sink-server-info"); - - let server_info = server_info_file.clone(); - let sink_socket = sink_sock_file.clone(); - let sink_server_handle = tokio::spawn(async move { - sink::Server::new(ErrorSink {}) - .with_socket_file(sink_socket) + let server_socket = sock_file.clone(); + let source_handle = tokio::spawn(async move { + // a simple source which generates total of 100 messages + source::Server::new(SimpleSource::new(100)) + .with_socket_file(server_socket) .with_server_info_file(server_info) - .start_with_shutdown(sink_shutdown_rx) + .start_with_shutdown(src_shutdown_rx) 
.await - .unwrap(); - }); - - // Wait for the servers to start - tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; - - let cln_token = CancellationToken::new(); - - let (source_read, source_ack, lag_reader) = new_source( - SourceClient::new(create_rpc_channel(source_sock_file.clone()).await.unwrap()), - batch_size, - Duration::from_millis(timeout_in_ms), - ) - .await - .expect("failed to connect to source server"); - - let source_reader = SourceHandle::new( - SourceType::UserDefinedSource(source_read, source_ack, lag_reader), - batch_size, - ); - - let sink_client = SinkClient::new(create_rpc_channel(sink_sock_file).await.unwrap()); - let sink_writer = SinkHandle::new(SinkClientType::UserDefined(sink_client), batch_size) - .await - .expect("failed to connect to sink server"); - - let mut forwarder = ForwarderBuilder::new( - source_reader, - sink_writer, - Default::default(), - cln_token.clone(), - ) - .build(); - - let cancel_handle = tokio::spawn(async move { - tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; - cln_token.cancel(); + .unwrap() }); - let forwarder_result = forwarder.start().await; - assert!(forwarder_result.is_err()); - cancel_handle.await.unwrap(); + // wait for the server to start + // TODO: flaky + tokio::time::sleep(Duration::from_millis(100)).await; - // stop the servers - drop(forwarder); - source_shutdown_tx - .send(()) - .expect("failed to send shutdown signal"); - source_server_handle - .await - .expect("failed to join source server task"); + let client = SourceClient::new(create_rpc_channel(sock_file).await.unwrap()); - sink_shutdown_tx - .send(()) - .expect("failed to send sink shutdown signal"); - sink_server_handle + let (src_read, src_ack, lag_reader) = new_source(client, 5, Duration::from_millis(1000)) .await - .expect("failed to join sink server task"); - } - - // Sink that returns status fallback - struct FallbackSender {} - - #[tonic::async_trait] - impl sink::Sinker for FallbackSender { - async 
fn sink(&self, mut input: mpsc::Receiver) -> Vec { - let mut responses = vec![]; - while let Some(datum) = input.recv().await { - responses.append(&mut vec![sink::Response::fallback(datum.id)]); - } - responses - } - } - - #[tokio::test] - async fn test_fb_sink() { - let batch_size = 100; - - let (sink_tx, mut sink_rx) = mpsc::channel(10); - - // Start the source server - let (source_shutdown_tx, source_shutdown_rx) = tokio::sync::oneshot::channel(); - let tmp_dir = tempfile::TempDir::new().unwrap(); - let source_sock_file = tmp_dir.path().join("source.sock"); - let server_info_file = tmp_dir.path().join("source-server-info"); - - let server_info = server_info_file.clone(); - let source_socket = source_sock_file.clone(); - let source_server_handle = tokio::spawn(async move { - source::Server::new(SimpleSource::new()) - .with_socket_file(source_socket) - .with_server_info_file(server_info) - .start_with_shutdown(source_shutdown_rx) - .await - .unwrap(); - }); - - // Start the primary sink server (which returns status fallback) - let (sink_shutdown_tx, sink_shutdown_rx) = tokio::sync::oneshot::channel(); - let sink_tmp_dir = tempfile::TempDir::new().unwrap(); - let sink_sock_file = sink_tmp_dir.path().join("sink.sock"); - let server_info_file = sink_tmp_dir.path().join("sink-server-info"); + .map_err(|e| panic!("failed to create source reader: {:?}", e)) + .unwrap(); - let server_info = server_info_file.clone(); - let sink_socket = sink_sock_file.clone(); - let sink_server_handle = tokio::spawn(async move { - sink::Server::new(FallbackSender {}) - .with_socket_file(sink_socket) - .with_server_info_file(server_info) - .start_with_shutdown(sink_shutdown_rx) - .await - .unwrap(); - }); + let source = Source::new( + 5, + SourceType::UserDefinedSource(src_read, src_ack, lag_reader), + ); - // Start the fb sink server - let (fb_sink_shutdown_tx, fb_sink_shutdown_rx) = tokio::sync::oneshot::channel(); - let fb_sink_tmp_dir = tempfile::TempDir::new().unwrap(); - let 
fb_sink_sock_file = fb_sink_tmp_dir.path().join("fb-sink.sock"); - let server_info_file = fb_sink_tmp_dir.path().join("fb-sinker-server-info"); + // create a transformer + let (st_shutdown_tx, st_shutdown_rx) = oneshot::channel(); + let tmp_dir = TempDir::new().unwrap(); + let sock_file = tmp_dir.path().join("sourcetransform.sock"); + let server_info_file = tmp_dir.path().join("sourcetransformer-server-info"); let server_info = server_info_file.clone(); - let fb_sink_socket = fb_sink_sock_file.clone(); - let fb_sink_server_handle = tokio::spawn(async move { - sink::Server::new(InMemorySink::new(sink_tx)) - .with_socket_file(fb_sink_socket) + let server_socket = sock_file.clone(); + let transformer_handle = tokio::spawn(async move { + sourcetransform::Server::new(SimpleTransformer) + .with_socket_file(server_socket) .with_server_info_file(server_info) - .start_with_shutdown(fb_sink_shutdown_rx) + .start_with_shutdown(st_shutdown_rx) .await - .unwrap(); + .expect("server failed"); }); - // Wait for the servers to start + // wait for the server to start tokio::time::sleep(Duration::from_millis(100)).await; - let cln_token = CancellationToken::new(); - - let (source_read, source_ack, source_lag_reader) = new_source( - SourceClient::new(create_rpc_channel(source_sock_file.clone()).await.unwrap()), - 500, - Duration::from_millis(100), - ) - .await - .expect("failed to connect to source server"); - - let source = SourceHandle::new( - SourceType::UserDefinedSource(source_read, source_ack, source_lag_reader), - batch_size, - ); - - let sink_client = SinkClient::new(create_rpc_channel(sink_sock_file).await.unwrap()); - let sink_writer = SinkHandle::new(SinkClientType::UserDefined(sink_client), batch_size) - .await - .expect("failed to connect to sink server"); + let client = SourceTransformClient::new(create_rpc_channel(sock_file).await.unwrap()); + let transformer = Transformer::new(10, 10, client).await.unwrap(); - let fb_sink_writer = 
SinkClient::new(create_rpc_channel(fb_sink_sock_file).await.unwrap()); - let fb_sink_writer = - SinkHandle::new(SinkClientType::UserDefined(fb_sink_writer), batch_size) + let sink_writer = + SinkWriterBuilder::new(10, Duration::from_millis(100), SinkClientType::Log) + .build() .await - .expect("failed to connect to fb sink server"); + .unwrap(); - let mut forwarder = - ForwarderBuilder::new(source, sink_writer, Default::default(), cln_token.clone()) - .fallback_sink_writer(fb_sink_writer) - .build(); + // create the forwarder with the source, transformer, and writer + let forwarder = ForwarderBuilder::new(source.clone(), sink_writer, cln_token.clone()) + .transformer(transformer) + .build(); - let assert_handle = tokio::spawn(async move { - let received_message = sink_rx.recv().await.unwrap(); - assert_eq!(received_message.value, "test-message".as_bytes()); - assert_eq!(received_message.keys, vec!["test-key".to_string()]); - cln_token.cancel(); + let forwarder_handle: JoinHandle> = tokio::spawn(async move { + forwarder.start().await?; + Ok(()) }); - forwarder.start().await.unwrap(); - - assert_handle.await.unwrap(); - - drop(forwarder); - // stop the servers - source_shutdown_tx - .send(()) - .expect("failed to send shutdown signal"); - source_server_handle - .await - .expect("failed to join source server task"); - - sink_shutdown_tx - .send(()) - .expect("failed to send shutdown signal"); - sink_server_handle - .await - .expect("failed to join sink server task"); + // wait for one sec to check if the pending becomes zero, because all the messages + // should be read and acked; if it doesn't, then fail the test + let tokio_result = tokio::time::timeout(Duration::from_secs(1), async move { + loop { + let pending = source.pending().await.unwrap(); + if pending == Some(0) { + break; + } + tokio::time::sleep(Duration::from_millis(10)).await; + } + }) + .await; - fb_sink_shutdown_tx - .send(()) - .expect("failed to send shutdown signal"); - fb_sink_server_handle - 
.await - .expect("failed to join fb sink server task"); + assert!( + tokio_result.is_ok(), + "Timeout occurred before pending became zero" + ); + cln_token.cancel(); + forwarder_handle.await.unwrap().unwrap(); + st_shutdown_tx.send(()).unwrap(); + src_shutdown_tx.send(()).unwrap(); + source_handle.await.unwrap(); + transformer_handle.await.unwrap(); } } diff --git a/rust/numaflow-core/src/pipeline.rs b/rust/numaflow-core/src/pipeline.rs index ef49bdc759..e29ebe5043 100644 --- a/rust/numaflow-core/src/pipeline.rs +++ b/rust/numaflow-core/src/pipeline.rs @@ -1,30 +1,21 @@ -use crate::config::components::source::SourceType; -use crate::config::pipeline; -use crate::config::pipeline::PipelineConfig; -use crate::metrics::{PipelineContainerState, UserDefinedContainerState}; -use crate::pipeline::isb::jetstream::reader::JetstreamReader; -use crate::pipeline::isb::jetstream::WriterHandle; -use crate::shared::server_info::sdk_server_info; -use crate::shared::utils; -use crate::shared::utils::{ - create_rpc_channel, start_metrics_server, wait_until_source_ready, wait_until_transformer_ready, -}; -use crate::sink::SinkWriter; -use crate::source::generator::new_generator; -use crate::source::pulsar::new_pulsar_source; -use crate::source::user_defined::new_source; -use crate::transformer::user_defined::SourceTransformHandle; -use crate::{config, error, source, Result}; +use std::time::Duration; + use async_nats::jetstream::Context; use async_nats::{jetstream, ConnectOptions}; use futures::future::try_join_all; -use numaflow_pb::clients::sink::sink_client::SinkClient; -use numaflow_pb::clients::source::source_client::SourceClient; -use numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; -use std::collections::HashMap; -use std::time::Duration; use tokio_util::sync::CancellationToken; -use tonic::transport::Channel; +use tracing::info; + +use crate::config::pipeline; +use crate::config::pipeline::{PipelineConfig, SinkVtxConfig, 
SourceVtxConfig}; +use crate::metrics::{PipelineContainerState, UserDefinedContainerState}; +use crate::pipeline::forwarder::source_forwarder; +use crate::pipeline::isb::jetstream::reader::JetstreamReader; +use crate::pipeline::isb::jetstream::ISBWriter; +use crate::shared::create_components; +use crate::shared::create_components::create_sink_writer; +use crate::shared::metrics::start_metrics_server; +use crate::{error, Result}; mod forwarder; mod isb; @@ -34,117 +25,142 @@ pub(crate) async fn start_forwarder( cln_token: CancellationToken, config: PipelineConfig, ) -> Result<()> { - let js_context = create_js_context(config.js_client_config.clone()).await?; - match &config.vertex_config { pipeline::VertexType::Source(source) => { - let buffer_writers = - create_buffer_writers(&config, js_context.clone(), cln_token.clone()).await?; - - let (source_type, source_grpc_client) = - create_source_type(source, &config, cln_token.clone()).await?; - let (transformer, transformer_grpc_client) = - create_transformer(source, cln_token.clone()).await?; - - start_metrics_server( - config.metrics_config.clone(), - UserDefinedContainerState::Pipeline(PipelineContainerState::Source(( - source_grpc_client.clone(), - transformer_grpc_client.clone(), - ))), - ) - .await; - - let source_handle = source::SourceHandle::new(source_type, config.batch_size); - let mut forwarder = forwarder::source_forwarder::ForwarderBuilder::new( - source_handle, - transformer, - buffer_writers, - cln_token.clone(), - config.clone(), - ) - .build(); - forwarder.start().await?; + info!("Starting source forwarder"); + start_source_forwarder(cln_token, config.clone(), source.clone()).await?; } pipeline::VertexType::Sink(sink) => { - // Create buffer readers for each partition - let buffer_readers = create_buffer_readers(&config, js_context.clone()).await?; - - // Create sink writers and clients - let mut sink_writers = Vec::new(); - for _ in &buffer_readers { - let (sink_writer, sink_grpc_client, 
fb_sink_grpc_client) = - create_sink_writer(&config, sink, cln_token.clone()).await?; - sink_writers.push((sink_writer, sink_grpc_client, fb_sink_grpc_client)); - } + info!("Starting sink forwarder"); + start_sink_forwarder(cln_token, config.clone(), sink.clone()).await?; + } + } + Ok(()) +} - // Start the metrics server with one of the clients - if let Some((_, sink, fb_sink)) = sink_writers.first() { - start_metrics_server( - config.metrics_config.clone(), - UserDefinedContainerState::Pipeline(PipelineContainerState::Sink(( - sink.clone(), - fb_sink.clone(), - ))), - ) - .await; - } +async fn start_source_forwarder( + cln_token: CancellationToken, + config: PipelineConfig, + source_config: SourceVtxConfig, +) -> Result<()> { + let js_context = create_js_context(config.js_client_config.clone()).await?; - // Start a new forwarder for each buffer reader - let mut forwarder_tasks = Vec::new(); - for (buffer_reader, (sink_writer, _, _)) in buffer_readers.into_iter().zip(sink_writers) - { - let forwarder = forwarder::sink_forwarder::SinkForwarder::new( - buffer_reader, - sink_writer, - cln_token.clone(), - ) - .await; + let buffer_writer = create_buffer_writer(&config, js_context.clone(), cln_token.clone()).await; - let task = tokio::spawn({ - let config = config.clone(); - async move { forwarder.start(config.clone()).await } - }); + let (source, source_grpc_client) = create_components::create_source( + config.batch_size, + config.read_timeout, + &source_config.source_config, + cln_token.clone(), + ) + .await?; + let (transformer, transformer_grpc_client) = create_components::create_transformer( + config.batch_size, + source_config.transformer_config.clone(), + cln_token.clone(), + ) + .await?; - forwarder_tasks.push(task); - } + start_metrics_server( + config.metrics_config.clone(), + UserDefinedContainerState::Pipeline(PipelineContainerState::Source(( + source_grpc_client.clone(), + transformer_grpc_client.clone(), + ))), + ) + .await; - try_join_all(forwarder_tasks) 
- .await - .map_err(|e| error::Error::Forwarder(e.to_string()))?; - } + let forwarder = + source_forwarder::SourceForwarderBuilder::new(source, buffer_writer, cln_token.clone()); + + let forwarder = if let Some(transformer) = transformer { + forwarder.with_transformer(transformer).build() + } else { + forwarder.build() + }; + + forwarder.start().await?; + Ok(()) +} + +async fn start_sink_forwarder( + cln_token: CancellationToken, + config: PipelineConfig, + sink: SinkVtxConfig, +) -> Result<()> { + let js_context = create_js_context(config.js_client_config.clone()).await?; + + // Create buffer readers for each partition + let buffer_readers = create_buffer_readers(&config, js_context.clone()).await?; + + // Create sink writers and clients + let mut sink_writers = Vec::new(); + for _ in &buffer_readers { + let (sink_writer, sink_grpc_client, fb_sink_grpc_client) = create_sink_writer( + config.batch_size, + config.read_timeout, + sink.sink_config.clone(), + sink.fb_sink_config.clone(), + &cln_token, + ) + .await?; + sink_writers.push((sink_writer, sink_grpc_client, fb_sink_grpc_client)); + } + + // Start the metrics server with one of the clients + if let Some((_, sink, fb_sink)) = sink_writers.first() { + start_metrics_server( + config.metrics_config.clone(), + UserDefinedContainerState::Pipeline(PipelineContainerState::Sink(( + sink.clone(), + fb_sink.clone(), + ))), + ) + .await; + } + + // Start a new forwarder for each buffer reader + let mut forwarder_tasks = Vec::new(); + for (buffer_reader, (sink_writer, _, _)) in buffer_readers.into_iter().zip(sink_writers) { + info!(%buffer_reader, "Starting forwarder for buffer reader"); + let forwarder = forwarder::sink_forwarder::SinkForwarder::new( + buffer_reader, + sink_writer, + cln_token.clone(), + ) + .await; + + let task = tokio::spawn({ + let config = config.clone(); + async move { forwarder.start(config.clone()).await } + }); + + forwarder_tasks.push(task); } + + try_join_all(forwarder_tasks) + .await + 
.map_err(|e| error::Error::Forwarder(e.to_string()))?; + info!("All forwarders have stopped successfully"); Ok(()) } -/// Creates the required buffer writers based on the pipeline configuration, it creates a map -/// of vertex name to a list of writer handles. -async fn create_buffer_writers( +async fn create_buffer_writer( config: &PipelineConfig, js_context: Context, cln_token: CancellationToken, -) -> Result>> { - let mut buffer_writers = HashMap::new(); - for to_vertex in &config.to_vertex_config { - let writers = to_vertex - .writer_config - .streams +) -> ISBWriter { + ISBWriter::new( + config.paf_concurrency, + config + .to_vertex_config .iter() - .map(|stream| { - WriterHandle::new( - stream.0.clone(), - stream.1, - to_vertex.writer_config.clone(), - js_context.clone(), - config.batch_size, - config.paf_batch_size, - cln_token.clone(), - ) - }) - .collect(); - buffer_writers.insert(to_vertex.name.clone(), writers); - } - Ok(buffer_writers) + .map(|tv| tv.writer_config.clone()) + .collect(), + js_context, + cln_token, + ) + .await } async fn create_buffer_readers( @@ -153,17 +169,16 @@ async fn create_buffer_readers( ) -> Result> { // Only the reader config of the first "from" vertex is needed, as all "from" vertices currently write // to a common buffer, in the case of a join. - let reader_config = config + let reader_config = &config .from_vertex_config .first() .ok_or_else(|| error::Error::Config("No from vertex config found".to_string()))? 
- .reader_config - .clone(); + .reader_config; let mut readers = Vec::new(); for stream in &reader_config.streams { let reader = JetstreamReader::new( - stream.0.clone(), + stream.0, stream.1, js_context.clone(), reader_config.clone(), @@ -175,129 +190,6 @@ async fn create_buffer_readers( Ok(readers) } -// Creates a sink writer based on the pipeline configuration -async fn create_sink_writer( - config: &PipelineConfig, - sink_vtx_config: &pipeline::SinkVtxConfig, - cln_token: CancellationToken, -) -> Result<( - SinkWriter, - Option>, - Option>, -)> { - let (sink_handle, sink_grpc_client) = utils::create_sink_handle( - config.batch_size, - &sink_vtx_config.sink_config.sink_type, - &cln_token, - ) - .await?; - let (fb_sink_handle, fb_sink_grpc_client) = match &sink_vtx_config.fb_sink_config { - None => (None, None), - Some(fb_sink_config) => { - let (handle, client) = - utils::create_sink_handle(config.batch_size, &fb_sink_config.sink_type, &cln_token) - .await?; - (Some(handle), client) - } - }; - - Ok(( - SinkWriter::new( - config.batch_size, - config.read_timeout, - sink_vtx_config.clone(), - sink_handle, - fb_sink_handle, - ) - .await?, - sink_grpc_client, - fb_sink_grpc_client, - )) -} - -/// Creates a source type based on the pipeline configuration -async fn create_source_type( - source: &pipeline::SourceVtxConfig, - config: &PipelineConfig, - cln_token: CancellationToken, -) -> Result<(source::SourceType, Option>)> { - match &source.source_config.source_type { - SourceType::Generator(generator_config) => { - let (generator_read, generator_ack, generator_lag) = - new_generator(generator_config.clone(), config.batch_size)?; - Ok(( - source::SourceType::Generator(generator_read, generator_ack, generator_lag), - None, - )) - } - SourceType::UserDefined(udsource_config) => { - _ = sdk_server_info( - udsource_config.server_info_path.clone().into(), - cln_token.clone(), - ) - .await?; - - // TODO: Add sdk info metric - - let mut source_grpc_client = 
SourceClient::new( - create_rpc_channel(udsource_config.socket_path.clone().into()).await?, - ) - .max_encoding_message_size(udsource_config.grpc_max_message_size) - .max_encoding_message_size(udsource_config.grpc_max_message_size); - wait_until_source_ready(&cln_token, &mut source_grpc_client).await?; - let (ud_read, ud_ack, ud_lag) = new_source( - source_grpc_client.clone(), - config.batch_size, - config.read_timeout, - ) - .await?; - Ok(( - source::SourceType::UserDefinedSource(ud_read, ud_ack, ud_lag), - Some(source_grpc_client), - )) - } - SourceType::Pulsar(pulsar_config) => { - let pulsar_source = new_pulsar_source( - pulsar_config.clone(), - config.batch_size, - config.read_timeout, - ) - .await?; - Ok((source::SourceType::Pulsar(pulsar_source), None)) - } - } -} -/// Creates a transformer if it is configured in the pipeline -async fn create_transformer( - source: &pipeline::SourceVtxConfig, - cln_token: CancellationToken, -) -> Result<( - Option, - Option>, -)> { - if let Some(transformer_config) = &source.transformer_config { - if let config::components::transformer::TransformerType::UserDefined(ud_transformer) = - &transformer_config.transformer_type - { - _ = sdk_server_info(ud_transformer.socket_path.clone().into(), cln_token.clone()) - .await?; - // TODO: Add sdk info metric - - let mut transformer_grpc_client = SourceTransformClient::new( - create_rpc_channel(ud_transformer.socket_path.clone().into()).await?, - ) - .max_encoding_message_size(ud_transformer.grpc_max_message_size) - .max_encoding_message_size(ud_transformer.grpc_max_message_size); - wait_until_transformer_ready(&cln_token, &mut transformer_grpc_client).await?; - return Ok(( - Some(SourceTransformHandle::new(transformer_grpc_client.clone()).await?), - Some(transformer_grpc_client), - )); - } - } - Ok((None, None)) -} - /// Creates a jetstream context based on the provided configuration async fn create_js_context(config: pipeline::isb::jetstream::ClientConfig) -> Result { // TODO: make 
these configurable. today this is hardcoded on Golang code too. @@ -305,6 +197,7 @@ async fn create_js_context(config: pipeline::isb::jetstream::ClientConfig) -> Re .max_reconnects(None) // -1 for unlimited reconnects .ping_interval(Duration::from_secs(3)) .max_reconnects(None) + .ping_interval(Duration::from_secs(3)) .retry_on_initial_connect(); if let (Some(user), Some(password)) = (config.user, config.password) { @@ -320,6 +213,7 @@ async fn create_js_context(config: pipeline::isb::jetstream::ClientConfig) -> Re #[cfg(test)] mod tests { + use std::collections::HashMap; use std::time::Duration; use async_nats::jetstream; @@ -327,7 +221,6 @@ mod tests { use futures::StreamExt; use super::*; - use crate::config::components::metrics::MetricsConfig; use crate::config::components::sink::{BlackholeConfig, SinkConfig, SinkType}; use crate::config::components::source::GeneratorConfig; @@ -393,7 +286,7 @@ mod tests { vertex_name: "in".to_string(), replica: 0, batch_size: 1000, - paf_batch_size: 30000, + paf_concurrency: 30000, read_timeout: Duration::from_secs(1), js_client_config: isb::jetstream::ClientConfig { url: "localhost:4222".to_string(), @@ -516,8 +409,9 @@ mod tests { .unwrap(); // Publish some messages into the stream - use crate::message::{Message, MessageID, Offset, StringOffset}; use chrono::{TimeZone, Utc}; + + use crate::message::{Message, MessageID, Offset, StringOffset}; let message = Message { keys: vec!["key1".to_string()], value: vec![1, 2, 3].into(), @@ -560,7 +454,7 @@ mod tests { vertex_name: "in".to_string(), replica: 0, batch_size: 1000, - paf_batch_size: 30000, + paf_concurrency: 30000, read_timeout: Duration::from_secs(1), js_client_config: isb::jetstream::ClientConfig { url: "localhost:4222".to_string(), @@ -575,7 +469,7 @@ mod tests { streams: streams .iter() .enumerate() - .map(|(i, key)| (key.to_string(), i as u16)) + .map(|(i, key)| (*key, i as u16)) .collect(), wip_ack_interval: Duration::from_secs(1), }, diff --git 
a/rust/numaflow-core/src/pipeline/forwarder.rs b/rust/numaflow-core/src/pipeline/forwarder.rs index 6e8774c320..9c1f8deeff 100644 --- a/rust/numaflow-core/src/pipeline/forwarder.rs +++ b/rust/numaflow-core/src/pipeline/forwarder.rs @@ -1,12 +1,29 @@ -/// Forwarder consists -/// (Read) +-------> (UDF) -------> (Write) + -/// | | -/// | | -/// +-------> {Ack} <----------------+ -/// -/// {} -> Listens on a OneShot -/// () -> Streaming Interface -/// +//! The forwarder for [Pipeline] at its core orchestrates message movement asynchronously using +//! [Stream] over channels between the components. The messages send over this channel using +//! [Actor Pattern]. +//! +//! ```text +//! (source) --[c]--> (transformer)* --[c]--> ==> (map)* --[c]--> ===> (reducer)* --[c]--> ===> --[c]--> (sink) +//! +//! ==> - ISB +//! [c] - channel +//! * - optional +//! ``` +//! +//! Most of the data move forward except for the ack which can happen only after the Write. +//! ```text +//! (Read) +-------> (UDF) -------> (Write) + +//! | | +//! | | +//! +-------> {Ack} <----------------+ +//! +//! {} -> Listens on a OneShot +//! () -> Streaming Interface +//! ``` +//! +//! [Pipeline]: https://numaflow.numaproj.io/core-concepts/pipeline/ +//! [Stream]: https://docs.rs/tokio-stream/latest/tokio_stream/wrappers/struct.ReceiverStream.html +//! [Actor Pattern]: https://ryhl.io/blog/actors-with-tokio/ /// Forwarder specific to Sink where reader is ISB, UDF is not present, while /// the Write is User-defined Sink or builtin. 
diff --git a/rust/numaflow-core/src/pipeline/forwarder/sink_forwarder.rs b/rust/numaflow-core/src/pipeline/forwarder/sink_forwarder.rs index 74846e931b..7153a4ff1d 100644 --- a/rust/numaflow-core/src/pipeline/forwarder/sink_forwarder.rs +++ b/rust/numaflow-core/src/pipeline/forwarder/sink_forwarder.rs @@ -6,7 +6,8 @@ use crate::pipeline::isb::jetstream::reader::JetstreamReader; use crate::sink::SinkWriter; use crate::Result; -/// Sink forwarder reads messages from the jetstream and writes to the sink. +/// Sink forwarder is a component which starts a streaming reader and a sink writer +/// and manages the lifecycle of these components. pub(crate) struct SinkForwarder { jetstream_reader: JetstreamReader, sink_writer: SinkWriter, @@ -29,14 +30,14 @@ impl SinkForwarder { pub(crate) async fn start(&self, pipeline_config: PipelineConfig) -> Result<()> { // Create a child cancellation token only for the reader so that we can stop the reader first let reader_cancellation_token = self.cln_token.child_token(); - let (read_messages_rx, reader_handle) = self + let (read_messages_stream, reader_handle) = self .jetstream_reader - .start(reader_cancellation_token.clone(), &pipeline_config) + .streaming_read(reader_cancellation_token.clone(), &pipeline_config) .await?; let sink_writer_handle = self .sink_writer - .start(read_messages_rx, self.cln_token.clone()) + .streaming_write(read_messages_stream, self.cln_token.clone()) .await?; // Join the reader and sink writer diff --git a/rust/numaflow-core/src/pipeline/forwarder/source_forwarder.rs b/rust/numaflow-core/src/pipeline/forwarder/source_forwarder.rs index 9ba2ba94fd..6246334447 100644 --- a/rust/numaflow-core/src/pipeline/forwarder/source_forwarder.rs +++ b/rust/numaflow-core/src/pipeline/forwarder/source_forwarder.rs @@ -1,197 +1,340 @@ -use std::collections::HashMap; - -use chrono::Utc; use tokio_util::sync::CancellationToken; -use tracing::{debug, info}; -use crate::config::pipeline::PipelineConfig; use crate::error; use 
crate::error::Error; -use crate::message::{Message, Offset}; -use crate::metrics::{forward_pipeline_metrics, pipeline_forward_read_metric_labels}; -use crate::pipeline::isb::jetstream::WriterHandle; -use crate::source::SourceHandle; -use crate::transformer::user_defined::SourceTransformHandle; - -/// Simple source forwarder that reads messages from the source, applies transformation if present -/// and writes to the messages to ISB. -pub(crate) struct Forwarder { - source_reader: SourceHandle, - transformer: Option, - buffer_writers: HashMap>, +use crate::pipeline::isb::jetstream::ISBWriter; +use crate::source::Source; +use crate::transformer::Transformer; + +/// Source forwarder is the orchestrator which starts streaming source, a transformer, and an isb writer +/// and manages the lifecycle of these components. +pub(crate) struct SourceForwarder { + source: Source, + transformer: Option, + writer: ISBWriter, cln_token: CancellationToken, - config: PipelineConfig, } -pub(crate) struct ForwarderBuilder { - source_reader: SourceHandle, - transformer: Option, - buffer_writers: HashMap>, +/// ForwarderBuilder is a builder for Forwarder. 
+pub(crate) struct SourceForwarderBuilder { + streaming_source: Source, + transformer: Option, + writer: ISBWriter, cln_token: CancellationToken, - config: PipelineConfig, } -impl ForwarderBuilder { +impl SourceForwarderBuilder { pub(crate) fn new( - source_reader: SourceHandle, - transformer: Option, - buffer_writers: HashMap>, + streaming_source: Source, + writer: ISBWriter, cln_token: CancellationToken, - config: PipelineConfig, ) -> Self { Self { - source_reader, - transformer, - buffer_writers, + streaming_source, + transformer: None, + writer, cln_token, - config, } } - pub(crate) fn build(self) -> Forwarder { - Forwarder { - source_reader: self.source_reader, + pub(crate) fn with_transformer(mut self, transformer: Transformer) -> Self { + self.transformer = Some(transformer); + self + } + + pub(crate) fn build(self) -> SourceForwarder { + SourceForwarder { + source: self.streaming_source, transformer: self.transformer, - buffer_writers: self.buffer_writers, + writer: self.writer, cln_token: self.cln_token, - config: self.config, } } } -impl Forwarder { - pub(crate) async fn start(&mut self) -> Result<(), Error> { - let mut processed_msgs_count: usize = 0; - let mut last_forwarded_at = std::time::Instant::now(); - info!("Forwarder has started"); - loop { - tokio::time::Instant::now(); - if self.cln_token.is_cancelled() { - break; +impl SourceForwarder { + /// Start the forwarder by starting the streaming source, transformer, and writer. + pub(crate) async fn start(&self) -> error::Result<()> { + // RETHINK: only source should stop when the token is cancelled, transformer and writer should drain the streams + // and then stop. 
+ let (read_messages_stream, reader_handle) = + self.source.streaming_read(self.cln_token.clone())?; + + // start the transformer if it is present + let (transformed_messages_stream, transformer_handle) = + if let Some(transformer) = &self.transformer { + let (transformed_messages_stream, transformer_handle) = + transformer.transform_stream(read_messages_stream)?; + (transformed_messages_stream, Some(transformer_handle)) + } else { + (read_messages_stream, None) + }; + + let writer_handle = self + .writer + .streaming_write(transformed_messages_stream) + .await?; + + match tokio::try_join!( + reader_handle, + transformer_handle.unwrap_or_else(|| tokio::spawn(async { Ok(()) })), + writer_handle, + ) { + Ok((reader_result, transformer_result, sink_writer_result)) => { + reader_result?; + transformer_result?; + sink_writer_result?; + Ok(()) } - processed_msgs_count += self.read_and_process_messages().await?; - - if last_forwarded_at.elapsed().as_millis() >= 1000 { - info!( - "Forwarded {} messages at time in the pipeline {}", - processed_msgs_count, - Utc::now() - ); - processed_msgs_count = 0; - last_forwarded_at = std::time::Instant::now(); + Err(e) => Err(Error::Forwarder(format!( + "Error while joining reader, transformer, and sink writer: {:?}", + e + ))), + } + } +} + +#[cfg(test)] +mod tests { + use std::collections::HashSet; + use std::sync::atomic::{AtomicUsize, Ordering}; + use std::time::Duration; + + use async_nats::jetstream; + use async_nats::jetstream::{consumer, stream}; + use chrono::Utc; + use numaflow::source::{Message, Offset, SourceReadRequest}; + use numaflow::{source, sourcetransform}; + use numaflow_pb::clients::source::source_client::SourceClient; + use numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; + use tempfile::TempDir; + use tokio::sync::mpsc::Sender; + use tokio::sync::oneshot; + use tokio::task::JoinHandle; + use tokio_util::sync::CancellationToken; + + use 
crate::config::pipeline::isb::BufferWriterConfig; + use crate::pipeline::isb::jetstream::ISBWriter; + use crate::pipeline::source_forwarder::SourceForwarderBuilder; + use crate::shared::grpc::create_rpc_channel; + use crate::source::user_defined::new_source; + use crate::source::{Source, SourceType}; + use crate::transformer::Transformer; + use crate::Result; + + struct SimpleSource { + num: usize, + sent_count: AtomicUsize, + yet_to_ack: std::sync::RwLock>, + } + + impl SimpleSource { + fn new(num: usize) -> Self { + Self { + num, + sent_count: AtomicUsize::new(0), + yet_to_ack: std::sync::RwLock::new(HashSet::new()), } } - Ok(()) } - async fn read_and_process_messages(&mut self) -> Result { - let start_time = tokio::time::Instant::now(); - let messages = self.source_reader.read().await.map_err(|e| { - Error::Forwarder(format!("Failed to read messages from source {:?}", e)) - })?; + #[tonic::async_trait] + impl source::Sourcer for SimpleSource { + async fn read(&self, request: SourceReadRequest, transmitter: Sender) { + let event_time = Utc::now(); + let mut message_offsets = Vec::with_capacity(request.count); - debug!( - "Read batch size: {} and latency - {}ms", - messages.len(), - start_time.elapsed().as_millis() - ); + for i in 0..request.count { + if self.sent_count.load(Ordering::SeqCst) >= self.num { + return; + } - let labels = pipeline_forward_read_metric_labels( - self.config.pipeline_name.as_ref(), - self.config.vertex_name.as_ref(), - self.config.vertex_name.as_ref(), - "Source", - self.config.replica, - ); - forward_pipeline_metrics() - .forwarder - .data_read - .get_or_create(labels) - .inc_by(messages.len() as u64); - - if messages.is_empty() { - return Ok(0); + let offset = format!("{}-{}", event_time.timestamp_nanos_opt().unwrap(), i); + transmitter + .send(Message { + value: b"hello".to_vec(), + event_time, + offset: Offset { + offset: offset.clone().into_bytes(), + partition_id: 0, + }, + keys: vec![], + headers: Default::default(), + }) + .await 
+ .unwrap(); + message_offsets.push(offset); + self.sent_count.fetch_add(1, Ordering::SeqCst); + } + self.yet_to_ack.write().unwrap().extend(message_offsets); } - let msg_count = messages.len() as u64; - let offsets: Vec = - messages - .iter() - .try_fold(Vec::with_capacity(messages.len()), |mut offsets, msg| { - if let Some(offset) = &msg.offset { - offsets.push(offset.clone()); - Ok(offsets) - } else { - Err(Error::Forwarder("Message offset is missing".to_string())) - } - })?; - - // Apply transformation if transformer is present - // FIXME: we should stream the responses back and write it to the jetstream writer - let transformed_messages = self.apply_transformer(messages).await.map_err(|e| { - Error::Forwarder(format!( - "Failed to apply transformation to messages {:?}", - e - )) - })?; + async fn ack(&self, offsets: Vec) { + for offset in offsets { + self.yet_to_ack + .write() + .unwrap() + .remove(&String::from_utf8(offset.offset).unwrap()); + } + } - self.write_to_jetstream(transformed_messages).await?; + async fn pending(&self) -> usize { + self.num - self.sent_count.load(Ordering::SeqCst) + + self.yet_to_ack.read().unwrap().len() + } + + async fn partitions(&self) -> Option> { + Some(vec![1, 2]) + } + } - self.source_reader.ack(offsets).await?; + struct SimpleTransformer; - Ok(msg_count as usize) + #[tonic::async_trait] + impl sourcetransform::SourceTransformer for SimpleTransformer { + async fn transform( + &self, + input: sourcetransform::SourceTransformRequest, + ) -> Vec { + let message = sourcetransform::Message::new(input.value, Utc::now()).keys(input.keys); + vec![message] + } } - /// Applies the transformer to the messages. 
- async fn apply_transformer(&mut self, messages: Vec) -> error::Result> { - let Some(client) = &mut self.transformer else { - // return early if there is no transformer - return Ok(messages); - }; + #[cfg(feature = "nats-tests")] + #[tokio::test] + async fn test_source_forwarder() { + // create the source which produces x number of messages + let cln_token = CancellationToken::new(); + + let (src_shutdown_tx, src_shutdown_rx) = oneshot::channel(); + let tmp_dir = tempfile::TempDir::new().unwrap(); + let sock_file = tmp_dir.path().join("source.sock"); + let server_info_file = tmp_dir.path().join("source-server-info"); - let start_time = tokio::time::Instant::now(); - let results = client.transform(messages).await?; + let server_info = server_info_file.clone(); + let server_socket = sock_file.clone(); + let source_handle = tokio::spawn(async move { + // a simple source which generates total of 100 messages + source::Server::new(SimpleSource::new(100)) + .with_socket_file(server_socket) + .with_server_info_file(server_info) + .start_with_shutdown(src_shutdown_rx) + .await + .unwrap() + }); + + // wait for the server to start + // TODO: flaky + tokio::time::sleep(Duration::from_millis(100)).await; + + let client = SourceClient::new(create_rpc_channel(sock_file).await.unwrap()); - debug!( - "Transformer latency - {}ms", - start_time.elapsed().as_millis() + let (src_read, src_ack, lag_reader) = new_source(client, 5, Duration::from_millis(1000)) + .await + .map_err(|e| panic!("failed to create source reader: {:?}", e)) + .unwrap(); + + let source = Source::new( + 5, + SourceType::UserDefinedSource(src_read, src_ack, lag_reader), ); - Ok(results) - } + // create a js writer + let js_url = "localhost:4222"; + // Create JetStream context + let client = async_nats::connect(js_url).await.unwrap(); + let context = jetstream::new(client); - /// Writes messages to the jetstream, it writes to all the downstream buffers. 
- async fn write_to_jetstream(&mut self, messages: Vec) -> Result<(), Error> { - let start_time = tokio::time::Instant::now(); - if messages.is_empty() { - return Ok(()); - } + let stream_name = "test_source_forwarder"; + let _stream = context + .get_or_create_stream(stream::Config { + name: stream_name.into(), + subjects: vec![stream_name.into()], + max_message_size: 1024, + ..Default::default() + }) + .await + .unwrap(); - let mut results = Vec::new(); + let _consumer = context + .create_consumer_on_stream( + consumer::Config { + name: Some(stream_name.to_string()), + ack_policy: consumer::AckPolicy::Explicit, + ..Default::default() + }, + stream_name, + ) + .await + .unwrap(); - // write to all the buffers - for i in 0..messages.len() { - for writers in self.buffer_writers.values() { - // write to the stream writers in round-robin fashion - let writer = &writers[i % writers.len()]; // FIXME: we need to shuffle based on the message id hash - let result = writer.write(messages[i].clone()).await?; - results.push(result); - } - } + let writer = ISBWriter::new( + 10, + vec![BufferWriterConfig { + streams: vec![(stream_name.to_string(), 0)], + ..Default::default() + }], + context.clone(), + cln_token.clone(), + ) + .await; + + // create a transformer + let (st_shutdown_tx, st_shutdown_rx) = oneshot::channel(); + let tmp_dir = TempDir::new().unwrap(); + let sock_file = tmp_dir.path().join("sourcetransform.sock"); + let server_info_file = tmp_dir.path().join("sourcetransformer-server-info"); - // await for all the result futures to complete - // FIXME: we should not await for the results to complete, that will make it sequential - for result in results { - // we can use the ack to publish watermark etc - result + let server_info = server_info_file.clone(); + let server_socket = sock_file.clone(); + let transformer_handle = tokio::spawn(async move { + sourcetransform::Server::new(SimpleTransformer) + .with_socket_file(server_socket) + .with_server_info_file(server_info) 
+ .start_with_shutdown(st_shutdown_rx) .await - .map_err(|e| Error::Forwarder(format!("Failed to write to jetstream {:?}", e)))??; - } - debug!( - len = messages.len(), - elapsed_ms = start_time.elapsed().as_millis(), - "Wrote messages to jetstream", + .expect("server failed"); + }); + + // wait for the server to start + tokio::time::sleep(Duration::from_millis(100)).await; + + let client = SourceTransformClient::new(create_rpc_channel(sock_file).await.unwrap()); + let transformer = Transformer::new(10, 10, client).await.unwrap(); + + // create the forwarder with the source, transformer, and writer + let forwarder = SourceForwarderBuilder::new(source.clone(), writer, cln_token.clone()) + .with_transformer(transformer) + .build(); + + let forwarder_handle: JoinHandle> = tokio::spawn(async move { + forwarder.start().await?; + Ok(()) + }); + + // wait for one sec to check if the pending becomes zero, because all the messages + // should be read and acked; if it doesn't, then fail the test + let tokio_result = tokio::time::timeout(Duration::from_secs(1), async move { + loop { + let pending = source.pending().await.unwrap(); + if pending == Some(0) { + break; + } + tokio::time::sleep(Duration::from_millis(10)).await; + } + }) + .await; + + assert!( + tokio_result.is_ok(), + "Timeout occurred before pending became zero" ); - Ok(()) + cln_token.cancel(); + forwarder_handle.await.unwrap().unwrap(); + st_shutdown_tx.send(()).unwrap(); + src_shutdown_tx.send(()).unwrap(); + source_handle.await.unwrap(); + transformer_handle.await.unwrap(); } } diff --git a/rust/numaflow-core/src/pipeline/isb.rs b/rust/numaflow-core/src/pipeline/isb.rs index 53ab02707f..e59d0b983e 100644 --- a/rust/numaflow-core/src/pipeline/isb.rs +++ b/rust/numaflow-core/src/pipeline/isb.rs @@ -1 +1,3 @@ +// TODO: implement a simple ISB and a trait for ISB + pub(crate) mod jetstream; diff --git a/rust/numaflow-core/src/pipeline/isb/jetstream.rs b/rust/numaflow-core/src/pipeline/isb/jetstream.rs index 
ccba63a8d1..9f3635861e 100644 --- a/rust/numaflow-core/src/pipeline/isb/jetstream.rs +++ b/rust/numaflow-core/src/pipeline/isb/jetstream.rs @@ -1,13 +1,18 @@ use async_nats::jetstream::Context; use bytes::BytesMut; -use tokio::sync::mpsc::Receiver; -use tokio::sync::{mpsc, oneshot}; +use tokio::task::JoinHandle; +use tokio_stream::wrappers::ReceiverStream; +use tokio_stream::StreamExt; use tokio_util::sync::CancellationToken; +use tracing::info; use crate::config::pipeline::isb::BufferWriterConfig; use crate::error::Error; -use crate::message::{Message, Offset}; -use crate::pipeline::isb::jetstream::writer::JetstreamWriter; +use crate::message::{ReadAck, ReadMessage}; +use crate::metrics::{pipeline_isb_metric_labels, pipeline_metrics}; +use crate::pipeline::isb::jetstream::writer::{ + JetstreamWriter, PafResolver, ResolveAndPublishResult, +}; use crate::Result; /// JetStream Writer is responsible for writing messages to JetStream ISB. @@ -19,115 +24,114 @@ pub(super) mod writer; pub(crate) mod reader; -/// ISB Writer accepts an Actor pattern based messages. -#[derive(Debug)] -struct ActorMessage { - /// Write the messages to ISB - message: Message, - /// once the message has been successfully written, we can let the sender know. - /// This can be used to trigger Acknowledgement of the message from the Reader. - // FIXME: concrete type and better name - callee_tx: oneshot::Sender>, -} - -impl ActorMessage { - fn new(message: Message, callee_tx: oneshot::Sender>) -> Self { - Self { message, callee_tx } - } -} +/// Stream is a combination of stream name and partition id. +type Stream = (String, u16); -/// WriterActor will handle the messages and write them to the Jetstream ISB. -struct WriterActor { - js_writer: JetstreamWriter, - receiver: Receiver, +/// StreamingJetstreamWriter is a streaming version of JetstreamWriter. It accepts a stream of messages +/// and writes them to Jetstream ISB. It also has a PAF resolver actor to resolve the PAFs. 
+#[derive(Clone)] +pub(crate) struct ISBWriter { + paf_concurrency: usize, + config: Vec, + writer: JetstreamWriter, } -impl WriterActor { - fn new(js_writer: JetstreamWriter, receiver: Receiver) -> Self { - Self { - js_writer, - receiver, - } - } - - async fn handle_message(&mut self, msg: ActorMessage) { - let payload: BytesMut = msg - .message - .try_into() - .expect("message serialization should not fail"); - self.js_writer.write(payload.into(), msg.callee_tx).await - } - - async fn run(&mut self) { - while let Some(msg) = self.receiver.recv().await { - self.handle_message(msg).await; - } - } -} - -/// WriterHandle is the handle to the WriterActor. It exposes a method to send messages to the Actor. -pub(crate) struct WriterHandle { - sender: mpsc::Sender, -} - -impl WriterHandle { - pub(crate) fn new( - stream_name: String, - partition_idx: u16, - config: BufferWriterConfig, +impl ISBWriter { + pub(crate) async fn new( + paf_concurrency: usize, + config: Vec, js_ctx: Context, - batch_size: usize, - paf_batch_size: usize, cancel_token: CancellationToken, ) -> Self { - let (sender, receiver) = mpsc::channel::(batch_size); + info!(?config, paf_concurrency, "Streaming JetstreamWriter",); let js_writer = JetstreamWriter::new( - stream_name, - partition_idx, - config, + // flatten the streams across the config + config.iter().flat_map(|c| c.streams.clone()).collect(), + config.first().unwrap().clone(), js_ctx, - paf_batch_size, cancel_token.clone(), ); - let mut actor = WriterActor::new(js_writer.clone(), receiver); - - tokio::spawn(async move { - actor.run().await; - }); - Self { sender } + Self { + config, + writer: js_writer, + paf_concurrency, + } } - pub(crate) async fn write( + /// Starts reading messages from the stream and writes them to Jetstream ISB. 
+ pub(crate) async fn streaming_write( &self, - message: Message, - ) -> Result>> { - let (sender, receiver) = oneshot::channel(); - let msg = ActorMessage::new(message, sender); - self.sender - .send(msg) - .await - .map_err(|e| Error::ISB(format!("Failed to write message to actor channel: {}", e)))?; - - Ok(receiver) + messages_stream: ReceiverStream, + ) -> Result>> { + let handle: JoinHandle> = tokio::spawn({ + let writer = self.writer.clone(); + let paf_concurrency = self.paf_concurrency; + let config = self.config.clone(); + let mut messages_stream = messages_stream; + let mut index = 0; + + async move { + let paf_resolver = PafResolver::new(paf_concurrency, writer.clone()); + while let Some(read_message) = messages_stream.next().await { + // if message needs to be dropped, ack and continue + // TODO: add metric for dropped count + if read_message.message.dropped() { + read_message + .ack + .send(ReadAck::Ack) + .map_err(|e| Error::ISB(format!("Failed to send ack: {:?}", e)))?; + continue; + } + let mut pafs = vec![]; + + // FIXME(CF): This is a temporary solution to round-robin the streams + for buffer in &config { + let payload: BytesMut = read_message + .message + .clone() + .try_into() + .expect("message serialization should not fail"); + let stream = buffer.streams.get(index).unwrap(); + index = (index + 1) % buffer.streams.len(); + + let paf = writer.write(stream.clone(), payload.into()).await; + pafs.push((stream.clone(), paf)); + } + + pipeline_metrics() + .forwarder + .write_total + .get_or_create(pipeline_isb_metric_labels()) + .inc(); + + paf_resolver + .resolve_pafs(ResolveAndPublishResult { + pafs, + payload: read_message.message.value.clone().into(), + ack_tx: read_message.ack, + }) + .await?; + } + Ok(()) + } + }); + Ok(handle) } } #[cfg(test)] mod tests { use std::collections::HashMap; - use std::time::Duration; use async_nats::jetstream; - use async_nats::jetstream::stream; + use async_nats::jetstream::{consumer, stream}; use chrono::Utc; use 
tokio::sync::oneshot; - use tokio::time::Instant; - use tracing::info; use super::*; - use crate::message::{Message, MessageID}; + use crate::message::{Message, MessageID, ReadAck}; #[cfg(feature = "nats-tests")] #[tokio::test] @@ -138,29 +142,43 @@ mod tests { let client = async_nats::connect(js_url).await.unwrap(); let context = jetstream::new(client); - let stream_name = "default"; + let stream_name = "test_publish_messages"; let _stream = context .get_or_create_stream(stream::Config { name: stream_name.into(), subjects: vec![stream_name.into()], + max_messages: 1000, ..Default::default() }) .await .unwrap(); - // Create ISBMessageHandler - let batch_size = 500; - let handler = WriterHandle::new( - stream_name.to_string(), - 0, - Default::default(), + let _consumer = context + .create_consumer_on_stream( + consumer::Config { + name: Some(stream_name.to_string()), + ack_policy: consumer::AckPolicy::Explicit, + ..Default::default() + }, + stream_name, + ) + .await + .unwrap(); + + let writer = ISBWriter::new( + 10, + vec![BufferWriterConfig { + streams: vec![(stream_name.to_string(), 0)], + max_length: 1000, + ..Default::default() + }], context.clone(), - batch_size, - 1000, cln_token.clone(), - ); + ) + .await; - let mut result_receivers = Vec::new(); + let mut ack_receivers = Vec::new(); + let (messages_tx, messages_rx) = tokio::sync::mpsc::channel(500); // Publish 500 messages for i in 0..500 { let message = Message { @@ -176,20 +194,22 @@ mod tests { headers: HashMap::new(), }; let (sender, receiver) = oneshot::channel(); - let msg = ActorMessage { + let read_message = ReadMessage { message, - callee_tx: sender, + ack: sender, }; - handler.sender.send(msg).await.unwrap(); - result_receivers.push(receiver); + messages_tx.send(read_message).await.unwrap(); + ack_receivers.push(receiver); } + drop(messages_tx); - // FIXME: Uncomment after we start awaiting for PAFs - //for receiver in result_receivers { - // let result = receiver.await.unwrap(); - // 
assert!(result.is_ok()); - //} + let receiver_stream = ReceiverStream::new(messages_rx); + let _handle = writer.streaming_write(receiver_stream).await.unwrap(); + for receiver in ack_receivers { + let result = receiver.await.unwrap(); + assert_eq!(result, ReadAck::Ack); + } context.delete_stream(stream_name).await.unwrap(); } @@ -212,18 +232,32 @@ mod tests { .await .unwrap(); + let _consumer = context + .create_consumer_on_stream( + consumer::Config { + name: Some(stream_name.to_string()), + ack_policy: consumer::AckPolicy::Explicit, + ..Default::default() + }, + stream_name, + ) + .await + .unwrap(); + let cancel_token = CancellationToken::new(); - let handler = WriterHandle::new( - stream_name.to_string(), - 0, - Default::default(), + let writer = ISBWriter::new( + 10, + vec![BufferWriterConfig { + streams: vec![(stream_name.to_string(), 0)], + ..Default::default() + }], context.clone(), - 500, - 1000, cancel_token.clone(), - ); + ) + .await; - let mut receivers = Vec::new(); + let mut ack_receivers = Vec::new(); + let (tx, rx) = tokio::sync::mpsc::channel(500); // Publish 100 messages successfully for i in 0..100 { let message = Message { @@ -238,14 +272,23 @@ mod tests { }, headers: HashMap::new(), }; - receivers.push(handler.write(message).await.unwrap()); + let (sender, receiver) = oneshot::channel(); + let read_message = ReadMessage { + message, + ack: sender, + }; + tx.send(read_message).await.unwrap(); + ack_receivers.push(receiver); } + let receiver_stream = ReceiverStream::new(rx); + let _handle = writer.streaming_write(receiver_stream).await.unwrap(); + // Attempt to publish the 101th message, which should get stuck in the retry loop // because the max message size is set to 1024 let message = Message { keys: vec!["key_101".to_string()], - value: vec![0; 1024].into(), + value: vec![0; 1025].into(), offset: None, event_time: Utc::now(), id: MessageID { @@ -255,111 +298,27 @@ mod tests { }, headers: HashMap::new(), }; - let receiver = 
handler.write(message).await.unwrap(); - receivers.push(receiver); + let (sender, receiver) = oneshot::channel(); + let read_message = ReadMessage { + message, + ack: sender, + }; + tx.send(read_message).await.unwrap(); + ack_receivers.push(receiver); + drop(tx); // Cancel the token to exit the retry loop cancel_token.cancel(); // Check the results - // FIXME: Uncomment after we start awaiting for PAFs - //for (i, receiver) in receivers.into_iter().enumerate() { - // let result = receiver.await.unwrap(); - // if i < 100 { - // assert!(result.is_ok()); - // } else { - // assert!(result.is_err()); - // } - //} - - context.delete_stream(stream_name).await.unwrap(); - } - - #[cfg(feature = "nats-tests")] - #[ignore] - #[tokio::test] - async fn benchmark_publish_messages() { - let js_url = "localhost:4222"; - // Create JetStream context - let client = async_nats::connect(js_url).await.unwrap(); - let context = jetstream::new(client); - - let stream_name = "benchmark_publish"; - let _stream = context - .get_or_create_stream(stream::Config { - name: stream_name.into(), - subjects: vec![stream_name.into()], - ..Default::default() - }) - .await - .unwrap(); - - let cancel_token = CancellationToken::new(); - let handler = WriterHandle::new( - stream_name.to_string(), - 0, - Default::default(), - context.clone(), - 500, - 1000, - cancel_token.clone(), - ); - - let (tx, mut rx) = mpsc::channel(100); - let test_start_time = Instant::now(); - let duration = Duration::from_secs(10); - - // Task to publish messages - let publish_task = tokio::spawn(async move { - let mut i = 0; - let mut sent_count = 0; - let mut start_time = Instant::now(); - while Instant::now().duration_since(test_start_time) < duration { - let message = Message { - keys: vec![format!("key_{}", i)], - value: format!("message {}", i).as_bytes().to_vec().into(), - offset: None, - event_time: Utc::now(), - id: MessageID { - vertex_name: "".to_string(), - offset: format!("offset_{}", i), - index: i, - }, - headers: 
HashMap::new(), - }; - tx.send(handler.write(message).await.unwrap()) - .await - .unwrap(); - sent_count += 1; - i += 1; - - if start_time.elapsed().as_secs() >= 1 { - info!("Messages sent: {}", sent_count); - sent_count = 0; - start_time = Instant::now(); - } + for (i, receiver) in ack_receivers.into_iter().enumerate() { + let result = receiver.await.unwrap(); + if i < 100 { + assert_eq!(result, ReadAck::Ack); + } else { + assert_eq!(result, ReadAck::Nak); } - }); - - // Task to await responses - let await_task = tokio::spawn(async move { - let mut start_time = Instant::now(); - let mut count = 0; - while let Some(receiver) = rx.recv().await { - if receiver.await.unwrap().is_ok() { - count += 1; - } - - if start_time.elapsed().as_secs() >= 1 { - info!("Messages received: {}", count); - count = 0; - start_time = Instant::now(); - } - } - }); - - let _ = tokio::join!(publish_task, await_task); - + } context.delete_stream(stream_name).await.unwrap(); } } diff --git a/rust/numaflow-core/src/pipeline/isb/jetstream/reader.rs b/rust/numaflow-core/src/pipeline/isb/jetstream/reader.rs index 46faf2e95b..6e0aff77bc 100644 --- a/rust/numaflow-core/src/pipeline/isb/jetstream/reader.rs +++ b/rust/numaflow-core/src/pipeline/isb/jetstream/reader.rs @@ -1,30 +1,33 @@ +use std::fmt; use std::time::Duration; use async_nats::jetstream::{ consumer::PullConsumer, AckKind, Context, Message as JetstreamMessage, }; - -use tokio::sync::mpsc::Receiver; use tokio::sync::{mpsc, oneshot}; use tokio::task::JoinHandle; use tokio::time::{self, Instant}; +use tokio_stream::wrappers::ReceiverStream; use tokio_stream::StreamExt; use tokio_util::sync::CancellationToken; -use tracing::{debug, error, info, warn}; +use tracing::{error, info}; use crate::config::pipeline::isb::BufferReaderConfig; use crate::config::pipeline::PipelineConfig; use crate::error::Error; use crate::message::{IntOffset, Message, Offset, ReadAck, ReadMessage}; -use crate::metrics::{forward_pipeline_metrics, 
pipeline_forward_read_metric_labels}; +use crate::metrics::{ + pipeline_forward_metric_labels, pipeline_isb_metric_labels, pipeline_metrics, +}; use crate::Result; -// The JetstreamReader is a handle to the background actor that continuously fetches messages from Jetstream. -// It can be used to cancel the background task and stop reading from Jetstream. -// The sender end of the channel is not stored in this struct, since the struct is clone-able and the mpsc channel is only closed when all the senders are dropped. -// Storing the Sender end of channel in this struct would make it difficult to close the channel with `cancel` method. +/// The JetstreamReader is a handle to the background actor that continuously fetches messages from Jetstream. +/// It can be used to cancel the background task and stop reading from Jetstream. +/// The sender end of the channel is not stored in this struct, since the struct is clone-able and the mpsc channel is only closed when all the senders are dropped. +/// Storing the Sender end of channel in this struct would make it difficult to close the channel with `cancel` method. #[derive(Clone)] pub(crate) struct JetstreamReader { + stream_name: &'static str, partition_idx: u16, config: BufferReaderConfig, consumer: PullConsumer, @@ -32,7 +35,7 @@ pub(crate) struct JetstreamReader { impl JetstreamReader { pub(crate) async fn new( - stream_name: String, + stream_name: &'static str, partition_idx: u16, js_ctx: Context, config: BufferReaderConfig, @@ -58,141 +61,127 @@ impl JetstreamReader { config.wip_ack_interval = wip_ack_interval; Ok(Self { + stream_name, partition_idx, config: config.clone(), consumer, }) } - // When we encounter an error, we log the error and return from the function. This drops the sender end of the channel. - // The closing of the channel should propagate to the receiver end and the receiver should exit gracefully. 
- // Within the loop, we only consider cancellationToken cancellation during the permit reservation and fetching messages, - // since rest of the operations should finish immediately. - pub(crate) async fn start( + /// streaming_read is a background task that continuously fetches messages from Jetstream and + /// emits them on a channel. When we encounter an error, we log the error and return from the + /// function. This drops the sender end of the channel. The closing of the channel should propagate + /// to the receiver end and the receiver should exit gracefully. Within the loop, we only consider + /// cancellationToken cancellation during the permit reservation and fetching messages, + /// since rest of the operations should finish immediately. + pub(crate) async fn streaming_read( &self, cancel_token: CancellationToken, pipeline_config: &PipelineConfig, - ) -> Result<(Receiver, JoinHandle>)> { - // FIXME: factor of 2 should be configurable, at the least a const + ) -> Result<(ReceiverStream, JoinHandle>)> { let (messages_tx, messages_rx) = mpsc::channel(2 * pipeline_config.batch_size); let handle: JoinHandle> = tokio::spawn({ - let this = self.clone(); - let pipeline_config = pipeline_config.clone(); + let consumer = self.consumer.clone(); + let partition_idx = self.partition_idx; + let config = self.config.clone(); + let cancel_token = cancel_token.clone(); + let stream_name = self.stream_name; async move { - // FIXME: - let partition: &str = pipeline_config - .from_vertex_config - .first() - .unwrap() - .reader_config - .streams - .first() - .unwrap() - .0 - .as_ref(); - - let labels = pipeline_forward_read_metric_labels( - pipeline_config.pipeline_name.as_ref(), - partition, - pipeline_config.vertex_name.as_ref(), - pipeline_config.vertex_config.to_string().as_ref(), - pipeline_config.replica, - ); - - let chunk_stream = this - .consumer - .messages() - .await - .unwrap() - .chunks_timeout(pipeline_config.batch_size, pipeline_config.read_timeout); - - 
tokio::pin!(chunk_stream); - - // The .next() call will not return if there is no data even if read_timeout is - // reached. - let mut total_messages = 0; - let mut chunk_time = Instant::now(); + let labels = pipeline_forward_metric_labels("Sink", Some(stream_name)); + + let mut message_stream = consumer.messages().await.map_err(|e| { + Error::ISB(format!( + "Failed to get message stream from Jetstream: {:?}", + e + )) + })?; + let mut start_time = Instant::now(); - while let Some(messages) = chunk_stream.next().await { - debug!( - len = messages.len(), - elapsed_ms = chunk_time.elapsed().as_millis(), - "Received messages from Jetstream", - ); - total_messages += messages.len(); - for message in messages { - let jetstream_message = message.map_err(|e| { - Error::ISB(format!( - "Error while fetching message from Jetstream: {:?}", - e - )) - })?; - - let msg_info = jetstream_message.info().map_err(|e| { - Error::ISB(format!( - "Error while fetching message info from Jetstream: {:?}", - e - )) - })?; - - let mut message: Message = - jetstream_message.payload.clone().try_into().map_err(|e| { - Error::ISB(format!( - "Error while converting Jetstream message to Message: {:?}", - e - )) + let mut total_messages = 0; + loop { + tokio::select! { + _ = cancel_token.cancelled() => { // should we drain from the stream when token is cancelled? 
+ info!(?stream_name, "Cancellation token received, stopping the reader."); + break; + } + message = message_stream.next() => { + let Some(message) = message else { + // stream has been closed because we got none + info!(?stream_name, "Stream has been closed"); + break; + }; + + let jetstream_message = match message { + Ok(message) => message, + Err(e) => { + error!(?e, ?stream_name, "Failed to fetch messages from the Jetstream"); + continue; + } + }; + let msg_info = match jetstream_message.info() { + Ok(info) => info, + Err(e) => { + error!(?e, ?stream_name, "Failed to get message info from Jetstream"); + continue; + } + }; + + let mut message: Message = match jetstream_message.payload.clone().try_into() { + Ok(message) => message, + Err(e) => { + error!( + ?e, ?stream_name, ?jetstream_message, + "Failed to parse message payload received from Jetstream", + ); + continue; + } + }; + + message.offset = Some(Offset::Int(IntOffset::new( + msg_info.stream_sequence, + partition_idx, + ))); + + let (ack_tx, ack_rx) = oneshot::channel(); + tokio::spawn(Self::start_work_in_progress( + jetstream_message, + ack_rx, + config.wip_ack_interval, + )); + + let read_message = ReadMessage { + message, + ack: ack_tx, + }; + + messages_tx.send(read_message).await.map_err(|e| { + Error::ISB(format!("Error while sending message to channel: {:?}", e)) })?; - message.offset = Some(Offset::Int(IntOffset::new( - msg_info.stream_sequence, - this.partition_idx, - ))); - - let (ack_tx, ack_rx) = oneshot::channel(); - - tokio::spawn(Self::start_work_in_progress( - jetstream_message, - ack_rx, - this.config.wip_ack_interval, - )); - - let read_message = ReadMessage { - message, - ack: ack_tx, - }; - - messages_tx.send(read_message).await.map_err(|e| { - Error::ISB(format!("Error while sending message to channel: {:?}", e)) - })?; - - forward_pipeline_metrics() - .forwarder - .data_read - .get_or_create(labels) - .inc(); - - if start_time.elapsed() >= Duration::from_millis(1000) { - info!( - 
len = total_messages, - elapsed_ms = start_time.elapsed().as_millis(), - "Total messages read from Jetstream" - ); - start_time = Instant::now(); - total_messages = 0; + pipeline_metrics() + .forwarder + .read_total + .get_or_create(labels) + .inc(); + + if start_time.elapsed() >= Duration::from_millis(1000) { + info!( + "Total messages read from Jetstream in {:?} seconds: {}", + start_time.elapsed(), + total_messages + ); + start_time = Instant::now(); + total_messages = 0; + } } } - if cancel_token.is_cancelled() { - warn!("Cancellation token is cancelled. Exiting JetstreamReader"); - break; - } - chunk_time = Instant::now(); } Ok(()) } }); - Ok((messages_rx, handle)) + Ok((ReceiverStream::new(messages_rx), handle)) } // Intended to be run as background task which will continuously send InProgress acks to Jetstream. @@ -204,6 +193,7 @@ impl JetstreamReader { tick: Duration, ) { let mut interval = time::interval_at(Instant::now() + tick, tick); + let start = Instant::now(); loop { let wip = async { @@ -232,6 +222,17 @@ impl JetstreamReader { if let Err(e) = ack_result { error!(?e, "Failed to send Ack to Jetstream for message"); } + pipeline_metrics() + .forwarder + .ack_time + .get_or_create(pipeline_isb_metric_labels()) + .observe(start.elapsed().as_micros() as f64); + + pipeline_metrics() + .forwarder + .ack_total + .get_or_create(pipeline_isb_metric_labels()) + .inc(); return; } ReadAck::Nak => { @@ -246,6 +247,16 @@ impl JetstreamReader { } } +impl fmt::Display for JetstreamReader { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "JetstreamReader {{ stream_name: {}, partition_idx: {}, config: {:?} }}", + self.stream_name, self.partition_idx, self.config + ) + } +} + #[cfg(test)] mod tests { use std::collections::HashMap; @@ -254,10 +265,10 @@ mod tests { use async_nats::jetstream::{consumer, stream}; use bytes::BytesMut; use chrono::Utc; - use tracing::info; use super::*; - use crate::message::{Message, MessageID, Offset}; + use 
crate::message::ReadAck::Ack; + use crate::message::{Message, MessageID}; use crate::pipeline::isb::jetstream::writer::JetstreamWriter; #[cfg(feature = "nats-tests")] @@ -268,7 +279,7 @@ mod tests { let client = async_nats::connect(js_url).await.unwrap(); let context = jetstream::new(client); - let stream_name = "test_cancellation-2"; + let stream_name = "test_jetstream_read"; context .get_or_create_stream(stream::Config { name: stream_name.into(), @@ -296,14 +307,9 @@ mod tests { streams: vec![], wip_ack_interval: Duration::from_millis(5), }; - let js_reader = JetstreamReader::new( - stream_name.to_string(), - 0, - context.clone(), - buf_reader_config, - ) - .await - .unwrap(); + let js_reader = JetstreamReader::new(stream_name, 0, context.clone(), buf_reader_config) + .await + .unwrap(); let pipeline_cfg_base64 = "eyJtZXRhZGF0YSI6eyJuYW1lIjoic2ltcGxlLXBpcGVsaW5lLW91dCIsIm5hbWVzcGFjZSI6ImRlZmF1bHQiLCJjcmVhdGlvblRpbWVzdGFtcCI6bnVsbH0sInNwZWMiOnsibmFtZSI6Im91dCIsInNpbmsiOnsiYmxhY2tob2xlIjp7fSwicmV0cnlTdHJhdGVneSI6eyJvbkZhaWx1cmUiOiJyZXRyeSJ9fSwibGltaXRzIjp7InJlYWRCYXRjaFNpemUiOjUwMCwicmVhZFRpbWVvdXQiOiIxcyIsImJ1ZmZlck1heExlbmd0aCI6MzAwMDAsImJ1ZmZlclVzYWdlTGltaXQiOjgwfSwic2NhbGUiOnsibWluIjoxfSwidXBkYXRlU3RyYXRlZ3kiOnsidHlwZSI6IlJvbGxpbmdVcGRhdGUiLCJyb2xsaW5nVXBkYXRlIjp7Im1heFVuYXZhaWxhYmxlIjoiMjUlIn19LCJwaXBlbGluZU5hbWUiOiJzaW1wbGUtcGlwZWxpbmUiLCJpbnRlclN0ZXBCdWZmZXJTZXJ2aWNlTmFtZSI6IiIsInJlcGxpY2FzIjowLCJmcm9tRWRnZXMiOlt7ImZyb20iOiJpbiIsInRvIjoib3V0IiwiY29uZGl0aW9ucyI6bnVsbCwiZnJvbVZlcnRleFR5cGUiOiJTb3VyY2UiLCJmcm9tVmVydGV4UGFydGl0aW9uQ291bnQiOjEsImZyb21WZXJ0ZXhMaW1pdHMiOnsicmVhZEJhdGNoU2l6ZSI6NTAwLCJyZWFkVGltZW91dCI6IjFzIiwiYnVmZmVyTWF4TGVuZ3RoIjozMDAwMCwiYnVmZmVyVXNhZ2VMaW1pdCI6ODB9LCJ0b1ZlcnRleFR5cGUiOiJTaW5rIiwidG9WZXJ0ZXhQYXJ0aXRpb25Db3VudCI6MSwidG9WZXJ0ZXhMaW1pdHMiOnsicmVhZEJhdGNoU2l6ZSI6NTAwLCJyZWFkVGltZW91dCI6IjFzIiwiYnVmZmVyTWF4TGVuZ3RoIjozMDAwMCwiYnVmZmVyVXNhZ2VMaW1pdCI6ODB9fV0sIndhdGVybWFyayI6eyJtYXhEZWxheSI6IjBzIn19LCJzdGF0dXMiOnsicGhhc2UiOiIiLCJyZXBsaWNh
cyI6MCwiZGVzaXJlZFJlcGxpY2FzIjowLCJsYXN0U2NhbGVkQXQiOm51bGx9fQ==".to_string(); @@ -311,17 +317,15 @@ mod tests { let pipeline_config = PipelineConfig::load(pipeline_cfg_base64, env_vars).unwrap(); let reader_cancel_token = CancellationToken::new(); let (mut js_reader_rx, js_reader_task) = js_reader - .start(reader_cancel_token.clone(), &pipeline_config) + .streaming_read(reader_cancel_token.clone(), &pipeline_config) .await .unwrap(); let writer_cancel_token = CancellationToken::new(); let writer = JetstreamWriter::new( - stream_name.to_string(), - 0, + vec![(stream_name.to_string(), 0)], Default::default(), context.clone(), - 5000, writer_cancel_token.clone(), ); @@ -338,18 +342,20 @@ mod tests { }, headers: HashMap::new(), }; - let (success_tx, success_rx) = oneshot::channel::>(); let message_bytes: BytesMut = message.try_into().unwrap(); - writer.write(message_bytes.into(), success_tx).await; - success_rx.await.unwrap().unwrap(); + writer + .write((stream_name.to_string(), 0), message_bytes.into()) + .await + .await + .unwrap(); } - info!("Sent 10 messages"); + // Cancel the token to exit the retry loop writer_cancel_token.cancel(); let mut buffer = vec![]; for _ in 0..10 { - let Some(val) = js_reader_rx.recv().await else { + let Some(val) = js_reader_rx.next().await else { break; }; buffer.push(val); @@ -362,12 +368,112 @@ mod tests { ); reader_cancel_token.cancel(); - // The token cancellation won't abort the task since we are using chunks_timeout in - // Jetstream reader. 
- // js_reader_task.await.unwrap().unwrap(); - js_reader_task.abort(); - let _ = js_reader_task.await; - assert!(js_reader_rx.is_closed()); + js_reader_task.await.unwrap().unwrap(); + + context.delete_stream(stream_name).await.unwrap(); + } + + #[cfg(feature = "nats-tests")] + #[tokio::test] + async fn test_jetstream_ack() { + let js_url = "localhost:4222"; + // Create JetStream context + let client = async_nats::connect(js_url).await.unwrap(); + let context = jetstream::new(client); + + let stream_name = "test_ack"; + context + .get_or_create_stream(stream::Config { + name: stream_name.into(), + subjects: vec![stream_name.into()], + max_message_size: 1024, + ..Default::default() + }) + .await + .unwrap(); + + let _consumer = context + .create_consumer_on_stream( + consumer::Config { + name: Some(stream_name.to_string()), + ack_policy: consumer::AckPolicy::Explicit, + ..Default::default() + }, + stream_name, + ) + .await + .unwrap(); + + let buf_reader_config = BufferReaderConfig { + partitions: 0, + streams: vec![], + wip_ack_interval: Duration::from_millis(5), + }; + let js_reader = JetstreamReader::new(stream_name, 0, context.clone(), buf_reader_config) + .await + .unwrap(); + + let pipeline_cfg_base64 = 
"eyJtZXRhZGF0YSI6eyJuYW1lIjoic2ltcGxlLXBpcGVsaW5lLW91dCIsIm5hbWVzcGFjZSI6ImRlZmF1bHQiLCJjcmVhdGlvblRpbWVzdGFtcCI6bnVsbH0sInNwZWMiOnsibmFtZSI6Im91dCIsInNpbmsiOnsiYmxhY2tob2xlIjp7fSwicmV0cnlTdHJhdGVneSI6eyJvbkZhaWx1cmUiOiJyZXRyeSJ9fSwibGltaXRzIjp7InJlYWRCYXRjaFNpemUiOjUwMCwicmVhZFRpbWVvdXQiOiIxcyIsImJ1ZmZlck1heExlbmd0aCI6MzAwMDAsImJ1ZmZlclVzYWdlTGltaXQiOjgwfSwic2NhbGUiOnsibWluIjoxfSwidXBkYXRlU3RyYXRlZ3kiOnsidHlwZSI6IlJvbGxpbmdVcGRhdGUiLCJyb2xsaW5nVXBkYXRlIjp7Im1heFVuYXZhaWxhYmxlIjoiMjUlIn19LCJwaXBlbGluZU5hbWUiOiJzaW1wbGUtcGlwZWxpbmUiLCJpbnRlclN0ZXBCdWZmZXJTZXJ2aWNlTmFtZSI6IiIsInJlcGxpY2FzIjowLCJmcm9tRWRnZXMiOlt7ImZyb20iOiJpbiIsInRvIjoib3V0IiwiY29uZGl0aW9ucyI6bnVsbCwiZnJvbVZlcnRleFR5cGUiOiJTb3VyY2UiLCJmcm9tVmVydGV4UGFydGl0aW9uQ291bnQiOjEsImZyb21WZXJ0ZXhMaW1pdHMiOnsicmVhZEJhdGNoU2l6ZSI6NTAwLCJyZWFkVGltZW91dCI6IjFzIiwiYnVmZmVyTWF4TGVuZ3RoIjozMDAwMCwiYnVmZmVyVXNhZ2VMaW1pdCI6ODB9LCJ0b1ZlcnRleFR5cGUiOiJTaW5rIiwidG9WZXJ0ZXhQYXJ0aXRpb25Db3VudCI6MSwidG9WZXJ0ZXhMaW1pdHMiOnsicmVhZEJhdGNoU2l6ZSI6NTAwLCJyZWFkVGltZW91dCI6IjFzIiwiYnVmZmVyTWF4TGVuZ3RoIjozMDAwMCwiYnVmZmVyVXNhZ2VMaW1pdCI6ODB9fV0sIndhdGVybWFyayI6eyJtYXhEZWxheSI6IjBzIn19LCJzdGF0dXMiOnsicGhhc2UiOiIiLCJyZXBsaWNhcyI6MCwiZGVzaXJlZFJlcGxpY2FzIjowLCJsYXN0U2NhbGVkQXQiOm51bGx9fQ==".to_string(); + + let env_vars = [("NUMAFLOW_ISBSVC_JETSTREAM_URL", "localhost:4222")]; + let pipeline_config = PipelineConfig::load(pipeline_cfg_base64, env_vars).unwrap(); + let reader_cancel_token = CancellationToken::new(); + let (mut js_reader_rx, js_reader_task) = js_reader + .streaming_read(reader_cancel_token.clone(), &pipeline_config) + .await + .unwrap(); + + let writer_cancel_token = CancellationToken::new(); + let writer = JetstreamWriter::new( + vec![(stream_name.to_string(), 0)], + Default::default(), + context.clone(), + writer_cancel_token.clone(), + ); + + // write 5 messages + for i in 0..5 { + let message = Message { + keys: vec![format!("key_{}", i)], + value: format!("message {}", i).as_bytes().to_vec().into(), + offset: None, + 
event_time: Utc::now(), + id: MessageID { + vertex_name: "vertex".to_string(), + offset: format!("offset_{}", i), + index: i, + }, + headers: HashMap::new(), + }; + let message_bytes: BytesMut = message.try_into().unwrap(); + writer + .write((stream_name.to_string(), 0), message_bytes.into()) + .await + .await + .unwrap(); + } + // Cancel the token to exit the retry loop + writer_cancel_token.cancel(); + + for _ in 0..5 { + let Some(val) = js_reader_rx.next().await else { + break; + }; + val.ack.send(Ack).unwrap() + } + + let mut consumer: PullConsumer = context + .get_consumer_from_stream(stream_name, stream_name) + .await + .unwrap(); + + let consumer_info = consumer.info().await.unwrap(); + + assert_eq!(consumer_info.num_pending, 0); + assert_eq!(consumer_info.num_ack_pending, 0); + + reader_cancel_token.cancel(); + js_reader_task.await.unwrap().unwrap(); context.delete_stream(stream_name).await.unwrap(); } diff --git a/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs b/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs index 9fbc7603a9..28a8ca6ec0 100644 --- a/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs +++ b/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs @@ -1,3 +1,4 @@ +use std::collections::HashMap; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use std::time::Duration; @@ -8,51 +9,50 @@ use async_nats::jetstream::publish::PublishAck; use async_nats::jetstream::stream::RetentionPolicy::Limits; use async_nats::jetstream::Context; use bytes::Bytes; -use tokio::sync::mpsc::Receiver; -use tokio::sync::{mpsc, oneshot}; -use tokio::time::sleep; +use tokio::sync::{oneshot, Semaphore}; +use tokio::time::{sleep, Instant}; use tokio_util::sync::CancellationToken; use tracing::{debug, error, info, warn}; use crate::config::pipeline::isb::BufferWriterConfig; use crate::error::Error; -use crate::message::{IntOffset, Offset}; +use crate::message::{IntOffset, Offset, ReadAck}; +use 
crate::metrics::{pipeline_isb_metric_labels, pipeline_metrics}; +use crate::pipeline::isb::jetstream::Stream; use crate::Result; #[derive(Clone, Debug)] /// Writes to JetStream ISB. Exposes both write and blocking methods to write messages. /// It accepts a cancellation token to stop infinite retries during shutdown. -pub(super) struct JetstreamWriter { - stream_name: String, - partition_idx: u16, +/// JetstreamWriter is one to many mapping of streams to write messages to. It also +/// maintains the buffer usage metrics for each stream. +pub(crate) struct JetstreamWriter { + streams: Vec, config: BufferWriterConfig, js_ctx: Context, - is_full: Arc, - paf_resolver_tx: mpsc::Sender, + is_full: HashMap>, cancel_token: CancellationToken, } impl JetstreamWriter { /// Creates a JetStream Writer and a background task to make sure the Write futures (PAFs) are /// successful. Batch Size determines the maximum pending futures. - pub(super) fn new( - stream_name: String, - partition_idx: u16, + pub(crate) fn new( + streams: Vec, config: BufferWriterConfig, js_ctx: Context, - paf_batch_size: usize, cancel_token: CancellationToken, ) -> Self { - let (paf_resolver_tx, paf_resolver_rx) = - mpsc::channel::(paf_batch_size); + let is_full = streams + .iter() + .map(|stream| (stream.0.clone(), Arc::new(AtomicBool::new(false)))) + .collect::>(); let this = Self { - stream_name, - partition_idx, + streams, config, js_ctx, - is_full: Arc::new(AtomicBool::new(false)), - paf_resolver_tx, + is_full, cancel_token, }; @@ -64,33 +64,33 @@ impl JetstreamWriter { } }); - // spawn a task for resolving PAFs - let mut resolver_actor = PafResolverActor::new(this.clone(), paf_resolver_rx); - tokio::spawn(async move { - resolver_actor.run().await; - }); - this } - /// Checks the buffer usage metrics (soft and solid usage) for a given stream. + /// Checks the buffer usage metrics (soft and solid usage) for each stream in the streams vector. 
/// If the usage is greater than the bufferUsageLimit, it sets the is_full flag to true. async fn check_stream_status(&mut self) { let mut interval = tokio::time::interval(self.config.refresh_interval); loop { tokio::select! { _ = interval.tick() => { - match Self::fetch_buffer_usage(self.js_ctx.clone(), self.stream_name.as_str(), self.config.max_length).await { - Ok((soft_usage, solid_usage)) => { - if solid_usage >= self.config.usage_limit && soft_usage >= self.config.usage_limit { - self.is_full.store(true, Ordering::Relaxed); - } else { - self.is_full.store(false, Ordering::Relaxed); + for stream in &self.streams { + match Self::fetch_buffer_usage(self.js_ctx.clone(), stream.0.as_str(), self.config.max_length).await { + Ok((soft_usage, solid_usage)) => { + if solid_usage >= self.config.usage_limit && soft_usage >= self.config.usage_limit { + if let Some(is_full) = self.is_full.get(stream.0.as_str()) { + is_full.store(true, Ordering::Relaxed); + } + } else if let Some(is_full) = self.is_full.get(stream.0.as_str()) { + is_full.store(false, Ordering::Relaxed); + } + } + Err(e) => { + error!(?e, "Failed to fetch buffer usage for stream {}, updating isFull to true", stream.0.as_str()); + if let Some(is_full) = self.is_full.get(stream.0.as_str()) { + is_full.store(true, Ordering::Relaxed); + } } - } - Err(e) => { - error!(?e, "Failed to fetch buffer usage, updating isFull to true"); - self.is_full.store(true, Ordering::Relaxed); } } } @@ -101,7 +101,7 @@ impl JetstreamWriter { } } - /// Fetches the buffer usage metrics (soft and solid usage) for a given stream. + /// Fetches the buffer usage metrics (soft and solid usage) for the given stream. /// /// Soft Usage: /// Formula: (NumPending + NumAckPending) / maxLength @@ -154,20 +154,31 @@ impl JetstreamWriter { /// Writes the message to the JetStream ISB and returns a future which can be /// awaited to get the PublishAck. It will do infinite retries until the message /// gets published successfully. 
If it returns an error it means it is fatal error - pub(super) async fn write(&self, payload: Vec, callee_tx: oneshot::Sender>) { + pub(super) async fn write(&self, stream: Stream, payload: Vec) -> PublishAckFuture { let js_ctx = self.js_ctx.clone(); + let mut counter = 500u64; + // loop till we get a PAF, there could be other reasons why PAFs cannot be created. let paf = loop { - // let's write only if the buffer is not full - match self.is_full.load(Ordering::Relaxed) { - true => { + // let's write only if the buffer is not full for the stream + match self + .is_full + .get(&stream.0) + .map(|is_full| is_full.load(Ordering::Relaxed)) + { + Some(true) => { // FIXME: add metrics - info!(%self.stream_name, "stream is full"); + if counter >= 500 { + warn!(stream=?stream.0, "stream is full (throttled logging)"); + counter = 0; + } + counter += 1; + // FIXME: consider buffer-full strategy } - false => match js_ctx - .publish(self.stream_name.clone(), Bytes::from(payload.clone())) + Some(false) => match js_ctx + .publish(stream.0.clone(), Bytes::from(payload.clone())) .await { Ok(paf) => { @@ -177,40 +188,36 @@ impl JetstreamWriter { error!(?e, "publishing failed, retrying"); } }, + None => { + error!("Stream {} not found in is_full map", stream.0); + } } // short-circuit out in failure mode if shutdown has been initiated if self.cancel_token.is_cancelled() { error!("Shutdown signal received, exiting write loop"); - callee_tx - .send(Err(Error::ISB("Shutdown signal received".to_string()))) - .unwrap(); - return; } // sleep to avoid busy looping sleep(self.config.retry_interval).await; }; - // send the paf and callee_tx over - self.paf_resolver_tx - .send(ResolveAndPublishResult { - paf, - payload, - callee_tx, - }) - .await - .expect("send should not fail"); + paf } /// Writes the message to the JetStream ISB and returns the PublishAck. It will do /// infinite retries until the message gets published successfully. 
If it returns /// an error it means it is fatal non-retryable error. - pub(super) async fn blocking_write(&self, payload: Vec) -> Result { + pub(super) async fn blocking_write( + &self, + stream: Stream, + payload: Vec, + ) -> Result { let js_ctx = self.js_ctx.clone(); - let start_time = tokio::time::Instant::now(); + let start_time = Instant::now(); + info!("Blocking write for stream {}", stream.0); loop { match js_ctx - .publish(self.stream_name.clone(), Bytes::from(payload.clone())) + .publish(stream.0.clone(), Bytes::from(payload.clone())) .await { Ok(paf) => match paf.await { @@ -219,7 +226,7 @@ impl JetstreamWriter { // should we return an error here? Because duplicate messages are not fatal // But it can mess up the watermark progression because the offset will be // same as the previous message offset - warn!(ack = ?ack, "Duplicate message detected, ignoring"); + warn!(?ack, "Duplicate message detected, ignoring"); } debug!( elapsed_ms = start_time.elapsed().as_millis(), @@ -245,81 +252,109 @@ impl JetstreamWriter { } /// ResolveAndPublishResult resolves the result of the write PAF operation. -/// It contains the PublishAckFuture which can be awaited to get the PublishAck. Once PAF has +/// It contains the list of pafs(one message can be written to multiple streams) +/// and the payload that was written. Once the PAFs for all the streams have been /// resolved, the information is published to callee_tx. #[derive(Debug)] -pub(super) struct ResolveAndPublishResult { - paf: PublishAckFuture, - payload: Vec, - callee_tx: oneshot::Sender>, +pub(crate) struct ResolveAndPublishResult { + pub(crate) pafs: Vec<(Stream, PublishAckFuture)>, + pub(crate) payload: Vec, + // Acknowledgement oneshot to notify the reader that the message has been written + pub(crate) ack_tx: oneshot::Sender, } /// Resolves the PAF from the write call, if not successful it will do a blocking write so that /// it is eventually successful. 
Once the PAF has been resolved (by either means) it will notify /// the top-level callee via the oneshot rx. -struct PafResolverActor { +pub(crate) struct PafResolver { + sem: Arc, js_writer: JetstreamWriter, - receiver: Receiver, } -impl PafResolverActor { - fn new(js_writer: JetstreamWriter, receiver: Receiver) -> Self { - PafResolverActor { +impl PafResolver { + pub(crate) fn new(concurrency: usize, js_writer: JetstreamWriter) -> Self { + PafResolver { + sem: Arc::new(Semaphore::new(concurrency)), // concurrency limit for resolving PAFs js_writer, - receiver, } } - /// Tries to the resolve the original PAF from the write call. If it is successful, will send - /// the successful result to the top-level callee's oneshot channel. If the original PAF does - /// not successfully resolve, it will do blocking write till write to JetStream succeeds. - async fn successfully_resolve_paf(&mut self, result: ResolveAndPublishResult) { - match result.paf.await { - Ok(ack) => { - if ack.duplicate { - warn!("Duplicate message detected, ignoring {:?}", ack); - } - result - .callee_tx - .send(Ok(Offset::Int(IntOffset::new( - ack.sequence, - self.js_writer.partition_idx, - )))) - .unwrap_or_else(|e| { - error!("Failed to send offset: {:?}", e); - }) - } - Err(e) => { - error!(?e, "Failed to resolve the future, trying blocking write"); - match self.js_writer.blocking_write(result.payload.clone()).await { + /// resolve_pafs resolves the PAFs for the given result. It will try to resolve the PAFs + /// asynchronously, if it fails it will do a blocking write to resolve the PAFs. + /// At any point in time, we will only have X PAF resolvers running, this will help us create a + /// natural backpressure. 
+ pub(crate) async fn resolve_pafs(&self, result: ResolveAndPublishResult) -> Result<()> { + let start_time = Instant::now(); + let permit = Arc::clone(&self.sem) + .acquire_owned() + .await + .map_err(|_e| Error::ISB("Failed to acquire semaphore permit".to_string()))?; + let mut offsets = Vec::new(); + + let js_writer = self.js_writer.clone(); + tokio::spawn(async move { + let _permit = permit; + for (stream, paf) in result.pafs { + match paf.await { Ok(ack) => { if ack.duplicate { - warn!("Duplicate message detected, ignoring {:?}", ack); + warn!( + "Duplicate message detected for stream {}, ignoring {:?}", + stream.0, ack + ); } - result - .callee_tx - .send(Ok(Offset::Int(IntOffset::new( - ack.sequence, - self.js_writer.partition_idx, - )))) - .unwrap() + offsets.push(( + stream.clone(), + Offset::Int(IntOffset::new(ack.sequence, stream.1)), + )); } Err(e) => { - error!(?e, "Blocking write failed"); - result - .callee_tx - .send(Err(Error::ISB("Shutdown signal received".to_string()))) - .unwrap() + error!( + ?e, + "Failed to resolve the future for stream {}, trying blocking write", + stream.0 + ); + match js_writer + .blocking_write(stream.clone(), result.payload.clone()) + .await + { + Ok(ack) => { + if ack.duplicate { + warn!( + "Duplicate message detected for stream {}, ignoring {:?}", + stream.0, ack + ); + } + offsets.push(( + stream.clone(), + Offset::Int(IntOffset::new(ack.sequence, stream.1)), + )); + } + Err(e) => { + error!(?e, "Blocking write failed for stream {}", stream.0); + // Since we failed to write to the stream, we need to send a NAK to the reader + result.ack_tx.send(ReadAck::Nak).unwrap_or_else(|e| { + error!("Failed to send error for stream {}: {:?}", stream.0, e); + }); + return; + } + } } } } - } - } - async fn run(&mut self) { - while let Some(result) = self.receiver.recv().await { - self.successfully_resolve_paf(result).await; - } + // Send an ack to the reader + result.ack_tx.send(ReadAck::Ack).unwrap_or_else(|e| { + error!("Failed to 
send ack: {:?}", e); + }); + + pipeline_metrics() + .isb + .paf_resolution_time + .get_or_create(pipeline_isb_metric_labels()) + .observe(start_time.elapsed().as_micros() as f64); + }); + Ok(()) } } @@ -334,7 +369,7 @@ mod tests { use chrono::Utc; use super::*; - use crate::message::{Message, MessageID, Offset}; + use crate::message::{Message, MessageID}; #[cfg(feature = "nats-tests")] #[tokio::test] @@ -368,11 +403,9 @@ mod tests { .unwrap(); let writer = JetstreamWriter::new( - stream_name.to_string(), - 0, + vec![(stream_name.to_string(), 0)], Default::default(), context.clone(), - 500, cln_token.clone(), ); @@ -389,10 +422,11 @@ mod tests { headers: HashMap::new(), }; - let (success_tx, success_rx) = oneshot::channel::>(); let message_bytes: BytesMut = message.try_into().unwrap(); - writer.write(message_bytes.into(), success_tx).await; - assert!(success_rx.await.is_ok()); + let paf = writer + .write((stream_name.to_string(), 0), message_bytes.into()) + .await; + assert!(paf.await.is_ok()); context.delete_stream(stream_name).await.unwrap(); } @@ -429,11 +463,9 @@ mod tests { .unwrap(); let writer = JetstreamWriter::new( - stream_name.to_string(), - 0, + vec![(stream_name.to_string(), 0)], Default::default(), context.clone(), - 500, cln_token.clone(), ); @@ -451,7 +483,9 @@ mod tests { }; let message_bytes: BytesMut = message.try_into().unwrap(); - let result = writer.blocking_write(message_bytes.into()).await; + let result = writer + .blocking_write((stream_name.to_string(), 0), message_bytes.into()) + .await; assert!(result.is_ok()); let publish_ack = result.unwrap(); @@ -493,11 +527,9 @@ mod tests { let cancel_token = CancellationToken::new(); let writer = JetstreamWriter::new( - stream_name.to_string(), - 0, + vec![(stream_name.to_string(), 0)], Default::default(), context.clone(), - 500, cancel_token.clone(), ); @@ -516,10 +548,11 @@ mod tests { }, headers: HashMap::new(), }; - let (success_tx, success_rx) = oneshot::channel::>(); let message_bytes: BytesMut 
= message.try_into().unwrap(); - writer.write(message_bytes.into(), success_tx).await; - result_receivers.push(success_rx); + let paf = writer + .write((stream_name.to_string(), 0), message_bytes.into()) + .await; + result_receivers.push(paf); } // Attempt to publish a message which has a payload size greater than the max_message_size @@ -536,32 +569,28 @@ mod tests { }, headers: HashMap::new(), }; - let (success_tx, success_rx) = oneshot::channel::>(); let message_bytes: BytesMut = message.try_into().unwrap(); - writer.write(message_bytes.into(), success_tx).await; - result_receivers.push(success_rx); + let paf = writer + .write((stream_name.to_string(), 0), message_bytes.into()) + .await; + result_receivers.push(paf); // Cancel the token to exit the retry loop cancel_token.cancel(); // Check the results for (i, receiver) in result_receivers.into_iter().enumerate() { - let result = receiver.await.unwrap(); if i < 10 { assert!( - result.is_ok(), + receiver.await.is_ok(), "Message {} should be published successfully", i ); } else { assert!( - result.is_err(), + receiver.await.is_err(), "Message 11 should fail with cancellation error" ); - assert_eq!( - result.err().unwrap().to_string(), - "ISB Error - Shutdown signal received", - ); } } @@ -677,14 +706,12 @@ mod tests { let cancel_token = CancellationToken::new(); let writer = JetstreamWriter::new( - stream_name.to_string(), - 0, + vec![(stream_name.to_string(), 0)], BufferWriterConfig { max_length: 100, ..Default::default() }, context.clone(), - 500, cancel_token.clone(), ); @@ -703,13 +730,23 @@ mod tests { } let start_time = Instant::now(); - while !writer.is_full.load(Ordering::Relaxed) && start_time.elapsed().as_millis() < 1000 { + while !writer + .is_full + .get(stream_name) + .map(|is_full| is_full.load(Ordering::Relaxed)) + .unwrap() + && start_time.elapsed().as_millis() < 1000 + { sleep(Duration::from_millis(5)).await; } // Verify the is_full flag assert!( - writer.is_full.load(Ordering::Relaxed), + writer 
+ .is_full + .get(stream_name) + .map(|is_full| is_full.load(Ordering::Relaxed)) + .unwrap(), "Buffer should be full after publishing messages" ); diff --git a/rust/numaflow-core/src/shared.rs b/rust/numaflow-core/src/shared.rs index 63753fe858..0117040c49 100644 --- a/rust/numaflow-core/src/shared.rs +++ b/rust/numaflow-core/src/shared.rs @@ -1,2 +1,12 @@ -pub mod server_info; -pub mod utils; +/// All SDKs have to provide server info for all gRPC endpoints, so there is a lot of share. +pub(crate) mod server_info; + +/// All utilities related to gRPC. +pub(crate) mod grpc; + +/// Start metrics servers, pending readers, and possible other metrics related helpers. +pub(crate) mod metrics; + +/// Shared methods for creating Sources, Sinks, Transformers, etc. as they are required for both +/// MonoVertex and Pipeline. +pub(crate) mod create_components; diff --git a/rust/numaflow-core/src/shared/create_components.rs b/rust/numaflow-core/src/shared/create_components.rs new file mode 100644 index 0000000000..b09f243de5 --- /dev/null +++ b/rust/numaflow-core/src/shared/create_components.rs @@ -0,0 +1,402 @@ +use std::time::Duration; + +use numaflow_pb::clients::sink::sink_client::SinkClient; +use numaflow_pb::clients::source::source_client::SourceClient; +use numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; +use tokio_util::sync::CancellationToken; +use tonic::transport::Channel; + +use crate::config::components::sink::{SinkConfig, SinkType}; +use crate::config::components::source::{SourceConfig, SourceType}; +use crate::config::components::transformer::TransformerConfig; +use crate::shared::grpc; +use crate::shared::server_info::{sdk_server_info, ContainerType}; +use crate::sink::{SinkClientType, SinkWriter, SinkWriterBuilder}; +use crate::source::generator::new_generator; +use crate::source::pulsar::new_pulsar_source; +use crate::source::user_defined::new_source; +use crate::source::Source; +use crate::transformer::Transformer; 
+use crate::{config, error, metrics, source}; + +/// Creates a sink writer based on the configuration +pub(crate) async fn create_sink_writer( + batch_size: usize, + read_timeout: Duration, + primary_sink: SinkConfig, + fallback_sink: Option, + cln_token: &CancellationToken, +) -> error::Result<( + SinkWriter, + Option>, + Option>, +)> { + let (sink_writer_builder, sink_rpc_client) = match primary_sink.sink_type.clone() { + SinkType::Log(_) => ( + SinkWriterBuilder::new(batch_size, read_timeout, SinkClientType::Log), + None, + ), + SinkType::Blackhole(_) => ( + SinkWriterBuilder::new(batch_size, read_timeout, SinkClientType::Blackhole), + None, + ), + SinkType::UserDefined(ud_config) => { + let sink_server_info = + sdk_server_info(ud_config.server_info_path.clone().into(), cln_token.clone()) + .await?; + + let metric_labels = metrics::sdk_info_labels( + config::get_component_type().to_string(), + config::get_vertex_name().to_string(), + sink_server_info.language, + sink_server_info.version, + ContainerType::Sourcer.to_string(), + ); + + metrics::global_metrics() + .sdk_info + .get_or_create(&metric_labels) + .set(1); + + let mut sink_grpc_client = SinkClient::new( + grpc::create_rpc_channel(ud_config.socket_path.clone().into()).await?, + ) + .max_encoding_message_size(ud_config.grpc_max_message_size) + .max_encoding_message_size(ud_config.grpc_max_message_size); + grpc::wait_until_sink_ready(cln_token, &mut sink_grpc_client).await?; + ( + SinkWriterBuilder::new( + batch_size, + read_timeout, + SinkClientType::UserDefined(sink_grpc_client.clone()), + ) + .retry_config(primary_sink.retry_config.unwrap_or_default()), + Some(sink_grpc_client), + ) + } + }; + + if let Some(fb_sink) = fallback_sink { + return match fb_sink.sink_type.clone() { + SinkType::Log(_) => Ok(( + sink_writer_builder + .fb_sink_client(SinkClientType::Log) + .build() + .await?, + sink_rpc_client.clone(), + None, + )), + SinkType::Blackhole(_) => Ok(( + sink_writer_builder + 
.fb_sink_client(SinkClientType::Blackhole) + .build() + .await?, + sink_rpc_client.clone(), + None, + )), + SinkType::UserDefined(ud_config) => { + let fb_server_info = + sdk_server_info(ud_config.server_info_path.clone().into(), cln_token.clone()) + .await?; + + let metric_labels = metrics::sdk_info_labels( + config::get_component_type().to_string(), + config::get_vertex_name().to_string(), + fb_server_info.language, + fb_server_info.version, + ContainerType::Sourcer.to_string(), + ); + + metrics::global_metrics() + .sdk_info + .get_or_create(&metric_labels) + .set(1); + + let mut sink_grpc_client = SinkClient::new( + grpc::create_rpc_channel(ud_config.socket_path.clone().into()).await?, + ) + .max_encoding_message_size(ud_config.grpc_max_message_size) + .max_encoding_message_size(ud_config.grpc_max_message_size); + grpc::wait_until_sink_ready(cln_token, &mut sink_grpc_client).await?; + + Ok(( + sink_writer_builder + .fb_sink_client(SinkClientType::UserDefined(sink_grpc_client.clone())) + .build() + .await?, + sink_rpc_client.clone(), + Some(sink_grpc_client), + )) + } + }; + } + Ok((sink_writer_builder.build().await?, sink_rpc_client, None)) +} + +/// Creates a transformer if it is configured +pub async fn create_transformer( + batch_size: usize, + transformer_config: Option, + cln_token: CancellationToken, +) -> error::Result<(Option, Option>)> { + if let Some(transformer_config) = transformer_config { + if let config::components::transformer::TransformerType::UserDefined(ud_transformer) = + &transformer_config.transformer_type + { + let server_info = sdk_server_info( + ud_transformer.server_info_path.clone().into(), + cln_token.clone(), + ) + .await?; + let metric_labels = metrics::sdk_info_labels( + config::get_component_type().to_string(), + config::get_vertex_name().to_string(), + server_info.language, + server_info.version, + ContainerType::Sourcer.to_string(), + ); + metrics::global_metrics() + .sdk_info + .get_or_create(&metric_labels) + .set(1); + + let 
mut transformer_grpc_client = SourceTransformClient::new( + grpc::create_rpc_channel(ud_transformer.socket_path.clone().into()).await?, + ) + .max_encoding_message_size(ud_transformer.grpc_max_message_size) + .max_encoding_message_size(ud_transformer.grpc_max_message_size); + grpc::wait_until_transformer_ready(&cln_token, &mut transformer_grpc_client).await?; + return Ok(( + Some( + Transformer::new( + batch_size, + transformer_config.concurrency, + transformer_grpc_client.clone(), + ) + .await?, + ), + Some(transformer_grpc_client), + )); + } + } + Ok((None, None)) +} + +/// Creates a source type based on the configuration +pub async fn create_source( + batch_size: usize, + read_timeout: Duration, + source_config: &SourceConfig, + cln_token: CancellationToken, +) -> error::Result<(Source, Option>)> { + match &source_config.source_type { + SourceType::Generator(generator_config) => { + let (generator_read, generator_ack, generator_lag) = + new_generator(generator_config.clone(), batch_size)?; + Ok(( + Source::new( + batch_size, + source::SourceType::Generator(generator_read, generator_ack, generator_lag), + ), + None, + )) + } + SourceType::UserDefined(udsource_config) => { + let server_info = sdk_server_info( + udsource_config.server_info_path.clone().into(), + cln_token.clone(), + ) + .await?; + + let metric_labels = metrics::sdk_info_labels( + config::get_component_type().to_string(), + config::get_vertex_name().to_string(), + server_info.language, + server_info.version, + ContainerType::Sourcer.to_string(), + ); + metrics::global_metrics() + .sdk_info + .get_or_create(&metric_labels) + .set(1); + + // TODO: Add sdk info metric + let mut source_grpc_client = SourceClient::new( + grpc::create_rpc_channel(udsource_config.socket_path.clone().into()).await?, + ) + .max_encoding_message_size(udsource_config.grpc_max_message_size) + .max_encoding_message_size(udsource_config.grpc_max_message_size); + grpc::wait_until_source_ready(&cln_token, &mut 
source_grpc_client).await?; + let (ud_read, ud_ack, ud_lag) = + new_source(source_grpc_client.clone(), batch_size, read_timeout).await?; + Ok(( + Source::new( + batch_size, + source::SourceType::UserDefinedSource(ud_read, ud_ack, ud_lag), + ), + Some(source_grpc_client), + )) + } + SourceType::Pulsar(pulsar_config) => { + let pulsar = new_pulsar_source(pulsar_config.clone(), batch_size, read_timeout).await?; + Ok(( + Source::new(batch_size, source::SourceType::Pulsar(pulsar)), + None, + )) + } + } +} + +// Retrieve value from mounted secret volume +// "/var/numaflow/secrets/${secretRef.name}/${secretRef.key}" is expected to be the file path +pub(crate) fn get_secret_from_volume(name: &str, key: &str) -> Result { + let path = format!("/var/numaflow/secrets/{name}/{key}"); + let val = std::fs::read_to_string(path.clone()) + .map_err(|e| format!("Reading secret from file {path}: {e:?}"))?; + Ok(val.trim().into()) +} + +#[cfg(test)] +mod tests { + use std::time::Duration; + + use numaflow::source::{Message, Offset, SourceReadRequest}; + use numaflow::{sink, source, sourcetransform}; + use numaflow_pb::clients::sink::sink_client::SinkClient; + use numaflow_pb::clients::source::source_client::SourceClient; + use numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; + use tokio::sync::mpsc; + use tokio::sync::mpsc::Sender; + use tokio::time::sleep; + use tokio_util::sync::CancellationToken; + + use crate::shared::grpc::{ + create_rpc_channel, wait_until_sink_ready, wait_until_source_ready, + wait_until_transformer_ready, + }; + + struct SimpleSource {} + + #[tonic::async_trait] + impl source::Sourcer for SimpleSource { + async fn read(&self, _request: SourceReadRequest, _transmitter: Sender) {} + + async fn ack(&self, _offset: Vec) {} + + async fn pending(&self) -> usize { + 0 + } + + async fn partitions(&self) -> Option> { + Some(vec![0]) + } + } + + struct SimpleTransformer; + #[tonic::async_trait] + impl 
sourcetransform::SourceTransformer for SimpleTransformer { + async fn transform( + &self, + _input: sourcetransform::SourceTransformRequest, + ) -> Vec { + vec![] + } + } + + struct InMemorySink {} + + #[tonic::async_trait] + impl sink::Sinker for InMemorySink { + async fn sink(&self, mut _input: mpsc::Receiver) -> Vec { + vec![] + } + } + + #[tokio::test] + async fn test_wait_until_ready() { + // Start the source server + let (source_shutdown_tx, source_shutdown_rx) = tokio::sync::oneshot::channel(); + let tmp_dir = tempfile::TempDir::new().unwrap(); + let source_sock_file = tmp_dir.path().join("source.sock"); + let server_info_file = tmp_dir.path().join("source-server-info"); + + let server_info = server_info_file.clone(); + let source_socket = source_sock_file.clone(); + let source_server_handle = tokio::spawn(async move { + source::Server::new(SimpleSource {}) + .with_socket_file(source_socket) + .with_server_info_file(server_info) + .start_with_shutdown(source_shutdown_rx) + .await + .unwrap(); + }); + + // Start the sink server + let (sink_shutdown_tx, sink_shutdown_rx) = tokio::sync::oneshot::channel(); + let sink_tmp_dir = tempfile::TempDir::new().unwrap(); + let sink_sock_file = sink_tmp_dir.path().join("sink.sock"); + let server_info_file = sink_tmp_dir.path().join("sink-server-info"); + + let server_info = server_info_file.clone(); + let sink_socket = sink_sock_file.clone(); + let sink_server_handle = tokio::spawn(async move { + sink::Server::new(InMemorySink {}) + .with_socket_file(sink_socket) + .with_server_info_file(server_info) + .start_with_shutdown(sink_shutdown_rx) + .await + .unwrap(); + }); + + // Start the transformer server + let (transformer_shutdown_tx, transformer_shutdown_rx) = tokio::sync::oneshot::channel(); + let tmp_dir = tempfile::TempDir::new().unwrap(); + let transformer_sock_file = tmp_dir.path().join("transformer.sock"); + let server_info_file = tmp_dir.path().join("transformer-server-info"); + + let server_info = 
server_info_file.clone(); + let transformer_socket = transformer_sock_file.clone(); + let transformer_server_handle = tokio::spawn(async move { + sourcetransform::Server::new(SimpleTransformer {}) + .with_socket_file(transformer_socket) + .with_server_info_file(server_info) + .start_with_shutdown(transformer_shutdown_rx) + .await + .unwrap(); + }); + + // Wait for the servers to start + sleep(Duration::from_millis(100)).await; + + let cln_token = CancellationToken::new(); + + let mut source_grpc_client = + SourceClient::new(create_rpc_channel(source_sock_file.clone()).await.unwrap()); + wait_until_source_ready(&cln_token, &mut source_grpc_client) + .await + .unwrap(); + + let mut sink_grpc_client = + SinkClient::new(create_rpc_channel(sink_sock_file.clone()).await.unwrap()); + wait_until_sink_ready(&cln_token, &mut sink_grpc_client) + .await + .unwrap(); + + let mut transformer_grpc_client = Some(SourceTransformClient::new( + create_rpc_channel(transformer_sock_file.clone()) + .await + .unwrap(), + )); + wait_until_transformer_ready(&cln_token, transformer_grpc_client.as_mut().unwrap()) + .await + .unwrap(); + + source_shutdown_tx.send(()).unwrap(); + sink_shutdown_tx.send(()).unwrap(); + transformer_shutdown_tx.send(()).unwrap(); + + source_server_handle.await.unwrap(); + sink_server_handle.await.unwrap(); + transformer_server_handle.await.unwrap(); + } +} diff --git a/rust/numaflow-core/src/shared/grpc.rs b/rust/numaflow-core/src/shared/grpc.rs new file mode 100644 index 0000000000..d6246b60a6 --- /dev/null +++ b/rust/numaflow-core/src/shared/grpc.rs @@ -0,0 +1,127 @@ +use std::path::PathBuf; +use std::time::Duration; + +use axum::http::Uri; +use backoff::retry::Retry; +use backoff::strategy::fixed; +use chrono::{DateTime, TimeZone, Timelike, Utc}; +use numaflow_pb::clients::sink::sink_client::SinkClient; +use numaflow_pb::clients::source::source_client::SourceClient; +use numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; 
+use prost_types::Timestamp; +use tokio::net::UnixStream; +use tokio::time::sleep; +use tokio_util::sync::CancellationToken; +use tonic::transport::{Channel, Endpoint}; +use tonic::Request; +use tower::service_fn; +use tracing::info; + +use crate::error; +use crate::error::Error; + +/// Waits until the source server is ready, by doing health checks +pub(crate) async fn wait_until_source_ready( + cln_token: &CancellationToken, + client: &mut SourceClient, +) -> error::Result<()> { + info!("Waiting for source client to be ready..."); + loop { + if cln_token.is_cancelled() { + return Err(Error::Forwarder( + "Cancellation token is cancelled".to_string(), + )); + } + match client.is_ready(Request::new(())).await { + Ok(_) => break, + Err(_) => sleep(Duration::from_secs(1)).await, + } + info!("Waiting for source client to be ready..."); + } + Ok(()) +} + +/// Waits until the sink server is ready, by doing health checks +pub(crate) async fn wait_until_sink_ready( + cln_token: &CancellationToken, + client: &mut SinkClient, +) -> error::Result<()> { + loop { + if cln_token.is_cancelled() { + return Err(Error::Forwarder( + "Cancellation token is cancelled".to_string(), + )); + } + match client.is_ready(Request::new(())).await { + Ok(_) => break, + Err(_) => sleep(Duration::from_secs(1)).await, + } + info!("Waiting for sink client to be ready..."); + } + Ok(()) +} + +/// Waits until the transformer server is ready, by doing health checks +pub(crate) async fn wait_until_transformer_ready( + cln_token: &CancellationToken, + client: &mut SourceTransformClient, +) -> error::Result<()> { + loop { + if cln_token.is_cancelled() { + return Err(Error::Forwarder( + "Cancellation token is cancelled".to_string(), + )); + } + match client.is_ready(Request::new(())).await { + Ok(_) => break, + Err(_) => sleep(Duration::from_secs(1)).await, + } + info!("Waiting for transformer client to be ready..."); + } + Ok(()) +} + +pub(crate) fn prost_timestamp_from_utc(t: DateTime) -> Option { + 
Some(Timestamp { + seconds: t.timestamp(), + nanos: t.nanosecond() as i32, + }) +} + +pub(crate) async fn create_rpc_channel(socket_path: PathBuf) -> error::Result { + const RECONNECT_INTERVAL: u64 = 1000; + const MAX_RECONNECT_ATTEMPTS: usize = 5; + + let interval = fixed::Interval::from_millis(RECONNECT_INTERVAL).take(MAX_RECONNECT_ATTEMPTS); + + let channel = Retry::retry( + interval, + || async { connect_with_uds(socket_path.clone()).await }, + |_: &Error| true, + ) + .await?; + Ok(channel) +} + +/// Connects to the UDS socket and returns a channel +pub(crate) async fn connect_with_uds(uds_path: PathBuf) -> error::Result { + let channel = Endpoint::try_from("http://[::]:50051") + .map_err(|e| Error::Connection(format!("Failed to create endpoint: {:?}", e)))? + .connect_with_connector(service_fn(move |_: Uri| { + let uds_socket = uds_path.clone(); + async move { + Ok::<_, std::io::Error>(hyper_util::rt::TokioIo::new( + UnixStream::connect(uds_socket).await?, + )) + } + })) + .await + .map_err(|e| Error::Connection(format!("Failed to connect: {:?}", e)))?; + Ok(channel) +} + +pub(crate) fn utc_from_timestamp(t: Option) -> DateTime { + t.map_or(Utc.timestamp_nanos(-1), |t| { + DateTime::from_timestamp(t.seconds, t.nanos as u32).unwrap_or(Utc.timestamp_nanos(-1)) + }) +} diff --git a/rust/numaflow-core/src/shared/metrics.rs b/rust/numaflow-core/src/shared/metrics.rs new file mode 100644 index 0000000000..0fe06e05d4 --- /dev/null +++ b/rust/numaflow-core/src/shared/metrics.rs @@ -0,0 +1,44 @@ +use std::net::SocketAddr; +use std::time::Duration; + +use tokio::task::JoinHandle; +use tracing::error; + +use crate::config::components::metrics::MetricsConfig; +use crate::metrics::{ + start_metrics_https_server, PendingReader, PendingReaderBuilder, UserDefinedContainerState, +}; +use crate::source::Source; + +/// Starts the metrics server +pub(crate) async fn start_metrics_server( + metrics_config: MetricsConfig, + metrics_state: UserDefinedContainerState, +) -> 
JoinHandle<()> { + tokio::spawn(async move { + // Start the metrics server, which server the prometheus metrics. + let metrics_addr: SocketAddr = + format!("0.0.0.0:{}", metrics_config.metrics_server_listen_port) + .parse() + .expect("Invalid address"); + + if let Err(e) = start_metrics_https_server(metrics_addr, metrics_state).await { + error!("metrics server error: {:?}", e); + } + }) +} + +/// Creates a pending reader +pub(crate) async fn create_pending_reader( + metrics_config: &MetricsConfig, + lag_reader_grpc_client: Source, +) -> PendingReader { + PendingReaderBuilder::new(lag_reader_grpc_client) + .lag_checking_interval(Duration::from_secs( + metrics_config.lag_check_interval_in_secs.into(), + )) + .refresh_interval(Duration::from_secs( + metrics_config.lag_refresh_interval_in_secs.into(), + )) + .build() +} diff --git a/rust/numaflow-core/src/shared/server_info.rs b/rust/numaflow-core/src/shared/server_info.rs index 9e7cf0b04f..40ec6b37d6 100644 --- a/rust/numaflow-core/src/shared/server_info.rs +++ b/rust/numaflow-core/src/shared/server_info.rs @@ -88,7 +88,7 @@ pub(crate) struct ServerInfo { pub(crate) metadata: Option>, // Metadata is optional } -/// check_for_server_compatibility waits until the server info file is ready and check whether the +/// sdk_server_info waits until the server info file is ready and check whether the /// server is compatible with Numaflow. 
pub(crate) async fn sdk_server_info( file_path: PathBuf, diff --git a/rust/numaflow-core/src/shared/utils.rs b/rust/numaflow-core/src/shared/utils.rs deleted file mode 100644 index c035c67fce..0000000000 --- a/rust/numaflow-core/src/shared/utils.rs +++ /dev/null @@ -1,354 +0,0 @@ -use std::net::SocketAddr; -use std::path::PathBuf; -use std::time::Duration; - -use axum::http::Uri; -use backoff::retry::Retry; -use backoff::strategy::fixed; -use chrono::{DateTime, TimeZone, Timelike, Utc}; -use numaflow_pb::clients::sink::sink_client::SinkClient; -use numaflow_pb::clients::source::source_client::SourceClient; -use numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; -use prost_types::Timestamp; -use tokio::net::UnixStream; -use tokio::task::JoinHandle; -use tokio::time::sleep; -use tokio_util::sync::CancellationToken; -use tonic::transport::{Channel, Endpoint}; -use tonic::Request; -use tower::service_fn; -use tracing::info; - -use crate::config::components::metrics::MetricsConfig; -use crate::config::components::sink::SinkType; -use crate::config::monovertex::MonovertexConfig; -use crate::error; -use crate::metrics::{ - start_metrics_https_server, PendingReader, PendingReaderBuilder, UserDefinedContainerState, -}; -use crate::shared::server_info::sdk_server_info; -use crate::sink::{SinkClientType, SinkHandle}; -use crate::source::SourceHandle; -use crate::Error; -use crate::Result; - -pub(crate) async fn start_metrics_server( - metrics_config: MetricsConfig, - metrics_state: UserDefinedContainerState, -) -> JoinHandle<()> { - tokio::spawn(async move { - // Start the metrics server, which server the prometheus metrics. 
- let metrics_addr: SocketAddr = - format!("0.0.0.0:{}", metrics_config.metrics_server_listen_port) - .parse() - .expect("Invalid address"); - - if let Err(e) = start_metrics_https_server(metrics_addr, metrics_state).await { - error!("metrics server error: {:?}", e); - } - }) -} - -pub(crate) async fn create_pending_reader( - mvtx_config: &MonovertexConfig, - lag_reader_grpc_client: SourceHandle, -) -> PendingReader { - PendingReaderBuilder::new( - mvtx_config.name.clone(), - mvtx_config.replica, - lag_reader_grpc_client, - ) - .lag_checking_interval(Duration::from_secs( - mvtx_config.metrics_config.lag_check_interval_in_secs.into(), - )) - .refresh_interval(Duration::from_secs( - mvtx_config - .metrics_config - .lag_refresh_interval_in_secs - .into(), - )) - .build() -} -pub(crate) async fn wait_until_source_ready( - cln_token: &CancellationToken, - client: &mut SourceClient, -) -> Result<()> { - info!("Waiting for source client to be ready..."); - loop { - if cln_token.is_cancelled() { - return Err(Error::Forwarder( - "Cancellation token is cancelled".to_string(), - )); - } - match client.is_ready(Request::new(())).await { - Ok(_) => break, - Err(_) => sleep(Duration::from_secs(1)).await, - } - info!("Waiting for source client to be ready..."); - } - Ok(()) -} - -pub(crate) async fn wait_until_sink_ready( - cln_token: &CancellationToken, - client: &mut SinkClient, -) -> Result<()> { - loop { - if cln_token.is_cancelled() { - return Err(Error::Forwarder( - "Cancellation token is cancelled".to_string(), - )); - } - match client.is_ready(Request::new(())).await { - Ok(_) => break, - Err(_) => sleep(Duration::from_secs(1)).await, - } - info!("Waiting for sink client to be ready..."); - } - Ok(()) -} - -pub(crate) async fn wait_until_transformer_ready( - cln_token: &CancellationToken, - client: &mut SourceTransformClient, -) -> Result<()> { - loop { - if cln_token.is_cancelled() { - return Err(Error::Forwarder( - "Cancellation token is cancelled".to_string(), - )); - 
} - match client.is_ready(Request::new(())).await { - Ok(_) => break, - Err(_) => sleep(Duration::from_secs(1)).await, - } - info!("Waiting for transformer client to be ready..."); - } - Ok(()) -} - -pub(crate) fn utc_from_timestamp(t: Option) -> DateTime { - t.map_or(Utc.timestamp_nanos(-1), |t| { - DateTime::from_timestamp(t.seconds, t.nanos as u32).unwrap_or(Utc.timestamp_nanos(-1)) - }) -} - -pub(crate) fn prost_timestamp_from_utc(t: DateTime) -> Option { - Some(Timestamp { - seconds: t.timestamp(), - nanos: t.nanosecond() as i32, - }) -} - -pub(crate) async fn create_rpc_channel(socket_path: PathBuf) -> Result { - const RECONNECT_INTERVAL: u64 = 1000; - const MAX_RECONNECT_ATTEMPTS: usize = 5; - - let interval = fixed::Interval::from_millis(RECONNECT_INTERVAL).take(MAX_RECONNECT_ATTEMPTS); - - let channel = Retry::retry( - interval, - || async { connect_with_uds(socket_path.clone()).await }, - |_: &Error| true, - ) - .await?; - Ok(channel) -} - -pub(crate) async fn connect_with_uds(uds_path: PathBuf) -> Result { - let channel = Endpoint::try_from("http://[::]:50051") - .map_err(|e| Error::Connection(format!("Failed to create endpoint: {:?}", e)))? 
- .connect_with_connector(service_fn(move |_: Uri| { - let uds_socket = uds_path.clone(); - async move { - Ok::<_, std::io::Error>(hyper_util::rt::TokioIo::new( - UnixStream::connect(uds_socket).await?, - )) - } - })) - .await - .map_err(|e| Error::Connection(format!("Failed to connect: {:?}", e)))?; - Ok(channel) -} - -pub(crate) async fn create_sink_handle( - batch_size: usize, - sink_type: &SinkType, - cln_token: &CancellationToken, -) -> Result<(SinkHandle, Option>)> { - match sink_type { - SinkType::Log(_) => Ok(( - SinkHandle::new(SinkClientType::Log, batch_size).await?, - None, - )), - SinkType::Blackhole(_) => Ok(( - SinkHandle::new(SinkClientType::Blackhole, batch_size).await?, - None, - )), - SinkType::UserDefined(ud_config) => { - _ = sdk_server_info(ud_config.server_info_path.clone().into(), cln_token.clone()) - .await?; - let mut sink_grpc_client = - SinkClient::new(create_rpc_channel(ud_config.socket_path.clone().into()).await?) - .max_encoding_message_size(ud_config.grpc_max_message_size) - .max_encoding_message_size(ud_config.grpc_max_message_size); - wait_until_sink_ready(cln_token, &mut sink_grpc_client).await?; - // TODO: server info? 
- - Ok(( - SinkHandle::new( - SinkClientType::UserDefined(sink_grpc_client.clone()), - batch_size, - ) - .await?, - Some(sink_grpc_client), - )) - } - } -} - -// Retrieve value from mounted secret volume -// "/var/numaflow/secrets/${secretRef.name}/${secretRef.key}" is expected to be the file path -pub(crate) fn get_secret_from_volume(name: &str, key: &str) -> std::result::Result { - let path = format!("/var/numaflow/secrets/{name}/{key}"); - let val = std::fs::read_to_string(path.clone()) - .map_err(|e| format!("Reading secret from file {path}: {e:?}"))?; - Ok(val.trim().into()) -} - -#[cfg(test)] -mod tests { - use numaflow::source::{Message, Offset, SourceReadRequest}; - use numaflow::{sink, source, sourcetransform}; - use tokio::sync::mpsc; - use tokio::sync::mpsc::Sender; - use tokio_util::sync::CancellationToken; - - use super::*; - use crate::shared::utils::create_rpc_channel; - - struct SimpleSource {} - - #[tonic::async_trait] - impl source::Sourcer for SimpleSource { - async fn read(&self, _request: SourceReadRequest, _transmitter: Sender) {} - - async fn ack(&self, _offset: Vec) {} - - async fn pending(&self) -> usize { - 0 - } - - async fn partitions(&self) -> Option> { - Some(vec![0]) - } - } - - struct SimpleTransformer; - #[tonic::async_trait] - impl sourcetransform::SourceTransformer for SimpleTransformer { - async fn transform( - &self, - _input: sourcetransform::SourceTransformRequest, - ) -> Vec { - vec![] - } - } - - struct InMemorySink {} - - #[tonic::async_trait] - impl sink::Sinker for InMemorySink { - async fn sink(&self, mut _input: mpsc::Receiver) -> Vec { - vec![] - } - } - - #[tokio::test] - async fn test_wait_until_ready() { - // Start the source server - let (source_shutdown_tx, source_shutdown_rx) = tokio::sync::oneshot::channel(); - let tmp_dir = tempfile::TempDir::new().unwrap(); - let source_sock_file = tmp_dir.path().join("source.sock"); - let server_info_file = tmp_dir.path().join("source-server-info"); - - let server_info = 
server_info_file.clone(); - let source_socket = source_sock_file.clone(); - let source_server_handle = tokio::spawn(async move { - source::Server::new(SimpleSource {}) - .with_socket_file(source_socket) - .with_server_info_file(server_info) - .start_with_shutdown(source_shutdown_rx) - .await - .unwrap(); - }); - - // Start the sink server - let (sink_shutdown_tx, sink_shutdown_rx) = tokio::sync::oneshot::channel(); - let sink_tmp_dir = tempfile::TempDir::new().unwrap(); - let sink_sock_file = sink_tmp_dir.path().join("sink.sock"); - let server_info_file = sink_tmp_dir.path().join("sink-server-info"); - - let server_info = server_info_file.clone(); - let sink_socket = sink_sock_file.clone(); - let sink_server_handle = tokio::spawn(async move { - sink::Server::new(InMemorySink {}) - .with_socket_file(sink_socket) - .with_server_info_file(server_info) - .start_with_shutdown(sink_shutdown_rx) - .await - .unwrap(); - }); - - // Start the transformer server - let (transformer_shutdown_tx, transformer_shutdown_rx) = tokio::sync::oneshot::channel(); - let tmp_dir = tempfile::TempDir::new().unwrap(); - let transformer_sock_file = tmp_dir.path().join("transformer.sock"); - let server_info_file = tmp_dir.path().join("transformer-server-info"); - - let server_info = server_info_file.clone(); - let transformer_socket = transformer_sock_file.clone(); - let transformer_server_handle = tokio::spawn(async move { - sourcetransform::Server::new(SimpleTransformer {}) - .with_socket_file(transformer_socket) - .with_server_info_file(server_info) - .start_with_shutdown(transformer_shutdown_rx) - .await - .unwrap(); - }); - - // Wait for the servers to start - sleep(Duration::from_millis(100)).await; - - let cln_token = CancellationToken::new(); - - let mut source_grpc_client = - SourceClient::new(create_rpc_channel(source_sock_file.clone()).await.unwrap()); - wait_until_source_ready(&cln_token, &mut source_grpc_client) - .await - .unwrap(); - - let mut sink_grpc_client = - 
SinkClient::new(create_rpc_channel(sink_sock_file.clone()).await.unwrap()); - wait_until_sink_ready(&cln_token, &mut sink_grpc_client) - .await - .unwrap(); - - let mut transformer_grpc_client = Some(SourceTransformClient::new( - create_rpc_channel(transformer_sock_file.clone()) - .await - .unwrap(), - )); - wait_until_transformer_ready(&cln_token, transformer_grpc_client.as_mut().unwrap()) - .await - .unwrap(); - - source_shutdown_tx.send(()).unwrap(); - sink_shutdown_tx.send(()).unwrap(); - transformer_shutdown_tx.send(()).unwrap(); - - source_server_handle.await.unwrap(); - sink_server_handle.await.unwrap(); - transformer_server_handle.await.unwrap(); - } -} diff --git a/rust/numaflow-core/src/sink.rs b/rust/numaflow-core/src/sink.rs index b289dc94c1..144cefef11 100644 --- a/rust/numaflow-core/src/sink.rs +++ b/rust/numaflow-core/src/sink.rs @@ -1,4 +1,5 @@ use std::collections::HashMap; +use std::time::Duration; use numaflow_pb::clients::sink::sink_client::SinkClient; use tokio::sync::mpsc::Receiver; @@ -6,14 +7,14 @@ use tokio::sync::{mpsc, oneshot}; use tokio::task::JoinHandle; use tokio::time::sleep; use tokio::{pin, time}; +use tokio_stream::wrappers::ReceiverStream; use tokio_stream::StreamExt; use tokio_util::sync::CancellationToken; use tonic::transport::Channel; -use tracing::{debug, error, warn}; +use tracing::{debug, error, info, warn}; use user_defined::UserDefinedSink; use crate::config::components::sink::{OnFailureStrategy, RetryConfig}; -use crate::config::pipeline::SinkVtxConfig; use crate::error::Error; use crate::message::{Message, ReadAck, ReadMessage, ResponseFromSink, ResponseStatusFromSink}; use crate::Result; @@ -35,6 +36,7 @@ pub(crate) trait LocalSink { async fn sink(&mut self, messages: Vec) -> Result>; } +/// ActorMessage is a message that is sent to the SinkActor. enum ActorMessage { Sink { messages: Vec, @@ -42,8 +44,9 @@ enum ActorMessage { }, } +/// SinkActor is an actor that handles messages sent to the Sink. 
struct SinkActor { - actor_messages: mpsc::Receiver, + actor_messages: Receiver, sink: T, } @@ -51,7 +54,7 @@ impl SinkActor where T: Sink, { - fn new(actor_messages: mpsc::Receiver, sink: T) -> Self { + fn new(actor_messages: Receiver, sink: T) -> Self { Self { actor_messages, sink, @@ -71,21 +74,57 @@ where } } -#[derive(Clone)] -pub(crate) struct SinkHandle { - sender: mpsc::Sender, -} - pub(crate) enum SinkClientType { Log, Blackhole, UserDefined(SinkClient), } -impl SinkHandle { - pub(crate) async fn new(sink_client: SinkClientType, batch_size: usize) -> Result { - let (sender, receiver) = mpsc::channel(batch_size); - match sink_client { +/// SinkWriter is a writer that writes messages to the Sink. +#[derive(Clone)] +pub(super) struct SinkWriter { + batch_size: usize, + chunk_timeout: Duration, + retry_config: RetryConfig, + sink_handle: mpsc::Sender, + fb_sink_handle: Option>, +} + +/// SinkWriterBuilder is a builder to build a SinkWriter. +pub struct SinkWriterBuilder { + batch_size: usize, + chunk_timeout: Duration, + retry_config: RetryConfig, + sink_client: SinkClientType, + fb_sink_client: Option, +} + +impl SinkWriterBuilder { + pub fn new(batch_size: usize, chunk_timeout: Duration, sink_type: SinkClientType) -> Self { + Self { + batch_size, + chunk_timeout, + retry_config: RetryConfig::default(), + sink_client: sink_type, + fb_sink_client: None, + } + } + + pub fn retry_config(mut self, retry_config: RetryConfig) -> Self { + self.retry_config = retry_config; + self + } + + pub fn fb_sink_client(mut self, fb_sink_client: SinkClientType) -> Self { + self.fb_sink_client = Some(fb_sink_client); + self + } + + /// Build the SinkWriter, it also starts the SinkActor to handle messages. 
+ pub async fn build(self) -> Result { + let (sender, receiver) = mpsc::channel(self.batch_size); + + match self.sink_client { SinkClientType::Log => { let log_sink = log::LogSink; tokio::spawn(async { @@ -114,87 +153,140 @@ impl SinkHandle { }); } }; - Ok(Self { sender }) + + let fb_sink_handle = if let Some(fb_sink_client) = self.fb_sink_client { + let (fb_sender, fb_receiver) = mpsc::channel(self.batch_size); + match fb_sink_client { + SinkClientType::Log => { + let log_sink = log::LogSink; + tokio::spawn(async { + let mut actor = SinkActor::new(fb_receiver, log_sink); + while let Some(msg) = actor.actor_messages.recv().await { + actor.handle_message(msg).await; + } + }); + } + SinkClientType::Blackhole => { + let blackhole_sink = blackhole::BlackholeSink; + tokio::spawn(async { + let mut actor = SinkActor::new(fb_receiver, blackhole_sink); + while let Some(msg) = actor.actor_messages.recv().await { + actor.handle_message(msg).await; + } + }); + } + SinkClientType::UserDefined(sink_client) => { + let sink = UserDefinedSink::new(sink_client).await?; + tokio::spawn(async { + let mut actor = SinkActor::new(fb_receiver, sink); + while let Some(msg) = actor.actor_messages.recv().await { + actor.handle_message(msg).await; + } + }); + } + }; + Some(fb_sender) + } else { + None + }; + + Ok(SinkWriter { + batch_size: self.batch_size, + chunk_timeout: self.chunk_timeout, + retry_config: self.retry_config, + sink_handle: sender, + fb_sink_handle, + }) } +} - pub(crate) async fn sink(&self, messages: Vec) -> Result> { +impl SinkWriter { + /// Sink the messages to the Sink. 
+ async fn sink(&self, messages: Vec) -> Result> { let (tx, rx) = oneshot::channel(); let msg = ActorMessage::Sink { messages, respond_to: tx, }; - let _ = self.sender.send(msg).await; + let _ = self.sink_handle.send(msg).await; rx.await.unwrap() } -} -#[derive(Clone)] -pub(super) struct SinkWriter { - batch_size: usize, - read_timeout: time::Duration, - config: SinkVtxConfig, - sink_handle: SinkHandle, - fb_sink_handle: Option, -} + /// Sink the messages to the Fallback Sink. + async fn fb_sink(&self, messages: Vec) -> Result> { + if self.fb_sink_handle.is_none() { + return Err(Error::Sink( + "Response contains fallback messages but no fallback sink is configured" + .to_string(), + )); + } -impl SinkWriter { - pub(super) async fn new( - batch_size: usize, - read_timeout: time::Duration, - config: SinkVtxConfig, - sink_handle: SinkHandle, - fb_sink_handle: Option, - ) -> Result { - Ok(Self { - batch_size, - read_timeout, - config, - sink_handle, - fb_sink_handle, - }) + let (tx, rx) = oneshot::channel(); + let msg = ActorMessage::Sink { + messages, + respond_to: tx, + }; + let _ = self.fb_sink_handle.as_ref().unwrap().send(msg).await; + rx.await.unwrap() } - pub(super) async fn start( + /// Streaming write the messages to the Sink, it will keep writing messages until the stream is + /// closed or the cancellation token is triggered. 
+ pub(super) async fn streaming_write( &self, - messages_rx: Receiver, + messages_stream: ReceiverStream, cancellation_token: CancellationToken, ) -> Result>> { let handle: JoinHandle> = tokio::spawn({ let mut this = self.clone(); async move { - let chunk_stream = tokio_stream::wrappers::ReceiverStream::new(messages_rx) - .chunks_timeout(this.batch_size, this.read_timeout); + let chunk_stream = + messages_stream.chunks_timeout(this.batch_size, this.chunk_timeout); pin!(chunk_stream); - while let Some(batch) = chunk_stream.next().await { + let mut processed_msgs_count: usize = 0; + let mut last_logged_at = std::time::Instant::now(); + + loop { + let batch = match chunk_stream.next().await { + Some(batch) => batch, + None => { + break; + } + }; + if batch.is_empty() { continue; } - let messages: Vec = - batch.iter().map(|rm| rm.message.clone()).collect(); + let n = batch.len(); + let (messages, senders): (Vec<_>, Vec<_>) = + batch.into_iter().map(|rm| (rm.message, rm.ack)).unzip(); - match this - .write_to_sink(messages, cancellation_token.clone()) - .await - { + match this.write(messages, cancellation_token.clone()).await { Ok(_) => { - for rm in batch { - let _ = rm.ack.send(ReadAck::Ack); + for sender in senders { + let _ = sender.send(ReadAck::Ack); } } Err(e) => { error!(?e, "Error writing to sink"); - for rm in batch { - let _ = rm.ack.send(ReadAck::Nak); + for sender in senders { + let _ = sender.send(ReadAck::Nak); } } } - if cancellation_token.is_cancelled() { - warn!("Cancellation token is cancelled. 
Exiting SinkWriter"); - break; + processed_msgs_count += n; + if last_logged_at.elapsed().as_millis() >= 1000 { + info!( + "Processed {} messages at {:?}", + processed_msgs_count, + std::time::Instant::now() + ); + processed_msgs_count = 0; + last_logged_at = std::time::Instant::now(); } } @@ -204,8 +296,8 @@ impl SinkWriter { Ok(handle) } - // Writes the messages to the sink and handles fallback messages if present - async fn write_to_sink( + /// Write the messages to the Sink. + pub(crate) async fn write( &mut self, messages: Vec, cln_token: CancellationToken, @@ -223,12 +315,7 @@ impl SinkWriter { // only breaks out of this loop based on the retry strategy unless all the messages have been written to sink // successfully. - let retry_config = &self - .config - .sink_config - .retry_config - .clone() - .unwrap_or_default(); + let retry_config = &self.retry_config.clone(); loop { while attempts < retry_config.sink_max_retry_attempts { @@ -347,11 +434,8 @@ impl SinkWriter { messages_to_send: &mut Vec, retry_config: &RetryConfig, ) -> Result { - let start_time = time::Instant::now(); - match self.sink_handle.sink(messages_to_send.clone()).await { + match self.sink(messages_to_send.clone()).await { Ok(response) => { - debug!("Sink latency - {}ms", start_time.elapsed().as_millis()); - // create a map of id to result, since there is no strict requirement // for the udsink to return the results in the same order as the requests let result_map = response @@ -385,7 +469,7 @@ impl SinkWriter { return Ok(true); } - sleep(tokio::time::Duration::from_millis( + sleep(Duration::from_millis( retry_config.sink_retry_interval_in_ms as u64, )) .await; @@ -410,7 +494,6 @@ impl SinkWriter { )); } - let fallback_client = self.fb_sink_handle.as_mut().unwrap(); let mut attempts = 0; let mut fallback_error_map = HashMap::new(); // start with the original set of message to be sent. 
@@ -426,8 +509,8 @@ impl SinkWriter { let sleep_interval = default_retry.interval.unwrap(); while attempts < max_attempts { - let start_time = tokio::time::Instant::now(); - match fallback_client.sink(messages_to_send.clone()).await { + let start_time = time::Instant::now(); + match self.fb_sink(messages_to_send.clone()).await { Ok(fb_response) => { debug!( "Fallback sink latency - {}ms", @@ -496,3 +579,274 @@ impl SinkWriter { Ok(()) } } + +impl Drop for SinkWriter { + fn drop(&mut self) {} +} + +#[cfg(test)] +mod tests { + use chrono::Utc; + use numaflow::sink; + use tokio::time::Duration; + use tokio_util::sync::CancellationToken; + + use super::*; + use crate::message::{Message, MessageID}; + use crate::shared::grpc::create_rpc_channel; + + struct SimpleSink; + #[tonic::async_trait] + impl sink::Sinker for SimpleSink { + async fn sink(&self, mut input: Receiver) -> Vec { + let mut responses: Vec = Vec::new(); + while let Some(datum) = input.recv().await { + if datum.keys.first().unwrap() == "fallback" { + responses.push(sink::Response::fallback(datum.id)); + continue; + } else if datum.keys.first().unwrap() == "error" { + responses.push(sink::Response::failure( + datum.id, + "simple error".to_string(), + )); + } else { + responses.push(sink::Response::ok(datum.id)); + } + } + responses + } + } + + #[tokio::test] + async fn test_write() { + let mut sink_writer = + SinkWriterBuilder::new(10, Duration::from_secs(1), SinkClientType::Log) + .build() + .await + .unwrap(); + + let messages: Vec = (0..5) + .map(|i| Message { + keys: vec![format!("key_{}", i)], + value: format!("message {}", i).as_bytes().to_vec().into(), + offset: None, + event_time: Utc::now(), + id: MessageID { + vertex_name: "vertex".to_string(), + offset: format!("offset_{}", i), + index: i, + }, + headers: HashMap::new(), + }) + .collect(); + + let result = sink_writer + .write(messages.clone(), CancellationToken::new()) + .await; + assert!(result.is_ok()); + } + + #[tokio::test] + async fn 
test_streaming_write() { + let sink_writer = + SinkWriterBuilder::new(10, Duration::from_millis(100), SinkClientType::Log) + .build() + .await + .unwrap(); + + let messages: Vec = (0..10) + .map(|i| Message { + keys: vec![format!("key_{}", i)], + value: format!("message {}", i).as_bytes().to_vec().into(), + offset: None, + event_time: Utc::now(), + id: MessageID { + vertex_name: "vertex".to_string(), + offset: format!("offset_{}", i), + index: i, + }, + headers: HashMap::new(), + }) + .collect(); + + let (tx, rx) = mpsc::channel(10); + let mut ack_rxs = vec![]; + for msg in messages { + let (ack_tx, ack_rx) = oneshot::channel(); + let _ = tx + .send(ReadMessage { + message: msg, + ack: ack_tx, + }) + .await; + ack_rxs.push(ack_rx); + } + drop(tx); + + let handle = sink_writer + .streaming_write(ReceiverStream::new(rx), CancellationToken::new()) + .await + .unwrap(); + + let _ = handle.await.unwrap(); + for ack_rx in ack_rxs { + assert_eq!(ack_rx.await.unwrap(), ReadAck::Ack); + } + } + + #[tokio::test] + async fn test_streaming_write_error() { + // start the server + let (_shutdown_tx, shutdown_rx) = tokio::sync::oneshot::channel(); + let tmp_dir = tempfile::TempDir::new().unwrap(); + let sock_file = tmp_dir.path().join("sink.sock"); + let server_info_file = tmp_dir.path().join("sink-server-info"); + + let server_info = server_info_file.clone(); + let server_socket = sock_file.clone(); + + let _server_handle = tokio::spawn(async move { + sink::Server::new(SimpleSink) + .with_socket_file(server_socket) + .with_server_info_file(server_info) + .start_with_shutdown(shutdown_rx) + .await + .expect("failed to start sink server"); + }); + + // wait for the server to start + sleep(Duration::from_millis(100)).await; + + let sink_writer = SinkWriterBuilder::new( + 10, + Duration::from_millis(100), + SinkClientType::UserDefined(SinkClient::new( + create_rpc_channel(sock_file).await.unwrap(), + )), + ) + .build() + .await + .unwrap(); + + let messages: Vec = (0..10) + .map(|i| 
Message { + keys: vec!["error".to_string()], + value: format!("message {}", i).as_bytes().to_vec().into(), + offset: None, + event_time: Utc::now(), + id: MessageID { + vertex_name: "vertex".to_string(), + offset: format!("offset_{}", i), + index: i, + }, + headers: HashMap::new(), + }) + .collect(); + + let (tx, rx) = mpsc::channel(10); + let mut ack_rxs = vec![]; + for msg in messages { + let (ack_tx, ack_rx) = oneshot::channel(); + let _ = tx + .send(ReadMessage { + message: msg, + ack: ack_tx, + }) + .await; + ack_rxs.push(ack_rx); + } + drop(tx); + let cln_token = CancellationToken::new(); + let handle = sink_writer + .streaming_write(ReceiverStream::new(rx), cln_token.clone()) + .await + .unwrap(); + + // cancel the token after 1 second to exit from the retry loop + tokio::spawn(async move { + sleep(Duration::from_secs(1)).await; + cln_token.cancel(); + }); + + let _ = handle.await.unwrap(); + // since the writes fail, all the messages will be NAKed + for ack_rx in ack_rxs { + assert_eq!(ack_rx.await.unwrap(), ReadAck::Nak); + } + } + + #[tokio::test] + async fn test_fallback_write() { + // start the server + let (_shutdown_tx, shutdown_rx) = tokio::sync::oneshot::channel(); + let tmp_dir = tempfile::TempDir::new().unwrap(); + let sock_file = tmp_dir.path().join("sink.sock"); + let server_info_file = tmp_dir.path().join("sink-server-info"); + + let server_info = server_info_file.clone(); + let server_socket = sock_file.clone(); + + let _server_handle = tokio::spawn(async move { + sink::Server::new(SimpleSink) + .with_socket_file(server_socket) + .with_server_info_file(server_info) + .start_with_shutdown(shutdown_rx) + .await + .expect("failed to start sink server"); + }); + + // wait for the server to start + sleep(Duration::from_millis(100)).await; + + let sink_writer = SinkWriterBuilder::new( + 10, + Duration::from_millis(100), + SinkClientType::UserDefined(SinkClient::new( + create_rpc_channel(sock_file).await.unwrap(), + )), + ) + 
.fb_sink_client(SinkClientType::Log) + .build() + .await + .unwrap(); + + let messages: Vec = (0..20) + .map(|i| Message { + keys: vec!["fallback".to_string()], + value: format!("message {}", i).as_bytes().to_vec().into(), + offset: None, + event_time: Utc::now(), + id: MessageID { + vertex_name: "vertex".to_string(), + offset: format!("offset_{}", i), + index: i, + }, + headers: HashMap::new(), + }) + .collect(); + + let (tx, rx) = mpsc::channel(20); + let mut ack_rxs = vec![]; + for msg in messages { + let (ack_tx, ack_rx) = oneshot::channel(); + let _ = tx + .send(ReadMessage { + message: msg, + ack: ack_tx, + }) + .await; + ack_rxs.push(ack_rx); + } + drop(tx); + let cln_token = CancellationToken::new(); + let handle = sink_writer + .streaming_write(ReceiverStream::new(rx), cln_token.clone()) + .await + .unwrap(); + + let _ = handle.await.unwrap(); + for ack_rx in ack_rxs { + assert_eq!(ack_rx.await.unwrap(), ReadAck::Ack); + } + } +} diff --git a/rust/numaflow-core/src/sink/blackhole.rs b/rust/numaflow-core/src/sink/blackhole.rs index 41ddfd06dd..d3cc7a53ca 100644 --- a/rust/numaflow-core/src/sink/blackhole.rs +++ b/rust/numaflow-core/src/sink/blackhole.rs @@ -23,10 +23,8 @@ mod tests { use super::BlackholeSink; use crate::message::IntOffset; - use crate::{ - message::{Message, MessageID, Offset, ResponseFromSink, ResponseStatusFromSink}, - sink::Sink, - }; + use crate::message::{Message, MessageID, Offset, ResponseFromSink, ResponseStatusFromSink}; + use crate::sink::Sink; #[tokio::test] async fn test_black_hole() { diff --git a/rust/numaflow-core/src/sink/log.rs b/rust/numaflow-core/src/sink/log.rs index 4e53d8b797..970ab66bd0 100644 --- a/rust/numaflow-core/src/sink/log.rs +++ b/rust/numaflow-core/src/sink/log.rs @@ -1,7 +1,7 @@ +use crate::sink::Sink; use crate::{ error, message::{Message, ResponseFromSink, ResponseStatusFromSink}, - sink::Sink, }; pub(crate) struct LogSink; @@ -39,10 +39,8 @@ mod tests { use super::LogSink; use crate::message::IntOffset; 
- use crate::{ - message::{Message, MessageID, Offset, ResponseFromSink, ResponseStatusFromSink}, - sink::Sink, - }; + use crate::message::{Message, MessageID, Offset, ResponseFromSink, ResponseStatusFromSink}; + use crate::sink::Sink; #[tokio::test] async fn test_log_sink() { diff --git a/rust/numaflow-core/src/sink/user_defined.rs b/rust/numaflow-core/src/sink/user_defined.rs index 5799291eaf..81ac3d2022 100644 --- a/rust/numaflow-core/src/sink/user_defined.rs +++ b/rust/numaflow-core/src/sink/user_defined.rs @@ -1,7 +1,3 @@ -use crate::message::{Message, ResponseFromSink}; -use crate::sink::Sink; -use crate::Error; -use crate::Result; use numaflow_pb::clients::sink::sink_client::SinkClient; use numaflow_pb::clients::sink::{Handshake, SinkRequest, SinkResponse, TransmissionStatus}; use tokio::sync::mpsc; @@ -9,9 +5,14 @@ use tokio_stream::wrappers::ReceiverStream; use tonic::transport::Channel; use tonic::{Request, Streaming}; +use crate::message::{Message, ResponseFromSink}; +use crate::sink::Sink; +use crate::Error; +use crate::Result; + const DEFAULT_CHANNEL_SIZE: usize = 1000; -/// User-Defined Sink code writes messages to a custom [Sink]. +/// User-Defined Sink code writes messages to a custom [SinkWriter]. pub struct UserDefinedSink { sink_tx: mpsc::Sender, resp_stream: Streaming, @@ -44,7 +45,7 @@ impl UserDefinedSink { "failed to receive handshake response".to_string(), ))?; - // Handshake cannot be None during the initial phase and it has to set `sot` to true. + // Handshake cannot be None during the initial phase, and it has to set `sot` to true. 
if handshake_response.handshake.map_or(true, |h| !h.sot) { return Err(Error::Sink("invalid handshake response".to_string())); } @@ -125,7 +126,7 @@ mod tests { use super::*; use crate::error::Result; use crate::message::{Message, MessageID}; - use crate::shared::utils::create_rpc_channel; + use crate::shared::grpc::create_rpc_channel; use crate::sink::user_defined::UserDefinedSink; struct Logger; diff --git a/rust/numaflow-core/src/source.rs b/rust/numaflow-core/src/source.rs index f851268b72..b48f852222 100644 --- a/rust/numaflow-core/src/source.rs +++ b/rust/numaflow-core/src/source.rs @@ -1,9 +1,22 @@ +use numaflow_pulsar::source::PulsarSource; +use tokio::sync::{mpsc, oneshot}; +use tokio::task::JoinHandle; +use tokio::time; +use tokio_stream::wrappers::ReceiverStream; +use tokio_util::sync::CancellationToken; +use tracing::{error, info}; + +use crate::config::{get_vertex_name, is_mono_vertex}; +use crate::message::{ReadAck, ReadMessage}; +use crate::metrics::{ + monovertex_metrics, mvtx_forward_metric_labels, pipeline_forward_metric_labels, + pipeline_isb_metric_labels, pipeline_metrics, +}; +use crate::Result; use crate::{ message::{Message, Offset}, reader::LagReader, }; -use numaflow_pulsar::source::PulsarSource; -use tokio::sync::{mpsc, oneshot}; /// [User-Defined Source] extends Numaflow to add custom sources supported outside the builtins. /// @@ -26,7 +39,7 @@ pub(crate) trait SourceReader { /// Name of the source. fn name(&self) -> &'static str; - async fn read(&mut self) -> crate::Result>; + async fn read(&mut self) -> Result>; #[allow(dead_code)] /// number of partitions processed by this source. @@ -36,7 +49,21 @@ pub(crate) trait SourceReader { /// Set of Ack related items that has to be implemented to become a Source. pub(crate) trait SourceAcker { /// acknowledge an offset. The implementor might choose to do it in an asynchronous way. 
- async fn ack(&mut self, _: Vec) -> crate::Result<()>; + async fn ack(&mut self, _: Vec) -> Result<()>; +} + +pub(crate) enum SourceType { + UserDefinedSource( + user_defined::UserDefinedSourceRead, + user_defined::UserDefinedSourceAck, + user_defined::UserDefinedSourceLagReader, + ), + Generator( + generator::GeneratorRead, + generator::GeneratorAck, + generator::GeneratorLagReader, + ), + Pulsar(PulsarSource), } enum ActorMessage { @@ -45,14 +72,14 @@ enum ActorMessage { respond_to: oneshot::Sender<&'static str>, }, Read { - respond_to: oneshot::Sender>>, + respond_to: oneshot::Sender>>, }, Ack { - respond_to: oneshot::Sender>, + respond_to: oneshot::Sender>, offsets: Vec, }, Pending { - respond_to: oneshot::Sender>>, + respond_to: oneshot::Sender>>, }, } @@ -103,13 +130,16 @@ where } } +/// Source is used to read, ack, and get the pending messages count from the source. #[derive(Clone)] -pub(crate) struct SourceHandle { +pub(crate) struct Source { + read_batch_size: usize, sender: mpsc::Sender, } -impl SourceHandle { - pub(crate) fn new(src_type: SourceType, batch_size: usize) -> Self { +impl Source { + /// Create a new StreamingSource. It starts the read and ack actors in the background. + pub(crate) fn new(batch_size: usize, src_type: SourceType) -> Self { let (sender, receiver) = mpsc::channel(batch_size); match src_type { SourceType::UserDefinedSource(reader, acker, lag_reader) => { @@ -142,21 +172,26 @@ impl SourceHandle { }); } }; - Self { sender } + Self { + read_batch_size: batch_size, + sender, + } } - pub(crate) async fn read(&self) -> crate::Result> { + /// read messages from the source by communicating with the read actor. + async fn read(source_handle: mpsc::Sender) -> Result> { let (sender, receiver) = oneshot::channel(); let msg = ActorMessage::Read { respond_to: sender }; // Ignore send errors. If send fails, so does the recv.await below. There's no reason // to check for the same failure twice. 
- let _ = self.sender.send(msg).await; + let _ = source_handle.send(msg).await; receiver .await .map_err(|e| crate::error::Error::ActorPatternRecv(e.to_string()))? } - pub(crate) async fn ack(&self, offsets: Vec) -> crate::Result<()> { + /// ack the offsets by communicating with the ack actor. + async fn ack(source_handle: mpsc::Sender, offsets: Vec) -> Result<()> { let (sender, receiver) = oneshot::channel(); let msg = ActorMessage::Ack { respond_to: sender, @@ -164,13 +199,14 @@ impl SourceHandle { }; // Ignore send errors. If send fails, so does the recv.await below. There's no reason // to check for the same failure twice. - let _ = self.sender.send(msg).await; + let _ = source_handle.send(msg).await; receiver .await .map_err(|e| crate::error::Error::ActorPatternRecv(e.to_string()))? } - pub(crate) async fn pending(&self) -> crate::error::Result> { + /// get the pending messages count by communicating with the pending actor. + pub(crate) async fn pending(&self) -> Result> { let (sender, receiver) = oneshot::channel(); let msg = ActorMessage::Pending { respond_to: sender }; // Ignore send errors. If send fails, so does the recv.await below. There's no reason @@ -180,18 +216,335 @@ impl SourceHandle { .await .map_err(|e| crate::error::Error::ActorPatternRecv(e.to_string()))? } + + /// Starts streaming messages from the source. It returns a stream of messages and + /// a handle to the spawned task. 
+ pub(crate) fn streaming_read( + &self, + cln_token: CancellationToken, + ) -> Result<(ReceiverStream, JoinHandle>)> { + let batch_size = self.read_batch_size; + let (messages_tx, messages_rx) = mpsc::channel(batch_size); + let source_handle = self.sender.clone(); + + let pipeline_labels = pipeline_forward_metric_labels("Source", Some(get_vertex_name())); + let mvtx_labels = mvtx_forward_metric_labels(); + + info!("Started streaming source with batch size: {}", batch_size); + let handle = tokio::spawn(async move { + let mut processed_msgs_count: usize = 0; + let mut last_logged_at = tokio::time::Instant::now(); + + loop { + if cln_token.is_cancelled() { + info!("Cancellation token is cancelled. Stopping the source."); + return Ok(()); + } + let permit_time = tokio::time::Instant::now(); + // Reserve the permits before invoking the read method. + let mut permit = match messages_tx.reserve_many(batch_size).await { + Ok(permit) => { + info!( + "Reserved permits for {} messages in {:?}", + batch_size, + permit_time.elapsed() + ); + permit + } + Err(e) => { + error!("Error while reserving permits: {:?}", e); + return Err(crate::error::Error::Source(e.to_string())); + } + }; + + let read_start_time = tokio::time::Instant::now(); + let messages = match Self::read(source_handle.clone()).await { + Ok(messages) => messages, + Err(e) => { + error!("Error while reading messages: {:?}", e); + return Err(e); + } + }; + let n = messages.len(); + if is_mono_vertex() { + monovertex_metrics() + .read_total + .get_or_create(mvtx_labels) + .inc_by(n as u64); + monovertex_metrics() + .read_time + .get_or_create(mvtx_labels) + .observe(read_start_time.elapsed().as_micros() as f64); + } else { + pipeline_metrics() + .forwarder + .read_total + .get_or_create(pipeline_labels) + .inc_by(n as u64); + pipeline_metrics() + .forwarder + .read_time + .get_or_create(pipeline_labels) + .observe(read_start_time.elapsed().as_micros() as f64); + } + + let mut ack_batch = Vec::with_capacity(n); + for 
message in messages { + let (resp_ack_tx, resp_ack_rx) = oneshot::channel(); + let offset = message.offset.clone().unwrap(); + + let read_message = ReadMessage { + message, + ack: resp_ack_tx, + }; + + // store the ack one shot in the batch to invoke ack later. + ack_batch.push((offset, resp_ack_rx)); + + match permit.next() { + Some(permit) => { + permit.send(read_message); + } + None => { + unreachable!( + "Permits should be reserved for all messages in the batch" + ); + } + } + } + + // start a background task to invoke ack on the source for the offsets that are acked. + tokio::spawn(Self::invoke_ack( + read_start_time, + source_handle.clone(), + ack_batch, + )); + + processed_msgs_count += n; + if last_logged_at.elapsed().as_secs() >= 1 { + info!( + "Processed {} messages in {:?}", + processed_msgs_count, + std::time::Instant::now() + ); + processed_msgs_count = 0; + last_logged_at = tokio::time::Instant::now(); + } + } + }); + Ok((ReceiverStream::new(messages_rx), handle)) + } + + /// Listens to the oneshot receivers and invokes ack on the source for the offsets that are acked. 
+ async fn invoke_ack( + e2e_start_time: time::Instant, + source_handle: mpsc::Sender, + ack_rx_batch: Vec<(Offset, oneshot::Receiver)>, + ) -> Result<()> { + let n = ack_rx_batch.len(); + let mut offsets_to_ack = Vec::with_capacity(n); + + for (offset, oneshot_rx) in ack_rx_batch { + match oneshot_rx.await { + Ok(ReadAck::Ack) => { + offsets_to_ack.push(offset); + } + Ok(ReadAck::Nak) => { + error!("Nak received for offset: {:?}", offset); + } + Err(e) => { + error!( + "Error receiving ack for offset: {:?}, error: {:?}", + offset, e + ); + } + } + } + + let start = time::Instant::now(); + if !offsets_to_ack.is_empty() { + Self::ack(source_handle, offsets_to_ack).await?; + } + + if is_mono_vertex() { + monovertex_metrics() + .ack_time + .get_or_create(mvtx_forward_metric_labels()) + .observe(start.elapsed().as_micros() as f64); + + monovertex_metrics() + .ack_total + .get_or_create(mvtx_forward_metric_labels()) + .inc_by(n as u64); + + monovertex_metrics() + .e2e_time + .get_or_create(mvtx_forward_metric_labels()) + .observe(e2e_start_time.elapsed().as_micros() as f64); + } else { + pipeline_metrics() + .forwarder + .ack_time + .get_or_create(pipeline_isb_metric_labels()) + .observe(start.elapsed().as_micros() as f64); + + pipeline_metrics() + .forwarder + .ack_total + .get_or_create(pipeline_isb_metric_labels()) + .inc_by(n as u64); + + pipeline_metrics() + .forwarder + .processed_time + .get_or_create(pipeline_isb_metric_labels()) + .observe(e2e_start_time.elapsed().as_micros() as f64); + } + Ok(()) + } } -pub(crate) enum SourceType { - UserDefinedSource( - user_defined::UserDefinedSourceRead, - user_defined::UserDefinedSourceAck, - user_defined::UserDefinedSourceLagReader, - ), - Generator( - generator::GeneratorRead, - generator::GeneratorAck, - generator::GeneratorLagReader, - ), - Pulsar(PulsarSource), +#[cfg(test)] +mod tests { + use std::collections::HashSet; + use std::sync::atomic::{AtomicUsize, Ordering}; + use std::time::Duration; + + use chrono::Utc; + 
use futures::StreamExt; + use numaflow::source; + use numaflow::source::{Message, Offset, SourceReadRequest}; + use numaflow_pb::clients::source::source_client::SourceClient; + use tokio::sync::mpsc::Sender; + use tokio_util::sync::CancellationToken; + + use crate::shared::grpc::create_rpc_channel; + use crate::source::user_defined::new_source; + use crate::source::{Source, SourceType}; + + struct SimpleSource { + num: usize, + sent_count: AtomicUsize, + yet_to_ack: std::sync::RwLock>, + } + + impl SimpleSource { + fn new(num: usize) -> Self { + Self { + num, + sent_count: AtomicUsize::new(0), + yet_to_ack: std::sync::RwLock::new(HashSet::new()), + } + } + } + + #[tonic::async_trait] + impl source::Sourcer for SimpleSource { + async fn read(&self, request: SourceReadRequest, transmitter: Sender) { + let event_time = Utc::now(); + let mut message_offsets = Vec::with_capacity(request.count); + + for i in 0..request.count { + if self.sent_count.load(Ordering::SeqCst) >= self.num { + return; + } + + let offset = format!("{}-{}", event_time.timestamp_nanos_opt().unwrap(), i); + transmitter + .send(Message { + value: b"hello".to_vec(), + event_time, + offset: Offset { + offset: offset.clone().into_bytes(), + partition_id: 0, + }, + keys: vec![], + headers: Default::default(), + }) + .await + .unwrap(); + message_offsets.push(offset); + self.sent_count.fetch_add(1, Ordering::SeqCst); + } + self.yet_to_ack.write().unwrap().extend(message_offsets); + } + + async fn ack(&self, offsets: Vec) { + for offset in offsets { + self.yet_to_ack + .write() + .unwrap() + .remove(&String::from_utf8(offset.offset).unwrap()); + } + } + + async fn pending(&self) -> usize { + self.yet_to_ack.read().unwrap().len() + } + + async fn partitions(&self) -> Option> { + Some(vec![1, 2]) + } + } + + #[tokio::test] + async fn test_source() { + // start the server + let (shutdown_tx, shutdown_rx) = tokio::sync::oneshot::channel(); + let tmp_dir = tempfile::TempDir::new().unwrap(); + let sock_file = 
tmp_dir.path().join("source.sock"); + let server_info_file = tmp_dir.path().join("source-server-info"); + + let server_info = server_info_file.clone(); + let server_socket = sock_file.clone(); + let server_handle = tokio::spawn(async move { + // a simple source which generates total of 100 messages + source::Server::new(SimpleSource::new(100)) + .with_socket_file(server_socket) + .with_server_info_file(server_info) + .start_with_shutdown(shutdown_rx) + .await + .unwrap() + }); + + // wait for the server to start + // TODO: flaky + tokio::time::sleep(Duration::from_millis(100)).await; + + let client = SourceClient::new(create_rpc_channel(sock_file).await.unwrap()); + + let (src_read, src_ack, lag_reader) = new_source(client, 5, Duration::from_millis(1000)) + .await + .map_err(|e| panic!("failed to create source reader: {:?}", e)) + .unwrap(); + + let source = Source::new( + 5, + SourceType::UserDefinedSource(src_read, src_ack, lag_reader), + ); + + let cln_token = CancellationToken::new(); + + let (mut stream, handle) = source.streaming_read(cln_token.clone()).unwrap(); + let mut offsets = vec![]; + // we should read all the 100 messages + for _ in 0..100 { + let message = stream.next().await.unwrap(); + assert_eq!(message.message.value, "hello".as_bytes()); + offsets.push(message.message.offset.clone().unwrap()); + } + + // ack all the messages + Source::ack(source.sender.clone(), offsets).await.unwrap(); + + // since we acked all the messages, pending should be 0 + let pending = source.pending().await.unwrap(); + assert_eq!(pending, Some(0)); + + cln_token.cancel(); + let _ = handle.await.unwrap(); + drop(source); + let _ = shutdown_tx.send(()); + server_handle.await.unwrap(); + } } diff --git a/rust/numaflow-core/src/source/generator.rs b/rust/numaflow-core/src/source/generator.rs index 3c91bbf1cc..22bdf94d58 100644 --- a/rust/numaflow-core/src/source/generator.rs +++ b/rust/numaflow-core/src/source/generator.rs @@ -33,10 +33,8 @@ mod stream_generator { use 
tracing::warn; use crate::config::components::source::GeneratorConfig; - use crate::message::{ - get_vertex_name, get_vertex_replica, Message, MessageID, Offset, StringOffset, - }; - + use crate::config::{get_vertex_name, get_vertex_replica}; + use crate::message::{Message, MessageID, Offset, StringOffset}; #[pin_project] pub(super) struct StreamGenerator { /// the content generated by Generator. diff --git a/rust/numaflow-core/src/source/pulsar.rs b/rust/numaflow-core/src/source/pulsar.rs index 6d8d1d33f3..0b81f2615b 100644 --- a/rust/numaflow-core/src/source/pulsar.rs +++ b/rust/numaflow-core/src/source/pulsar.rs @@ -1,9 +1,11 @@ use std::time::Duration; +use numaflow_pulsar::source::{PulsarMessage, PulsarSource, PulsarSourceConfig}; + +use crate::config::get_vertex_name; use crate::error::Error; -use crate::message::{get_vertex_name, IntOffset, Message, MessageID, Offset}; +use crate::message::{IntOffset, Message, MessageID, Offset}; use crate::source; -use numaflow_pulsar::source::{PulsarMessage, PulsarSource, PulsarSourceConfig}; impl TryFrom for Message { type Error = Error; diff --git a/rust/numaflow-core/src/source/user_defined.rs b/rust/numaflow-core/src/source/user_defined.rs index 03162b53ac..b75564bfbc 100644 --- a/rust/numaflow-core/src/source/user_defined.rs +++ b/rust/numaflow-core/src/source/user_defined.rs @@ -242,7 +242,7 @@ mod tests { use tokio::sync::mpsc::Sender; use super::*; - use crate::shared::utils::create_rpc_channel; + use crate::shared::grpc::create_rpc_channel; struct SimpleSource { num: usize, diff --git a/rust/numaflow-core/src/transformer.rs b/rust/numaflow-core/src/transformer.rs index af407e159e..d987d62050 100644 --- a/rust/numaflow-core/src/transformer.rs +++ b/rust/numaflow-core/src/transformer.rs @@ -1,4 +1,305 @@ +use std::sync::Arc; + +use numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; +use tokio::sync::{mpsc, oneshot, OwnedSemaphorePermit, Semaphore}; +use tokio::task::JoinHandle; 
+use tokio_stream::wrappers::ReceiverStream; +use tokio_stream::StreamExt; +use tonic::transport::Channel; +use user_defined::ActorMessage; + +use crate::message::{ReadAck, ReadMessage}; +use crate::transformer::user_defined::UserDefinedTransformer; +use crate::Result; + /// User-Defined Transformer extends Numaflow to add custom sources supported outside the builtins. /// /// [User-Defined Transformer]: https://numaflow.numaproj.io/user-guide/sources/transformer/overview/#build-your-own-transformer pub(crate) mod user_defined; + +/// StreamingTransformer, transforms messages in a streaming fashion. +pub(crate) struct Transformer { + batch_size: usize, + sender: mpsc::Sender, + concurrency: usize, +} +impl Transformer { + pub(crate) async fn new( + batch_size: usize, + concurrency: usize, + client: SourceTransformClient, + ) -> Result { + let (sender, mut receiver) = mpsc::channel(batch_size); + let mut client = UserDefinedTransformer::new(batch_size, client).await?; + + tokio::spawn(async move { + while let Some(msg) = receiver.recv().await { + client.handle_message(msg).await; + } + }); + + Ok(Self { + batch_size, + concurrency, + sender, + }) + } + + /// Applies the transformation on the message and sends it to the next stage, it blocks if the + /// concurrency limit is reached. 
+ pub(crate) async fn transform( + transform_handle: mpsc::Sender, + permit: OwnedSemaphorePermit, + read_msg: ReadMessage, + output_tx: mpsc::Sender, + ) -> Result<()> { + // only if we have tasks < max_concurrency + + let output_tx = output_tx.clone(); + + // invoke transformer and then wait for the one-shot + tokio::spawn(async move { + let _permit = permit; + let message = read_msg.message.clone(); + + let (sender, receiver) = oneshot::channel(); + let msg = ActorMessage::Transform { + message, + respond_to: sender, + }; + + // invoke trf + transform_handle.send(msg).await.unwrap(); + + // wait for one-shot + match receiver.await { + Ok(Ok(mut transformed_messages)) => { + // FIXME: handle the case where the transformer does flat map operation + if let Some(transformed_msg) = transformed_messages.pop() { + output_tx + .send(ReadMessage { + message: transformed_msg, + ack: read_msg.ack, + }) + .await + .unwrap(); + } + } + Err(_) | Ok(Err(_)) => { + let _ = read_msg.ack.send(ReadAck::Nak); + } + } + }); + + Ok(()) + } + + /// Starts reading messages in the form of chunks and transforms them and + /// sends them to the next stage. 
+ pub(crate) fn transform_stream( + &self, + input_stream: ReceiverStream, + ) -> Result<(ReceiverStream, JoinHandle>)> { + let (output_tx, output_rx) = mpsc::channel(self.batch_size); + + let transform_handle = self.sender.clone(); + // FIXME: batch_size should not be used, introduce a new config called udf concurrenc + let semaphore = Arc::new(Semaphore::new(self.concurrency)); + + let handle = tokio::spawn(async move { + let mut input_stream = input_stream; + + while let Some(read_msg) = input_stream.next().await { + let permit = semaphore.clone().acquire_owned().await.unwrap(); + + Self::transform( + transform_handle.clone(), + permit, + read_msg, + output_tx.clone(), + ) + .await?; + } + Ok(()) + }); + + Ok((ReceiverStream::new(output_rx), handle)) + } +} + +#[cfg(test)] +mod tests { + use std::time::Duration; + + use numaflow::sourcetransform; + use numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; + use tempfile::TempDir; + use tokio::sync::oneshot; + + use super::*; + use crate::message::{Message, MessageID, Offset, ReadMessage}; + use crate::shared::grpc::create_rpc_channel; + + struct SimpleTransformer; + + #[tonic::async_trait] + impl sourcetransform::SourceTransformer for SimpleTransformer { + async fn transform( + &self, + input: sourcetransform::SourceTransformRequest, + ) -> Vec { + let message = sourcetransform::Message::new(input.value, chrono::offset::Utc::now()) + .keys(input.keys); + vec![message] + } + } + + #[tokio::test] + async fn transformer_operations() -> Result<()> { + let (shutdown_tx, shutdown_rx) = oneshot::channel(); + let tmp_dir = TempDir::new().unwrap(); + let sock_file = tmp_dir.path().join("sourcetransform.sock"); + let server_info_file = tmp_dir.path().join("sourcetransformer-server-info"); + + let server_info = server_info_file.clone(); + let server_socket = sock_file.clone(); + let handle = tokio::spawn(async move { + sourcetransform::Server::new(SimpleTransformer) + 
.with_socket_file(server_socket) + .with_server_info_file(server_info) + .start_with_shutdown(shutdown_rx) + .await + .expect("server failed"); + }); + + // wait for the server to start + tokio::time::sleep(Duration::from_millis(100)).await; + + let client = SourceTransformClient::new(create_rpc_channel(sock_file).await?); + let transformer = Transformer::new(500, 10, client).await?; + + let message = Message { + keys: vec!["first".into()], + value: "hello".into(), + offset: Some(Offset::String(crate::message::StringOffset::new( + "0".to_string(), + 0, + ))), + event_time: chrono::Utc::now(), + id: MessageID { + vertex_name: "vertex_name".to_string(), + offset: "0".to_string(), + index: 0, + }, + headers: Default::default(), + }; + + let (tx, _) = oneshot::channel(); + + let read_message = ReadMessage { + message: message.clone(), + ack: tx, + }; + + let (output_tx, mut output_rx) = mpsc::channel(10); + + let semaphore = Arc::new(Semaphore::new(10)); + let permit = semaphore.clone().acquire_owned().await.unwrap(); + Transformer::transform(transformer.sender.clone(), permit, read_message, output_tx).await?; + + let transformed_message = output_rx.recv().await.unwrap(); + assert_eq!(transformed_message.message.value, "hello"); + + // we need to drop the transformer, because if there are any in-flight requests + // server fails to shut down. 
https://github.com/numaproj/numaflow-rs/issues/85 + drop(transformer); + + shutdown_tx + .send(()) + .expect("failed to send shutdown signal"); + tokio::time::sleep(Duration::from_millis(50)).await; + assert!( + handle.is_finished(), + "Expected gRPC server to have shut down" + ); + Ok(()) + } + + #[tokio::test] + async fn test_transform_stream() -> Result<()> { + let (shutdown_tx, shutdown_rx) = oneshot::channel(); + let tmp_dir = TempDir::new().unwrap(); + let sock_file = tmp_dir.path().join("sourcetransform.sock"); + let server_info_file = tmp_dir.path().join("sourcetransformer-server-info"); + + let server_info = server_info_file.clone(); + let server_socket = sock_file.clone(); + let handle = tokio::spawn(async move { + sourcetransform::Server::new(SimpleTransformer) + .with_socket_file(server_socket) + .with_server_info_file(server_info) + .start_with_shutdown(shutdown_rx) + .await + .expect("server failed"); + }); + + // wait for the server to start + tokio::time::sleep(Duration::from_millis(100)).await; + + let client = SourceTransformClient::new(create_rpc_channel(sock_file).await?); + let transformer = Transformer::new(500, 10, client).await?; + + let (input_tx, input_rx) = mpsc::channel(10); + let input_stream = ReceiverStream::new(input_rx); + + for i in 0..5 { + let message = Message { + keys: vec![format!("key_{}", i)], + value: format!("value_{}", i).into(), + offset: Some(Offset::String(crate::message::StringOffset::new( + i.to_string(), + 0, + ))), + event_time: chrono::Utc::now(), + id: MessageID { + vertex_name: "vertex_name".to_string(), + offset: i.to_string(), + index: i as i32, + }, + headers: Default::default(), + }; + let (tx, _) = oneshot::channel(); + let read_message = ReadMessage { message, ack: tx }; + + input_tx.send(read_message).await.unwrap(); + } + drop(input_tx); + + let (output_stream, transform_handle) = transformer.transform_stream(input_stream)?; + + let mut output_rx = output_stream.into_inner(); + + for i in 0..5 { + let 
transformed_message = output_rx.recv().await.unwrap(); + assert_eq!(transformed_message.message.value, format!("value_{}", i)); + } + + // we need to drop the transformer, because if there are any in-flight requests + // server fails to shut down. https://github.com/numaproj/numaflow-rs/issues/85 + drop(transformer); + + shutdown_tx + .send(()) + .expect("failed to send shutdown signal"); + tokio::time::sleep(Duration::from_millis(50)).await; + assert!( + handle.is_finished(), + "Expected gRPC server to have shut down" + ); + assert!( + transform_handle.is_finished(), + "Expected transformer to have shut down" + ); + Ok(()) + } +} diff --git a/rust/numaflow-core/src/transformer/user_defined.rs b/rust/numaflow-core/src/transformer/user_defined.rs index bbacfbbfa9..8ebb409717 100644 --- a/rust/numaflow-core/src/transformer/user_defined.rs +++ b/rust/numaflow-core/src/transformer/user_defined.rs @@ -1,40 +1,52 @@ use std::collections::HashMap; +use std::sync::{Arc, Mutex}; use numaflow_pb::clients::sourcetransformer::{ self, source_transform_client::SourceTransformClient, SourceTransformRequest, SourceTransformResponse, }; use tokio::sync::{mpsc, oneshot}; -use tokio::task::JoinHandle; use tokio_stream::wrappers::ReceiverStream; -use tokio_util::sync::CancellationToken; use tonic::transport::Channel; use tonic::{Request, Streaming}; -use tracing::warn; +use crate::config::get_vertex_name; use crate::error::{Error, Result}; -use crate::message::{get_vertex_name, Message, MessageID, Offset}; -use crate::shared::utils::utc_from_timestamp; +use crate::message::{Message, MessageID, Offset}; +use crate::shared::grpc::utc_from_timestamp; -const DROP: &str = "U+005C__DROP__"; +type ResponseSenderMap = + Arc>>)>>>; -/// TransformerClient is a client to interact with the transformer server. 
-struct SourceTransformer { - actor_messages: mpsc::Receiver, +// fields which will not be changed +struct ParentMessageInfo { + offset: Offset, + headers: HashMap, +} + +pub enum ActorMessage { + Transform { + message: Message, + respond_to: oneshot::Sender>>, + }, +} + +/// UserDefinedTransformer exposes methods to do user-defined transformations. +pub(super) struct UserDefinedTransformer { read_tx: mpsc::Sender, - resp_stream: Streaming, + senders: ResponseSenderMap, } -impl SourceTransformer { - async fn new( +impl UserDefinedTransformer { + /// Performs handshake with the server and creates a new UserDefinedTransformer. + pub(super) async fn new( batch_size: usize, mut client: SourceTransformClient, - actor_messages: mpsc::Receiver, ) -> Result { let (read_tx, read_rx) = mpsc::channel(batch_size); let read_stream = ReceiverStream::new(read_rx); - // do a handshake for read with the server before we start sending read requests + // perform handshake let handshake_request = SourceTransformRequest { request: None, handshake: Some(sourcetransformer::Handshake { sot: true }), @@ -49,184 +61,82 @@ impl SourceTransformer { .await? .into_inner(); - // first response from the server will be the handshake response. We need to check if the - // server has accepted the handshake. let handshake_response = resp_stream.message().await?.ok_or(Error::Transformer( "failed to receive handshake response".to_string(), ))?; - // handshake cannot to None during the initial phase and it has to set `sot` to true. 
if handshake_response.handshake.map_or(true, |h| !h.sot) { return Err(Error::Transformer("invalid handshake response".to_string())); } - Ok(Self { - actor_messages, - read_tx, - resp_stream, - }) - } + // map to track the oneshot sender for each request along with the message info + let sender_map = Arc::new(Mutex::new(HashMap::new())); - async fn handle_message(&mut self, message: ActorMessage) { - match message { - ActorMessage::Transform { - messages, - respond_to, - } => { - let result = self.transform_fn(messages).await; - let _ = respond_to.send(result); - } - } - } + let transformer = Self { + read_tx, + senders: sender_map.clone(), + }; - async fn transform_fn(&mut self, messages: Vec) -> Result> { - // fields which will not be changed - struct MessageInfo { - offset: Offset, - headers: HashMap, - } + // background task to receive responses from the server and send them to the appropriate + // oneshot sender based on the message id + tokio::spawn(Self::receive_responses(sender_map, resp_stream)); - let mut tracker: HashMap = HashMap::with_capacity(messages.len()); - for message in &messages { - tracker.insert( - message.id.to_string(), - MessageInfo { - offset: message - .offset - .clone() - .ok_or(Error::Transformer("Message offset is missing".to_string()))?, - headers: message.headers.clone(), - }, - ); - } + Ok(transformer) + } - // Cancellation token is used to cancel either sending task (if an error occurs while receiving) or receiving messages (if an error occurs on sending task) - let token = CancellationToken::new(); - - // Send transform requests to the source transformer server - let sender_task: JoinHandle> = tokio::spawn({ - let read_tx = self.read_tx.clone(); - let token = token.clone(); - async move { - for msg in messages { - let result = tokio::select! 
{ - result = read_tx.send(msg.into()) => result, - _ = token.cancelled() => { - warn!("Cancellation token was cancelled while sending source transform requests"); - return Ok(()); + // receive responses from the server and gets the corresponding oneshot sender from the map + // and sends the response. + async fn receive_responses( + sender_map: ResponseSenderMap, + mut resp_stream: Streaming, + ) { + while let Some(resp) = resp_stream.message().await.unwrap() { + let msg_id = resp.id; + for (i, result) in resp.results.into_iter().enumerate() { + if let Some((msg_info, sender)) = sender_map + .lock() + .expect("map entry should always be present") + .remove(&msg_id) + { + let message = Message { + id: MessageID { + vertex_name: get_vertex_name().to_string(), + index: i as i32, + offset: msg_info.offset.to_string(), }, + keys: result.keys, + value: result.value.into(), + offset: None, + event_time: utc_from_timestamp(result.event_time), + headers: msg_info.headers.clone(), }; - - match result { - Ok(()) => continue, - Err(e) => { - token.cancel(); - return Err(Error::Transformer(e.to_string())); - } - }; + let _ = sender.send(Ok(vec![message])); } - Ok(()) - } - }); - - // Receive transformer results - let mut messages = Vec::new(); - while !tracker.is_empty() { - let resp = tokio::select! { - _ = token.cancelled() => { - break; - }, - resp = self.resp_stream.message() => {resp} - }; - - let resp = match resp { - Ok(Some(val)) => val, - Ok(None) => { - // Logging at warning level since we don't expect this to happen - warn!("Source transformer server closed its sending end of the stream. 
No more messages to receive"); - token.cancel(); - break; - } - Err(e) => { - token.cancel(); - return Err(Error::Transformer(format!( - "gRPC error while receiving messages from source transformer server: {e:?}" - ))); - } - }; - - let Some((_, msg_info)) = tracker.remove_entry(&resp.id) else { - token.cancel(); - return Err(Error::Transformer(format!( - "Received message with unknown ID {}", - resp.id - ))); - }; - - for (i, result) in resp.results.into_iter().enumerate() { - // TODO: Expose metrics - if result.tags.iter().any(|x| x == DROP) { - continue; - } - let message = Message { - id: MessageID { - vertex_name: get_vertex_name().to_string(), - index: i as i32, - offset: msg_info.offset.to_string(), - }, - keys: result.keys, - value: result.value.into(), - offset: None, - event_time: utc_from_timestamp(result.event_time), - headers: msg_info.headers.clone(), - }; - messages.push(message); } } - - sender_task.await.unwrap().map_err(|e| { - Error::Transformer(format!( - "Sending messages to gRPC transformer failed: {e:?}", - )) - })?; - - Ok(messages) } -} -enum ActorMessage { - Transform { - messages: Vec, - respond_to: oneshot::Sender>>, - }, -} + /// Handles the incoming message and sends it to the server for transformation. 
+ pub(super) async fn handle_message(&mut self, message: ActorMessage) { + match message { + ActorMessage::Transform { + message, + respond_to, + } => { + let msg_id = message.id.to_string(); + let msg_info = ParentMessageInfo { + offset: message.offset.clone().unwrap(), + headers: message.headers.clone(), + }; -#[derive(Clone)] -pub(crate) struct SourceTransformHandle { - sender: mpsc::Sender, -} + self.senders + .lock() + .unwrap() + .insert(msg_id, (msg_info, respond_to)); -impl SourceTransformHandle { - pub(crate) async fn new(client: SourceTransformClient) -> Result { - let batch_size = 500; - let (sender, receiver) = mpsc::channel(batch_size); - let mut client = SourceTransformer::new(batch_size, client, receiver).await?; - tokio::spawn(async move { - while let Some(msg) = client.actor_messages.recv().await { - client.handle_message(msg).await; + self.read_tx.send(message.into()).await.unwrap(); } - }); - Ok(Self { sender }) - } - - pub(crate) async fn transform(&self, messages: Vec) -> Result> { - let (sender, receiver) = oneshot::channel(); - let msg = ActorMessage::Transform { - messages, - respond_to: sender, - }; - let _ = self.sender.send(msg).await; - receiver.await.unwrap() + } } } @@ -240,9 +150,8 @@ mod tests { use tempfile::TempDir; use crate::message::{MessageID, StringOffset}; - use crate::shared::utils::create_rpc_channel; - use crate::transformer::user_defined::SourceTransformHandle; - + use crate::shared::grpc::create_rpc_channel; + use crate::transformer::user_defined::{ActorMessage, UserDefinedTransformer}; struct NowCat; #[tonic::async_trait] @@ -279,9 +188,10 @@ mod tests { // wait for the server to start tokio::time::sleep(Duration::from_millis(100)).await; - let client = SourceTransformHandle::new(SourceTransformClient::new( - create_rpc_channel(sock_file).await?, - )) + let mut client = UserDefinedTransformer::new( + 500, + SourceTransformClient::new(create_rpc_channel(sock_file).await?), + ) .await?; let message = 
crate::message::Message { @@ -300,9 +210,20 @@ mod tests { headers: Default::default(), }; - let resp = - tokio::time::timeout(Duration::from_secs(2), client.transform(vec![message])).await??; - assert_eq!(resp.len(), 1); + let (tx, rx) = tokio::sync::oneshot::channel(); + + let _ = tokio::time::timeout( + Duration::from_secs(2), + client.handle_message(ActorMessage::Transform { + message, + respond_to: tx, + }), + ) + .await?; + + let messages = rx.await?; + assert!(messages.is_ok()); + assert_eq!(messages.unwrap().len(), 1); // we need to drop the client, because if there are any in-flight requests // server fails to shut down. https://github.com/numaproj/numaflow-rs/issues/85 @@ -318,75 +239,4 @@ mod tests { ); Ok(()) } - - struct FilterCat; - - #[tonic::async_trait] - impl sourcetransform::SourceTransformer for FilterCat { - async fn transform( - &self, - input: sourcetransform::SourceTransformRequest, - ) -> Vec { - let message = sourcetransform::Message::new(input.value, chrono::offset::Utc::now()) - .keys(input.keys) - .tags(vec![crate::transformer::user_defined::DROP.to_string()]); - vec![message] - } - } - - #[tokio::test] - async fn transformer_operations_with_drop() -> Result<(), Box> { - let (shutdown_tx, shutdown_rx) = tokio::sync::oneshot::channel(); - let tmp_dir = TempDir::new()?; - let sock_file = tmp_dir.path().join("sourcetransform.sock"); - let server_info_file = tmp_dir.path().join("sourcetransformer-server-info"); - - let server_info = server_info_file.clone(); - let server_socket = sock_file.clone(); - let handle = tokio::spawn(async move { - sourcetransform::Server::new(FilterCat) - .with_socket_file(server_socket) - .with_server_info_file(server_info) - .start_with_shutdown(shutdown_rx) - .await - .expect("server failed"); - }); - - // wait for the server to start - tokio::time::sleep(Duration::from_millis(100)).await; - - let client = SourceTransformHandle::new(SourceTransformClient::new( - create_rpc_channel(sock_file).await?, - )) - 
.await?; - - let message = crate::message::Message { - keys: vec!["second".into()], - value: "hello".into(), - offset: Some(crate::message::Offset::String(StringOffset::new( - "0".to_string(), - 0, - ))), - event_time: chrono::Utc::now(), - id: MessageID { - vertex_name: "vertex_name".to_string(), - offset: "0".to_string(), - index: 0, - }, - headers: Default::default(), - }; - - let resp = client.transform(vec![message]).await?; - assert!(resp.is_empty()); - - // we need to drop the client, because if there are any in-flight requests - // server fails to shut down. https://github.com/numaproj/numaflow-rs/issues/85 - drop(client); - - shutdown_tx - .send(()) - .expect("failed to send shutdown signal"); - handle.await.expect("failed to join server task"); - Ok(()) - } } diff --git a/rust/servesink/Cargo.toml b/rust/servesink/Cargo.toml index 3e5f8677f1..72f2802c5b 100644 --- a/rust/servesink/Cargo.toml +++ b/rust/servesink/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" [dependencies] tonic = "0.12.3" -tokio = { version = "1.0", features = ["macros", "rt-multi-thread"] } +tokio = { version = "1.41.1", features = ["macros", "rt-multi-thread"] } numaflow = { git = "https://github.com/numaproj/numaflow-rs.git", rev = "ddd879588e11455921f1ca958ea2b3c076689293" } tracing = "0.1.40" tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } diff --git a/rust/serving/Cargo.toml b/rust/serving/Cargo.toml index 0af3c74e95..04fa96c288 100644 --- a/rust/serving/Cargo.toml +++ b/rust/serving/Cargo.toml @@ -16,7 +16,7 @@ axum-macros = "0.4.1" hyper-util = { version = "0.1.6", features = ["client-legacy"] } serde = { version = "1.0.204", features = ["derive"] } serde_json = "1.0.120" -tokio = { version = "1.39.3", features = ["full"] } +tokio = { version = "1.41.1", features = ["full"] } tower = "0.4.13" tower-http = { version = "0.5.2", features = ["trace", "timeout"] } tracing = "0.1.40" diff --git a/rust/src/bin/main.rs b/rust/src/bin/main.rs index 
4ffd64ed7a..d56cfff599 100644 --- a/rust/src/bin/main.rs +++ b/rust/src/bin/main.rs @@ -1,5 +1,7 @@ use std::env; +use std::time::Duration; +use tokio::time; use tracing::{error, info}; use tracing_subscriber::layer::SubscriberExt; use tracing_subscriber::util::SubscriberInitExt; From 05df88691d8c9a787c5d4b84e5a97bf4d9d39a4e Mon Sep 17 00:00:00 2001 From: Adarsh Jain Date: Wed, 4 Dec 2024 12:05:22 +0530 Subject: [PATCH 152/188] feat: counter metrics visualizer for pipeline (#2238) Signed-off-by: adarsh0728 --- .../namespaced-numaflow-server.yaml | 28 ++++- config/advanced-install/numaflow-server.yaml | 28 ++++- .../numaflow-server-metrics-proxy-config.yaml | 26 +++++ config/install.yaml | 28 ++++- config/namespace-install.yaml | 28 ++++- server/apis/v1/promql_service_test.go | 104 +++++++++++++++++- .../partials/NodeInfo/partials/Pods/index.tsx | 1 + .../Pods/partials/PodDetails/index.tsx | 6 +- .../PodDetails/partials/Metrics/index.tsx | 3 +- .../Metrics/partials/LineChart/index.tsx | 12 +- .../partials/common/FiltersDropdown/index.tsx | 6 +- .../partials/Metrics/utils/constants.ts | 6 + ui/src/types/declarations/pods.d.ts | 1 + 13 files changed, 259 insertions(+), 18 deletions(-) diff --git a/config/advanced-install/namespaced-numaflow-server.yaml b/config/advanced-install/namespaced-numaflow-server.yaml index d316bf1e00..2804c41248 100644 --- a/config/advanced-install/namespaced-numaflow-server.yaml +++ b/config/advanced-install/namespaced-numaflow-server.yaml @@ -137,7 +137,7 @@ metadata: --- apiVersion: v1 data: - config.yaml: | + config.yaml: |- # url is a required field, it should be the url of the service to which the metrics proxy will connect # url: service_name + "." 
+ service_namespace + ".svc.cluster.local" + ":" + port # example for local prometheus service @@ -186,6 +186,32 @@ data: # required: false # - name: mono-vertex # #expr: optional + - name: vertex_throughput + object: vertex + title: Vertex Throughput and Message Rates + description: This pattern measures the throughput of a vertex in messages per second across different dimensions + expr: sum(rate($metric_name{$filters}[$duration])) by ($dimension) + params: + - name: duration + required: true + - name: start_time + required: false + - name: end_time + required: false + metrics: + - metric_name: forwarder_data_read_total + required_filters: + - namespace + - pipeline + - vertex + dimensions: + - name: vertex + # expr: optional expression for prometheus query + # overrides the default expression + - name: pod + filters: + - name: pod + required: false kind: ConfigMap metadata: name: numaflow-server-metrics-proxy-config diff --git a/config/advanced-install/numaflow-server.yaml b/config/advanced-install/numaflow-server.yaml index b4ba7fd488..6a7444d395 100644 --- a/config/advanced-install/numaflow-server.yaml +++ b/config/advanced-install/numaflow-server.yaml @@ -144,7 +144,7 @@ metadata: --- apiVersion: v1 data: - config.yaml: | + config.yaml: |- # url is a required field, it should be the url of the service to which the metrics proxy will connect # url: service_name + "." 
+ service_namespace + ".svc.cluster.local" + ":" + port # example for local prometheus service @@ -193,6 +193,32 @@ data: # required: false # - name: mono-vertex # #expr: optional + - name: vertex_throughput + object: vertex + title: Vertex Throughput and Message Rates + description: This pattern measures the throughput of a vertex in messages per second across different dimensions + expr: sum(rate($metric_name{$filters}[$duration])) by ($dimension) + params: + - name: duration + required: true + - name: start_time + required: false + - name: end_time + required: false + metrics: + - metric_name: forwarder_data_read_total + required_filters: + - namespace + - pipeline + - vertex + dimensions: + - name: vertex + # expr: optional expression for prometheus query + # overrides the default expression + - name: pod + filters: + - name: pod + required: false kind: ConfigMap metadata: name: numaflow-server-metrics-proxy-config diff --git a/config/base/numaflow-server/numaflow-server-metrics-proxy-config.yaml b/config/base/numaflow-server/numaflow-server-metrics-proxy-config.yaml index f970cea63b..7824bae01f 100644 --- a/config/base/numaflow-server/numaflow-server-metrics-proxy-config.yaml +++ b/config/base/numaflow-server/numaflow-server-metrics-proxy-config.yaml @@ -52,3 +52,29 @@ data: # required: false # - name: mono-vertex # #expr: optional + - name: vertex_throughput + object: vertex + title: Vertex Throughput and Message Rates + description: This pattern measures the throughput of a vertex in messages per second across different dimensions + expr: sum(rate($metric_name{$filters}[$duration])) by ($dimension) + params: + - name: duration + required: true + - name: start_time + required: false + - name: end_time + required: false + metrics: + - metric_name: forwarder_data_read_total + required_filters: + - namespace + - pipeline + - vertex + dimensions: + - name: vertex + # expr: optional expression for prometheus query + # overrides the default expression + - name: pod 
+ filters: + - name: pod + required: false \ No newline at end of file diff --git a/config/install.yaml b/config/install.yaml index 6e3551c059..60bbd60918 100644 --- a/config/install.yaml +++ b/config/install.yaml @@ -28557,7 +28557,7 @@ metadata: --- apiVersion: v1 data: - config.yaml: | + config.yaml: |- # url is a required field, it should be the url of the service to which the metrics proxy will connect # url: service_name + "." + service_namespace + ".svc.cluster.local" + ":" + port # example for local prometheus service @@ -28606,6 +28606,32 @@ data: # required: false # - name: mono-vertex # #expr: optional + - name: vertex_throughput + object: vertex + title: Vertex Throughput and Message Rates + description: This pattern measures the throughput of a vertex in messages per second across different dimensions + expr: sum(rate($metric_name{$filters}[$duration])) by ($dimension) + params: + - name: duration + required: true + - name: start_time + required: false + - name: end_time + required: false + metrics: + - metric_name: forwarder_data_read_total + required_filters: + - namespace + - pipeline + - vertex + dimensions: + - name: vertex + # expr: optional expression for prometheus query + # overrides the default expression + - name: pod + filters: + - name: pod + required: false kind: ConfigMap metadata: name: numaflow-server-metrics-proxy-config diff --git a/config/namespace-install.yaml b/config/namespace-install.yaml index 12821ec38d..58c769f7ff 100644 --- a/config/namespace-install.yaml +++ b/config/namespace-install.yaml @@ -28445,7 +28445,7 @@ metadata: --- apiVersion: v1 data: - config.yaml: | + config.yaml: |- # url is a required field, it should be the url of the service to which the metrics proxy will connect # url: service_name + "." 
+ service_namespace + ".svc.cluster.local" + ":" + port # example for local prometheus service @@ -28494,6 +28494,32 @@ data: # required: false # - name: mono-vertex # #expr: optional + - name: vertex_throughput + object: vertex + title: Vertex Throughput and Message Rates + description: This pattern measures the throughput of a vertex in messages per second across different dimensions + expr: sum(rate($metric_name{$filters}[$duration])) by ($dimension) + params: + - name: duration + required: true + - name: start_time + required: false + - name: end_time + required: false + metrics: + - metric_name: forwarder_data_read_total + required_filters: + - namespace + - pipeline + - vertex + dimensions: + - name: vertex + # expr: optional expression for prometheus query + # overrides the default expression + - name: pod + filters: + - name: pod + required: false kind: ConfigMap metadata: name: numaflow-server-metrics-proxy-config diff --git a/server/apis/v1/promql_service_test.go b/server/apis/v1/promql_service_test.go index 8ad0fac810..733476ad24 100644 --- a/server/apis/v1/promql_service_test.go +++ b/server/apis/v1/promql_service_test.go @@ -127,7 +127,8 @@ func Test_PopulateReqMap(t *testing.T) { }) } func Test_PromQueryBuilder(t *testing.T) { - var service = &PromQlService{ + // tests for histogram + var histogram_service = &PromQlService{ PlaceHolders: map[string]map[string][]string{ "test_metric": { "test_dimension": {"$quantile", "$dimension", "$metric_name", "$filters", "$duration"}, @@ -140,14 +141,14 @@ func Test_PromQueryBuilder(t *testing.T) { }, } - tests := []struct { + histogram_metrics_tests := []struct { name string requestBody MetricsRequestBody expectedQuery string expectError bool }{ { - name: "Successful template substitution", + name: "Successful histogram metrics template substitution", requestBody: MetricsRequestBody{ MetricName: "test_metric", Quantile: "0.90", @@ -191,9 +192,75 @@ func Test_PromQueryBuilder(t *testing.T) { }, } - for _, tt := 
range tests { + for _, tt := range histogram_metrics_tests { + t.Run(tt.name, func(t *testing.T) { + actualQuery, err := histogram_service.BuildQuery(tt.requestBody) + if tt.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + if !comparePrometheusQueries(tt.expectedQuery, actualQuery) { + t.Errorf("Prometheus queries do not match.\nExpected: %s\nGot: %s", tt.expectedQuery, actualQuery) + } else { + t.Log("Prometheus queries match!") + } + } + }) + } + + // tests for counter metrics + var counter_service = &PromQlService{ + PlaceHolders: map[string]map[string][]string{ + "forwarder_data_read_total": { + "vertex": {"$duration", "$dimension", "$metric_name", "$filters"}, + }, + }, + Expression: map[string]map[string]string{ + "forwarder_data_read_total": { + "vertex": "sum(rate($metric_name{$filters}[$duration])) by ($dimension)", + }, + }, + } + + counter_metrics_tests := []struct { + name string + requestBody MetricsRequestBody + expectedQuery string + expectError bool + }{ + { + name: "Successful counter metrics template substitution", + requestBody: MetricsRequestBody{ + MetricName: "forwarder_data_read_total", + Duration: "5m", + Dimension: "vertex", + Filters: map[string]string{ + "namespace": "test_namespace", + "pipeline": "test_pipeline", + "vertex": "test_vertex", + }, + }, + expectedQuery: `sum(rate(forwarder_data_read_total{namespace= "test_namespace", pipeline= "test_pipeline", vertex= "test_vertex"}[5m])) by (vertex)`, + }, + { + name: "Missing metric name in service config", + requestBody: MetricsRequestBody{ + MetricName: "non_existent_metric", + Duration: "5m", + Dimension: "vertex", + Filters: map[string]string{ + "namespace": "test_namespace", + "pipeline": "test_pipeline", + "vertex": "test_vertex", + }, + }, + expectError: true, + }, + } + + for _, tt := range counter_metrics_tests { t.Run(tt.name, func(t *testing.T) { - actualQuery, err := service.BuildQuery(tt.requestBody) + actualQuery, err := 
counter_service.BuildQuery(tt.requestBody) if tt.expectError { assert.Error(t, err) } else { @@ -207,8 +274,9 @@ func Test_PromQueryBuilder(t *testing.T) { }) } } + func Test_QueryPrometheus(t *testing.T) { - t.Run("Successful query", func(t *testing.T) { + t.Run("Successful histogram query", func(t *testing.T) { mockAPI := &MockPrometheusAPI{} promQlService := &PromQlService{ PrometheusClient: &Prometheus{ @@ -230,6 +298,30 @@ func Test_QueryPrometheus(t *testing.T) { assert.True(t, ok) assert.Equal(t, 1, matrix.Len()) }) + + t.Run("Successful counter query", func(t *testing.T) { + mockAPI := &MockPrometheusAPI{} + promQlService := &PromQlService{ + PrometheusClient: &Prometheus{ + Api: mockAPI, + }, + } + query := `sum(rate(forwarder_data_read_total{namespace="default", pipeline="test-pipeline"}[5m])) by (vertex)` + startTime := time.Now().Add(-30 * time.Minute) + endTime := time.Now() + + ctx := context.Background() + result, err := promQlService.QueryPrometheus(ctx, query, startTime, endTime) + + assert.NoError(t, err) + assert.NotNil(t, result) + + // for query range , response should be a matrix + matrix, ok := result.(model.Matrix) + assert.True(t, ok) + assert.Equal(t, 1, matrix.Len()) + }) + t.Run("Prometheus client is nil", func(t *testing.T) { service := &PromQlService{ PrometheusClient: nil, diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/index.tsx b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/index.tsx index 91baaeed84..6f12096ea7 100644 --- a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/index.tsx +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/index.tsx @@ -201,6 +201,7 @@ export function Pods(props: PodsProps) { containerName={selectedContainer} pod={selectedPod} podDetails={selectedPodDetails} + vertexId={vertexId} /> ); diff --git 
a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/index.tsx b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/index.tsx index 164767ee8f..b29cd1836d 100644 --- a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/index.tsx +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/index.tsx @@ -26,6 +26,7 @@ export function PodDetail({ type, containerName, pod, + vertexId }: PodDetailProps) { if (!pod) return null; @@ -59,7 +60,7 @@ export function PodDetail({ label="Logs" data-testid="logs-tab" /> - {!disableMetricsCharts && type === "monoVertex" && ( + {!disableMetricsCharts && ( )} - {!disableMetricsCharts && type === "monoVertex" && ( + {!disableMetricsCharts && (
)} diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/index.tsx b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/index.tsx index 220cb0f0cc..d078509923 100644 --- a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/index.tsx +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/index.tsx @@ -17,7 +17,7 @@ export interface MetricsProps { vertexId?: string; } -export function Metrics({ namespaceId, pipelineId, type }: MetricsProps) { +export function Metrics({ namespaceId, pipelineId, type, vertexId }: MetricsProps) { const { metricsDiscoveryData: discoveredMetrics, error: discoveredMetricsError, @@ -88,6 +88,7 @@ export function Metrics({ namespaceId, pipelineId, type }: MetricsProps) { pipelineId={pipelineId} type={type} metric={metric} + vertexId={vertexId} /> )} diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/LineChart/index.tsx b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/LineChart/index.tsx index 6152c83c29..b7171a0c42 100644 --- a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/LineChart/index.tsx +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/LineChart/index.tsx @@ -18,7 +18,7 @@ import EmptyChart from "../EmptyChart"; import { useMetricsFetch } from "../../../../../../../../../../../../../../../utils/fetchWrappers/metricsFetch"; // TODO have a check for metricReq against metric object to ensure required fields are passed -const LineChartComponent = 
({ namespaceId, pipelineId, type, metric }: any) => { +const LineChartComponent = ({ namespaceId, pipelineId, type, metric, vertexId }: any) => { const [transformedData, setTransformedData] = useState([]); const [chartLabels, setChartLabels] = useState([]); const [metricsReq, setMetricsReq] = useState({ @@ -28,6 +28,7 @@ const LineChartComponent = ({ namespaceId, pipelineId, type, metric }: any) => { // store all filters for each selected dimension const [filtersList, setFiltersList] = useState([]); const [filters, setFilters] = useState({}); + const [previousDimension, setPreviousDimension] = useState(metricsReq?.dimension); const getRandomColor = useCallback((index: number) => { const hue = (index * 137.508) % 360; @@ -42,6 +43,8 @@ const LineChartComponent = ({ namespaceId, pipelineId, type, metric }: any) => { case "mvtx_name": case "pipeline": return pipelineId; + case "vertex": + return vertexId; default: return ""; } @@ -73,9 +76,11 @@ const LineChartComponent = ({ namespaceId, pipelineId, type, metric }: any) => { setFilters(newFilters); }, [filtersList, getFilterValue, setFilters]); + //update filters only when dimension changes in metricsReq useEffect(() => { - if (metricsReq?.dimension) { + if (metricsReq?.dimension !== previousDimension) { updateFilterList(metricsReq.dimension); + setPreviousDimension(metricsReq?.dimension); } }, [metricsReq, updateFilterList]); @@ -91,7 +96,7 @@ const LineChartComponent = ({ namespaceId, pipelineId, type, metric }: any) => { name: param?.Name, required: param?.Required, })) || []; - + setParamsList([...initParams, ...newParams]); }, [metric, setParamsList]); @@ -214,6 +219,7 @@ const LineChartComponent = ({ namespaceId, pipelineId, type, metric }: any) => { namespaceId={namespaceId} pipelineId={pipelineId} type={type} + vertexId={vertexId} setFilters={setFilters} /> diff --git 
a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/common/FiltersDropdown/index.tsx b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/common/FiltersDropdown/index.tsx index 6cbc89794f..193f2ef855 100644 --- a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/common/FiltersDropdown/index.tsx +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/common/FiltersDropdown/index.tsx @@ -25,6 +25,7 @@ export interface FiltersDropdownProps { namespaceId: string; pipelineId: string; type: string; + vertexId?: string; setFilters: any; } @@ -33,6 +34,7 @@ const FiltersDropdown = ({ namespaceId, pipelineId, type, + vertexId, setFilters, }: FiltersDropdownProps) => { const { host } = useContext(AppContext); @@ -62,8 +64,8 @@ const FiltersDropdown = ({ try { const response = await fetch( `${host}${getBaseHref()}/api/v1/namespaces/${namespaceId}/${ - type === "monoVertex" ? "mono-vertices" : "pipeline" - }/${pipelineId}/pods` + type === "monoVertex" ? 
`mono-vertices/${pipelineId}/pods` : `pipelines/${pipelineId}/vertices/${vertexId}/pods` + }` ); if (!response.ok) { callback(null); diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/utils/constants.ts b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/utils/constants.ts index 3f5bf52f09..925d8acea1 100644 --- a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/utils/constants.ts +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/utils/constants.ts @@ -19,10 +19,14 @@ export const dimensionMap: { [p: string]: string } = { "mono-vertex": "MonoVertex", pod: "Pod", pipeline: "Pipeline", + vertex: "Vertex" }; export const dimensionReverseMap: { [p: string]: string } = { monoVertex: "mono-vertex", + source: "vertex", + udf: "vertex", + sink: "vertex", pipeline: "pipeline", pod: "pod", }; @@ -34,4 +38,6 @@ export const metricNameMap: { [p: string]: string } = { "Mono Vertex Processing Time Latency (in micro seconds)", monovtx_sink_time_bucket: "Mono Vertex Sink Write Time Latency (in micro seconds)", + forwarder_data_read_total: + "Vertex Read Processing Rate" }; diff --git a/ui/src/types/declarations/pods.d.ts b/ui/src/types/declarations/pods.d.ts index b108a8fbc8..e9f72c4c4e 100644 --- a/ui/src/types/declarations/pods.d.ts +++ b/ui/src/types/declarations/pods.d.ts @@ -98,6 +98,7 @@ export interface PodDetailProps { containerName: string; pod: Pod; podDetails: PodDetail; + vertexId: string; } export interface ContainerInfoProps { state: string; From 5c9094b3200737027bbcfdc0bced3528b7b8b405 Mon Sep 17 00:00:00 2001 From: Sidhant Kohli Date: Thu, 5 Dec 2024 00:03:27 -0800 Subject: [PATCH 153/188] fix: honor lookbackSeconds in monovertex and rust pipeline (#2258) Signed-off-by: Sidhant Kohli 
--- .../numaflow/v1alpha1/mono_vertex_types.go | 2 +- rust/numaflow-core/src/config/components.rs | 11 +++++ rust/numaflow-core/src/config/monovertex.rs | 11 ++++- rust/numaflow-core/src/config/pipeline.rs | 11 ++++- rust/numaflow-core/src/metrics.rs | 44 +++++++++++++++---- rust/numaflow-core/src/pipeline.rs | 2 + rust/numaflow-core/src/shared/metrics.rs | 1 + 7 files changed, 70 insertions(+), 12 deletions(-) diff --git a/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go index 2fee030203..43e794967d 100644 --- a/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go +++ b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go @@ -318,7 +318,7 @@ func (mv MonoVertex) simpleCopy() MonoVertex { func (mv MonoVertex) GetPodSpec(req GetMonoVertexPodSpecReq) (*corev1.PodSpec, error) { copiedSpec := mv.simpleCopy() - copiedSpec.Spec.Scale = Scale{} + copiedSpec.Spec.Scale = Scale{LookbackSeconds: mv.Spec.Scale.LookbackSeconds} monoVtxBytes, err := json.Marshal(copiedSpec) if err != nil { return nil, errors.New("failed to marshal mono vertex spec") diff --git a/rust/numaflow-core/src/config/components.rs b/rust/numaflow-core/src/config/components.rs index adb2784e2a..db64b9cfc3 100644 --- a/rust/numaflow-core/src/config/components.rs +++ b/rust/numaflow-core/src/config/components.rs @@ -398,12 +398,14 @@ pub(crate) mod metrics { const DEFAULT_METRICS_PORT: u16 = 2469; const DEFAULT_LAG_CHECK_INTERVAL_IN_SECS: u16 = 5; const DEFAULT_LAG_REFRESH_INTERVAL_IN_SECS: u16 = 3; + const DEFAULT_LOOKBACK_WINDOW_IN_SECS: u16 = 120; #[derive(Debug, Clone, PartialEq)] pub(crate) struct MetricsConfig { pub metrics_server_listen_port: u16, pub lag_check_interval_in_secs: u16, pub lag_refresh_interval_in_secs: u16, + pub lookback_window_in_secs: u16, } impl Default for MetricsConfig { @@ -412,9 +414,18 @@ pub(crate) mod metrics { metrics_server_listen_port: DEFAULT_METRICS_PORT, lag_check_interval_in_secs: DEFAULT_LAG_CHECK_INTERVAL_IN_SECS, 
lag_refresh_interval_in_secs: DEFAULT_LAG_REFRESH_INTERVAL_IN_SECS, + lookback_window_in_secs: DEFAULT_LOOKBACK_WINDOW_IN_SECS, } } } + + impl MetricsConfig { + pub(crate) fn with_lookback_window_in_secs(lookback_window_in_secs: u16) -> Self { + let mut default_config = Self::default(); + default_config.lookback_window_in_secs = lookback_window_in_secs; + default_config + } + } } #[cfg(test)] diff --git a/rust/numaflow-core/src/config/monovertex.rs b/rust/numaflow-core/src/config/monovertex.rs index 356e97d827..bf59313e6a 100644 --- a/rust/numaflow-core/src/config/monovertex.rs +++ b/rust/numaflow-core/src/config/monovertex.rs @@ -18,6 +18,7 @@ use crate::Result; const DEFAULT_BATCH_SIZE: u64 = 500; const DEFAULT_TIMEOUT_IN_MS: u32 = 1000; +const DEFAULT_LOOKBACK_WINDOW_IN_SECS: u16 = 120; #[derive(Debug, Clone, PartialEq)] pub(crate) struct MonovertexConfig { @@ -128,12 +129,19 @@ impl MonovertexConfig { None }; + let look_back_window = mono_vertex_obj + .spec + .scale + .as_ref() + .and_then(|scale| scale.lookback_seconds.map(|x| x as u16)) + .unwrap_or(DEFAULT_LOOKBACK_WINDOW_IN_SECS); + Ok(MonovertexConfig { name: mono_vertex_name, replica: *get_vertex_replica(), batch_size: batch_size as usize, read_timeout: Duration::from_millis(timeout_in_ms as u64), - metrics_config: MetricsConfig::default(), + metrics_config: MetricsConfig::with_lookback_window_in_secs(look_back_window), source_config, sink_config, transformer_config, @@ -152,6 +160,7 @@ mod tests { use crate::config::components::transformer::TransformerType; use crate::config::monovertex::MonovertexConfig; use crate::error::Error; + #[test] fn test_load_valid_config() { let valid_config = r#" diff --git a/rust/numaflow-core/src/config/pipeline.rs b/rust/numaflow-core/src/config/pipeline.rs index c05ca73d3c..eb940e5fce 100644 --- a/rust/numaflow-core/src/config/pipeline.rs +++ b/rust/numaflow-core/src/config/pipeline.rs @@ -18,6 +18,7 @@ use crate::Result; const DEFAULT_BATCH_SIZE: u64 = 500; const 
DEFAULT_TIMEOUT_IN_MS: u32 = 1000; +const DEFAULT_LOOKBACK_WINDOW_IN_SECS: u16 = 120; const ENV_NUMAFLOW_SERVING_JETSTREAM_URL: &str = "NUMAFLOW_ISBSVC_JETSTREAM_URL"; const ENV_NUMAFLOW_SERVING_JETSTREAM_USER: &str = "NUMAFLOW_ISBSVC_JETSTREAM_USER"; const ENV_NUMAFLOW_SERVING_JETSTREAM_PASSWORD: &str = "NUMAFLOW_ISBSVC_JETSTREAM_PASSWORD"; @@ -268,6 +269,13 @@ impl PipelineConfig { }); } + let look_back_window = vertex_obj + .spec + .scale + .as_ref() + .and_then(|scale| scale.lookback_seconds.map(|x| x as u16)) + .unwrap_or(DEFAULT_LOOKBACK_WINDOW_IN_SECS); + Ok(PipelineConfig { batch_size: batch_size as usize, paf_concurrency: env::var("PAF_BATCH_SIZE") @@ -282,7 +290,7 @@ impl PipelineConfig { from_vertex_config, to_vertex_config, vertex_config: vertex, - metrics_config: Default::default(), + metrics_config: MetricsConfig::with_lookback_window_in_secs(look_back_window), }) } } @@ -376,6 +384,7 @@ mod tests { metrics_server_listen_port: 2469, lag_check_interval_in_secs: 5, lag_refresh_interval_in_secs: 3, + lookback_window_in_secs: 120, }, }; assert_eq!(pipeline_config, expected); diff --git a/rust/numaflow-core/src/metrics.rs b/rust/numaflow-core/src/metrics.rs index 81c8a3ccaa..317b097e87 100644 --- a/rust/numaflow-core/src/metrics.rs +++ b/rust/numaflow-core/src/metrics.rs @@ -688,6 +688,7 @@ pub(crate) struct PendingReader { lag_checking_interval: Duration, refresh_interval: Duration, pending_stats: Arc>>, + lookback_seconds: u16, } pub(crate) struct PendingReaderTasks { @@ -700,6 +701,7 @@ pub(crate) struct PendingReaderBuilder { lag_reader: Source, lag_checking_interval: Option, refresh_interval: Option, + lookback_seconds: Option, } impl PendingReaderBuilder { @@ -708,6 +710,7 @@ impl PendingReaderBuilder { lag_reader, lag_checking_interval: None, refresh_interval: None, + lookback_seconds: None, } } @@ -721,6 +724,11 @@ impl PendingReaderBuilder { self } + pub(crate) fn lookback_seconds(mut self, seconds: u16) -> Self { + self.lookback_seconds = 
Some(seconds); + self + } + pub(crate) fn build(self) -> PendingReader { PendingReader { lag_reader: self.lag_reader, @@ -730,6 +738,7 @@ impl PendingReaderBuilder { refresh_interval: self .refresh_interval .unwrap_or_else(|| Duration::from_secs(5)), + lookback_seconds: self.lookback_seconds.unwrap_or(120), pending_stats: Arc::new(Mutex::new(Vec::with_capacity(MAX_PENDING_STATS))), } } @@ -748,6 +757,7 @@ impl PendingReader { let lag_checking_interval = self.lag_checking_interval; let refresh_interval = self.refresh_interval; let pending_stats = self.pending_stats.clone(); + let lookback_seconds = self.lookback_seconds; let buildup_handle = tokio::spawn(async move { build_pending_info(pending_reader, lag_checking_interval, pending_stats).await; @@ -755,7 +765,13 @@ impl PendingReader { let pending_stats = self.pending_stats.clone(); let expose_handle = tokio::spawn(async move { - expose_pending_metrics(is_mono_vertex, refresh_interval, pending_stats).await; + expose_pending_metrics( + is_mono_vertex, + refresh_interval, + pending_stats, + lookback_seconds, + ) + .await; }); PendingReaderTasks { buildup_handle, @@ -809,14 +825,12 @@ async fn fetch_pending(lag_reader: &Source) -> crate::error::Result { Ok(response) } -const LOOKBACK_SECONDS_MAP: [(&str, i64); 4] = - [("1m", 60), ("default", 120), ("5m", 300), ("15m", 900)]; - // Periodically exposes the pending metrics by calculating the average pending messages over different intervals. async fn expose_pending_metrics( is_mono_vertex: bool, refresh_interval: Duration, pending_stats: Arc>>, + lookback_seconds: u16, ) { let mut ticker = time::interval(refresh_interval); @@ -824,10 +838,17 @@ async fn expose_pending_metrics( // string concat is more efficient? 
let mut pending_info: BTreeMap<&str, i64> = BTreeMap::new(); + let lookback_seconds_map: [(&str, u16); 4] = [ + ("1m", 60), + ("default", lookback_seconds), + ("5m", 300), + ("15m", 900), + ]; + loop { ticker.tick().await; - for (label, seconds) in LOOKBACK_SECONDS_MAP { - let pending = calculate_pending(seconds, &pending_stats).await; + for (label, seconds) in lookback_seconds_map { + let pending = calculate_pending(seconds as i64, &pending_stats).await; if pending != -1 { let mut metric_labels = mvtx_forward_metric_labels().clone(); metric_labels.push((PENDING_PERIOD_LABEL.to_string(), label.to_string())); @@ -1050,6 +1071,7 @@ mod tests { async fn test_expose_pending_metrics() { let pending_stats = Arc::new(Mutex::new(Vec::with_capacity(MAX_PENDING_STATS))); let refresh_interval = Duration::from_secs(1); + let lookback_seconds = 120; // Populate pending_stats with some values. // The array will be sorted by the timestamp with the most recent last. @@ -1076,18 +1098,22 @@ mod tests { tokio::spawn({ let pending_stats = pending_stats.clone(); async move { - expose_pending_metrics(true, refresh_interval, pending_stats).await; + expose_pending_metrics(true, refresh_interval, pending_stats, lookback_seconds) + .await; } }); // We use tokio::time::interval() as the ticker in the expose_pending_metrics() function. // The first tick happens immediately, so we don't need to wait for the refresh_interval for the first iteration to complete. 
tokio::time::sleep(Duration::from_millis(50)).await; + let lookback_seconds_map: [(&str, u16); 4] = + [("1m", 60), ("default", 120), ("5m", 300), ("15m", 900)]; + // Get the stored values for all time intervals - // We will store the values corresponding to the labels (from LOOKBACK_SECONDS_MAP) "1m", "default", "5m", "15" in the same order in this array + // We will store the values corresponding to the labels (from lookback_seconds_map) "1m", "default", "5m", "15" in the same order in this array let mut stored_values: [i64; 4] = [0; 4]; { - for (i, (label, _)) in LOOKBACK_SECONDS_MAP.iter().enumerate() { + for (i, (label, _)) in lookback_seconds_map.iter().enumerate() { let mut metric_labels = mvtx_forward_metric_labels().clone(); metric_labels.push((PENDING_PERIOD_LABEL.to_string(), label.to_string())); let guage = monovertex_metrics() diff --git a/rust/numaflow-core/src/pipeline.rs b/rust/numaflow-core/src/pipeline.rs index e29ebe5043..d05d61aedb 100644 --- a/rust/numaflow-core/src/pipeline.rs +++ b/rust/numaflow-core/src/pipeline.rs @@ -330,6 +330,7 @@ mod tests { metrics_server_listen_port: 2469, lag_check_interval_in_secs: 5, lag_refresh_interval_in_secs: 3, + lookback_window_in_secs: 120, }, }; @@ -486,6 +487,7 @@ mod tests { metrics_server_listen_port: 2469, lag_check_interval_in_secs: 5, lag_refresh_interval_in_secs: 3, + lookback_window_in_secs: 120, }, }; diff --git a/rust/numaflow-core/src/shared/metrics.rs b/rust/numaflow-core/src/shared/metrics.rs index 0fe06e05d4..1b5a2a7db2 100644 --- a/rust/numaflow-core/src/shared/metrics.rs +++ b/rust/numaflow-core/src/shared/metrics.rs @@ -40,5 +40,6 @@ pub(crate) async fn create_pending_reader( .refresh_interval(Duration::from_secs( metrics_config.lag_refresh_interval_in_secs.into(), )) + .lookback_seconds(metrics_config.lookback_window_in_secs.into()) .build() } From d0405dd0941f04e2eece63920464a19cade46f6a Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" 
<41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 5 Dec 2024 10:40:26 -0800 Subject: [PATCH 154/188] docs: updated CHANGELOG.md (#2262) Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- CHANGELOG.md | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a2cc0e895e..8e21c675fd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,16 @@ # Changelog +## v1.4.1 (2024-12-05) + + * [346f2a73](https://github.com/numaproj/numaflow/commit/346f2a7321d158fa9ce9392cfdcc76d671d6f577) Update manifests to v1.4.1 + * [1343e4d4](https://github.com/numaproj/numaflow/commit/1343e4d47934afcea324d4426df810dd9e99d9ab) feat: add sdk infomation metrics (#2208) + * [1abb5ede](https://github.com/numaproj/numaflow/commit/1abb5ede3577016b7c2a923755e1445146efdb05) fix: Fix Sink Config to respect Fallback (#2261) + +### Contributors + + * Derek Wang + * Yashash H L + ## v1.4.0 (2024-11-08) * [6892c115](https://github.com/numaproj/numaflow/commit/6892c11590ea482c186724e55837dbcfb2100ce3) Update manifests to v1.4.0 @@ -283,6 +294,15 @@ * samhith-kakarla * xdevxy +## v1.2.2 (2024-11-15) + + * [61adf4e9](https://github.com/numaproj/numaflow/commit/61adf4e9805c2772d937a7513afcb3c14048127c) Update manifests to v1.2.2 + * [623cc4e2](https://github.com/numaproj/numaflow/commit/623cc4e2aaa2d67d196cb972bd525a60544d2148) fix: update key len (#2223) + +### Contributors + + * Sidhant Kohli + ## v1.2.1 (2024-05-07) * [89ea33f1](https://github.com/numaproj/numaflow/commit/89ea33f1d69785f6f5f17f1d5854ac189003918a) Update manifests to v1.2.1 From 11c0d2b40c314d81517fe8afeea30edd50f3cda4 Mon Sep 17 00:00:00 2001 From: Yashash H L Date: Fri, 6 Dec 2024 11:52:23 +0530 Subject: [PATCH 155/188] fix: Sink Config to respect Fallback (#2265) Signed-off-by: Yashash H L --- .../numaflow/v1alpha1/mono_vertex_types.go | 3 +- rust/numaflow-core/src/config/components.rs | 38 ++++----- 
rust/numaflow-core/src/config/monovertex.rs | 77 ++++++++++++++++++- rust/numaflow-core/src/config/pipeline.rs | 5 +- rust/src/bin/main.rs | 2 - 5 files changed, 99 insertions(+), 26 deletions(-) diff --git a/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go index 43e794967d..cdea87e5d0 100644 --- a/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go +++ b/pkg/apis/numaflow/v1alpha1/mono_vertex_types.go @@ -481,10 +481,9 @@ func (mvspec MonoVertexSpec) buildContainers(req getContainerReq) ([]corev1.Cont if mvspec.Sink.UDSink != nil { // Only support UDSink for now. sidecarContainers = append(sidecarContainers, mvspec.Sink.getUDSinkContainer(req)) } - if mvspec.Sink.Fallback != nil { + if mvspec.Sink.Fallback != nil && mvspec.Sink.Fallback.UDSink != nil { sidecarContainers = append(sidecarContainers, mvspec.Sink.getFallbackUDSinkContainer(req)) } - // Fallback sink is not supported. sidecarContainers = append(sidecarContainers, mvspec.Sidecars...) return sidecarContainers, containers } diff --git a/rust/numaflow-core/src/config/components.rs b/rust/numaflow-core/src/config/components.rs index db64b9cfc3..f5780ed9f0 100644 --- a/rust/numaflow-core/src/config/components.rs +++ b/rust/numaflow-core/src/config/components.rs @@ -189,14 +189,29 @@ pub(crate) mod sink { UserDefined(UserDefinedConfig), } - impl TryFrom> for SinkType { - type Error = Error; - + impl SinkType { // FIXME(cr): why is sink.fallback Box vs. sink Box. This is coming from // numaflow-models. Problem is, golang has embedded structures and rust does not. We might // have to AbstractSink for sink-configs while Sink for real sink types. // NOTE: I do not see this problem with Source? 
- fn try_from(sink: Box) -> Result { + pub(crate) fn primary_sinktype(sink: Box) -> Result { + sink.udsink + .as_ref() + .map(|_| Ok(SinkType::UserDefined(UserDefinedConfig::default()))) + .or_else(|| { + sink.log + .as_ref() + .map(|_| Ok(SinkType::Log(LogConfig::default()))) + }) + .or_else(|| { + sink.blackhole + .as_ref() + .map(|_| Ok(SinkType::Blackhole(BlackholeConfig::default()))) + }) + .ok_or_else(|| Error::Config("Sink type not found".to_string()))? + } + + pub(crate) fn fallback_sinktype(sink: Box) -> Result { if let Some(fallback) = sink.fallback { fallback .udsink @@ -216,20 +231,7 @@ pub(crate) mod sink { }) .ok_or_else(|| Error::Config("Sink type not found".to_string()))? } else { - sink.udsink - .as_ref() - .map(|_| Ok(SinkType::UserDefined(UserDefinedConfig::default()))) - .or_else(|| { - sink.log - .as_ref() - .map(|_| Ok(SinkType::Log(LogConfig::default()))) - }) - .or_else(|| { - sink.blackhole - .as_ref() - .map(|_| Ok(SinkType::Blackhole(BlackholeConfig::default()))) - }) - .ok_or_else(|| Error::Config("Sink type not found".to_string()))? 
+ Err(Error::Config("Fallback sink not found".to_string())) } } } diff --git a/rust/numaflow-core/src/config/monovertex.rs b/rust/numaflow-core/src/config/monovertex.rs index bf59313e6a..74a7e65fb0 100644 --- a/rust/numaflow-core/src/config/monovertex.rs +++ b/rust/numaflow-core/src/config/monovertex.rs @@ -1,3 +1,4 @@ +use crate::config::monovertex::sink::SinkType; use std::time::Duration; use base64::prelude::BASE64_STANDARD; @@ -116,13 +117,13 @@ impl MonovertexConfig { .ok_or_else(|| Error::Config("Sink not found".to_string()))?; let sink_config = SinkConfig { - sink_type: sink.clone().try_into()?, + sink_type: SinkType::primary_sinktype(sink.clone())?, retry_config: sink.retry_strategy.clone().map(|retry| retry.into()), }; let fb_sink_config = if sink.fallback.is_some() { Some(SinkConfig { - sink_type: sink.try_into()?, + sink_type: SinkType::fallback_sinktype(sink)?, retry_config: None, }) } else { @@ -297,4 +298,76 @@ mod tests { TransformerType::UserDefined(_) )); } + + #[test] + fn test_load_sink_and_fallback() { + let valid_config = r#" + { + "metadata": { + "name": "test_vertex" + }, + "spec": { + "limits": { + "readBatchSize": 1000, + "readTimeout": "2s" + }, + "source": { + "udsource": { + "container": { + "image": "xxxxxxx", + "resources": {} + } + } + }, + "sink": { + "udsink": { + "container": { + "image": "primary-sink", + "resources": {} + } + }, + "fallback": { + "udsink": { + "container": { + "image": "fallback-sink", + "resources": {} + } + } + } + } + } + } + "#; + let encoded_invalid_config = BASE64_STANDARD.encode(valid_config); + let spec = encoded_invalid_config.as_str(); + + let config = MonovertexConfig::load(spec.to_string()).unwrap(); + + assert_eq!(config.name, "test_vertex"); + assert!(matches!( + config.sink_config.sink_type, + SinkType::UserDefined(_) + )); + assert!(config.fb_sink_config.is_some()); + assert!(matches!( + config.fb_sink_config.clone().unwrap().sink_type, + SinkType::UserDefined(_) + )); + + if let 
SinkType::UserDefined(config) = config.sink_config.sink_type.clone() { + assert_eq!(config.socket_path, "/var/run/numaflow/sink.sock"); + assert_eq!( + config.server_info_path, + "/var/run/numaflow/sinker-server-info" + ); + } + + if let SinkType::UserDefined(config) = config.fb_sink_config.unwrap().sink_type { + assert_eq!(config.socket_path, "/var/run/numaflow/fb-sink.sock"); + assert_eq!( + config.server_info_path, + "/var/run/numaflow/fb-sinker-server-info" + ); + } + } } diff --git a/rust/numaflow-core/src/config/pipeline.rs b/rust/numaflow-core/src/config/pipeline.rs index eb940e5fce..844425a8c8 100644 --- a/rust/numaflow-core/src/config/pipeline.rs +++ b/rust/numaflow-core/src/config/pipeline.rs @@ -1,3 +1,4 @@ +use crate::config::components::sink::SinkType; use std::collections::HashMap; use std::env; use std::time::Duration; @@ -164,7 +165,7 @@ impl PipelineConfig { } else if let Some(sink) = vertex_obj.spec.sink { let fb_sink_config = if sink.fallback.as_ref().is_some() { Some(SinkConfig { - sink_type: sink.clone().try_into()?, + sink_type: SinkType::fallback_sinktype(sink.clone())?, retry_config: None, }) } else { @@ -173,7 +174,7 @@ impl PipelineConfig { VertexType::Sink(SinkVtxConfig { sink_config: SinkConfig { - sink_type: sink.try_into()?, + sink_type: SinkType::primary_sinktype(sink)?, retry_config: None, }, fb_sink_config, diff --git a/rust/src/bin/main.rs b/rust/src/bin/main.rs index d56cfff599..4ffd64ed7a 100644 --- a/rust/src/bin/main.rs +++ b/rust/src/bin/main.rs @@ -1,7 +1,5 @@ use std::env; -use std::time::Duration; -use tokio::time; use tracing::{error, info}; use tracing_subscriber::layer::SubscriberExt; use tracing_subscriber::util::SubscriberInitExt; From 5f69064ed836d91e73947d43853830d89895e5ba Mon Sep 17 00:00:00 2001 From: Yashash H L Date: Sat, 7 Dec 2024 22:13:58 +0530 Subject: [PATCH 156/188] feat: Introducing Tracker to track completeness of the Message (#2264) Signed-off-by: Yashash H L Signed-off-by: Vigith Maurice 
Co-authored-by: Vigith Maurice --- rust/Cargo.lock | 1 - rust/numaflow-core/Cargo.toml | 5 +- rust/numaflow-core/src/error.rs | 3 + rust/numaflow-core/src/lib.rs | 3 + rust/numaflow-core/src/message.rs | 8 +- rust/numaflow-core/src/monovertex.rs | 5 + .../numaflow-core/src/monovertex/forwarder.rs | 155 +++++++- rust/numaflow-core/src/pipeline.rs | 92 +++-- rust/numaflow-core/src/pipeline/forwarder.rs | 8 +- .../pipeline/forwarder/source_forwarder.rs | 12 +- .../src/pipeline/isb/jetstream.rs | 99 ++--- .../src/pipeline/isb/jetstream/reader.rs | 85 +++-- .../src/pipeline/isb/jetstream/writer.rs | 44 ++- .../src/shared/create_components.rs | 28 +- rust/numaflow-core/src/sink.rs | 118 +++--- rust/numaflow-core/src/sink/user_defined.rs | 12 +- rust/numaflow-core/src/source.rs | 46 +-- rust/numaflow-core/src/tracker.rs | 341 ++++++++++++++++++ rust/numaflow-core/src/transformer.rs | 143 +++++--- .../src/transformer/user_defined.rs | 85 ++--- 20 files changed, 993 insertions(+), 300 deletions(-) create mode 100644 rust/numaflow-core/src/tracker.rs diff --git a/rust/Cargo.lock b/rust/Cargo.lock index a5d40a88e8..d6a518cee6 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -1863,7 +1863,6 @@ dependencies = [ "futures", "hyper-util", "kube", - "log", "numaflow 0.1.1", "numaflow-models", "numaflow-pb", diff --git a/rust/numaflow-core/Cargo.toml b/rust/numaflow-core/Cargo.toml index c72b20b5d9..1389946079 100644 --- a/rust/numaflow-core/Cargo.toml +++ b/rust/numaflow-core/Cargo.toml @@ -37,16 +37,15 @@ backoff = { path = "../backoff" } parking_lot = "0.12.3" prometheus-client = "0.22.3" kube = "0.95.0" -log = "0.4.22" futures = "0.3.30" pin-project = "1.1.5" rand = "0.8.5" async-nats = "0.38.0" -numaflow-pulsar = {path = "../numaflow-extns/pulsar"} +numaflow-pulsar = { path = "../numaflow-extns/pulsar" } [dev-dependencies] tempfile = "3.11.0" numaflow = { git = "https://github.com/numaproj/numaflow-rs.git", rev = "ddd879588e11455921f1ca958ea2b3c076689293" } -pulsar = 
{version = "6.3.0", default-features = false, features = ["tokio-rustls-runtime"]} +pulsar = { version = "6.3.0", default-features = false, features = ["tokio-rustls-runtime"] } [build-dependencies] diff --git a/rust/numaflow-core/src/error.rs b/rust/numaflow-core/src/error.rs index 27cbcb6fc9..e82a93e2d8 100644 --- a/rust/numaflow-core/src/error.rs +++ b/rust/numaflow-core/src/error.rs @@ -49,6 +49,9 @@ pub enum Error { #[error("Lag cannot be fetched, {0}")] Lag(String), + + #[error("Task Error - {0}")] + Tracker(String), } impl From for Error { diff --git a/rust/numaflow-core/src/lib.rs b/rust/numaflow-core/src/lib.rs index f90633d06b..727a119f1b 100644 --- a/rust/numaflow-core/src/lib.rs +++ b/rust/numaflow-core/src/lib.rs @@ -48,6 +48,9 @@ pub(crate) mod metrics; /// [Pipeline]: https://numaflow.numaproj.io/core-concepts/pipeline/ mod pipeline; +/// Tracker to track the completeness of message processing. +mod tracker; + pub async fn run() -> Result<()> { let cln_token = CancellationToken::new(); let shutdown_cln_token = cln_token.clone(); diff --git a/rust/numaflow-core/src/message.rs b/rust/numaflow-core/src/message.rs index 10cb3063ac..a3436e6ab4 100644 --- a/rust/numaflow-core/src/message.rs +++ b/rust/numaflow-core/src/message.rs @@ -14,7 +14,6 @@ use numaflow_pb::clients::source::read_response; use numaflow_pb::clients::sourcetransformer::SourceTransformRequest; use prost::Message as ProtoMessage; use serde::{Deserialize, Serialize}; -use tokio::sync::oneshot; use crate::shared::grpc::prost_timestamp_from_utc; use crate::shared::grpc::utc_from_timestamp; @@ -86,7 +85,7 @@ impl TryFrom for Message { Ok(Self { keys, - value: payload, // FIXME: use Bytes + value: payload, offset, event_time, id, @@ -154,11 +153,6 @@ pub(crate) enum ReadAck { Nak, } -pub(crate) struct ReadMessage { - pub(crate) message: Message, - pub(crate) ack: oneshot::Sender, -} - #[derive(Debug, Clone, Serialize, Deserialize)] pub(crate) struct MessageID { pub(crate) vertex_name: String, 
diff --git a/rust/numaflow-core/src/monovertex.rs b/rust/numaflow-core/src/monovertex.rs index b8016624fa..5a830cfd15 100644 --- a/rust/numaflow-core/src/monovertex.rs +++ b/rust/numaflow-core/src/monovertex.rs @@ -8,6 +8,7 @@ use crate::error::{self}; use crate::shared::create_components; use crate::sink::SinkWriter; use crate::source::Source; +use crate::tracker::TrackerHandle; use crate::transformer::Transformer; use crate::{metrics, shared}; @@ -23,10 +24,12 @@ pub(crate) async fn start_forwarder( cln_token: CancellationToken, config: &MonovertexConfig, ) -> error::Result<()> { + let tracker_handle = TrackerHandle::new(); let (source, source_grpc_client) = create_components::create_source( config.batch_size, config.read_timeout, &config.source_config, + tracker_handle.clone(), cln_token.clone(), ) .await?; @@ -34,6 +37,7 @@ pub(crate) async fn start_forwarder( let (transformer, transformer_grpc_client) = create_components::create_transformer( config.batch_size, config.transformer_config.clone(), + tracker_handle.clone(), cln_token.clone(), ) .await?; @@ -44,6 +48,7 @@ pub(crate) async fn start_forwarder( config.read_timeout, config.sink_config.clone(), config.fb_sink_config.clone(), + tracker_handle, &cln_token, ) .await?; diff --git a/rust/numaflow-core/src/monovertex/forwarder.rs b/rust/numaflow-core/src/monovertex/forwarder.rs index dc77154c6f..dcdfdb21a5 100644 --- a/rust/numaflow-core/src/monovertex/forwarder.rs +++ b/rust/numaflow-core/src/monovertex/forwarder.rs @@ -4,12 +4,16 @@ //! //! ```text //! (source) --[c]--> (transformer)* --[c]--> (sink) +//! | | | +//! | v | +//! +--------------> tracker <----------------+ //! //! [c] - channel //! * - optional //! ``` //! -//! Most of the data move forward except for the ack which can happen only after the Write. +//! Most of the data move forward except for the `ack` which can happen only after the that the tracker +//! has guaranteed that the processing complete. //! ```text //! 
(Read) +-------> (UDF) -------> (Write) + //! | | @@ -142,6 +146,7 @@ mod tests { use crate::sink::{SinkClientType, SinkWriterBuilder}; use crate::source::user_defined::new_source; use crate::source::{Source, SourceType}; + use crate::tracker::TrackerHandle; use crate::transformer::Transformer; use crate::Result; @@ -256,10 +261,11 @@ mod tests { .await .map_err(|e| panic!("failed to create source reader: {:?}", e)) .unwrap(); - + let tracker_handle = TrackerHandle::new(); let source = Source::new( 5, SourceType::UserDefinedSource(src_read, src_ack, lag_reader), + tracker_handle.clone(), ); // create a transformer @@ -283,13 +289,148 @@ mod tests { tokio::time::sleep(Duration::from_millis(100)).await; let client = SourceTransformClient::new(create_rpc_channel(sock_file).await.unwrap()); - let transformer = Transformer::new(10, 10, client).await.unwrap(); + let transformer = Transformer::new(10, 10, client, tracker_handle.clone()) + .await + .unwrap(); + + let sink_writer = SinkWriterBuilder::new( + 10, + Duration::from_millis(100), + SinkClientType::Log, + tracker_handle.clone(), + ) + .build() + .await + .unwrap(); + + // create the forwarder with the source, transformer, and writer + let forwarder = ForwarderBuilder::new(source.clone(), sink_writer, cln_token.clone()) + .transformer(transformer) + .build(); + + let forwarder_handle: JoinHandle> = tokio::spawn(async move { + forwarder.start().await?; + Ok(()) + }); + + // wait for one sec to check if the pending becomes zero, because all the messages + // should be read and acked; if it doesn't, then fail the test + let tokio_result = tokio::time::timeout(Duration::from_secs(1), async move { + loop { + let pending = source.pending().await.unwrap(); + if pending == Some(0) { + break; + } + tokio::time::sleep(Duration::from_millis(10)).await; + } + }) + .await; - let sink_writer = - SinkWriterBuilder::new(10, Duration::from_millis(100), SinkClientType::Log) - .build() + assert!( + tokio_result.is_ok(), + "Timeout 
occurred before pending became zero" + ); + cln_token.cancel(); + forwarder_handle.await.unwrap().unwrap(); + st_shutdown_tx.send(()).unwrap(); + src_shutdown_tx.send(()).unwrap(); + source_handle.await.unwrap(); + transformer_handle.await.unwrap(); + } + + struct FlatMapTransformer; + + #[tonic::async_trait] + impl sourcetransform::SourceTransformer for FlatMapTransformer { + async fn transform( + &self, + _input: sourcetransform::SourceTransformRequest, + ) -> Vec { + let mut output = vec![]; + for i in 0..5 { + let message = sourcetransform::Message::new(i.to_string().into_bytes(), Utc::now()) + .keys(vec![format!("key-{}", i)]) + .tags(vec![]); + output.push(message); + } + output + } + } + + #[tokio::test] + async fn test_flatmap_operation() { + let tracker_handle = TrackerHandle::new(); + // create the source which produces x number of messages + let cln_token = CancellationToken::new(); + + let (src_shutdown_tx, src_shutdown_rx) = oneshot::channel(); + let tmp_dir = TempDir::new().unwrap(); + let sock_file = tmp_dir.path().join("source.sock"); + let server_info_file = tmp_dir.path().join("source-server-info"); + + let server_info = server_info_file.clone(); + let server_socket = sock_file.clone(); + let source_handle = tokio::spawn(async move { + // a simple source which generates total of 100 messages + source::Server::new(SimpleSource::new(100)) + .with_socket_file(server_socket) + .with_server_info_file(server_info) + .start_with_shutdown(src_shutdown_rx) .await - .unwrap(); + .unwrap() + }); + + // wait for the server to start + // TODO: flaky + tokio::time::sleep(Duration::from_millis(100)).await; + + let client = SourceClient::new(create_rpc_channel(sock_file).await.unwrap()); + + let (src_read, src_ack, lag_reader) = new_source(client, 5, Duration::from_millis(1000)) + .await + .map_err(|e| panic!("failed to create source reader: {:?}", e)) + .unwrap(); + + let source = Source::new( + 5, + SourceType::UserDefinedSource(src_read, src_ack, lag_reader), 
+ tracker_handle.clone(), + ); + + // create a transformer + let (st_shutdown_tx, st_shutdown_rx) = oneshot::channel(); + let tmp_dir = TempDir::new().unwrap(); + let sock_file = tmp_dir.path().join("sourcetransform.sock"); + let server_info_file = tmp_dir.path().join("sourcetransformer-server-info"); + + let server_info = server_info_file.clone(); + let server_socket = sock_file.clone(); + let transformer_handle = tokio::spawn(async move { + sourcetransform::Server::new(FlatMapTransformer) + .with_socket_file(server_socket) + .with_server_info_file(server_info) + .start_with_shutdown(st_shutdown_rx) + .await + .expect("server failed"); + }); + + // wait for the server to start + tokio::time::sleep(Duration::from_millis(100)).await; + + let client = SourceTransformClient::new(create_rpc_channel(sock_file).await.unwrap()); + let transformer = Transformer::new(10, 10, client, tracker_handle.clone()) + .await + .unwrap(); + + let sink_writer = SinkWriterBuilder::new( + 10, + Duration::from_millis(100), + SinkClientType::Log, + tracker_handle.clone(), + ) + .build() + .await + .unwrap(); // create the forwarder with the source, transformer, and writer let forwarder = ForwarderBuilder::new(source.clone(), sink_writer, cln_token.clone()) diff --git a/rust/numaflow-core/src/pipeline.rs b/rust/numaflow-core/src/pipeline.rs index d05d61aedb..d3b2e076f1 100644 --- a/rust/numaflow-core/src/pipeline.rs +++ b/rust/numaflow-core/src/pipeline.rs @@ -1,3 +1,4 @@ +use crate::pipeline::pipeline::isb::BufferReaderConfig; use std::time::Duration; use async_nats::jetstream::Context; @@ -15,6 +16,7 @@ use crate::pipeline::isb::jetstream::ISBWriter; use crate::shared::create_components; use crate::shared::create_components::create_sink_writer; use crate::shared::metrics::start_metrics_server; +use crate::tracker::TrackerHandle; use crate::{error, Result}; mod forwarder; @@ -43,20 +45,29 @@ async fn start_source_forwarder( config: PipelineConfig, source_config: SourceVtxConfig, ) -> 
Result<()> { + let tracker_handle = TrackerHandle::new(); let js_context = create_js_context(config.js_client_config.clone()).await?; - let buffer_writer = create_buffer_writer(&config, js_context.clone(), cln_token.clone()).await; + let buffer_writer = create_buffer_writer( + &config, + js_context.clone(), + tracker_handle.clone(), + cln_token.clone(), + ) + .await; let (source, source_grpc_client) = create_components::create_source( config.batch_size, config.read_timeout, &source_config.source_config, + tracker_handle.clone(), cln_token.clone(), ) .await?; let (transformer, transformer_grpc_client) = create_components::create_transformer( config.batch_size, source_config.transformer_config.clone(), + tracker_handle, cln_token.clone(), ) .await?; @@ -90,17 +101,35 @@ async fn start_sink_forwarder( ) -> Result<()> { let js_context = create_js_context(config.js_client_config.clone()).await?; - // Create buffer readers for each partition - let buffer_readers = create_buffer_readers(&config, js_context.clone()).await?; + // Only the reader config of the first "from" vertex is needed, as all "from" vertices currently write + // to a common buffer, in the case of a join. + let reader_config = &config + .from_vertex_config + .first() + .ok_or_else(|| error::Error::Config("No from vertex config found".to_string()))? 
+ .reader_config; + + // Create sink writers and buffer readers for each stream + let mut sink_writers = vec![]; + let mut buffer_readers = vec![]; + for stream in reader_config.streams.clone() { + let tracker_handle = TrackerHandle::new(); + + let buffer_reader = create_buffer_reader( + stream, + reader_config.clone(), + js_context.clone(), + tracker_handle.clone(), + ) + .await?; + buffer_readers.push(buffer_reader); - // Create sink writers and clients - let mut sink_writers = Vec::new(); - for _ in &buffer_readers { let (sink_writer, sink_grpc_client, fb_sink_grpc_client) = create_sink_writer( config.batch_size, config.read_timeout, sink.sink_config.clone(), sink.fb_sink_config.clone(), + tracker_handle, &cln_token, ) .await?; @@ -148,6 +177,7 @@ async fn start_sink_forwarder( async fn create_buffer_writer( config: &PipelineConfig, js_context: Context, + tracker_handle: TrackerHandle, cln_token: CancellationToken, ) -> ISBWriter { ISBWriter::new( @@ -158,36 +188,26 @@ async fn create_buffer_writer( .map(|tv| tv.writer_config.clone()) .collect(), js_context, + tracker_handle, cln_token, ) .await } -async fn create_buffer_readers( - config: &PipelineConfig, +async fn create_buffer_reader( + stream: (&'static str, u16), + reader_config: BufferReaderConfig, js_context: Context, -) -> Result> { - // Only the reader config of the first "from" vertex is needed, as all "from" vertices currently write - // to a common buffer, in the case of a join. - let reader_config = &config - .from_vertex_config - .first() - .ok_or_else(|| error::Error::Config("No from vertex config found".to_string()))? 
- .reader_config; - - let mut readers = Vec::new(); - for stream in &reader_config.streams { - let reader = JetstreamReader::new( - stream.0, - stream.1, - js_context.clone(), - reader_config.clone(), - ) - .await?; - readers.push(reader); - } - - Ok(readers) + tracker_handle: TrackerHandle, +) -> Result { + JetstreamReader::new( + stream.0, + stream.1, + js_context, + reader_config, + tracker_handle, + ) + .await } /// Creates a jetstream context based on the provided configuration @@ -256,6 +276,8 @@ mod tests { // that messages were actually written to the streams. for stream_name in &streams { let stream_name = *stream_name; + // Delete stream if it exists + let _ = context.delete_stream(stream_name).await; let _stream = context .get_or_create_stream(stream::Config { name: stream_name.into(), @@ -334,7 +356,7 @@ mod tests { }, }; - let cancellation_token = tokio_util::sync::CancellationToken::new(); + let cancellation_token = CancellationToken::new(); let forwarder_task = tokio::spawn({ let cancellation_token = cancellation_token.clone(); async move { @@ -350,7 +372,7 @@ mod tests { forwarder_task.await.unwrap(); for (stream_name, stream_consumer) in consumers { - let messages: Vec = stream_consumer + let messages: Vec = stream_consumer .batch() .max_messages(10) .expires(Duration::from_millis(50)) @@ -375,7 +397,7 @@ mod tests { #[cfg(feature = "nats-tests")] #[tokio::test] - async fn test_forwarder_for_sink_vetex() { + async fn test_forwarder_for_sink_vertex() { // Unique names for the streams we use in this test let streams = vec![ "default-test-forwarder-for-sink-vertex-out-0", @@ -455,7 +477,7 @@ mod tests { vertex_name: "in".to_string(), replica: 0, batch_size: 1000, - paf_concurrency: 30000, + paf_concurrency: 1000, read_timeout: Duration::from_secs(1), js_client_config: isb::jetstream::ClientConfig { url: "localhost:4222".to_string(), @@ -491,7 +513,7 @@ mod tests { }, }; - let cancellation_token = tokio_util::sync::CancellationToken::new(); + let 
cancellation_token = CancellationToken::new(); let forwarder_task = tokio::spawn({ let cancellation_token = cancellation_token.clone(); async move { diff --git a/rust/numaflow-core/src/pipeline/forwarder.rs b/rust/numaflow-core/src/pipeline/forwarder.rs index 9c1f8deeff..e87a15ef48 100644 --- a/rust/numaflow-core/src/pipeline/forwarder.rs +++ b/rust/numaflow-core/src/pipeline/forwarder.rs @@ -4,13 +4,19 @@ //! //! ```text //! (source) --[c]--> (transformer)* --[c]--> ==> (map)* --[c]--> ===> (reducer)* --[c]--> ===> --[c]--> (sink) +//! | | | | | +//! | | | | | +//! | | v | | +//! +-------------------+------------------> tracker <-----------------+--------------------------------+ +//! //! //! ==> - ISB //! [c] - channel //! * - optional //! ``` //! -//! Most of the data move forward except for the ack which can happen only after the Write. +//! Most of the data move forward except for the `ack` which can happen only after the that the tracker +//! has guaranteed that the processing complete. //! ```text //! (Read) +-------> (UDF) -------> (Write) + //! 
| | diff --git a/rust/numaflow-core/src/pipeline/forwarder/source_forwarder.rs b/rust/numaflow-core/src/pipeline/forwarder/source_forwarder.rs index 6246334447..065b19e48c 100644 --- a/rust/numaflow-core/src/pipeline/forwarder/source_forwarder.rs +++ b/rust/numaflow-core/src/pipeline/forwarder/source_forwarder.rs @@ -119,6 +119,7 @@ mod tests { use crate::shared::grpc::create_rpc_channel; use crate::source::user_defined::new_source; use crate::source::{Source, SourceType}; + use crate::tracker::TrackerHandle; use crate::transformer::Transformer; use crate::Result; @@ -204,6 +205,8 @@ mod tests { #[cfg(feature = "nats-tests")] #[tokio::test] async fn test_source_forwarder() { + let tracker_handle = TrackerHandle::new(); + // create the source which produces x number of messages let cln_token = CancellationToken::new(); @@ -238,6 +241,7 @@ mod tests { let source = Source::new( 5, SourceType::UserDefinedSource(src_read, src_ack, lag_reader), + tracker_handle.clone(), ); // create a js writer @@ -247,6 +251,8 @@ mod tests { let context = jetstream::new(client); let stream_name = "test_source_forwarder"; + // Delete stream if it exists + let _ = context.delete_stream(stream_name).await; let _stream = context .get_or_create_stream(stream::Config { name: stream_name.into(), @@ -276,6 +282,7 @@ mod tests { ..Default::default() }], context.clone(), + tracker_handle.clone(), cln_token.clone(), ) .await; @@ -299,9 +306,10 @@ mod tests { // wait for the server to start tokio::time::sleep(Duration::from_millis(100)).await; - let client = SourceTransformClient::new(create_rpc_channel(sock_file).await.unwrap()); - let transformer = Transformer::new(10, 10, client).await.unwrap(); + let transformer = Transformer::new(10, 10, client, tracker_handle) + .await + .unwrap(); // create the forwarder with the source, transformer, and writer let forwarder = SourceForwarderBuilder::new(source.clone(), writer, cln_token.clone()) diff --git a/rust/numaflow-core/src/pipeline/isb/jetstream.rs 
b/rust/numaflow-core/src/pipeline/isb/jetstream.rs index 9f3635861e..d4f37cf184 100644 --- a/rust/numaflow-core/src/pipeline/isb/jetstream.rs +++ b/rust/numaflow-core/src/pipeline/isb/jetstream.rs @@ -7,12 +7,12 @@ use tokio_util::sync::CancellationToken; use tracing::info; use crate::config::pipeline::isb::BufferWriterConfig; -use crate::error::Error; -use crate::message::{ReadAck, ReadMessage}; +use crate::message::Message; use crate::metrics::{pipeline_isb_metric_labels, pipeline_metrics}; use crate::pipeline::isb::jetstream::writer::{ JetstreamWriter, PafResolver, ResolveAndPublishResult, }; +use crate::tracker::TrackerHandle; use crate::Result; /// JetStream Writer is responsible for writing messages to JetStream ISB. @@ -34,6 +34,7 @@ pub(crate) struct ISBWriter { paf_concurrency: usize, config: Vec, writer: JetstreamWriter, + tracker_handle: TrackerHandle, } impl ISBWriter { @@ -41,6 +42,7 @@ impl ISBWriter { paf_concurrency: usize, config: Vec, js_ctx: Context, + tracker_handle: TrackerHandle, cancel_token: CancellationToken, ) -> Self { info!(?config, paf_concurrency, "Streaming JetstreamWriter",); @@ -57,39 +59,40 @@ impl ISBWriter { config, writer: js_writer, paf_concurrency, + tracker_handle, } } /// Starts reading messages from the stream and writes them to Jetstream ISB. 
pub(crate) async fn streaming_write( &self, - messages_stream: ReceiverStream, + messages_stream: ReceiverStream, ) -> Result>> { let handle: JoinHandle> = tokio::spawn({ let writer = self.writer.clone(); let paf_concurrency = self.paf_concurrency; let config = self.config.clone(); + let tracker_handle = self.tracker_handle.clone(); + let mut messages_stream = messages_stream; let mut index = 0; async move { - let paf_resolver = PafResolver::new(paf_concurrency, writer.clone()); - while let Some(read_message) = messages_stream.next().await { + let paf_resolver = + PafResolver::new(paf_concurrency, writer.clone(), tracker_handle.clone()); + while let Some(message) = messages_stream.next().await { // if message needs to be dropped, ack and continue // TODO: add metric for dropped count - if read_message.message.dropped() { - read_message - .ack - .send(ReadAck::Ack) - .map_err(|e| Error::ISB(format!("Failed to send ack: {:?}", e)))?; + if message.dropped() { + // delete the entry from tracker + tracker_handle.delete(message.id.offset).await?; continue; } let mut pafs = vec![]; // FIXME(CF): This is a temporary solution to round-robin the streams for buffer in &config { - let payload: BytesMut = read_message - .message + let payload: BytesMut = message .clone() .try_into() .expect("message serialization should not fail"); @@ -109,8 +112,8 @@ impl ISBWriter { paf_resolver .resolve_pafs(ResolveAndPublishResult { pafs, - payload: read_message.message.value.clone().into(), - ack_tx: read_message.ack, + payload: message.value.clone().into(), + offset: message.id.offset, }) .await?; } @@ -128,7 +131,6 @@ mod tests { use async_nats::jetstream; use async_nats::jetstream::{consumer, stream}; use chrono::Utc; - use tokio::sync::oneshot; use super::*; use crate::message::{Message, MessageID, ReadAck}; @@ -141,8 +143,11 @@ mod tests { // Create JetStream context let client = async_nats::connect(js_url).await.unwrap(); let context = jetstream::new(client); + let tracker_handle = 
TrackerHandle::new(); let stream_name = "test_publish_messages"; + // Delete stream if it exists + let _ = context.delete_stream(stream_name).await; let _stream = context .get_or_create_stream(stream::Config { name: stream_name.into(), @@ -173,12 +178,13 @@ mod tests { ..Default::default() }], context.clone(), + tracker_handle.clone(), cln_token.clone(), ) .await; - let mut ack_receivers = Vec::new(); let (messages_tx, messages_rx) = tokio::sync::mpsc::channel(500); + let mut ack_rxs = vec![]; // Publish 500 messages for i in 0..500 { let message = Message { @@ -193,23 +199,24 @@ mod tests { }, headers: HashMap::new(), }; - let (sender, receiver) = oneshot::channel(); - let read_message = ReadMessage { - message, - ack: sender, - }; - messages_tx.send(read_message).await.unwrap(); - ack_receivers.push(receiver); + let (ack_tx, ack_rx) = tokio::sync::oneshot::channel(); + tracker_handle + .insert(message.id.offset.clone(), ack_tx) + .await + .unwrap(); + ack_rxs.push(ack_rx); + messages_tx.send(message).await.unwrap(); } drop(messages_tx); let receiver_stream = ReceiverStream::new(messages_rx); let _handle = writer.streaming_write(receiver_stream).await.unwrap(); - for receiver in ack_receivers { - let result = receiver.await.unwrap(); - assert_eq!(result, ReadAck::Ack); + for ack_rx in ack_rxs { + assert_eq!(ack_rx.await.unwrap(), ReadAck::Ack); } + // make sure all messages are acked + assert!(tracker_handle.is_empty().await.unwrap()); context.delete_stream(stream_name).await.unwrap(); } @@ -220,8 +227,11 @@ mod tests { // Create JetStream context let client = async_nats::connect(js_url).await.unwrap(); let context = jetstream::new(client); + let tracker_handle = TrackerHandle::new(); let stream_name = "test_publish_cancellation"; + // Delete stream if it exists + let _ = context.delete_stream(stream_name).await; let _stream = context .get_or_create_stream(stream::Config { name: stream_name.into(), @@ -252,12 +262,13 @@ mod tests { ..Default::default() }], 
context.clone(), + tracker_handle.clone(), cancel_token.clone(), ) .await; - let mut ack_receivers = Vec::new(); let (tx, rx) = tokio::sync::mpsc::channel(500); + let mut ack_rxs = vec![]; // Publish 100 messages successfully for i in 0..100 { let message = Message { @@ -272,13 +283,13 @@ mod tests { }, headers: HashMap::new(), }; - let (sender, receiver) = oneshot::channel(); - let read_message = ReadMessage { - message, - ack: sender, - }; - tx.send(read_message).await.unwrap(); - ack_receivers.push(receiver); + let (ack_tx, ack_rx) = tokio::sync::oneshot::channel(); + tracker_handle + .insert(message.id.offset.clone(), ack_tx) + .await + .unwrap(); + ack_rxs.push(ack_rx); + tx.send(message).await.unwrap(); } let receiver_stream = ReceiverStream::new(rx); @@ -298,20 +309,19 @@ mod tests { }, headers: HashMap::new(), }; - let (sender, receiver) = oneshot::channel(); - let read_message = ReadMessage { - message, - ack: sender, - }; - tx.send(read_message).await.unwrap(); - ack_receivers.push(receiver); + let (ack_tx, ack_rx) = tokio::sync::oneshot::channel(); + tracker_handle + .insert("offset_101".to_string(), ack_tx) + .await + .unwrap(); + ack_rxs.push(ack_rx); + tx.send(message).await.unwrap(); drop(tx); // Cancel the token to exit the retry loop cancel_token.cancel(); - // Check the results - for (i, receiver) in ack_receivers.into_iter().enumerate() { + for (i, receiver) in ack_rxs.into_iter().enumerate() { let result = receiver.await.unwrap(); if i < 100 { assert_eq!(result, ReadAck::Ack); @@ -319,6 +329,9 @@ mod tests { assert_eq!(result, ReadAck::Nak); } } + + // make sure all messages are acked + assert!(tracker_handle.is_empty().await.unwrap()); context.delete_stream(stream_name).await.unwrap(); } } diff --git a/rust/numaflow-core/src/pipeline/isb/jetstream/reader.rs b/rust/numaflow-core/src/pipeline/isb/jetstream/reader.rs index 6e0aff77bc..a9e3622005 100644 --- a/rust/numaflow-core/src/pipeline/isb/jetstream/reader.rs +++ 
b/rust/numaflow-core/src/pipeline/isb/jetstream/reader.rs @@ -15,10 +15,11 @@ use tracing::{error, info}; use crate::config::pipeline::isb::BufferReaderConfig; use crate::config::pipeline::PipelineConfig; use crate::error::Error; -use crate::message::{IntOffset, Message, Offset, ReadAck, ReadMessage}; +use crate::message::{IntOffset, Message, MessageID, Offset, ReadAck}; use crate::metrics::{ pipeline_forward_metric_labels, pipeline_isb_metric_labels, pipeline_metrics, }; +use crate::tracker::TrackerHandle; use crate::Result; /// The JetstreamReader is a handle to the background actor that continuously fetches messages from Jetstream. @@ -31,6 +32,7 @@ pub(crate) struct JetstreamReader { partition_idx: u16, config: BufferReaderConfig, consumer: PullConsumer, + tracker_handle: TrackerHandle, } impl JetstreamReader { @@ -39,6 +41,7 @@ impl JetstreamReader { partition_idx: u16, js_ctx: Context, config: BufferReaderConfig, + tracker_handle: TrackerHandle, ) -> Result { let mut config = config; @@ -65,6 +68,7 @@ impl JetstreamReader { partition_idx, config: config.clone(), consumer, + tracker_handle, }) } @@ -78,13 +82,15 @@ impl JetstreamReader { &self, cancel_token: CancellationToken, pipeline_config: &PipelineConfig, - ) -> Result<(ReceiverStream, JoinHandle>)> { + ) -> Result<(ReceiverStream, JoinHandle>)> { let (messages_tx, messages_rx) = mpsc::channel(2 * pipeline_config.batch_size); + let pipeline_config = pipeline_config.clone(); let handle: JoinHandle> = tokio::spawn({ let consumer = self.consumer.clone(); let partition_idx = self.partition_idx; let config = self.config.clone(); + let tracker_handle = self.tracker_handle.clone(); let cancel_token = cancel_token.clone(); let stream_name = self.stream_name; @@ -144,19 +150,23 @@ impl JetstreamReader { partition_idx, ))); + message.id = MessageID { + vertex_name: pipeline_config.vertex_name.clone(), + offset: msg_info.stream_sequence.to_string(), + index: 0, + }; + + // Insert the message into the tracker and 
wait for the ack to be sent back. let (ack_tx, ack_rx) = oneshot::channel(); + tracker_handle.insert(message.id.offset.clone(), ack_tx).await?; + tokio::spawn(Self::start_work_in_progress( jetstream_message, ack_rx, config.wip_ack_interval, )); - let read_message = ReadMessage { - message, - ack: ack_tx, - }; - - messages_tx.send(read_message).await.map_err(|e| { + messages_tx.send(message).await.map_err(|e| { Error::ISB(format!("Error while sending message to channel: {:?}", e)) })?; @@ -261,15 +271,14 @@ impl fmt::Display for JetstreamReader { mod tests { use std::collections::HashMap; + use super::*; + use crate::message::{Message, MessageID}; + use crate::pipeline::isb::jetstream::writer::JetstreamWriter; use async_nats::jetstream; use async_nats::jetstream::{consumer, stream}; use bytes::BytesMut; use chrono::Utc; - - use super::*; - use crate::message::ReadAck::Ack; - use crate::message::{Message, MessageID}; - use crate::pipeline::isb::jetstream::writer::JetstreamWriter; + use tokio::time::sleep; #[cfg(feature = "nats-tests")] #[tokio::test] @@ -280,6 +289,8 @@ mod tests { let context = jetstream::new(client); let stream_name = "test_jetstream_read"; + // Delete stream if it exists + let _ = context.delete_stream(stream_name).await; context .get_or_create_stream(stream::Config { name: stream_name.into(), @@ -307,9 +318,15 @@ mod tests { streams: vec![], wip_ack_interval: Duration::from_millis(5), }; - let js_reader = JetstreamReader::new(stream_name, 0, context.clone(), buf_reader_config) - .await - .unwrap(); + let js_reader = JetstreamReader::new( + stream_name, + 0, + context.clone(), + buf_reader_config, + TrackerHandle::new(), + ) + .await + .unwrap(); let pipeline_cfg_base64 = 
"eyJtZXRhZGF0YSI6eyJuYW1lIjoic2ltcGxlLXBpcGVsaW5lLW91dCIsIm5hbWVzcGFjZSI6ImRlZmF1bHQiLCJjcmVhdGlvblRpbWVzdGFtcCI6bnVsbH0sInNwZWMiOnsibmFtZSI6Im91dCIsInNpbmsiOnsiYmxhY2tob2xlIjp7fSwicmV0cnlTdHJhdGVneSI6eyJvbkZhaWx1cmUiOiJyZXRyeSJ9fSwibGltaXRzIjp7InJlYWRCYXRjaFNpemUiOjUwMCwicmVhZFRpbWVvdXQiOiIxcyIsImJ1ZmZlck1heExlbmd0aCI6MzAwMDAsImJ1ZmZlclVzYWdlTGltaXQiOjgwfSwic2NhbGUiOnsibWluIjoxfSwidXBkYXRlU3RyYXRlZ3kiOnsidHlwZSI6IlJvbGxpbmdVcGRhdGUiLCJyb2xsaW5nVXBkYXRlIjp7Im1heFVuYXZhaWxhYmxlIjoiMjUlIn19LCJwaXBlbGluZU5hbWUiOiJzaW1wbGUtcGlwZWxpbmUiLCJpbnRlclN0ZXBCdWZmZXJTZXJ2aWNlTmFtZSI6IiIsInJlcGxpY2FzIjowLCJmcm9tRWRnZXMiOlt7ImZyb20iOiJpbiIsInRvIjoib3V0IiwiY29uZGl0aW9ucyI6bnVsbCwiZnJvbVZlcnRleFR5cGUiOiJTb3VyY2UiLCJmcm9tVmVydGV4UGFydGl0aW9uQ291bnQiOjEsImZyb21WZXJ0ZXhMaW1pdHMiOnsicmVhZEJhdGNoU2l6ZSI6NTAwLCJyZWFkVGltZW91dCI6IjFzIiwiYnVmZmVyTWF4TGVuZ3RoIjozMDAwMCwiYnVmZmVyVXNhZ2VMaW1pdCI6ODB9LCJ0b1ZlcnRleFR5cGUiOiJTaW5rIiwidG9WZXJ0ZXhQYXJ0aXRpb25Db3VudCI6MSwidG9WZXJ0ZXhMaW1pdHMiOnsicmVhZEJhdGNoU2l6ZSI6NTAwLCJyZWFkVGltZW91dCI6IjFzIiwiYnVmZmVyTWF4TGVuZ3RoIjozMDAwMCwiYnVmZmVyVXNhZ2VMaW1pdCI6ODB9fV0sIndhdGVybWFyayI6eyJtYXhEZWxheSI6IjBzIn19LCJzdGF0dXMiOnsicGhhc2UiOiIiLCJyZXBsaWNhcyI6MCwiZGVzaXJlZFJlcGxpY2FzIjowLCJsYXN0U2NhbGVkQXQiOm51bGx9fQ==".to_string(); @@ -380,8 +397,11 @@ mod tests { // Create JetStream context let client = async_nats::connect(js_url).await.unwrap(); let context = jetstream::new(client); + let tracker_handle = TrackerHandle::new(); let stream_name = "test_ack"; + // Delete stream if it exists + let _ = context.delete_stream(stream_name).await; context .get_or_create_stream(stream::Config { name: stream_name.into(), @@ -409,9 +429,15 @@ mod tests { streams: vec![], wip_ack_interval: Duration::from_millis(5), }; - let js_reader = JetstreamReader::new(stream_name, 0, context.clone(), buf_reader_config) - .await - .unwrap(); + let js_reader = JetstreamReader::new( + stream_name, + 0, + context.clone(), + buf_reader_config, + tracker_handle.clone(), + ) + .await + 
.unwrap(); let pipeline_cfg_base64 = "eyJtZXRhZGF0YSI6eyJuYW1lIjoic2ltcGxlLXBpcGVsaW5lLW91dCIsIm5hbWVzcGFjZSI6ImRlZmF1bHQiLCJjcmVhdGlvblRpbWVzdGFtcCI6bnVsbH0sInNwZWMiOnsibmFtZSI6Im91dCIsInNpbmsiOnsiYmxhY2tob2xlIjp7fSwicmV0cnlTdHJhdGVneSI6eyJvbkZhaWx1cmUiOiJyZXRyeSJ9fSwibGltaXRzIjp7InJlYWRCYXRjaFNpemUiOjUwMCwicmVhZFRpbWVvdXQiOiIxcyIsImJ1ZmZlck1heExlbmd0aCI6MzAwMDAsImJ1ZmZlclVzYWdlTGltaXQiOjgwfSwic2NhbGUiOnsibWluIjoxfSwidXBkYXRlU3RyYXRlZ3kiOnsidHlwZSI6IlJvbGxpbmdVcGRhdGUiLCJyb2xsaW5nVXBkYXRlIjp7Im1heFVuYXZhaWxhYmxlIjoiMjUlIn19LCJwaXBlbGluZU5hbWUiOiJzaW1wbGUtcGlwZWxpbmUiLCJpbnRlclN0ZXBCdWZmZXJTZXJ2aWNlTmFtZSI6IiIsInJlcGxpY2FzIjowLCJmcm9tRWRnZXMiOlt7ImZyb20iOiJpbiIsInRvIjoib3V0IiwiY29uZGl0aW9ucyI6bnVsbCwiZnJvbVZlcnRleFR5cGUiOiJTb3VyY2UiLCJmcm9tVmVydGV4UGFydGl0aW9uQ291bnQiOjEsImZyb21WZXJ0ZXhMaW1pdHMiOnsicmVhZEJhdGNoU2l6ZSI6NTAwLCJyZWFkVGltZW91dCI6IjFzIiwiYnVmZmVyTWF4TGVuZ3RoIjozMDAwMCwiYnVmZmVyVXNhZ2VMaW1pdCI6ODB9LCJ0b1ZlcnRleFR5cGUiOiJTaW5rIiwidG9WZXJ0ZXhQYXJ0aXRpb25Db3VudCI6MSwidG9WZXJ0ZXhMaW1pdHMiOnsicmVhZEJhdGNoU2l6ZSI6NTAwLCJyZWFkVGltZW91dCI6IjFzIiwiYnVmZmVyTWF4TGVuZ3RoIjozMDAwMCwiYnVmZmVyVXNhZ2VMaW1pdCI6ODB9fV0sIndhdGVybWFyayI6eyJtYXhEZWxheSI6IjBzIn19LCJzdGF0dXMiOnsicGhhc2UiOiIiLCJyZXBsaWNhcyI6MCwiZGVzaXJlZFJlcGxpY2FzIjowLCJsYXN0U2NhbGVkQXQiOm51bGx9fQ==".to_string(); @@ -431,6 +457,7 @@ mod tests { writer_cancel_token.clone(), ); + let mut offsets = vec![]; // write 5 messages for i in 0..5 { let message = Message { @@ -440,11 +467,12 @@ mod tests { event_time: Utc::now(), id: MessageID { vertex_name: "vertex".to_string(), - offset: format!("offset_{}", i), + offset: format!("{}", i + 1), index: i, }, headers: HashMap::new(), }; + offsets.push(message.id.offset.clone()); let message_bytes: BytesMut = message.try_into().unwrap(); writer .write((stream_name.to_string(), 0), message_bytes.into()) @@ -456,12 +484,25 @@ mod tests { writer_cancel_token.cancel(); for _ in 0..5 { - let Some(val) = js_reader_rx.next().await else { + let Some(_val) = 
js_reader_rx.next().await else { break; }; - val.ack.send(Ack).unwrap() } + // after reading messages remove from the tracker so that the messages are acked + for offset in offsets { + tracker_handle.delete(offset).await.unwrap(); + } + + // wait until the tracker becomes empty, don't wait more than 1 second + tokio::time::timeout(Duration::from_secs(1), async { + while !tracker_handle.is_empty().await.unwrap() { + sleep(Duration::from_millis(10)).await; + } + }) + .await + .expect("Tracker is not empty after 1 second"); + let mut consumer: PullConsumer = context .get_consumer_from_stream(stream_name, stream_name) .await diff --git a/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs b/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs index 28a8ca6ec0..a90b20a5c1 100644 --- a/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs +++ b/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs @@ -9,16 +9,17 @@ use async_nats::jetstream::publish::PublishAck; use async_nats::jetstream::stream::RetentionPolicy::Limits; use async_nats::jetstream::Context; use bytes::Bytes; -use tokio::sync::{oneshot, Semaphore}; +use tokio::sync::Semaphore; use tokio::time::{sleep, Instant}; use tokio_util::sync::CancellationToken; use tracing::{debug, error, info, warn}; use crate::config::pipeline::isb::BufferWriterConfig; use crate::error::Error; -use crate::message::{IntOffset, Offset, ReadAck}; +use crate::message::{IntOffset, Offset}; use crate::metrics::{pipeline_isb_metric_labels, pipeline_metrics}; use crate::pipeline::isb::jetstream::Stream; +use crate::tracker::TrackerHandle; use crate::Result; #[derive(Clone, Debug)] @@ -259,8 +260,7 @@ impl JetstreamWriter { pub(crate) struct ResolveAndPublishResult { pub(crate) pafs: Vec<(Stream, PublishAckFuture)>, pub(crate) payload: Vec, - // Acknowledgement oneshot to notify the reader that the message has been written - pub(crate) ack_tx: oneshot::Sender, + pub(crate) offset: String, } /// Resolves the PAF from the write 
call, if not successful it will do a blocking write so that @@ -269,13 +269,19 @@ pub(crate) struct ResolveAndPublishResult { pub(crate) struct PafResolver { sem: Arc, js_writer: JetstreamWriter, + tracker_handle: TrackerHandle, } impl PafResolver { - pub(crate) fn new(concurrency: usize, js_writer: JetstreamWriter) -> Self { + pub(crate) fn new( + concurrency: usize, + js_writer: JetstreamWriter, + tracker_handle: TrackerHandle, + ) -> Self { PafResolver { sem: Arc::new(Semaphore::new(concurrency)), // concurrency limit for resolving PAFs js_writer, + tracker_handle, } } @@ -289,6 +295,7 @@ impl PafResolver { .acquire_owned() .await .map_err(|_e| Error::ISB("Failed to acquire semaphore permit".to_string()))?; + let tracker_handle = self.tracker_handle.clone(); let mut offsets = Vec::new(); let js_writer = self.js_writer.clone(); @@ -307,6 +314,10 @@ impl PafResolver { stream.clone(), Offset::Int(IntOffset::new(ack.sequence, stream.1)), )); + tracker_handle + .delete(result.offset.clone()) + .await + .expect("Failed to delete offset from tracker"); } Err(e) => { error!( @@ -333,21 +344,16 @@ impl PafResolver { Err(e) => { error!(?e, "Blocking write failed for stream {}", stream.0); // Since we failed to write to the stream, we need to send a NAK to the reader - result.ack_tx.send(ReadAck::Nak).unwrap_or_else(|e| { - error!("Failed to send error for stream {}: {:?}", stream.0, e); - }); + tracker_handle + .discard(result.offset.clone()) + .await + .expect("Failed to discard offset from the tracker"); return; } } } } } - - // Send an ack to the reader - result.ack_tx.send(ReadAck::Ack).unwrap_or_else(|e| { - error!("Failed to send ack: {:?}", e); - }); - pipeline_metrics() .isb .paf_resolution_time @@ -381,6 +387,8 @@ mod tests { let context = jetstream::new(client); let stream_name = "test_async"; + // Delete stream if it exists + let _ = context.delete_stream(stream_name).await; let _stream = context .get_or_create_stream(stream::Config { name: stream_name.into(), 
@@ -441,6 +449,8 @@ mod tests { let context = jetstream::new(client); let stream_name = "test_sync"; + // Delete stream if it exists + let _ = context.delete_stream(stream_name).await; let _stream = context .get_or_create_stream(stream::Config { name: stream_name.into(), @@ -503,6 +513,8 @@ mod tests { let context = jetstream::new(client); let stream_name = "test_cancellation"; + // Delete stream if it exists + let _ = context.delete_stream(stream_name).await; let _stream = context .get_or_create_stream(stream::Config { name: stream_name.into(), @@ -606,6 +618,8 @@ mod tests { let context = jetstream::new(client); let stream_name = "test_fetch_buffer_usage"; + // Delete stream if it exists + let _ = context.delete_stream(stream_name).await; let _stream = context .get_or_create_stream(stream::Config { name: stream_name.into(), @@ -679,6 +693,8 @@ mod tests { let context = jetstream::new(client); let stream_name = "test_check_stream_status"; + // Delete stream if it exists + let _ = context.delete_stream(stream_name).await; let _stream = context .get_or_create_stream(stream::Config { name: stream_name.into(), diff --git a/rust/numaflow-core/src/shared/create_components.rs b/rust/numaflow-core/src/shared/create_components.rs index b09f243de5..26516e34d9 100644 --- a/rust/numaflow-core/src/shared/create_components.rs +++ b/rust/numaflow-core/src/shared/create_components.rs @@ -16,6 +16,7 @@ use crate::source::generator::new_generator; use crate::source::pulsar::new_pulsar_source; use crate::source::user_defined::new_source; use crate::source::Source; +use crate::tracker::TrackerHandle; use crate::transformer::Transformer; use crate::{config, error, metrics, source}; @@ -25,6 +26,7 @@ pub(crate) async fn create_sink_writer( read_timeout: Duration, primary_sink: SinkConfig, fallback_sink: Option, + tracker_handle: TrackerHandle, cln_token: &CancellationToken, ) -> error::Result<( SinkWriter, @@ -33,11 +35,21 @@ pub(crate) async fn create_sink_writer( )> { let 
(sink_writer_builder, sink_rpc_client) = match primary_sink.sink_type.clone() { SinkType::Log(_) => ( - SinkWriterBuilder::new(batch_size, read_timeout, SinkClientType::Log), + SinkWriterBuilder::new( + batch_size, + read_timeout, + SinkClientType::Log, + tracker_handle, + ), None, ), SinkType::Blackhole(_) => ( - SinkWriterBuilder::new(batch_size, read_timeout, SinkClientType::Blackhole), + SinkWriterBuilder::new( + batch_size, + read_timeout, + SinkClientType::Blackhole, + tracker_handle, + ), None, ), SinkType::UserDefined(ud_config) => { @@ -69,6 +81,7 @@ pub(crate) async fn create_sink_writer( batch_size, read_timeout, SinkClientType::UserDefined(sink_grpc_client.clone()), + tracker_handle, ) .retry_config(primary_sink.retry_config.unwrap_or_default()), Some(sink_grpc_client), @@ -137,6 +150,7 @@ pub(crate) async fn create_sink_writer( pub async fn create_transformer( batch_size: usize, transformer_config: Option, + tracker_handle: TrackerHandle, cln_token: CancellationToken, ) -> error::Result<(Option, Option>)> { if let Some(transformer_config) = transformer_config { @@ -172,6 +186,7 @@ pub async fn create_transformer( batch_size, transformer_config.concurrency, transformer_grpc_client.clone(), + tracker_handle, ) .await?, ), @@ -187,6 +202,7 @@ pub async fn create_source( batch_size: usize, read_timeout: Duration, source_config: &SourceConfig, + tracker_handle: TrackerHandle, cln_token: CancellationToken, ) -> error::Result<(Source, Option>)> { match &source_config.source_type { @@ -197,6 +213,7 @@ pub async fn create_source( Source::new( batch_size, source::SourceType::Generator(generator_read, generator_ack, generator_lag), + tracker_handle, ), None, )) @@ -233,6 +250,7 @@ pub async fn create_source( Source::new( batch_size, source::SourceType::UserDefinedSource(ud_read, ud_ack, ud_lag), + tracker_handle, ), Some(source_grpc_client), )) @@ -240,7 +258,11 @@ pub async fn create_source( SourceType::Pulsar(pulsar_config) => { let pulsar = 
new_pulsar_source(pulsar_config.clone(), batch_size, read_timeout).await?; Ok(( - Source::new(batch_size, source::SourceType::Pulsar(pulsar)), + Source::new( + batch_size, + source::SourceType::Pulsar(pulsar), + tracker_handle, + ), None, )) } diff --git a/rust/numaflow-core/src/sink.rs b/rust/numaflow-core/src/sink.rs index 144cefef11..dc321d6503 100644 --- a/rust/numaflow-core/src/sink.rs +++ b/rust/numaflow-core/src/sink.rs @@ -16,7 +16,8 @@ use user_defined::UserDefinedSink; use crate::config::components::sink::{OnFailureStrategy, RetryConfig}; use crate::error::Error; -use crate::message::{Message, ReadAck, ReadMessage, ResponseFromSink, ResponseStatusFromSink}; +use crate::message::{Message, ResponseFromSink, ResponseStatusFromSink}; +use crate::tracker::TrackerHandle; use crate::Result; mod blackhole; @@ -88,6 +89,7 @@ pub(super) struct SinkWriter { retry_config: RetryConfig, sink_handle: mpsc::Sender, fb_sink_handle: Option>, + tracker_handle: TrackerHandle, } /// SinkWriterBuilder is a builder to build a SinkWriter. @@ -97,16 +99,23 @@ pub struct SinkWriterBuilder { retry_config: RetryConfig, sink_client: SinkClientType, fb_sink_client: Option, + tracker_handle: TrackerHandle, } impl SinkWriterBuilder { - pub fn new(batch_size: usize, chunk_timeout: Duration, sink_type: SinkClientType) -> Self { + pub fn new( + batch_size: usize, + chunk_timeout: Duration, + sink_type: SinkClientType, + tracker_handle: TrackerHandle, + ) -> Self { Self { batch_size, chunk_timeout, retry_config: RetryConfig::default(), sink_client: sink_type, fb_sink_client: None, + tracker_handle, } } @@ -196,6 +205,7 @@ impl SinkWriterBuilder { retry_config: self.retry_config, sink_handle: sender, fb_sink_handle, + tracker_handle: self.tracker_handle, }) } } @@ -234,7 +244,7 @@ impl SinkWriter { /// closed or the cancellation token is triggered. 
pub(super) async fn streaming_write( &self, - messages_stream: ReceiverStream, + messages_stream: ReceiverStream, cancellation_token: CancellationToken, ) -> Result>> { let handle: JoinHandle> = tokio::spawn({ @@ -260,20 +270,24 @@ impl SinkWriter { continue; } - let n = batch.len(); - let (messages, senders): (Vec<_>, Vec<_>) = - batch.into_iter().map(|rm| (rm.message, rm.ack)).unzip(); + let offsets = batch + .iter() + .map(|msg| msg.id.offset.clone()) + .collect::>(); - match this.write(messages, cancellation_token.clone()).await { + let n = batch.len(); + match this.write(batch, cancellation_token.clone()).await { Ok(_) => { - for sender in senders { - let _ = sender.send(ReadAck::Ack); + for offset in offsets { + // Delete the message from the tracker + this.tracker_handle.delete(offset).await?; } } Err(e) => { error!(?e, "Error writing to sink"); - for sender in senders { - let _ = sender.send(ReadAck::Nak); + for offset in offsets { + // Discard the message from the tracker + this.tracker_handle.discard(offset).await?; } } } @@ -592,7 +606,7 @@ mod tests { use tokio_util::sync::CancellationToken; use super::*; - use crate::message::{Message, MessageID}; + use crate::message::{Message, MessageID, ReadAck}; use crate::shared::grpc::create_rpc_channel; struct SimpleSink; @@ -619,11 +633,15 @@ mod tests { #[tokio::test] async fn test_write() { - let mut sink_writer = - SinkWriterBuilder::new(10, Duration::from_secs(1), SinkClientType::Log) - .build() - .await - .unwrap(); + let mut sink_writer = SinkWriterBuilder::new( + 10, + Duration::from_secs(1), + SinkClientType::Log, + TrackerHandle::new(), + ) + .build() + .await + .unwrap(); let messages: Vec = (0..5) .map(|i| Message { @@ -648,11 +666,16 @@ mod tests { #[tokio::test] async fn test_streaming_write() { - let sink_writer = - SinkWriterBuilder::new(10, Duration::from_millis(100), SinkClientType::Log) - .build() - .await - .unwrap(); + let tracker_handle = TrackerHandle::new(); + let sink_writer = 
SinkWriterBuilder::new( + 10, + Duration::from_millis(100), + SinkClientType::Log, + tracker_handle.clone(), + ) + .build() + .await + .unwrap(); let messages: Vec = (0..10) .map(|i| Message { @@ -673,13 +696,12 @@ mod tests { let mut ack_rxs = vec![]; for msg in messages { let (ack_tx, ack_rx) = oneshot::channel(); - let _ = tx - .send(ReadMessage { - message: msg, - ack: ack_tx, - }) - .await; ack_rxs.push(ack_rx); + tracker_handle + .insert(msg.id.offset.clone(), ack_tx) + .await + .unwrap(); + let _ = tx.send(msg).await; } drop(tx); @@ -692,12 +714,15 @@ mod tests { for ack_rx in ack_rxs { assert_eq!(ack_rx.await.unwrap(), ReadAck::Ack); } + // check if the tracker is empty + assert!(tracker_handle.is_empty().await.unwrap()); } #[tokio::test] async fn test_streaming_write_error() { + let tracker_handle = TrackerHandle::new(); // start the server - let (_shutdown_tx, shutdown_rx) = tokio::sync::oneshot::channel(); + let (_shutdown_tx, shutdown_rx) = oneshot::channel(); let tmp_dir = tempfile::TempDir::new().unwrap(); let sock_file = tmp_dir.path().join("sink.sock"); let server_info_file = tmp_dir.path().join("sink-server-info"); @@ -723,6 +748,7 @@ mod tests { SinkClientType::UserDefined(SinkClient::new( create_rpc_channel(sock_file).await.unwrap(), )), + tracker_handle.clone(), ) .build() .await @@ -747,13 +773,12 @@ mod tests { let mut ack_rxs = vec![]; for msg in messages { let (ack_tx, ack_rx) = oneshot::channel(); - let _ = tx - .send(ReadMessage { - message: msg, - ack: ack_tx, - }) - .await; ack_rxs.push(ack_rx); + tracker_handle + .insert(msg.id.offset.clone(), ack_tx) + .await + .unwrap(); + let _ = tx.send(msg).await; } drop(tx); let cln_token = CancellationToken::new(); @@ -769,16 +794,20 @@ mod tests { }); let _ = handle.await.unwrap(); - // since the writes fail, all the messages will be NAKed for ack_rx in ack_rxs { assert_eq!(ack_rx.await.unwrap(), ReadAck::Nak); } + + // check if the tracker is empty + 
assert!(tracker_handle.is_empty().await.unwrap()); } #[tokio::test] async fn test_fallback_write() { + let tracker_handle = TrackerHandle::new(); + // start the server - let (_shutdown_tx, shutdown_rx) = tokio::sync::oneshot::channel(); + let (_shutdown_tx, shutdown_rx) = oneshot::channel(); let tmp_dir = tempfile::TempDir::new().unwrap(); let sock_file = tmp_dir.path().join("sink.sock"); let server_info_file = tmp_dir.path().join("sink-server-info"); @@ -804,6 +833,7 @@ mod tests { SinkClientType::UserDefined(SinkClient::new( create_rpc_channel(sock_file).await.unwrap(), )), + tracker_handle.clone(), ) .fb_sink_client(SinkClientType::Log) .build() @@ -829,13 +859,12 @@ mod tests { let mut ack_rxs = vec![]; for msg in messages { let (ack_tx, ack_rx) = oneshot::channel(); - let _ = tx - .send(ReadMessage { - message: msg, - ack: ack_tx, - }) - .await; + tracker_handle + .insert(msg.id.offset.clone(), ack_tx) + .await + .unwrap(); ack_rxs.push(ack_rx); + let _ = tx.send(msg).await; } drop(tx); let cln_token = CancellationToken::new(); @@ -848,5 +877,8 @@ mod tests { for ack_rx in ack_rxs { assert_eq!(ack_rx.await.unwrap(), ReadAck::Ack); } + + // check if the tracker is empty + assert!(tracker_handle.is_empty().await.unwrap()); } } diff --git a/rust/numaflow-core/src/sink/user_defined.rs b/rust/numaflow-core/src/sink/user_defined.rs index 81ac3d2022..11e84af61f 100644 --- a/rust/numaflow-core/src/sink/user_defined.rs +++ b/rust/numaflow-core/src/sink/user_defined.rs @@ -1,14 +1,14 @@ +use crate::message::{Message, ResponseFromSink}; +use crate::sink::Sink; +use crate::Error; +use crate::Result; use numaflow_pb::clients::sink::sink_client::SinkClient; use numaflow_pb::clients::sink::{Handshake, SinkRequest, SinkResponse, TransmissionStatus}; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; use tonic::transport::Channel; use tonic::{Request, Streaming}; - -use crate::message::{Message, ResponseFromSink}; -use crate::sink::Sink; -use crate::Error; -use 
crate::Result; +use tracing::error; const DEFAULT_CHANNEL_SIZE: usize = 1000; @@ -97,7 +97,7 @@ impl Sink for UserDefinedSink { if response.status.map_or(false, |s| s.eot) { if responses.len() != num_requests { - log::error!("received EOT message before all responses are received, we will wait indefinitely for the remaining responses"); + error!("received EOT message before all responses are received, we will wait indefinitely for the remaining responses"); } else { break; } diff --git a/rust/numaflow-core/src/source.rs b/rust/numaflow-core/src/source.rs index b48f852222..8409187d6c 100644 --- a/rust/numaflow-core/src/source.rs +++ b/rust/numaflow-core/src/source.rs @@ -7,11 +7,12 @@ use tokio_util::sync::CancellationToken; use tracing::{error, info}; use crate::config::{get_vertex_name, is_mono_vertex}; -use crate::message::{ReadAck, ReadMessage}; +use crate::message::ReadAck; use crate::metrics::{ monovertex_metrics, mvtx_forward_metric_labels, pipeline_forward_metric_labels, pipeline_isb_metric_labels, pipeline_metrics, }; +use crate::tracker::TrackerHandle; use crate::Result; use crate::{ message::{Message, Offset}, @@ -135,11 +136,16 @@ where pub(crate) struct Source { read_batch_size: usize, sender: mpsc::Sender, + tracker_handle: TrackerHandle, } impl Source { /// Create a new StreamingSource. It starts the read and ack actors in the background. 
- pub(crate) fn new(batch_size: usize, src_type: SourceType) -> Self { + pub(crate) fn new( + batch_size: usize, + src_type: SourceType, + tracker_handle: TrackerHandle, + ) -> Self { let (sender, receiver) = mpsc::channel(batch_size); match src_type { SourceType::UserDefinedSource(reader, acker, lag_reader) => { @@ -175,6 +181,7 @@ impl Source { Self { read_batch_size: batch_size, sender, + tracker_handle, } } @@ -222,10 +229,11 @@ impl Source { pub(crate) fn streaming_read( &self, cln_token: CancellationToken, - ) -> Result<(ReceiverStream, JoinHandle>)> { + ) -> Result<(ReceiverStream, JoinHandle>)> { let batch_size = self.read_batch_size; let (messages_tx, messages_rx) = mpsc::channel(batch_size); let source_handle = self.sender.clone(); + let tracker_handle = self.tracker_handle.clone(); let pipeline_labels = pipeline_forward_metric_labels("Source", Some(get_vertex_name())); let mvtx_labels = mvtx_forward_metric_labels(); @@ -233,31 +241,23 @@ impl Source { info!("Started streaming source with batch size: {}", batch_size); let handle = tokio::spawn(async move { let mut processed_msgs_count: usize = 0; - let mut last_logged_at = tokio::time::Instant::now(); + let mut last_logged_at = time::Instant::now(); loop { if cln_token.is_cancelled() { info!("Cancellation token is cancelled. Stopping the source."); return Ok(()); } - let permit_time = tokio::time::Instant::now(); // Reserve the permits before invoking the read method. 
let mut permit = match messages_tx.reserve_many(batch_size).await { - Ok(permit) => { - info!( - "Reserved permits for {} messages in {:?}", - batch_size, - permit_time.elapsed() - ); - permit - } + Ok(permit) => permit, Err(e) => { error!("Error while reserving permits: {:?}", e); return Err(crate::error::Error::Source(e.to_string())); } }; - let read_start_time = tokio::time::Instant::now(); + let read_start_time = time::Instant::now(); let messages = match Self::read(source_handle.clone()).await { Ok(messages) => messages, Err(e) => { @@ -293,17 +293,17 @@ impl Source { let (resp_ack_tx, resp_ack_rx) = oneshot::channel(); let offset = message.offset.clone().unwrap(); - let read_message = ReadMessage { - message, - ack: resp_ack_tx, - }; + // insert the offset and the ack one shot in the tracker. + tracker_handle + .insert(offset.to_string(), resp_ack_tx) + .await?; // store the ack one shot in the batch to invoke ack later. ack_batch.push((offset, resp_ack_rx)); match permit.next() { Some(permit) => { - permit.send(read_message); + permit.send(message); } None => { unreachable!( @@ -328,7 +328,7 @@ impl Source { std::time::Instant::now() ); processed_msgs_count = 0; - last_logged_at = tokio::time::Instant::now(); + last_logged_at = time::Instant::now(); } } }); @@ -421,6 +421,7 @@ mod tests { use crate::shared::grpc::create_rpc_channel; use crate::source::user_defined::new_source; use crate::source::{Source, SourceType}; + use crate::tracker::TrackerHandle; struct SimpleSource { num: usize, @@ -521,6 +522,7 @@ mod tests { let source = Source::new( 5, SourceType::UserDefinedSource(src_read, src_ack, lag_reader), + TrackerHandle::new(), ); let cln_token = CancellationToken::new(); @@ -530,8 +532,8 @@ mod tests { // we should read all the 100 messages for _ in 0..100 { let message = stream.next().await.unwrap(); - assert_eq!(message.message.value, "hello".as_bytes()); - offsets.push(message.message.offset.clone().unwrap()); + assert_eq!(message.value, 
"hello".as_bytes()); + offsets.push(message.offset.clone().unwrap()); } // ack all the messages diff --git a/rust/numaflow-core/src/tracker.rs b/rust/numaflow-core/src/tracker.rs new file mode 100644 index 0000000000..1177bc7438 --- /dev/null +++ b/rust/numaflow-core/src/tracker.rs @@ -0,0 +1,341 @@ +//! Tracker is added because when do data forwarding in [MonoVertex](crate::monovertex::forwarder) or +//! in [Pipeline](crate::pipeline::forwarder), immaterial whether we are in source, UDF, or Sink, we +//! have to track whether the message has completely moved to the next vertex (N+1)th before we can +//! mark that message as done in the Nth vertex. We use Tracker to let Read know that it can mark the +//! message as Ack or NAck based on the state of the message. E.g., Ack if successfully written to ISB, +//! NAck otherwise if ISB is failing to accept, and we are in shutdown path. +//! There will be a tracker per input stream reader. +//! +//! In the future Watermark will also be propagated based on this. + +use crate::error::Error; +use crate::message::ReadAck; +use crate::Result; +use std::collections::HashMap; +use tokio::sync::{mpsc, oneshot}; +use tracing::warn; + +/// TrackerEntry represents the state of a tracked message. +#[derive(Debug)] +struct TrackerEntry { + ack_send: oneshot::Sender, + count: u32, + eof: bool, +} + +/// ActorMessage represents the messages that can be sent to the Tracker actor. +enum ActorMessage { + Insert { + offset: String, + ack_send: oneshot::Sender, + }, + Update { + offset: String, + count: u32, + eof: bool, + }, + Delete { + offset: String, + }, + Discard { + offset: String, + }, + #[cfg(test)] + IsEmpty { + respond_to: oneshot::Sender, + }, +} + +/// Tracker is responsible for managing the state of messages being processed. +/// It keeps track of message offsets and their completeness, and sends acknowledgments. 
+struct Tracker { + entries: HashMap, + receiver: mpsc::Receiver, +} + +/// Implementation of Drop for Tracker to send Nak for unacknowledged messages. +impl Drop for Tracker { + fn drop(&mut self) { + for (offset, entry) in self.entries.drain() { + warn!(?offset, "Sending Nak for unacknowledged message"); + entry + .ack_send + .send(ReadAck::Nak) + .expect("Failed to send nak"); + } + } +} + +impl Tracker { + /// Creates a new Tracker instance with the given receiver for actor messages. + fn new(receiver: mpsc::Receiver) -> Self { + Self { + entries: HashMap::new(), + receiver, + } + } + + /// Runs the Tracker, processing incoming actor messages to update the state. + async fn run(mut self) { + while let Some(message) = self.receiver.recv().await { + self.handle_message(message).await; + } + } + + /// Handles incoming actor messages to update the state of tracked messages. + async fn handle_message(&mut self, message: ActorMessage) { + match message { + ActorMessage::Insert { + offset, + ack_send: respond_to, + } => { + self.handle_insert(offset, respond_to); + } + ActorMessage::Update { offset, count, eof } => { + self.handle_update(offset, count, eof); + } + ActorMessage::Delete { offset } => { + self.handle_delete(offset); + } + ActorMessage::Discard { offset } => { + self.handle_discard(offset); + } + #[cfg(test)] + ActorMessage::IsEmpty { respond_to } => { + let is_empty = self.entries.is_empty(); + let _ = respond_to.send(is_empty); + } + } + } + + /// Inserts a new entry into the tracker with the given offset and ack sender. + fn handle_insert(&mut self, offset: String, respond_to: oneshot::Sender) { + self.entries.insert( + offset, + TrackerEntry { + ack_send: respond_to, + count: 0, + eof: false, + }, + ); + } + + /// Updates an existing entry in the tracker with the number of expected messages and EOF status. 
+ fn handle_update(&mut self, offset: String, count: u32, eof: bool) { + if let Some(entry) = self.entries.get_mut(&offset) { + entry.count = count; + entry.eof = eof; + } + } + + /// Removes an entry from the tracker and sends an acknowledgment if the count is zero + /// or the entry is marked as EOF. + fn handle_delete(&mut self, offset: String) { + if let Some(mut entry) = self.entries.remove(&offset) { + if entry.count > 0 { + entry.count -= 1; + } + if entry.count == 0 || entry.eof { + entry + .ack_send + .send(ReadAck::Ack) + .expect("Failed to send ack"); + } else { + self.entries.insert(offset, entry); + } + } + } + + /// Discards an entry from the tracker and sends a nak. + fn handle_discard(&mut self, offset: String) { + if let Some(entry) = self.entries.remove(&offset) { + entry + .ack_send + .send(ReadAck::Nak) + .expect("Failed to send nak"); + } + } +} + +/// TrackerHandle provides an interface to interact with the Tracker. +/// It allows inserting, updating, deleting, and discarding tracked messages. +#[derive(Clone)] +pub struct TrackerHandle { + sender: mpsc::Sender, +} + +impl TrackerHandle { + /// Creates a new TrackerHandle instance and spawns the Tracker. + pub(crate) fn new() -> Self { + let (sender, receiver) = mpsc::channel(100); + let tracker = Tracker::new(receiver); + tokio::spawn(tracker.run()); + Self { sender } + } + + /// Inserts a new message into the Tracker with the given offset and acknowledgment sender. + pub(crate) async fn insert( + &self, + offset: String, + ack_send: oneshot::Sender, + ) -> Result<()> { + let message = ActorMessage::Insert { offset, ack_send }; + self.sender + .send(message) + .await + .map_err(|e| Error::Tracker(format!("{:?}", e)))?; + Ok(()) + } + + /// Updates an existing message in the Tracker with the given offset, count, and EOF status. 
+ pub(crate) async fn update(&self, offset: String, count: u32, eof: bool) -> Result<()> { + let message = ActorMessage::Update { offset, count, eof }; + self.sender + .send(message) + .await + .map_err(|e| Error::Tracker(format!("{:?}", e)))?; + Ok(()) + } + + /// Deletes a message from the Tracker with the given offset. + pub(crate) async fn delete(&self, offset: String) -> Result<()> { + let message = ActorMessage::Delete { offset }; + self.sender + .send(message) + .await + .map_err(|e| Error::Tracker(format!("{:?}", e)))?; + Ok(()) + } + + /// Discards a message from the Tracker with the given offset. + pub(crate) async fn discard(&self, offset: String) -> Result<()> { + let message = ActorMessage::Discard { offset }; + self.sender + .send(message) + .await + .map_err(|e| Error::Tracker(format!("{:?}", e)))?; + Ok(()) + } + + /// Checks if the Tracker is empty. Used for testing to make sure all messages are acknowledged. + #[cfg(test)] + pub(crate) async fn is_empty(&self) -> Result { + let (respond_to, response) = oneshot::channel(); + let message = ActorMessage::IsEmpty { respond_to }; + self.sender + .send(message) + .await + .map_err(|e| Error::Tracker(format!("{:?}", e)))?; + response + .await + .map_err(|e| Error::Tracker(format!("{:?}", e))) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use tokio::sync::oneshot; + use tokio::time::{timeout, Duration}; + + #[tokio::test] + async fn test_insert_update_delete() { + let handle = TrackerHandle::new(); + let (ack_send, ack_recv) = oneshot::channel(); + + // Insert a new message + handle + .insert("offset1".to_string(), ack_send) + .await + .unwrap(); + + // Update the message + handle.update("offset1".to_string(), 1, true).await.unwrap(); + + // Delete the message + handle.delete("offset1".to_string()).await.unwrap(); + + // Verify that the message was deleted and ack was received + let result = timeout(Duration::from_secs(1), ack_recv).await.unwrap(); + assert!(result.is_ok(), "Ack should be 
received"); + assert_eq!(result.unwrap(), ReadAck::Ack); + assert!(handle.is_empty().await.unwrap(), "Tracker should be empty"); + } + + #[tokio::test] + async fn test_update_with_multiple_deletes() { + let handle = TrackerHandle::new(); + let (ack_send, ack_recv) = oneshot::channel(); + + // Insert a new message + handle + .insert("offset1".to_string(), ack_send) + .await + .unwrap(); + + // Update the message with a count of 3 + handle + .update("offset1".to_string(), 3, false) + .await + .unwrap(); + + // Delete the message three times + handle.delete("offset1".to_string()).await.unwrap(); + handle.delete("offset1".to_string()).await.unwrap(); + handle.delete("offset1".to_string()).await.unwrap(); + + // Verify that the message was deleted and ack was received after the third delete + let result = timeout(Duration::from_secs(1), ack_recv).await.unwrap(); + assert!(result.is_ok(), "Ack should be received after three deletes"); + assert_eq!(result.unwrap(), ReadAck::Ack); + assert!(handle.is_empty().await.unwrap(), "Tracker should be empty"); + } + + #[tokio::test] + async fn test_discard() { + let handle = TrackerHandle::new(); + let (ack_send, ack_recv) = oneshot::channel(); + + // Insert a new message + handle + .insert("offset1".to_string(), ack_send) + .await + .unwrap(); + + // Discard the message + handle.discard("offset1".to_string()).await.unwrap(); + + // Verify that the message was discarded and nak was received + let result = timeout(Duration::from_secs(1), ack_recv).await.unwrap(); + assert!(result.is_ok(), "Nak should be received"); + assert_eq!(result.unwrap(), ReadAck::Nak); + assert!(handle.is_empty().await.unwrap(), "Tracker should be empty"); + } + + #[tokio::test] + async fn test_discard_after_update_with_higher_count() { + let handle = TrackerHandle::new(); + let (ack_send, ack_recv) = oneshot::channel(); + + // Insert a new message + handle + .insert("offset1".to_string(), ack_send) + .await + .unwrap(); + + // Update the message with a count 
of 3 + handle + .update("offset1".to_string(), 3, false) + .await + .unwrap(); + + // Discard the message + handle.discard("offset1".to_string()).await.unwrap(); + + // Verify that the message was discarded and nak was received + let result = timeout(Duration::from_secs(1), ack_recv).await.unwrap(); + assert!(result.is_ok(), "Nak should be received"); + assert_eq!(result.unwrap(), ReadAck::Nak); + assert!(handle.is_empty().await.unwrap(), "Tracker should be empty"); + } +} diff --git a/rust/numaflow-core/src/transformer.rs b/rust/numaflow-core/src/transformer.rs index d987d62050..114e4a552f 100644 --- a/rust/numaflow-core/src/transformer.rs +++ b/rust/numaflow-core/src/transformer.rs @@ -1,14 +1,14 @@ -use std::sync::Arc; - +use futures::StreamExt; use numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; +use std::sync::Arc; use tokio::sync::{mpsc, oneshot, OwnedSemaphorePermit, Semaphore}; use tokio::task::JoinHandle; use tokio_stream::wrappers::ReceiverStream; -use tokio_stream::StreamExt; use tonic::transport::Channel; -use user_defined::ActorMessage; +use tracing::error; -use crate::message::{ReadAck, ReadMessage}; +use crate::message::Message; +use crate::tracker::TrackerHandle; use crate::transformer::user_defined::UserDefinedTransformer; use crate::Result; @@ -17,31 +17,75 @@ use crate::Result; /// [User-Defined Transformer]: https://numaflow.numaproj.io/user-guide/sources/transformer/overview/#build-your-own-transformer pub(crate) mod user_defined; +pub enum ActorMessage { + Transform { + message: Message, + respond_to: oneshot::Sender>>, + }, +} + +/// TransformerActor, handles the transformation of messages. 
+struct TransformerActor { + receiver: mpsc::Receiver, + transformer: UserDefinedTransformer, +} + +impl TransformerActor { + fn new(receiver: mpsc::Receiver, transformer: UserDefinedTransformer) -> Self { + Self { + receiver, + transformer, + } + } + + /// Handles the incoming message, unlike standard actor pattern the downstream call is not blocking + /// and the response is sent back to the caller using oneshot in this actor, this is because the + /// downstream can handle multiple messages at once. + async fn handle_message(&mut self, msg: ActorMessage) { + match msg { + ActorMessage::Transform { + message, + respond_to, + } => self.transformer.transform(message, respond_to).await, + } + } + + async fn run(mut self) { + while let Some(msg) = self.receiver.recv().await { + self.handle_message(msg).await; + } + } +} + /// StreamingTransformer, transforms messages in a streaming fashion. pub(crate) struct Transformer { batch_size: usize, sender: mpsc::Sender, concurrency: usize, + tracker_handle: TrackerHandle, } impl Transformer { pub(crate) async fn new( batch_size: usize, concurrency: usize, client: SourceTransformClient, + tracker_handle: TrackerHandle, ) -> Result { - let (sender, mut receiver) = mpsc::channel(batch_size); - let mut client = UserDefinedTransformer::new(batch_size, client).await?; + let (sender, receiver) = mpsc::channel(batch_size); + let transformer_actor = TransformerActor::new( + receiver, + UserDefinedTransformer::new(batch_size, client).await?, + ); tokio::spawn(async move { - while let Some(msg) = receiver.recv().await { - client.handle_message(msg).await; - } + transformer_actor.run().await; }); Ok(Self { batch_size, concurrency, sender, + tracker_handle, }) } @@ -50,8 +94,9 @@ impl Transformer { pub(crate) async fn transform( transform_handle: mpsc::Sender, permit: OwnedSemaphorePermit, - read_msg: ReadMessage, - output_tx: mpsc::Sender, + read_msg: Message, + output_tx: mpsc::Sender, + tracker_handle: TrackerHandle, ) -> Result<()> { 
// only if we have tasks < max_concurrency @@ -60,33 +105,40 @@ impl Transformer { // invoke transformer and then wait for the one-shot tokio::spawn(async move { let _permit = permit; - let message = read_msg.message.clone(); let (sender, receiver) = oneshot::channel(); let msg = ActorMessage::Transform { - message, + message: read_msg.clone(), respond_to: sender, }; // invoke trf - transform_handle.send(msg).await.unwrap(); + transform_handle + .send(msg) + .await + .expect("failed to send message"); // wait for one-shot match receiver.await { Ok(Ok(mut transformed_messages)) => { - // FIXME: handle the case where the transformer does flat map operation - if let Some(transformed_msg) = transformed_messages.pop() { - output_tx - .send(ReadMessage { - message: transformed_msg, - ack: read_msg.ack, - }) - .await - .unwrap(); + tracker_handle + .update( + read_msg.id.offset.clone(), + transformed_messages.len() as u32, + false, + ) + .await + .expect("failed to update tracker"); + for transformed_message in transformed_messages.drain(..) { + let _ = output_tx.send(transformed_message).await; } } Err(_) | Ok(Err(_)) => { - let _ = read_msg.ack.send(ReadAck::Nak); + error!("Failed to transform message"); + tracker_handle + .discard(read_msg.id.offset.clone()) + .await + .expect("failed to discard tracker"); } } }); @@ -98,11 +150,12 @@ impl Transformer { /// sends them to the next stage. 
pub(crate) fn transform_stream( &self, - input_stream: ReceiverStream, - ) -> Result<(ReceiverStream, JoinHandle>)> { + input_stream: ReceiverStream, + ) -> Result<(ReceiverStream, JoinHandle>)> { let (output_tx, output_rx) = mpsc::channel(self.batch_size); let transform_handle = self.sender.clone(); + let tracker_handle = self.tracker_handle.clone(); // FIXME: batch_size should not be used, introduce a new config called udf concurrenc let semaphore = Arc::new(Semaphore::new(self.concurrency)); @@ -117,6 +170,7 @@ impl Transformer { permit, read_msg, output_tx.clone(), + tracker_handle.clone(), ) .await?; } @@ -137,7 +191,7 @@ mod tests { use tokio::sync::oneshot; use super::*; - use crate::message::{Message, MessageID, Offset, ReadMessage}; + use crate::message::{Message, MessageID, Offset}; use crate::shared::grpc::create_rpc_channel; struct SimpleTransformer; @@ -174,9 +228,10 @@ mod tests { // wait for the server to start tokio::time::sleep(Duration::from_millis(100)).await; + let tracker_handle = TrackerHandle::new(); let client = SourceTransformClient::new(create_rpc_channel(sock_file).await?); - let transformer = Transformer::new(500, 10, client).await?; + let transformer = Transformer::new(500, 10, client, tracker_handle.clone()).await?; let message = Message { keys: vec!["first".into()], @@ -194,21 +249,21 @@ mod tests { headers: Default::default(), }; - let (tx, _) = oneshot::channel(); - - let read_message = ReadMessage { - message: message.clone(), - ack: tx, - }; - let (output_tx, mut output_rx) = mpsc::channel(10); let semaphore = Arc::new(Semaphore::new(10)); let permit = semaphore.clone().acquire_owned().await.unwrap(); - Transformer::transform(transformer.sender.clone(), permit, read_message, output_tx).await?; + Transformer::transform( + transformer.sender.clone(), + permit, + message, + output_tx, + tracker_handle, + ) + .await?; let transformed_message = output_rx.recv().await.unwrap(); - assert_eq!(transformed_message.message.value, "hello"); + 
assert_eq!(transformed_message.value, "hello"); // we need to drop the transformer, because if there are any in-flight requests // server fails to shut down. https://github.com/numaproj/numaflow-rs/issues/85 @@ -246,8 +301,9 @@ mod tests { // wait for the server to start tokio::time::sleep(Duration::from_millis(100)).await; + let tracker_handle = TrackerHandle::new(); let client = SourceTransformClient::new(create_rpc_channel(sock_file).await?); - let transformer = Transformer::new(500, 10, client).await?; + let transformer = Transformer::new(500, 10, client, tracker_handle.clone()).await?; let (input_tx, input_rx) = mpsc::channel(10); let input_stream = ReceiverStream::new(input_rx); @@ -264,14 +320,11 @@ mod tests { id: MessageID { vertex_name: "vertex_name".to_string(), offset: i.to_string(), - index: i as i32, + index: i, }, headers: Default::default(), }; - let (tx, _) = oneshot::channel(); - let read_message = ReadMessage { message, ack: tx }; - - input_tx.send(read_message).await.unwrap(); + input_tx.send(message).await.unwrap(); } drop(input_tx); @@ -281,7 +334,7 @@ mod tests { for i in 0..5 { let transformed_message = output_rx.recv().await.unwrap(); - assert_eq!(transformed_message.message.value, format!("value_{}", i)); + assert_eq!(transformed_message.value, format!("value_{}", i)); } // we need to drop the transformer, because if there are any in-flight requests diff --git a/rust/numaflow-core/src/transformer/user_defined.rs b/rust/numaflow-core/src/transformer/user_defined.rs index 8ebb409717..1fe64884eb 100644 --- a/rust/numaflow-core/src/transformer/user_defined.rs +++ b/rust/numaflow-core/src/transformer/user_defined.rs @@ -24,13 +24,6 @@ struct ParentMessageInfo { headers: HashMap, } -pub enum ActorMessage { - Transform { - message: Message, - respond_to: oneshot::Sender>>, - }, -} - /// UserDefinedTransformer exposes methods to do user-defined transformations. 
pub(super) struct UserDefinedTransformer { read_tx: mpsc::Sender, @@ -90,14 +83,19 @@ impl UserDefinedTransformer { sender_map: ResponseSenderMap, mut resp_stream: Streaming, ) { - while let Some(resp) = resp_stream.message().await.unwrap() { + while let Some(resp) = resp_stream + .message() + .await + .expect("failed to receive response") + { let msg_id = resp.id; - for (i, result) in resp.results.into_iter().enumerate() { - if let Some((msg_info, sender)) = sender_map - .lock() - .expect("map entry should always be present") - .remove(&msg_id) - { + if let Some((msg_info, sender)) = sender_map + .lock() + .expect("map entry should always be present") + .remove(&msg_id) + { + let mut response_messages = vec![]; + for (i, result) in resp.results.into_iter().enumerate() { let message = Message { id: MessageID { vertex_name: get_vertex_name().to_string(), @@ -110,33 +108,33 @@ impl UserDefinedTransformer { event_time: utc_from_timestamp(result.event_time), headers: msg_info.headers.clone(), }; - let _ = sender.send(Ok(vec![message])); + response_messages.push(message); } + sender + .send(Ok(response_messages)) + .expect("failed to send response"); } } } /// Handles the incoming message and sends it to the server for transformation. 
- pub(super) async fn handle_message(&mut self, message: ActorMessage) { - match message { - ActorMessage::Transform { - message, - respond_to, - } => { - let msg_id = message.id.to_string(); - let msg_info = ParentMessageInfo { - offset: message.offset.clone().unwrap(), - headers: message.headers.clone(), - }; - - self.senders - .lock() - .unwrap() - .insert(msg_id, (msg_info, respond_to)); - - self.read_tx.send(message.into()).await.unwrap(); - } - } + pub(super) async fn transform( + &mut self, + message: Message, + respond_to: oneshot::Sender>>, + ) { + let msg_id = message.id.to_string(); + let msg_info = ParentMessageInfo { + offset: message.offset.clone().unwrap(), + headers: message.headers.clone(), + }; + + self.senders + .lock() + .unwrap() + .insert(msg_id, (msg_info, respond_to)); + + self.read_tx.send(message.into()).await.unwrap(); } } @@ -151,7 +149,7 @@ mod tests { use crate::message::{MessageID, StringOffset}; use crate::shared::grpc::create_rpc_channel; - use crate::transformer::user_defined::{ActorMessage, UserDefinedTransformer}; + use crate::transformer::user_defined::UserDefinedTransformer; struct NowCat; #[tonic::async_trait] @@ -212,18 +210,13 @@ mod tests { let (tx, rx) = tokio::sync::oneshot::channel(); - let _ = tokio::time::timeout( - Duration::from_secs(2), - client.handle_message(ActorMessage::Transform { - message, - respond_to: tx, - }), - ) - .await?; + tokio::time::timeout(Duration::from_secs(2), client.transform(message, tx)) + .await + .unwrap(); - let messages = rx.await?; + let messages = rx.await.unwrap(); assert!(messages.is_ok()); - assert_eq!(messages.unwrap().len(), 1); + assert_eq!(messages?.len(), 1); // we need to drop the client, because if there are any in-flight requests // server fails to shut down. 
https://github.com/numaproj/numaflow-rs/issues/85 From 55e798dc041d481c6c6c96399b66bb4c474cd623 Mon Sep 17 00:00:00 2001 From: Sreekanth Date: Mon, 9 Dec 2024 15:29:54 +0530 Subject: [PATCH 157/188] chore: Define common dependency versions in root Cargo.toml (#2266) Signed-off-by: Sreekanth --- rust/Cargo.lock | 16 +---- rust/Cargo.toml | 65 +++++++++++++++---- rust/backoff/Cargo.toml | 5 +- .../numaflow-pulsar}/Cargo.toml | 13 ++-- .../numaflow-pulsar}/src/lib.rs | 0 .../numaflow-pulsar}/src/source.rs | 2 +- rust/numaflow-core/Cargo.toml | 19 +++--- rust/numaflow-core/src/config/components.rs | 13 ++-- rust/numaflow-core/src/config/monovertex.rs | 4 +- rust/numaflow-core/src/config/pipeline.rs | 4 +- rust/numaflow-core/src/metrics.rs | 4 +- rust/numaflow-core/src/shared/metrics.rs | 2 +- rust/numaflow-core/src/sink.rs | 5 +- rust/numaflow-core/src/source/generator.rs | 4 +- rust/numaflow-core/src/source/user_defined.rs | 2 +- rust/numaflow-core/src/transformer.rs | 2 +- .../src/transformer/user_defined.rs | 2 +- rust/numaflow/Cargo.toml | 17 +++++ rust/{src/bin => numaflow/src}/main.rs | 2 +- rust/servesink/.dockerignore | 1 - rust/servesink/Cargo.toml | 8 ++- rust/serving/Cargo.toml | 17 ++--- rust/serving/src/app/tracker.rs | 4 +- 23 files changed, 129 insertions(+), 82 deletions(-) rename rust/{numaflow-extns/pulsar => extns/numaflow-pulsar}/Cargo.toml (75%) rename rust/{numaflow-extns/pulsar => extns/numaflow-pulsar}/src/lib.rs (100%) rename rust/{numaflow-extns/pulsar => extns/numaflow-pulsar}/src/source.rs (99%) create mode 100644 rust/numaflow/Cargo.toml rename rust/{src/bin => numaflow/src}/main.rs (95%) delete mode 100644 rust/servesink/.dockerignore diff --git a/rust/Cargo.lock b/rust/Cargo.lock index d6a518cee6..ec51332105 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -348,15 +348,6 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" 
-[[package]] -name = "bincode" -version = "1.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" -dependencies = [ - "serde", -] - [[package]] name = "bindgen" version = "0.69.5" @@ -1819,7 +1810,9 @@ dependencies = [ name = "numaflow" version = "0.1.0" dependencies = [ + "backoff", "numaflow-core", + "numaflow-models", "servesink", "serving", "tokio", @@ -1920,7 +1913,6 @@ dependencies = [ name = "numaflow-pulsar" version = "0.1.0" dependencies = [ - "bincode", "bytes", "chrono", "prost 0.11.9", @@ -3006,7 +2998,6 @@ dependencies = [ "tokio", "tonic", "tracing", - "tracing-subscriber", ] [[package]] @@ -3029,13 +3020,11 @@ dependencies = [ "redis", "serde", "serde_json", - "tempfile", "thiserror 1.0.69", "tokio", "tower 0.4.13", "tower-http", "tracing", - "tracing-subscriber", "trait-variant", "uuid", ] @@ -3374,7 +3363,6 @@ dependencies = [ "bytes", "libc", "mio", - "parking_lot", "pin-project-lite", "signal-hook-registry", "socket2", diff --git a/rust/Cargo.toml b/rust/Cargo.toml index 3cbd68ef23..8a6b41a1a4 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -1,26 +1,65 @@ -workspace = { members = [ +[workspace] +resolver = "2" +members = [ "backoff", "numaflow-models", "servesink", "serving", "numaflow-core", "numaflow-pb", - "numaflow-extns/pulsar", -] } + "extns/numaflow-pulsar", + "numaflow", +] -[[bin]] -name = "numaflow" -path = "src/bin/main.rs" +[workspace.lints.rust] +unsafe_code = "forbid" +unused_must_use = "forbid" +rust_2018_idioms = { level = "deny", priority = -1 } -[package] -name = "numaflow" -version = "0.1.0" -edition = "2021" +[workspace.lints.clippy] +enum_glob_use = "deny" +clone_on_ref_ptr = "deny" +dbg_macro = "deny" +todo = "deny" +# We should probably enable it in the future +# future_not_send = "deny" +empty_enum = "warn" +exit = "warn" +filter_map_next = "warn" +fn_params_excessive_bools = "warn" +inefficient_to_string = "warn" 
+match_on_vec_items = "warn" +match_wildcard_for_single_variants = "warn" +needless_continue = "warn" +option_option = "warn" +rest_pat_in_fully_bound_structs = "warn" +unnested_or_patterns = "warn" +unused_self = "warn" +verbose_file_reads = "warn" -[dependencies] -tokio = "1.41.1" +# This profile optimizes for runtime performance and small binary size at the expense of longer build times. +# Compared to default release profile, this profile reduced binary size from 29MB to 21MB +# and increased build time (with only one line change in code) from 12 seconds to 133 seconds (tested on Mac M2 Max). +[profile.release] +lto = "fat" + +# This profile optimizes for short build times at the expense of larger binary size and slower runtime performance. +# If you have to rebuild image often, in Dockerfile you may replace `--release` passed to cargo command with `--profile quick-release` +[profile.quick-release] +inherits = "release" +codegen-units = 16 +lto = false +incremental = true + +[workspace.dependencies] servesink = { path = "servesink" } serving = { path = "serving" } numaflow-core = { path = "numaflow-core" } +numaflow-models = { path = "numaflow-models" } +backoff = { path = "backoff" } +numaflow-pb = { path = "numaflow-pb" } +numaflow-pulsar = {path = "extns/numaflow-pulsar"} +tokio = "1.41.1" tracing = "0.1.40" -tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } +axum = "0.7.5" +axum-server = { version = "0.7.1", features = ["tls-rustls"] } diff --git a/rust/backoff/Cargo.toml b/rust/backoff/Cargo.toml index c82508001a..3a91ce70ac 100644 --- a/rust/backoff/Cargo.toml +++ b/rust/backoff/Cargo.toml @@ -3,6 +3,9 @@ name = "backoff" version = "0.1.0" edition = "2021" +[lints] +workspace = true + [dependencies] +tokio = {workspace = true, features = ["time"]} pin-project = "1.1.5" -tokio = { version = "1.41.1", features = ["full"] } diff --git a/rust/numaflow-extns/pulsar/Cargo.toml b/rust/extns/numaflow-pulsar/Cargo.toml similarity index 75% 
rename from rust/numaflow-extns/pulsar/Cargo.toml rename to rust/extns/numaflow-pulsar/Cargo.toml index cc91a495df..b699daaf58 100644 --- a/rust/numaflow-extns/pulsar/Cargo.toml +++ b/rust/extns/numaflow-pulsar/Cargo.toml @@ -3,20 +3,15 @@ name = "numaflow-pulsar" version = "0.1.0" edition = "2021" -[lints.rust] -unsafe_code = "forbid" -unused_must_use = "forbid" - -[lints.clippy] -enum_glob_use = "deny" +[lints] +workspace = true [dependencies] +tokio.workspace = true +tracing.workspace = true prost = "0.11.9" -tokio = "1.41.1" tonic = "0.12.3" serde = { version = "1.0.204", features = ["derive"] } -tracing = "0.1.40" -bincode = "1.3.3" chrono = "0.4.38" # Rustls doesn't allow accepting self-signed certs: https://github.com/streamnative/pulsar-rs/blob/715411cb365932c379d4b5d0a8fde2ac46c54055/src/connection.rs#L912 pulsar = {version = "6.3.0", default-features = false, features = ["tokio-rustls-runtime"]} diff --git a/rust/numaflow-extns/pulsar/src/lib.rs b/rust/extns/numaflow-pulsar/src/lib.rs similarity index 100% rename from rust/numaflow-extns/pulsar/src/lib.rs rename to rust/extns/numaflow-pulsar/src/lib.rs diff --git a/rust/numaflow-extns/pulsar/src/source.rs b/rust/extns/numaflow-pulsar/src/source.rs similarity index 99% rename from rust/numaflow-extns/pulsar/src/source.rs rename to rust/extns/numaflow-pulsar/src/source.rs index 7d2c13ed1d..587e4bb85f 100644 --- a/rust/numaflow-extns/pulsar/src/source.rs +++ b/rust/extns/numaflow-pulsar/src/source.rs @@ -293,6 +293,6 @@ impl PulsarSource { } pub fn partitions(&self) -> Vec { - todo!() + unimplemented!() } } diff --git a/rust/numaflow-core/Cargo.toml b/rust/numaflow-core/Cargo.toml index 1389946079..73c15c489c 100644 --- a/rust/numaflow-core/Cargo.toml +++ b/rust/numaflow-core/Cargo.toml @@ -8,14 +8,21 @@ nats-tests = [] pulsar-tests = [] all-tests = ["nats-tests", "pulsar-tests"] +[lints] +workspace = true + [dependencies] -axum = "0.7.5" -axum-server = { version = "0.7.1", features = ["tls-rustls"] } 
+tokio.workspace = true +tracing.workspace = true +numaflow-pulsar.workspace = true +numaflow-models.workspace = true +numaflow-pb.workspace = true +backoff.workspace = true +axum.workspace = true +axum-server.workspace = true tonic = "0.12.3" bytes = "1.7.1" thiserror = "2.0.3" -tokio = { version = "1.41.1", features = ["full"] } -tracing = "0.1.40" tokio-util = "0.7.11" tokio-stream = "0.1.15" prost = "0.13.2" @@ -25,15 +32,12 @@ base64 = "0.22.1" hyper-util = "0.1.6" tower = "0.4.13" serde_json = "1.0.122" -numaflow-models = { path = "../numaflow-models" } -numaflow-pb = { path = "../numaflow-pb" } trait-variant = "0.1.2" rcgen = "0.13.1" rustls = { version = "0.23.12", features = ["aws_lc_rs"] } serde = { version = "1.0.204", features = ["derive"] } semver = "1.0" pep440_rs = "0.6.6" -backoff = { path = "../backoff" } parking_lot = "0.12.3" prometheus-client = "0.22.3" kube = "0.95.0" @@ -41,7 +45,6 @@ futures = "0.3.30" pin-project = "1.1.5" rand = "0.8.5" async-nats = "0.38.0" -numaflow-pulsar = { path = "../numaflow-extns/pulsar" } [dev-dependencies] tempfile = "3.11.0" diff --git a/rust/numaflow-core/src/config/components.rs b/rust/numaflow-core/src/config/components.rs index f5780ed9f0..9b26c1d5d7 100644 --- a/rust/numaflow-core/src/config/components.rs +++ b/rust/numaflow-core/src/config/components.rs @@ -194,7 +194,7 @@ pub(crate) mod sink { // numaflow-models. Problem is, golang has embedded structures and rust does not. We might // have to AbstractSink for sink-configs while Sink for real sink types. // NOTE: I do not see this problem with Source? - pub(crate) fn primary_sinktype(sink: Box) -> Result { + pub(crate) fn primary_sinktype(sink: &Sink) -> Result { sink.udsink .as_ref() .map(|_| Ok(SinkType::UserDefined(UserDefinedConfig::default()))) @@ -211,8 +211,8 @@ pub(crate) mod sink { .ok_or_else(|| Error::Config("Sink type not found".to_string()))? 
} - pub(crate) fn fallback_sinktype(sink: Box) -> Result { - if let Some(fallback) = sink.fallback { + pub(crate) fn fallback_sinktype(sink: &Sink) -> Result { + if let Some(fallback) = sink.fallback.as_ref() { fallback .udsink .as_ref() @@ -423,9 +423,10 @@ pub(crate) mod metrics { impl MetricsConfig { pub(crate) fn with_lookback_window_in_secs(lookback_window_in_secs: u16) -> Self { - let mut default_config = Self::default(); - default_config.lookback_window_in_secs = lookback_window_in_secs; - default_config + MetricsConfig { + lookback_window_in_secs, + ..Default::default() + } } } } diff --git a/rust/numaflow-core/src/config/monovertex.rs b/rust/numaflow-core/src/config/monovertex.rs index 74a7e65fb0..686e284615 100644 --- a/rust/numaflow-core/src/config/monovertex.rs +++ b/rust/numaflow-core/src/config/monovertex.rs @@ -117,13 +117,13 @@ impl MonovertexConfig { .ok_or_else(|| Error::Config("Sink not found".to_string()))?; let sink_config = SinkConfig { - sink_type: SinkType::primary_sinktype(sink.clone())?, + sink_type: SinkType::primary_sinktype(&sink)?, retry_config: sink.retry_strategy.clone().map(|retry| retry.into()), }; let fb_sink_config = if sink.fallback.is_some() { Some(SinkConfig { - sink_type: SinkType::fallback_sinktype(sink)?, + sink_type: SinkType::fallback_sinktype(&sink)?, retry_config: None, }) } else { diff --git a/rust/numaflow-core/src/config/pipeline.rs b/rust/numaflow-core/src/config/pipeline.rs index 844425a8c8..d4676d1343 100644 --- a/rust/numaflow-core/src/config/pipeline.rs +++ b/rust/numaflow-core/src/config/pipeline.rs @@ -165,7 +165,7 @@ impl PipelineConfig { } else if let Some(sink) = vertex_obj.spec.sink { let fb_sink_config = if sink.fallback.as_ref().is_some() { Some(SinkConfig { - sink_type: SinkType::fallback_sinktype(sink.clone())?, + sink_type: SinkType::fallback_sinktype(&sink)?, retry_config: None, }) } else { @@ -174,7 +174,7 @@ impl PipelineConfig { VertexType::Sink(SinkVtxConfig { sink_config: SinkConfig { - 
sink_type: SinkType::primary_sinktype(sink)?, + sink_type: SinkType::primary_sinktype(&sink)?, retry_config: None, }, fb_sink_config, diff --git a/rust/numaflow-core/src/metrics.rs b/rust/numaflow-core/src/metrics.rs index 317b097e87..d8fa156d77 100644 --- a/rust/numaflow-core/src/metrics.rs +++ b/rust/numaflow-core/src/metrics.rs @@ -756,14 +756,14 @@ impl PendingReader { let pending_reader = self.lag_reader.clone(); let lag_checking_interval = self.lag_checking_interval; let refresh_interval = self.refresh_interval; - let pending_stats = self.pending_stats.clone(); + let pending_stats = Arc::clone(&self.pending_stats); let lookback_seconds = self.lookback_seconds; let buildup_handle = tokio::spawn(async move { build_pending_info(pending_reader, lag_checking_interval, pending_stats).await; }); - let pending_stats = self.pending_stats.clone(); + let pending_stats = Arc::clone(&self.pending_stats); let expose_handle = tokio::spawn(async move { expose_pending_metrics( is_mono_vertex, diff --git a/rust/numaflow-core/src/shared/metrics.rs b/rust/numaflow-core/src/shared/metrics.rs index 1b5a2a7db2..dfc22401af 100644 --- a/rust/numaflow-core/src/shared/metrics.rs +++ b/rust/numaflow-core/src/shared/metrics.rs @@ -40,6 +40,6 @@ pub(crate) async fn create_pending_reader( .refresh_interval(Duration::from_secs( metrics_config.lag_refresh_interval_in_secs.into(), )) - .lookback_seconds(metrics_config.lookback_window_in_secs.into()) + .lookback_seconds(metrics_config.lookback_window_in_secs) .build() } diff --git a/rust/numaflow-core/src/sink.rs b/rust/numaflow-core/src/sink.rs index dc321d6503..b820ceb57d 100644 --- a/rust/numaflow-core/src/sink.rs +++ b/rust/numaflow-core/src/sink.rs @@ -31,7 +31,7 @@ mod user_defined; /// /// [Sink]: https://numaflow.numaproj.io/user-guide/sinks/overview/ #[trait_variant::make(Sink: Send)] -#[allow(unused)] +#[allow(dead_code)] pub(crate) trait LocalSink { /// Write the messages to the Sink. 
async fn sink(&mut self, messages: Vec) -> Result>; @@ -362,7 +362,7 @@ impl SinkWriter { } // If after the retries we still have messages to process, handle the post retry failures - let need_retry = self.handle_sink_post_retry( + let need_retry = Self::handle_sink_post_retry( &mut attempts, &mut error_map, &mut fallback_msgs, @@ -394,7 +394,6 @@ impl SinkWriter { /// Handles the post retry failures based on the configured strategy, /// returns true if we need to retry, else false. fn handle_sink_post_retry( - &mut self, attempts: &mut u16, error_map: &mut HashMap, fallback_msgs: &mut Vec, diff --git a/rust/numaflow-core/src/source/generator.rs b/rust/numaflow-core/src/source/generator.rs index 22bdf94d58..c3671d0a98 100644 --- a/rust/numaflow-core/src/source/generator.rs +++ b/rust/numaflow-core/src/source/generator.rs @@ -17,7 +17,7 @@ use crate::source; /// =========================================================================> time /// Read RPU=5: | :xxx:xx: | :xxx |:xxx:xx:| :xxx:xx: | :xxx:xx: | /// 2 batches only 1 batch (no reread) 5 5 5 -/// +/// /// ``` /// NOTE: The minimum granularity of duration is 10ms. 
mod stream_generator { @@ -350,7 +350,7 @@ impl source::SourceReader for GeneratorRead { } fn partitions(&self) -> Vec { - todo!() + unimplemented!() } } diff --git a/rust/numaflow-core/src/source/user_defined.rs b/rust/numaflow-core/src/source/user_defined.rs index b75564bfbc..758f8a6fc2 100644 --- a/rust/numaflow-core/src/source/user_defined.rs +++ b/rust/numaflow-core/src/source/user_defined.rs @@ -136,7 +136,7 @@ impl SourceReader for UserDefinedSourceRead { } fn partitions(&self) -> Vec { - todo!() + unimplemented!() } } diff --git a/rust/numaflow-core/src/transformer.rs b/rust/numaflow-core/src/transformer.rs index 114e4a552f..d8302fb59d 100644 --- a/rust/numaflow-core/src/transformer.rs +++ b/rust/numaflow-core/src/transformer.rs @@ -163,7 +163,7 @@ impl Transformer { let mut input_stream = input_stream; while let Some(read_msg) = input_stream.next().await { - let permit = semaphore.clone().acquire_owned().await.unwrap(); + let permit = Arc::clone(&semaphore).acquire_owned().await.unwrap(); Self::transform( transform_handle.clone(), diff --git a/rust/numaflow-core/src/transformer/user_defined.rs b/rust/numaflow-core/src/transformer/user_defined.rs index 1fe64884eb..32c13eb0e9 100644 --- a/rust/numaflow-core/src/transformer/user_defined.rs +++ b/rust/numaflow-core/src/transformer/user_defined.rs @@ -67,7 +67,7 @@ impl UserDefinedTransformer { let transformer = Self { read_tx, - senders: sender_map.clone(), + senders: Arc::clone(&sender_map), }; // background task to receive responses from the server and send them to the appropriate diff --git a/rust/numaflow/Cargo.toml b/rust/numaflow/Cargo.toml new file mode 100644 index 0000000000..6d5fc0dd60 --- /dev/null +++ b/rust/numaflow/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "numaflow" +version = "0.1.0" +edition = "2021" + +[lints] +workspace = true + +[dependencies] +servesink.workspace = true +serving.workspace = true +numaflow-core.workspace = true +numaflow-models.workspace = true +backoff.workspace = 
true +tokio.workspace = true +tracing.workspace = true +tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } \ No newline at end of file diff --git a/rust/src/bin/main.rs b/rust/numaflow/src/main.rs similarity index 95% rename from rust/src/bin/main.rs rename to rust/numaflow/src/main.rs index 4ffd64ed7a..e25fbf9c66 100644 --- a/rust/src/bin/main.rs +++ b/rust/numaflow/src/main.rs @@ -35,6 +35,6 @@ async fn main() { error!("Error running rust binary: {}", e); } } else { - error!("Invalid argument. Use --serve, --servesink, or --rust."); + error!("Invalid argument. Use --serving, --servesink, or --rust."); } } diff --git a/rust/servesink/.dockerignore b/rust/servesink/.dockerignore deleted file mode 100644 index 2f7896d1d1..0000000000 --- a/rust/servesink/.dockerignore +++ /dev/null @@ -1 +0,0 @@ -target/ diff --git a/rust/servesink/Cargo.toml b/rust/servesink/Cargo.toml index 72f2802c5b..fbc9755433 100644 --- a/rust/servesink/Cargo.toml +++ b/rust/servesink/Cargo.toml @@ -3,12 +3,14 @@ name = "servesink" version = "0.1.0" edition = "2021" +[lints] +workspace = true + [dependencies] +tracing.workspace = true +tokio.workspace = true tonic = "0.12.3" -tokio = { version = "1.41.1", features = ["macros", "rt-multi-thread"] } numaflow = { git = "https://github.com/numaproj/numaflow-rs.git", rev = "ddd879588e11455921f1ca958ea2b3c076689293" } -tracing = "0.1.40" -tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } [dependencies.reqwest] version = "0.12.7" diff --git a/rust/serving/Cargo.toml b/rust/serving/Cargo.toml index 04fa96c288..d62a1d2d8f 100644 --- a/rust/serving/Cargo.toml +++ b/rust/serving/Cargo.toml @@ -8,30 +8,31 @@ redis-tests = [] nats-tests = [] all-tests = ["redis-tests", "nats-tests"] +[lints] +workspace = true + [dependencies] +tokio.workspace = true +tracing.workspace = true +numaflow-models.workspace = true +backoff.workspace = true +axum.workspace = true +axum-server.workspace = true async-nats = "0.35.1" -axum = 
"0.7.5" -axum-server = { version = "0.7.1", features = ["tls-rustls"] } axum-macros = "0.4.1" hyper-util = { version = "0.1.6", features = ["client-legacy"] } serde = { version = "1.0.204", features = ["derive"] } serde_json = "1.0.120" -tokio = { version = "1.41.1", features = ["full"] } tower = "0.4.13" tower-http = { version = "0.5.2", features = ["trace", "timeout"] } -tracing = "0.1.40" -tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } uuid = { version = "1.10.0", features = ["v4"] } -tempfile = "3.10.1" redis = { version = "0.26.0", features = ["tokio-comp", "aio", "connection-manager"] } config = "0.14.0" trait-variant = "0.1.2" chrono = { version = "0.4", features = ["serde"] } -backoff = { path = "../backoff" } base64 = "0.22.1" rcgen = "0.13.1" parking_lot = "0.12.3" prometheus-client = "0.22.3" thiserror = "1.0.63" -numaflow-models = { path = "../numaflow-models" } diff --git a/rust/serving/src/app/tracker.rs b/rust/serving/src/app/tracker.rs index 4714e2171b..33137f45db 100644 --- a/rust/serving/src/app/tracker.rs +++ b/rust/serving/src/app/tracker.rs @@ -79,7 +79,7 @@ impl MessageGraph { .entry(callback.vertex.clone()) .or_default() .push(CallbackRequestWrapper { - callback_request: callback.clone(), + callback_request: Arc::clone(&callback), visited: false, }); } @@ -142,7 +142,7 @@ impl MessageGraph { for callback in callbacks { if callback.callback_request.from_vertex == from && !callback.visited { callback.visited = true; - current_callback = Some(callback.callback_request.clone()); + current_callback = Some(Arc::clone(&callback.callback_request)); break; } } From 7c07e0541ef3060ae8995c5359eb0e465c49b474 Mon Sep 17 00:00:00 2001 From: Yashash H L Date: Mon, 9 Dec 2024 22:19:15 +0530 Subject: [PATCH 158/188] feat: Conditional Forwarding In Asynchronous Pipeline (#2270) Signed-off-by: Yashash H L Signed-off-by: Vigith Maurice Co-authored-by: Vigith Maurice --- rust/numaflow-core/src/config/pipeline.rs | 23 +- 
rust/numaflow-core/src/config/pipeline/isb.rs | 37 +- rust/numaflow-core/src/message.rs | 13 +- rust/numaflow-core/src/pipeline.rs | 21 +- .../pipeline/forwarder/source_forwarder.rs | 28 +- .../src/pipeline/isb/jetstream.rs | 328 +------ .../src/pipeline/isb/jetstream/reader.rs | 46 +- .../src/pipeline/isb/jetstream/writer.rs | 834 ++++++++++++++---- rust/numaflow-core/src/shared.rs | 3 + rust/numaflow-core/src/shared/forward.rs | 108 +++ rust/numaflow-core/src/sink.rs | 12 +- rust/numaflow-core/src/sink/blackhole.rs | 2 + rust/numaflow-core/src/sink/log.rs | 2 + rust/numaflow-core/src/sink/user_defined.rs | 2 + rust/numaflow-core/src/source.rs | 4 +- rust/numaflow-core/src/source/generator.rs | 6 +- rust/numaflow-core/src/source/pulsar.rs | 1 + rust/numaflow-core/src/tracker.rs | 2 +- rust/numaflow-core/src/transformer.rs | 4 +- .../src/transformer/user_defined.rs | 4 +- 20 files changed, 896 insertions(+), 584 deletions(-) create mode 100644 rust/numaflow-core/src/shared/forward.rs diff --git a/rust/numaflow-core/src/config/pipeline.rs b/rust/numaflow-core/src/config/pipeline.rs index d4676d1343..c9d02e632c 100644 --- a/rust/numaflow-core/src/config/pipeline.rs +++ b/rust/numaflow-core/src/config/pipeline.rs @@ -101,8 +101,7 @@ pub(crate) struct FromVertexConfig { pub(crate) struct ToVertexConfig { pub(crate) name: String, pub(crate) writer_config: BufferWriterConfig, - pub(crate) partitions: u16, - pub(crate) conditions: Option, + pub(crate) conditions: Option>, } impl PipelineConfig { @@ -248,25 +247,25 @@ impl PipelineConfig { writer_config: BufferWriterConfig { streams, partitions: partition_count, - max_length: vertex_obj - .spec - .limits + max_length: edge + .to_vertex_limits .as_ref() .and_then(|l| l.buffer_max_length) .unwrap_or(default_writer_config.max_length as i64) as usize, - usage_limit: vertex_obj - .spec - .limits + usage_limit: edge + .to_vertex_limits .as_ref() .and_then(|l| l.buffer_usage_limit) .unwrap_or(default_writer_config.usage_limit as 
i64) as f64 / 100.0, - ..default_writer_config + buffer_full_strategy: edge + .on_full + .and_then(|s| s.clone().try_into().ok()) + .unwrap_or(default_writer_config.buffer_full_strategy), }, - partitions: edge.to_vertex_partition_count.unwrap_or_default() as u16, - conditions: None, + conditions: edge.conditions, }); } @@ -421,7 +420,6 @@ mod tests { usage_limit: 0.85, ..Default::default() }, - partitions: 1, conditions: None, }], vertex_config: VertexType::Source(SourceVtxConfig { @@ -474,7 +472,6 @@ mod tests { usage_limit: 0.8, ..Default::default() }, - partitions: 1, conditions: None, }], vertex_config: VertexType::Source(SourceVtxConfig { diff --git a/rust/numaflow-core/src/config/pipeline/isb.rs b/rust/numaflow-core/src/config/pipeline/isb.rs index 30c72351c9..50da134e87 100644 --- a/rust/numaflow-core/src/config/pipeline/isb.rs +++ b/rust/numaflow-core/src/config/pipeline/isb.rs @@ -6,9 +6,7 @@ const DEFAULT_PARTITION_IDX: u16 = 0; const DEFAULT_PARTITIONS: u16 = 1; const DEFAULT_MAX_LENGTH: usize = 30000; const DEFAULT_USAGE_LIMIT: f64 = 0.8; -const DEFAULT_REFRESH_INTERVAL_SECS: u64 = 1; const DEFAULT_BUFFER_FULL_STRATEGY: BufferFullStrategy = BufferFullStrategy::RetryUntilSuccess; -const DEFAULT_RETRY_INTERVAL_MILLIS: u64 = 10; const DEFAULT_WIP_ACK_INTERVAL_MILLIS: u64 = 1000; pub(crate) mod jetstream { @@ -36,10 +34,8 @@ pub(crate) struct BufferWriterConfig { pub streams: Vec<(String, u16)>, pub partitions: u16, pub max_length: usize, - pub refresh_interval: Duration, pub usage_limit: f64, pub buffer_full_strategy: BufferFullStrategy, - pub retry_interval: Duration, } impl Default for BufferWriterConfig { @@ -49,20 +45,30 @@ impl Default for BufferWriterConfig { partitions: DEFAULT_PARTITIONS, max_length: DEFAULT_MAX_LENGTH, usage_limit: DEFAULT_USAGE_LIMIT, - refresh_interval: Duration::from_secs(DEFAULT_REFRESH_INTERVAL_SECS), buffer_full_strategy: DEFAULT_BUFFER_FULL_STRATEGY, - retry_interval: Duration::from_millis(DEFAULT_RETRY_INTERVAL_MILLIS), } 
} } -#[derive(Debug, Clone, Eq, PartialEq)] +#[derive(Debug, Clone, Eq, PartialEq, Default)] pub(crate) enum BufferFullStrategy { + #[default] RetryUntilSuccess, - #[allow(dead_code)] DiscardLatest, } +impl TryFrom for BufferFullStrategy { + type Error = &'static str; + + fn try_from(value: String) -> Result { + match value.as_str() { + "retryUntilSuccess" => Ok(BufferFullStrategy::RetryUntilSuccess), + "discardLatest" => Ok(BufferFullStrategy::DiscardLatest), + _ => Err("Invalid BufferFullStrategy string"), + } + } +} + impl fmt::Display for BufferFullStrategy { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { @@ -116,9 +122,7 @@ mod tests { partitions: DEFAULT_PARTITIONS, max_length: DEFAULT_MAX_LENGTH, usage_limit: DEFAULT_USAGE_LIMIT, - refresh_interval: Duration::from_secs(DEFAULT_REFRESH_INTERVAL_SECS), buffer_full_strategy: DEFAULT_BUFFER_FULL_STRATEGY, - retry_interval: Duration::from_millis(DEFAULT_RETRY_INTERVAL_MILLIS), }; let config = BufferWriterConfig::default(); @@ -144,4 +148,17 @@ mod tests { let config = BufferReaderConfig::default(); assert_eq!(config, expected); } + + #[test] + fn test_try_from_string_to_buffer_full_strategy() { + assert_eq!( + BufferFullStrategy::try_from("retryUntilSuccess".to_string()).unwrap(), + BufferFullStrategy::RetryUntilSuccess + ); + assert_eq!( + BufferFullStrategy::try_from("discardLatest".to_string()).unwrap(), + BufferFullStrategy::DiscardLatest + ); + assert!(BufferFullStrategy::try_from("invalidStrategy".to_string()).is_err()); + } } diff --git a/rust/numaflow-core/src/message.rs b/rust/numaflow-core/src/message.rs index a3436e6ab4..f8cb25ff1a 100644 --- a/rust/numaflow-core/src/message.rs +++ b/rust/numaflow-core/src/message.rs @@ -25,8 +25,11 @@ const DROP: &str = "U+005C__DROP__"; /// A message that is sent from the source to the sink. 
#[derive(Debug, Clone, Serialize, Deserialize)] pub(crate) struct Message { + // FIXME: Arc<[Bytes]> /// keys of the message pub(crate) keys: Vec, + /// tags of the message + pub(crate) tags: Option>, /// actual payload of the message pub(crate) value: Bytes, /// offset of the message, it is optional because offset is only @@ -85,6 +88,7 @@ impl TryFrom for Message { Ok(Self { keys, + tags: None, value: payload, offset, event_time, @@ -97,7 +101,9 @@ impl TryFrom for Message { impl Message { // Check if the message should be dropped. pub(crate) fn dropped(&self) -> bool { - self.keys.len() == 1 && self.keys[0] == DROP + self.tags + .as_ref() + .map_or(false, |tags| tags.contains(&DROP.to_string())) } } @@ -250,6 +256,7 @@ impl TryFrom for Message { Ok(Message { keys: header.keys, + tags: None, value: body.payload.into(), offset: None, event_time: utc_from_timestamp(message_info.event_time), @@ -293,6 +300,7 @@ impl TryFrom for Message { Ok(Message { keys: result.keys, + tags: None, value: result.payload.into(), offset: Some(source_offset.clone()), event_time: utc_from_timestamp(result.event_time), @@ -411,6 +419,7 @@ mod tests { fn test_message_to_vec_u8() { let message = Message { keys: vec!["key1".to_string()], + tags: None, value: vec![1, 2, 3].into(), offset: Some(Offset::String(StringOffset { offset: "123".to_string(), @@ -491,6 +500,7 @@ mod tests { fn test_message_to_source_transform_request() { let message = Message { keys: vec!["key1".to_string()], + tags: None, value: vec![1, 2, 3].into(), offset: Some(Offset::String(StringOffset { offset: "123".to_string(), @@ -540,6 +550,7 @@ mod tests { fn test_message_to_sink_request() { let message = Message { keys: vec!["key1".to_string()], + tags: None, value: vec![1, 2, 3].into(), offset: Some(Offset::String(StringOffset { offset: "123".to_string(), diff --git a/rust/numaflow-core/src/pipeline.rs b/rust/numaflow-core/src/pipeline.rs index d3b2e076f1..ffee46033b 100644 --- a/rust/numaflow-core/src/pipeline.rs +++ 
b/rust/numaflow-core/src/pipeline.rs @@ -12,7 +12,7 @@ use crate::config::pipeline::{PipelineConfig, SinkVtxConfig, SourceVtxConfig}; use crate::metrics::{PipelineContainerState, UserDefinedContainerState}; use crate::pipeline::forwarder::source_forwarder; use crate::pipeline::isb::jetstream::reader::JetstreamReader; -use crate::pipeline::isb::jetstream::ISBWriter; +use crate::pipeline::isb::jetstream::writer::JetstreamWriter; use crate::shared::create_components; use crate::shared::create_components::create_sink_writer; use crate::shared::metrics::start_metrics_server; @@ -179,19 +179,14 @@ async fn create_buffer_writer( js_context: Context, tracker_handle: TrackerHandle, cln_token: CancellationToken, -) -> ISBWriter { - ISBWriter::new( - config.paf_concurrency, - config - .to_vertex_config - .iter() - .map(|tv| tv.writer_config.clone()) - .collect(), +) -> JetstreamWriter { + JetstreamWriter::new( + config.to_vertex_config.clone(), js_context, + config.paf_concurrency, tracker_handle, cln_token, ) - .await } async fn create_buffer_reader( @@ -238,7 +233,7 @@ mod tests { use async_nats::jetstream; use async_nats::jetstream::{consumer, stream}; - use futures::StreamExt; + use tokio_stream::StreamExt; use super::*; use crate::config::components::metrics::MetricsConfig; @@ -326,12 +321,9 @@ mod tests { .collect(), partitions: 5, max_length: 30000, - refresh_interval: Duration::from_secs(1), usage_limit: 0.8, buffer_full_strategy: RetryUntilSuccess, - retry_interval: Duration::from_millis(10), }, - partitions: 5, conditions: None, }], vertex_config: VertexType::Source(SourceVtxConfig { @@ -437,6 +429,7 @@ mod tests { use crate::message::{Message, MessageID, Offset, StringOffset}; let message = Message { keys: vec!["key1".to_string()], + tags: None, value: vec![1, 2, 3].into(), offset: Some(Offset::String(StringOffset::new("123".to_string(), 0))), event_time: Utc.timestamp_opt(1627846261, 0).unwrap(), diff --git 
a/rust/numaflow-core/src/pipeline/forwarder/source_forwarder.rs b/rust/numaflow-core/src/pipeline/forwarder/source_forwarder.rs index 065b19e48c..e2b009e7d9 100644 --- a/rust/numaflow-core/src/pipeline/forwarder/source_forwarder.rs +++ b/rust/numaflow-core/src/pipeline/forwarder/source_forwarder.rs @@ -2,7 +2,7 @@ use tokio_util::sync::CancellationToken; use crate::error; use crate::error::Error; -use crate::pipeline::isb::jetstream::ISBWriter; +use crate::pipeline::isb::jetstream::writer::JetstreamWriter; use crate::source::Source; use crate::transformer::Transformer; @@ -11,7 +11,7 @@ use crate::transformer::Transformer; pub(crate) struct SourceForwarder { source: Source, transformer: Option, - writer: ISBWriter, + writer: JetstreamWriter, cln_token: CancellationToken, } @@ -19,14 +19,14 @@ pub(crate) struct SourceForwarder { pub(crate) struct SourceForwarderBuilder { streaming_source: Source, transformer: Option, - writer: ISBWriter, + writer: JetstreamWriter, cln_token: CancellationToken, } impl SourceForwarderBuilder { pub(crate) fn new( streaming_source: Source, - writer: ISBWriter, + writer: JetstreamWriter, cln_token: CancellationToken, ) -> Self { Self { @@ -114,7 +114,8 @@ mod tests { use tokio_util::sync::CancellationToken; use crate::config::pipeline::isb::BufferWriterConfig; - use crate::pipeline::isb::jetstream::ISBWriter; + use crate::config::pipeline::ToVertexConfig; + use crate::pipeline::isb::jetstream::writer::JetstreamWriter; use crate::pipeline::source_forwarder::SourceForwarderBuilder; use crate::shared::grpc::create_rpc_channel; use crate::source::user_defined::new_source; @@ -275,17 +276,20 @@ mod tests { .await .unwrap(); - let writer = ISBWriter::new( - 10, - vec![BufferWriterConfig { - streams: vec![(stream_name.to_string(), 0)], - ..Default::default() + let writer = JetstreamWriter::new( + vec![ToVertexConfig { + writer_config: BufferWriterConfig { + streams: vec![(stream_name.to_string(), 0)], + ..Default::default() + }, + conditions: 
None, + name: "test-vertex".to_string(), }], context.clone(), + 100, tracker_handle.clone(), cln_token.clone(), - ) - .await; + ); // create a transformer let (st_shutdown_tx, st_shutdown_rx) = oneshot::channel(); diff --git a/rust/numaflow-core/src/pipeline/isb/jetstream.rs b/rust/numaflow-core/src/pipeline/isb/jetstream.rs index d4f37cf184..c7a8f74c1e 100644 --- a/rust/numaflow-core/src/pipeline/isb/jetstream.rs +++ b/rust/numaflow-core/src/pipeline/isb/jetstream.rs @@ -1,337 +1,11 @@ -use async_nats::jetstream::Context; -use bytes::BytesMut; -use tokio::task::JoinHandle; -use tokio_stream::wrappers::ReceiverStream; -use tokio_stream::StreamExt; -use tokio_util::sync::CancellationToken; -use tracing::info; - -use crate::config::pipeline::isb::BufferWriterConfig; -use crate::message::Message; -use crate::metrics::{pipeline_isb_metric_labels, pipeline_metrics}; -use crate::pipeline::isb::jetstream::writer::{ - JetstreamWriter, PafResolver, ResolveAndPublishResult, -}; -use crate::tracker::TrackerHandle; -use crate::Result; - /// JetStream Writer is responsible for writing messages to JetStream ISB. /// it exposes both sync and async methods to write messages. It has gates /// to prevent writing into the buffer if the buffer is full. After successful /// writes, it will let the callee know the status (or return a non-retryable /// exception). -pub(super) mod writer; +pub(crate) mod writer; pub(crate) mod reader; /// Stream is a combination of stream name and partition id. type Stream = (String, u16); - -/// StreamingJetstreamWriter is a streaming version of JetstreamWriter. It accepts a stream of messages -/// and writes them to Jetstream ISB. It also has a PAF resolver actor to resolve the PAFs. 
-#[derive(Clone)] -pub(crate) struct ISBWriter { - paf_concurrency: usize, - config: Vec, - writer: JetstreamWriter, - tracker_handle: TrackerHandle, -} - -impl ISBWriter { - pub(crate) async fn new( - paf_concurrency: usize, - config: Vec, - js_ctx: Context, - tracker_handle: TrackerHandle, - cancel_token: CancellationToken, - ) -> Self { - info!(?config, paf_concurrency, "Streaming JetstreamWriter",); - - let js_writer = JetstreamWriter::new( - // flatten the streams across the config - config.iter().flat_map(|c| c.streams.clone()).collect(), - config.first().unwrap().clone(), - js_ctx, - cancel_token.clone(), - ); - - Self { - config, - writer: js_writer, - paf_concurrency, - tracker_handle, - } - } - - /// Starts reading messages from the stream and writes them to Jetstream ISB. - pub(crate) async fn streaming_write( - &self, - messages_stream: ReceiverStream, - ) -> Result>> { - let handle: JoinHandle> = tokio::spawn({ - let writer = self.writer.clone(); - let paf_concurrency = self.paf_concurrency; - let config = self.config.clone(); - let tracker_handle = self.tracker_handle.clone(); - - let mut messages_stream = messages_stream; - let mut index = 0; - - async move { - let paf_resolver = - PafResolver::new(paf_concurrency, writer.clone(), tracker_handle.clone()); - while let Some(message) = messages_stream.next().await { - // if message needs to be dropped, ack and continue - // TODO: add metric for dropped count - if message.dropped() { - // delete the entry from tracker - tracker_handle.delete(message.id.offset).await?; - continue; - } - let mut pafs = vec![]; - - // FIXME(CF): This is a temporary solution to round-robin the streams - for buffer in &config { - let payload: BytesMut = message - .clone() - .try_into() - .expect("message serialization should not fail"); - let stream = buffer.streams.get(index).unwrap(); - index = (index + 1) % buffer.streams.len(); - - let paf = writer.write(stream.clone(), payload.into()).await; - pafs.push((stream.clone(), 
paf)); - } - - pipeline_metrics() - .forwarder - .write_total - .get_or_create(pipeline_isb_metric_labels()) - .inc(); - - paf_resolver - .resolve_pafs(ResolveAndPublishResult { - pafs, - payload: message.value.clone().into(), - offset: message.id.offset, - }) - .await?; - } - Ok(()) - } - }); - Ok(handle) - } -} - -#[cfg(test)] -mod tests { - use std::collections::HashMap; - - use async_nats::jetstream; - use async_nats::jetstream::{consumer, stream}; - use chrono::Utc; - - use super::*; - use crate::message::{Message, MessageID, ReadAck}; - - #[cfg(feature = "nats-tests")] - #[tokio::test] - async fn test_publish_messages() { - let cln_token = CancellationToken::new(); - let js_url = "localhost:4222"; - // Create JetStream context - let client = async_nats::connect(js_url).await.unwrap(); - let context = jetstream::new(client); - let tracker_handle = TrackerHandle::new(); - - let stream_name = "test_publish_messages"; - // Delete stream if it exists - let _ = context.delete_stream(stream_name).await; - let _stream = context - .get_or_create_stream(stream::Config { - name: stream_name.into(), - subjects: vec![stream_name.into()], - max_messages: 1000, - ..Default::default() - }) - .await - .unwrap(); - - let _consumer = context - .create_consumer_on_stream( - consumer::Config { - name: Some(stream_name.to_string()), - ack_policy: consumer::AckPolicy::Explicit, - ..Default::default() - }, - stream_name, - ) - .await - .unwrap(); - - let writer = ISBWriter::new( - 10, - vec![BufferWriterConfig { - streams: vec![(stream_name.to_string(), 0)], - max_length: 1000, - ..Default::default() - }], - context.clone(), - tracker_handle.clone(), - cln_token.clone(), - ) - .await; - - let (messages_tx, messages_rx) = tokio::sync::mpsc::channel(500); - let mut ack_rxs = vec![]; - // Publish 500 messages - for i in 0..500 { - let message = Message { - keys: vec![format!("key_{}", i)], - value: format!("message {}", i).as_bytes().to_vec().into(), - offset: None, - event_time: 
Utc::now(), - id: MessageID { - vertex_name: "vertex".to_string(), - offset: format!("offset_{}", i), - index: i, - }, - headers: HashMap::new(), - }; - let (ack_tx, ack_rx) = tokio::sync::oneshot::channel(); - tracker_handle - .insert(message.id.offset.clone(), ack_tx) - .await - .unwrap(); - ack_rxs.push(ack_rx); - messages_tx.send(message).await.unwrap(); - } - drop(messages_tx); - - let receiver_stream = ReceiverStream::new(messages_rx); - let _handle = writer.streaming_write(receiver_stream).await.unwrap(); - - for ack_rx in ack_rxs { - assert_eq!(ack_rx.await.unwrap(), ReadAck::Ack); - } - // make sure all messages are acked - assert!(tracker_handle.is_empty().await.unwrap()); - context.delete_stream(stream_name).await.unwrap(); - } - - #[cfg(feature = "nats-tests")] - #[tokio::test] - async fn test_publish_messages_with_cancellation() { - let js_url = "localhost:4222"; - // Create JetStream context - let client = async_nats::connect(js_url).await.unwrap(); - let context = jetstream::new(client); - let tracker_handle = TrackerHandle::new(); - - let stream_name = "test_publish_cancellation"; - // Delete stream if it exists - let _ = context.delete_stream(stream_name).await; - let _stream = context - .get_or_create_stream(stream::Config { - name: stream_name.into(), - subjects: vec![stream_name.into()], - max_message_size: 1024, - ..Default::default() - }) - .await - .unwrap(); - - let _consumer = context - .create_consumer_on_stream( - consumer::Config { - name: Some(stream_name.to_string()), - ack_policy: consumer::AckPolicy::Explicit, - ..Default::default() - }, - stream_name, - ) - .await - .unwrap(); - - let cancel_token = CancellationToken::new(); - let writer = ISBWriter::new( - 10, - vec![BufferWriterConfig { - streams: vec![(stream_name.to_string(), 0)], - ..Default::default() - }], - context.clone(), - tracker_handle.clone(), - cancel_token.clone(), - ) - .await; - - let (tx, rx) = tokio::sync::mpsc::channel(500); - let mut ack_rxs = vec![]; - // 
Publish 100 messages successfully - for i in 0..100 { - let message = Message { - keys: vec![format!("key_{}", i)], - value: format!("message {}", i).as_bytes().to_vec().into(), - offset: None, - event_time: Utc::now(), - id: MessageID { - vertex_name: "vertex".to_string(), - offset: format!("offset_{}", i), - index: i, - }, - headers: HashMap::new(), - }; - let (ack_tx, ack_rx) = tokio::sync::oneshot::channel(); - tracker_handle - .insert(message.id.offset.clone(), ack_tx) - .await - .unwrap(); - ack_rxs.push(ack_rx); - tx.send(message).await.unwrap(); - } - - let receiver_stream = ReceiverStream::new(rx); - let _handle = writer.streaming_write(receiver_stream).await.unwrap(); - - // Attempt to publish the 101th message, which should get stuck in the retry loop - // because the max message size is set to 1024 - let message = Message { - keys: vec!["key_101".to_string()], - value: vec![0; 1025].into(), - offset: None, - event_time: Utc::now(), - id: MessageID { - vertex_name: "vertex".to_string(), - offset: "offset_101".to_string(), - index: 101, - }, - headers: HashMap::new(), - }; - let (ack_tx, ack_rx) = tokio::sync::oneshot::channel(); - tracker_handle - .insert("offset_101".to_string(), ack_tx) - .await - .unwrap(); - ack_rxs.push(ack_rx); - tx.send(message).await.unwrap(); - drop(tx); - - // Cancel the token to exit the retry loop - cancel_token.cancel(); - // Check the results - for (i, receiver) in ack_rxs.into_iter().enumerate() { - let result = receiver.await.unwrap(); - if i < 100 { - assert_eq!(result, ReadAck::Ack); - } else { - assert_eq!(result, ReadAck::Nak); - } - } - - // make sure all messages are acked - assert!(tracker_handle.is_empty().await.unwrap()); - context.delete_stream(stream_name).await.unwrap(); - } -} diff --git a/rust/numaflow-core/src/pipeline/isb/jetstream/reader.rs b/rust/numaflow-core/src/pipeline/isb/jetstream/reader.rs index a9e3622005..4216484ecc 100644 --- a/rust/numaflow-core/src/pipeline/isb/jetstream/reader.rs +++ 
b/rust/numaflow-core/src/pipeline/isb/jetstream/reader.rs @@ -104,8 +104,6 @@ impl JetstreamReader { )) })?; - let mut start_time = Instant::now(); - let mut total_messages = 0; loop { tokio::select! { _ = cancel_token.cancelled() => { // should we drain from the stream when token is cancelled? @@ -175,16 +173,6 @@ impl JetstreamReader { .read_total .get_or_create(labels) .inc(); - - if start_time.elapsed() >= Duration::from_millis(1000) { - info!( - "Total messages read from Jetstream in {:?} seconds: {}", - start_time.elapsed(), - total_messages - ); - start_time = Instant::now(); - total_messages = 0; - } } } } @@ -273,7 +261,6 @@ mod tests { use super::*; use crate::message::{Message, MessageID}; - use crate::pipeline::isb::jetstream::writer::JetstreamWriter; use async_nats::jetstream; use async_nats::jetstream::{consumer, stream}; use bytes::BytesMut; @@ -338,17 +325,10 @@ mod tests { .await .unwrap(); - let writer_cancel_token = CancellationToken::new(); - let writer = JetstreamWriter::new( - vec![(stream_name.to_string(), 0)], - Default::default(), - context.clone(), - writer_cancel_token.clone(), - ); - for i in 0..10 { let message = Message { keys: vec![format!("key_{}", i)], + tags: None, value: format!("message {}", i).as_bytes().to_vec().into(), offset: None, event_time: Utc::now(), @@ -360,16 +340,12 @@ mod tests { headers: HashMap::new(), }; let message_bytes: BytesMut = message.try_into().unwrap(); - writer - .write((stream_name.to_string(), 0), message_bytes.into()) - .await + context + .publish(stream_name, message_bytes.into()) .await .unwrap(); } - // Cancel the token to exit the retry loop - writer_cancel_token.cancel(); - let mut buffer = vec![]; for _ in 0..10 { let Some(val) = js_reader_rx.next().await else { @@ -449,19 +425,12 @@ mod tests { .await .unwrap(); - let writer_cancel_token = CancellationToken::new(); - let writer = JetstreamWriter::new( - vec![(stream_name.to_string(), 0)], - Default::default(), - context.clone(), - 
writer_cancel_token.clone(), - ); - let mut offsets = vec![]; // write 5 messages for i in 0..5 { let message = Message { keys: vec![format!("key_{}", i)], + tags: None, value: format!("message {}", i).as_bytes().to_vec().into(), offset: None, event_time: Utc::now(), @@ -474,14 +443,11 @@ mod tests { }; offsets.push(message.id.offset.clone()); let message_bytes: BytesMut = message.try_into().unwrap(); - writer - .write((stream_name.to_string(), 0), message_bytes.into()) - .await + context + .publish(stream_name, message_bytes.into()) .await .unwrap(); } - // Cancel the token to exit the retry loop - writer_cancel_token.cancel(); for _ in 0..5 { let Some(_val) = js_reader_rx.next().await else { diff --git a/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs b/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs index a90b20a5c1..53c20a3af3 100644 --- a/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs +++ b/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs @@ -1,60 +1,77 @@ use std::collections::HashMap; +use std::hash::DefaultHasher; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use std::time::Duration; +use crate::config::pipeline::isb::BufferFullStrategy; +use crate::config::pipeline::ToVertexConfig; +use crate::error::Error; +use crate::message::{IntOffset, Message, Offset}; +use crate::metrics::{pipeline_isb_metric_labels, pipeline_metrics}; +use crate::pipeline::isb::jetstream::Stream; +use crate::tracker::TrackerHandle; +use crate::Result; + +use crate::shared::forward; use async_nats::jetstream::consumer::PullConsumer; use async_nats::jetstream::context::PublishAckFuture; use async_nats::jetstream::publish::PublishAck; use async_nats::jetstream::stream::RetentionPolicy::Limits; use async_nats::jetstream::Context; -use bytes::Bytes; +use bytes::{Bytes, BytesMut}; use tokio::sync::Semaphore; +use tokio::task::JoinHandle; use tokio::time::{sleep, Instant}; +use tokio_stream::wrappers::ReceiverStream; +use 
tokio_stream::StreamExt; use tokio_util::sync::CancellationToken; use tracing::{debug, error, info, warn}; -use crate::config::pipeline::isb::BufferWriterConfig; -use crate::error::Error; -use crate::message::{IntOffset, Offset}; -use crate::metrics::{pipeline_isb_metric_labels, pipeline_metrics}; -use crate::pipeline::isb::jetstream::Stream; -use crate::tracker::TrackerHandle; -use crate::Result; +const DEFAULT_RETRY_INTERVAL_MILLIS: u64 = 10; +const DEFAULT_REFRESH_INTERVAL_SECS: u64 = 1; -#[derive(Clone, Debug)] +#[derive(Clone)] /// Writes to JetStream ISB. Exposes both write and blocking methods to write messages. /// It accepts a cancellation token to stop infinite retries during shutdown. /// JetstreamWriter is one to many mapping of streams to write messages to. It also /// maintains the buffer usage metrics for each stream. pub(crate) struct JetstreamWriter { - streams: Vec, - config: BufferWriterConfig, + config: Arc>, js_ctx: Context, is_full: HashMap>, cancel_token: CancellationToken, + tracker_handle: TrackerHandle, + sem: Arc, } impl JetstreamWriter { /// Creates a JetStream Writer and a background task to make sure the Write futures (PAFs) are /// successful. Batch Size determines the maximum pending futures. 
pub(crate) fn new( - streams: Vec, - config: BufferWriterConfig, + config: Vec, js_ctx: Context, + paf_concurrency: usize, + tracker_handle: TrackerHandle, cancel_token: CancellationToken, ) -> Self { + let streams = config + .iter() + .flat_map(|c| c.writer_config.streams.clone()) + .collect::>(); + let is_full = streams .iter() .map(|stream| (stream.0.clone(), Arc::new(AtomicBool::new(false)))) .collect::>(); let this = Self { - streams, - config, + config: Arc::new(config), js_ctx, is_full, cancel_token, + tracker_handle, + sem: Arc::new(Semaphore::new(paf_concurrency)), }; // spawn a task for checking whether buffer is_full @@ -71,25 +88,28 @@ impl JetstreamWriter { /// Checks the buffer usage metrics (soft and solid usage) for each stream in the streams vector. /// If the usage is greater than the bufferUsageLimit, it sets the is_full flag to true. async fn check_stream_status(&mut self) { - let mut interval = tokio::time::interval(self.config.refresh_interval); + let mut interval = + tokio::time::interval(Duration::from_secs(DEFAULT_REFRESH_INTERVAL_SECS)); loop { tokio::select! 
{ _ = interval.tick() => { - for stream in &self.streams { - match Self::fetch_buffer_usage(self.js_ctx.clone(), stream.0.as_str(), self.config.max_length).await { - Ok((soft_usage, solid_usage)) => { - if solid_usage >= self.config.usage_limit && soft_usage >= self.config.usage_limit { + for config in &*self.config { + for stream in &config.writer_config.streams { + match Self::fetch_buffer_usage(self.js_ctx.clone(), stream.0.as_str(), config.writer_config.max_length).await { + Ok((soft_usage, solid_usage)) => { + if solid_usage >= config.writer_config.usage_limit && soft_usage >= config.writer_config.usage_limit { + if let Some(is_full) = self.is_full.get(stream.0.as_str()) { + is_full.store(true, Ordering::Relaxed); + } + } else if let Some(is_full) = self.is_full.get(stream.0.as_str()) { + is_full.store(false, Ordering::Relaxed); + } + } + Err(e) => { + error!(?e, "Failed to fetch buffer usage for stream {}, updating isFull to true", stream.0.as_str()); if let Some(is_full) = self.is_full.get(stream.0.as_str()) { is_full.store(true, Ordering::Relaxed); } - } else if let Some(is_full) = self.is_full.get(stream.0.as_str()) { - is_full.store(false, Ordering::Relaxed); - } - } - Err(e) => { - error!(?e, "Failed to fetch buffer usage for stream {}, updating isFull to true", stream.0.as_str()); - if let Some(is_full) = self.is_full.get(stream.0.as_str()) { - is_full.store(true, Ordering::Relaxed); } } } @@ -152,13 +172,96 @@ impl JetstreamWriter { Ok((soft_usage, solid_usage)) } + /// Starts reading messages from the stream and writes them to Jetstream ISB. 
+ pub(crate) async fn streaming_write( + &self, + messages_stream: ReceiverStream, + ) -> Result>> { + let this = self.clone(); + + let handle: JoinHandle> = tokio::spawn(async move { + let mut messages_stream = messages_stream; + let mut hash = DefaultHasher::new(); + + while let Some(message) = messages_stream.next().await { + // if message needs to be dropped, ack and continue + // TODO: add metric for dropped count + if message.dropped() { + // delete the entry from tracker + this.tracker_handle.delete(message.id.offset).await?; + continue; + } + + let mut pafs = vec![]; + for vertex in &*this.config { + // check whether we need to write to this downstream vertex + if !forward::should_forward(message.tags.clone(), vertex.conditions.clone()) { + continue; + } + + // check to which partition the message should be written + let partition = forward::determine_partition( + message.id.offset.clone(), + vertex.writer_config.partitions, + &mut hash, + ); + + // write the message to the corresponding stream + let stream = vertex + .writer_config + .streams + .get(partition as usize) + .expect("stream should be present") + .clone(); + if let Some(paf) = this + .write( + stream.clone(), + message.clone(), + vertex.writer_config.buffer_full_strategy.clone(), + ) + .await + { + pafs.push((stream.clone(), paf)); + } + } + + pipeline_metrics() + .forwarder + .write_total + .get_or_create(pipeline_isb_metric_labels()) + .inc(); + + if pafs.is_empty() { + continue; + } + + this.resolve_pafs(ResolveAndPublishResult { + pafs, + payload: message.value.clone().into(), + offset: message.id.offset, + }) + .await?; + } + Ok(()) + }); + Ok(handle) + } + /// Writes the message to the JetStream ISB and returns a future which can be /// awaited to get the PublishAck. It will do infinite retries until the message /// gets published successfully. 
If it returns an error it means it is fatal error - pub(super) async fn write(&self, stream: Stream, payload: Vec) -> PublishAckFuture { - let js_ctx = self.js_ctx.clone(); + pub(super) async fn write( + &self, + stream: Stream, + message: Message, + on_full: BufferFullStrategy, + ) -> Option { + let mut counter = 500u16; - let mut counter = 500u64; + let offset = message.id.offset.clone(); + let payload: BytesMut = message + .try_into() + .expect("message serialization should not fail"); // loop till we get a PAF, there could be other reasons why PAFs cannot be created. let paf = loop { @@ -175,10 +278,20 @@ impl JetstreamWriter { counter = 0; } counter += 1; - - // FIXME: consider buffer-full strategy + match on_full { + BufferFullStrategy::DiscardLatest => { + // delete the entry from tracker + self.tracker_handle + .delete(offset.clone()) + .await + .expect("Failed to delete offset from tracker"); + return None; + } + BufferFullStrategy::RetryUntilSuccess => {} + } } - Some(false) => match js_ctx + Some(false) => match self + .js_ctx .publish(stream.0.clone(), Bytes::from(payload.clone())) .await { @@ -199,106 +312,27 @@ impl JetstreamWriter { } // sleep to avoid busy looping - sleep(self.config.retry_interval).await; + sleep(Duration::from_millis(DEFAULT_RETRY_INTERVAL_MILLIS)).await; }; - - paf - } - - /// Writes the message to the JetStream ISB and returns the PublishAck. It will do - /// infinite retries until the message gets published successfully. If it returns - /// an error it means it is fatal non-retryable error. - pub(super) async fn blocking_write( - &self, - stream: Stream, - payload: Vec, - ) -> Result { - let js_ctx = self.js_ctx.clone(); - let start_time = Instant::now(); - info!("Blocking write for stream {}", stream.0); - loop { - match js_ctx - .publish(stream.0.clone(), Bytes::from(payload.clone())) - .await - { - Ok(paf) => match paf.await { - Ok(ack) => { - if ack.duplicate { - // should we return an error here? 
Because duplicate messages are not fatal - // But it can mess up the watermark progression because the offset will be - // same as the previous message offset - warn!(?ack, "Duplicate message detected, ignoring"); - } - debug!( - elapsed_ms = start_time.elapsed().as_millis(), - "Blocking write successful in", - ); - return Ok(ack); - } - Err(e) => { - error!(?e, "awaiting publish ack failed, retrying"); - sleep(Duration::from_millis(10)).await; - } - }, - Err(e) => { - error!(?e, "publishing failed, retrying"); - sleep(self.config.retry_interval).await; - } - } - if self.cancel_token.is_cancelled() { - return Err(Error::ISB("Shutdown signal received".to_string())); - } - } - } -} - -/// ResolveAndPublishResult resolves the result of the write PAF operation. -/// It contains the list of pafs(one message can be written to multiple streams) -/// and the payload that was written. Once the PAFs for all the streams have been -/// resolved, the information is published to callee_tx. -#[derive(Debug)] -pub(crate) struct ResolveAndPublishResult { - pub(crate) pafs: Vec<(Stream, PublishAckFuture)>, - pub(crate) payload: Vec, - pub(crate) offset: String, -} - -/// Resolves the PAF from the write call, if not successful it will do a blocking write so that -/// it is eventually successful. Once the PAF has been resolved (by either means) it will notify -/// the top-level callee via the oneshot rx. -pub(crate) struct PafResolver { - sem: Arc, - js_writer: JetstreamWriter, - tracker_handle: TrackerHandle, -} - -impl PafResolver { - pub(crate) fn new( - concurrency: usize, - js_writer: JetstreamWriter, - tracker_handle: TrackerHandle, - ) -> Self { - PafResolver { - sem: Arc::new(Semaphore::new(concurrency)), // concurrency limit for resolving PAFs - js_writer, - tracker_handle, - } + Some(paf) } /// resolve_pafs resolves the PAFs for the given result. It will try to resolve the PAFs /// asynchronously, if it fails it will do a blocking write to resolve the PAFs. 
/// At any point in time, we will only have X PAF resolvers running, this will help us create a /// natural backpressure. - pub(crate) async fn resolve_pafs(&self, result: ResolveAndPublishResult) -> Result<()> { + pub(super) async fn resolve_pafs(&self, result: ResolveAndPublishResult) -> Result<()> { let start_time = Instant::now(); let permit = Arc::clone(&self.sem) .acquire_owned() .await .map_err(|_e| Error::ISB("Failed to acquire semaphore permit".to_string()))?; - let tracker_handle = self.tracker_handle.clone(); + let mut offsets = Vec::new(); + let js_ctx = self.js_ctx.clone(); + let cancel_token = self.cancel_token.clone(); + let tracker_handle = self.tracker_handle.clone(); - let js_writer = self.js_writer.clone(); tokio::spawn(async move { let _permit = permit; for (stream, paf) in result.pafs { @@ -325,9 +359,13 @@ impl PafResolver { "Failed to resolve the future for stream {}, trying blocking write", stream.0 ); - match js_writer - .blocking_write(stream.clone(), result.payload.clone()) - .await + match JetstreamWriter::blocking_write( + stream.clone(), + result.payload.clone(), + js_ctx.clone(), + cancel_token.clone(), + ) + .await { Ok(ack) => { if ack.duplicate { @@ -362,24 +400,86 @@ impl PafResolver { }); Ok(()) } + + /// Writes the message to the JetStream ISB and returns the PublishAck. It will do + /// infinite retries until the message gets published successfully. If it returns + /// an error it means it is fatal non-retryable error. + async fn blocking_write( + stream: Stream, + payload: Vec, + js_ctx: Context, + cln_token: CancellationToken, + ) -> Result { + let start_time = Instant::now(); + info!("Blocking write for stream {}", stream.0); + loop { + match js_ctx + .publish(stream.0.clone(), Bytes::from(payload.clone())) + .await + { + Ok(paf) => match paf.await { + Ok(ack) => { + if ack.duplicate { + // should we return an error here? 
Because duplicate messages are not fatal + // But it can mess up the watermark progression because the offset will be + // same as the previous message offset + warn!(?ack, "Duplicate message detected, ignoring"); + } + debug!( + elapsed_ms = start_time.elapsed().as_millis(), + "Blocking write successful in", + ); + return Ok(ack); + } + Err(e) => { + error!(?e, "awaiting publish ack failed, retrying"); + sleep(Duration::from_millis(10)).await; + } + }, + Err(e) => { + error!(?e, "publishing failed, retrying"); + sleep(Duration::from_millis(DEFAULT_RETRY_INTERVAL_MILLIS)).await; + } + } + if cln_token.is_cancelled() { + return Err(Error::ISB("Shutdown signal received".to_string())); + } + } + } +} + +/// ResolveAndPublishResult resolves the result of the write PAF operation. +/// It contains the list of pafs(one message can be written to multiple streams) +/// and the payload that was written. Once the PAFs for all the streams have been +/// resolved, the information is published to callee_tx. 
+#[derive(Debug)] +pub(crate) struct ResolveAndPublishResult { + pub(crate) pafs: Vec<(Stream, PublishAckFuture)>, + pub(crate) payload: Vec, + pub(crate) offset: String, } #[cfg(test)] mod tests { + use crate::pipeline::pipeline::isb::BufferWriterConfig; + use numaflow_models::models::ForwardConditions; + use numaflow_models::models::TagConditions; use std::collections::HashMap; use std::time::Instant; use async_nats::jetstream; + use async_nats::jetstream::consumer::{Config, Consumer}; use async_nats::jetstream::{consumer, stream}; use bytes::BytesMut; use chrono::Utc; use super::*; - use crate::message::{Message, MessageID}; + use crate::message::{Message, MessageID, ReadAck}; #[cfg(feature = "nats-tests")] #[tokio::test] async fn test_async_write() { + let tracker_handle = TrackerHandle::new(); let cln_token = CancellationToken::new(); let js_url = "localhost:4222"; // Create JetStream context @@ -400,7 +500,7 @@ mod tests { let _consumer = context .create_consumer_on_stream( - consumer::Config { + Config { name: Some(stream_name.to_string()), ack_policy: consumer::AckPolicy::Explicit, ..Default::default() @@ -411,14 +511,23 @@ mod tests { .unwrap(); let writer = JetstreamWriter::new( - vec![(stream_name.to_string(), 0)], - Default::default(), + vec![ToVertexConfig { + name: "test-vertex".to_string(), + writer_config: BufferWriterConfig { + streams: vec![(stream_name.to_string(), 0)], + ..Default::default() + }, + conditions: None, + }], context.clone(), + 100, + tracker_handle, cln_token.clone(), ); let message = Message { keys: vec!["key_0".to_string()], + tags: None, value: "message 0".as_bytes().to_vec().into(), offset: None, event_time: Utc::now(), @@ -430,11 +539,14 @@ mod tests { headers: HashMap::new(), }; - let message_bytes: BytesMut = message.try_into().unwrap(); let paf = writer - .write((stream_name.to_string(), 0), message_bytes.into()) + .write( + (stream_name.to_string(), 0), + message, + BufferFullStrategy::RetryUntilSuccess, + ) .await; - 
assert!(paf.await.is_ok()); + assert!(paf.unwrap().await.is_ok()); context.delete_stream(stream_name).await.unwrap(); } @@ -462,7 +574,7 @@ mod tests { let _consumer = context .create_consumer_on_stream( - consumer::Config { + Config { name: Some(stream_name.to_string()), ack_policy: consumer::AckPolicy::Explicit, ..Default::default() @@ -472,15 +584,9 @@ mod tests { .await .unwrap(); - let writer = JetstreamWriter::new( - vec![(stream_name.to_string(), 0)], - Default::default(), - context.clone(), - cln_token.clone(), - ); - let message = Message { keys: vec!["key_0".to_string()], + tags: None, value: "message 0".as_bytes().to_vec().into(), offset: None, event_time: Utc::now(), @@ -493,9 +599,13 @@ mod tests { }; let message_bytes: BytesMut = message.try_into().unwrap(); - let result = writer - .blocking_write((stream_name.to_string(), 0), message_bytes.into()) - .await; + let result = JetstreamWriter::blocking_write( + (stream_name.to_string(), 0), + message_bytes.into(), + context.clone(), + cln_token.clone(), + ) + .await; assert!(result.is_ok()); let publish_ack = result.unwrap(); @@ -507,6 +617,7 @@ mod tests { #[cfg(feature = "nats-tests")] #[tokio::test] async fn test_write_with_cancellation() { + let tracker_handle = TrackerHandle::new(); let js_url = "localhost:4222"; // Create JetStream context let client = async_nats::connect(js_url).await.unwrap(); @@ -527,7 +638,7 @@ mod tests { let _consumer = context .create_consumer_on_stream( - consumer::Config { + Config { name: Some(stream_name.to_string()), ack_policy: consumer::AckPolicy::Explicit, ..Default::default() @@ -538,10 +649,19 @@ mod tests { .unwrap(); let cancel_token = CancellationToken::new(); + let writer = JetstreamWriter::new( - vec![(stream_name.to_string(), 0)], - Default::default(), + vec![ToVertexConfig { + name: "test-vertex".to_string(), + writer_config: BufferWriterConfig { + streams: vec![(stream_name.to_string(), 0)], + ..Default::default() + }, + conditions: None, + }], 
context.clone(), + 100, + tracker_handle, cancel_token.clone(), ); @@ -550,6 +670,7 @@ mod tests { for i in 0..10 { let message = Message { keys: vec![format!("key_{}", i)], + tags: None, value: format!("message {}", i).as_bytes().to_vec().into(), offset: None, event_time: Utc::now(), @@ -560,9 +681,12 @@ mod tests { }, headers: HashMap::new(), }; - let message_bytes: BytesMut = message.try_into().unwrap(); let paf = writer - .write((stream_name.to_string(), 0), message_bytes.into()) + .write( + (stream_name.to_string(), 0), + message, + BufferFullStrategy::RetryUntilSuccess, + ) .await; result_receivers.push(paf); } @@ -571,6 +695,7 @@ mod tests { // so that it fails and sync write will be attempted and it will be blocked let message = Message { keys: vec!["key_11".to_string()], + tags: None, value: vec![0; 1025].into(), offset: None, event_time: Utc::now(), @@ -581,9 +706,12 @@ mod tests { }, headers: HashMap::new(), }; - let message_bytes: BytesMut = message.try_into().unwrap(); let paf = writer - .write((stream_name.to_string(), 0), message_bytes.into()) + .write( + (stream_name.to_string(), 0), + message, + BufferFullStrategy::RetryUntilSuccess, + ) .await; result_receivers.push(paf); @@ -594,13 +722,13 @@ mod tests { for (i, receiver) in result_receivers.into_iter().enumerate() { if i < 10 { assert!( - receiver.await.is_ok(), + receiver.unwrap().await.is_ok(), "Message {} should be published successfully", i ); } else { assert!( - receiver.await.is_err(), + receiver.unwrap().await.is_err(), "Message 11 should fail with cancellation error" ); } @@ -635,7 +763,7 @@ mod tests { let _consumer = context .create_consumer_on_stream( - consumer::Config { + Config { name: Some(stream_name.to_string()), ack_policy: consumer::AckPolicy::Explicit, ..Default::default() @@ -646,7 +774,7 @@ mod tests { let _consumer = context .create_consumer_on_stream( - consumer::Config { + Config { name: Some(stream_name.to_string()), ack_policy: consumer::AckPolicy::Explicit, 
..Default::default() @@ -687,6 +815,7 @@ mod tests { #[cfg(feature = "nats-tests")] #[tokio::test] async fn test_check_stream_status() { + let tracker_handle = TrackerHandle::new(); let js_url = "localhost:4222"; // Create JetStream context let client = async_nats::connect(js_url).await.unwrap(); @@ -710,7 +839,7 @@ mod tests { let _consumer = context .create_consumer_on_stream( - consumer::Config { + Config { name: Some(stream_name.to_string()), ack_policy: consumer::AckPolicy::Explicit, ..Default::default() @@ -722,12 +851,18 @@ mod tests { let cancel_token = CancellationToken::new(); let writer = JetstreamWriter::new( - vec![(stream_name.to_string(), 0)], - BufferWriterConfig { - max_length: 100, - ..Default::default() - }, + vec![ToVertexConfig { + name: "test-vertex".to_string(), + writer_config: BufferWriterConfig { + streams: vec![(stream_name.to_string(), 0)], + max_length: 100, + ..Default::default() + }, + conditions: None, + }], context.clone(), + 100, + tracker_handle, cancel_token.clone(), ); @@ -769,4 +904,387 @@ mod tests { // Clean up context.delete_stream(stream_name).await.unwrap(); } + + #[cfg(feature = "nats-tests")] + #[tokio::test] + async fn test_streaming_write() { + let cln_token = CancellationToken::new(); + let js_url = "localhost:4222"; + // Create JetStream context + let client = async_nats::connect(js_url).await.unwrap(); + let context = jetstream::new(client); + let tracker_handle = TrackerHandle::new(); + + let stream_name = "test_publish_messages"; + // Delete stream if it exists + let _ = context.delete_stream(stream_name).await; + let _stream = context + .get_or_create_stream(stream::Config { + name: stream_name.into(), + subjects: vec![stream_name.into()], + max_messages: 1000, + ..Default::default() + }) + .await + .unwrap(); + + let _consumer = context + .create_consumer_on_stream( + Config { + name: Some(stream_name.to_string()), + ack_policy: consumer::AckPolicy::Explicit, + ..Default::default() + }, + stream_name, + ) + 
.await + .unwrap(); + + let writer = JetstreamWriter::new( + vec![ToVertexConfig { + name: "test-vertex".to_string(), + writer_config: BufferWriterConfig { + streams: vec![(stream_name.to_string(), 0)], + max_length: 1000, + ..Default::default() + }, + conditions: None, + }], + context.clone(), + 100, + tracker_handle.clone(), + cln_token.clone(), + ); + + let (messages_tx, messages_rx) = tokio::sync::mpsc::channel(500); + let mut ack_rxs = vec![]; + // Publish 500 messages + for i in 0..500 { + let message = Message { + keys: vec![format!("key_{}", i)], + tags: None, + value: format!("message {}", i).as_bytes().to_vec().into(), + offset: None, + event_time: Utc::now(), + id: MessageID { + vertex_name: "vertex".to_string(), + offset: format!("offset_{}", i), + index: i, + }, + headers: HashMap::new(), + }; + let (ack_tx, ack_rx) = tokio::sync::oneshot::channel(); + tracker_handle + .insert(message.id.offset.clone(), ack_tx) + .await + .unwrap(); + ack_rxs.push(ack_rx); + messages_tx.send(message).await.unwrap(); + } + drop(messages_tx); + + let receiver_stream = ReceiverStream::new(messages_rx); + let _handle = writer.streaming_write(receiver_stream).await.unwrap(); + + for ack_rx in ack_rxs { + assert_eq!(ack_rx.await.unwrap(), ReadAck::Ack); + } + // make sure all messages are acked + assert!(tracker_handle.is_empty().await.unwrap()); + context.delete_stream(stream_name).await.unwrap(); + } + + #[cfg(feature = "nats-tests")] + #[tokio::test] + async fn test_streaming_write_with_cancellation() { + let js_url = "localhost:4222"; + // Create JetStream context + let client = async_nats::connect(js_url).await.unwrap(); + let context = jetstream::new(client); + let tracker_handle = TrackerHandle::new(); + + let stream_name = "test_publish_cancellation"; + // Delete stream if it exists + let _ = context.delete_stream(stream_name).await; + let _stream = context + .get_or_create_stream(stream::Config { + name: stream_name.into(), + subjects: vec![stream_name.into()], + 
max_message_size: 1024, + ..Default::default() + }) + .await + .unwrap(); + + let _consumer = context + .create_consumer_on_stream( + Config { + name: Some(stream_name.to_string()), + ack_policy: consumer::AckPolicy::Explicit, + ..Default::default() + }, + stream_name, + ) + .await + .unwrap(); + + let cancel_token = CancellationToken::new(); + let writer = JetstreamWriter::new( + vec![ToVertexConfig { + name: "test-vertex".to_string(), + writer_config: BufferWriterConfig { + streams: vec![(stream_name.to_string(), 0)], + ..Default::default() + }, + conditions: None, + }], + context.clone(), + 100, + tracker_handle.clone(), + cancel_token.clone(), + ); + + let (tx, rx) = tokio::sync::mpsc::channel(500); + let mut ack_rxs = vec![]; + // Publish 100 messages successfully + for i in 0..100 { + let message = Message { + keys: vec![format!("key_{}", i)], + tags: None, + value: format!("message {}", i).as_bytes().to_vec().into(), + offset: None, + event_time: Utc::now(), + id: MessageID { + vertex_name: "vertex".to_string(), + offset: format!("offset_{}", i), + index: i, + }, + headers: HashMap::new(), + }; + let (ack_tx, ack_rx) = tokio::sync::oneshot::channel(); + tracker_handle + .insert(message.id.offset.clone(), ack_tx) + .await + .unwrap(); + ack_rxs.push(ack_rx); + tx.send(message).await.unwrap(); + } + + let receiver_stream = ReceiverStream::new(rx); + let _handle = writer.streaming_write(receiver_stream).await.unwrap(); + + // Attempt to publish the 101st message, which should get stuck in the retry loop + // because the max message size is set to 1024 + let message = Message { + keys: vec!["key_101".to_string()], + tags: None, + value: vec![0; 1025].into(), + offset: None, + event_time: Utc::now(), + id: MessageID { + vertex_name: "vertex".to_string(), + offset: "offset_101".to_string(), + index: 101, + }, + headers: HashMap::new(), + }; + let (ack_tx, ack_rx) = tokio::sync::oneshot::channel(); + tracker_handle + .insert("offset_101".to_string(), ack_tx) + 
.await + .unwrap(); + ack_rxs.push(ack_rx); + tx.send(message).await.unwrap(); + drop(tx); + + // Cancel the token to exit the retry loop + cancel_token.cancel(); + // Check the results + for (i, receiver) in ack_rxs.into_iter().enumerate() { + let result = receiver.await.unwrap(); + if i < 100 { + assert_eq!(result, ReadAck::Ack); + } else { + assert_eq!(result, ReadAck::Nak); + } + } + + // make sure all messages are acked + assert!(tracker_handle.is_empty().await.unwrap()); + context.delete_stream(stream_name).await.unwrap(); + } + + #[tokio::test] + async fn test_streaming_write_multiple_streams_vertices() { + let js_url = "localhost:4222"; + let client = async_nats::connect(js_url).await.unwrap(); + let context = jetstream::new(client); + let tracker_handle = TrackerHandle::new(); + let cln_token = CancellationToken::new(); + + let vertex1_streams = vec!["vertex1-0", "vertex1-1"]; + let vertex2_streams = vec!["vertex2-0", "vertex2-1"]; + let vertex3_streams = vec!["vertex3-0", "vertex3-1"]; + + let (_, consumers1) = create_streams_and_consumers(&context, &vertex1_streams).await; + let (_, consumers2) = create_streams_and_consumers(&context, &vertex2_streams).await; + let (_, consumers3) = create_streams_and_consumers(&context, &vertex3_streams).await; + + let writer = JetstreamWriter::new( + vec![ + ToVertexConfig { + name: "vertex1".to_string(), + writer_config: BufferWriterConfig { + streams: vec![ + (vertex1_streams[0].to_string(), 0), + (vertex1_streams[1].to_string(), 1), + ], + partitions: 2, + ..Default::default() + }, + conditions: Some(Box::new(ForwardConditions::new(TagConditions { + operator: Some("and".to_string()), + values: vec!["tag1".to_string(), "tag2".to_string()], + }))), + }, + ToVertexConfig { + name: "vertex2".to_string(), + writer_config: BufferWriterConfig { + streams: vec![ + (vertex2_streams[0].to_string(), 0), + (vertex2_streams[1].to_string(), 1), + ], + partitions: 2, + ..Default::default() + }, + conditions: 
Some(Box::new(ForwardConditions::new(TagConditions { + operator: Some("or".to_string()), + values: vec!["tag2".to_string()], + }))), + }, + ToVertexConfig { + name: "vertex3".to_string(), + writer_config: BufferWriterConfig { + streams: vec![ + (vertex3_streams[0].to_string(), 0), + (vertex3_streams[1].to_string(), 1), + ], + partitions: 2, + ..Default::default() + }, + conditions: Some(Box::new(ForwardConditions::new(TagConditions { + operator: Some("not".to_string()), + values: vec!["tag1".to_string()], + }))), + }, + ], + context.clone(), + 100, + tracker_handle.clone(), + cln_token.clone(), + ); + + let (messages_tx, messages_rx) = tokio::sync::mpsc::channel(500); + let mut ack_rxs = vec![]; + for i in 0..10 { + let message = Message { + keys: vec![format!("key_{}", i)], + tags: Some(vec!["tag1".to_string(), "tag2".to_string()]), + value: format!("message {}", i).as_bytes().to_vec().into(), + offset: None, + event_time: Utc::now(), + id: MessageID { + vertex_name: "vertex".to_string(), + offset: format!("offset_{}", i), + index: i, + }, + headers: HashMap::new(), + }; + let (ack_tx, ack_rx) = tokio::sync::oneshot::channel(); + tracker_handle + .insert(message.id.offset.clone(), ack_tx) + .await + .unwrap(); + ack_rxs.push(ack_rx); + messages_tx.send(message).await.unwrap(); + } + drop(messages_tx); + + let receiver_stream = ReceiverStream::new(messages_rx); + let _handle = writer.streaming_write(receiver_stream).await.unwrap(); + + for ack_rx in ack_rxs { + assert_eq!(ack_rx.await.unwrap(), ReadAck::Ack); + } + + // since its and operation and both the tags match all 10 messages should be written + // messages will be distributed based on the message id but the total message count + // should be 10 + let mut write_count = 0; + for mut consumer in consumers1 { + write_count += consumer.info().await.unwrap().num_pending; + } + assert_eq!(write_count, 10); + + // since its or operation and one of the tags match all 10 messages should be written + write_count = 0; 
+ for mut consumer in consumers2 { + write_count += consumer.info().await.unwrap().num_pending; + } + assert_eq!(write_count, 10); + + // since it's a not operation, and none of the tags match, no messages should be written + write_count = 0; + for mut consumer in consumers3 { + write_count += consumer.info().await.unwrap().num_pending; + } + assert_eq!(write_count, 0); + + // make sure all messages are acked + assert!(tracker_handle.is_empty().await.unwrap()); + + for stream_name in vertex1_streams + .iter() + .chain(&vertex2_streams) + .chain(&vertex3_streams) + { + context.delete_stream(stream_name).await.unwrap(); + } + } + + async fn create_streams_and_consumers( + context: &Context, + stream_names: &[&str], + ) -> (Vec, Vec>) { + let mut streams = Vec::new(); + let mut consumers = Vec::new(); + + for stream_name in stream_names { + let _ = context.delete_stream(stream_name).await; + let stream = context + .get_or_create_stream(stream::Config { + name: stream_name.to_string(), + subjects: vec![stream_name.to_string()], + ..Default::default() + }) + .await + .unwrap(); + streams.push(stream); + + let consumer = context + .create_consumer_on_stream( + Config { + name: Some(stream_name.to_string()), + ack_policy: consumer::AckPolicy::Explicit, + ..Default::default() + }, + stream_name, + ) + .await + .unwrap(); + consumers.push(consumer); + } + + (streams, consumers) + } } diff --git a/rust/numaflow-core/src/shared.rs b/rust/numaflow-core/src/shared.rs index 0117040c49..3d8461b173 100644 --- a/rust/numaflow-core/src/shared.rs +++ b/rust/numaflow-core/src/shared.rs @@ -10,3 +10,6 @@ pub(crate) mod metrics; /// Shared methods for creating Sources, Sinks, Transformers, etc. as they are required for both /// MonoVertex and Pipeline. pub(crate) mod create_components; + +/// Shared methods for forwarding messages. 
+pub(crate) mod forward; diff --git a/rust/numaflow-core/src/shared/forward.rs b/rust/numaflow-core/src/shared/forward.rs new file mode 100644 index 0000000000..e249989cd1 --- /dev/null +++ b/rust/numaflow-core/src/shared/forward.rs @@ -0,0 +1,108 @@ +use numaflow_models::models::ForwardConditions; +use std::hash::{DefaultHasher, Hasher}; + +/// Checks if the message should to written to downstream vertex based the conditions +/// and message tags. If not tags are provided by there are edge conditions present, we will +/// still forward to all vertices. +pub(crate) fn should_forward( + tags: Option>, + conditions: Option>, +) -> bool { + conditions.map_or(true, |conditions| { + conditions.tags.operator.as_ref().map_or(true, |operator| { + tags.as_ref().map_or(true, |tags| { + !conditions.tags.values.is_empty() + && check_operator_condition(operator, &conditions.tags.values, tags) + }) + }) + }) +} +/// Determine the partition to write the message to by hashing the message id. +pub(crate) fn determine_partition( + message_id: String, + partitions_count: u16, + hash: &mut DefaultHasher, +) -> u16 { + hash.write(message_id.as_bytes()); + let hash_value = hash.finish(); + (hash_value % partitions_count as u64) as u16 +} + +/// Check whether a message should be forwarded to the next vertex based on the tags and tags in the +/// edge condition. 
+fn check_operator_condition( + set_operator: &str, + tags_from_edge_condition: &[String], + tags_from_message: &[String], +) -> bool { + match set_operator { + "and" => { + // returns true if all the elements of vec a are in vec b + tags_from_edge_condition + .iter() + .all(|val| tags_from_message.contains(val)) + } + "or" => { + // returns true if any of the elements of vec a are in vec b + tags_from_edge_condition + .iter() + .any(|val| tags_from_message.contains(val)) + } + "not" => { + // returns false if any of the elements of vec a are in vec b + !tags_from_edge_condition + .iter() + .any(|val| tags_from_message.contains(val)) + } + _ => false, + } +} + +#[cfg(test)] +mod tests { + use super::*; + use numaflow_models::models::TagConditions; + + #[tokio::test] + async fn test_evaluate_write_condition_no_conditions() { + let result = should_forward(None, None); + assert!(result); + } + + #[tokio::test] + async fn test_evaluate_write_condition_no_tags() { + let conditions = ForwardConditions::new(TagConditions::new(vec!["tag1".to_string()])); + let result = should_forward(None, Some(Box::new(conditions))); + assert!(result); + } + + #[tokio::test] + async fn test_evaluate_write_condition_and_operator() { + let mut tag_conditions = TagConditions::new(vec!["tag1".to_string(), "tag2".to_string()]); + tag_conditions.operator = Some("and".to_string()); + let conditions = ForwardConditions::new(tag_conditions); + let tags = Some(vec!["tag1".to_string(), "tag2".to_string()]); + let result = should_forward(tags, Some(Box::new(conditions))); + assert!(result); + } + + #[tokio::test] + async fn test_evaluate_write_condition_or_operator() { + let mut tag_conditions = TagConditions::new(vec!["tag1".to_string()]); + tag_conditions.operator = Some("or".to_string()); + let conditions = ForwardConditions::new(tag_conditions); + let tags = Some(vec!["tag2".to_string(), "tag1".to_string()]); + let result = should_forward(tags, Some(Box::new(conditions))); + assert!(result); + } 
+ + #[tokio::test] + async fn test_evaluate_write_condition_not_operator() { + let mut tag_conditions = TagConditions::new(vec!["tag1".to_string()]); + tag_conditions.operator = Some("not".to_string()); + let conditions = ForwardConditions::new(tag_conditions); + let tags = Some(vec!["tag2".to_string()]); + let result = should_forward(tags, Some(Box::new(conditions))); + assert!(result); + } +} diff --git a/rust/numaflow-core/src/sink.rs b/rust/numaflow-core/src/sink.rs index b820ceb57d..6f8fa84542 100644 --- a/rust/numaflow-core/src/sink.rs +++ b/rust/numaflow-core/src/sink.rs @@ -275,6 +275,12 @@ impl SinkWriter { .map(|msg| msg.id.offset.clone()) .collect::>(); + // filter out the messages which needs to be dropped + let batch = batch + .into_iter() + .filter(|msg| !msg.dropped()) + .collect::>(); + let n = batch.len(); match this.write(batch, cancellation_token.clone()).await { Ok(_) => { @@ -297,7 +303,7 @@ impl SinkWriter { info!( "Processed {} messages at {:?}", processed_msgs_count, - std::time::Instant::now() + time::Instant::now() ); processed_msgs_count = 0; last_logged_at = std::time::Instant::now(); @@ -645,6 +651,7 @@ mod tests { let messages: Vec = (0..5) .map(|i| Message { keys: vec![format!("key_{}", i)], + tags: None, value: format!("message {}", i).as_bytes().to_vec().into(), offset: None, event_time: Utc::now(), @@ -679,6 +686,7 @@ mod tests { let messages: Vec = (0..10) .map(|i| Message { keys: vec![format!("key_{}", i)], + tags: None, value: format!("message {}", i).as_bytes().to_vec().into(), offset: None, event_time: Utc::now(), @@ -756,6 +764,7 @@ mod tests { let messages: Vec = (0..10) .map(|i| Message { keys: vec!["error".to_string()], + tags: None, value: format!("message {}", i).as_bytes().to_vec().into(), offset: None, event_time: Utc::now(), @@ -842,6 +851,7 @@ mod tests { let messages: Vec = (0..20) .map(|i| Message { keys: vec!["fallback".to_string()], + tags: None, value: format!("message {}", i).as_bytes().to_vec().into(), offset: 
None, event_time: Utc::now(), diff --git a/rust/numaflow-core/src/sink/blackhole.rs b/rust/numaflow-core/src/sink/blackhole.rs index d3cc7a53ca..7b6bc870b3 100644 --- a/rust/numaflow-core/src/sink/blackhole.rs +++ b/rust/numaflow-core/src/sink/blackhole.rs @@ -32,6 +32,7 @@ mod tests { let messages = vec![ Message { keys: vec![], + tags: None, value: b"Hello, World!".to_vec().into(), offset: Some(Offset::Int(IntOffset::new(1, 0))), event_time: Utc::now(), @@ -44,6 +45,7 @@ mod tests { }, Message { keys: vec![], + tags: None, value: b"Hello, World!".to_vec().into(), offset: Some(Offset::Int(IntOffset::new(1, 0))), event_time: Utc::now(), diff --git a/rust/numaflow-core/src/sink/log.rs b/rust/numaflow-core/src/sink/log.rs index 970ab66bd0..e83fa08472 100644 --- a/rust/numaflow-core/src/sink/log.rs +++ b/rust/numaflow-core/src/sink/log.rs @@ -48,6 +48,7 @@ mod tests { let messages = vec![ Message { keys: vec![], + tags: None, value: b"Hello, World!".to_vec().into(), offset: Some(Offset::Int(IntOffset::new(1, 0))), event_time: Utc::now(), @@ -60,6 +61,7 @@ mod tests { }, Message { keys: vec![], + tags: None, value: b"Hello, World!".to_vec().into(), offset: Some(Offset::Int(IntOffset::new(1, 0))), event_time: Utc::now(), diff --git a/rust/numaflow-core/src/sink/user_defined.rs b/rust/numaflow-core/src/sink/user_defined.rs index 11e84af61f..e475c77cd4 100644 --- a/rust/numaflow-core/src/sink/user_defined.rs +++ b/rust/numaflow-core/src/sink/user_defined.rs @@ -180,6 +180,7 @@ mod tests { let messages = vec![ Message { keys: vec![], + tags: None, value: b"Hello, World!".to_vec().into(), offset: None, event_time: Utc::now(), @@ -192,6 +193,7 @@ mod tests { }, Message { keys: vec![], + tags: None, value: b"Hello, World!".to_vec().into(), offset: None, event_time: Utc::now(), diff --git a/rust/numaflow-core/src/source.rs b/rust/numaflow-core/src/source.rs index 8409187d6c..df551e6962 100644 --- a/rust/numaflow-core/src/source.rs +++ b/rust/numaflow-core/src/source.rs @@ 
-291,7 +291,7 @@ impl Source { let mut ack_batch = Vec::with_capacity(n); for message in messages { let (resp_ack_tx, resp_ack_rx) = oneshot::channel(); - let offset = message.offset.clone().unwrap(); + let offset = message.offset.clone().expect("offset can never be none"); // insert the offset and the ack one shot in the tracker. tracker_handle @@ -411,11 +411,11 @@ mod tests { use std::time::Duration; use chrono::Utc; - use futures::StreamExt; use numaflow::source; use numaflow::source::{Message, Offset, SourceReadRequest}; use numaflow_pb::clients::source::source_client::SourceClient; use tokio::sync::mpsc::Sender; + use tokio_stream::StreamExt; use tokio_util::sync::CancellationToken; use crate::shared::grpc::create_rpc_channel; diff --git a/rust/numaflow-core/src/source/generator.rs b/rust/numaflow-core/src/source/generator.rs index c3671d0a98..3dd4f8aba9 100644 --- a/rust/numaflow-core/src/source/generator.rs +++ b/rust/numaflow-core/src/source/generator.rs @@ -1,9 +1,8 @@ -use futures::StreamExt; - use crate::config::components::source::GeneratorConfig; use crate::message::{Message, Offset}; use crate::reader; use crate::source; +use tokio_stream::StreamExt; /// Stream Generator returns a set of messages for every `.next` call. It will throttle itself if /// the call exceeds the RPU. 
It will return a max (batch size, RPU) till the quota for that unit of @@ -167,6 +166,7 @@ mod stream_generator { Message { keys: self.next_key_to_be_fetched(), + tags: None, value: data.into(), offset: Some(offset.clone()), event_time, @@ -234,7 +234,7 @@ mod stream_generator { #[cfg(test)] mod tests { - use futures::StreamExt; + use tokio_stream::StreamExt; use super::*; diff --git a/rust/numaflow-core/src/source/pulsar.rs b/rust/numaflow-core/src/source/pulsar.rs index 0b81f2615b..64a3ecda9e 100644 --- a/rust/numaflow-core/src/source/pulsar.rs +++ b/rust/numaflow-core/src/source/pulsar.rs @@ -15,6 +15,7 @@ impl TryFrom for Message { Ok(Message { keys: vec![message.key], + tags: None, value: message.payload, offset: Some(offset.clone()), event_time: message.event_time, diff --git a/rust/numaflow-core/src/tracker.rs b/rust/numaflow-core/src/tracker.rs index 1177bc7438..9f0839a2e2 100644 --- a/rust/numaflow-core/src/tracker.rs +++ b/rust/numaflow-core/src/tracker.rs @@ -160,7 +160,7 @@ impl Tracker { /// TrackerHandle provides an interface to interact with the Tracker. /// It allows inserting, updating, deleting, and discarding tracked messages. 
#[derive(Clone)] -pub struct TrackerHandle { +pub(crate) struct TrackerHandle { sender: mpsc::Sender, } diff --git a/rust/numaflow-core/src/transformer.rs b/rust/numaflow-core/src/transformer.rs index d8302fb59d..b10a5215e5 100644 --- a/rust/numaflow-core/src/transformer.rs +++ b/rust/numaflow-core/src/transformer.rs @@ -1,9 +1,9 @@ -use futures::StreamExt; use numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; use std::sync::Arc; use tokio::sync::{mpsc, oneshot, OwnedSemaphorePermit, Semaphore}; use tokio::task::JoinHandle; use tokio_stream::wrappers::ReceiverStream; +use tokio_stream::StreamExt; use tonic::transport::Channel; use tracing::error; @@ -235,6 +235,7 @@ mod tests { let message = Message { keys: vec!["first".into()], + tags: None, value: "hello".into(), offset: Some(Offset::String(crate::message::StringOffset::new( "0".to_string(), @@ -311,6 +312,7 @@ mod tests { for i in 0..5 { let message = Message { keys: vec![format!("key_{}", i)], + tags: None, value: format!("value_{}", i).into(), offset: Some(Offset::String(crate::message::StringOffset::new( i.to_string(), diff --git a/rust/numaflow-core/src/transformer/user_defined.rs b/rust/numaflow-core/src/transformer/user_defined.rs index 32c13eb0e9..6a493c2a99 100644 --- a/rust/numaflow-core/src/transformer/user_defined.rs +++ b/rust/numaflow-core/src/transformer/user_defined.rs @@ -103,6 +103,7 @@ impl UserDefinedTransformer { offset: msg_info.offset.to_string(), }, keys: result.keys, + tags: Some(result.tags), value: result.value.into(), offset: None, event_time: utc_from_timestamp(result.event_time), @@ -125,7 +126,7 @@ impl UserDefinedTransformer { ) { let msg_id = message.id.to_string(); let msg_info = ParentMessageInfo { - offset: message.offset.clone().unwrap(), + offset: message.offset.clone().expect("offset can never be none"), headers: message.headers.clone(), }; @@ -194,6 +195,7 @@ mod tests { let message = crate::message::Message { keys: 
vec!["first".into()], + tags: None, value: "hello".into(), offset: Some(crate::message::Offset::String(StringOffset::new( "0".to_string(), From df9be77bea7f769685241296a696bce78d446cfc Mon Sep 17 00:00:00 2001 From: Adarsh Jain Date: Tue, 10 Dec 2024 05:23:22 +0530 Subject: [PATCH 159/188] fix: "Loading Pods View" and "failed to get pod details" state on UI (#2248) Signed-off-by: adarsh0728 --- .../partials/NodeInfo/partials/Pods/index.tsx | 2 +- ui/src/utils/fetcherHooks/podsViewFetch.ts | 303 +++++++++--------- 2 files changed, 160 insertions(+), 145 deletions(-) diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/index.tsx b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/index.tsx index 6f12096ea7..681f74bd83 100644 --- a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/index.tsx +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/index.tsx @@ -290,7 +290,7 @@ export function Pods(props: PodsProps) { ); } - if (podsErr || podsDetailsErr || !pods?.length) { + if (podsErr || !pods?.length) { return ( (AppContext); + const [lastRetryTime, setLastRetryTime] = useState(0); // call to get pods for a given vertex - useEffect(() => { - const fetchPods = async () => { - try { - const response = await fetch( - `${host}${getBaseHref()}/api/v1/namespaces/${namespaceId}${ - type === "monoVertex" - ? 
`/mono-vertices` - : `/pipelines/${pipelineId}/vertices` - }/${vertexId}/pods?refreshKey=${requestKey}` - ); - if (response.ok) { - const json = await response.json(); - if (json?.data) { - let data = json?.data; - data = data.filter( - (pod: any) => !pod?.metadata?.name.includes("-daemon-") - ); - const pList = data?.map((pod: any) => { - const containers: string[] = []; - const containerSpecMap = new Map(); + const fetchPods = async () => { + try { + const response = await fetch( + `${host}${getBaseHref()}/api/v1/namespaces/${namespaceId}${ + type === "monoVertex" + ? `/mono-vertices` + : `/pipelines/${pipelineId}/vertices` + }/${vertexId}/pods?refreshKey=${requestKey}` + ); + if (response.ok) { + const json = await response.json(); + if (json?.data) { + let data = json?.data; + data = data.filter( + (pod: any) => !pod?.metadata?.name.includes("-daemon-") + ); + const pList = data?.map((pod: any) => { + const containers: string[] = []; + const containerSpecMap = new Map(); - const containersList = JSON.parse( - JSON.stringify(pod?.spec?.containers) - ); - pod?.spec?.initContainers - ?.filter((initContainer: any) => initContainer?.name !== "init") - ?.forEach((container: any) => containersList.push(container)); + const containersList = JSON.parse( + JSON.stringify(pod?.spec?.containers) + ); + pod?.spec?.initContainers + ?.filter((initContainer: any) => initContainer?.name !== "init") + ?.forEach((container: any) => containersList.push(container)); - containersList?.forEach((container: any) => { - const cpu = container?.resources?.requests?.cpu; - let cpuParsed: undefined | number; - if (cpu) { - try { - cpuParsed = Number(quantityToScalar(cpu)); - } catch (e) { - cpuParsed = undefined; - } + containersList?.forEach((container: any) => { + const cpu = container?.resources?.requests?.cpu; + let cpuParsed: undefined | number; + if (cpu) { + try { + cpuParsed = Number(quantityToScalar(cpu)); + } catch (e) { + cpuParsed = undefined; } - const memory = 
container?.resources?.requests?.memory; - let memoryParsed: undefined | number; - if (memory) { - try { - memoryParsed = Number(quantityToScalar(memory)); - } catch (e) { - memoryParsed = undefined; - } + } + const memory = container?.resources?.requests?.memory; + let memoryParsed: undefined | number; + if (memory) { + try { + memoryParsed = Number(quantityToScalar(memory)); + } catch (e) { + memoryParsed = undefined; } - containers.push(container?.name); - containerSpecMap.set(container?.name, { - name: container?.name, - cpu, - cpuParsed, - memory, - memoryParsed, - }); + } + containers.push(container?.name); + containerSpecMap.set(container?.name, { + name: container?.name, + cpu, + cpuParsed, + memory, + memoryParsed, }); - return { - name: pod?.metadata?.name, - containers, - containerSpecMap, - }; }); - setPods(pList); - } else if (json?.errMsg) { - setPodsErr([ - { - error: json.errMsg, - options: { - toastId: `${vertexId}-pod-fetch-error`, - autoClose: 5000, - }, - }, - ]); - } - } else { + return { + name: pod?.metadata?.name, + containers, + containerSpecMap, + }; + }); + setPods(pList); + } else if (json?.errMsg) { setPodsErr([ { - error: `Failed to get pods for ${vertexId} vertex`, - options: { toastId: `${vertexId}-pod-fetch`, autoClose: 5000 }, + error: json.errMsg, + options: { + toastId: `${vertexId}-pod-fetch-error`, + autoClose: 5000, + }, }, ]); } - } catch { + } else { setPodsErr([ { error: `Failed to get pods for ${vertexId} vertex`, @@ -126,10 +118,19 @@ export const usePodsViewFetch = ( }, ]); } - }; + } catch { + setPodsErr([ + { + error: `Failed to get pods for ${vertexId} vertex`, + options: { toastId: `${vertexId}-pod-fetch`, autoClose: 5000 }, + }, + ]); + } + }; + useEffect(() => { fetchPods(); - }, [vertexId, requestKey, host]); + },[vertexId, requestKey, host]); useEffect(() => { if (pods?.length) { @@ -145,74 +146,68 @@ export const usePodsViewFetch = ( } }, [pods]); - useEffect(() => { - const fetchPods = async () => { - try { - 
const response = await fetch( - `${host}${getBaseHref()}/api/v1/metrics/namespaces/${namespaceId}/pods?refreshKey=${requestKey}` - ); - if (response.ok) { - const json = await response.json(); - if (json?.data) { - const data = json?.data; - const podsMap = new Map(); - data?.forEach((pod: any) => { - const containerMap = new Map(); - pod?.containers?.forEach((c: any) => { - const cpu = c?.usage?.cpu; - let cpuParsed: undefined | number; - if (cpu) { - try { - cpuParsed = Number(quantityToScalar(cpu)); - } catch (e) { - cpuParsed = undefined; - } + // call to get pods details (metrics) + // to do: deprecate this and gather all metrics from pods-info + const fetchPodDetails = async () => { + try { + const response = await fetch( + `${host}${getBaseHref()}/api/v1/metrics/namespaces/${namespaceId}/pods?refreshKey=${requestKey}` + ); + if (response.ok) { + const json = await response.json(); + if (json?.data) { + const data = json?.data; + const podsMap = new Map(); + data?.forEach((pod: any) => { + const containerMap = new Map(); + pod?.containers?.forEach((c: any) => { + const cpu = c?.usage?.cpu; + let cpuParsed: undefined | number; + if (cpu) { + try { + cpuParsed = Number(quantityToScalar(cpu)); + } catch (e) { + cpuParsed = undefined; } - const memory = c?.usage?.memory; - let memoryParsed: undefined | number; - if (memory) { - try { - memoryParsed = Number(quantityToScalar(memory)); - } catch (e) { - memoryParsed = undefined; - } + } + const memory = c?.usage?.memory; + let memoryParsed: undefined | number; + if (memory) { + try { + memoryParsed = Number(quantityToScalar(memory)); + } catch (e) { + memoryParsed = undefined; } - const container = { - name: c?.name, - cpu, - cpuParsed, - memory, - memoryParsed, - }; - containerMap.set(container.name, container); - }); - const podDetail = { - name: pod?.metadata?.name, - containerMap, + } + const container = { + name: c?.name, + cpu, + cpuParsed, + memory, + memoryParsed, }; - podsMap.set(podDetail.name, 
podDetail); + containerMap.set(container.name, container); }); - setPodsDetails(podsMap); - } else if (json?.errMsg) { - setPodsDetailsErr([ - { - error: json.errMsg, - options: { - toastId: `${vertexId}-pod-fetch-error`, - autoClose: 5000, - }, - }, - ]); - } - } else { + const podDetail = { + name: pod?.metadata?.name, + containerMap, + }; + podsMap.set(podDetail.name, podDetail); + }); + setPodsDetails(podsMap); + setPodsDetailsErr(undefined); + } else if (json?.errMsg) { setPodsDetailsErr([ { - error: `Failed to get pods details for ${vertexId} vertex`, - options: { toastId: `${vertexId}-pod-fetch`, autoClose: 5000 }, + error: json.errMsg, + options: { + toastId: `${vertexId}-pod-fetch-error`, + autoClose: 5000, + }, }, ]); } - } catch { + } else { setPodsDetailsErr([ { error: `Failed to get pods details for ${vertexId} vertex`, @@ -220,33 +215,53 @@ export const usePodsViewFetch = ( }, ]); } - }; + } catch { + setPodsDetailsErr([ + { + error: `Failed to get pods details for ${vertexId} vertex`, + options: { toastId: `${vertexId}-pod-fetch`, autoClose: 5000 }, + }, + ]); + } + }; - fetchPods(); + // Fetch pod details whenever requestKey (after every 20 sec) or host changes + useEffect(() => { + fetchPodDetails(); }, [requestKey, host]); + // Refresh requestKey every 20 sec useEffect(() => { - // Refresh pod details every 30 sec const interval = setInterval(() => { setRequestKey(`${Date.now()}`); - }, 30000); + }, 20000); return () => { clearInterval(interval); }; }, []); - // checks if all pods are present in podsDetailsMap - const checkPodDetails = () => { - if (!pods || !podsDetails) return false; - for (let i = 0; i < pods.length; i++) { - if (!podsDetails.has(pods[i]?.name)) return false; + // Retry fetching pods details if there is an error + useEffect(() => { + const currentTime = Date.now(); + if (podsDetailsErr && (currentTime - lastRetryTime > 5000)) { + const retryFetch = setTimeout(() => { + fetchPodDetails(); + setLastRetryTime(currentTime); + }, 
5000); + + return () => clearTimeout(retryFetch); } + }, [podsDetailsErr, lastRetryTime]); + + // return false if pods/podsDetails are still undefined + const checkPodDetailsResponse = () => { + if (!pods || !podsDetails) return false; return true; }; - //sets loading variable + //sets loading variable true only when requests are pending useEffect(() => { - if (checkPodDetails()) { + if (checkPodDetailsResponse()) { setLoading(false); } else if (podsErr || podsDetailsErr) { setLoading(false); From a8288ae0817d6e8a6e1c79982a6fccaf492c0c79 Mon Sep 17 00:00:00 2001 From: Sreekanth Date: Tue, 10 Dec 2024 11:24:41 +0530 Subject: [PATCH 160/188] chore: clippy error fixes (#2271) Signed-off-by: Sreekanth --- rust/backoff/src/retry.rs | 4 +- rust/numaflow-core/src/metrics.rs | 2 +- rust/numaflow-core/src/monovertex.rs | 1 - .../src/pipeline/isb/jetstream/writer.rs | 1 + rust/numaflow-core/src/transformer.rs | 2 +- rust/serving/src/app.rs | 63 ++++++++++--------- .../src/app/callback/store/memstore.rs | 4 +- rust/serving/src/config.rs | 8 +-- rust/serving/src/lib.rs | 7 ++- rust/serving/src/metrics.rs | 10 ++- 10 files changed, 51 insertions(+), 51 deletions(-) diff --git a/rust/backoff/src/retry.rs b/rust/backoff/src/retry.rs index 5ead53915a..a0cc4b8eb7 100644 --- a/rust/backoff/src/retry.rs +++ b/rust/backoff/src/retry.rs @@ -190,7 +190,7 @@ mod tests { let interval = fixed::Interval::from_millis(1).take(10); let counter = Arc::new(AtomicUsize::new(0)); - let cloned_counter = counter.clone(); + let cloned_counter = Arc::clone(&counter); let fut = Retry::retry( interval, @@ -213,7 +213,7 @@ mod tests { let interval = fixed::Interval::from_millis(1).take(attempts); let counter = Arc::new(AtomicUsize::new(0)); - let cloned_counter = counter.clone(); + let cloned_counter = Arc::clone(&counter); let fut = Retry::retry( interval, diff --git a/rust/numaflow-core/src/metrics.rs b/rust/numaflow-core/src/metrics.rs index d8fa156d77..045b3817b9 100644 --- 
a/rust/numaflow-core/src/metrics.rs +++ b/rust/numaflow-core/src/metrics.rs @@ -1096,7 +1096,7 @@ mod tests { } tokio::spawn({ - let pending_stats = pending_stats.clone(); + let pending_stats = Arc::clone(&pending_stats); async move { expose_pending_metrics(true, refresh_interval, pending_stats, lookback_seconds) .await; diff --git a/rust/numaflow-core/src/monovertex.rs b/rust/numaflow-core/src/monovertex.rs index 5a830cfd15..8085658db8 100644 --- a/rust/numaflow-core/src/monovertex.rs +++ b/rust/numaflow-core/src/monovertex.rs @@ -243,7 +243,6 @@ mod tests { }; let result = start_forwarder(cln_token.clone(), &config).await; - dbg!(&result); assert!(result.is_ok()); // stop the source and sink servers diff --git a/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs b/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs index 53c20a3af3..5ff61de3d5 100644 --- a/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs +++ b/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs @@ -1114,6 +1114,7 @@ mod tests { context.delete_stream(stream_name).await.unwrap(); } + #[cfg(feature = "nats-tests")] #[tokio::test] async fn test_streaming_write_multiple_streams_vertices() { let js_url = "localhost:4222"; diff --git a/rust/numaflow-core/src/transformer.rs b/rust/numaflow-core/src/transformer.rs index b10a5215e5..eec95e37ab 100644 --- a/rust/numaflow-core/src/transformer.rs +++ b/rust/numaflow-core/src/transformer.rs @@ -253,7 +253,7 @@ mod tests { let (output_tx, mut output_rx) = mpsc::channel(10); let semaphore = Arc::new(Semaphore::new(10)); - let permit = semaphore.clone().acquire_owned().await.unwrap(); + let permit = semaphore.acquire_owned().await.unwrap(); Transformer::transform( transformer.sender.clone(), permit, diff --git a/rust/serving/src/app.rs b/rust/serving/src/app.rs index c2ba4c708d..d1d29c4c21 100644 --- a/rust/serving/src/app.rs +++ b/rust/serving/src/app.rs @@ -289,11 +289,14 @@ mod tests { use super::*; use 
crate::app::callback::store::memstore::InMemoryStore; - use crate::config::cert_key_pair; + use crate::config::generate_certs; + + type Result = core::result::Result; + type Error = Box; #[tokio::test] - async fn test_start_main_server() { - let (cert, key) = cert_key_pair(); + async fn test_start_main_server() -> Result<()> { + let (cert, key) = generate_certs()?; let tls_config = RustlsConfig::from_pem(cert.pem().into(), key.serialize_pem().into()) .await @@ -310,12 +313,13 @@ mod tests { // Stop the server server.abort(); + Ok(()) } #[cfg(feature = "all-tests")] #[tokio::test] - async fn test_setup_app() { - let client = async_nats::connect(&config().jetstream.url).await.unwrap(); + async fn test_setup_app() -> Result<()> { + let client = async_nats::connect(&config().jetstream.url).await?; let context = jetstream::new(client); let stream_name = &config().jetstream.stream; @@ -330,18 +334,19 @@ mod tests { assert!(stream.is_ok()); let mem_store = InMemoryStore::new(); - let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec()).unwrap(); + let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec())?; - let callback_state = CallbackState::new(msg_graph, mem_store).await.unwrap(); + let callback_state = CallbackState::new(msg_graph, mem_store).await?; let result = setup_app(context, callback_state).await; assert!(result.is_ok()); + Ok(()) } #[cfg(feature = "all-tests")] #[tokio::test] - async fn test_livez() { - let client = async_nats::connect(&config().jetstream.url).await.unwrap(); + async fn test_livez() -> Result<()> { + let client = async_nats::connect(&config().jetstream.url).await?; let context = jetstream::new(client); let stream_name = &config().jetstream.stream; @@ -356,25 +361,23 @@ mod tests { assert!(stream.is_ok()); let mem_store = InMemoryStore::new(); - let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec()).unwrap(); + let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec())?; - let callback_state = 
CallbackState::new(msg_graph, mem_store).await.unwrap(); + let callback_state = CallbackState::new(msg_graph, mem_store).await?; let result = setup_app(context, callback_state).await; - let request = Request::builder() - .uri("/livez") - .body(Body::empty()) - .unwrap(); + let request = Request::builder().uri("/livez").body(Body::empty())?; - let response = result.unwrap().oneshot(request).await.unwrap(); + let response = result?.oneshot(request).await?; assert_eq!(response.status(), StatusCode::NO_CONTENT); + Ok(()) } #[cfg(feature = "all-tests")] #[tokio::test] - async fn test_readyz() { - let client = async_nats::connect(&config().jetstream.url).await.unwrap(); + async fn test_readyz() -> Result<()> { + let client = async_nats::connect(&config().jetstream.url).await?; let context = jetstream::new(client); let stream_name = &config().jetstream.stream; @@ -389,19 +392,17 @@ mod tests { assert!(stream.is_ok()); let mem_store = InMemoryStore::new(); - let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec()).unwrap(); + let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec())?; - let callback_state = CallbackState::new(msg_graph, mem_store).await.unwrap(); + let callback_state = CallbackState::new(msg_graph, mem_store).await?; let result = setup_app(context, callback_state).await; - let request = Request::builder() - .uri("/readyz") - .body(Body::empty()) - .unwrap(); + let request = Request::builder().uri("/readyz").body(Body::empty())?; - let response = result.unwrap().oneshot(request).await.unwrap(); + let response = result.unwrap().oneshot(request).await?; assert_eq!(response.status(), StatusCode::NO_CONTENT); + Ok(()) } #[tokio::test] @@ -413,8 +414,8 @@ mod tests { #[cfg(feature = "all-tests")] #[tokio::test] - async fn test_auth_middleware() { - let client = async_nats::connect(&config().jetstream.url).await.unwrap(); + async fn test_auth_middleware() -> Result<()> { + let client = async_nats::connect(&config().jetstream.url).await?; let 
context = jetstream::new(client); let stream_name = &config().jetstream.stream; @@ -429,8 +430,8 @@ mod tests { assert!(stream.is_ok()); let mem_store = InMemoryStore::new(); - let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec()).unwrap(); - let callback_state = CallbackState::new(msg_graph, mem_store).await.unwrap(); + let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec())?; + let callback_state = CallbackState::new(msg_graph, mem_store).await?; let app = Router::new() .nest( @@ -447,10 +448,10 @@ mod tests { .body(Body::empty()) .unwrap(), ) - .await - .unwrap(); + .await?; assert_eq!(res.status(), StatusCode::UNAUTHORIZED); env::remove_var(ENV_NUMAFLOW_SERVING_AUTH_TOKEN); + Ok(()) } } diff --git a/rust/serving/src/app/callback/store/memstore.rs b/rust/serving/src/app/callback/store/memstore.rs index 9ec9cfac9b..a9cbaea31d 100644 --- a/rust/serving/src/app/callback/store/memstore.rs +++ b/rust/serving/src/app/callback/store/memstore.rs @@ -116,7 +116,7 @@ mod tests { store .save(vec![PayloadToSave::Callback { key: key.clone(), - value: value.clone(), + value: Arc::clone(&value), }]) .await .unwrap(); @@ -191,7 +191,7 @@ mod tests { let result = store .save(vec![PayloadToSave::Callback { key: "".to_string(), - value: value.clone(), + value: Arc::clone(&value), }]) .await; diff --git a/rust/serving/src/config.rs b/rust/serving/src/config.rs index d3e3268f32..82e663c8f5 100644 --- a/rust/serving/src/config.rs +++ b/rust/serving/src/config.rs @@ -36,19 +36,13 @@ pub fn config() -> &'static Settings { }) } -static GLOBAL_TLS_CONFIG: OnceLock<(Certificate, KeyPair)> = OnceLock::new(); - -fn init_cert_key_pair() -> std::result::Result<(Certificate, KeyPair), String> { +pub fn generate_certs() -> std::result::Result<(Certificate, KeyPair), String> { let _ = rustls::crypto::aws_lc_rs::default_provider().install_default(); let CertifiedKey { cert, key_pair } = generate_simple_self_signed(vec!["localhost".into()]) .map_err(|e| format!("Failed to 
generate cert {:?}", e))?; Ok((cert, key_pair)) } -pub fn cert_key_pair() -> &'static (Certificate, KeyPair) { - GLOBAL_TLS_CONFIG.get_or_init(|| init_cert_key_pair().expect("Failed to initialize TLS config")) -} - #[derive(Debug, Deserialize)] pub struct JetStreamConfig { pub stream: String, diff --git a/rust/serving/src/lib.rs b/rust/serving/src/lib.rs index 6e27c53e34..09e2dfcaa5 100644 --- a/rust/serving/src/lib.rs +++ b/rust/serving/src/lib.rs @@ -5,7 +5,7 @@ use tracing::info; pub use self::error::{Error, Result}; use crate::app::start_main_server; -use crate::config::{cert_key_pair, config}; +use crate::config::{config, generate_certs}; use crate::metrics::start_https_metrics_server; use crate::pipeline::min_pipeline_spec; @@ -16,8 +16,9 @@ mod error; mod metrics; mod pipeline; -pub async fn serve() -> std::result::Result<(), Box> { - let (cert, key) = cert_key_pair(); +pub async fn serve() -> std::result::Result<(), Box> +{ + let (cert, key) = generate_certs()?; let tls_config = RustlsConfig::from_pem(cert.pem().into(), key.serialize_pem().into()) .await diff --git a/rust/serving/src/metrics.rs b/rust/serving/src/metrics.rs index a9cd7a3492..830a37c0c5 100644 --- a/rust/serving/src/metrics.rs +++ b/rust/serving/src/metrics.rs @@ -167,11 +167,14 @@ mod tests { use tower::ServiceExt; use super::*; - use crate::config::cert_key_pair; + use crate::config::generate_certs; + + type Result = core::result::Result; + type Error = Box; #[tokio::test] - async fn test_start_metrics_server() { - let (cert, key) = cert_key_pair(); + async fn test_start_metrics_server() -> Result<()> { + let (cert, key) = generate_certs()?; let tls_config = RustlsConfig::from_pem(cert.pem().into(), key.serialize_pem().into()) .await @@ -188,6 +191,7 @@ mod tests { // Stop the server server.abort(); + Ok(()) } #[tokio::test] From 1c0989a9667b3c664bc965655c40bc05d4537384 Mon Sep 17 00:00:00 2001 From: Yashash H L Date: Tue, 10 Dec 2024 11:34:05 +0530 Subject: [PATCH 161/188] fix: Numaflow 
serving sink (#2103) Signed-off-by: Yashash H L Co-authored-by: Vigith Maurice --- rust/servesink/src/lib.rs | 29 +++++++++++++++++++++++------ 1 file changed, 23 insertions(+), 6 deletions(-) diff --git a/rust/servesink/src/lib.rs b/rust/servesink/src/lib.rs index 371638360f..e1fad8c2ca 100644 --- a/rust/servesink/src/lib.rs +++ b/rust/servesink/src/lib.rs @@ -6,6 +6,8 @@ use tracing::{error, warn}; const NUMAFLOW_CALLBACK_URL_HEADER: &str = "X-Numaflow-Callback-Url"; const NUMAFLOW_ID_HEADER: &str = "X-Numaflow-Id"; +const ENV_NUMAFLOW_CALLBACK_URL_KEY: &str = "NUMAFLOW_CALLBACK_URL_KEY"; +const ENV_NUMAFLOW_MESSAGE_ID_KEY: &str = "NUMAFLOW_MESSAGE_ID_KEY"; /// servesink is a Numaflow Sink which forwards the payload to the Numaflow serving URL. pub async fn servesink() -> Result<(), Box> { @@ -13,13 +15,28 @@ pub async fn servesink() -> Result<(), Box> { } struct ServeSink { + callback_url_key: String, + message_id_key: String, client: Client, } impl ServeSink { fn new() -> Self { + // extract the callback url key from the environment + let callback_url_key = std::env::var(ENV_NUMAFLOW_CALLBACK_URL_KEY) + .unwrap_or_else(|_| NUMAFLOW_CALLBACK_URL_HEADER.to_string()); + + // extract the message id key from the environment + let message_id_key = std::env::var(ENV_NUMAFLOW_MESSAGE_ID_KEY) + .unwrap_or_else(|_| NUMAFLOW_ID_HEADER.to_string()); + Self { - client: Client::new(), + callback_url_key, + message_id_key, + client: Client::builder() + .danger_accept_invalid_certs(true) + .build() + .unwrap(), } } } @@ -31,12 +48,12 @@ impl sink::Sinker for ServeSink { while let Some(datum) = input.recv().await { // if the callback url is absent, ignore the request - let url = match datum.headers.get(NUMAFLOW_CALLBACK_URL_HEADER) { + let url = match datum.headers.get(self.callback_url_key.as_str()) { Some(url) => url, None => { warn!( "Missing {} header, Ignoring the request", - NUMAFLOW_CALLBACK_URL_HEADER + self.callback_url_key ); responses.push(Response::ok(datum.id)); 
continue; @@ -44,12 +61,12 @@ impl sink::Sinker for ServeSink { }; // if the numaflow id is absent, ignore the request - let numaflow_id = match datum.headers.get(NUMAFLOW_ID_HEADER) { + let numaflow_id = match datum.headers.get(self.message_id_key.as_str()) { Some(id) => id, None => { warn!( "Missing {} header, Ignoring the request", - NUMAFLOW_ID_HEADER + self.message_id_key ); responses.push(Response::ok(datum.id)); continue; @@ -59,7 +76,7 @@ impl sink::Sinker for ServeSink { let resp = self .client .post(format!("{}_{}", url, "save")) - .header(NUMAFLOW_ID_HEADER, numaflow_id) + .header(self.message_id_key.as_str(), numaflow_id) .header("id", numaflow_id) .body(datum.value) .send() From 8bed236dc3f9f423d5f36bf5d259d0486c73bc76 Mon Sep 17 00:00:00 2001 From: Vedant Gupta <49195734+veds-g@users.noreply.github.com> Date: Tue, 10 Dec 2024 20:51:24 +0530 Subject: [PATCH 162/188] feat: mvtx gauge metrics support (#2259) Signed-off-by: veds-g --- .../namespaced-numaflow-server.yaml | 29 ++++++- config/advanced-install/numaflow-server.yaml | 29 ++++++- .../numaflow-server-metrics-proxy-config.yaml | 29 ++++++- config/install.yaml | 29 ++++++- config/namespace-install.yaml | 29 ++++++- server/apis/v1/promql_service_test.go | 87 +++++++++++++++++++ .../Metrics/partials/LineChart/index.tsx | 26 ++++-- .../partials/common/FiltersDropdown/index.tsx | 13 ++- .../partials/Metrics/utils/constants.ts | 6 +- 9 files changed, 263 insertions(+), 14 deletions(-) diff --git a/config/advanced-install/namespaced-numaflow-server.yaml b/config/advanced-install/namespaced-numaflow-server.yaml index 2804c41248..8dd8b588da 100644 --- a/config/advanced-install/namespaced-numaflow-server.yaml +++ b/config/advanced-install/namespaced-numaflow-server.yaml @@ -137,12 +137,39 @@ metadata: --- apiVersion: v1 data: - config.yaml: |- + config.yaml: | # url is a required field, it should be the url of the service to which the metrics proxy will connect # url: service_name + "." 
+ service_namespace + ".svc.cluster.local" + ":" + port # example for local prometheus service # url: http://prometheus-operated.monitoring.svc.cluster.local:9090 patterns: + - name: mono_vertex_gauge + object: mono-vertex + title: Pending Messages Lag + description: This query is the total number of pending messages for the mono vertex + expr: | + $metric_name{$filters} + params: + - name: start_time + required: false + - name: end_time + required: false + metrics: + - metric_name: monovtx_pending + required_filters: + - namespace + - mvtx_name + dimensions: + - name: pod + filters: + - name: pod + required: false + - name: period + required: false + - name: mono-vertex + filters: + - name: period + required: false - name: mono_vertex_histogram object: mono-vertex title: Processing Time Latency diff --git a/config/advanced-install/numaflow-server.yaml b/config/advanced-install/numaflow-server.yaml index 6a7444d395..b136cca998 100644 --- a/config/advanced-install/numaflow-server.yaml +++ b/config/advanced-install/numaflow-server.yaml @@ -144,12 +144,39 @@ metadata: --- apiVersion: v1 data: - config.yaml: |- + config.yaml: | # url is a required field, it should be the url of the service to which the metrics proxy will connect # url: service_name + "." 
+ service_namespace + ".svc.cluster.local" + ":" + port # example for local prometheus service # url: http://prometheus-operated.monitoring.svc.cluster.local:9090 patterns: + - name: mono_vertex_gauge + object: mono-vertex + title: Pending Messages Lag + description: This query is the total number of pending messages for the mono vertex + expr: | + $metric_name{$filters} + params: + - name: start_time + required: false + - name: end_time + required: false + metrics: + - metric_name: monovtx_pending + required_filters: + - namespace + - mvtx_name + dimensions: + - name: pod + filters: + - name: pod + required: false + - name: period + required: false + - name: mono-vertex + filters: + - name: period + required: false - name: mono_vertex_histogram object: mono-vertex title: Processing Time Latency diff --git a/config/base/numaflow-server/numaflow-server-metrics-proxy-config.yaml b/config/base/numaflow-server/numaflow-server-metrics-proxy-config.yaml index 7824bae01f..273727433a 100644 --- a/config/base/numaflow-server/numaflow-server-metrics-proxy-config.yaml +++ b/config/base/numaflow-server/numaflow-server-metrics-proxy-config.yaml @@ -9,6 +9,33 @@ data: # example for local prometheus service # url: http://prometheus-operated.monitoring.svc.cluster.local:9090 patterns: + - name: mono_vertex_gauge + object: mono-vertex + title: Pending Messages Lag + description: This query is the total number of pending messages for the mono vertex + expr: | + $metric_name{$filters} + params: + - name: start_time + required: false + - name: end_time + required: false + metrics: + - metric_name: monovtx_pending + required_filters: + - namespace + - mvtx_name + dimensions: + - name: pod + filters: + - name: pod + required: false + - name: period + required: false + - name: mono-vertex + filters: + - name: period + required: false - name: mono_vertex_histogram object: mono-vertex title: Processing Time Latency @@ -77,4 +104,4 @@ data: - name: pod filters: - name: pod - required: false 
\ No newline at end of file + required: false diff --git a/config/install.yaml b/config/install.yaml index 60bbd60918..a117951763 100644 --- a/config/install.yaml +++ b/config/install.yaml @@ -28557,12 +28557,39 @@ metadata: --- apiVersion: v1 data: - config.yaml: |- + config.yaml: | # url is a required field, it should be the url of the service to which the metrics proxy will connect # url: service_name + "." + service_namespace + ".svc.cluster.local" + ":" + port # example for local prometheus service # url: http://prometheus-operated.monitoring.svc.cluster.local:9090 patterns: + - name: mono_vertex_gauge + object: mono-vertex + title: Pending Messages Lag + description: This query is the total number of pending messages for the mono vertex + expr: | + $metric_name{$filters} + params: + - name: start_time + required: false + - name: end_time + required: false + metrics: + - metric_name: monovtx_pending + required_filters: + - namespace + - mvtx_name + dimensions: + - name: pod + filters: + - name: pod + required: false + - name: period + required: false + - name: mono-vertex + filters: + - name: period + required: false - name: mono_vertex_histogram object: mono-vertex title: Processing Time Latency diff --git a/config/namespace-install.yaml b/config/namespace-install.yaml index 58c769f7ff..e384b39e14 100644 --- a/config/namespace-install.yaml +++ b/config/namespace-install.yaml @@ -28445,12 +28445,39 @@ metadata: --- apiVersion: v1 data: - config.yaml: |- + config.yaml: | # url is a required field, it should be the url of the service to which the metrics proxy will connect # url: service_name + "." 
+ service_namespace + ".svc.cluster.local" + ":" + port # example for local prometheus service # url: http://prometheus-operated.monitoring.svc.cluster.local:9090 patterns: + - name: mono_vertex_gauge + object: mono-vertex + title: Pending Messages Lag + description: This query is the total number of pending messages for the mono vertex + expr: | + $metric_name{$filters} + params: + - name: start_time + required: false + - name: end_time + required: false + metrics: + - metric_name: monovtx_pending + required_filters: + - namespace + - mvtx_name + dimensions: + - name: pod + filters: + - name: pod + required: false + - name: period + required: false + - name: mono-vertex + filters: + - name: period + required: false - name: mono_vertex_histogram object: mono-vertex title: Processing Time Latency diff --git a/server/apis/v1/promql_service_test.go b/server/apis/v1/promql_service_test.go index 733476ad24..3d923bb850 100644 --- a/server/apis/v1/promql_service_test.go +++ b/server/apis/v1/promql_service_test.go @@ -273,6 +273,70 @@ func Test_PromQueryBuilder(t *testing.T) { } }) } + + // tests for gauge metrics + var gauge_service = &PromQlService{ + PlaceHolders: map[string]map[string][]string{ + "monovtx_pending": { + "mono-vertex": {"$dimension", "$metric_name", "$filters"}, + }, + }, + Expression: map[string]map[string]string{ + "monovtx_pending": { + "mono-vertex": "$metric_name{$filters}", + }, + }, + } + + gauge_metrics_tests := []struct { + name string + requestBody MetricsRequestBody + expectedQuery string + expectError bool + }{ + { + name: "Successful gauge metrics template substitution", + requestBody: MetricsRequestBody{ + MetricName: "monovtx_pending", + Dimension: "mono-vertex", + Filters: map[string]string{ + "namespace": "test_namespace", + "mvtx_name": "test_mvtx", + "period": "5m", + }, + }, + expectedQuery: `monovtx_pending{namespace= "test_namespace", mvtx_name= "test_mvtx", period= "5m"}`, + }, + { + name: "Missing metric name in service config", + 
requestBody: MetricsRequestBody{ + MetricName: "non_existent_metric", + Dimension: "mono-vertex", + Filters: map[string]string{ + "namespace": "test_namespace", + "mvtx_name": "test_mvtx", + "period": "5m", + }, + }, + expectError: true, + }, + } + + for _, tt := range gauge_metrics_tests { + t.Run(tt.name, func(t *testing.T) { + actualQuery, err := gauge_service.BuildQuery(tt.requestBody) + if tt.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + if !comparePrometheusQueries(tt.expectedQuery, actualQuery) { + t.Errorf("Prometheus queries do not match.\nExpected: %s\nGot: %s", tt.expectedQuery, actualQuery) + } else { + t.Log("Prometheus queries match!") + } + } + }) + } } func Test_QueryPrometheus(t *testing.T) { @@ -322,6 +386,29 @@ func Test_QueryPrometheus(t *testing.T) { assert.Equal(t, 1, matrix.Len()) }) + t.Run("Successful gauge query", func(t *testing.T) { + mockAPI := &MockPrometheusAPI{} + promQlService := &PromQlService{ + PrometheusClient: &Prometheus{ + Api: mockAPI, + }, + } + query := `monovtx_pending{namespace="default", mvtx_name="test-mvtx", pending="5m"}` + startTime := time.Now().Add(-30 * time.Minute) + endTime := time.Now() + + ctx := context.Background() + result, err := promQlService.QueryPrometheus(ctx, query, startTime, endTime) + + assert.NoError(t, err) + assert.NotNil(t, result) + + // for query range , response should be a matrix + matrix, ok := result.(model.Matrix) + assert.True(t, ok) + assert.Equal(t, 1, matrix.Len()) + }) + t.Run("Prometheus client is nil", func(t *testing.T) { service := &PromQlService{ PrometheusClient: nil, diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/LineChart/index.tsx b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/LineChart/index.tsx index b7171a0c42..2415a39d12 100644 --- 
a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/LineChart/index.tsx +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/LineChart/index.tsx @@ -18,7 +18,13 @@ import EmptyChart from "../EmptyChart"; import { useMetricsFetch } from "../../../../../../../../../../../../../../../utils/fetchWrappers/metricsFetch"; // TODO have a check for metricReq against metric object to ensure required fields are passed -const LineChartComponent = ({ namespaceId, pipelineId, type, metric, vertexId }: any) => { +const LineChartComponent = ({ + namespaceId, + pipelineId, + type, + metric, + vertexId, +}: any) => { const [transformedData, setTransformedData] = useState([]); const [chartLabels, setChartLabels] = useState([]); const [metricsReq, setMetricsReq] = useState({ @@ -28,7 +34,9 @@ const LineChartComponent = ({ namespaceId, pipelineId, type, metric, vertexId }: // store all filters for each selected dimension const [filtersList, setFiltersList] = useState([]); const [filters, setFilters] = useState({}); - const [previousDimension, setPreviousDimension] = useState(metricsReq?.dimension); + const [previousDimension, setPreviousDimension] = useState( + metricsReq?.dimension + ); const getRandomColor = useCallback((index: number) => { const hue = (index * 137.508) % 360; @@ -96,7 +104,7 @@ const LineChartComponent = ({ namespaceId, pipelineId, type, metric, vertexId }: name: param?.Name, required: param?.Required, })) || []; - + setParamsList([...initParams, ...newParams]); }, [metric, setParamsList]); @@ -110,7 +118,12 @@ const LineChartComponent = ({ namespaceId, pipelineId, type, metric, vertexId }: filters, }); - const groupByLabel = useCallback((dimension: string) => { + const groupByLabel = useCallback((dimension: string, metricName: string) => { + switch (metricName) { + case "monovtx_pending": + return "period"; + } + 
switch (dimension) { case "mono-vertex": return "mvtx_name"; @@ -123,7 +136,10 @@ const LineChartComponent = ({ namespaceId, pipelineId, type, metric, vertexId }: if (chartData) { const labels: any[] = []; const transformedData: any[] = []; - const label = groupByLabel(metricsReq?.dimension); + const label = groupByLabel( + metricsReq?.dimension, + metricsReq?.metric_name + ); chartData?.forEach((item) => { const labelVal = item?.metric?.[label]; labels.push(labelVal); diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/common/FiltersDropdown/index.tsx b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/common/FiltersDropdown/index.tsx index 193f2ef855..360fdbd922 100644 --- a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/common/FiltersDropdown/index.tsx +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/common/FiltersDropdown/index.tsx @@ -29,6 +29,13 @@ export interface FiltersDropdownProps { setFilters: any; } +const periodData = [ + { name: "default" }, + { name: "1m" }, + { name: "5m" }, + { name: "15m" }, +]; + const FiltersDropdown = ({ items, namespaceId, @@ -64,7 +71,9 @@ const FiltersDropdown = ({ try { const response = await fetch( `${host}${getBaseHref()}/api/v1/namespaces/${namespaceId}/${ - type === "monoVertex" ? `mono-vertices/${pipelineId}/pods` : `pipelines/${pipelineId}/vertices/${vertexId}/pods` + type === "monoVertex" + ? 
`mono-vertices/${pipelineId}/pods` + : `pipelines/${pipelineId}/vertices/${vertexId}/pods` }` ); if (!response.ok) { @@ -101,6 +110,8 @@ const FiltersDropdown = ({ switch (filterName) { case "pod": return podsData; + case "period": + return periodData; default: return null; } diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/utils/constants.ts b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/utils/constants.ts index 925d8acea1..6c367c4276 100644 --- a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/utils/constants.ts +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/utils/constants.ts @@ -19,7 +19,7 @@ export const dimensionMap: { [p: string]: string } = { "mono-vertex": "MonoVertex", pod: "Pod", pipeline: "Pipeline", - vertex: "Vertex" + vertex: "Vertex", }; export const dimensionReverseMap: { [p: string]: string } = { @@ -38,6 +38,6 @@ export const metricNameMap: { [p: string]: string } = { "Mono Vertex Processing Time Latency (in micro seconds)", monovtx_sink_time_bucket: "Mono Vertex Sink Write Time Latency (in micro seconds)", - forwarder_data_read_total: - "Vertex Read Processing Rate" + forwarder_data_read_total: "Vertex Read Processing Rate", + monovtx_pending: "Mono Vertex Pending", }; From 059a585b04c85415cea9f343a0cf08927100ba35 Mon Sep 17 00:00:00 2001 From: Adarsh Jain Date: Tue, 10 Dec 2024 23:33:14 +0530 Subject: [PATCH 163/188] feat: counter metrics visualizer for mono-vertex (#2256) Signed-off-by: adarsh0728 --- .../namespaced-numaflow-server.yaml | 55 ++++++++++++++----- config/advanced-install/numaflow-server.yaml | 55 ++++++++++++++----- .../numaflow-server-metrics-proxy-config.yaml | 53 ++++++++++++++---- config/install.yaml | 55 ++++++++++++++----- 
config/namespace-install.yaml | 55 ++++++++++++++----- .../PodDetails/partials/Metrics/index.tsx | 2 +- .../partials/Metrics/utils/constants.ts | 5 +- 7 files changed, 214 insertions(+), 66 deletions(-) diff --git a/config/advanced-install/namespaced-numaflow-server.yaml b/config/advanced-install/namespaced-numaflow-server.yaml index 8dd8b588da..9d34601fd3 100644 --- a/config/advanced-install/namespaced-numaflow-server.yaml +++ b/config/advanced-install/namespaced-numaflow-server.yaml @@ -137,7 +137,7 @@ metadata: --- apiVersion: v1 data: - config.yaml: | + config.yaml: |- # url is a required field, it should be the url of the service to which the metrics proxy will connect # url: service_name + "." + service_namespace + ".svc.cluster.local" + ":" + port # example for local prometheus service @@ -191,28 +191,30 @@ data: - namespace - mvtx_name dimensions: + - name: mono-vertex + # expr: optional expression for prometheus query + # overrides the default expression - name: pod # expr: optional expression for prometheus query # overrides the default expression filters: - name: pod required: false + - metric_name: monovtx_sink_time_bucket + required_filters: + - namespace + - mvtx_name + dimensions: - name: mono-vertex # expr: optional expression for prometheus query # overrides the default expression + - name: pod + # expr: optional expression for prometheus query + # overrides the default expression + filters: + - name: pod + required: false # Add histogram metrics similar to the pattern above - #- metric_name: monovtx_sink_time_bucket - # required_filters: - # - namespace - # - mvtx_name - # dimensions: - # - name: pod - # #expr: optional - # filters: - # - name: pod - # required: false - # - name: mono-vertex - # #expr: optional - name: vertex_throughput object: vertex title: Vertex Throughput and Message Rates @@ -239,6 +241,33 @@ data: filters: - name: pod required: false + - name: mono_vertex_throughput + object: mono-vertex + title: Mono-Vertex Throughput and 
Message Rates + description: This pattern measures the throughput of a mono-vertex in messages per second across different dimensions + expr: sum(rate($metric_name{$filters}[$duration])) by ($dimension) + params: + - name: duration + required: true + - name: start_time + required: false + - name: end_time + required: false + metrics: + - metric_name: monovtx_read_total + required_filters: + - namespace + - mvtx_name + dimensions: + - name: mono-vertex + # expr: optional expression for prometheus query + # overrides the default expression + - name: pod + # expr: optional expression for prometheus query + # overrides the default expression + filters: + - name: pod + required: false kind: ConfigMap metadata: name: numaflow-server-metrics-proxy-config diff --git a/config/advanced-install/numaflow-server.yaml b/config/advanced-install/numaflow-server.yaml index b136cca998..bca4a13bbf 100644 --- a/config/advanced-install/numaflow-server.yaml +++ b/config/advanced-install/numaflow-server.yaml @@ -144,7 +144,7 @@ metadata: --- apiVersion: v1 data: - config.yaml: | + config.yaml: |- # url is a required field, it should be the url of the service to which the metrics proxy will connect # url: service_name + "." 
+ service_namespace + ".svc.cluster.local" + ":" + port # example for local prometheus service @@ -198,28 +198,30 @@ data: - namespace - mvtx_name dimensions: + - name: mono-vertex + # expr: optional expression for prometheus query + # overrides the default expression - name: pod # expr: optional expression for prometheus query # overrides the default expression filters: - name: pod required: false + - metric_name: monovtx_sink_time_bucket + required_filters: + - namespace + - mvtx_name + dimensions: - name: mono-vertex # expr: optional expression for prometheus query # overrides the default expression + - name: pod + # expr: optional expression for prometheus query + # overrides the default expression + filters: + - name: pod + required: false # Add histogram metrics similar to the pattern above - #- metric_name: monovtx_sink_time_bucket - # required_filters: - # - namespace - # - mvtx_name - # dimensions: - # - name: pod - # #expr: optional - # filters: - # - name: pod - # required: false - # - name: mono-vertex - # #expr: optional - name: vertex_throughput object: vertex title: Vertex Throughput and Message Rates @@ -246,6 +248,33 @@ data: filters: - name: pod required: false + - name: mono_vertex_throughput + object: mono-vertex + title: Mono-Vertex Throughput and Message Rates + description: This pattern measures the throughput of a mono-vertex in messages per second across different dimensions + expr: sum(rate($metric_name{$filters}[$duration])) by ($dimension) + params: + - name: duration + required: true + - name: start_time + required: false + - name: end_time + required: false + metrics: + - metric_name: monovtx_read_total + required_filters: + - namespace + - mvtx_name + dimensions: + - name: mono-vertex + # expr: optional expression for prometheus query + # overrides the default expression + - name: pod + # expr: optional expression for prometheus query + # overrides the default expression + filters: + - name: pod + required: false kind: ConfigMap 
metadata: name: numaflow-server-metrics-proxy-config diff --git a/config/base/numaflow-server/numaflow-server-metrics-proxy-config.yaml b/config/base/numaflow-server/numaflow-server-metrics-proxy-config.yaml index 273727433a..d97370e392 100644 --- a/config/base/numaflow-server/numaflow-server-metrics-proxy-config.yaml +++ b/config/base/numaflow-server/numaflow-server-metrics-proxy-config.yaml @@ -57,28 +57,30 @@ data: - namespace - mvtx_name dimensions: + - name: mono-vertex + # expr: optional expression for prometheus query + # overrides the default expression - name: pod # expr: optional expression for prometheus query # overrides the default expression filters: - name: pod required: false + - metric_name: monovtx_sink_time_bucket + required_filters: + - namespace + - mvtx_name + dimensions: - name: mono-vertex # expr: optional expression for prometheus query # overrides the default expression + - name: pod + # expr: optional expression for prometheus query + # overrides the default expression + filters: + - name: pod + required: false # Add histogram metrics similar to the pattern above - #- metric_name: monovtx_sink_time_bucket - # required_filters: - # - namespace - # - mvtx_name - # dimensions: - # - name: pod - # #expr: optional - # filters: - # - name: pod - # required: false - # - name: mono-vertex - # #expr: optional - name: vertex_throughput object: vertex title: Vertex Throughput and Message Rates @@ -105,3 +107,30 @@ data: filters: - name: pod required: false + - name: mono_vertex_throughput + object: mono-vertex + title: Mono-Vertex Throughput and Message Rates + description: This pattern measures the throughput of a mono-vertex in messages per second across different dimensions + expr: sum(rate($metric_name{$filters}[$duration])) by ($dimension) + params: + - name: duration + required: true + - name: start_time + required: false + - name: end_time + required: false + metrics: + - metric_name: monovtx_read_total + required_filters: + - namespace + - 
mvtx_name + dimensions: + - name: mono-vertex + # expr: optional expression for prometheus query + # overrides the default expression + - name: pod + # expr: optional expression for prometheus query + # overrides the default expression + filters: + - name: pod + required: false \ No newline at end of file diff --git a/config/install.yaml b/config/install.yaml index a117951763..72df249f0f 100644 --- a/config/install.yaml +++ b/config/install.yaml @@ -28557,7 +28557,7 @@ metadata: --- apiVersion: v1 data: - config.yaml: | + config.yaml: |- # url is a required field, it should be the url of the service to which the metrics proxy will connect # url: service_name + "." + service_namespace + ".svc.cluster.local" + ":" + port # example for local prometheus service @@ -28611,28 +28611,30 @@ data: - namespace - mvtx_name dimensions: + - name: mono-vertex + # expr: optional expression for prometheus query + # overrides the default expression - name: pod # expr: optional expression for prometheus query # overrides the default expression filters: - name: pod required: false + - metric_name: monovtx_sink_time_bucket + required_filters: + - namespace + - mvtx_name + dimensions: - name: mono-vertex # expr: optional expression for prometheus query # overrides the default expression + - name: pod + # expr: optional expression for prometheus query + # overrides the default expression + filters: + - name: pod + required: false # Add histogram metrics similar to the pattern above - #- metric_name: monovtx_sink_time_bucket - # required_filters: - # - namespace - # - mvtx_name - # dimensions: - # - name: pod - # #expr: optional - # filters: - # - name: pod - # required: false - # - name: mono-vertex - # #expr: optional - name: vertex_throughput object: vertex title: Vertex Throughput and Message Rates @@ -28659,6 +28661,33 @@ data: filters: - name: pod required: false + - name: mono_vertex_throughput + object: mono-vertex + title: Mono-Vertex Throughput and Message Rates + description: 
This pattern measures the throughput of a mono-vertex in messages per second across different dimensions + expr: sum(rate($metric_name{$filters}[$duration])) by ($dimension) + params: + - name: duration + required: true + - name: start_time + required: false + - name: end_time + required: false + metrics: + - metric_name: monovtx_read_total + required_filters: + - namespace + - mvtx_name + dimensions: + - name: mono-vertex + # expr: optional expression for prometheus query + # overrides the default expression + - name: pod + # expr: optional expression for prometheus query + # overrides the default expression + filters: + - name: pod + required: false kind: ConfigMap metadata: name: numaflow-server-metrics-proxy-config diff --git a/config/namespace-install.yaml b/config/namespace-install.yaml index e384b39e14..1ae302f71f 100644 --- a/config/namespace-install.yaml +++ b/config/namespace-install.yaml @@ -28445,7 +28445,7 @@ metadata: --- apiVersion: v1 data: - config.yaml: | + config.yaml: |- # url is a required field, it should be the url of the service to which the metrics proxy will connect # url: service_name + "." 
+ service_namespace + ".svc.cluster.local" + ":" + port # example for local prometheus service @@ -28499,28 +28499,30 @@ data: - namespace - mvtx_name dimensions: + - name: mono-vertex + # expr: optional expression for prometheus query + # overrides the default expression - name: pod # expr: optional expression for prometheus query # overrides the default expression filters: - name: pod required: false + - metric_name: monovtx_sink_time_bucket + required_filters: + - namespace + - mvtx_name + dimensions: - name: mono-vertex # expr: optional expression for prometheus query # overrides the default expression + - name: pod + # expr: optional expression for prometheus query + # overrides the default expression + filters: + - name: pod + required: false # Add histogram metrics similar to the pattern above - #- metric_name: monovtx_sink_time_bucket - # required_filters: - # - namespace - # - mvtx_name - # dimensions: - # - name: pod - # #expr: optional - # filters: - # - name: pod - # required: false - # - name: mono-vertex - # #expr: optional - name: vertex_throughput object: vertex title: Vertex Throughput and Message Rates @@ -28547,6 +28549,33 @@ data: filters: - name: pod required: false + - name: mono_vertex_throughput + object: mono-vertex + title: Mono-Vertex Throughput and Message Rates + description: This pattern measures the throughput of a mono-vertex in messages per second across different dimensions + expr: sum(rate($metric_name{$filters}[$duration])) by ($dimension) + params: + - name: duration + required: true + - name: start_time + required: false + - name: end_time + required: false + metrics: + - metric_name: monovtx_read_total + required_filters: + - namespace + - mvtx_name + dimensions: + - name: mono-vertex + # expr: optional expression for prometheus query + # overrides the default expression + - name: pod + # expr: optional expression for prometheus query + # overrides the default expression + filters: + - name: pod + required: false kind: 
ConfigMap metadata: name: numaflow-server-metrics-proxy-config diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/index.tsx b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/index.tsx index d078509923..a32fec5322 100644 --- a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/index.tsx +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/index.tsx @@ -77,7 +77,7 @@ export function Metrics({ namespaceId, pipelineId, type, vertexId }: MetricsProp aria-controls={`${metric?.metric_name}-content`} id={`${metric?.metric_name}-header`} > - + {metricNameMap[metric?.metric_name] || metric?.metric_name} diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/utils/constants.ts b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/utils/constants.ts index 6c367c4276..56833633fb 100644 --- a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/utils/constants.ts +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/utils/constants.ts @@ -38,6 +38,9 @@ export const metricNameMap: { [p: string]: string } = { "Mono Vertex Processing Time Latency (in micro seconds)", monovtx_sink_time_bucket: "Mono Vertex Sink Write Time Latency (in micro seconds)", - forwarder_data_read_total: "Vertex Read Processing Rate", + forwarder_data_read_total: + "Vertex Read Processing Rate (messages per second)", + monovtx_read_total: + "Mono Vertex Read Processing Rate (messages per second)", monovtx_pending: "Mono Vertex Pending", }; From 
b2e480b1bbad85e4deeb0a70edb6b1b9205743b4 Mon Sep 17 00:00:00 2001 From: Yashash H L Date: Wed, 11 Dec 2024 08:49:41 +0530 Subject: [PATCH 164/188] chore: make Message cheap to clone (#2272) Signed-off-by: Yashash H L Signed-off-by: Vigith Maurice Co-authored-by: Vigith Maurice --- rust/numaflow-core/src/message.rs | 109 ++++++++++-------- rust/numaflow-core/src/pipeline.rs | 7 +- .../src/pipeline/isb/jetstream/reader.rs | 17 +-- .../src/pipeline/isb/jetstream/writer.rs | 56 ++++----- rust/numaflow-core/src/shared/forward.rs | 9 +- rust/numaflow-core/src/sink.rs | 25 ++-- rust/numaflow-core/src/sink/blackhole.rs | 13 ++- rust/numaflow-core/src/sink/log.rs | 13 ++- rust/numaflow-core/src/sink/user_defined.rs | 13 ++- rust/numaflow-core/src/source.rs | 2 +- rust/numaflow-core/src/source/generator.rs | 7 +- rust/numaflow-core/src/source/pulsar.rs | 8 +- rust/numaflow-core/src/tracker.rs | 57 +++++---- rust/numaflow-core/src/transformer.rs | 12 +- .../src/transformer/user_defined.rs | 15 +-- 15 files changed, 201 insertions(+), 162 deletions(-) diff --git a/rust/numaflow-core/src/message.rs b/rust/numaflow-core/src/message.rs index f8cb25ff1a..2b3ca0b5fc 100644 --- a/rust/numaflow-core/src/message.rs +++ b/rust/numaflow-core/src/message.rs @@ -1,6 +1,7 @@ use std::cmp::PartialEq; use std::collections::HashMap; use std::fmt; +use std::sync::Arc; use async_nats::HeaderValue; use base64::engine::general_purpose::STANDARD as BASE64_STANDARD; @@ -23,13 +24,13 @@ use crate::{config, Error}; const DROP: &str = "U+005C__DROP__"; /// A message that is sent from the source to the sink. -#[derive(Debug, Clone, Serialize, Deserialize)] +/// It is cheap to clone. 
+#[derive(Debug, Clone)] pub(crate) struct Message { - // FIXME: Arc<[Bytes]> /// keys of the message - pub(crate) keys: Vec, + pub(crate) keys: Arc<[String]>, /// tags of the message - pub(crate) tags: Option>, + pub(crate) tags: Option>, /// actual payload of the message pub(crate) value: Bytes, /// offset of the message, it is optional because offset is only @@ -81,8 +82,8 @@ impl TryFrom for Message { let event_time = Utc::now(); let offset = None; let id = MessageID { - vertex_name: config::get_vertex_name().to_string(), - offset: "0".to_string(), + vertex_name: config::get_vertex_name().to_string().into(), + offset: "0".to_string().into(), index: 0, }; @@ -131,15 +132,16 @@ impl fmt::Display for IntOffset { /// StringOffset is string based offset enum type. #[derive(Debug, Clone, Serialize, Deserialize)] -pub struct StringOffset { - offset: String, +pub(crate) struct StringOffset { + /// offset could be a complex base64 string. + offset: Bytes, partition_idx: u16, } impl StringOffset { pub fn new(seq: String, partition_idx: u16) -> Self { Self { - offset: seq, + offset: seq.into(), partition_idx, } } @@ -147,7 +149,12 @@ impl StringOffset { impl fmt::Display for StringOffset { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}-{}", self.offset, self.partition_idx) + write!( + f, + "{}-{}", + std::str::from_utf8(&self.offset).expect("it should be valid utf-8"), + self.partition_idx + ) } } @@ -159,18 +166,19 @@ pub(crate) enum ReadAck { Nak, } +/// Message ID which is used to uniquely identify a message. It cheap to clone this. 
#[derive(Debug, Clone, Serialize, Deserialize)] pub(crate) struct MessageID { - pub(crate) vertex_name: String, - pub(crate) offset: String, + pub(crate) vertex_name: Bytes, + pub(crate) offset: Bytes, pub(crate) index: i32, } impl From for MessageID { fn from(id: numaflow_pb::objects::isb::MessageId) -> Self { Self { - vertex_name: id.vertex_name, - offset: id.offset, + vertex_name: id.vertex_name.into(), + offset: id.offset.into(), index: id.index, } } @@ -179,16 +187,21 @@ impl From for MessageID { impl From for numaflow_pb::objects::isb::MessageId { fn from(id: MessageID) -> Self { Self { - vertex_name: id.vertex_name, - offset: id.offset, + vertex_name: String::from_utf8_lossy(&id.vertex_name).to_string(), + offset: String::from_utf8_lossy(&id.offset).to_string(), index: id.index, } } } - impl fmt::Display for MessageID { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}-{}-{}", self.vertex_name, self.offset, self.index) + write!( + f, + "{}-{}-{}", + std::str::from_utf8(&self.vertex_name).expect("it should be valid utf-8"), + std::str::from_utf8(&self.offset).expect("it should be valid utf-8"), + self.index + ) } } @@ -220,8 +233,8 @@ impl TryFrom for BytesMut { }), kind: numaflow_pb::objects::isb::MessageKind::Data as i32, id: Some(message.id.into()), - keys: message.keys.clone(), - headers: message.headers.clone(), + keys: message.keys.to_vec(), + headers: message.headers, }), body: Some(numaflow_pb::objects::isb::Body { payload: message.value.to_vec(), @@ -255,7 +268,7 @@ impl TryFrom for Message { let id = header.id.ok_or(Error::Proto("Missing id".to_string()))?; Ok(Message { - keys: header.keys, + keys: Arc::from(header.keys.into_boxed_slice()), tags: None, value: body.payload.into(), offset: None, @@ -273,7 +286,7 @@ impl From for SourceTransformRequest { request: Some( numaflow_pb::clients::sourcetransformer::source_transform_request::Request { id: message.id.to_string(), - keys: message.keys, + keys: message.keys.to_vec(), 
value: message.value.to_vec(), event_time: prost_timestamp_from_utc(message.event_time), watermark: None, @@ -292,21 +305,21 @@ impl TryFrom for Message { fn try_from(result: read_response::Result) -> Result { let source_offset = match result.offset { Some(o) => Offset::String(StringOffset { - offset: BASE64_STANDARD.encode(o.offset), + offset: BASE64_STANDARD.encode(o.offset).into(), partition_idx: o.partition_id as u16, }), None => return Err(Error::Source("Offset not found".to_string())), }; Ok(Message { - keys: result.keys, + keys: Arc::from(result.keys), tags: None, value: result.payload.into(), offset: Some(source_offset.clone()), event_time: utc_from_timestamp(result.event_time), id: MessageID { - vertex_name: config::get_vertex_name().to_string(), - offset: source_offset.to_string(), + vertex_name: config::get_vertex_name().to_string().into(), + offset: source_offset.to_string().into(), index: 0, }, headers: result.headers, @@ -319,7 +332,7 @@ impl From for SinkRequest { fn from(message: Message) -> Self { Self { request: Some(Request { - keys: message.keys, + keys: message.keys.to_vec(), value: message.value.to_vec(), event_time: prost_timestamp_from_utc(message.event_time), watermark: None, @@ -399,7 +412,7 @@ mod tests { #[test] fn test_offset_display() { let offset = Offset::String(StringOffset { - offset: "123".to_string(), + offset: "123".to_string().into(), partition_idx: 1, }); assert_eq!(format!("{}", offset), "123-1"); @@ -408,8 +421,8 @@ mod tests { #[test] fn test_message_id_display() { let message_id = MessageID { - vertex_name: "vertex".to_string(), - offset: "123".to_string(), + vertex_name: "vertex".to_string().into(), + offset: "123".to_string().into(), index: 0, }; assert_eq!(format!("{}", message_id), "vertex-123-0"); @@ -418,17 +431,17 @@ mod tests { #[test] fn test_message_to_vec_u8() { let message = Message { - keys: vec!["key1".to_string()], + keys: Arc::from(vec!["key1".to_string()]), tags: None, value: vec![1, 2, 3].into(), offset: 
Some(Offset::String(StringOffset { - offset: "123".to_string(), + offset: "123".to_string().into(), partition_idx: 0, })), event_time: Utc.timestamp_opt(1627846261, 0).unwrap(), id: MessageID { - vertex_name: "vertex".to_string(), - offset: "123".to_string(), + vertex_name: "vertex".to_string().into(), + offset: "123".to_string().into(), index: 0, }, headers: HashMap::new(), @@ -445,8 +458,8 @@ mod tests { }), kind: numaflow_pb::objects::isb::MessageKind::Data as i32, id: Some(message.id.into()), - keys: message.keys.clone(), - headers: message.headers.clone(), + keys: message.keys.to_vec(), + headers: message.headers, }), body: Some(Body { payload: message.value.clone().into(), @@ -488,7 +501,7 @@ mod tests { assert!(result.is_ok()); let message = result.unwrap(); - assert_eq!(message.keys, vec!["key1".to_string()]); + assert_eq!(message.keys.to_vec(), vec!["key1".to_string()]); assert_eq!(message.value, vec![1, 2, 3]); assert_eq!( message.event_time, @@ -499,17 +512,17 @@ mod tests { #[test] fn test_message_to_source_transform_request() { let message = Message { - keys: vec!["key1".to_string()], + keys: Arc::from(vec!["key1".to_string()]), tags: None, value: vec![1, 2, 3].into(), offset: Some(Offset::String(StringOffset { - offset: "123".to_string(), + offset: "123".to_string().into(), partition_idx: 0, })), event_time: Utc.timestamp_opt(1627846261, 0).unwrap(), id: MessageID { - vertex_name: "vertex".to_string(), - offset: "123".to_string(), + vertex_name: "vertex".to_string().into(), + offset: "123".to_string().into(), index: 0, }, headers: HashMap::new(), @@ -538,7 +551,7 @@ mod tests { assert!(message.is_ok()); let message = message.unwrap(); - assert_eq!(message.keys, vec!["key1".to_string()]); + assert_eq!(message.keys.to_vec(), vec!["key1".to_string()]); assert_eq!(message.value, vec![1, 2, 3]); assert_eq!( message.event_time, @@ -549,17 +562,17 @@ mod tests { #[test] fn test_message_to_sink_request() { let message = Message { - keys: 
vec!["key1".to_string()], + keys: Arc::from(vec!["key1".to_string()]), tags: None, value: vec![1, 2, 3].into(), offset: Some(Offset::String(StringOffset { - offset: "123".to_string(), + offset: "123".to_string().into(), partition_idx: 0, })), event_time: Utc.timestamp_opt(1627846261, 0).unwrap(), id: MessageID { - vertex_name: "vertex".to_string(), - offset: "123".to_string(), + vertex_name: "vertex".to_string().into(), + offset: "123".to_string().into(), index: 0, }, headers: HashMap::new(), @@ -622,8 +635,8 @@ mod tests { #[test] fn test_message_id_to_proto() { let message_id = MessageID { - vertex_name: "vertex".to_string(), - offset: "123".to_string(), + vertex_name: "vertex".to_string().into(), + offset: "123".to_string().into(), index: 0, }; let proto_id: MessageId = message_id.into(); diff --git a/rust/numaflow-core/src/pipeline.rs b/rust/numaflow-core/src/pipeline.rs index ffee46033b..a8deaa7b64 100644 --- a/rust/numaflow-core/src/pipeline.rs +++ b/rust/numaflow-core/src/pipeline.rs @@ -229,6 +229,7 @@ async fn create_js_context(config: pipeline::isb::jetstream::ClientConfig) -> Re #[cfg(test)] mod tests { use std::collections::HashMap; + use std::sync::Arc; use std::time::Duration; use async_nats::jetstream; @@ -428,14 +429,14 @@ mod tests { use crate::message::{Message, MessageID, Offset, StringOffset}; let message = Message { - keys: vec!["key1".to_string()], + keys: Arc::from(vec!["key1".to_string()]), tags: None, value: vec![1, 2, 3].into(), offset: Some(Offset::String(StringOffset::new("123".to_string(), 0))), event_time: Utc.timestamp_opt(1627846261, 0).unwrap(), id: MessageID { - vertex_name: "vertex".to_string(), - offset: "123".to_string(), + vertex_name: "vertex".to_string().into(), + offset: "123".to_string().into(), index: 0, }, headers: HashMap::new(), diff --git a/rust/numaflow-core/src/pipeline/isb/jetstream/reader.rs b/rust/numaflow-core/src/pipeline/isb/jetstream/reader.rs index 4216484ecc..3773228906 100644 --- 
a/rust/numaflow-core/src/pipeline/isb/jetstream/reader.rs +++ b/rust/numaflow-core/src/pipeline/isb/jetstream/reader.rs @@ -149,8 +149,8 @@ impl JetstreamReader { ))); message.id = MessageID { - vertex_name: pipeline_config.vertex_name.clone(), - offset: msg_info.stream_sequence.to_string(), + vertex_name: pipeline_config.vertex_name.clone().into(), + offset: msg_info.stream_sequence.to_string().into(), index: 0, }; @@ -258,6 +258,7 @@ impl fmt::Display for JetstreamReader { #[cfg(test)] mod tests { use std::collections::HashMap; + use std::sync::Arc; use super::*; use crate::message::{Message, MessageID}; @@ -327,14 +328,14 @@ mod tests { for i in 0..10 { let message = Message { - keys: vec![format!("key_{}", i)], + keys: Arc::from(vec![format!("key_{}", i)]), tags: None, value: format!("message {}", i).as_bytes().to_vec().into(), offset: None, event_time: Utc::now(), id: MessageID { - vertex_name: "vertex".to_string(), - offset: format!("offset_{}", i), + vertex_name: "vertex".to_string().into(), + offset: format!("offset_{}", i).into(), index: i, }, headers: HashMap::new(), @@ -429,14 +430,14 @@ mod tests { // write 5 messages for i in 0..5 { let message = Message { - keys: vec![format!("key_{}", i)], + keys: Arc::from(vec![format!("key_{}", i)]), tags: None, value: format!("message {}", i).as_bytes().to_vec().into(), offset: None, event_time: Utc::now(), id: MessageID { - vertex_name: "vertex".to_string(), - offset: format!("{}", i + 1), + vertex_name: "vertex".to_string().into(), + offset: format!("{}", i + 1).into(), index: i, }, headers: HashMap::new(), diff --git a/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs b/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs index 5ff61de3d5..969f343ab1 100644 --- a/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs +++ b/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs @@ -201,7 +201,7 @@ impl JetstreamWriter { // check to which partition the message should be written let partition = 
forward::determine_partition( - message.id.offset.clone(), + String::from_utf8_lossy(&message.id.offset).to_string(), vertex.writer_config.partitions, &mut hash, ); @@ -456,7 +456,7 @@ impl JetstreamWriter { pub(crate) struct ResolveAndPublishResult { pub(crate) pafs: Vec<(Stream, PublishAckFuture)>, pub(crate) payload: Vec, - pub(crate) offset: String, + pub(crate) offset: Bytes, } #[cfg(test)] @@ -526,14 +526,14 @@ mod tests { ); let message = Message { - keys: vec!["key_0".to_string()], + keys: Arc::from(vec!["key_0".to_string()]), tags: None, value: "message 0".as_bytes().to_vec().into(), offset: None, event_time: Utc::now(), id: MessageID { - vertex_name: "vertex".to_string(), - offset: "offset_0".to_string(), + vertex_name: "vertex".to_string().into(), + offset: "offset_0".to_string().into(), index: 0, }, headers: HashMap::new(), @@ -585,14 +585,14 @@ mod tests { .unwrap(); let message = Message { - keys: vec!["key_0".to_string()], + keys: Arc::from(vec!["key_0".to_string()]), tags: None, value: "message 0".as_bytes().to_vec().into(), offset: None, event_time: Utc::now(), id: MessageID { - vertex_name: "vertex".to_string(), - offset: "offset_0".to_string(), + vertex_name: "vertex".to_string().into(), + offset: "offset_0".to_string().into(), index: 0, }, headers: HashMap::new(), @@ -669,14 +669,14 @@ mod tests { // Publish 10 messages successfully for i in 0..10 { let message = Message { - keys: vec![format!("key_{}", i)], + keys: Arc::from(vec![format!("key_{}", i)]), tags: None, value: format!("message {}", i).as_bytes().to_vec().into(), offset: None, event_time: Utc::now(), id: MessageID { - vertex_name: "vertex".to_string(), - offset: format!("offset_{}", i), + vertex_name: "vertex".to_string().into(), + offset: format!("offset_{}", i).into(), index: i, }, headers: HashMap::new(), @@ -694,14 +694,14 @@ mod tests { // Attempt to publish a message which has a payload size greater than the max_message_size // so that it fails and sync write will be attempted 
and it will be blocked let message = Message { - keys: vec!["key_11".to_string()], + keys: Arc::from(vec!["key_11".to_string()]), tags: None, value: vec![0; 1025].into(), offset: None, event_time: Utc::now(), id: MessageID { - vertex_name: "vertex".to_string(), - offset: "offset_11".to_string(), + vertex_name: "vertex".to_string().into(), + offset: "offset_11".to_string().into(), index: 11, }, headers: HashMap::new(), @@ -961,14 +961,14 @@ mod tests { // Publish 500 messages for i in 0..500 { let message = Message { - keys: vec![format!("key_{}", i)], + keys: Arc::from(vec![format!("key_{}", i)]), tags: None, value: format!("message {}", i).as_bytes().to_vec().into(), offset: None, event_time: Utc::now(), id: MessageID { - vertex_name: "vertex".to_string(), - offset: format!("offset_{}", i), + vertex_name: "vertex".to_string().into(), + offset: format!("offset_{}", i).into(), index: i, }, headers: HashMap::new(), @@ -1049,14 +1049,14 @@ mod tests { // Publish 100 messages successfully for i in 0..100 { let message = Message { - keys: vec![format!("key_{}", i)], + keys: Arc::from(vec![format!("key_{}", i)]), tags: None, value: format!("message {}", i).as_bytes().to_vec().into(), offset: None, event_time: Utc::now(), id: MessageID { - vertex_name: "vertex".to_string(), - offset: format!("offset_{}", i), + vertex_name: "vertex".to_string().into(), + offset: format!("offset_{}", i).into(), index: i, }, headers: HashMap::new(), @@ -1076,21 +1076,21 @@ mod tests { // Attempt to publish the 101st message, which should get stuck in the retry loop // because the max message size is set to 1024 let message = Message { - keys: vec!["key_101".to_string()], + keys: Arc::from(vec!["key_101".to_string()]), tags: None, value: vec![0; 1025].into(), offset: None, event_time: Utc::now(), id: MessageID { - vertex_name: "vertex".to_string(), - offset: "offset_101".to_string(), + vertex_name: "vertex".to_string().into(), + offset: "offset_101".to_string().into(), index: 101, }, headers: 
HashMap::new(), }; let (ack_tx, ack_rx) = tokio::sync::oneshot::channel(); tracker_handle - .insert("offset_101".to_string(), ack_tx) + .insert("offset_101".to_string().into(), ack_tx) .await .unwrap(); ack_rxs.push(ack_rx); @@ -1189,14 +1189,14 @@ mod tests { let mut ack_rxs = vec![]; for i in 0..10 { let message = Message { - keys: vec![format!("key_{}", i)], - tags: Some(vec!["tag1".to_string(), "tag2".to_string()]), + keys: Arc::from(vec![format!("key_{}", i)]), + tags: Some(Arc::from(vec!["tag1".to_string(), "tag2".to_string()])), value: format!("message {}", i).as_bytes().to_vec().into(), offset: None, event_time: Utc::now(), id: MessageID { - vertex_name: "vertex".to_string(), - offset: format!("offset_{}", i), + vertex_name: "vertex".to_string().into(), + offset: format!("offset_{}", i).into(), index: i, }, headers: HashMap::new(), diff --git a/rust/numaflow-core/src/shared/forward.rs b/rust/numaflow-core/src/shared/forward.rs index e249989cd1..11b9195ccb 100644 --- a/rust/numaflow-core/src/shared/forward.rs +++ b/rust/numaflow-core/src/shared/forward.rs @@ -1,11 +1,12 @@ use numaflow_models::models::ForwardConditions; use std::hash::{DefaultHasher, Hasher}; +use std::sync::Arc; /// Checks if the message should to written to downstream vertex based the conditions /// and message tags. If not tags are provided by there are edge conditions present, we will /// still forward to all vertices. 
pub(crate) fn should_forward( - tags: Option>, + tags: Option>, conditions: Option>, ) -> bool { conditions.map_or(true, |conditions| { @@ -81,7 +82,7 @@ mod tests { let mut tag_conditions = TagConditions::new(vec!["tag1".to_string(), "tag2".to_string()]); tag_conditions.operator = Some("and".to_string()); let conditions = ForwardConditions::new(tag_conditions); - let tags = Some(vec!["tag1".to_string(), "tag2".to_string()]); + let tags = Some(Arc::from(vec!["tag1".to_string(), "tag2".to_string()])); let result = should_forward(tags, Some(Box::new(conditions))); assert!(result); } @@ -91,7 +92,7 @@ mod tests { let mut tag_conditions = TagConditions::new(vec!["tag1".to_string()]); tag_conditions.operator = Some("or".to_string()); let conditions = ForwardConditions::new(tag_conditions); - let tags = Some(vec!["tag2".to_string(), "tag1".to_string()]); + let tags = Some(Arc::from(vec!["tag2".to_string(), "tag1".to_string()])); let result = should_forward(tags, Some(Box::new(conditions))); assert!(result); } @@ -101,7 +102,7 @@ mod tests { let mut tag_conditions = TagConditions::new(vec!["tag1".to_string()]); tag_conditions.operator = Some("not".to_string()); let conditions = ForwardConditions::new(tag_conditions); - let tags = Some(vec!["tag2".to_string()]); + let tags = Some(Arc::from(vec!["tag2".to_string()])); let result = should_forward(tags, Some(Box::new(conditions))); assert!(result); } diff --git a/rust/numaflow-core/src/sink.rs b/rust/numaflow-core/src/sink.rs index 6f8fa84542..474f91e77f 100644 --- a/rust/numaflow-core/src/sink.rs +++ b/rust/numaflow-core/src/sink.rs @@ -607,6 +607,7 @@ impl Drop for SinkWriter { mod tests { use chrono::Utc; use numaflow::sink; + use std::sync::Arc; use tokio::time::Duration; use tokio_util::sync::CancellationToken; @@ -650,14 +651,14 @@ mod tests { let messages: Vec = (0..5) .map(|i| Message { - keys: vec![format!("key_{}", i)], + keys: Arc::from(vec![format!("key_{}", i)]), tags: None, value: format!("message {}", 
i).as_bytes().to_vec().into(), offset: None, event_time: Utc::now(), id: MessageID { - vertex_name: "vertex".to_string(), - offset: format!("offset_{}", i), + vertex_name: "vertex".to_string().into(), + offset: format!("offset_{}", i).into(), index: i, }, headers: HashMap::new(), @@ -685,14 +686,14 @@ mod tests { let messages: Vec = (0..10) .map(|i| Message { - keys: vec![format!("key_{}", i)], + keys: Arc::from(vec![format!("key_{}", i)]), tags: None, value: format!("message {}", i).as_bytes().to_vec().into(), offset: None, event_time: Utc::now(), id: MessageID { - vertex_name: "vertex".to_string(), - offset: format!("offset_{}", i), + vertex_name: "vertex".to_string().into(), + offset: format!("offset_{}", i).into(), index: i, }, headers: HashMap::new(), @@ -763,14 +764,14 @@ mod tests { let messages: Vec = (0..10) .map(|i| Message { - keys: vec!["error".to_string()], + keys: Arc::from(vec!["error".to_string()]), tags: None, value: format!("message {}", i).as_bytes().to_vec().into(), offset: None, event_time: Utc::now(), id: MessageID { - vertex_name: "vertex".to_string(), - offset: format!("offset_{}", i), + vertex_name: "vertex".to_string().into(), + offset: format!("offset_{}", i).into(), index: i, }, headers: HashMap::new(), @@ -850,14 +851,14 @@ mod tests { let messages: Vec = (0..20) .map(|i| Message { - keys: vec!["fallback".to_string()], + keys: Arc::from(vec!["fallback".to_string()]), tags: None, value: format!("message {}", i).as_bytes().to_vec().into(), offset: None, event_time: Utc::now(), id: MessageID { - vertex_name: "vertex".to_string(), - offset: format!("offset_{}", i), + vertex_name: "vertex".to_string().into(), + offset: format!("offset_{}", i).into(), index: i, }, headers: HashMap::new(), diff --git a/rust/numaflow-core/src/sink/blackhole.rs b/rust/numaflow-core/src/sink/blackhole.rs index 7b6bc870b3..dd537d18b1 100644 --- a/rust/numaflow-core/src/sink/blackhole.rs +++ b/rust/numaflow-core/src/sink/blackhole.rs @@ -20,6 +20,7 @@ impl Sink for 
BlackholeSink { #[cfg(test)] mod tests { use chrono::Utc; + use std::sync::Arc; use super::BlackholeSink; use crate::message::IntOffset; @@ -31,28 +32,28 @@ mod tests { let mut sink = BlackholeSink; let messages = vec![ Message { - keys: vec![], + keys: Arc::from(vec![]), tags: None, value: b"Hello, World!".to_vec().into(), offset: Some(Offset::Int(IntOffset::new(1, 0))), event_time: Utc::now(), headers: Default::default(), id: MessageID { - vertex_name: "vertex".to_string(), - offset: "1".to_string(), + vertex_name: "vertex".to_string().into(), + offset: "1".to_string().into(), index: 0, }, }, Message { - keys: vec![], + keys: Arc::from(vec![]), tags: None, value: b"Hello, World!".to_vec().into(), offset: Some(Offset::Int(IntOffset::new(1, 0))), event_time: Utc::now(), headers: Default::default(), id: MessageID { - vertex_name: "vertex".to_string(), - offset: "2".to_string(), + vertex_name: "vertex".to_string().into(), + offset: "2".to_string().into(), index: 1, }, }, diff --git a/rust/numaflow-core/src/sink/log.rs b/rust/numaflow-core/src/sink/log.rs index e83fa08472..a82670e8d8 100644 --- a/rust/numaflow-core/src/sink/log.rs +++ b/rust/numaflow-core/src/sink/log.rs @@ -36,6 +36,7 @@ impl Sink for LogSink { #[cfg(test)] mod tests { use chrono::Utc; + use std::sync::Arc; use super::LogSink; use crate::message::IntOffset; @@ -47,28 +48,28 @@ mod tests { let mut sink = LogSink; let messages = vec![ Message { - keys: vec![], + keys: Arc::from(vec![]), tags: None, value: b"Hello, World!".to_vec().into(), offset: Some(Offset::Int(IntOffset::new(1, 0))), event_time: Utc::now(), headers: Default::default(), id: MessageID { - vertex_name: "vertex".to_string(), - offset: "1".to_string(), + vertex_name: "vertex".to_string().into(), + offset: "1".to_string().into(), index: 0, }, }, Message { - keys: vec![], + keys: Arc::from(vec![]), tags: None, value: b"Hello, World!".to_vec().into(), offset: Some(Offset::Int(IntOffset::new(1, 0))), event_time: Utc::now(), headers: 
Default::default(), id: MessageID { - vertex_name: "vertex".to_string(), - offset: "2".to_string(), + vertex_name: "vertex".to_string().into(), + offset: "2".to_string().into(), index: 1, }, }, diff --git a/rust/numaflow-core/src/sink/user_defined.rs b/rust/numaflow-core/src/sink/user_defined.rs index e475c77cd4..a1817b1ae0 100644 --- a/rust/numaflow-core/src/sink/user_defined.rs +++ b/rust/numaflow-core/src/sink/user_defined.rs @@ -120,6 +120,7 @@ impl Sink for UserDefinedSink { mod tests { use chrono::offset::Utc; use numaflow::sink; + use std::sync::Arc; use tokio::sync::mpsc; use tracing::info; @@ -179,28 +180,28 @@ mod tests { let messages = vec![ Message { - keys: vec![], + keys: Arc::from(vec![]), tags: None, value: b"Hello, World!".to_vec().into(), offset: None, event_time: Utc::now(), headers: Default::default(), id: MessageID { - vertex_name: "vertex".to_string(), - offset: "1".to_string(), + vertex_name: "vertex".to_string().into(), + offset: "1".to_string().into(), index: 0, }, }, Message { - keys: vec![], + keys: Arc::from(vec![]), tags: None, value: b"Hello, World!".to_vec().into(), offset: None, event_time: Utc::now(), headers: Default::default(), id: MessageID { - vertex_name: "vertex".to_string(), - offset: "2".to_string(), + vertex_name: "vertex".to_string().into(), + offset: "2".to_string().into(), index: 1, }, }, diff --git a/rust/numaflow-core/src/source.rs b/rust/numaflow-core/src/source.rs index df551e6962..3f2816514f 100644 --- a/rust/numaflow-core/src/source.rs +++ b/rust/numaflow-core/src/source.rs @@ -295,7 +295,7 @@ impl Source { // insert the offset and the ack one shot in the tracker. tracker_handle - .insert(offset.to_string(), resp_ack_tx) + .insert(offset.to_string().into(), resp_ack_tx) .await?; // store the ack one shot in the batch to invoke ack later. 
diff --git a/rust/numaflow-core/src/source/generator.rs b/rust/numaflow-core/src/source/generator.rs index 3dd4f8aba9..fdc5d590a5 100644 --- a/rust/numaflow-core/src/source/generator.rs +++ b/rust/numaflow-core/src/source/generator.rs @@ -21,6 +21,7 @@ use tokio_stream::StreamExt; /// NOTE: The minimum granularity of duration is 10ms. mod stream_generator { use std::pin::Pin; + use std::sync::Arc; use std::task::{Context, Poll}; use std::time::Duration; @@ -165,14 +166,14 @@ mod stream_generator { } Message { - keys: self.next_key_to_be_fetched(), + keys: Arc::from(self.next_key_to_be_fetched()), tags: None, value: data.into(), offset: Some(offset.clone()), event_time, id: MessageID { - vertex_name: get_vertex_name().to_string(), - offset: offset.to_string(), + vertex_name: get_vertex_name().to_string().into(), + offset: offset.to_string().into(), index: Default::default(), }, headers: Default::default(), diff --git a/rust/numaflow-core/src/source/pulsar.rs b/rust/numaflow-core/src/source/pulsar.rs index 64a3ecda9e..6a2c7162b6 100644 --- a/rust/numaflow-core/src/source/pulsar.rs +++ b/rust/numaflow-core/src/source/pulsar.rs @@ -1,3 +1,4 @@ +use std::sync::Arc; use std::time::Duration; use numaflow_pulsar::source::{PulsarMessage, PulsarSource, PulsarSourceConfig}; @@ -14,14 +15,14 @@ impl TryFrom for Message { let offset = Offset::Int(IntOffset::new(message.offset, 1)); // FIXME: partition id Ok(Message { - keys: vec![message.key], + keys: Arc::from(vec![message.key]), tags: None, value: message.payload, offset: Some(offset.clone()), event_time: message.event_time, id: MessageID { - vertex_name: get_vertex_name().to_string(), - offset: offset.to_string(), + vertex_name: get_vertex_name().to_string().into(), + offset: offset.to_string().into(), index: 0, }, headers: message.headers, @@ -153,6 +154,7 @@ mod tests { assert_eq!(messages.len(), 10); let offsets: Vec = messages.into_iter().map(|m| m.offset.unwrap()).collect(); + pulsar.ack(offsets).await?; Ok(()) diff 
--git a/rust/numaflow-core/src/tracker.rs b/rust/numaflow-core/src/tracker.rs index 9f0839a2e2..a1dd32662b 100644 --- a/rust/numaflow-core/src/tracker.rs +++ b/rust/numaflow-core/src/tracker.rs @@ -11,6 +11,7 @@ use crate::error::Error; use crate::message::ReadAck; use crate::Result; +use bytes::Bytes; use std::collections::HashMap; use tokio::sync::{mpsc, oneshot}; use tracing::warn; @@ -176,10 +177,13 @@ impl TrackerHandle { /// Inserts a new message into the Tracker with the given offset and acknowledgment sender. pub(crate) async fn insert( &self, - offset: String, + offset: Bytes, ack_send: oneshot::Sender, ) -> Result<()> { - let message = ActorMessage::Insert { offset, ack_send }; + let message = ActorMessage::Insert { + offset: String::from_utf8_lossy(&offset).to_string(), + ack_send, + }; self.sender .send(message) .await @@ -188,8 +192,12 @@ impl TrackerHandle { } /// Updates an existing message in the Tracker with the given offset, count, and EOF status. - pub(crate) async fn update(&self, offset: String, count: u32, eof: bool) -> Result<()> { - let message = ActorMessage::Update { offset, count, eof }; + pub(crate) async fn update(&self, offset: Bytes, count: u32, eof: bool) -> Result<()> { + let message = ActorMessage::Update { + offset: String::from_utf8_lossy(&offset).to_string(), + count, + eof, + }; self.sender .send(message) .await @@ -198,8 +206,10 @@ impl TrackerHandle { } /// Deletes a message from the Tracker with the given offset. - pub(crate) async fn delete(&self, offset: String) -> Result<()> { - let message = ActorMessage::Delete { offset }; + pub(crate) async fn delete(&self, offset: Bytes) -> Result<()> { + let message = ActorMessage::Delete { + offset: String::from_utf8_lossy(&offset).to_string(), + }; self.sender .send(message) .await @@ -208,8 +218,10 @@ impl TrackerHandle { } /// Discards a message from the Tracker with the given offset. 
- pub(crate) async fn discard(&self, offset: String) -> Result<()> { - let message = ActorMessage::Discard { offset }; + pub(crate) async fn discard(&self, offset: Bytes) -> Result<()> { + let message = ActorMessage::Discard { + offset: String::from_utf8_lossy(&offset).to_string(), + }; self.sender .send(message) .await @@ -245,15 +257,18 @@ mod tests { // Insert a new message handle - .insert("offset1".to_string(), ack_send) + .insert("offset1".to_string().into(), ack_send) .await .unwrap(); // Update the message - handle.update("offset1".to_string(), 1, true).await.unwrap(); + handle + .update("offset1".to_string().into(), 1, true) + .await + .unwrap(); // Delete the message - handle.delete("offset1".to_string()).await.unwrap(); + handle.delete("offset1".to_string().into()).await.unwrap(); // Verify that the message was deleted and ack was received let result = timeout(Duration::from_secs(1), ack_recv).await.unwrap(); @@ -269,20 +284,20 @@ mod tests { // Insert a new message handle - .insert("offset1".to_string(), ack_send) + .insert("offset1".to_string().into(), ack_send) .await .unwrap(); // Update the message with a count of 3 handle - .update("offset1".to_string(), 3, false) + .update("offset1".to_string().into(), 3, false) .await .unwrap(); // Delete the message three times - handle.delete("offset1".to_string()).await.unwrap(); - handle.delete("offset1".to_string()).await.unwrap(); - handle.delete("offset1".to_string()).await.unwrap(); + handle.delete("offset1".to_string().into()).await.unwrap(); + handle.delete("offset1".to_string().into()).await.unwrap(); + handle.delete("offset1".to_string().into()).await.unwrap(); // Verify that the message was deleted and ack was received after the third delete let result = timeout(Duration::from_secs(1), ack_recv).await.unwrap(); @@ -298,12 +313,12 @@ mod tests { // Insert a new message handle - .insert("offset1".to_string(), ack_send) + .insert("offset1".to_string().into(), ack_send) .await .unwrap(); // Discard the 
message - handle.discard("offset1".to_string()).await.unwrap(); + handle.discard("offset1".to_string().into()).await.unwrap(); // Verify that the message was discarded and nak was received let result = timeout(Duration::from_secs(1), ack_recv).await.unwrap(); @@ -319,18 +334,18 @@ mod tests { // Insert a new message handle - .insert("offset1".to_string(), ack_send) + .insert("offset1".to_string().into(), ack_send) .await .unwrap(); // Update the message with a count of 3 handle - .update("offset1".to_string(), 3, false) + .update("offset1".to_string().into(), 3, false) .await .unwrap(); // Discard the message - handle.discard("offset1".to_string()).await.unwrap(); + handle.discard("offset1".to_string().into()).await.unwrap(); // Verify that the message was discarded and nak was received let result = timeout(Duration::from_secs(1), ack_recv).await.unwrap(); diff --git a/rust/numaflow-core/src/transformer.rs b/rust/numaflow-core/src/transformer.rs index eec95e37ab..d6d63bdfea 100644 --- a/rust/numaflow-core/src/transformer.rs +++ b/rust/numaflow-core/src/transformer.rs @@ -234,7 +234,7 @@ mod tests { let transformer = Transformer::new(500, 10, client, tracker_handle.clone()).await?; let message = Message { - keys: vec!["first".into()], + keys: Arc::from(vec!["first".into()]), tags: None, value: "hello".into(), offset: Some(Offset::String(crate::message::StringOffset::new( @@ -243,8 +243,8 @@ mod tests { ))), event_time: chrono::Utc::now(), id: MessageID { - vertex_name: "vertex_name".to_string(), - offset: "0".to_string(), + vertex_name: "vertex_name".to_string().into(), + offset: "0".to_string().into(), index: 0, }, headers: Default::default(), @@ -311,7 +311,7 @@ mod tests { for i in 0..5 { let message = Message { - keys: vec![format!("key_{}", i)], + keys: Arc::from(vec![format!("key_{}", i)]), tags: None, value: format!("value_{}", i).into(), offset: Some(Offset::String(crate::message::StringOffset::new( @@ -320,8 +320,8 @@ mod tests { ))), event_time: 
chrono::Utc::now(), id: MessageID { - vertex_name: "vertex_name".to_string(), - offset: i.to_string(), + vertex_name: "vertex_name".to_string().into(), + offset: i.to_string().into(), index: i, }, headers: Default::default(), diff --git a/rust/numaflow-core/src/transformer/user_defined.rs b/rust/numaflow-core/src/transformer/user_defined.rs index 6a493c2a99..9a82275ac8 100644 --- a/rust/numaflow-core/src/transformer/user_defined.rs +++ b/rust/numaflow-core/src/transformer/user_defined.rs @@ -98,12 +98,12 @@ impl UserDefinedTransformer { for (i, result) in resp.results.into_iter().enumerate() { let message = Message { id: MessageID { - vertex_name: get_vertex_name().to_string(), + vertex_name: get_vertex_name().to_string().into(), index: i as i32, - offset: msg_info.offset.to_string(), + offset: msg_info.offset.to_string().into(), }, - keys: result.keys, - tags: Some(result.tags), + keys: Arc::from(result.keys), + tags: Some(Arc::from(result.tags)), value: result.value.into(), offset: None, event_time: utc_from_timestamp(result.event_time), @@ -142,6 +142,7 @@ impl UserDefinedTransformer { #[cfg(test)] mod tests { use std::error::Error; + use std::sync::Arc; use std::time::Duration; use numaflow::sourcetransform; @@ -194,7 +195,7 @@ mod tests { .await?; let message = crate::message::Message { - keys: vec!["first".into()], + keys: Arc::from(vec!["first".into()]), tags: None, value: "hello".into(), offset: Some(crate::message::Offset::String(StringOffset::new( @@ -203,8 +204,8 @@ mod tests { ))), event_time: chrono::Utc::now(), id: MessageID { - vertex_name: "vertex_name".to_string(), - offset: "0".to_string(), + vertex_name: "vertex_name".to_string().into(), + offset: "0".to_string().into(), index: 0, }, headers: Default::default(), From 10b4978a5dc673bdc59bb0a9284f8cc0e537be13 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 19:56:45 -0800 Subject: [PATCH 165/188] docs: updated 
CHANGELOG.md (#2276) Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- CHANGELOG.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8e21c675fd..875ad8fe82 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,14 @@ # Changelog +## v1.4.2 (2024-12-11) + + * [c9dc38f4](https://github.com/numaproj/numaflow/commit/c9dc38f4cce2b5db598536a7539f2a35febcf1ca) Update manifests to v1.4.2 + * [fea792b3](https://github.com/numaproj/numaflow/commit/fea792b36bd342adcdcdd96768b6fdd68921bfd2) fix: set max decode size of proto message (#2275) + +### Contributors + + * Sidhant Kohli + ## v1.4.1 (2024-12-05) * [346f2a73](https://github.com/numaproj/numaflow/commit/346f2a7321d158fa9ce9392cfdcc76d671d6f577) Update manifests to v1.4.1 From a32ebbcc93d8a0c566652efb2f3c8d260d8fb7b0 Mon Sep 17 00:00:00 2001 From: Sidhant Kohli Date: Fri, 13 Dec 2024 14:27:19 -0800 Subject: [PATCH 166/188] chore: add container type to server info log (#2286) Signed-off-by: Sidhant Kohli --- rust/numaflow-core/src/shared/server_info.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/rust/numaflow-core/src/shared/server_info.rs b/rust/numaflow-core/src/shared/server_info.rs index 40ec6b37d6..ee3b1c8d6a 100644 --- a/rust/numaflow-core/src/shared/server_info.rs +++ b/rust/numaflow-core/src/shared/server_info.rs @@ -97,14 +97,17 @@ pub(crate) async fn sdk_server_info( // Read the server info file let server_info = read_server_info(&file_path, cln_token).await?; + // Get the container type from the server info file + let container_type = get_container_type(&file_path).unwrap_or(ContainerType::Unknown); + // Log the server info - info!("Server info file: {:?}", server_info); + info!(?container_type, ?server_info, "Server info file"); // Extract relevant fields from server info let sdk_version = &server_info.version; let min_numaflow_version = &server_info.minimum_numaflow_version; let sdk_language = 
&server_info.language; - let container_type = get_container_type(&file_path).unwrap_or(ContainerType::Unknown); + // Get version information let version_info = version::get_version_info(); let numaflow_version = &version_info.version; From 883650cbe584668b9b4492cc188e5ae5c4ef8e6b Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Sun, 15 Dec 2024 12:11:34 -0800 Subject: [PATCH 167/188] chore: always publish watermark to sink OT (#2288) --- pkg/sinks/forward/forward.go | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/pkg/sinks/forward/forward.go b/pkg/sinks/forward/forward.go index ac60ecddf7..15b14daf3f 100644 --- a/pkg/sinks/forward/forward.go +++ b/pkg/sinks/forward/forward.go @@ -230,7 +230,7 @@ func (df *DataForward) forwardAChunk(ctx context.Context) error { return nil } - // if the validation passed, we will publish the watermark to all the toBuffer partitions. + // if the validation passed, we will publish the idle watermark to SINK OT even though we do not use it today. 
idlehandler.PublishIdleWatermark(ctx, df.sinkWriter.GetPartitionIdx(), df.sinkWriter, df.wmPublisher, df.idleManager, df.opts.logger, df.vertexName, df.pipelineName, dfv1.VertexTypeSink, df.vertexReplica, wmb.Watermark(time.UnixMilli(processorWMB.Watermark))) return nil } @@ -271,7 +271,7 @@ func (df *DataForward) forwardAChunk(ctx context.Context) error { } // write the messages to the sink - writeOffsets, fallbackMessages, err := df.writeToSink(ctx, df.sinkWriter, writeMessages, false) + _, fallbackMessages, err := df.writeToSink(ctx, df.sinkWriter, writeMessages, false) // error will not be nil only when we get ctx.Done() if err != nil { df.opts.logger.Errorw("failed to write to sink", zap.Error(err)) @@ -292,19 +292,13 @@ func (df *DataForward) forwardAChunk(ctx context.Context) error { } } - // FIXME: offsets are not supported for sink, so len(writeOffsets) > 0 will always fail - // in sink we don't drop any messages - // so len(dataMessages) should be the same as len(writeOffsets) - // if len(writeOffsets) is greater than 0, publish normal watermark - // if len(writeOffsets) is 0, meaning we only have control messages, - // we should not publish anything: the next len(readMessage) check will handle this idling situation - if len(writeOffsets) > 0 { - df.wmPublisher.PublishWatermark(processorWM, nil, int32(0)) - // reset because the toBuffer is no longer idling - df.idleManager.MarkActive(df.fromBufferPartition.GetPartitionIdx(), df.sinkWriter.GetName()) - } + // Always publish the watermark to SINK OT even though we do not use it today. + // There's no offset returned from sink writer. 
+ df.wmPublisher.PublishWatermark(processorWM, nil, int32(0)) + // reset because the toBuffer is no longer idling + df.idleManager.MarkActive(df.fromBufferPartition.GetPartitionIdx(), df.sinkWriter.GetName()) - df.opts.logger.Debugw("write to sink completed") + df.opts.logger.Debugw("Write to sink completed") ackStart := time.Now() err = df.ackFromBuffer(ctx, readOffsets) From d75998fdd3fcb5a3ecc0ccfc3e47a8bc7f97c33b Mon Sep 17 00:00:00 2001 From: Vedant Gupta <49195734+veds-g@users.noreply.github.com> Date: Mon, 16 Dec 2024 03:38:59 +0530 Subject: [PATCH 168/188] feat: pipeline gauge metrics (#2284) Signed-off-by: veds-g --- .../namespaced-numaflow-server.yaml | 87 +++++++++++------ config/advanced-install/numaflow-server.yaml | 87 +++++++++++------ .../numaflow-server-metrics-proxy-config.yaml | 87 +++++++++++------ config/install.yaml | 87 +++++++++++------ config/namespace-install.yaml | 87 +++++++++++------ server/apis/v1/promql_service_test.go | 97 ++++++++++++++++++- .../PodDetails/partials/Metrics/index.tsx | 5 + .../Metrics/partials/LineChart/index.tsx | 20 +++- .../partials/Metrics/utils/constants.ts | 6 +- 9 files changed, 397 insertions(+), 166 deletions(-) diff --git a/config/advanced-install/namespaced-numaflow-server.yaml b/config/advanced-install/namespaced-numaflow-server.yaml index 9d34601fd3..7cb350b073 100644 --- a/config/advanced-install/namespaced-numaflow-server.yaml +++ b/config/advanced-install/namespaced-numaflow-server.yaml @@ -143,33 +143,71 @@ data: # example for local prometheus service # url: http://prometheus-operated.monitoring.svc.cluster.local:9090 patterns: + - name: vertex_gauge + object: vertex + title: Vertex Pending Messages + description: This query is the total number of pending messages for the vertex + expr: | + sum($metric_name{$filters}) by ($dimension, period) + params: + - name: start_time + required: false + - name: end_time + required: false + metrics: + - metric_name: vertex_pending_messages + required_filters: + - 
namespace + - pipeline + - vertex + dimensions: + - name: pod + # expr: optional expression for prometheus query + # overrides the default expression + filters: + - name: pod + required: false + - name: period + required: false + - name: vertex + # expr: optional expression for prometheus query + # overrides the default expression + filters: + - name: period + required: false + - name: mono_vertex_gauge object: mono-vertex title: Pending Messages Lag description: This query is the total number of pending messages for the mono vertex expr: | - $metric_name{$filters} + sum($metric_name{$filters}) by ($dimension, period) params: - name: start_time required: false - name: end_time required: false metrics: - - metric_name: monovtx_pending - required_filters: - - namespace - - mvtx_name - dimensions: - - name: pod - filters: - - name: pod - required: false - - name: period - required: false - - name: mono-vertex - filters: - - name: period - required: false + - metric_name: monovtx_pending + required_filters: + - namespace + - mvtx_name + dimensions: + - name: pod + # expr: optional expression for prometheus query + # overrides the default expression + filters: + - name: pod + required: false + - name: period + required: false + - name: mono-vertex + # expr: optional expression for prometheus query + # overrides the default expression + filters: + - name: period + required: false + - name: mono_vertex_histogram object: mono-vertex title: Processing Time Latency @@ -192,11 +230,7 @@ data: - mvtx_name dimensions: - name: mono-vertex - # expr: optional expression for prometheus query - # overrides the default expression - name: pod - # expr: optional expression for prometheus query - # overrides the default expression filters: - name: pod required: false @@ -206,15 +240,11 @@ data: - mvtx_name dimensions: - name: mono-vertex - # expr: optional expression for prometheus query - # overrides the default expression - name: pod - # expr: optional expression for prometheus query 
- # overrides the default expression filters: - name: pod required: false - # Add histogram metrics similar to the pattern above + - name: vertex_throughput object: vertex title: Vertex Throughput and Message Rates @@ -235,12 +265,11 @@ data: - vertex dimensions: - name: vertex - # expr: optional expression for prometheus query - # overrides the default expression - name: pod filters: - name: pod required: false + - name: mono_vertex_throughput object: mono-vertex title: Mono-Vertex Throughput and Message Rates @@ -260,11 +289,7 @@ data: - mvtx_name dimensions: - name: mono-vertex - # expr: optional expression for prometheus query - # overrides the default expression - name: pod - # expr: optional expression for prometheus query - # overrides the default expression filters: - name: pod required: false diff --git a/config/advanced-install/numaflow-server.yaml b/config/advanced-install/numaflow-server.yaml index bca4a13bbf..fcb283f11e 100644 --- a/config/advanced-install/numaflow-server.yaml +++ b/config/advanced-install/numaflow-server.yaml @@ -150,33 +150,71 @@ data: # example for local prometheus service # url: http://prometheus-operated.monitoring.svc.cluster.local:9090 patterns: + - name: vertex_gauge + object: vertex + title: Vertex Pending Messages + description: This query is the total number of pending messages for the vertex + expr: | + sum($metric_name{$filters}) by ($dimension, period) + params: + - name: start_time + required: false + - name: end_time + required: false + metrics: + - metric_name: vertex_pending_messages + required_filters: + - namespace + - pipeline + - vertex + dimensions: + - name: pod + # expr: optional expression for prometheus query + # overrides the default expression + filters: + - name: pod + required: false + - name: period + required: false + - name: vertex + # expr: optional expression for prometheus query + # overrides the default expression + filters: + - name: period + required: false + - name: mono_vertex_gauge object: 
mono-vertex title: Pending Messages Lag description: This query is the total number of pending messages for the mono vertex expr: | - $metric_name{$filters} + sum($metric_name{$filters}) by ($dimension, period) params: - name: start_time required: false - name: end_time required: false metrics: - - metric_name: monovtx_pending - required_filters: - - namespace - - mvtx_name - dimensions: - - name: pod - filters: - - name: pod - required: false - - name: period - required: false - - name: mono-vertex - filters: - - name: period - required: false + - metric_name: monovtx_pending + required_filters: + - namespace + - mvtx_name + dimensions: + - name: pod + # expr: optional expression for prometheus query + # overrides the default expression + filters: + - name: pod + required: false + - name: period + required: false + - name: mono-vertex + # expr: optional expression for prometheus query + # overrides the default expression + filters: + - name: period + required: false + - name: mono_vertex_histogram object: mono-vertex title: Processing Time Latency @@ -199,11 +237,7 @@ data: - mvtx_name dimensions: - name: mono-vertex - # expr: optional expression for prometheus query - # overrides the default expression - name: pod - # expr: optional expression for prometheus query - # overrides the default expression filters: - name: pod required: false @@ -213,15 +247,11 @@ data: - mvtx_name dimensions: - name: mono-vertex - # expr: optional expression for prometheus query - # overrides the default expression - name: pod - # expr: optional expression for prometheus query - # overrides the default expression filters: - name: pod required: false - # Add histogram metrics similar to the pattern above + - name: vertex_throughput object: vertex title: Vertex Throughput and Message Rates @@ -242,12 +272,11 @@ data: - vertex dimensions: - name: vertex - # expr: optional expression for prometheus query - # overrides the default expression - name: pod filters: - name: pod required: false 
+ - name: mono_vertex_throughput object: mono-vertex title: Mono-Vertex Throughput and Message Rates @@ -267,11 +296,7 @@ data: - mvtx_name dimensions: - name: mono-vertex - # expr: optional expression for prometheus query - # overrides the default expression - name: pod - # expr: optional expression for prometheus query - # overrides the default expression filters: - name: pod required: false diff --git a/config/base/numaflow-server/numaflow-server-metrics-proxy-config.yaml b/config/base/numaflow-server/numaflow-server-metrics-proxy-config.yaml index d97370e392..fe634f5f17 100644 --- a/config/base/numaflow-server/numaflow-server-metrics-proxy-config.yaml +++ b/config/base/numaflow-server/numaflow-server-metrics-proxy-config.yaml @@ -9,33 +9,71 @@ data: # example for local prometheus service # url: http://prometheus-operated.monitoring.svc.cluster.local:9090 patterns: + - name: vertex_gauge + object: vertex + title: Vertex Pending Messages + description: This query is the total number of pending messages for the vertex + expr: | + sum($metric_name{$filters}) by ($dimension, period) + params: + - name: start_time + required: false + - name: end_time + required: false + metrics: + - metric_name: vertex_pending_messages + required_filters: + - namespace + - pipeline + - vertex + dimensions: + - name: pod + # expr: optional expression for prometheus query + # overrides the default expression + filters: + - name: pod + required: false + - name: period + required: false + - name: vertex + # expr: optional expression for prometheus query + # overrides the default expression + filters: + - name: period + required: false + - name: mono_vertex_gauge object: mono-vertex title: Pending Messages Lag description: This query is the total number of pending messages for the mono vertex expr: | - $metric_name{$filters} + sum($metric_name{$filters}) by ($dimension, period) params: - name: start_time required: false - name: end_time required: false metrics: - - metric_name: 
monovtx_pending - required_filters: - - namespace - - mvtx_name - dimensions: - - name: pod - filters: - - name: pod - required: false - - name: period - required: false - - name: mono-vertex - filters: - - name: period - required: false + - metric_name: monovtx_pending + required_filters: + - namespace + - mvtx_name + dimensions: + - name: pod + # expr: optional expression for prometheus query + # overrides the default expression + filters: + - name: pod + required: false + - name: period + required: false + - name: mono-vertex + # expr: optional expression for prometheus query + # overrides the default expression + filters: + - name: period + required: false + - name: mono_vertex_histogram object: mono-vertex title: Processing Time Latency @@ -58,11 +96,7 @@ data: - mvtx_name dimensions: - name: mono-vertex - # expr: optional expression for prometheus query - # overrides the default expression - name: pod - # expr: optional expression for prometheus query - # overrides the default expression filters: - name: pod required: false @@ -72,15 +106,11 @@ data: - mvtx_name dimensions: - name: mono-vertex - # expr: optional expression for prometheus query - # overrides the default expression - name: pod - # expr: optional expression for prometheus query - # overrides the default expression filters: - name: pod required: false - # Add histogram metrics similar to the pattern above + - name: vertex_throughput object: vertex title: Vertex Throughput and Message Rates @@ -101,12 +131,11 @@ data: - vertex dimensions: - name: vertex - # expr: optional expression for prometheus query - # overrides the default expression - name: pod filters: - name: pod required: false + - name: mono_vertex_throughput object: mono-vertex title: Mono-Vertex Throughput and Message Rates @@ -126,11 +155,7 @@ data: - mvtx_name dimensions: - name: mono-vertex - # expr: optional expression for prometheus query - # overrides the default expression - name: pod - # expr: optional expression for 
prometheus query - # overrides the default expression filters: - name: pod required: false \ No newline at end of file diff --git a/config/install.yaml b/config/install.yaml index 72df249f0f..69fbd4ca4d 100644 --- a/config/install.yaml +++ b/config/install.yaml @@ -28563,33 +28563,71 @@ data: # example for local prometheus service # url: http://prometheus-operated.monitoring.svc.cluster.local:9090 patterns: + - name: vertex_gauge + object: vertex + title: Vertex Pending Messages + description: This query is the total number of pending messages for the vertex + expr: | + sum($metric_name{$filters}) by ($dimension, period) + params: + - name: start_time + required: false + - name: end_time + required: false + metrics: + - metric_name: vertex_pending_messages + required_filters: + - namespace + - pipeline + - vertex + dimensions: + - name: pod + # expr: optional expression for prometheus query + # overrides the default expression + filters: + - name: pod + required: false + - name: period + required: false + - name: vertex + # expr: optional expression for prometheus query + # overrides the default expression + filters: + - name: period + required: false + - name: mono_vertex_gauge object: mono-vertex title: Pending Messages Lag description: This query is the total number of pending messages for the mono vertex expr: | - $metric_name{$filters} + sum($metric_name{$filters}) by ($dimension, period) params: - name: start_time required: false - name: end_time required: false metrics: - - metric_name: monovtx_pending - required_filters: - - namespace - - mvtx_name - dimensions: - - name: pod - filters: - - name: pod - required: false - - name: period - required: false - - name: mono-vertex - filters: - - name: period - required: false + - metric_name: monovtx_pending + required_filters: + - namespace + - mvtx_name + dimensions: + - name: pod + # expr: optional expression for prometheus query + # overrides the default expression + filters: + - name: pod + required: false + 
- name: period + required: false + - name: mono-vertex + # expr: optional expression for prometheus query + # overrides the default expression + filters: + - name: period + required: false + - name: mono_vertex_histogram object: mono-vertex title: Processing Time Latency @@ -28612,11 +28650,7 @@ data: - mvtx_name dimensions: - name: mono-vertex - # expr: optional expression for prometheus query - # overrides the default expression - name: pod - # expr: optional expression for prometheus query - # overrides the default expression filters: - name: pod required: false @@ -28626,15 +28660,11 @@ data: - mvtx_name dimensions: - name: mono-vertex - # expr: optional expression for prometheus query - # overrides the default expression - name: pod - # expr: optional expression for prometheus query - # overrides the default expression filters: - name: pod required: false - # Add histogram metrics similar to the pattern above + - name: vertex_throughput object: vertex title: Vertex Throughput and Message Rates @@ -28655,12 +28685,11 @@ data: - vertex dimensions: - name: vertex - # expr: optional expression for prometheus query - # overrides the default expression - name: pod filters: - name: pod required: false + - name: mono_vertex_throughput object: mono-vertex title: Mono-Vertex Throughput and Message Rates @@ -28680,11 +28709,7 @@ data: - mvtx_name dimensions: - name: mono-vertex - # expr: optional expression for prometheus query - # overrides the default expression - name: pod - # expr: optional expression for prometheus query - # overrides the default expression filters: - name: pod required: false diff --git a/config/namespace-install.yaml b/config/namespace-install.yaml index 1ae302f71f..810422a7cc 100644 --- a/config/namespace-install.yaml +++ b/config/namespace-install.yaml @@ -28451,33 +28451,71 @@ data: # example for local prometheus service # url: http://prometheus-operated.monitoring.svc.cluster.local:9090 patterns: + - name: vertex_gauge + object: vertex + 
title: Vertex Pending Messages + description: This query is the total number of pending messages for the vertex + expr: | + sum($metric_name{$filters}) by ($dimension, period) + params: + - name: start_time + required: false + - name: end_time + required: false + metrics: + - metric_name: vertex_pending_messages + required_filters: + - namespace + - pipeline + - vertex + dimensions: + - name: pod + # expr: optional expression for prometheus query + # overrides the default expression + filters: + - name: pod + required: false + - name: period + required: false + - name: vertex + # expr: optional expression for prometheus query + # overrides the default expression + filters: + - name: period + required: false + - name: mono_vertex_gauge object: mono-vertex title: Pending Messages Lag description: This query is the total number of pending messages for the mono vertex expr: | - $metric_name{$filters} + sum($metric_name{$filters}) by ($dimension, period) params: - name: start_time required: false - name: end_time required: false metrics: - - metric_name: monovtx_pending - required_filters: - - namespace - - mvtx_name - dimensions: - - name: pod - filters: - - name: pod - required: false - - name: period - required: false - - name: mono-vertex - filters: - - name: period - required: false + - metric_name: monovtx_pending + required_filters: + - namespace + - mvtx_name + dimensions: + - name: pod + # expr: optional expression for prometheus query + # overrides the default expression + filters: + - name: pod + required: false + - name: period + required: false + - name: mono-vertex + # expr: optional expression for prometheus query + # overrides the default expression + filters: + - name: period + required: false + - name: mono_vertex_histogram object: mono-vertex title: Processing Time Latency @@ -28500,11 +28538,7 @@ data: - mvtx_name dimensions: - name: mono-vertex - # expr: optional expression for prometheus query - # overrides the default expression - name: pod - # 
expr: optional expression for prometheus query - # overrides the default expression filters: - name: pod required: false @@ -28514,15 +28548,11 @@ data: - mvtx_name dimensions: - name: mono-vertex - # expr: optional expression for prometheus query - # overrides the default expression - name: pod - # expr: optional expression for prometheus query - # overrides the default expression filters: - name: pod required: false - # Add histogram metrics similar to the pattern above + - name: vertex_throughput object: vertex title: Vertex Throughput and Message Rates @@ -28543,12 +28573,11 @@ data: - vertex dimensions: - name: vertex - # expr: optional expression for prometheus query - # overrides the default expression - name: pod filters: - name: pod required: false + - name: mono_vertex_throughput object: mono-vertex title: Mono-Vertex Throughput and Message Rates @@ -28568,11 +28597,7 @@ data: - mvtx_name dimensions: - name: mono-vertex - # expr: optional expression for prometheus query - # overrides the default expression - name: pod - # expr: optional expression for prometheus query - # overrides the default expression filters: - name: pod required: false diff --git a/server/apis/v1/promql_service_test.go b/server/apis/v1/promql_service_test.go index 3d923bb850..54270cf0d2 100644 --- a/server/apis/v1/promql_service_test.go +++ b/server/apis/v1/promql_service_test.go @@ -274,7 +274,7 @@ func Test_PromQueryBuilder(t *testing.T) { }) } - // tests for gauge metrics + // tests for mono-vertex gauge metrics var gauge_service = &PromQlService{ PlaceHolders: map[string]map[string][]string{ "monovtx_pending": { @@ -305,7 +305,7 @@ func Test_PromQueryBuilder(t *testing.T) { "period": "5m", }, }, - expectedQuery: `monovtx_pending{namespace= "test_namespace", mvtx_name= "test_mvtx", period= "5m"}`, + expectedQuery: `sum(monovtx_pending{namespace= "test_namespace", mvtx_name= "test_mvtx", period= "5m"}) by (mvtx_name, period)`, }, { name: "Missing metric name in service config", @@ 
-337,6 +337,72 @@ func Test_PromQueryBuilder(t *testing.T) { } }) } + + // tests for pipeline gauge metrics + var pl_gauge_service = &PromQlService{ + PlaceHolders: map[string]map[string][]string{ + "vertex_pending_messages": { + "vertex": {"$dimension", "$metric_name", "$filters"}, + }, + }, + Expression: map[string]map[string]string{ + "vertex_pending_messages": { + "vertex": "$metric_name{$filters}", + }, + }, + } + + pl_gauge_metrics_tests := []struct { + name string + requestBody MetricsRequestBody + expectedQuery string + expectError bool + }{ + { + name: "Successful pipeline gauge metrics template substitution", + requestBody: MetricsRequestBody{ + MetricName: "vertex_pending_messages", + Dimension: "vertex", + Filters: map[string]string{ + "namespace": "test_namespace", + "pipeline": "test_pipeline", + "vertex": "test_vertex", + "period": "5m", + }, + }, + expectedQuery: `sum(vertex_pending_messages{namespace= "test_namespace", pipeline= "test_pipeline", vertex= "test_vertex", period= "5m"}) by (vertex, period)`, + }, + { + name: "Missing metric name in service config", + requestBody: MetricsRequestBody{ + MetricName: "non_existent_metric", + Dimension: "mono-vertex", + Filters: map[string]string{ + "namespace": "test_namespace", + "pipeline": "test_pipeline", + "vertex": "test_vertex", + "period": "5m", + }, + }, + expectError: true, + }, + } + + for _, tt := range pl_gauge_metrics_tests { + t.Run(tt.name, func(t *testing.T) { + actualQuery, err := pl_gauge_service.BuildQuery(tt.requestBody) + if tt.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + if !comparePrometheusQueries(tt.expectedQuery, actualQuery) { + t.Errorf("Prometheus queries do not match.\nExpected: %s\nGot: %s", tt.expectedQuery, actualQuery) + } else { + t.Log("Prometheus queries match!") + } + } + }) + } } func Test_QueryPrometheus(t *testing.T) { @@ -386,14 +452,37 @@ func Test_QueryPrometheus(t *testing.T) { assert.Equal(t, 1, matrix.Len()) }) - 
t.Run("Successful gauge query", func(t *testing.T) { + t.Run("Successful mono-vertex gauge query", func(t *testing.T) { + mockAPI := &MockPrometheusAPI{} + promQlService := &PromQlService{ + PrometheusClient: &Prometheus{ + Api: mockAPI, + }, + } + query := `sum(monovtx_pending{namespace="default", mvtx_name="test-mvtx", period="5m"}) by (mvtx_name, period)` + startTime := time.Now().Add(-30 * time.Minute) + endTime := time.Now() + + ctx := context.Background() + result, err := promQlService.QueryPrometheus(ctx, query, startTime, endTime) + + assert.NoError(t, err) + assert.NotNil(t, result) + + // for query range , response should be a matrix + matrix, ok := result.(model.Matrix) + assert.True(t, ok) + assert.Equal(t, 1, matrix.Len()) + }) + + t.Run("Successful pipeline gauge query", func(t *testing.T) { mockAPI := &MockPrometheusAPI{} promQlService := &PromQlService{ PrometheusClient: &Prometheus{ Api: mockAPI, }, } - query := `monovtx_pending{namespace="default", mvtx_name="test-mvtx", pending="5m"}` + query := `sum(vertex_pending_messages{namespace="default", pipeline="test-pipeline", vertex="test-vertex", period="5m"}) by (vertex, period)` startTime := time.Now().Add(-30 * time.Minute) endTime := time.Now() diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/index.tsx b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/index.tsx index a32fec5322..71f5ee862d 100644 --- a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/index.tsx +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/index.tsx @@ -65,6 +65,11 @@ export function Metrics({ namespaceId, pipelineId, type, vertexId }: MetricsProp return ( {discoveredMetrics?.data?.map((metric: any) => { + if ( + type === "source" && + 
metric?.metric_name === "vertex_pending_messages" + ) + return null; const panelId = `${metric?.metric_name}-panel`; return ( { switch (metricName) { case "monovtx_pending": - return "period"; + case "vertex_pending_messages": + return dimension === "pod" ? ["pod", "period"] : ["period"]; } switch (dimension) { case "mono-vertex": - return "mvtx_name"; + return ["mvtx_name"]; default: - return dimension; + return [dimension]; } }, []); @@ -141,7 +142,18 @@ const LineChartComponent = ({ metricsReq?.metric_name ); chartData?.forEach((item) => { - const labelVal = item?.metric?.[label]; + let labelVal = ""; + label?.forEach((eachLabel: string) => { + if (item?.metric?.[eachLabel] !== undefined) { + labelVal += (labelVal ? "-" : "") + item.metric[eachLabel]; + } + }); + + // Remove initial hyphen if labelVal is not empty + if (labelVal.startsWith("-") && labelVal.length > 1) { + labelVal = labelVal.substring(1); + } + labels.push(labelVal); item?.values?.forEach(([timestamp, value]: [number, string]) => { const date = new Date(timestamp * 1000); diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/utils/constants.ts b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/utils/constants.ts index 56833633fb..b953f217e3 100644 --- a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/utils/constants.ts +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/utils/constants.ts @@ -40,7 +40,7 @@ export const metricNameMap: { [p: string]: string } = { "Mono Vertex Sink Write Time Latency (in micro seconds)", forwarder_data_read_total: "Vertex Read Processing Rate (messages per second)", - monovtx_read_total: - "Mono Vertex Read Processing Rate (messages per second)", - monovtx_pending: "Mono Vertex 
Pending", + monovtx_read_total: "Mono Vertex Read Processing Rate (messages per second)", + monovtx_pending: "Mono Vertex Pending Messages", + vertex_pending_messages: "Vertex Pending Messages", }; From d94461541c41d2f0d7c68ebd2e73030e8e5a0095 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 15 Dec 2024 16:25:15 -0800 Subject: [PATCH 169/188] chore(deps): bump nanoid from 3.3.7 to 3.3.8 in /ui (#2289) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- ui/yarn.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ui/yarn.lock b/ui/yarn.lock index e507771e3f..5779331c0a 100644 --- a/ui/yarn.lock +++ b/ui/yarn.lock @@ -8333,9 +8333,9 @@ mz@^2.7.0: thenify-all "^1.0.0" nanoid@^3.3.7: - version "3.3.7" - resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.7.tgz#d0c301a691bc8d54efa0a2226ccf3fe2fd656bd8" - integrity sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g== + version "3.3.8" + resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.8.tgz#b1be3030bee36aaff18bacb375e5cce521684baf" + integrity sha512-WNLf5Sd8oZxOm+TzppcYk8gVOgP+l58xNy58D0nbUnOxOWRWvlcCV4kUF7ltmI6PsrLl/BgKEyS4mqsGChFN0w== natural-compare-lite@^1.4.0: version "1.4.0" From c05c96ac3cfcc201121bc740d6908d03bd302831 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 15 Dec 2024 19:21:53 -0800 Subject: [PATCH 170/188] chore(deps): bump golang.org/x/crypto from 0.27.0 to 0.31.0 (#2290) --- go.mod | 10 +++++----- go.sum | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index b5b7d57160..77dd7923a5 100644 --- a/go.mod +++ b/go.mod @@ -48,11 +48,11 @@ require ( go.uber.org/goleak v1.3.0 go.uber.org/multierr v1.11.0 go.uber.org/zap v1.26.0 - golang.org/x/crypto v0.27.0 + golang.org/x/crypto 
v0.31.0 golang.org/x/exp v0.0.0-20240531132922-fd00a4e0eefc golang.org/x/net v0.29.0 golang.org/x/oauth2 v0.21.0 - golang.org/x/sync v0.8.0 + golang.org/x/sync v0.10.0 golang.org/x/tools v0.24.0 google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 google.golang.org/grpc v1.66.0 @@ -200,9 +200,9 @@ require ( go.mongodb.org/mongo-driver v1.15.0 // indirect golang.org/x/arch v0.7.0 // indirect golang.org/x/mod v0.20.0 // indirect - golang.org/x/sys v0.25.0 // indirect - golang.org/x/term v0.24.0 // indirect - golang.org/x/text v0.18.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/term v0.27.0 // indirect + golang.org/x/text v0.21.0 // indirect golang.org/x/time v0.6.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 48b6fba0ed..bd78cd2779 100644 --- a/go.sum +++ b/go.sum @@ -691,8 +691,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= -golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -806,8 +806,8 @@ golang.org/x/sync 
v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -867,15 +867,15 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= -golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= 
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= -golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -888,8 +888,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= -golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From fc14696aa1d8c79c98e6d20e12bdc438d8c6a740 Mon Sep 17 00:00:00 2001 From: Yashash H L Date: Mon, 16 Dec 2024 09:58:42 +0530 Subject: [PATCH 171/188] fix: set max decode size of proto message, add mvtx metrics (#2283) Signed-off-by: Yashash H L 
--- rust/numaflow-core/src/config/monovertex.rs | 2 +- rust/numaflow-core/src/config/pipeline.rs | 2 +- rust/numaflow-core/src/metrics.rs | 22 ++++ rust/numaflow-core/src/pipeline.rs | 2 +- .../src/pipeline/isb/jetstream/reader.rs | 5 +- .../src/pipeline/isb/jetstream/writer.rs | 26 ++--- .../src/shared/create_components.rs | 10 +- rust/numaflow-core/src/shared/forward.rs | 6 +- rust/numaflow-core/src/shared/grpc.rs | 14 ++- rust/numaflow-core/src/sink.rs | 101 +++++++++++++----- rust/numaflow-core/src/sink/blackhole.rs | 3 +- rust/numaflow-core/src/sink/log.rs | 3 +- rust/numaflow-core/src/sink/user_defined.rs | 12 ++- rust/numaflow-core/src/source.rs | 24 ++--- rust/numaflow-core/src/source/generator.rs | 3 +- rust/numaflow-core/src/tracker.rs | 13 ++- rust/numaflow-core/src/transformer.rs | 17 ++- 17 files changed, 182 insertions(+), 83 deletions(-) diff --git a/rust/numaflow-core/src/config/monovertex.rs b/rust/numaflow-core/src/config/monovertex.rs index 686e284615..c5f4f2622b 100644 --- a/rust/numaflow-core/src/config/monovertex.rs +++ b/rust/numaflow-core/src/config/monovertex.rs @@ -1,4 +1,3 @@ -use crate::config::monovertex::sink::SinkType; use std::time::Duration; use base64::prelude::BASE64_STANDARD; @@ -14,6 +13,7 @@ use crate::config::components::transformer::{ }; use crate::config::components::{sink, source}; use crate::config::get_vertex_replica; +use crate::config::monovertex::sink::SinkType; use crate::error::Error; use crate::Result; diff --git a/rust/numaflow-core/src/config/pipeline.rs b/rust/numaflow-core/src/config/pipeline.rs index c9d02e632c..6c0a4a08bc 100644 --- a/rust/numaflow-core/src/config/pipeline.rs +++ b/rust/numaflow-core/src/config/pipeline.rs @@ -1,4 +1,3 @@ -use crate::config::components::sink::SinkType; use std::collections::HashMap; use std::env; use std::time::Duration; @@ -10,6 +9,7 @@ use serde_json::from_slice; use crate::config::components::metrics::MetricsConfig; use crate::config::components::sink::SinkConfig; +use 
crate::config::components::sink::SinkType; use crate::config::components::source::SourceConfig; use crate::config::components::transformer::{TransformerConfig, TransformerType}; use crate::config::get_vertex_replica; diff --git a/rust/numaflow-core/src/metrics.rs b/rust/numaflow-core/src/metrics.rs index 045b3817b9..866e58f2c2 100644 --- a/rust/numaflow-core/src/metrics.rs +++ b/rust/numaflow-core/src/metrics.rs @@ -78,6 +78,7 @@ const PENDING: &str = "pending"; // processing times as timers const E2E_TIME: &str = "processing_time"; const READ_TIME: &str = "read_time"; +const WRITE_TIME: &str = "write_time"; const TRANSFORM_TIME: &str = "time"; const ACK_TIME: &str = "ack_time"; const SINK_TIME: &str = "time"; @@ -226,9 +227,11 @@ pub(crate) struct PipelineForwarderMetrics { pub(crate) ack_total: Family, Counter>, pub(crate) ack_time: Family, Histogram>, pub(crate) write_total: Family, Counter>, + pub(crate) write_time: Family, Histogram>, pub(crate) read_bytes_total: Family, Counter>, pub(crate) processed_time: Family, Histogram>, pub(crate) pending: Family, Gauge>, + pub(crate) dropped_total: Family, Counter>, } pub(crate) struct PipelineISBMetrics { @@ -395,6 +398,10 @@ impl PipelineMetrics { }), pending: Family::, Gauge>::default(), write_total: Family::, Counter>::default(), + write_time: Family::, Histogram>::new_with_constructor( + || Histogram::new(exponential_buckets_range(100.0, 60000000.0 * 15.0, 10)), + ), + dropped_total: Family::, Counter>::default(), }, isb: PipelineISBMetrics { paf_resolution_time: @@ -442,6 +449,21 @@ impl PipelineMetrics { "Number of pending messages", metrics.forwarder.pending.clone(), ); + forwarder_registry.register( + SINK_WRITE_TOTAL, + "Total number of Data Messages Written", + metrics.forwarder.write_total.clone(), + ); + forwarder_registry.register( + DROPPED_TOTAL, + "Total number of dropped messages", + metrics.forwarder.dropped_total.clone(), + ); + forwarder_registry.register( + WRITE_TIME, + "Time taken to write 
data", + metrics.forwarder.write_time.clone(), + ); metrics } } diff --git a/rust/numaflow-core/src/pipeline.rs b/rust/numaflow-core/src/pipeline.rs index a8deaa7b64..d59cd182b6 100644 --- a/rust/numaflow-core/src/pipeline.rs +++ b/rust/numaflow-core/src/pipeline.rs @@ -1,4 +1,3 @@ -use crate::pipeline::pipeline::isb::BufferReaderConfig; use std::time::Duration; use async_nats::jetstream::Context; @@ -13,6 +12,7 @@ use crate::metrics::{PipelineContainerState, UserDefinedContainerState}; use crate::pipeline::forwarder::source_forwarder; use crate::pipeline::isb::jetstream::reader::JetstreamReader; use crate::pipeline::isb::jetstream::writer::JetstreamWriter; +use crate::pipeline::pipeline::isb::BufferReaderConfig; use crate::shared::create_components; use crate::shared::create_components::create_sink_writer; use crate::shared::metrics::start_metrics_server; diff --git a/rust/numaflow-core/src/pipeline/isb/jetstream/reader.rs b/rust/numaflow-core/src/pipeline/isb/jetstream/reader.rs index 3773228906..4513cb9182 100644 --- a/rust/numaflow-core/src/pipeline/isb/jetstream/reader.rs +++ b/rust/numaflow-core/src/pipeline/isb/jetstream/reader.rs @@ -260,14 +260,15 @@ mod tests { use std::collections::HashMap; use std::sync::Arc; - use super::*; - use crate::message::{Message, MessageID}; use async_nats::jetstream; use async_nats::jetstream::{consumer, stream}; use bytes::BytesMut; use chrono::Utc; use tokio::time::sleep; + use super::*; + use crate::message::{Message, MessageID}; + #[cfg(feature = "nats-tests")] #[tokio::test] async fn test_jetstream_read() { diff --git a/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs b/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs index 969f343ab1..a99d43856d 100644 --- a/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs +++ b/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs @@ -4,16 +4,6 @@ use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use std::time::Duration; -use 
crate::config::pipeline::isb::BufferFullStrategy; -use crate::config::pipeline::ToVertexConfig; -use crate::error::Error; -use crate::message::{IntOffset, Message, Offset}; -use crate::metrics::{pipeline_isb_metric_labels, pipeline_metrics}; -use crate::pipeline::isb::jetstream::Stream; -use crate::tracker::TrackerHandle; -use crate::Result; - -use crate::shared::forward; use async_nats::jetstream::consumer::PullConsumer; use async_nats::jetstream::context::PublishAckFuture; use async_nats::jetstream::publish::PublishAck; @@ -28,6 +18,16 @@ use tokio_stream::StreamExt; use tokio_util::sync::CancellationToken; use tracing::{debug, error, info, warn}; +use crate::config::pipeline::isb::BufferFullStrategy; +use crate::config::pipeline::ToVertexConfig; +use crate::error::Error; +use crate::message::{IntOffset, Message, Offset}; +use crate::metrics::{pipeline_isb_metric_labels, pipeline_metrics}; +use crate::pipeline::isb::jetstream::Stream; +use crate::shared::forward; +use crate::tracker::TrackerHandle; +use crate::Result; + const DEFAULT_RETRY_INTERVAL_MILLIS: u64 = 10; const DEFAULT_REFRESH_INTERVAL_SECS: u64 = 1; @@ -461,9 +461,6 @@ pub(crate) struct ResolveAndPublishResult { #[cfg(test)] mod tests { - use crate::pipeline::pipeline::isb::BufferWriterConfig; - use numaflow_models::models::ForwardConditions; - use numaflow_models::models::TagConditions; use std::collections::HashMap; use std::time::Instant; @@ -472,9 +469,12 @@ mod tests { use async_nats::jetstream::{consumer, stream}; use bytes::BytesMut; use chrono::Utc; + use numaflow_models::models::ForwardConditions; + use numaflow_models::models::TagConditions; use super::*; use crate::message::{Message, MessageID, ReadAck}; + use crate::pipeline::pipeline::isb::BufferWriterConfig; #[cfg(feature = "nats-tests")] #[tokio::test] diff --git a/rust/numaflow-core/src/shared/create_components.rs b/rust/numaflow-core/src/shared/create_components.rs index 26516e34d9..f67715112e 100644 --- 
a/rust/numaflow-core/src/shared/create_components.rs +++ b/rust/numaflow-core/src/shared/create_components.rs @@ -74,7 +74,7 @@ pub(crate) async fn create_sink_writer( grpc::create_rpc_channel(ud_config.socket_path.clone().into()).await?, ) .max_encoding_message_size(ud_config.grpc_max_message_size) - .max_encoding_message_size(ud_config.grpc_max_message_size); + .max_decoding_message_size(ud_config.grpc_max_message_size); grpc::wait_until_sink_ready(cln_token, &mut sink_grpc_client).await?; ( SinkWriterBuilder::new( @@ -129,7 +129,7 @@ pub(crate) async fn create_sink_writer( grpc::create_rpc_channel(ud_config.socket_path.clone().into()).await?, ) .max_encoding_message_size(ud_config.grpc_max_message_size) - .max_encoding_message_size(ud_config.grpc_max_message_size); + .max_decoding_message_size(ud_config.grpc_max_message_size); grpc::wait_until_sink_ready(cln_token, &mut sink_grpc_client).await?; Ok(( @@ -178,7 +178,7 @@ pub async fn create_transformer( grpc::create_rpc_channel(ud_transformer.socket_path.clone().into()).await?, ) .max_encoding_message_size(ud_transformer.grpc_max_message_size) - .max_encoding_message_size(ud_transformer.grpc_max_message_size); + .max_decoding_message_size(ud_transformer.grpc_max_message_size); grpc::wait_until_transformer_ready(&cln_token, &mut transformer_grpc_client).await?; return Ok(( Some( @@ -242,7 +242,7 @@ pub async fn create_source( grpc::create_rpc_channel(udsource_config.socket_path.clone().into()).await?, ) .max_encoding_message_size(udsource_config.grpc_max_message_size) - .max_encoding_message_size(udsource_config.grpc_max_message_size); + .max_decoding_message_size(udsource_config.grpc_max_message_size); grpc::wait_until_source_ready(&cln_token, &mut source_grpc_client).await?; let (ud_read, ud_ack, ud_lag) = new_source(source_grpc_client.clone(), batch_size, read_timeout).await?; @@ -329,7 +329,7 @@ mod tests { #[tonic::async_trait] impl sink::Sinker for InMemorySink { - async fn sink(&self, mut _input: 
mpsc::Receiver) -> Vec { + async fn sink(&self, _input: mpsc::Receiver) -> Vec { vec![] } } diff --git a/rust/numaflow-core/src/shared/forward.rs b/rust/numaflow-core/src/shared/forward.rs index 11b9195ccb..050902328e 100644 --- a/rust/numaflow-core/src/shared/forward.rs +++ b/rust/numaflow-core/src/shared/forward.rs @@ -1,7 +1,8 @@ -use numaflow_models::models::ForwardConditions; use std::hash::{DefaultHasher, Hasher}; use std::sync::Arc; +use numaflow_models::models::ForwardConditions; + /// Checks if the message should to written to downstream vertex based the conditions /// and message tags. If not tags are provided by there are edge conditions present, we will /// still forward to all vertices. @@ -61,9 +62,10 @@ fn check_operator_condition( #[cfg(test)] mod tests { - use super::*; use numaflow_models::models::TagConditions; + use super::*; + #[tokio::test] async fn test_evaluate_write_condition_no_conditions() { let result = should_forward(None, None); diff --git a/rust/numaflow-core/src/shared/grpc.rs b/rust/numaflow-core/src/shared/grpc.rs index d6246b60a6..3500524f02 100644 --- a/rust/numaflow-core/src/shared/grpc.rs +++ b/rust/numaflow-core/src/shared/grpc.rs @@ -15,7 +15,7 @@ use tokio_util::sync::CancellationToken; use tonic::transport::{Channel, Endpoint}; use tonic::Request; use tower::service_fn; -use tracing::info; +use tracing::{info, warn}; use crate::error; use crate::error::Error; @@ -90,13 +90,21 @@ pub(crate) fn prost_timestamp_from_utc(t: DateTime) -> Option { pub(crate) async fn create_rpc_channel(socket_path: PathBuf) -> error::Result { const RECONNECT_INTERVAL: u64 = 1000; - const MAX_RECONNECT_ATTEMPTS: usize = 5; + const MAX_RECONNECT_ATTEMPTS: usize = 60; let interval = fixed::Interval::from_millis(RECONNECT_INTERVAL).take(MAX_RECONNECT_ATTEMPTS); let channel = Retry::retry( interval, - || async { connect_with_uds(socket_path.clone()).await }, + || async { + match connect_with_uds(socket_path.clone()).await { + Ok(channel) => 
Ok(channel), + Err(e) => { + warn!(?e, "Failed to connect to UDS socket"); + Err(Error::Connection(format!("Failed to connect: {:?}", e))) + } + } + }, |_: &Error| true, ) .await?; diff --git a/rust/numaflow-core/src/sink.rs b/rust/numaflow-core/src/sink.rs index 474f91e77f..0b30f4c30a 100644 --- a/rust/numaflow-core/src/sink.rs +++ b/rust/numaflow-core/src/sink.rs @@ -15,8 +15,13 @@ use tracing::{debug, error, info, warn}; use user_defined::UserDefinedSink; use crate::config::components::sink::{OnFailureStrategy, RetryConfig}; +use crate::config::is_mono_vertex; use crate::error::Error; use crate::message::{Message, ResponseFromSink, ResponseStatusFromSink}; +use crate::metrics::{ + monovertex_metrics, mvtx_forward_metric_labels, pipeline_forward_metric_labels, + pipeline_metrics, +}; use crate::tracker::TrackerHandle; use crate::Result; @@ -73,6 +78,12 @@ where } } } + + async fn run(mut self) { + while let Some(msg) = self.actor_messages.recv().await { + self.handle_message(msg).await; + } + } } pub(crate) enum SinkClientType { @@ -137,28 +148,22 @@ impl SinkWriterBuilder { SinkClientType::Log => { let log_sink = log::LogSink; tokio::spawn(async { - let mut actor = SinkActor::new(receiver, log_sink); - while let Some(msg) = actor.actor_messages.recv().await { - actor.handle_message(msg).await; - } + let actor = SinkActor::new(receiver, log_sink); + actor.run().await; }); } SinkClientType::Blackhole => { let blackhole_sink = blackhole::BlackholeSink; tokio::spawn(async { - let mut actor = SinkActor::new(receiver, blackhole_sink); - while let Some(msg) = actor.actor_messages.recv().await { - actor.handle_message(msg).await; - } + let actor = SinkActor::new(receiver, blackhole_sink); + actor.run().await; }); } SinkClientType::UserDefined(sink_client) => { let sink = UserDefinedSink::new(sink_client).await?; tokio::spawn(async { - let mut actor = SinkActor::new(receiver, sink); - while let Some(msg) = actor.actor_messages.recv().await { - 
actor.handle_message(msg).await; - } + let actor = SinkActor::new(receiver, sink); + actor.run().await; }); } }; @@ -169,28 +174,22 @@ impl SinkWriterBuilder { SinkClientType::Log => { let log_sink = log::LogSink; tokio::spawn(async { - let mut actor = SinkActor::new(fb_receiver, log_sink); - while let Some(msg) = actor.actor_messages.recv().await { - actor.handle_message(msg).await; - } + let actor = SinkActor::new(fb_receiver, log_sink); + actor.run().await; }); } SinkClientType::Blackhole => { let blackhole_sink = blackhole::BlackholeSink; tokio::spawn(async { - let mut actor = SinkActor::new(fb_receiver, blackhole_sink); - while let Some(msg) = actor.actor_messages.recv().await { - actor.handle_message(msg).await; - } + let actor = SinkActor::new(fb_receiver, blackhole_sink); + actor.run().await; }); } SinkClientType::UserDefined(sink_client) => { let sink = UserDefinedSink::new(sink_client).await?; tokio::spawn(async { - let mut actor = SinkActor::new(fb_receiver, sink); - while let Some(msg) = actor.actor_messages.recv().await { - actor.handle_message(msg).await; - } + let actor = SinkActor::new(fb_receiver, sink); + actor.run().await; }); } }; @@ -275,13 +274,15 @@ impl SinkWriter { .map(|msg| msg.id.offset.clone()) .collect::>(); + let total_msgs = batch.len(); // filter out the messages which needs to be dropped let batch = batch .into_iter() .filter(|msg| !msg.dropped()) .collect::>(); - let n = batch.len(); + let sink_start = time::Instant::now(); + let total_valid_msgs = batch.len(); match this.write(batch, cancellation_token.clone()).await { Ok(_) => { for offset in offsets { @@ -298,7 +299,31 @@ impl SinkWriter { } } - processed_msgs_count += n; + // publish sink metrics + if is_mono_vertex() { + monovertex_metrics() + .sink + .time + .get_or_create(mvtx_forward_metric_labels()) + .observe(sink_start.elapsed().as_micros() as f64); + monovertex_metrics() + .dropped_total + .get_or_create(mvtx_forward_metric_labels()) + .inc_by((total_msgs - 
total_valid_msgs) as u64); + } else { + pipeline_metrics() + .forwarder + .write_time + .get_or_create(pipeline_forward_metric_labels("Sink", None)) + .observe(sink_start.elapsed().as_micros() as f64); + pipeline_metrics() + .forwarder + .dropped_total + .get_or_create(pipeline_forward_metric_labels("Sink", None)) + .inc_by((total_msgs - total_valid_msgs) as u64); + } + + processed_msgs_count += total_msgs; if last_logged_at.elapsed().as_millis() >= 1000 { info!( "Processed {} messages at {:?}", @@ -326,6 +351,7 @@ impl SinkWriter { return Ok(()); } + let total_msgs = messages.len(); let mut attempts = 0; let mut error_map = HashMap::new(); let mut fallback_msgs = Vec::new(); @@ -388,12 +414,32 @@ impl SinkWriter { } } + let fb_msgs_total = fallback_msgs.len(); // If there are fallback messages, write them to the fallback sink if !fallback_msgs.is_empty() { self.handle_fallback_messages(fallback_msgs, retry_config) .await?; } + if is_mono_vertex() { + monovertex_metrics() + .sink + .write_total + .get_or_create(mvtx_forward_metric_labels()) + .inc_by((total_msgs - fb_msgs_total) as u64); + monovertex_metrics() + .fb_sink + .write_total + .get_or_create(mvtx_forward_metric_labels()) + .inc_by(fb_msgs_total as u64); + } else { + pipeline_metrics() + .forwarder + .write_total + .get_or_create(pipeline_forward_metric_labels("Sink", None)) + .inc_by(total_msgs as u64); + } + Ok(()) } @@ -605,9 +651,10 @@ impl Drop for SinkWriter { #[cfg(test)] mod tests { + use std::sync::Arc; + use chrono::Utc; use numaflow::sink; - use std::sync::Arc; use tokio::time::Duration; use tokio_util::sync::CancellationToken; diff --git a/rust/numaflow-core/src/sink/blackhole.rs b/rust/numaflow-core/src/sink/blackhole.rs index dd537d18b1..eb2f331360 100644 --- a/rust/numaflow-core/src/sink/blackhole.rs +++ b/rust/numaflow-core/src/sink/blackhole.rs @@ -19,9 +19,10 @@ impl Sink for BlackholeSink { #[cfg(test)] mod tests { - use chrono::Utc; use std::sync::Arc; + use chrono::Utc; + use 
super::BlackholeSink; use crate::message::IntOffset; use crate::message::{Message, MessageID, Offset, ResponseFromSink, ResponseStatusFromSink}; diff --git a/rust/numaflow-core/src/sink/log.rs b/rust/numaflow-core/src/sink/log.rs index a82670e8d8..9ae426f1f2 100644 --- a/rust/numaflow-core/src/sink/log.rs +++ b/rust/numaflow-core/src/sink/log.rs @@ -35,9 +35,10 @@ impl Sink for LogSink { #[cfg(test)] mod tests { - use chrono::Utc; use std::sync::Arc; + use chrono::Utc; + use super::LogSink; use crate::message::IntOffset; use crate::message::{Message, MessageID, Offset, ResponseFromSink, ResponseStatusFromSink}; diff --git a/rust/numaflow-core/src/sink/user_defined.rs b/rust/numaflow-core/src/sink/user_defined.rs index a1817b1ae0..efb9c1178d 100644 --- a/rust/numaflow-core/src/sink/user_defined.rs +++ b/rust/numaflow-core/src/sink/user_defined.rs @@ -1,7 +1,3 @@ -use crate::message::{Message, ResponseFromSink}; -use crate::sink::Sink; -use crate::Error; -use crate::Result; use numaflow_pb::clients::sink::sink_client::SinkClient; use numaflow_pb::clients::sink::{Handshake, SinkRequest, SinkResponse, TransmissionStatus}; use tokio::sync::mpsc; @@ -10,6 +6,11 @@ use tonic::transport::Channel; use tonic::{Request, Streaming}; use tracing::error; +use crate::message::{Message, ResponseFromSink}; +use crate::sink::Sink; +use crate::Error; +use crate::Result; + const DEFAULT_CHANNEL_SIZE: usize = 1000; /// User-Defined Sink code writes messages to a custom [SinkWriter]. 
@@ -118,9 +119,10 @@ impl Sink for UserDefinedSink { #[cfg(test)] mod tests { + use std::sync::Arc; + use chrono::offset::Utc; use numaflow::sink; - use std::sync::Arc; use tokio::sync::mpsc; use tracing::info; diff --git a/rust/numaflow-core/src/source.rs b/rust/numaflow-core/src/source.rs index 3f2816514f..66361d84ac 100644 --- a/rust/numaflow-core/src/source.rs +++ b/rust/numaflow-core/src/source.rs @@ -129,6 +129,12 @@ where } } } + + async fn run(mut self) { + while let Some(msg) = self.receiver.recv().await { + self.handle_message(msg).await; + } + } } /// Source is used to read, ack, and get the pending messages count from the source. @@ -150,31 +156,25 @@ impl Source { match src_type { SourceType::UserDefinedSource(reader, acker, lag_reader) => { tokio::spawn(async move { - let mut actor = SourceActor::new(receiver, reader, acker, lag_reader); - while let Some(msg) = actor.receiver.recv().await { - actor.handle_message(msg).await; - } + let actor = SourceActor::new(receiver, reader, acker, lag_reader); + actor.run().await; }); } SourceType::Generator(reader, acker, lag_reader) => { tokio::spawn(async move { - let mut actor = SourceActor::new(receiver, reader, acker, lag_reader); - while let Some(msg) = actor.receiver.recv().await { - actor.handle_message(msg).await; - } + let actor = SourceActor::new(receiver, reader, acker, lag_reader); + actor.run().await; }); } SourceType::Pulsar(pulsar_source) => { tokio::spawn(async move { - let mut actor = SourceActor::new( + let actor = SourceActor::new( receiver, pulsar_source.clone(), pulsar_source.clone(), pulsar_source, ); - while let Some(msg) = actor.receiver.recv().await { - actor.handle_message(msg).await; - } + actor.run().await; }); } }; diff --git a/rust/numaflow-core/src/source/generator.rs b/rust/numaflow-core/src/source/generator.rs index fdc5d590a5..855030f73b 100644 --- a/rust/numaflow-core/src/source/generator.rs +++ b/rust/numaflow-core/src/source/generator.rs @@ -1,8 +1,9 @@ +use 
tokio_stream::StreamExt; + use crate::config::components::source::GeneratorConfig; use crate::message::{Message, Offset}; use crate::reader; use crate::source; -use tokio_stream::StreamExt; /// Stream Generator returns a set of messages for every `.next` call. It will throttle itself if /// the call exceeds the RPU. It will return a max (batch size, RPU) till the quota for that unit of diff --git a/rust/numaflow-core/src/tracker.rs b/rust/numaflow-core/src/tracker.rs index a1dd32662b..a4ef30e24c 100644 --- a/rust/numaflow-core/src/tracker.rs +++ b/rust/numaflow-core/src/tracker.rs @@ -8,14 +8,16 @@ //! //! In the future Watermark will also be propagated based on this. -use crate::error::Error; -use crate::message::ReadAck; -use crate::Result; -use bytes::Bytes; use std::collections::HashMap; + +use bytes::Bytes; use tokio::sync::{mpsc, oneshot}; use tracing::warn; +use crate::error::Error; +use crate::message::ReadAck; +use crate::Result; + /// TrackerEntry represents the state of a tracked message. 
#[derive(Debug)] struct TrackerEntry { @@ -246,10 +248,11 @@ impl TrackerHandle { #[cfg(test)] mod tests { - use super::*; use tokio::sync::oneshot; use tokio::time::{timeout, Duration}; + use super::*; + #[tokio::test] async fn test_insert_update_delete() { let handle = TrackerHandle::new(); diff --git a/rust/numaflow-core/src/transformer.rs b/rust/numaflow-core/src/transformer.rs index d6d63bdfea..0b26a7e76a 100644 --- a/rust/numaflow-core/src/transformer.rs +++ b/rust/numaflow-core/src/transformer.rs @@ -1,5 +1,6 @@ -use numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; use std::sync::Arc; + +use numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; use tokio::sync::{mpsc, oneshot, OwnedSemaphorePermit, Semaphore}; use tokio::task::JoinHandle; use tokio_stream::wrappers::ReceiverStream; @@ -7,7 +8,9 @@ use tokio_stream::StreamExt; use tonic::transport::Channel; use tracing::error; +use crate::error::Error; use crate::message::Message; +use crate::metrics::{monovertex_metrics, mvtx_forward_metric_labels}; use crate::tracker::TrackerHandle; use crate::transformer::user_defined::UserDefinedTransformer; use crate::Result; @@ -104,6 +107,7 @@ impl Transformer { // invoke transformer and then wait for the one-shot tokio::spawn(async move { + let start_time = tokio::time::Instant::now(); let _permit = permit; let (sender, receiver) = oneshot::channel(); @@ -141,6 +145,11 @@ impl Transformer { .expect("failed to discard tracker"); } } + monovertex_metrics() + .transformer + .time + .get_or_create(mvtx_forward_metric_labels()) + .observe(start_time.elapsed().as_micros() as f64); }); Ok(()) @@ -156,14 +165,16 @@ impl Transformer { let transform_handle = self.sender.clone(); let tracker_handle = self.tracker_handle.clone(); - // FIXME: batch_size should not be used, introduce a new config called udf concurrenc + // FIXME: batch_size should not be used, introduce a new config called udf concurrency 
let semaphore = Arc::new(Semaphore::new(self.concurrency)); let handle = tokio::spawn(async move { let mut input_stream = input_stream; while let Some(read_msg) = input_stream.next().await { - let permit = Arc::clone(&semaphore).acquire_owned().await.unwrap(); + let permit = Arc::clone(&semaphore).acquire_owned().await.map_err(|e| { + Error::Transformer(format!("failed to acquire semaphore: {}", e)) + })?; Self::transform( transform_handle.clone(), From 7450e16c1c63069cd8472550ce09eb1026a94410 Mon Sep 17 00:00:00 2001 From: Adarsh Jain Date: Tue, 17 Dec 2024 23:06:03 +0530 Subject: [PATCH 172/188] chore: e2e tests for metrics APIs (#2277) --- go.mod | 2 +- go.sum | 2 +- server/apis/v1/handler.go | 21 +++++++ server/apis/v1/promql_service_test.go | 51 ++++++++++++++--- server/routes/routes.go | 2 + test/api-e2e/api_test.go | 78 +++++++++++++++++++++++++- test/api-e2e/testdata.go | 4 +- test/api-e2e/testdata/mono-vertex.yaml | 26 +++++++++ 8 files changed, 171 insertions(+), 15 deletions(-) create mode 100644 test/api-e2e/testdata/mono-vertex.yaml diff --git a/go.mod b/go.mod index 77dd7923a5..3783ba46da 100644 --- a/go.mod +++ b/go.mod @@ -23,6 +23,7 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/golang-jwt/jwt/v5 v5.2.1 github.com/golang/mock v1.6.0 + github.com/google/go-cmp v0.6.0 github.com/google/uuid v1.6.0 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 @@ -118,7 +119,6 @@ require ( github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/google/gnostic-models v0.6.8 // indirect - github.com/google/go-cmp v0.6.0 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/gorilla/handlers v1.5.2 // indirect diff --git a/go.sum b/go.sum index bd78cd2779..65f904db62 100644 --- a/go.sum +++ b/go.sum @@ -1164,4 +1164,4 @@ sigs.k8s.io/structured-merge-diff/v4 v4.4.1 
h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+s sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= \ No newline at end of file diff --git a/server/apis/v1/handler.go b/server/apis/v1/handler.go index 9e588daed8..17f0322c2a 100644 --- a/server/apis/v1/handler.go +++ b/server/apis/v1/handler.go @@ -1133,6 +1133,27 @@ func (h *handler) GetMonoVertex(c *gin.Context) { c.JSON(http.StatusOK, NewNumaflowAPIResponse(nil, monoVertexResp)) } +// DeleteMonoVertex is used to delete a mono vertex +func (h *handler) DeleteMonoVertex(c *gin.Context) { + ns, monoVertex := c.Param("namespace"), c.Param("mono-vertex") + + // Check if the mono vertex exists + _, err := h.numaflowClient.MonoVertices(ns).Get(c, monoVertex, metav1.GetOptions{}) + if err != nil { + h.respondWithError(c, fmt.Sprintf("Failed to fetch mono vertex %q in namespace %q, %s", monoVertex, ns, err.Error())) + return + } + + // Delete the mono vertex + err = h.numaflowClient.MonoVertices(ns).Delete(c, monoVertex, metav1.DeleteOptions{}) + if err != nil { + h.respondWithError(c, fmt.Sprintf("Failed to delete mono vertex %q in namespace %q, %s", monoVertex, ns, err.Error())) + return + } + + c.JSON(http.StatusOK, NewNumaflowAPIResponse(nil, nil)) +} + // CreateMonoVertex is used to create a mono vertex func (h *handler) CreateMonoVertex(c *gin.Context) { if h.opts.readonly { diff --git a/server/apis/v1/promql_service_test.go b/server/apis/v1/promql_service_test.go index 54270cf0d2..ed50a4ec68 100644 --- a/server/apis/v1/promql_service_test.go +++ b/server/apis/v1/promql_service_test.go @@ -10,6 +10,8 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" + 
"github.com/google/go-cmp/cmp/cmpopts" v1 "github.com/prometheus/client_golang/api/prometheus/v1" "github.com/prometheus/common/model" "github.com/stretchr/testify/assert" @@ -38,13 +40,46 @@ func (m *MockPrometheusAPI) QueryRange(ctx context.Context, query string, r v1.R return mockResponse, nil, nil } +func compareFilters(query1, query2 string) bool { + //Extract the filter portions of the queries + filters1 := extractfilters(query1) + filters2 := extractfilters(query2) + return reflect.DeepEqual(filters1, filters2) +} + // comparePrometheusQueries compares two Prometheus queries, ignoring the order of filters within the curly braces func comparePrometheusQueries(query1, query2 string) bool { - // Extract the filter portions of the queries + //Extract the filter portions of the queries filters1 := extractfilters(query1) filters2 := extractfilters(query2) - // Compare the filter portions using reflect.DeepEqual, which ignores order - return reflect.DeepEqual(filters1, filters2) + //Compare the filter portions using reflect.DeepEqual, which ignores order + if !reflect.DeepEqual(filters1, filters2) { + return false // Filters don't match + } + + //Remove filter portions from the queries + query1 = removeFilters(query1) + query2 = removeFilters(query2) + + //Normalize the remaining parts of the queries + query1 = normalizeQuery(query1) + query2 = normalizeQuery(query2) + + //Compare the normalized queries + return cmp.Equal(query1, query2, cmpopts.IgnoreUnexported(struct{}{})) + +} + +func normalizeQuery(query string) string { + // Remove extra whitespace and normalize case + query = strings.TrimSpace(strings.ToLower(query)) + return query +} + +// remove filters within {} +func removeFilters(query string) string { + re := regexp.MustCompile(`\{(.*?)\}`) + return re.ReplaceAllString(query, "") } // extractfilters extracts the key-value pairs within the curly braces @@ -97,7 +132,7 @@ func Test_PopulateReqMap(t *testing.T) { assert.Equal(t, actualMap["$quantile"], 
expectedMap["$quantile"]) assert.Equal(t, actualMap["$duration"], expectedMap["$duration"]) assert.Equal(t, actualMap["$dimension"], expectedMap["$dimension"]) - if !comparePrometheusQueries(expectedMap["$filters"], actualMap["$filters"]) { + if !compareFilters(expectedMap["$filters"], actualMap["$filters"]) { t.Errorf("filters do not match") } }) @@ -121,7 +156,7 @@ func Test_PopulateReqMap(t *testing.T) { assert.Equal(t, actualMap["$duration"], expectedMap["$duration"]) assert.Equal(t, actualMap["$dimension"], expectedMap["$dimension"]) - if !comparePrometheusQueries(expectedMap["$filters"], actualMap["$filters"]) { + if !compareFilters(expectedMap["$filters"], actualMap["$filters"]) { t.Errorf("filters do not match") } }) @@ -160,7 +195,7 @@ func Test_PromQueryBuilder(t *testing.T) { "pod": "test-pod", }, }, - expectedQuery: `histogram_quantile(0.90, sum by(test_dimension,le) (rate(test_bucket{namespace= "test_namespace", mvtx_name= "test-mono-vertex", pod= "test-pod"}[5m])))`, + expectedQuery: `histogram_quantile(0.90, sum by(test_dimension,le) (rate(test_metric{namespace= "test_namespace", mvtx_name= "test-mono-vertex", pod= "test-pod"}[5m])))`, }, { name: "Missing placeholder in req", @@ -283,7 +318,7 @@ func Test_PromQueryBuilder(t *testing.T) { }, Expression: map[string]map[string]string{ "monovtx_pending": { - "mono-vertex": "$metric_name{$filters}", + "mono-vertex": "sum($metric_name{$filters}) by ($dimension, period)", }, }, } @@ -347,7 +382,7 @@ func Test_PromQueryBuilder(t *testing.T) { }, Expression: map[string]map[string]string{ "vertex_pending_messages": { - "vertex": "$metric_name{$filters}", + "vertex": "sum($metric_name{$filters}) by ($dimension, period)", }, }, } diff --git a/server/routes/routes.go b/server/routes/routes.go index 1872e90181..500a32a587 100644 --- a/server/routes/routes.go +++ b/server/routes/routes.go @@ -171,6 +171,8 @@ func v1Routes(ctx context.Context, r gin.IRouter, dexObj *v1.DexObject, localUse 
r.GET("/namespaces/:namespace/mono-vertices", handler.ListMonoVertices) // Get the mono vertex information. r.GET("/namespaces/:namespace/mono-vertices/:mono-vertex", handler.GetMonoVertex) + // Delete a mono-vertex. + r.DELETE("/namespaces/:namespace/mono-vertices/:mono-vertex", handler.DeleteMonoVertex) // Get all the pods of a mono vertex. r.GET("/namespaces/:namespace/mono-vertices/:mono-vertex/pods", handler.ListMonoVertexPods) // Create a mono vertex. diff --git a/test/api-e2e/api_test.go b/test/api-e2e/api_test.go index b23735e699..e1800ea678 100644 --- a/test/api-e2e/api_test.go +++ b/test/api-e2e/api_test.go @@ -35,6 +35,10 @@ type APISuite struct { E2ESuite } +func TestAPISuite(t *testing.T) { + suite.Run(t, new(APISuite)) +} + func (s *APISuite) TestGetSysInfo() { defer s.Given().When().UXServerPodPortForward(8043, 8443).TerminateAllPodPortForwards() @@ -209,9 +213,17 @@ func (s *APISuite) TestAPIsForIsbAndPipelineAndMonoVertex() { Expect(). Status(200).Body().Raw() assert.Contains(s.T(), listMonoVertexBody, testMonoVertex1Name) + + // deletes a mono-vertex + deleteMonoVertex := HTTPExpect(s.T(), "https://localhost:8145").DELETE(fmt.Sprintf("/api/v1/namespaces/%s/mono-vertices/%s", Namespace, testMonoVertex1Name)). + Expect(). + Status(200).Body().Raw() + var deleteMonoVertexSuccessExpect = `"data":null` + assert.Contains(s.T(), deleteMonoVertex, deleteMonoVertexSuccessExpect) + } -func (s *APISuite) TestAPIsForMetricsAndWatermarkAndPods() { +func (s *APISuite) TestAPIsForMetricsAndWatermarkAndPodsForPipeline() { ctx, cancel := context.WithTimeout(context.Background(), time.Minute) defer cancel() @@ -275,8 +287,68 @@ func (s *APISuite) TestAPIsForMetricsAndWatermarkAndPods() { Expect(). 
Status(200).Body().Raw() assert.Contains(s.T(), getVerticesPodsBody, `simple-pipeline-input-0`) + + // Call the DiscoverMetrics API for the vertex object + discoverMetricsBodyForVertex := HTTPExpect(s.T(), "https://localhost:8146").GET("/api/v1/metrics-discovery/object/vertex"). + Expect(). + Status(200).Body().Raw() + + // Check that the response contains expected metrics for vertex object + assert.Contains(s.T(), discoverMetricsBodyForVertex, "forwarder_data_read_total") + + // Call the API to get input vertex pods info + getVertexPodsInfoBody := HTTPExpect(s.T(), "https://localhost:8146"). + GET(fmt.Sprintf("/api/v1/namespaces/%s/pipelines/%s/vertices/%s/pods-info", Namespace, pipelineName, "input")). + Expect(). + Status(200).Body().Raw() + + // Check that the response contains expected pod details + assert.Contains(s.T(), getVertexPodsInfoBody, `"name":`) // Check for pod name + assert.Contains(s.T(), getVertexPodsInfoBody, `"status":`) // Check for pod status + assert.Contains(s.T(), getVertexPodsInfoBody, `"totalCPU":`) // Check for pod's cpu usage + assert.Contains(s.T(), getVertexPodsInfoBody, `"totalMemory":`) // Check for pod's memory usage + assert.Contains(s.T(), getVertexPodsInfoBody, `"containerDetailsMap":`) // Check for pod's containers } -func TestAPISuite(t *testing.T) { - suite.Run(t, new(APISuite)) +func (s *APISuite) TestMetricsAPIsForMonoVertex() { + _, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + + w := s.Given().MonoVertex("@testdata/mono-vertex.yaml"). + When(). + CreateMonoVertexAndWait() + defer w.DeleteMonoVertexAndWait() + + monoVertexName := "mono-vertex" + + defer w.UXServerPodPortForward(8149, 8443).TerminateAllPodPortForwards() + + w.Expect().MonoVertexPodsRunning() + // Expect the messages to reach the sink. 
+ w.Expect().RedisSinkContains("mono-vertex", "199") + w.Expect().RedisSinkContains("mono-vertex", "200") + + // Call the API to get mono vertex pods info + getMonoVertexPodsInfoBody := HTTPExpect(s.T(), "https://localhost:8149"). + GET(fmt.Sprintf("/api/v1/namespaces/%s/mono-vertices/%s/pods-info", Namespace, monoVertexName)). + Expect(). + Status(200).Body().Raw() + + // Check that the response contains expected pod details + assert.Contains(s.T(), getMonoVertexPodsInfoBody, `"name":`) // Check for pod name + assert.Contains(s.T(), getMonoVertexPodsInfoBody, `"status":`) // Check for pod status + assert.Contains(s.T(), getMonoVertexPodsInfoBody, `"totalCPU":`) // Check for pod's cpu usage + assert.Contains(s.T(), getMonoVertexPodsInfoBody, `"totalMemory":`) // Check for pod's memory usage + assert.Contains(s.T(), getMonoVertexPodsInfoBody, `"containerDetailsMap":`) // Check for pod's containers + + // Call the DiscoverMetrics API for mono-vertex + discoverMetricsBodyForMonoVertex := HTTPExpect(s.T(), "https://localhost:8149").GET("/api/v1/metrics-discovery/object/mono-vertex"). + Expect(). 
+ Status(200).Body().Raw() + + // Check that the response contains expected metrics for mono-vertex + assert.Contains(s.T(), discoverMetricsBodyForMonoVertex, "monovtx_processing_time_bucket") + assert.Contains(s.T(), discoverMetricsBodyForMonoVertex, "monovtx_sink_time_bucket") + assert.Contains(s.T(), discoverMetricsBodyForMonoVertex, "monovtx_read_total") + assert.Contains(s.T(), discoverMetricsBodyForMonoVertex, "monovtx_pending") } diff --git a/test/api-e2e/testdata.go b/test/api-e2e/testdata.go index 411a7b586a..bdee0da422 100644 --- a/test/api-e2e/testdata.go +++ b/test/api-e2e/testdata.go @@ -168,7 +168,7 @@ var ( "source": { "udsource": { "container": { - "image": "quay.io/numaio/numaflow-java/source-simple-source:stable" + "image": "quay.io/numaio/numaflow-rs/simple-source:stable" } }, "transformer": { @@ -180,7 +180,7 @@ var ( "sink": { "udsink": { "container": { - "image": "quay.io/numaio/numaflow-java/simple-sink:stable" + "image": "quay.io/numaio/numaflow-rs/sink-log:stable" } } } diff --git a/test/api-e2e/testdata/mono-vertex.yaml b/test/api-e2e/testdata/mono-vertex.yaml new file mode 100644 index 0000000000..c90ae8bd9e --- /dev/null +++ b/test/api-e2e/testdata/mono-vertex.yaml @@ -0,0 +1,26 @@ +apiVersion: numaflow.numaproj.io/v1alpha1 +kind: MonoVertex +metadata: + name: mono-vertex +spec: + scale: + min: 1 + source: + udsource: + container: + image: quay.io/numaio/numaflow-go/source-simple-source:stable + imagePullPolicy: Always + transformer: + container: + image: quay.io/numaio/numaflow-go/mapt-assign-event-time:stable + imagePullPolicy: Always + sink: + udsink: + container: + # A redis sink for e2e testing, see https://github.com/numaproj/numaflow-go/tree/main/pkg/sinker/examples/redis_sink + image: quay.io/numaio/numaflow-go/redis-sink:stable + imagePullPolicy: Always + env: + - name: SINK_HASH_KEY + # Use the name of the mono vertex as the key + value: "mono-vertex" \ No newline at end of file From 40ecf06fc10d89a9bc2f8cb0b15b9efe3824db6a Mon 
Sep 17 00:00:00 2001 From: Derek Wang Date: Tue, 17 Dec 2024 21:14:24 -0800 Subject: [PATCH 173/188] chore: use same metric to expose binary build info (#2293) --- pkg/daemon/server/daemon_server.go | 5 ++++- pkg/daemon/server/metrics.go | 5 +++-- pkg/metrics/metrics.go | 5 +++++ pkg/mvtxdaemon/server/daemon_server.go | 5 ++++- pkg/mvtxdaemon/server/metrics.go | 5 +++-- 5 files changed, 19 insertions(+), 6 deletions(-) diff --git a/pkg/daemon/server/daemon_server.go b/pkg/daemon/server/daemon_server.go index 768f789dde..351b2607ce 100644 --- a/pkg/daemon/server/daemon_server.go +++ b/pkg/daemon/server/daemon_server.go @@ -42,6 +42,7 @@ import ( "github.com/numaproj/numaflow/pkg/daemon/server/service" server "github.com/numaproj/numaflow/pkg/daemon/server/service/rater" "github.com/numaproj/numaflow/pkg/isbsvc" + "github.com/numaproj/numaflow/pkg/metrics" jsclient "github.com/numaproj/numaflow/pkg/shared/clients/nats" redisclient "github.com/numaproj/numaflow/pkg/shared/clients/redis" "github.com/numaproj/numaflow/pkg/shared/logging" @@ -156,7 +157,9 @@ func (ds *daemonServer) Run(ctx context.Context) error { go ds.exposeMetrics(ctx) version := numaflow.GetVersion() - pipeline_info.WithLabelValues(version.Version, version.Platform, ds.pipeline.Name).Set(1) + // TODO: clean it up in v1.6 + deprecatedPipelineInfo.WithLabelValues(version.Version, version.Platform, ds.pipeline.Name).Set(1) + metrics.BuildInfo.WithLabelValues(v1alpha1.ComponentDaemon, ds.pipeline.Name, version.Version, version.Platform).Set(1) log.Infof("Daemon server started successfully on %s", address) <-ctx.Done() diff --git a/pkg/daemon/server/metrics.go b/pkg/daemon/server/metrics.go index c27bdeedb0..6c31e5e727 100644 --- a/pkg/daemon/server/metrics.go +++ b/pkg/daemon/server/metrics.go @@ -23,10 +23,11 @@ import ( ) var ( - pipeline_info = promauto.NewGaugeVec(prometheus.GaugeOpts{ + // Deprecated: Use pkg/metrics.BuildInfo instead. 
+ deprecatedPipelineInfo = promauto.NewGaugeVec(prometheus.GaugeOpts{ Subsystem: "pipeline", Name: "build_info", - Help: "A metric with a constant value '1', labeled by Numaflow binary version and platform, as well as the pipeline name", + Help: "A metric with a constant value '1', labeled by Numaflow binary version and platform, as well as the pipeline name. Deprecated: Use build_info instead", }, []string{metrics.LabelVersion, metrics.LabelPlatform, metrics.LabelPipeline}) // Pipeline processing lag, max(watermark) - min(watermark) diff --git a/pkg/metrics/metrics.go b/pkg/metrics/metrics.go index 2754983481..53605eca2c 100644 --- a/pkg/metrics/metrics.go +++ b/pkg/metrics/metrics.go @@ -41,6 +41,11 @@ const ( ) var ( + BuildInfo = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "build_info", + Help: "A metric with a constant value '1', labeled by Numaflow binary version, platform, and other information", + }, []string{LabelComponent, LabelComponentName, LabelVersion, LabelPlatform}) + SDKInfo = promauto.NewGaugeVec(prometheus.GaugeOpts{ Name: "sdk_info", Help: "A metric with a constant value '1', labeled by SDK information such as version, language, and type", diff --git a/pkg/mvtxdaemon/server/daemon_server.go b/pkg/mvtxdaemon/server/daemon_server.go index a8f05f64a7..09f4e3bd77 100644 --- a/pkg/mvtxdaemon/server/daemon_server.go +++ b/pkg/mvtxdaemon/server/daemon_server.go @@ -38,6 +38,7 @@ import ( "github.com/numaproj/numaflow" "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" "github.com/numaproj/numaflow/pkg/apis/proto/mvtxdaemon" + "github.com/numaproj/numaflow/pkg/metrics" "github.com/numaproj/numaflow/pkg/mvtxdaemon/server/service" rateServer "github.com/numaproj/numaflow/pkg/mvtxdaemon/server/service/rater" "github.com/numaproj/numaflow/pkg/shared/logging" @@ -108,7 +109,9 @@ func (ds *daemonServer) Run(ctx context.Context) error { }() version := numaflow.GetVersion() - monoVertexInfo.WithLabelValues(version.Version, version.Platform, 
ds.monoVtx.Name).Set(1) + // Todo: clean it up in v1.6 + deprecatedMonoVertexInfo.WithLabelValues(version.Version, version.Platform, ds.monoVtx.Name).Set(1) + metrics.BuildInfo.WithLabelValues(v1alpha1.ComponentMonoVertexDaemon, ds.monoVtx.Name, version.Version, version.Platform).Set(1) log.Infof("MonoVertex daemon server started successfully on %s", address) <-ctx.Done() diff --git a/pkg/mvtxdaemon/server/metrics.go b/pkg/mvtxdaemon/server/metrics.go index f3c0c30796..e06ea5906d 100644 --- a/pkg/mvtxdaemon/server/metrics.go +++ b/pkg/mvtxdaemon/server/metrics.go @@ -24,9 +24,10 @@ import ( ) var ( - monoVertexInfo = promauto.NewGaugeVec(prometheus.GaugeOpts{ + // Deprecated: Use pkg/metrics.BuildInfo instead. + deprecatedMonoVertexInfo = promauto.NewGaugeVec(prometheus.GaugeOpts{ Subsystem: "monovtx", Name: "build_info", - Help: "A metric with a constant value '1', labeled by Numaflow binary version and platform, as well as the mono vertex name", + Help: "A metric with a constant value '1', labeled by Numaflow binary version and platform, as well as the mono vertex name. 
Deprecated: Use build_info instead", }, []string{metrics.LabelVersion, metrics.LabelPlatform, metrics.LabelMonoVertexName}) ) From f41e6ff8213e0749ac757be8dd7cc1c82d663843 Mon Sep 17 00:00:00 2001 From: Sreekanth Date: Thu, 19 Dec 2024 10:30:03 +0530 Subject: [PATCH 174/188] chore: make serving config compatible with numaflow-core configs (#2291) Signed-off-by: Sreekanth --- Dockerfile | 4 +- rust/Cargo.lock | 238 +------------- rust/numaflow-core/Cargo.toml | 1 + rust/numaflow-core/src/config/components.rs | 52 +++ .../src/shared/create_components.rs | 3 + rust/numaflow/src/main.rs | 41 ++- rust/serving/Cargo.toml | 1 - rust/serving/config/default.toml | 17 - rust/serving/config/jetstream.conf | 4 - rust/serving/config/pipeline_spec.json | 21 -- rust/serving/src/app.rs | 205 +++++++----- rust/serving/src/app/callback.rs | 69 ++-- rust/serving/src/app/callback/state.rs | 43 ++- .../src/app/callback/store/redisstore.rs | 116 ++++--- rust/serving/src/app/direct_proxy.rs | 20 +- rust/serving/src/app/jetstream_proxy.rs | 148 +++++---- rust/serving/src/app/message_path.rs | 7 +- rust/serving/src/config.rs | 310 ++++++++++++------ rust/serving/src/lib.rs | 37 ++- rust/serving/src/metrics.rs | 1 + rust/serving/src/pipeline.rs | 75 ++--- 21 files changed, 731 insertions(+), 682 deletions(-) delete mode 100644 rust/serving/config/default.toml delete mode 100644 rust/serving/config/jetstream.conf delete mode 100644 rust/serving/config/pipeline_spec.json diff --git a/Dockerfile b/Dockerfile index bded35d1f9..48d9d68959 100644 --- a/Dockerfile +++ b/Dockerfile @@ -69,8 +69,6 @@ COPY --from=base /bin/numaflow /bin/numaflow COPY --from=base /bin/numaflow-rs /bin/numaflow-rs COPY ui/build /ui/build -COPY ./rust/serving/config config - ENTRYPOINT [ "/bin/numaflow" ] #################################################################################################### @@ -89,4 +87,4 @@ RUN chmod +x /bin/e2eapi 
#################################################################################################### FROM scratch AS e2eapi COPY --from=testbase /bin/e2eapi . -ENTRYPOINT ["/e2eapi"] \ No newline at end of file +ENTRYPOINT ["/e2eapi"] diff --git a/rust/Cargo.lock b/rust/Cargo.lock index ec51332105..a210284fcd 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -17,18 +17,6 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" -[[package]] -name = "ahash" -version = "0.8.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" -dependencies = [ - "cfg-if", - "once_cell", - "version_check", - "zerocopy", -] - [[package]] name = "aho-corasick" version = "1.1.3" @@ -38,12 +26,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "allocator-api2" -version = "0.2.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" - [[package]] name = "android-tzdata" version = "0.1.1" @@ -71,12 +53,6 @@ version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" -[[package]] -name = "arraydeque" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d902e3d592a523def97af8f317b08ce16b7ab854c1985a0c671e6f15cebc236" - [[package]] name = "async-nats" version = "0.35.1" @@ -388,9 +364,6 @@ name = "bitflags" version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" -dependencies = [ - "serde", -] [[package]] name = "block-buffer" @@ -503,60 +476,12 @@ dependencies = [ "tokio-util", ] -[[package]] -name = "config" -version = "0.14.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "68578f196d2a33ff61b27fae256c3164f65e36382648e30666dde05b8cc9dfdf" -dependencies = [ - "async-trait", - "convert_case", - "json5", - "nom", - "pathdiff", - "ron", - "rust-ini", - "serde", - "serde_json", - "toml", - "yaml-rust2", -] - [[package]] name = "const-oid" version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" -[[package]] -name = "const-random" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87e00182fe74b066627d63b85fd550ac2998d4b0bd86bfed477a0ae4c7c71359" -dependencies = [ - "const-random-macro", -] - -[[package]] -name = "const-random-macro" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e" -dependencies = [ - "getrandom", - "once_cell", - "tiny-keccak", -] - -[[package]] -name = "convert_case" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca" -dependencies = [ - "unicode-segmentation", -] - [[package]] name = "core-foundation" version = "0.9.4" @@ -607,12 +532,6 @@ version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" -[[package]] -name = "crunchy" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" - [[package]] name = "crypto-common" version = "0.1.6" @@ -697,15 +616,6 @@ dependencies = [ "syn 2.0.90", ] -[[package]] -name = "dlv-list" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"442039f5147480ba31067cb00ada1adae6892028e40e45fc5de7b7df6dcc1b5f" -dependencies = [ - "const-random", -] - [[package]] name = "dtoa" version = "1.0.9" @@ -984,31 +894,12 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" -[[package]] -name = "hashbrown" -version = "0.14.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" -dependencies = [ - "ahash", - "allocator-api2", -] - [[package]] name = "hashbrown" version = "0.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" -[[package]] -name = "hashlink" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" -dependencies = [ - "hashbrown 0.14.5", -] - [[package]] name = "headers" version = "0.4.0" @@ -1493,17 +1384,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "json5" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96b0db21af676c1ce64250b5f40f3ce2cf27e4e47cb91ed91eb6fe9350b430c1" -dependencies = [ - "pest", - "pest_derive", - "serde", -] - [[package]] name = "jsonpath-rust" version = "0.5.1" @@ -1873,6 +1753,7 @@ dependencies = [ "semver", "serde", "serde_json", + "serving", "tempfile", "thiserror 2.0.3", "tokio", @@ -1954,16 +1835,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "ordered-multimap" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49203cdcae0030493bad186b28da2fa25645fa276a51b6fec8010d281e02ef79" -dependencies = [ - "dlv-list", - "hashbrown 0.14.5", -] - [[package]] name = "overload" version = "0.1.1" @@ -1999,12 +1870,6 @@ version = "1.0.15" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" -[[package]] -name = "pathdiff" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df94ce210e5bc13cb6651479fa48d14f601d9858cfe0467f43ae157023b938d3" - [[package]] name = "pem" version = "3.0.4" @@ -2630,28 +2495,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "ron" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b91f7eff05f748767f183df4320a63d6936e9c6107d97c9e6bdd9784f4289c94" -dependencies = [ - "base64 0.21.7", - "bitflags 2.6.0", - "serde", - "serde_derive", -] - -[[package]] -name = "rust-ini" -version = "0.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e0698206bcb8882bf2a9ecb4c1e7785db57ff052297085a6efd4fe42302068a" -dependencies = [ - "cfg-if", - "ordered-multimap", -] - [[package]] name = "rustc-demangle" version = "0.1.24" @@ -2955,15 +2798,6 @@ dependencies = [ "syn 2.0.90", ] -[[package]] -name = "serde_spanned" -version = "0.6.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" -dependencies = [ - "serde", -] - [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -3011,7 +2845,6 @@ dependencies = [ "backoff", "base64 0.22.1", "chrono", - "config", "hyper-util", "numaflow-models", "parking_lot", @@ -3319,15 +3152,6 @@ dependencies = [ "time-core", ] -[[package]] -name = "tiny-keccak" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" -dependencies = [ - "crunchy", -] - [[package]] name = "tinystr" version = "0.7.6" @@ -3458,40 +3282,6 @@ dependencies = [ "tokio-util", ] -[[package]] -name = "toml" -version = "0.8.19" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" -dependencies = [ - "serde", - "serde_spanned", - "toml_datetime", - "toml_edit", -] - -[[package]] -name = "toml_datetime" -version = "0.6.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" -dependencies = [ - "serde", -] - -[[package]] -name = "toml_edit" -version = "0.22.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" -dependencies = [ - "indexmap 2.7.0", - "serde", - "serde_spanned", - "toml_datetime", - "winnow", -] - [[package]] name = "tonic" version = "0.12.3" @@ -3718,12 +3508,6 @@ version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" -[[package]] -name = "unicode-segmentation" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" - [[package]] name = "unicode-width" version = "0.1.14" @@ -4133,15 +3917,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" -[[package]] -name = "winnow" -version = "0.6.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" -dependencies = [ - "memchr", -] - [[package]] name = "winreg" version = "0.50.0" @@ -4164,17 +3939,6 @@ version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" -[[package]] -name = "yaml-rust2" -version = "0.8.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8902160c4e6f2fb145dbe9d6760a75e3c9522d8bf796ed7047c85919ac7115f8" -dependencies = [ - "arraydeque", - "encoding_rs", - "hashlink", -] - [[package]] name = "yasna" version = "0.5.2" diff --git a/rust/numaflow-core/Cargo.toml b/rust/numaflow-core/Cargo.toml index 73c15c489c..b4688a135b 100644 --- a/rust/numaflow-core/Cargo.toml +++ b/rust/numaflow-core/Cargo.toml @@ -17,6 +17,7 @@ tracing.workspace = true numaflow-pulsar.workspace = true numaflow-models.workspace = true numaflow-pb.workspace = true +serving.workspace = true backoff.workspace = true axum.workspace = true axum-server.workspace = true diff --git a/rust/numaflow-core/src/config/components.rs b/rust/numaflow-core/src/config/components.rs index 9b26c1d5d7..f17331ddaa 100644 --- a/rust/numaflow-core/src/config/components.rs +++ b/rust/numaflow-core/src/config/components.rs @@ -3,6 +3,8 @@ pub(crate) mod source { const DEFAULT_SOURCE_SOCKET: &str = "/var/run/numaflow/source.sock"; const DEFAULT_SOURCE_SERVER_INFO_FILE: &str = "/var/run/numaflow/sourcer-server-info"; + use std::collections::HashMap; + use std::env; use std::{fmt::Debug, time::Duration}; use bytes::Bytes; @@ -31,6 +33,7 @@ pub(crate) mod source { Generator(GeneratorConfig), UserDefined(UserDefinedConfig), Pulsar(PulsarSourceConfig), + Serving(serving::Settings), } impl From> for SourceType { @@ -96,6 +99,55 @@ pub(crate) mod source { } } + impl TryFrom> for SourceType { + type Error = Error; + // FIXME: Currently, the same settings comes from user-defined settings and env variables. + // We parse both, with user-defined values having higher precedence. + // There should be only one option (user-defined) to define the settings. 
+ fn try_from(cfg: Box) -> Result { + let env_vars = env::vars().collect::>(); + + let mut settings: serving::Settings = env_vars + .try_into() + .map_err(|e: serving::Error| Error::Config(e.to_string()))?; + + settings.tid_header = cfg.msg_id_header_key; + + if let Some(auth) = cfg.auth { + if let Some(token) = auth.token { + let secret = crate::shared::create_components::get_secret_from_volume( + &token.name, + &token.key, + ) + .map_err(|e| Error::Config(format!("Reading API auth token secret: {e:?}")))?; + settings.api_auth_token = Some(secret); + } else { + tracing::warn!("Authentication token for Serving API is specified, but the secret is empty"); + }; + } + + if let Some(ttl) = cfg.store.ttl { + if ttl.is_negative() { + return Err(Error::Config(format!( + "TTL value for the store can not be negative. Provided value = {ttl:?}" + ))); + } + let ttl: std::time::Duration = ttl.into(); + let ttl_secs = ttl.as_secs() as u32; + // TODO: Identify a minimum value + if ttl_secs < 1 { + return Err(Error::Config(format!( + "TTL value for the store must not be less than 1 second. 
Provided value = {ttl:?}" + ))); + } + settings.redis.ttl_secs = Some(ttl_secs); + } + settings.redis.addr = cfg.store.url; + + Ok(SourceType::Serving(settings)) + } + } + impl TryFrom> for SourceType { type Error = Error; diff --git a/rust/numaflow-core/src/shared/create_components.rs b/rust/numaflow-core/src/shared/create_components.rs index f67715112e..0e5fade691 100644 --- a/rust/numaflow-core/src/shared/create_components.rs +++ b/rust/numaflow-core/src/shared/create_components.rs @@ -266,6 +266,9 @@ pub async fn create_source( None, )) } + SourceType::Serving(_) => { + unimplemented!("Serving as built-in source is not yet implemented") + } } } diff --git a/rust/numaflow/src/main.rs b/rust/numaflow/src/main.rs index e25fbf9c66..60e26ef850 100644 --- a/rust/numaflow/src/main.rs +++ b/rust/numaflow/src/main.rs @@ -1,13 +1,14 @@ +use std::collections::HashMap; use std::env; +use std::error::Error; +use std::sync::Arc; -use tracing::{error, info}; +use tracing::error; use tracing_subscriber::layer::SubscriberExt; use tracing_subscriber::util::SubscriberInitExt; #[tokio::main] -async fn main() { - let args: Vec = env::args().collect(); - +async fn main() -> Result<(), Box> { // Set up the tracing subscriber. RUST_LOG can be used to set the log level. // The default log level is `info`. The `axum::rejection=trace` enables showing // rejections from built-in extractors at `TRACE` level. @@ -20,21 +21,31 @@ async fn main() { ) .with(tracing_subscriber::fmt::layer().with_ansi(false)) .init(); + if let Err(e) = run().await { + error!("{e:?}"); + return Err(e); + } + Ok(()) +} +async fn run() -> Result<(), Box> { + let args: Vec = env::args().collect(); // Based on the argument, run the appropriate component. 
if args.contains(&"--serving".to_string()) { - if let Err(e) = serving::serve().await { - error!("Error running serving: {}", e); - } + let env_vars: HashMap = env::vars().collect(); + let settings: serving::Settings = env_vars.try_into()?; + let settings = Arc::new(settings); + serving::serve(settings) + .await + .map_err(|e| format!("Error running serving: {e:?}"))?; } else if args.contains(&"--servesink".to_string()) { - if let Err(e) = servesink::servesink().await { - info!("Error running servesink: {}", e); - } + servesink::servesink() + .await + .map_err(|e| format!("Error running servesink: {e:?}"))?; } else if args.contains(&"--rust".to_string()) { - if let Err(e) = numaflow_core::run().await { - error!("Error running rust binary: {}", e); - } - } else { - error!("Invalid argument. Use --serving, --servesink, or --rust."); + numaflow_core::run() + .await + .map_err(|e| format!("Error running rust binary: {e:?}"))? } + Err("Invalid argument. Use --serving, --servesink, or --rust".into()) } diff --git a/rust/serving/Cargo.toml b/rust/serving/Cargo.toml index d62a1d2d8f..de2f8bb820 100644 --- a/rust/serving/Cargo.toml +++ b/rust/serving/Cargo.toml @@ -27,7 +27,6 @@ tower = "0.4.13" tower-http = { version = "0.5.2", features = ["trace", "timeout"] } uuid = { version = "1.10.0", features = ["v4"] } redis = { version = "0.26.0", features = ["tokio-comp", "aio", "connection-manager"] } -config = "0.14.0" trait-variant = "0.1.2" chrono = { version = "0.4", features = ["serde"] } base64 = "0.22.1" diff --git a/rust/serving/config/default.toml b/rust/serving/config/default.toml deleted file mode 100644 index 448672abc1..0000000000 --- a/rust/serving/config/default.toml +++ /dev/null @@ -1,17 +0,0 @@ -tid_header = "ID" -app_listen_port = 3000 -metrics_server_listen_port = 3001 -upstream_addr = "localhost:8888" -drain_timeout_secs = 10 -host_ip = "localhost" - -[jetstream] -stream = "default" -url = "localhost:4222" - -[redis] -addr = "redis://127.0.0.1/" -max_tasks = 
50 -retries = 5 -retries_duration_millis = 100 -ttl_secs = 1 diff --git a/rust/serving/config/jetstream.conf b/rust/serving/config/jetstream.conf deleted file mode 100644 index e09998c0ac..0000000000 --- a/rust/serving/config/jetstream.conf +++ /dev/null @@ -1,4 +0,0 @@ -jetstream: { - max_mem_store: 1MiB, - max_file_store: 1GiB -} \ No newline at end of file diff --git a/rust/serving/config/pipeline_spec.json b/rust/serving/config/pipeline_spec.json deleted file mode 100644 index 1698329e86..0000000000 --- a/rust/serving/config/pipeline_spec.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "vertices": [ - { - "name": "in" - }, { - "name": "cat" - }, { - "name": "out" - } - ], - "edges": [ - { - "from": "in", - "to": "cat" - }, - { - "from": "cat", - "to": "out" - } - ] -} \ No newline at end of file diff --git a/rust/serving/src/app.rs b/rust/serving/src/app.rs index d1d29c4c21..56d4a33cb3 100644 --- a/rust/serving/src/app.rs +++ b/rust/serving/src/app.rs @@ -1,5 +1,5 @@ -use std::env; use std::net::SocketAddr; +use std::sync::Arc; use std::time::Duration; use async_nats::jetstream; @@ -17,7 +17,7 @@ use tokio::signal; use tower::ServiceBuilder; use tower_http::timeout::TimeoutLayer; use tower_http::trace::{DefaultOnResponse, TraceLayer}; -use tracing::{debug, info, info_span, Level}; +use tracing::{info, info_span, Level}; use uuid::Uuid; use self::{ @@ -26,9 +26,11 @@ use self::{ }; use crate::app::callback::store::Store; use crate::app::tracker::MessageGraph; -use crate::pipeline::min_pipeline_spec; -use crate::Error::{InitError, MetricsServer}; -use crate::{app::callback::state::State as CallbackState, config, metrics::capture_metrics}; +use crate::config::JetStreamConfig; +use crate::pipeline::PipelineDCG; +use crate::Error::InitError; +use crate::Settings; +use crate::{app::callback::state::State as CallbackState, metrics::capture_metrics}; /// manage callbacks pub(crate) mod callback; @@ -41,10 +43,6 @@ mod message_path; // TODO: merge message_path and tracker mod 
response; mod tracker; -const ENV_NUMAFLOW_SERVING_JETSTREAM_USER: &str = "NUMAFLOW_ISBSVC_JETSTREAM_USER"; -const ENV_NUMAFLOW_SERVING_JETSTREAM_PASSWORD: &str = "NUMAFLOW_ISBSVC_JETSTREAM_PASSWORD"; -const ENV_NUMAFLOW_SERVING_AUTH_TOKEN: &str = "NUMAFLOW_SERVING_AUTH_TOKEN"; - /// Everything for numaserve starts here. The routing, middlewares, proxying, etc. // TODO // - [ ] implement an proxy and pass in UUID in the header if not present @@ -52,19 +50,23 @@ const ENV_NUMAFLOW_SERVING_AUTH_TOKEN: &str = "NUMAFLOW_SERVING_AUTH_TOKEN"; /// Start the main application Router and the axum server. pub(crate) async fn start_main_server( - addr: SocketAddr, + settings: Arc, tls_config: RustlsConfig, + pipeline_spec: PipelineDCG, ) -> crate::Result<()> { - debug!(?addr, "App server started"); + let app_addr: SocketAddr = format!("0.0.0.0:{}", &settings.app_listen_port) + .parse() + .map_err(|e| InitError(format!("{e:?}")))?; + let tid_header = settings.tid_header.clone(); let layers = ServiceBuilder::new() // Add tracing to all requests .layer( TraceLayer::new_for_http() - .make_span_with(|req: &Request| { + .make_span_with(move |req: &Request| { let tid = req .headers() - .get(&config().tid_header) + .get(&tid_header) .and_then(|v| v.to_str().ok()) .map(|v| v.to_string()) .unwrap_or_else(|| Uuid::new_v4().to_string()); @@ -83,13 +85,16 @@ pub(crate) async fn start_main_server( .layer( // Graceful shutdown will wait for outstanding requests to complete. Add a timeout so // requests don't hang forever. 
- TimeoutLayer::new(Duration::from_secs(config().drain_timeout_secs)), + TimeoutLayer::new(Duration::from_secs(settings.drain_timeout_secs)), ) // Add auth middleware to all user facing routes - .layer(middleware::from_fn(auth_middleware)); + .layer(middleware::from_fn_with_state( + settings.api_auth_token.clone(), + auth_middleware, + )); // Create the message graph from the pipeline spec and the redis store - let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec()).map_err(|e| { + let msg_graph = MessageGraph::from_pipeline(&pipeline_spec).map_err(|e| { InitError(format!( "Creating message graph from pipeline spec: {:?}", e @@ -97,11 +102,8 @@ pub(crate) async fn start_main_server( })?; // Create a redis store to store the callbacks and the custom responses - let redis_store = callback::store::redisstore::RedisConnection::new( - &config().redis.addr, - config().redis.max_tasks, - ) - .await?; + let redis_store = + callback::store::redisstore::RedisConnection::new(settings.redis.clone()).await?; let state = CallbackState::new(msg_graph, redis_store).await?; let handle = Handle::new(); @@ -109,14 +111,17 @@ pub(crate) async fn start_main_server( tokio::spawn(graceful_shutdown(handle.clone())); // Create a Jetstream context - let js_context = create_js_context().await?; + let js_context = create_js_context(&settings.jetstream).await?; + + let router = setup_app(settings, js_context, state).await?.layer(layers); + + info!(?app_addr, "Starting application server"); - let router = setup_app(js_context, state).await?.layer(layers); - axum_server::bind_rustls(addr, tls_config) + axum_server::bind_rustls(app_addr, tls_config) .handle(handle) .serve(router.into_make_service()) .await - .map_err(|e| MetricsServer(format!("Starting web server for metrics: {}", e)))?; + .map_err(|e| InitError(format!("Starting web server for metrics: {}", e)))?; Ok(()) } @@ -149,19 +154,16 @@ async fn graceful_shutdown(handle: Handle) { 
handle.graceful_shutdown(Some(Duration::from_secs(30))); } -async fn create_js_context() -> crate::Result { - // Check for user and password in the Jetstream configuration - let js_config = &config().jetstream; - +async fn create_js_context(js_config: &JetStreamConfig) -> crate::Result { // Connect to Jetstream with user and password if they are set - let js_client = match ( - env::var(ENV_NUMAFLOW_SERVING_JETSTREAM_USER), - env::var(ENV_NUMAFLOW_SERVING_JETSTREAM_PASSWORD), - ) { - (Ok(user), Ok(password)) => { + let js_client = match js_config.auth.as_ref() { + Some(auth) => { async_nats::connect_with_options( &js_config.url, - async_nats::ConnectOptions::with_user_and_password(user, password), + async_nats::ConnectOptions::with_user_and_password( + auth.username.clone(), + auth.password.clone(), + ), ) .await } @@ -170,8 +172,7 @@ async fn create_js_context() -> crate::Result { .map_err(|e| { InitError(format!( "Connecting to jetstream server {}: {}", - &config().jetstream.url, - e + &js_config.url, e )) })?; Ok(jetstream::new(js_client)) @@ -185,7 +186,11 @@ const PUBLISH_ENDPOINTS: [&str; 3] = [ // auth middleware to do token based authentication for all user facing routes // if auth is enabled. 
-async fn auth_middleware(request: axum::extract::Request, next: Next) -> Response { +async fn auth_middleware( + State(api_auth_token): State>, + request: axum::extract::Request, + next: Next, +) -> Response { let path = request.uri().path(); // we only need to check for the presence of the auth token in the request headers for the publish endpoints @@ -193,8 +198,8 @@ async fn auth_middleware(request: axum::extract::Request, next: Next) -> Respons return next.run(request).await; } - match env::var(ENV_NUMAFLOW_SERVING_AUTH_TOKEN) { - Ok(token) => { + match api_auth_token { + Some(token) => { // Check for the presence of the auth token in the request headers let auth_token = match request.headers().get("Authorization") { Some(token) => token, @@ -216,22 +221,35 @@ async fn auth_middleware(request: axum::extract::Request, next: Next) -> Respons next.run(request).await } } - Err(_) => { + None => { // If the auth token is not set, we don't need to check for the presence of the auth token in the request headers next.run(request).await } } } +#[derive(Clone)] +pub(crate) struct AppState { + pub(crate) settings: Arc, + pub(crate) callback_state: CallbackState, + pub(crate) context: Context, +} + async fn setup_app( + settings: Arc, context: Context, - state: CallbackState, + callback_state: CallbackState, ) -> crate::Result { + let app_state = AppState { + settings, + callback_state: callback_state.clone(), + context: context.clone(), + }; let parent = Router::new() .route("/health", get(health_check)) .route("/livez", get(livez)) // Liveliness check .route("/readyz", get(readyz)) - .with_state((state.clone(), context.clone())); // Readiness check + .with_state(app_state.clone()); // Readiness check // a pool based client implementation for direct proxy, this client is cloneable. 
let client: direct_proxy::Client = @@ -240,8 +258,11 @@ async fn setup_app( // let's nest each endpoint let app = parent - .nest("/v1/direct", direct_proxy(client)) - .nest("/v1/process", routes(context, state).await?); + .nest( + "/v1/direct", + direct_proxy(client, app_state.settings.upstream_addr.clone()), + ) + .nest("/v1/process", routes(app_state).await?); Ok(app) } @@ -250,16 +271,20 @@ async fn health_check() -> impl IntoResponse { "ok" } -async fn livez( - State((_state, _context)): State<(CallbackState, Context)>, -) -> impl IntoResponse { +async fn livez() -> impl IntoResponse { StatusCode::NO_CONTENT } async fn readyz( - State((mut state, context)): State<(CallbackState, Context)>, + State(app): State>, ) -> impl IntoResponse { - if state.ready().await && context.get_stream(&config().jetstream.stream).await.is_ok() { + if app.callback_state.clone().ready().await + && app + .context + .get_stream(&app.settings.jetstream.stream) + .await + .is_ok() + { StatusCode::NO_CONTENT } else { StatusCode::INTERNAL_SERVER_ERROR @@ -267,11 +292,14 @@ async fn readyz( } async fn routes( - context: Context, - state: CallbackState, + app_state: AppState, ) -> crate::Result { - let jetstream_proxy = jetstream_proxy(context, state.clone()).await?; - let callback_router = callback_handler(state.clone()); + let state = app_state.callback_state.clone(); + let jetstream_proxy = jetstream_proxy(app_state.clone()).await?; + let callback_router = callback_handler( + app_state.settings.tid_header.clone(), + app_state.callback_state.clone(), + ); let message_path_handler = get_message_path(state); Ok(jetstream_proxy .merge(callback_router) @@ -280,8 +308,6 @@ async fn routes( #[cfg(test)] mod tests { - use std::net::SocketAddr; - use async_nats::jetstream::stream; use axum::http::StatusCode; use tokio::time::{sleep, Duration}; @@ -291,6 +317,8 @@ mod tests { use crate::app::callback::store::memstore::InMemoryStore; use crate::config::generate_certs; + const PIPELINE_SPEC_ENCODED: 
&str = "eyJ2ZXJ0aWNlcyI6W3sibmFtZSI6ImluIiwic291cmNlIjp7InNlcnZpbmciOnsiYXV0aCI6bnVsbCwic2VydmljZSI6dHJ1ZSwibXNnSURIZWFkZXJLZXkiOiJYLU51bWFmbG93LUlkIiwic3RvcmUiOnsidXJsIjoicmVkaXM6Ly9yZWRpczo2Mzc5In19fSwiY29udGFpbmVyVGVtcGxhdGUiOnsicmVzb3VyY2VzIjp7fSwiaW1hZ2VQdWxsUG9saWN5IjoiTmV2ZXIiLCJlbnYiOlt7Im5hbWUiOiJSVVNUX0xPRyIsInZhbHVlIjoiZGVidWcifV19LCJzY2FsZSI6eyJtaW4iOjF9LCJ1cGRhdGVTdHJhdGVneSI6eyJ0eXBlIjoiUm9sbGluZ1VwZGF0ZSIsInJvbGxpbmdVcGRhdGUiOnsibWF4VW5hdmFpbGFibGUiOiIyNSUifX19LHsibmFtZSI6InBsYW5uZXIiLCJ1ZGYiOnsiY29udGFpbmVyIjp7ImltYWdlIjoiYXNjaWk6MC4xIiwiYXJncyI6WyJwbGFubmVyIl0sInJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn0sImJ1aWx0aW4iOm51bGwsImdyb3VwQnkiOm51bGx9LCJjb250YWluZXJUZW1wbGF0ZSI6eyJyZXNvdXJjZXMiOnt9LCJpbWFnZVB1bGxQb2xpY3kiOiJOZXZlciJ9LCJzY2FsZSI6eyJtaW4iOjF9LCJ1cGRhdGVTdHJhdGVneSI6eyJ0eXBlIjoiUm9sbGluZ1VwZGF0ZSIsInJvbGxpbmdVcGRhdGUiOnsibWF4VW5hdmFpbGFibGUiOiIyNSUifX19LHsibmFtZSI6InRpZ2VyIiwidWRmIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6ImFzY2lpOjAuMSIsImFyZ3MiOlsidGlnZXIiXSwicmVzb3VyY2VzIjp7fSwiaW1hZ2VQdWxsUG9saWN5IjoiTmV2ZXIifSwiYnVpbHRpbiI6bnVsbCwiZ3JvdXBCeSI6bnVsbH0sImNvbnRhaW5lclRlbXBsYXRlIjp7InJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn0sInNjYWxlIjp7Im1pbiI6MX0sInVwZGF0ZVN0cmF0ZWd5Ijp7InR5cGUiOiJSb2xsaW5nVXBkYXRlIiwicm9sbGluZ1VwZGF0ZSI6eyJtYXhVbmF2YWlsYWJsZSI6IjI1JSJ9fX0seyJuYW1lIjoiZG9nIiwidWRmIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6ImFzY2lpOjAuMSIsImFyZ3MiOlsiZG9nIl0sInJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn0sImJ1aWx0aW4iOm51bGwsImdyb3VwQnkiOm51bGx9LCJjb250YWluZXJUZW1wbGF0ZSI6eyJyZXNvdXJjZXMiOnt9LCJpbWFnZVB1bGxQb2xpY3kiOiJOZXZlciJ9LCJzY2FsZSI6eyJtaW4iOjF9LCJ1cGRhdGVTdHJhdGVneSI6eyJ0eXBlIjoiUm9sbGluZ1VwZGF0ZSIsInJvbGxpbmdVcGRhdGUiOnsibWF4VW5hdmFpbGFibGUiOiIyNSUifX19LHsibmFtZSI6ImVsZXBoYW50IiwidWRmIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6ImFzY2lpOjAuMSIsImFyZ3MiOlsiZWxlcGhhbnQiXSwicmVzb3VyY2VzIjp7fSwiaW1hZ2VQdWxsUG9saWN5IjoiTmV2ZXIifSwiYnVpbHRpbiI6bnVsbCwiZ3JvdXBCeSI6bnVsbH0sImNvbnRhaW5lclRlbXBsYXRlIjp7InJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6
Ik5ldmVyIn0sInNjYWxlIjp7Im1pbiI6MX0sInVwZGF0ZVN0cmF0ZWd5Ijp7InR5cGUiOiJSb2xsaW5nVXBkYXRlIiwicm9sbGluZ1VwZGF0ZSI6eyJtYXhVbmF2YWlsYWJsZSI6IjI1JSJ9fX0seyJuYW1lIjoiYXNjaWlhcnQiLCJ1ZGYiOnsiY29udGFpbmVyIjp7ImltYWdlIjoiYXNjaWk6MC4xIiwiYXJncyI6WyJhc2NpaWFydCJdLCJyZXNvdXJjZXMiOnt9LCJpbWFnZVB1bGxQb2xpY3kiOiJOZXZlciJ9LCJidWlsdGluIjpudWxsLCJncm91cEJ5IjpudWxsfSwiY29udGFpbmVyVGVtcGxhdGUiOnsicmVzb3VyY2VzIjp7fSwiaW1hZ2VQdWxsUG9saWN5IjoiTmV2ZXIifSwic2NhbGUiOnsibWluIjoxfSwidXBkYXRlU3RyYXRlZ3kiOnsidHlwZSI6IlJvbGxpbmdVcGRhdGUiLCJyb2xsaW5nVXBkYXRlIjp7Im1heFVuYXZhaWxhYmxlIjoiMjUlIn19fSx7Im5hbWUiOiJzZXJ2ZS1zaW5rIiwic2luayI6eyJ1ZHNpbmsiOnsiY29udGFpbmVyIjp7ImltYWdlIjoic2VydmVzaW5rOjAuMSIsImVudiI6W3sibmFtZSI6Ik5VTUFGTE9XX0NBTExCQUNLX1VSTF9LRVkiLCJ2YWx1ZSI6IlgtTnVtYWZsb3ctQ2FsbGJhY2stVXJsIn0seyJuYW1lIjoiTlVNQUZMT1dfTVNHX0lEX0hFQURFUl9LRVkiLCJ2YWx1ZSI6IlgtTnVtYWZsb3ctSWQifV0sInJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn19LCJyZXRyeVN0cmF0ZWd5Ijp7fX0sImNvbnRhaW5lclRlbXBsYXRlIjp7InJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn0sInNjYWxlIjp7Im1pbiI6MX0sInVwZGF0ZVN0cmF0ZWd5Ijp7InR5cGUiOiJSb2xsaW5nVXBkYXRlIiwicm9sbGluZ1VwZGF0ZSI6eyJtYXhVbmF2YWlsYWJsZSI6IjI1JSJ9fX0seyJuYW1lIjoiZXJyb3Itc2luayIsInNpbmsiOnsidWRzaW5rIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6InNlcnZlc2luazowLjEiLCJlbnYiOlt7Im5hbWUiOiJOVU1BRkxPV19DQUxMQkFDS19VUkxfS0VZIiwidmFsdWUiOiJYLU51bWFmbG93LUNhbGxiYWNrLVVybCJ9LHsibmFtZSI6Ik5VTUFGTE9XX01TR19JRF9IRUFERVJfS0VZIiwidmFsdWUiOiJYLU51bWFmbG93LUlkIn1dLCJyZXNvdXJjZXMiOnt9LCJpbWFnZVB1bGxQb2xpY3kiOiJOZXZlciJ9fSwicmV0cnlTdHJhdGVneSI6e319LCJjb250YWluZXJUZW1wbGF0ZSI6eyJyZXNvdXJjZXMiOnt9LCJpbWFnZVB1bGxQb2xpY3kiOiJOZXZlciJ9LCJzY2FsZSI6eyJtaW4iOjF9LCJ1cGRhdGVTdHJhdGVneSI6eyJ0eXBlIjoiUm9sbGluZ1VwZGF0ZSIsInJvbGxpbmdVcGRhdGUiOnsibWF4VW5hdmFpbGFibGUiOiIyNSUifX19XSwiZWRnZXMiOlt7ImZyb20iOiJpbiIsInRvIjoicGxhbm5lciIsImNvbmRpdGlvbnMiOm51bGx9LHsiZnJvbSI6InBsYW5uZXIiLCJ0byI6ImFzY2lpYXJ0IiwiY29uZGl0aW9ucyI6eyJ0YWdzIjp7Im9wZXJhdG9yIjoib3IiLCJ2YWx1ZXMiOlsiYXNjaWlhcnQiXX19fSx7ImZyb20iOiJwbGFubmVyIiwidG8iOiJ0aWdl
ciIsImNvbmRpdGlvbnMiOnsidGFncyI6eyJvcGVyYXRvciI6Im9yIiwidmFsdWVzIjpbInRpZ2VyIl19fX0seyJmcm9tIjoicGxhbm5lciIsInRvIjoiZG9nIiwiY29uZGl0aW9ucyI6eyJ0YWdzIjp7Im9wZXJhdG9yIjoib3IiLCJ2YWx1ZXMiOlsiZG9nIl19fX0seyJmcm9tIjoicGxhbm5lciIsInRvIjoiZWxlcGhhbnQiLCJjb25kaXRpb25zIjp7InRhZ3MiOnsib3BlcmF0b3IiOiJvciIsInZhbHVlcyI6WyJlbGVwaGFudCJdfX19LHsiZnJvbSI6InRpZ2VyIiwidG8iOiJzZXJ2ZS1zaW5rIiwiY29uZGl0aW9ucyI6bnVsbH0seyJmcm9tIjoiZG9nIiwidG8iOiJzZXJ2ZS1zaW5rIiwiY29uZGl0aW9ucyI6bnVsbH0seyJmcm9tIjoiZWxlcGhhbnQiLCJ0byI6InNlcnZlLXNpbmsiLCJjb25kaXRpb25zIjpudWxsfSx7ImZyb20iOiJhc2NpaWFydCIsInRvIjoic2VydmUtc2luayIsImNvbmRpdGlvbnMiOm51bGx9LHsiZnJvbSI6InBsYW5uZXIiLCJ0byI6ImVycm9yLXNpbmsiLCJjb25kaXRpb25zIjp7InRhZ3MiOnsib3BlcmF0b3IiOiJvciIsInZhbHVlcyI6WyJlcnJvciJdfX19XSwibGlmZWN5Y2xlIjp7fSwid2F0ZXJtYXJrIjp7fX0="; + type Result = core::result::Result; type Error = Box; @@ -302,9 +330,14 @@ mod tests { .await .unwrap(); - let addr = SocketAddr::from(([127, 0, 0, 1], 0)); + let settings = Arc::new(Settings { + app_listen_port: 0, + ..Settings::default() + }); + let server = tokio::spawn(async move { - let result = start_main_server(addr, tls_config).await; + let pipeline_spec = PIPELINE_SPEC_ENCODED.parse().unwrap(); + let result = start_main_server(settings, tls_config, pipeline_spec).await; assert!(result.is_ok()) }); @@ -319,9 +352,10 @@ mod tests { #[cfg(feature = "all-tests")] #[tokio::test] async fn test_setup_app() -> Result<()> { - let client = async_nats::connect(&config().jetstream.url).await?; + let settings = Arc::new(Settings::default()); + let client = async_nats::connect(&settings.jetstream.url).await?; let context = jetstream::new(client); - let stream_name = &config().jetstream.stream; + let stream_name = &settings.jetstream.stream; let stream = context .get_or_create_stream(stream::Config { @@ -334,11 +368,12 @@ mod tests { assert!(stream.is_ok()); let mem_store = InMemoryStore::new(); - let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec())?; + let pipeline_spec = 
PIPELINE_SPEC_ENCODED.parse().unwrap(); + let msg_graph = MessageGraph::from_pipeline(&pipeline_spec)?; let callback_state = CallbackState::new(msg_graph, mem_store).await?; - let result = setup_app(context, callback_state).await; + let result = setup_app(settings, context, callback_state).await; assert!(result.is_ok()); Ok(()) } @@ -346,9 +381,10 @@ mod tests { #[cfg(feature = "all-tests")] #[tokio::test] async fn test_livez() -> Result<()> { - let client = async_nats::connect(&config().jetstream.url).await?; + let settings = Arc::new(Settings::default()); + let client = async_nats::connect(&settings.jetstream.url).await?; let context = jetstream::new(client); - let stream_name = &config().jetstream.stream; + let stream_name = &settings.jetstream.stream; let stream = context .get_or_create_stream(stream::Config { @@ -361,11 +397,12 @@ mod tests { assert!(stream.is_ok()); let mem_store = InMemoryStore::new(); - let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec())?; + let pipeline_spec = PIPELINE_SPEC_ENCODED.parse().unwrap(); + let msg_graph = MessageGraph::from_pipeline(&pipeline_spec)?; let callback_state = CallbackState::new(msg_graph, mem_store).await?; - let result = setup_app(context, callback_state).await; + let result = setup_app(settings, context, callback_state).await; let request = Request::builder().uri("/livez").body(Body::empty())?; @@ -377,9 +414,10 @@ mod tests { #[cfg(feature = "all-tests")] #[tokio::test] async fn test_readyz() -> Result<()> { - let client = async_nats::connect(&config().jetstream.url).await?; + let settings = Arc::new(Settings::default()); + let client = async_nats::connect(&settings.jetstream.url).await?; let context = jetstream::new(client); - let stream_name = &config().jetstream.stream; + let stream_name = &settings.jetstream.stream; let stream = context .get_or_create_stream(stream::Config { @@ -392,11 +430,12 @@ mod tests { assert!(stream.is_ok()); let mem_store = InMemoryStore::new(); - let msg_graph = 
MessageGraph::from_pipeline(min_pipeline_spec())?; + let pipeline_spec = PIPELINE_SPEC_ENCODED.parse().unwrap(); + let msg_graph = MessageGraph::from_pipeline(&pipeline_spec)?; let callback_state = CallbackState::new(msg_graph, mem_store).await?; - let result = setup_app(context, callback_state).await; + let result = setup_app(settings, context, callback_state).await; let request = Request::builder().uri("/readyz").body(Body::empty())?; @@ -415,9 +454,10 @@ mod tests { #[cfg(feature = "all-tests")] #[tokio::test] async fn test_auth_middleware() -> Result<()> { - let client = async_nats::connect(&config().jetstream.url).await?; + let settings = Arc::new(Settings::default()); + let client = async_nats::connect(&settings.jetstream.url).await?; let context = jetstream::new(client); - let stream_name = &config().jetstream.stream; + let stream_name = &settings.jetstream.stream; let stream = context .get_or_create_stream(stream::Config { @@ -430,17 +470,23 @@ mod tests { assert!(stream.is_ok()); let mem_store = InMemoryStore::new(); - let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec())?; + let pipeline_spec = PIPELINE_SPEC_ENCODED.parse().unwrap(); + let msg_graph = MessageGraph::from_pipeline(&pipeline_spec)?; let callback_state = CallbackState::new(msg_graph, mem_store).await?; + let app_state = AppState { + settings, + callback_state, + context, + }; + let app = Router::new() - .nest( - "/v1/process", - routes(context, callback_state).await.unwrap(), - ) - .layer(middleware::from_fn(auth_middleware)); + .nest("/v1/process", routes(app_state).await.unwrap()) + .layer(middleware::from_fn_with_state( + Some("test_token".to_owned()), + auth_middleware, + )); - env::set_var(ENV_NUMAFLOW_SERVING_AUTH_TOKEN, "test_token"); let res = app .oneshot( axum::extract::Request::builder() @@ -451,7 +497,6 @@ mod tests { .await?; assert_eq!(res.status(), StatusCode::UNAUTHORIZED); - env::remove_var(ENV_NUMAFLOW_SERVING_AUTH_TOKEN); Ok(()) } } diff --git 
a/rust/serving/src/app/callback.rs b/rust/serving/src/app/callback.rs index 2fe7a2f6fe..b4d43868ee 100644 --- a/rust/serving/src/app/callback.rs +++ b/rust/serving/src/app/callback.rs @@ -1,14 +1,14 @@ use axum::{body::Bytes, extract::State, http::HeaderMap, routing, Json, Router}; use serde::{Deserialize, Serialize}; -use state::State as CallbackState; use tracing::error; use self::store::Store; use crate::app::response::ApiError; -use crate::config; /// in-memory state store including connection tracking pub(crate) mod state; +use state::State as CallbackState; + /// store for storing the state pub(crate) mod store; @@ -21,38 +21,58 @@ pub(crate) struct CallbackRequest { pub(crate) tags: Option>, } +#[derive(Clone)] +struct CallbackAppState { + tid_header: String, + callback_state: CallbackState, +} + pub fn callback_handler( - callback_store: CallbackState, + tid_header: String, + callback_state: CallbackState, ) -> Router { + let app_state = CallbackAppState { + tid_header, + callback_state, + }; Router::new() .route("/callback", routing::post(callback)) .route("/callback_save", routing::post(callback_save)) - .with_state(callback_store) + .with_state(app_state) } async fn callback_save( - State(mut proxy_state): State>, + State(app_state): State>, headers: HeaderMap, body: Bytes, ) -> Result<(), ApiError> { let id = headers - .get(&config().tid_header) + .get(&app_state.tid_header) .map(|id| String::from_utf8_lossy(id.as_bytes()).to_string()) .ok_or_else(|| ApiError::BadRequest("Missing id header".to_string()))?; - proxy_state.save_response(id, body).await.map_err(|e| { - error!(error=?e, "Saving body from callback save request"); - ApiError::InternalServerError("Failed to save body from callback save request".to_string()) - })?; + app_state + .callback_state + .clone() + .save_response(id, body) + .await + .map_err(|e| { + error!(error=?e, "Saving body from callback save request"); + ApiError::InternalServerError( + "Failed to save body from callback save 
request".to_string(), + ) + })?; Ok(()) } async fn callback( - State(mut proxy_state): State>, + State(app_state): State>, Json(payload): Json>, ) -> Result<(), ApiError> { - proxy_state + app_state + .callback_state + .clone() .insert_callback_requests(payload) .await .map_err(|e| { @@ -72,16 +92,20 @@ mod tests { use tower::ServiceExt; use super::*; + use crate::app::callback::state::State as CallbackState; use crate::app::callback::store::memstore::InMemoryStore; use crate::app::tracker::MessageGraph; - use crate::pipeline::min_pipeline_spec; + use crate::pipeline::PipelineDCG; + + const PIPELINE_SPEC_ENCODED: &str = "eyJ2ZXJ0aWNlcyI6W3sibmFtZSI6ImluIiwic291cmNlIjp7InNlcnZpbmciOnsiYXV0aCI6bnVsbCwic2VydmljZSI6dHJ1ZSwibXNnSURIZWFkZXJLZXkiOiJYLU51bWFmbG93LUlkIiwic3RvcmUiOnsidXJsIjoicmVkaXM6Ly9yZWRpczo2Mzc5In19fSwiY29udGFpbmVyVGVtcGxhdGUiOnsicmVzb3VyY2VzIjp7fSwiaW1hZ2VQdWxsUG9saWN5IjoiTmV2ZXIiLCJlbnYiOlt7Im5hbWUiOiJSVVNUX0xPRyIsInZhbHVlIjoiZGVidWcifV19LCJzY2FsZSI6eyJtaW4iOjF9LCJ1cGRhdGVTdHJhdGVneSI6eyJ0eXBlIjoiUm9sbGluZ1VwZGF0ZSIsInJvbGxpbmdVcGRhdGUiOnsibWF4VW5hdmFpbGFibGUiOiIyNSUifX19LHsibmFtZSI6InBsYW5uZXIiLCJ1ZGYiOnsiY29udGFpbmVyIjp7ImltYWdlIjoiYXNjaWk6MC4xIiwiYXJncyI6WyJwbGFubmVyIl0sInJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn0sImJ1aWx0aW4iOm51bGwsImdyb3VwQnkiOm51bGx9LCJjb250YWluZXJUZW1wbGF0ZSI6eyJyZXNvdXJjZXMiOnt9LCJpbWFnZVB1bGxQb2xpY3kiOiJOZXZlciJ9LCJzY2FsZSI6eyJtaW4iOjF9LCJ1cGRhdGVTdHJhdGVneSI6eyJ0eXBlIjoiUm9sbGluZ1VwZGF0ZSIsInJvbGxpbmdVcGRhdGUiOnsibWF4VW5hdmFpbGFibGUiOiIyNSUifX19LHsibmFtZSI6InRpZ2VyIiwidWRmIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6ImFzY2lpOjAuMSIsImFyZ3MiOlsidGlnZXIiXSwicmVzb3VyY2VzIjp7fSwiaW1hZ2VQdWxsUG9saWN5IjoiTmV2ZXIifSwiYnVpbHRpbiI6bnVsbCwiZ3JvdXBCeSI6bnVsbH0sImNvbnRhaW5lclRlbXBsYXRlIjp7InJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn0sInNjYWxlIjp7Im1pbiI6MX0sInVwZGF0ZVN0cmF0ZWd5Ijp7InR5cGUiOiJSb2xsaW5nVXBkYXRlIiwicm9sbGluZ1VwZGF0ZSI6eyJtYXhVbmF2YWlsYWJsZSI6IjI1JSJ9fX0seyJuYW1lIjoiZG9nIiwidWRmIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6ImFzY2l
pOjAuMSIsImFyZ3MiOlsiZG9nIl0sInJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn0sImJ1aWx0aW4iOm51bGwsImdyb3VwQnkiOm51bGx9LCJjb250YWluZXJUZW1wbGF0ZSI6eyJyZXNvdXJjZXMiOnt9LCJpbWFnZVB1bGxQb2xpY3kiOiJOZXZlciJ9LCJzY2FsZSI6eyJtaW4iOjF9LCJ1cGRhdGVTdHJhdGVneSI6eyJ0eXBlIjoiUm9sbGluZ1VwZGF0ZSIsInJvbGxpbmdVcGRhdGUiOnsibWF4VW5hdmFpbGFibGUiOiIyNSUifX19LHsibmFtZSI6ImVsZXBoYW50IiwidWRmIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6ImFzY2lpOjAuMSIsImFyZ3MiOlsiZWxlcGhhbnQiXSwicmVzb3VyY2VzIjp7fSwiaW1hZ2VQdWxsUG9saWN5IjoiTmV2ZXIifSwiYnVpbHRpbiI6bnVsbCwiZ3JvdXBCeSI6bnVsbH0sImNvbnRhaW5lclRlbXBsYXRlIjp7InJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn0sInNjYWxlIjp7Im1pbiI6MX0sInVwZGF0ZVN0cmF0ZWd5Ijp7InR5cGUiOiJSb2xsaW5nVXBkYXRlIiwicm9sbGluZ1VwZGF0ZSI6eyJtYXhVbmF2YWlsYWJsZSI6IjI1JSJ9fX0seyJuYW1lIjoiYXNjaWlhcnQiLCJ1ZGYiOnsiY29udGFpbmVyIjp7ImltYWdlIjoiYXNjaWk6MC4xIiwiYXJncyI6WyJhc2NpaWFydCJdLCJyZXNvdXJjZXMiOnt9LCJpbWFnZVB1bGxQb2xpY3kiOiJOZXZlciJ9LCJidWlsdGluIjpudWxsLCJncm91cEJ5IjpudWxsfSwiY29udGFpbmVyVGVtcGxhdGUiOnsicmVzb3VyY2VzIjp7fSwiaW1hZ2VQdWxsUG9saWN5IjoiTmV2ZXIifSwic2NhbGUiOnsibWluIjoxfSwidXBkYXRlU3RyYXRlZ3kiOnsidHlwZSI6IlJvbGxpbmdVcGRhdGUiLCJyb2xsaW5nVXBkYXRlIjp7Im1heFVuYXZhaWxhYmxlIjoiMjUlIn19fSx7Im5hbWUiOiJzZXJ2ZS1zaW5rIiwic2luayI6eyJ1ZHNpbmsiOnsiY29udGFpbmVyIjp7ImltYWdlIjoic2VydmVzaW5rOjAuMSIsImVudiI6W3sibmFtZSI6Ik5VTUFGTE9XX0NBTExCQUNLX1VSTF9LRVkiLCJ2YWx1ZSI6IlgtTnVtYWZsb3ctQ2FsbGJhY2stVXJsIn0seyJuYW1lIjoiTlVNQUZMT1dfTVNHX0lEX0hFQURFUl9LRVkiLCJ2YWx1ZSI6IlgtTnVtYWZsb3ctSWQifV0sInJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn19LCJyZXRyeVN0cmF0ZWd5Ijp7fX0sImNvbnRhaW5lclRlbXBsYXRlIjp7InJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn0sInNjYWxlIjp7Im1pbiI6MX0sInVwZGF0ZVN0cmF0ZWd5Ijp7InR5cGUiOiJSb2xsaW5nVXBkYXRlIiwicm9sbGluZ1VwZGF0ZSI6eyJtYXhVbmF2YWlsYWJsZSI6IjI1JSJ9fX0seyJuYW1lIjoiZXJyb3Itc2luayIsInNpbmsiOnsidWRzaW5rIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6InNlcnZlc2luazowLjEiLCJlbnYiOlt7Im5hbWUiOiJOVU1BRkxPV19DQUxMQkFDS19VUkxfS0VZIiwidmFsdWUiOiJYLU51bWFmbG93LUNhbGxiYWNrLVVybCJ9LHsibmFtZSI
6Ik5VTUFGTE9XX01TR19JRF9IRUFERVJfS0VZIiwidmFsdWUiOiJYLU51bWFmbG93LUlkIn1dLCJyZXNvdXJjZXMiOnt9LCJpbWFnZVB1bGxQb2xpY3kiOiJOZXZlciJ9fSwicmV0cnlTdHJhdGVneSI6e319LCJjb250YWluZXJUZW1wbGF0ZSI6eyJyZXNvdXJjZXMiOnt9LCJpbWFnZVB1bGxQb2xpY3kiOiJOZXZlciJ9LCJzY2FsZSI6eyJtaW4iOjF9LCJ1cGRhdGVTdHJhdGVneSI6eyJ0eXBlIjoiUm9sbGluZ1VwZGF0ZSIsInJvbGxpbmdVcGRhdGUiOnsibWF4VW5hdmFpbGFibGUiOiIyNSUifX19XSwiZWRnZXMiOlt7ImZyb20iOiJpbiIsInRvIjoicGxhbm5lciIsImNvbmRpdGlvbnMiOm51bGx9LHsiZnJvbSI6InBsYW5uZXIiLCJ0byI6ImFzY2lpYXJ0IiwiY29uZGl0aW9ucyI6eyJ0YWdzIjp7Im9wZXJhdG9yIjoib3IiLCJ2YWx1ZXMiOlsiYXNjaWlhcnQiXX19fSx7ImZyb20iOiJwbGFubmVyIiwidG8iOiJ0aWdlciIsImNvbmRpdGlvbnMiOnsidGFncyI6eyJvcGVyYXRvciI6Im9yIiwidmFsdWVzIjpbInRpZ2VyIl19fX0seyJmcm9tIjoicGxhbm5lciIsInRvIjoiZG9nIiwiY29uZGl0aW9ucyI6eyJ0YWdzIjp7Im9wZXJhdG9yIjoib3IiLCJ2YWx1ZXMiOlsiZG9nIl19fX0seyJmcm9tIjoicGxhbm5lciIsInRvIjoiZWxlcGhhbnQiLCJjb25kaXRpb25zIjp7InRhZ3MiOnsib3BlcmF0b3IiOiJvciIsInZhbHVlcyI6WyJlbGVwaGFudCJdfX19LHsiZnJvbSI6InRpZ2VyIiwidG8iOiJzZXJ2ZS1zaW5rIiwiY29uZGl0aW9ucyI6bnVsbH0seyJmcm9tIjoiZG9nIiwidG8iOiJzZXJ2ZS1zaW5rIiwiY29uZGl0aW9ucyI6bnVsbH0seyJmcm9tIjoiZWxlcGhhbnQiLCJ0byI6InNlcnZlLXNpbmsiLCJjb25kaXRpb25zIjpudWxsfSx7ImZyb20iOiJhc2NpaWFydCIsInRvIjoic2VydmUtc2luayIsImNvbmRpdGlvbnMiOm51bGx9LHsiZnJvbSI6InBsYW5uZXIiLCJ0byI6ImVycm9yLXNpbmsiLCJjb25kaXRpb25zIjp7InRhZ3MiOnsib3BlcmF0b3IiOiJvciIsInZhbHVlcyI6WyJlcnJvciJdfX19XSwibGlmZWN5Y2xlIjp7fSwid2F0ZXJtYXJrIjp7fX0="; #[tokio::test] async fn test_callback_failure() { let store = InMemoryStore::new(); - let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec()).unwrap(); + let pipeline_spec: PipelineDCG = PIPELINE_SPEC_ENCODED.parse().unwrap(); + let msg_graph = MessageGraph::from_pipeline(&pipeline_spec).unwrap(); let state = CallbackState::new(msg_graph, store).await.unwrap(); - let app = callback_handler(state); + let app = callback_handler("ID".to_owned(), state); let payload = vec![CallbackRequest { id: "test_id".to_string(), @@ -106,7 +130,8 @@ mod tests { #[tokio::test] async fn 
test_callback_success() { let store = InMemoryStore::new(); - let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec()).unwrap(); + let pipeline_spec: PipelineDCG = PIPELINE_SPEC_ENCODED.parse().unwrap(); + let msg_graph = MessageGraph::from_pipeline(&pipeline_spec).unwrap(); let mut state = CallbackState::new(msg_graph, store).await.unwrap(); let x = state.register("test_id".to_string()); @@ -115,7 +140,7 @@ mod tests { let _ = x.await.unwrap(); }); - let app = callback_handler(state); + let app = callback_handler("ID".to_owned(), state); let payload = vec![ CallbackRequest { @@ -157,9 +182,10 @@ mod tests { #[tokio::test] async fn test_callback_save() { let store = InMemoryStore::new(); - let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec()).unwrap(); + let pipeline_spec: PipelineDCG = PIPELINE_SPEC_ENCODED.parse().unwrap(); + let msg_graph = MessageGraph::from_pipeline(&pipeline_spec).unwrap(); let state = CallbackState::new(msg_graph, store).await.unwrap(); - let app = callback_handler(state); + let app = callback_handler("ID".to_owned(), state); let res = Request::builder() .method("POST") @@ -176,9 +202,10 @@ mod tests { #[tokio::test] async fn test_without_id_header() { let store = InMemoryStore::new(); - let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec()).unwrap(); + let pipeline_spec: PipelineDCG = PIPELINE_SPEC_ENCODED.parse().unwrap(); + let msg_graph = MessageGraph::from_pipeline(&pipeline_spec).unwrap(); let state = CallbackState::new(msg_graph, store).await.unwrap(); - let app = callback_handler(state); + let app = callback_handler("ID".to_owned(), state); let res = Request::builder() .method("POST") diff --git a/rust/serving/src/app/callback/state.rs b/rust/serving/src/app/callback/state.rs index db145f5beb..293478ead2 100644 --- a/rust/serving/src/app/callback/state.rs +++ b/rust/serving/src/app/callback/state.rs @@ -236,11 +236,14 @@ mod tests { use super::*; use crate::app::callback::store::memstore::InMemoryStore; - 
use crate::pipeline::min_pipeline_spec; + use crate::pipeline::PipelineDCG; + + const PIPELINE_SPEC_ENCODED: &str = "eyJ2ZXJ0aWNlcyI6W3sibmFtZSI6ImluIiwic291cmNlIjp7InNlcnZpbmciOnsiYXV0aCI6bnVsbCwic2VydmljZSI6dHJ1ZSwibXNnSURIZWFkZXJLZXkiOiJYLU51bWFmbG93LUlkIiwic3RvcmUiOnsidXJsIjoicmVkaXM6Ly9yZWRpczo2Mzc5In19fSwiY29udGFpbmVyVGVtcGxhdGUiOnsicmVzb3VyY2VzIjp7fSwiaW1hZ2VQdWxsUG9saWN5IjoiTmV2ZXIiLCJlbnYiOlt7Im5hbWUiOiJSVVNUX0xPRyIsInZhbHVlIjoiZGVidWcifV19LCJzY2FsZSI6eyJtaW4iOjF9LCJ1cGRhdGVTdHJhdGVneSI6eyJ0eXBlIjoiUm9sbGluZ1VwZGF0ZSIsInJvbGxpbmdVcGRhdGUiOnsibWF4VW5hdmFpbGFibGUiOiIyNSUifX19LHsibmFtZSI6InBsYW5uZXIiLCJ1ZGYiOnsiY29udGFpbmVyIjp7ImltYWdlIjoiYXNjaWk6MC4xIiwiYXJncyI6WyJwbGFubmVyIl0sInJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn0sImJ1aWx0aW4iOm51bGwsImdyb3VwQnkiOm51bGx9LCJjb250YWluZXJUZW1wbGF0ZSI6eyJyZXNvdXJjZXMiOnt9LCJpbWFnZVB1bGxQb2xpY3kiOiJOZXZlciJ9LCJzY2FsZSI6eyJtaW4iOjF9LCJ1cGRhdGVTdHJhdGVneSI6eyJ0eXBlIjoiUm9sbGluZ1VwZGF0ZSIsInJvbGxpbmdVcGRhdGUiOnsibWF4VW5hdmFpbGFibGUiOiIyNSUifX19LHsibmFtZSI6InRpZ2VyIiwidWRmIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6ImFzY2lpOjAuMSIsImFyZ3MiOlsidGlnZXIiXSwicmVzb3VyY2VzIjp7fSwiaW1hZ2VQdWxsUG9saWN5IjoiTmV2ZXIifSwiYnVpbHRpbiI6bnVsbCwiZ3JvdXBCeSI6bnVsbH0sImNvbnRhaW5lclRlbXBsYXRlIjp7InJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn0sInNjYWxlIjp7Im1pbiI6MX0sInVwZGF0ZVN0cmF0ZWd5Ijp7InR5cGUiOiJSb2xsaW5nVXBkYXRlIiwicm9sbGluZ1VwZGF0ZSI6eyJtYXhVbmF2YWlsYWJsZSI6IjI1JSJ9fX0seyJuYW1lIjoiZG9nIiwidWRmIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6ImFzY2lpOjAuMSIsImFyZ3MiOlsiZG9nIl0sInJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn0sImJ1aWx0aW4iOm51bGwsImdyb3VwQnkiOm51bGx9LCJjb250YWluZXJUZW1wbGF0ZSI6eyJyZXNvdXJjZXMiOnt9LCJpbWFnZVB1bGxQb2xpY3kiOiJOZXZlciJ9LCJzY2FsZSI6eyJtaW4iOjF9LCJ1cGRhdGVTdHJhdGVneSI6eyJ0eXBlIjoiUm9sbGluZ1VwZGF0ZSIsInJvbGxpbmdVcGRhdGUiOnsibWF4VW5hdmFpbGFibGUiOiIyNSUifX19LHsibmFtZSI6ImVsZXBoYW50IiwidWRmIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6ImFzY2lpOjAuMSIsImFyZ3MiOlsiZWxlcGhhbnQiXSwicmVzb3VyY2VzIjp7fSwiaW1hZ2VQdWxsUG9saWN5IjoiTmV2ZXIifSwiYnV
pbHRpbiI6bnVsbCwiZ3JvdXBCeSI6bnVsbH0sImNvbnRhaW5lclRlbXBsYXRlIjp7InJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn0sInNjYWxlIjp7Im1pbiI6MX0sInVwZGF0ZVN0cmF0ZWd5Ijp7InR5cGUiOiJSb2xsaW5nVXBkYXRlIiwicm9sbGluZ1VwZGF0ZSI6eyJtYXhVbmF2YWlsYWJsZSI6IjI1JSJ9fX0seyJuYW1lIjoiYXNjaWlhcnQiLCJ1ZGYiOnsiY29udGFpbmVyIjp7ImltYWdlIjoiYXNjaWk6MC4xIiwiYXJncyI6WyJhc2NpaWFydCJdLCJyZXNvdXJjZXMiOnt9LCJpbWFnZVB1bGxQb2xpY3kiOiJOZXZlciJ9LCJidWlsdGluIjpudWxsLCJncm91cEJ5IjpudWxsfSwiY29udGFpbmVyVGVtcGxhdGUiOnsicmVzb3VyY2VzIjp7fSwiaW1hZ2VQdWxsUG9saWN5IjoiTmV2ZXIifSwic2NhbGUiOnsibWluIjoxfSwidXBkYXRlU3RyYXRlZ3kiOnsidHlwZSI6IlJvbGxpbmdVcGRhdGUiLCJyb2xsaW5nVXBkYXRlIjp7Im1heFVuYXZhaWxhYmxlIjoiMjUlIn19fSx7Im5hbWUiOiJzZXJ2ZS1zaW5rIiwic2luayI6eyJ1ZHNpbmsiOnsiY29udGFpbmVyIjp7ImltYWdlIjoic2VydmVzaW5rOjAuMSIsImVudiI6W3sibmFtZSI6Ik5VTUFGTE9XX0NBTExCQUNLX1VSTF9LRVkiLCJ2YWx1ZSI6IlgtTnVtYWZsb3ctQ2FsbGJhY2stVXJsIn0seyJuYW1lIjoiTlVNQUZMT1dfTVNHX0lEX0hFQURFUl9LRVkiLCJ2YWx1ZSI6IlgtTnVtYWZsb3ctSWQifV0sInJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn19LCJyZXRyeVN0cmF0ZWd5Ijp7fX0sImNvbnRhaW5lclRlbXBsYXRlIjp7InJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn0sInNjYWxlIjp7Im1pbiI6MX0sInVwZGF0ZVN0cmF0ZWd5Ijp7InR5cGUiOiJSb2xsaW5nVXBkYXRlIiwicm9sbGluZ1VwZGF0ZSI6eyJtYXhVbmF2YWlsYWJsZSI6IjI1JSJ9fX0seyJuYW1lIjoiZXJyb3Itc2luayIsInNpbmsiOnsidWRzaW5rIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6InNlcnZlc2luazowLjEiLCJlbnYiOlt7Im5hbWUiOiJOVU1BRkxPV19DQUxMQkFDS19VUkxfS0VZIiwidmFsdWUiOiJYLU51bWFmbG93LUNhbGxiYWNrLVVybCJ9LHsibmFtZSI6Ik5VTUFGTE9XX01TR19JRF9IRUFERVJfS0VZIiwidmFsdWUiOiJYLU51bWFmbG93LUlkIn1dLCJyZXNvdXJjZXMiOnt9LCJpbWFnZVB1bGxQb2xpY3kiOiJOZXZlciJ9fSwicmV0cnlTdHJhdGVneSI6e319LCJjb250YWluZXJUZW1wbGF0ZSI6eyJyZXNvdXJjZXMiOnt9LCJpbWFnZVB1bGxQb2xpY3kiOiJOZXZlciJ9LCJzY2FsZSI6eyJtaW4iOjF9LCJ1cGRhdGVTdHJhdGVneSI6eyJ0eXBlIjoiUm9sbGluZ1VwZGF0ZSIsInJvbGxpbmdVcGRhdGUiOnsibWF4VW5hdmFpbGFibGUiOiIyNSUifX19XSwiZWRnZXMiOlt7ImZyb20iOiJpbiIsInRvIjoicGxhbm5lciIsImNvbmRpdGlvbnMiOm51bGx9LHsiZnJvbSI6InBsYW5uZXIiLCJ0byI6ImFzY2lpYXJ0IiwiY29uZGl0aW9
ucyI6eyJ0YWdzIjp7Im9wZXJhdG9yIjoib3IiLCJ2YWx1ZXMiOlsiYXNjaWlhcnQiXX19fSx7ImZyb20iOiJwbGFubmVyIiwidG8iOiJ0aWdlciIsImNvbmRpdGlvbnMiOnsidGFncyI6eyJvcGVyYXRvciI6Im9yIiwidmFsdWVzIjpbInRpZ2VyIl19fX0seyJmcm9tIjoicGxhbm5lciIsInRvIjoiZG9nIiwiY29uZGl0aW9ucyI6eyJ0YWdzIjp7Im9wZXJhdG9yIjoib3IiLCJ2YWx1ZXMiOlsiZG9nIl19fX0seyJmcm9tIjoicGxhbm5lciIsInRvIjoiZWxlcGhhbnQiLCJjb25kaXRpb25zIjp7InRhZ3MiOnsib3BlcmF0b3IiOiJvciIsInZhbHVlcyI6WyJlbGVwaGFudCJdfX19LHsiZnJvbSI6InRpZ2VyIiwidG8iOiJzZXJ2ZS1zaW5rIiwiY29uZGl0aW9ucyI6bnVsbH0seyJmcm9tIjoiZG9nIiwidG8iOiJzZXJ2ZS1zaW5rIiwiY29uZGl0aW9ucyI6bnVsbH0seyJmcm9tIjoiZWxlcGhhbnQiLCJ0byI6InNlcnZlLXNpbmsiLCJjb25kaXRpb25zIjpudWxsfSx7ImZyb20iOiJhc2NpaWFydCIsInRvIjoic2VydmUtc2luayIsImNvbmRpdGlvbnMiOm51bGx9LHsiZnJvbSI6InBsYW5uZXIiLCJ0byI6ImVycm9yLXNpbmsiLCJjb25kaXRpb25zIjp7InRhZ3MiOnsib3BlcmF0b3IiOiJvciIsInZhbHVlcyI6WyJlcnJvciJdfX19XSwibGlmZWN5Y2xlIjp7fSwid2F0ZXJtYXJrIjp7fX0="; #[tokio::test] async fn test_state() { - let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec()).unwrap(); + let pipeline_spec: PipelineDCG = PIPELINE_SPEC_ENCODED.parse().unwrap(); + let msg_graph = MessageGraph::from_pipeline(&pipeline_spec).unwrap(); let store = InMemoryStore::new(); let mut state = State::new(msg_graph, store).await.unwrap(); @@ -277,16 +280,37 @@ mod tests { }, CallbackRequest { id: id.clone(), - vertex: "cat".to_string(), + vertex: "planner".to_string(), cb_time: 12345, from_vertex: "in".to_string(), + tags: Some(vec!["tiger".to_owned(), "asciiart".to_owned()]), + }, + CallbackRequest { + id: id.clone(), + vertex: "tiger".to_string(), + cb_time: 12345, + from_vertex: "planner".to_string(), + tags: None, + }, + CallbackRequest { + id: id.clone(), + vertex: "asciiart".to_string(), + cb_time: 12345, + from_vertex: "planner".to_string(), + tags: None, + }, + CallbackRequest { + id: id.clone(), + vertex: "serve-sink".to_string(), + cb_time: 12345, + from_vertex: "tiger".to_string(), tags: None, }, CallbackRequest { id: id.clone(), - vertex: 
"out".to_string(), + vertex: "serve-sink".to_string(), cb_time: 12345, - from_vertex: "cat".to_string(), + from_vertex: "asciiart".to_string(), tags: None, }, ]; @@ -300,7 +324,8 @@ mod tests { #[tokio::test] async fn test_retrieve_saved_no_entry() { - let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec()).unwrap(); + let pipeline_spec: PipelineDCG = PIPELINE_SPEC_ENCODED.parse().unwrap(); + let msg_graph = MessageGraph::from_pipeline(&pipeline_spec).unwrap(); let store = InMemoryStore::new(); let mut state = State::new(msg_graph, store).await.unwrap(); @@ -315,7 +340,8 @@ mod tests { #[tokio::test] async fn test_insert_callback_requests_invalid_id() { - let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec()).unwrap(); + let pipeline_spec: PipelineDCG = PIPELINE_SPEC_ENCODED.parse().unwrap(); + let msg_graph = MessageGraph::from_pipeline(&pipeline_spec).unwrap(); let store = InMemoryStore::new(); let mut state = State::new(msg_graph, store).await.unwrap(); @@ -336,7 +362,8 @@ mod tests { #[tokio::test] async fn test_retrieve_subgraph_from_storage_no_entry() { - let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec()).unwrap(); + let pipeline_spec: PipelineDCG = PIPELINE_SPEC_ENCODED.parse().unwrap(); + let msg_graph = MessageGraph::from_pipeline(&pipeline_spec).unwrap(); let store = InMemoryStore::new(); let mut state = State::new(msg_graph, store).await.unwrap(); diff --git a/rust/serving/src/app/callback/store/redisstore.rs b/rust/serving/src/app/callback/store/redisstore.rs index 6e5decf880..4439e7ce8e 100644 --- a/rust/serving/src/app/callback/store/redisstore.rs +++ b/rust/serving/src/app/callback/store/redisstore.rs @@ -8,9 +8,10 @@ use tokio::sync::Semaphore; use super::PayloadToSave; use crate::app::callback::CallbackRequest; +use crate::config::RedisConfig; use crate::consts::SAVED; +use crate::Error; use crate::Error::Connection; -use crate::{config, Error}; const LPUSH: &str = "LPUSH"; const LRANGE: &str = "LRANGE"; @@ -19,14 
+20,14 @@ const EXPIRE: &str = "EXPIRE"; // Handle to the Redis actor. #[derive(Clone)] pub(crate) struct RedisConnection { - max_tasks: usize, conn_manager: ConnectionManager, + config: RedisConfig, } impl RedisConnection { /// Creates a new RedisConnection with concurrent operations on Redis set by max_tasks. - pub(crate) async fn new(addr: &str, max_tasks: usize) -> crate::Result { - let client = redis::Client::open(addr) + pub(crate) async fn new(config: RedisConfig) -> crate::Result { + let client = redis::Client::open(config.addr.as_str()) .map_err(|e| Connection(format!("Creating Redis client: {e:?}")))?; let conn = client .get_connection_manager() @@ -34,37 +35,13 @@ impl RedisConnection { .map_err(|e| Connection(format!("Connecting to Redis server: {e:?}")))?; Ok(Self { conn_manager: conn, - max_tasks, + config, }) } - async fn handle_write_requests( - conn_manager: &mut ConnectionManager, - msg: PayloadToSave, - ) -> crate::Result<()> { - match msg { - PayloadToSave::Callback { key, value } => { - // Convert the CallbackRequest to a byte array - let value = serde_json::to_vec(&*value) - .map_err(|e| Error::StoreWrite(format!("Serializing payload - {}", e)))?; - - Self::write_to_redis(conn_manager, &key, &value).await - } - - // Write the byte array to Redis - PayloadToSave::DatumFromPipeline { key, value } => { - // we have to differentiate between the saved responses and the callback requests - // saved responses are stored in "id_SAVED", callback requests are stored in "id" - let key = format!("{}_{}", key, SAVED); - let value: Vec = value.into(); - - Self::write_to_redis(conn_manager, &key, &value).await - } - } - } - async fn execute_redis_cmd( conn_manager: &mut ConnectionManager, + ttl_secs: Option, key: &str, val: &Vec, ) -> Result<(), RedisError> { @@ -72,7 +49,7 @@ impl RedisConnection { pipe.cmd(LPUSH).arg(key).arg(val); // if the ttl is configured, add the EXPIRE command to the pipeline - if let Some(ttl) = config().redis.ttl_secs { + if let 
Some(ttl) = ttl_secs { pipe.cmd(EXPIRE).arg(key).arg(ttl); } @@ -81,19 +58,21 @@ impl RedisConnection { } // write to Redis with retries - async fn write_to_redis( - conn_manager: &mut ConnectionManager, - key: &str, - value: &Vec, - ) -> crate::Result<()> { - let interval = fixed::Interval::from_millis(config().redis.retries_duration_millis.into()) - .take(config().redis.retries); + async fn write_to_redis(&self, key: &str, value: &Vec) -> crate::Result<()> { + let interval = fixed::Interval::from_millis(self.config.retries_duration_millis.into()) + .take(self.config.retries); Retry::retry( interval, || async { // https://hackmd.io/@compiler-errors/async-closures - Self::execute_redis_cmd(&mut conn_manager.clone(), key, value).await + Self::execute_redis_cmd( + &mut self.conn_manager.clone(), + self.config.ttl_secs, + key, + value, + ) + .await }, |e: &RedisError| !e.is_unrecoverable_error(), ) @@ -102,6 +81,31 @@ impl RedisConnection { } } +async fn handle_write_requests( + redis_conn: RedisConnection, + msg: PayloadToSave, +) -> crate::Result<()> { + match msg { + PayloadToSave::Callback { key, value } => { + // Convert the CallbackRequest to a byte array + let value = serde_json::to_vec(&*value) + .map_err(|e| Error::StoreWrite(format!("Serializing payload - {}", e)))?; + + redis_conn.write_to_redis(&key, &value).await + } + + // Write the byte array to Redis + PayloadToSave::DatumFromPipeline { key, value } => { + // we have to differentiate between the saved responses and the callback requests + // saved responses are stored in "id_SAVED", callback requests are stored in "id" + let key = format!("{}_{}", key, SAVED); + let value: Vec = value.into(); + + redis_conn.write_to_redis(&key, &value).await + } + } +} + // It is possible to move the methods defined here to be methods on the Redis actor and communicate through channels. // With that, all public APIs defined on RedisConnection can be on &self (immutable). 
impl super::Store for RedisConnection { @@ -110,13 +114,13 @@ impl super::Store for RedisConnection { let mut tasks = vec![]; // This is put in place not to overload Redis and also way some kind of // flow control. - let sem = Arc::new(Semaphore::new(self.max_tasks)); + let sem = Arc::new(Semaphore::new(self.config.max_tasks)); for msg in messages { let permit = Arc::clone(&sem).acquire_owned().await; - let mut _conn_mgr = self.conn_manager.clone(); + let redis_conn = self.clone(); let task = tokio::spawn(async move { let _permit = permit; - Self::handle_write_requests(&mut _conn_mgr, msg).await + handle_write_requests(redis_conn, msg).await }); tasks.push(task); } @@ -205,12 +209,16 @@ mod tests { #[tokio::test] async fn test_redis_store() { - let redis_connection = RedisConnection::new("no_such_redis://127.0.0.1:6379", 10).await; + let redis_config = RedisConfig { + addr: "no_such_redis://127.0.0.1:6379".to_owned(), + max_tasks: 10, + ..Default::default() + }; + let redis_connection = RedisConnection::new(redis_config).await; assert!(redis_connection.is_err()); // Test Redis connection - let redis_connection = - RedisConnection::new(format!("redis://127.0.0.1:{}", "6379").as_str(), 10).await; + let redis_connection = RedisConnection::new(RedisConfig::default()).await; assert!(redis_connection.is_ok()); let key = uuid::Uuid::new_v4().to_string(); @@ -273,7 +281,11 @@ mod tests { #[tokio::test] async fn test_redis_ttl() { - let redis_connection = RedisConnection::new("redis://127.0.0.1:6379", 10) + let redis_config = RedisConfig { + max_tasks: 10, + ..Default::default() + }; + let redis_connection = RedisConnection::new(redis_config) .await .expect("Failed to connect to Redis"); @@ -287,14 +299,12 @@ mod tests { }); // Save with TTL of 1 second + redis_connection + .write_to_redis(&key, &serde_json::to_vec(&*value).unwrap()) + .await + .expect("Failed to write to Redis"); + let mut conn_manager = redis_connection.conn_manager.clone(); - 
RedisConnection::write_to_redis( - &mut conn_manager, - &key, - &serde_json::to_vec(&*value).unwrap(), - ) - .await - .expect("Failed to write to Redis"); let exists: bool = conn_manager .exists(&key) diff --git a/rust/serving/src/app/direct_proxy.rs b/rust/serving/src/app/direct_proxy.rs index 1f80ff5e7f..9f08321e23 100644 --- a/rust/serving/src/app/direct_proxy.rs +++ b/rust/serving/src/app/direct_proxy.rs @@ -11,7 +11,6 @@ use hyper_util::client::legacy::connect::HttpConnector; use tracing::error; use crate::app::response::ApiError; -use crate::config; pub(crate) type Client = hyper_util::client::legacy::Client; @@ -24,11 +23,15 @@ pub(crate) type Client = hyper_util::client::legacy::Client #[derive(Clone, Debug)] struct ProxyState { client: Client, + upstream_addr: String, } /// Router for direct proxy. -pub(crate) fn direct_proxy(client: Client) -> Router { - let proxy_state = ProxyState { client }; +pub(crate) fn direct_proxy(client: Client, upstream_addr: String) -> Router { + let proxy_state = ProxyState { + client, + upstream_addr, + }; Router::new() // https://docs.rs/axum/latest/axum/struct.Router.html#wildcards @@ -44,7 +47,7 @@ async fn proxy( // This handler is registered with wildcard capture /*upstream. So the path here will never be empty. 
let path_query = request.uri().path_and_query().unwrap(); - let upstream_uri = format!("http://{}{}", &config().upstream_addr, path_query); + let upstream_uri = format!("http://{}{}", &proxy_state.upstream_addr, path_query); *request.uri_mut() = Uri::try_from(&upstream_uri) .inspect_err(|e| error!(?e, upstream_uri, "Parsing URI for upstream")) .map_err(|e| ApiError::BadRequest(e.to_string()))?; @@ -69,10 +72,8 @@ mod tests { use tower::ServiceExt; use crate::app::direct_proxy::direct_proxy; - use crate::config; - async fn start_server() { - let addr = config().upstream_addr.to_string(); + async fn start_server(addr: String) { let listener = TcpListener::bind(&addr).await.unwrap(); tokio::spawn(async move { loop { @@ -98,11 +99,12 @@ mod tests { #[tokio::test] async fn test_direct_proxy() { - start_server().await; + let upstream_addr = "localhost:4321".to_owned(); + start_server(upstream_addr.clone()).await; let client = hyper_util::client::legacy::Client::<(), ()>::builder(TokioExecutor::new()) .build(HttpConnector::new()); - let app = direct_proxy(client); + let app = direct_proxy(client, upstream_addr.clone()); // Test valid request let res = Request::builder() diff --git a/rust/serving/src/app/jetstream_proxy.rs b/rust/serving/src/app/jetstream_proxy.rs index 8123197857..af7d3917ff 100644 --- a/rust/serving/src/app/jetstream_proxy.rs +++ b/rust/serving/src/app/jetstream_proxy.rs @@ -1,4 +1,4 @@ -use std::borrow::Borrow; +use std::{borrow::Borrow, sync::Arc}; use async_nats::{jetstream::Context, HeaderMap as JSHeaderMap}; use axum::{ @@ -12,10 +12,9 @@ use axum::{ use tracing::error; use uuid::Uuid; -use super::callback::{state::State as CallbackState, store::Store}; +use super::{callback::store::Store, AppState}; use crate::app::callback::state; use crate::app::response::{ApiError, ServeResponse}; -use crate::config; // TODO: // - [ ] better health check @@ -33,34 +32,31 @@ use crate::config; // "from_vertex": "a" // } -const ID_HEADER_KEY: &str = 
"X-Numaflow-Id"; const CALLBACK_URL_KEY: &str = "X-Numaflow-Callback-Url"; - const NUMAFLOW_RESP_ARRAY_LEN: &str = "Numaflow-Array-Len"; const NUMAFLOW_RESP_ARRAY_IDX_LEN: &str = "Numaflow-Array-Index-Len"; -#[derive(Clone)] struct ProxyState { + tid_header: String, context: Context, callback: state::State, - stream: &'static str, + stream: String, callback_url: String, } pub(crate) async fn jetstream_proxy( - context: Context, - callback_store: CallbackState, + state: AppState, ) -> crate::Result { - let proxy_state = ProxyState { - context, - callback: callback_store, - stream: &config().jetstream.stream, + let proxy_state = Arc::new(ProxyState { + tid_header: state.settings.tid_header.clone(), + context: state.context.clone(), + callback: state.callback_state.clone(), + stream: state.settings.jetstream.stream.clone(), callback_url: format!( "https://{}:{}/v1/process/callback", - config().host_ip, - config().app_listen_port + state.settings.host_ip, state.settings.app_listen_port ), - }; + }); let router = Router::new() .route("/async", post(async_publish)) @@ -71,27 +67,28 @@ pub(crate) async fn jetstream_proxy( } async fn sync_publish_serve( - State(mut proxy_state): State>, + State(proxy_state): State>>, headers: HeaderMap, body: Bytes, ) -> impl IntoResponse { - let id = extract_id_from_headers(&headers); + let id = extract_id_from_headers(&proxy_state.tid_header, &headers); // Register the ID in the callback proxy state - let notify = proxy_state.callback.register(id.clone()); + let notify = proxy_state.callback.clone().register(id.clone()); if let Err(e) = publish_to_jetstream( - proxy_state.stream, + proxy_state.stream.clone(), &proxy_state.callback_url, headers, body, - proxy_state.context, - id.clone(), + proxy_state.context.clone(), + proxy_state.tid_header.as_str(), + id.as_str(), ) .await { // Deregister the ID in the callback proxy state if writing to Jetstream fails - let _ = proxy_state.callback.deregister(&id).await; + let _ = 
proxy_state.callback.clone().deregister(&id).await; error!(error = ?e, "Publishing message to Jetstream for sync serve request"); return Err(ApiError::BadGateway( "Failed to write message to Jetstream".to_string(), @@ -106,7 +103,7 @@ async fn sync_publish_serve( )); } - let result = match proxy_state.callback.retrieve_saved(&id).await { + let result = match proxy_state.callback.clone().retrieve_saved(&id).await { Ok(result) => result, Err(e) => { error!(error = ?e, "Failed to retrieve from redis"); @@ -140,27 +137,28 @@ async fn sync_publish_serve( } async fn sync_publish( - State(mut proxy_state): State>, + State(proxy_state): State>>, headers: HeaderMap, body: Bytes, ) -> Result, ApiError> { - let id = extract_id_from_headers(&headers); + let id = extract_id_from_headers(&proxy_state.tid_header, &headers); // Register the ID in the callback proxy state - let notify = proxy_state.callback.register(id.clone()); + let notify = proxy_state.callback.clone().register(id.clone()); if let Err(e) = publish_to_jetstream( - proxy_state.stream, + proxy_state.stream.clone(), &proxy_state.callback_url, headers, body, - proxy_state.context, - id.clone(), + proxy_state.context.clone(), + &proxy_state.tid_header, + id.as_str(), ) .await { // Deregister the ID in the callback proxy state if writing to Jetstream fails - let _ = proxy_state.callback.deregister(&id).await; + let _ = proxy_state.callback.clone().deregister(&id).await; error!(error = ?e, "Publishing message to Jetstream for sync request"); return Err(ApiError::BadGateway( "Failed to write message to Jetstream".to_string(), @@ -189,19 +187,19 @@ async fn sync_publish( } async fn async_publish( - State(proxy_state): State>, + State(proxy_state): State>>, headers: HeaderMap, body: Bytes, ) -> Result, ApiError> { - let id = extract_id_from_headers(&headers); - + let id = extract_id_from_headers(&proxy_state.tid_header, &headers); let result = publish_to_jetstream( - proxy_state.stream, + proxy_state.stream.clone(), 
&proxy_state.callback_url, headers, body, - proxy_state.context, - id.clone(), + proxy_state.context.clone(), + &proxy_state.tid_header, + id.as_str(), ) .await; @@ -222,12 +220,13 @@ async fn async_publish( /// Write to JetStream and return the metadata. It is responsible for getting the ID from the header. async fn publish_to_jetstream( - stream: &'static str, + stream: String, callback_url: &str, headers: HeaderMap, body: Bytes, js_context: Context, - id: String, // Added ID as a parameter + id_header: &str, + id_header_value: &str, ) -> Result<(), async_nats::Error> { let mut js_headers = JSHeaderMap::new(); @@ -236,24 +235,22 @@ async fn publish_to_jetstream( js_headers.append(k.as_ref(), String::from_utf8_lossy(v.as_bytes()).borrow()) } - js_headers.append(ID_HEADER_KEY, id.as_str()); // Use the passed ID + js_headers.append(id_header, id_header_value); // Use the passed ID js_headers.append(CALLBACK_URL_KEY, callback_url); js_context .publish_with_headers(stream, js_headers, body) .await - .inspect_err(|e| error!(stream, error=?e, "Publishing message to stream"))? + .map_err(|e| format!("Publishing message to stream: {e:?}"))? 
.await - .inspect_err( - |e| error!(stream, error=?e, "Waiting for acknowledgement of published message"), - )?; + .map_err(|e| format!("Waiting for acknowledgement of published message: {e:?}"))?; Ok(()) } // extracts the ID from the headers, if not found, generates a new UUID -fn extract_id_from_headers(headers: &HeaderMap) -> String { - headers.get(&config().tid_header).map_or_else( +fn extract_id_from_headers(tid_header: &str, headers: &HeaderMap) -> String { + headers.get(tid_header).map_or_else( || Uuid::new_v4().to_string(), |v| String::from_utf8_lossy(v.as_bytes()).to_string(), ) @@ -273,12 +270,15 @@ mod tests { use tower::ServiceExt; use super::*; + use crate::app::callback::state::State as CallbackState; use crate::app::callback::store::memstore::InMemoryStore; use crate::app::callback::store::PayloadToSave; use crate::app::callback::CallbackRequest; use crate::app::tracker::MessageGraph; - use crate::pipeline::min_pipeline_spec; - use crate::Error; + use crate::pipeline::PipelineDCG; + use crate::{Error, Settings}; + + const PIPELINE_SPEC_ENCODED: &str = 
"eyJ2ZXJ0aWNlcyI6W3sibmFtZSI6ImluIiwic291cmNlIjp7InNlcnZpbmciOnsiYXV0aCI6bnVsbCwic2VydmljZSI6dHJ1ZSwibXNnSURIZWFkZXJLZXkiOiJYLU51bWFmbG93LUlkIiwic3RvcmUiOnsidXJsIjoicmVkaXM6Ly9yZWRpczo2Mzc5In19fSwiY29udGFpbmVyVGVtcGxhdGUiOnsicmVzb3VyY2VzIjp7fSwiaW1hZ2VQdWxsUG9saWN5IjoiTmV2ZXIiLCJlbnYiOlt7Im5hbWUiOiJSVVNUX0xPRyIsInZhbHVlIjoiZGVidWcifV19LCJzY2FsZSI6eyJtaW4iOjF9LCJ1cGRhdGVTdHJhdGVneSI6eyJ0eXBlIjoiUm9sbGluZ1VwZGF0ZSIsInJvbGxpbmdVcGRhdGUiOnsibWF4VW5hdmFpbGFibGUiOiIyNSUifX19LHsibmFtZSI6InBsYW5uZXIiLCJ1ZGYiOnsiY29udGFpbmVyIjp7ImltYWdlIjoiYXNjaWk6MC4xIiwiYXJncyI6WyJwbGFubmVyIl0sInJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn0sImJ1aWx0aW4iOm51bGwsImdyb3VwQnkiOm51bGx9LCJjb250YWluZXJUZW1wbGF0ZSI6eyJyZXNvdXJjZXMiOnt9LCJpbWFnZVB1bGxQb2xpY3kiOiJOZXZlciJ9LCJzY2FsZSI6eyJtaW4iOjF9LCJ1cGRhdGVTdHJhdGVneSI6eyJ0eXBlIjoiUm9sbGluZ1VwZGF0ZSIsInJvbGxpbmdVcGRhdGUiOnsibWF4VW5hdmFpbGFibGUiOiIyNSUifX19LHsibmFtZSI6InRpZ2VyIiwidWRmIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6ImFzY2lpOjAuMSIsImFyZ3MiOlsidGlnZXIiXSwicmVzb3VyY2VzIjp7fSwiaW1hZ2VQdWxsUG9saWN5IjoiTmV2ZXIifSwiYnVpbHRpbiI6bnVsbCwiZ3JvdXBCeSI6bnVsbH0sImNvbnRhaW5lclRlbXBsYXRlIjp7InJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn0sInNjYWxlIjp7Im1pbiI6MX0sInVwZGF0ZVN0cmF0ZWd5Ijp7InR5cGUiOiJSb2xsaW5nVXBkYXRlIiwicm9sbGluZ1VwZGF0ZSI6eyJtYXhVbmF2YWlsYWJsZSI6IjI1JSJ9fX0seyJuYW1lIjoiZG9nIiwidWRmIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6ImFzY2lpOjAuMSIsImFyZ3MiOlsiZG9nIl0sInJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn0sImJ1aWx0aW4iOm51bGwsImdyb3VwQnkiOm51bGx9LCJjb250YWluZXJUZW1wbGF0ZSI6eyJyZXNvdXJjZXMiOnt9LCJpbWFnZVB1bGxQb2xpY3kiOiJOZXZlciJ9LCJzY2FsZSI6eyJtaW4iOjF9LCJ1cGRhdGVTdHJhdGVneSI6eyJ0eXBlIjoiUm9sbGluZ1VwZGF0ZSIsInJvbGxpbmdVcGRhdGUiOnsibWF4VW5hdmFpbGFibGUiOiIyNSUifX19LHsibmFtZSI6ImVsZXBoYW50IiwidWRmIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6ImFzY2lpOjAuMSIsImFyZ3MiOlsiZWxlcGhhbnQiXSwicmVzb3VyY2VzIjp7fSwiaW1hZ2VQdWxsUG9saWN5IjoiTmV2ZXIifSwiYnVpbHRpbiI6bnVsbCwiZ3JvdXBCeSI6bnVsbH0sImNvbnRhaW5lclRlbXBsYXRlIjp7InJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmV
yIn0sInNjYWxlIjp7Im1pbiI6MX0sInVwZGF0ZVN0cmF0ZWd5Ijp7InR5cGUiOiJSb2xsaW5nVXBkYXRlIiwicm9sbGluZ1VwZGF0ZSI6eyJtYXhVbmF2YWlsYWJsZSI6IjI1JSJ9fX0seyJuYW1lIjoiYXNjaWlhcnQiLCJ1ZGYiOnsiY29udGFpbmVyIjp7ImltYWdlIjoiYXNjaWk6MC4xIiwiYXJncyI6WyJhc2NpaWFydCJdLCJyZXNvdXJjZXMiOnt9LCJpbWFnZVB1bGxQb2xpY3kiOiJOZXZlciJ9LCJidWlsdGluIjpudWxsLCJncm91cEJ5IjpudWxsfSwiY29udGFpbmVyVGVtcGxhdGUiOnsicmVzb3VyY2VzIjp7fSwiaW1hZ2VQdWxsUG9saWN5IjoiTmV2ZXIifSwic2NhbGUiOnsibWluIjoxfSwidXBkYXRlU3RyYXRlZ3kiOnsidHlwZSI6IlJvbGxpbmdVcGRhdGUiLCJyb2xsaW5nVXBkYXRlIjp7Im1heFVuYXZhaWxhYmxlIjoiMjUlIn19fSx7Im5hbWUiOiJzZXJ2ZS1zaW5rIiwic2luayI6eyJ1ZHNpbmsiOnsiY29udGFpbmVyIjp7ImltYWdlIjoic2VydmVzaW5rOjAuMSIsImVudiI6W3sibmFtZSI6Ik5VTUFGTE9XX0NBTExCQUNLX1VSTF9LRVkiLCJ2YWx1ZSI6IlgtTnVtYWZsb3ctQ2FsbGJhY2stVXJsIn0seyJuYW1lIjoiTlVNQUZMT1dfTVNHX0lEX0hFQURFUl9LRVkiLCJ2YWx1ZSI6IlgtTnVtYWZsb3ctSWQifV0sInJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn19LCJyZXRyeVN0cmF0ZWd5Ijp7fX0sImNvbnRhaW5lclRlbXBsYXRlIjp7InJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn0sInNjYWxlIjp7Im1pbiI6MX0sInVwZGF0ZVN0cmF0ZWd5Ijp7InR5cGUiOiJSb2xsaW5nVXBkYXRlIiwicm9sbGluZ1VwZGF0ZSI6eyJtYXhVbmF2YWlsYWJsZSI6IjI1JSJ9fX0seyJuYW1lIjoiZXJyb3Itc2luayIsInNpbmsiOnsidWRzaW5rIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6InNlcnZlc2luazowLjEiLCJlbnYiOlt7Im5hbWUiOiJOVU1BRkxPV19DQUxMQkFDS19VUkxfS0VZIiwidmFsdWUiOiJYLU51bWFmbG93LUNhbGxiYWNrLVVybCJ9LHsibmFtZSI6Ik5VTUFGTE9XX01TR19JRF9IRUFERVJfS0VZIiwidmFsdWUiOiJYLU51bWFmbG93LUlkIn1dLCJyZXNvdXJjZXMiOnt9LCJpbWFnZVB1bGxQb2xpY3kiOiJOZXZlciJ9fSwicmV0cnlTdHJhdGVneSI6e319LCJjb250YWluZXJUZW1wbGF0ZSI6eyJyZXNvdXJjZXMiOnt9LCJpbWFnZVB1bGxQb2xpY3kiOiJOZXZlciJ9LCJzY2FsZSI6eyJtaW4iOjF9LCJ1cGRhdGVTdHJhdGVneSI6eyJ0eXBlIjoiUm9sbGluZ1VwZGF0ZSIsInJvbGxpbmdVcGRhdGUiOnsibWF4VW5hdmFpbGFibGUiOiIyNSUifX19XSwiZWRnZXMiOlt7ImZyb20iOiJpbiIsInRvIjoicGxhbm5lciIsImNvbmRpdGlvbnMiOm51bGx9LHsiZnJvbSI6InBsYW5uZXIiLCJ0byI6ImFzY2lpYXJ0IiwiY29uZGl0aW9ucyI6eyJ0YWdzIjp7Im9wZXJhdG9yIjoib3IiLCJ2YWx1ZXMiOlsiYXNjaWlhcnQiXX19fSx7ImZyb20iOiJwbGFubmVyIiwidG8iOiJ0aWdlciIsImN
vbmRpdGlvbnMiOnsidGFncyI6eyJvcGVyYXRvciI6Im9yIiwidmFsdWVzIjpbInRpZ2VyIl19fX0seyJmcm9tIjoicGxhbm5lciIsInRvIjoiZG9nIiwiY29uZGl0aW9ucyI6eyJ0YWdzIjp7Im9wZXJhdG9yIjoib3IiLCJ2YWx1ZXMiOlsiZG9nIl19fX0seyJmcm9tIjoicGxhbm5lciIsInRvIjoiZWxlcGhhbnQiLCJjb25kaXRpb25zIjp7InRhZ3MiOnsib3BlcmF0b3IiOiJvciIsInZhbHVlcyI6WyJlbGVwaGFudCJdfX19LHsiZnJvbSI6InRpZ2VyIiwidG8iOiJzZXJ2ZS1zaW5rIiwiY29uZGl0aW9ucyI6bnVsbH0seyJmcm9tIjoiZG9nIiwidG8iOiJzZXJ2ZS1zaW5rIiwiY29uZGl0aW9ucyI6bnVsbH0seyJmcm9tIjoiZWxlcGhhbnQiLCJ0byI6InNlcnZlLXNpbmsiLCJjb25kaXRpb25zIjpudWxsfSx7ImZyb20iOiJhc2NpaWFydCIsInRvIjoic2VydmUtc2luayIsImNvbmRpdGlvbnMiOm51bGx9LHsiZnJvbSI6InBsYW5uZXIiLCJ0byI6ImVycm9yLXNpbmsiLCJjb25kaXRpb25zIjp7InRhZ3MiOnsib3BlcmF0b3IiOiJvciIsInZhbHVlcyI6WyJlcnJvciJdfX19XSwibGlmZWN5Y2xlIjp7fSwid2F0ZXJtYXJrIjp7fX0="; #[derive(Clone)] struct MockStore; @@ -303,7 +303,9 @@ mod tests { #[tokio::test] async fn test_async_publish() -> Result<(), Box> { - let client = async_nats::connect(&config().jetstream.url) + let settings = Settings::default(); + let settings = Arc::new(settings); + let client = async_nats::connect(&settings.jetstream.url) .await .map_err(|e| format!("Connecting to Jetstream: {:?}", e))?; @@ -318,14 +320,20 @@ mod tests { ..Default::default() }) .await - .map_err(|e| format!("creating stream {}: {}", &config().jetstream.url, e))?; + .map_err(|e| format!("creating stream {}: {}", &settings.jetstream.url, e))?; let mock_store = MockStore {}; - let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec()) + let pipeline_spec: PipelineDCG = PIPELINE_SPEC_ENCODED.parse().unwrap(); + let msg_graph = MessageGraph::from_pipeline(&pipeline_spec) .map_err(|e| format!("Failed to create message graph from pipeline spec: {:?}", e))?; let callback_state = CallbackState::new(msg_graph, mock_store).await?; - let app = jetstream_proxy(context, callback_state).await?; + let app_state = AppState { + callback_state, + context, + settings, + }; + let app = jetstream_proxy(app_state).await?; let res = 
Request::builder() .method("POST") .uri("/async") @@ -384,7 +392,8 @@ mod tests { #[tokio::test] async fn test_sync_publish() { - let client = async_nats::connect(&config().jetstream.url).await.unwrap(); + let settings = Settings::default(); + let client = async_nats::connect(&settings.jetstream.url).await.unwrap(); let context = jetstream::new(client); let id = "foobar"; let stream_name = "sync_pub"; @@ -396,16 +405,21 @@ mod tests { ..Default::default() }) .await - .map_err(|e| format!("creating stream {}: {}", &config().jetstream.url, e)); + .map_err(|e| format!("creating stream {}: {}", &settings.jetstream.url, e)); let mem_store = InMemoryStore::new(); - let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec()).unwrap(); + let pipeline_spec: PipelineDCG = PIPELINE_SPEC_ENCODED.parse().unwrap(); + let msg_graph = MessageGraph::from_pipeline(&pipeline_spec).unwrap(); let mut callback_state = CallbackState::new(msg_graph, mem_store).await.unwrap(); - let app = jetstream_proxy(context, callback_state.clone()) - .await - .unwrap(); + let settings = Arc::new(settings); + let app_state = AppState { + settings, + callback_state: callback_state.clone(), + context, + }; + let app = jetstream_proxy(app_state).await.unwrap(); tokio::spawn(async move { let cbs = create_default_callbacks(id); @@ -448,7 +462,8 @@ mod tests { #[tokio::test] async fn test_sync_publish_serve() { - let client = async_nats::connect(&config().jetstream.url).await.unwrap(); + let settings = Arc::new(Settings::default()); + let client = async_nats::connect(&settings.jetstream.url).await.unwrap(); let context = jetstream::new(client); let id = "foobar"; let stream_name = "sync_serve_pub"; @@ -460,16 +475,21 @@ mod tests { ..Default::default() }) .await - .map_err(|e| format!("creating stream {}: {}", &config().jetstream.url, e)); + .map_err(|e| format!("creating stream {}: {}", &settings.jetstream.url, e)); let mem_store = InMemoryStore::new(); - let msg_graph = 
MessageGraph::from_pipeline(min_pipeline_spec()).unwrap(); + let pipeline_spec: PipelineDCG = PIPELINE_SPEC_ENCODED.parse().unwrap(); + let msg_graph = MessageGraph::from_pipeline(&pipeline_spec).unwrap(); let mut callback_state = CallbackState::new(msg_graph, mem_store).await.unwrap(); - let app = jetstream_proxy(context, callback_state.clone()) - .await - .unwrap(); + let app_state = AppState { + settings, + callback_state: callback_state.clone(), + context, + }; + + let app = jetstream_proxy(app_state).await.unwrap(); // pipeline is in -> cat -> out, so we will have 3 callback requests let cbs = create_default_callbacks(id); diff --git a/rust/serving/src/app/message_path.rs b/rust/serving/src/app/message_path.rs index 933c58a815..20e5701864 100644 --- a/rust/serving/src/app/message_path.rs +++ b/rust/serving/src/app/message_path.rs @@ -46,12 +46,15 @@ mod tests { use super::*; use crate::app::callback::store::memstore::InMemoryStore; use crate::app::tracker::MessageGraph; - use crate::pipeline::min_pipeline_spec; + use crate::pipeline::PipelineDCG; + + const PIPELINE_SPEC_ENCODED: &str = 
"eyJ2ZXJ0aWNlcyI6W3sibmFtZSI6ImluIiwic291cmNlIjp7InNlcnZpbmciOnsiYXV0aCI6bnVsbCwic2VydmljZSI6dHJ1ZSwibXNnSURIZWFkZXJLZXkiOiJYLU51bWFmbG93LUlkIiwic3RvcmUiOnsidXJsIjoicmVkaXM6Ly9yZWRpczo2Mzc5In19fSwiY29udGFpbmVyVGVtcGxhdGUiOnsicmVzb3VyY2VzIjp7fSwiaW1hZ2VQdWxsUG9saWN5IjoiTmV2ZXIiLCJlbnYiOlt7Im5hbWUiOiJSVVNUX0xPRyIsInZhbHVlIjoiZGVidWcifV19LCJzY2FsZSI6eyJtaW4iOjF9LCJ1cGRhdGVTdHJhdGVneSI6eyJ0eXBlIjoiUm9sbGluZ1VwZGF0ZSIsInJvbGxpbmdVcGRhdGUiOnsibWF4VW5hdmFpbGFibGUiOiIyNSUifX19LHsibmFtZSI6InBsYW5uZXIiLCJ1ZGYiOnsiY29udGFpbmVyIjp7ImltYWdlIjoiYXNjaWk6MC4xIiwiYXJncyI6WyJwbGFubmVyIl0sInJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn0sImJ1aWx0aW4iOm51bGwsImdyb3VwQnkiOm51bGx9LCJjb250YWluZXJUZW1wbGF0ZSI6eyJyZXNvdXJjZXMiOnt9LCJpbWFnZVB1bGxQb2xpY3kiOiJOZXZlciJ9LCJzY2FsZSI6eyJtaW4iOjF9LCJ1cGRhdGVTdHJhdGVneSI6eyJ0eXBlIjoiUm9sbGluZ1VwZGF0ZSIsInJvbGxpbmdVcGRhdGUiOnsibWF4VW5hdmFpbGFibGUiOiIyNSUifX19LHsibmFtZSI6InRpZ2VyIiwidWRmIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6ImFzY2lpOjAuMSIsImFyZ3MiOlsidGlnZXIiXSwicmVzb3VyY2VzIjp7fSwiaW1hZ2VQdWxsUG9saWN5IjoiTmV2ZXIifSwiYnVpbHRpbiI6bnVsbCwiZ3JvdXBCeSI6bnVsbH0sImNvbnRhaW5lclRlbXBsYXRlIjp7InJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn0sInNjYWxlIjp7Im1pbiI6MX0sInVwZGF0ZVN0cmF0ZWd5Ijp7InR5cGUiOiJSb2xsaW5nVXBkYXRlIiwicm9sbGluZ1VwZGF0ZSI6eyJtYXhVbmF2YWlsYWJsZSI6IjI1JSJ9fX0seyJuYW1lIjoiZG9nIiwidWRmIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6ImFzY2lpOjAuMSIsImFyZ3MiOlsiZG9nIl0sInJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn0sImJ1aWx0aW4iOm51bGwsImdyb3VwQnkiOm51bGx9LCJjb250YWluZXJUZW1wbGF0ZSI6eyJyZXNvdXJjZXMiOnt9LCJpbWFnZVB1bGxQb2xpY3kiOiJOZXZlciJ9LCJzY2FsZSI6eyJtaW4iOjF9LCJ1cGRhdGVTdHJhdGVneSI6eyJ0eXBlIjoiUm9sbGluZ1VwZGF0ZSIsInJvbGxpbmdVcGRhdGUiOnsibWF4VW5hdmFpbGFibGUiOiIyNSUifX19LHsibmFtZSI6ImVsZXBoYW50IiwidWRmIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6ImFzY2lpOjAuMSIsImFyZ3MiOlsiZWxlcGhhbnQiXSwicmVzb3VyY2VzIjp7fSwiaW1hZ2VQdWxsUG9saWN5IjoiTmV2ZXIifSwiYnVpbHRpbiI6bnVsbCwiZ3JvdXBCeSI6bnVsbH0sImNvbnRhaW5lclRlbXBsYXRlIjp7InJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmV
yIn0sInNjYWxlIjp7Im1pbiI6MX0sInVwZGF0ZVN0cmF0ZWd5Ijp7InR5cGUiOiJSb2xsaW5nVXBkYXRlIiwicm9sbGluZ1VwZGF0ZSI6eyJtYXhVbmF2YWlsYWJsZSI6IjI1JSJ9fX0seyJuYW1lIjoiYXNjaWlhcnQiLCJ1ZGYiOnsiY29udGFpbmVyIjp7ImltYWdlIjoiYXNjaWk6MC4xIiwiYXJncyI6WyJhc2NpaWFydCJdLCJyZXNvdXJjZXMiOnt9LCJpbWFnZVB1bGxQb2xpY3kiOiJOZXZlciJ9LCJidWlsdGluIjpudWxsLCJncm91cEJ5IjpudWxsfSwiY29udGFpbmVyVGVtcGxhdGUiOnsicmVzb3VyY2VzIjp7fSwiaW1hZ2VQdWxsUG9saWN5IjoiTmV2ZXIifSwic2NhbGUiOnsibWluIjoxfSwidXBkYXRlU3RyYXRlZ3kiOnsidHlwZSI6IlJvbGxpbmdVcGRhdGUiLCJyb2xsaW5nVXBkYXRlIjp7Im1heFVuYXZhaWxhYmxlIjoiMjUlIn19fSx7Im5hbWUiOiJzZXJ2ZS1zaW5rIiwic2luayI6eyJ1ZHNpbmsiOnsiY29udGFpbmVyIjp7ImltYWdlIjoic2VydmVzaW5rOjAuMSIsImVudiI6W3sibmFtZSI6Ik5VTUFGTE9XX0NBTExCQUNLX1VSTF9LRVkiLCJ2YWx1ZSI6IlgtTnVtYWZsb3ctQ2FsbGJhY2stVXJsIn0seyJuYW1lIjoiTlVNQUZMT1dfTVNHX0lEX0hFQURFUl9LRVkiLCJ2YWx1ZSI6IlgtTnVtYWZsb3ctSWQifV0sInJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn19LCJyZXRyeVN0cmF0ZWd5Ijp7fX0sImNvbnRhaW5lclRlbXBsYXRlIjp7InJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn0sInNjYWxlIjp7Im1pbiI6MX0sInVwZGF0ZVN0cmF0ZWd5Ijp7InR5cGUiOiJSb2xsaW5nVXBkYXRlIiwicm9sbGluZ1VwZGF0ZSI6eyJtYXhVbmF2YWlsYWJsZSI6IjI1JSJ9fX0seyJuYW1lIjoiZXJyb3Itc2luayIsInNpbmsiOnsidWRzaW5rIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6InNlcnZlc2luazowLjEiLCJlbnYiOlt7Im5hbWUiOiJOVU1BRkxPV19DQUxMQkFDS19VUkxfS0VZIiwidmFsdWUiOiJYLU51bWFmbG93LUNhbGxiYWNrLVVybCJ9LHsibmFtZSI6Ik5VTUFGTE9XX01TR19JRF9IRUFERVJfS0VZIiwidmFsdWUiOiJYLU51bWFmbG93LUlkIn1dLCJyZXNvdXJjZXMiOnt9LCJpbWFnZVB1bGxQb2xpY3kiOiJOZXZlciJ9fSwicmV0cnlTdHJhdGVneSI6e319LCJjb250YWluZXJUZW1wbGF0ZSI6eyJyZXNvdXJjZXMiOnt9LCJpbWFnZVB1bGxQb2xpY3kiOiJOZXZlciJ9LCJzY2FsZSI6eyJtaW4iOjF9LCJ1cGRhdGVTdHJhdGVneSI6eyJ0eXBlIjoiUm9sbGluZ1VwZGF0ZSIsInJvbGxpbmdVcGRhdGUiOnsibWF4VW5hdmFpbGFibGUiOiIyNSUifX19XSwiZWRnZXMiOlt7ImZyb20iOiJpbiIsInRvIjoicGxhbm5lciIsImNvbmRpdGlvbnMiOm51bGx9LHsiZnJvbSI6InBsYW5uZXIiLCJ0byI6ImFzY2lpYXJ0IiwiY29uZGl0aW9ucyI6eyJ0YWdzIjp7Im9wZXJhdG9yIjoib3IiLCJ2YWx1ZXMiOlsiYXNjaWlhcnQiXX19fSx7ImZyb20iOiJwbGFubmVyIiwidG8iOiJ0aWdlciIsImN
vbmRpdGlvbnMiOnsidGFncyI6eyJvcGVyYXRvciI6Im9yIiwidmFsdWVzIjpbInRpZ2VyIl19fX0seyJmcm9tIjoicGxhbm5lciIsInRvIjoiZG9nIiwiY29uZGl0aW9ucyI6eyJ0YWdzIjp7Im9wZXJhdG9yIjoib3IiLCJ2YWx1ZXMiOlsiZG9nIl19fX0seyJmcm9tIjoicGxhbm5lciIsInRvIjoiZWxlcGhhbnQiLCJjb25kaXRpb25zIjp7InRhZ3MiOnsib3BlcmF0b3IiOiJvciIsInZhbHVlcyI6WyJlbGVwaGFudCJdfX19LHsiZnJvbSI6InRpZ2VyIiwidG8iOiJzZXJ2ZS1zaW5rIiwiY29uZGl0aW9ucyI6bnVsbH0seyJmcm9tIjoiZG9nIiwidG8iOiJzZXJ2ZS1zaW5rIiwiY29uZGl0aW9ucyI6bnVsbH0seyJmcm9tIjoiZWxlcGhhbnQiLCJ0byI6InNlcnZlLXNpbmsiLCJjb25kaXRpb25zIjpudWxsfSx7ImZyb20iOiJhc2NpaWFydCIsInRvIjoic2VydmUtc2luayIsImNvbmRpdGlvbnMiOm51bGx9LHsiZnJvbSI6InBsYW5uZXIiLCJ0byI6ImVycm9yLXNpbmsiLCJjb25kaXRpb25zIjp7InRhZ3MiOnsib3BlcmF0b3IiOiJvciIsInZhbHVlcyI6WyJlcnJvciJdfX19XSwibGlmZWN5Y2xlIjp7fSwid2F0ZXJtYXJrIjp7fX0="; #[tokio::test] async fn test_message_path_not_present() { let store = InMemoryStore::new(); - let msg_graph = MessageGraph::from_pipeline(min_pipeline_spec()).unwrap(); + let pipeline_spec: PipelineDCG = PIPELINE_SPEC_ENCODED.parse().unwrap(); + let msg_graph = MessageGraph::from_pipeline(&pipeline_spec).unwrap(); let state = CallbackState::new(msg_graph, store).await.unwrap(); let app = get_message_path(state); diff --git a/rust/serving/src/config.rs b/rust/serving/src/config.rs index 82e663c8f5..7ba3778d00 100644 --- a/rust/serving/src/config.rs +++ b/rust/serving/src/config.rs @@ -1,40 +1,23 @@ +use std::collections::HashMap; use std::fmt::Debug; -use std::path::Path; -use std::{env, sync::OnceLock}; use async_nats::rustls; use base64::prelude::BASE64_STANDARD; use base64::Engine; -use config::Config; use rcgen::{generate_simple_self_signed, Certificate, CertifiedKey, KeyPair}; use serde::{Deserialize, Serialize}; -use tracing::info; use crate::Error::ParseConfig; -use crate::{Error, Result}; -const ENV_PREFIX: &str = "NUMAFLOW_SERVING"; const ENV_NUMAFLOW_SERVING_SOURCE_OBJECT: &str = "NUMAFLOW_SERVING_SOURCE_OBJECT"; const ENV_NUMAFLOW_SERVING_JETSTREAM_URL: &str = 
"NUMAFLOW_ISBSVC_JETSTREAM_URL"; const ENV_NUMAFLOW_SERVING_JETSTREAM_STREAM: &str = "NUMAFLOW_SERVING_JETSTREAM_STREAM"; const ENV_NUMAFLOW_SERVING_STORE_TTL: &str = "NUMAFLOW_SERVING_STORE_TTL"; - -pub fn config() -> &'static Settings { - static CONF: OnceLock = OnceLock::new(); - CONF.get_or_init(|| { - let config_dir = env::var("CONFIG_PATH").unwrap_or_else(|_| { - info!("Config directory is not specified, using default config directory: './config'"); - String::from("config") - }); - - match Settings::load(config_dir) { - Ok(v) => v, - Err(e) => { - panic!("Failed to load configuration: {:?}", e); - } - } - }) -} +const ENV_NUMAFLOW_SERVING_HOST_IP: &str = "NUMAFLOW_SERVING_HOST_IP"; +const ENV_NUMAFLOW_SERVING_APP_PORT: &str = "NUMAFLOW_SERVING_APP_LISTEN_PORT"; +const ENV_NUMAFLOW_SERVING_JETSTREAM_USER: &str = "NUMAFLOW_ISBSVC_JETSTREAM_USER"; +const ENV_NUMAFLOW_SERVING_JETSTREAM_PASSWORD: &str = "NUMAFLOW_ISBSVC_JETSTREAM_PASSWORD"; +const ENV_NUMAFLOW_SERVING_AUTH_TOKEN: &str = "NUMAFLOW_SERVING_AUTH_TOKEN"; pub fn generate_certs() -> std::result::Result<(Certificate, KeyPair), String> { let _ = rustls::crypto::aws_lc_rs::default_provider().install_default(); @@ -43,13 +26,47 @@ pub fn generate_certs() -> std::result::Result<(Certificate, KeyPair), String> { Ok((cert, key_pair)) } -#[derive(Debug, Deserialize)] +#[derive(Deserialize, Clone, PartialEq)] +pub struct BasicAuth { + pub username: String, + pub password: String, +} + +impl Debug for BasicAuth { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let passwd_printable = if self.password.len() > 4 { + let passwd: String = self + .password + .chars() + .skip(self.password.len() - 2) + .take(2) + .collect(); + format!("***{}", passwd) + } else { + "*****".to_owned() + }; + write!(f, "{}:{}", self.username, passwd_printable) + } +} + +#[derive(Debug, Deserialize, Clone, PartialEq)] pub struct JetStreamConfig { pub stream: String, pub url: String, + pub auth: Option, +} + +impl 
Default for JetStreamConfig { + fn default() -> Self { + Self { + stream: "default".to_owned(), + url: "localhost:4222".to_owned(), + auth: None, + } + } } -#[derive(Debug, Deserialize)] +#[derive(Debug, Deserialize, Clone, PartialEq)] pub struct RedisConfig { pub addr: String, pub max_tasks: usize, @@ -58,7 +75,20 @@ pub struct RedisConfig { pub ttl_secs: Option, } -#[derive(Debug, Deserialize)] +impl Default for RedisConfig { + fn default() -> Self { + Self { + addr: "redis://127.0.0.1:6379".to_owned(), + max_tasks: 50, + retries: 5, + retries_duration_millis: 100, + // TODO: we might need an option type here. Zero value of u32 can be used instead of None + ttl_secs: Some(1), + } + } +} + +#[derive(Debug, Deserialize, Clone, PartialEq)] pub struct Settings { pub tid_header: String, pub app_listen_port: u16, @@ -69,6 +99,23 @@ pub struct Settings { pub redis: RedisConfig, /// The IP address of the numaserve pod. This will be used to construct the value for X-Numaflow-Callback-Url header pub host_ip: String, + pub api_auth_token: Option, +} + +impl Default for Settings { + fn default() -> Self { + Self { + tid_header: "ID".to_owned(), + app_listen_port: 3000, + metrics_server_listen_port: 3001, + upstream_addr: "localhost:8888".to_owned(), + drain_timeout_secs: 10, + jetstream: JetStreamConfig::default(), + redis: RedisConfig::default(), + host_ip: "127.0.0.1".to_owned(), + api_auth_token: None, + } + } } #[derive(Serialize, Deserialize, Debug, Clone)] @@ -84,92 +131,104 @@ pub struct CallbackStorageConfig { pub url: String, } -impl Settings { - fn load>(config_dir: P) -> Result { - let config_dir = config_dir.as_ref(); - if !config_dir.is_dir() { - return Err(Error::Other(format!( - "Path {} is not a directory", - config_dir.to_string_lossy() - ))); +/// This implementation is to load settings from env variables +impl TryFrom> for Settings { + type Error = crate::Error; + fn try_from(env_vars: HashMap) -> std::result::Result { + let host_ip = env_vars + 
.get(ENV_NUMAFLOW_SERVING_HOST_IP) + .ok_or_else(|| { + ParseConfig(format!( + "Environment variable {ENV_NUMAFLOW_SERVING_HOST_IP} is not set" + )) + })? + .to_owned(); + + let mut settings = Settings { + host_ip, + ..Default::default() + }; + + if let Some(jetstream_url) = env_vars.get(ENV_NUMAFLOW_SERVING_JETSTREAM_URL) { + settings.jetstream.url = jetstream_url.to_owned(); } - let settings = Config::builder() - .add_source(config::File::from(config_dir.join("default.toml"))) - .add_source( - config::Environment::with_prefix(ENV_PREFIX) - .prefix_separator("_") - .separator("."), - ) - .build() - .map_err(|e| ParseConfig(format!("generating runtime configuration: {e:?}")))?; - - let mut settings = settings - .try_deserialize::() - .map_err(|e| ParseConfig(format!("parsing runtime configuration: {e:?}")))?; - - // Update JetStreamConfig from environment variables - if let Ok(url) = env::var(ENV_NUMAFLOW_SERVING_JETSTREAM_URL) { - settings.jetstream.url = url; + if let Some(jetstream_stream) = env_vars.get(ENV_NUMAFLOW_SERVING_JETSTREAM_STREAM) { + settings.jetstream.stream = jetstream_stream.to_owned(); } - if let Ok(stream) = env::var(ENV_NUMAFLOW_SERVING_JETSTREAM_STREAM) { - settings.jetstream.stream = stream; + + if let Some(api_auth_token) = env_vars.get(ENV_NUMAFLOW_SERVING_AUTH_TOKEN) { + settings.api_auth_token = Some(api_auth_token.to_owned()); } - let source_spec_encoded = env::var(ENV_NUMAFLOW_SERVING_SOURCE_OBJECT); - - match source_spec_encoded { - Ok(source_spec_encoded) => { - let source_spec_decoded = BASE64_STANDARD - .decode(source_spec_encoded.as_bytes()) - .map_err(|e| ParseConfig(format!("decoding NUMAFLOW_SERVING_SOURCE: {e:?}")))?; - - let source_spec = serde_json::from_slice::(&source_spec_decoded) - .map_err(|e| ParseConfig(format!("parsing NUMAFLOW_SERVING_SOURCE: {e:?}")))?; - - // Update tid_header from source_spec - if let Some(msg_id_header_key) = source_spec.msg_id_header_key { - settings.tid_header = msg_id_header_key; - } - - // 
Update redis.addr from source_spec, currently we only support redis as callback storage - settings.redis.addr = source_spec.callback_storage.url; - - // Update redis.ttl_secs from environment variable - settings.redis.ttl_secs = match env::var(ENV_NUMAFLOW_SERVING_STORE_TTL) { - Ok(ttl_secs) => Some(ttl_secs.parse().map_err(|e| { - ParseConfig(format!( - "parsing NUMAFLOW_SERVING_STORE_TTL: expected u32, got {:?}", - e - )) - })?), - Err(_) => None, - }; - - Ok(settings) - } - Err(_) => Ok(settings), + if let Some(app_port) = env_vars.get(ENV_NUMAFLOW_SERVING_APP_PORT) { + settings.app_listen_port = app_port.parse().map_err(|e| { + ParseConfig(format!( + "Parsing {ENV_NUMAFLOW_SERVING_APP_PORT}(set to '{app_port}'): {e:?}" + )) + })?; } + + // If username is set, the password also must be set + if let Some(username) = env_vars.get(ENV_NUMAFLOW_SERVING_JETSTREAM_USER) { + let Some(password) = env_vars.get(ENV_NUMAFLOW_SERVING_JETSTREAM_PASSWORD) else { + return Err(ParseConfig(format!("Env variable {ENV_NUMAFLOW_SERVING_JETSTREAM_USER} is set, but {ENV_NUMAFLOW_SERVING_JETSTREAM_PASSWORD} is not set"))); + }; + settings.jetstream.auth = Some(BasicAuth { + username: username.to_owned(), + password: password.to_owned(), + }); + } + + // Update redis.ttl_secs from environment variable + if let Some(ttl_secs) = env_vars.get(ENV_NUMAFLOW_SERVING_STORE_TTL) { + let ttl_secs: u32 = ttl_secs.parse().map_err(|e| { + ParseConfig(format!("parsing {ENV_NUMAFLOW_SERVING_STORE_TTL}: {e:?}")) + })?; + settings.redis.ttl_secs = Some(ttl_secs); + } + + let Some(source_spec_encoded) = env_vars.get(ENV_NUMAFLOW_SERVING_SOURCE_OBJECT) else { + return Ok(settings); + }; + + let source_spec_decoded = BASE64_STANDARD + .decode(source_spec_encoded.as_bytes()) + .map_err(|e| ParseConfig(format!("decoding NUMAFLOW_SERVING_SOURCE: {e:?}")))?; + + let source_spec = serde_json::from_slice::(&source_spec_decoded) + .map_err(|e| ParseConfig(format!("parsing NUMAFLOW_SERVING_SOURCE: {e:?}")))?; + 
+ // Update tid_header from source_spec + if let Some(msg_id_header_key) = source_spec.msg_id_header_key { + settings.tid_header = msg_id_header_key; + } + + // Update redis.addr from source_spec, currently we only support redis as callback storage + settings.redis.addr = source_spec.callback_storage.url; + + Ok(settings) } } #[cfg(test)] mod tests { - use std::env; - use super::*; #[test] - fn test_config() { - // Set up the environment variable for the config directory - env::set_var("RUN_ENV", "Development"); - env::set_var("APP_HOST_IP", "10.244.0.6"); - env::set_var("CONFIG_PATH", "config"); + fn test_basic_auth_debug_print() { + let auth = BasicAuth { + username: "js-auth-user".into(), + password: "js-auth-password".into(), + }; + let auth_debug = format!("{auth:?}"); + assert_eq!(auth_debug, "js-auth-user:***rd"); + } - // Call the config method - let settings = config(); + #[test] + fn test_default_config() { + let settings = Settings::default(); - // Assert that the settings are as expected assert_eq!(settings.tid_header, "ID"); assert_eq!(settings.app_listen_port, 3000); assert_eq!(settings.metrics_server_listen_port, 3001); @@ -177,9 +236,66 @@ mod tests { assert_eq!(settings.drain_timeout_secs, 10); assert_eq!(settings.jetstream.stream, "default"); assert_eq!(settings.jetstream.url, "localhost:4222"); - assert_eq!(settings.redis.addr, "redis://127.0.0.1/"); + assert_eq!(settings.redis.addr, "redis://127.0.0.1:6379"); assert_eq!(settings.redis.max_tasks, 50); assert_eq!(settings.redis.retries, 5); assert_eq!(settings.redis.retries_duration_millis, 100); } + + #[test] + fn test_config_parse() { + // Set up the environment variables + let env_vars = [ + ( + ENV_NUMAFLOW_SERVING_JETSTREAM_URL, + "nats://isbsvc-default-js-svc.default.svc:4222", + ), + ( + ENV_NUMAFLOW_SERVING_JETSTREAM_STREAM, + "ascii-art-pipeline-in-serving-source", + ), + (ENV_NUMAFLOW_SERVING_JETSTREAM_USER, "js-auth-user"), + (ENV_NUMAFLOW_SERVING_JETSTREAM_PASSWORD, 
"js-user-password"), + (ENV_NUMAFLOW_SERVING_HOST_IP, "10.2.3.5"), + (ENV_NUMAFLOW_SERVING_AUTH_TOKEN, "api-auth-token"), + (ENV_NUMAFLOW_SERVING_APP_PORT, "8443"), + (ENV_NUMAFLOW_SERVING_STORE_TTL, "86400"), + (ENV_NUMAFLOW_SERVING_SOURCE_OBJECT, "eyJhdXRoIjpudWxsLCJzZXJ2aWNlIjp0cnVlLCJtc2dJREhlYWRlcktleSI6IlgtTnVtYWZsb3ctSWQiLCJzdG9yZSI6eyJ1cmwiOiJyZWRpczovL3JlZGlzOjYzNzkifX0=") + ]; + + // Call the config method + let settings: Settings = env_vars + .into_iter() + .map(|(key, val)| (key.to_owned(), val.to_owned())) + .collect::>() + .try_into() + .unwrap(); + + let expected_config = Settings { + tid_header: "X-Numaflow-Id".into(), + app_listen_port: 8443, + metrics_server_listen_port: 3001, + upstream_addr: "localhost:8888".into(), + drain_timeout_secs: 10, + jetstream: JetStreamConfig { + stream: "ascii-art-pipeline-in-serving-source".into(), + url: "nats://isbsvc-default-js-svc.default.svc:4222".into(), + auth: Some(BasicAuth { + username: "js-auth-user".into(), + password: "js-user-password".into(), + }), + }, + redis: RedisConfig { + addr: "redis://redis:6379".into(), + max_tasks: 50, + retries: 5, + retries_duration_millis: 100, + ttl_secs: Some(86400), + }, + host_ip: "10.2.3.5".into(), + api_auth_token: Some("api-auth-token".into()), + }; + + assert_eq!(settings, expected_config); + } } diff --git a/rust/serving/src/lib.rs b/rust/serving/src/lib.rs index 09e2dfcaa5..796313bdb2 100644 --- a/rust/serving/src/lib.rs +++ b/rust/serving/src/lib.rs @@ -1,42 +1,61 @@ +use std::env; use std::net::SocketAddr; +use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; use tracing::info; pub use self::error::{Error, Result}; +use self::pipeline::PipelineDCG; use crate::app::start_main_server; -use crate::config::{config, generate_certs}; +use crate::config::generate_certs; use crate::metrics::start_https_metrics_server; -use crate::pipeline::min_pipeline_spec; mod app; + mod config; +pub use config::Settings; + mod consts; mod error; mod metrics; mod pipeline; 
-pub async fn serve() -> std::result::Result<(), Box> -{ +const ENV_MIN_PIPELINE_SPEC: &str = "NUMAFLOW_SERVING_MIN_PIPELINE_SPEC"; + +pub async fn serve( + settings: Arc, +) -> std::result::Result<(), Box> { let (cert, key) = generate_certs()?; let tls_config = RustlsConfig::from_pem(cert.pem().into(), key.serialize_pem().into()) .await .map_err(|e| format!("Failed to create tls config {:?}", e))?; - info!(config = ?config(), pipeline_spec = ? min_pipeline_spec(), "Starting server with config and pipeline spec"); + // TODO: Move all env variables into one place. Some env variables are loaded when Settings is initialized + let pipeline_spec: PipelineDCG = env::var(ENV_MIN_PIPELINE_SPEC) + .map_err(|_| { + format!("Pipeline spec is not set using environment variable {ENV_MIN_PIPELINE_SPEC}") + })? + .parse() + .map_err(|e| { + format!( + "Parsing pipeline spec: {}: error={e:?}", + env::var(ENV_MIN_PIPELINE_SPEC).unwrap() + ) + })?; + + info!(config = ?settings, ?pipeline_spec, "Starting server with config and pipeline spec"); // Start the metrics server, which serves the prometheus metrics. let metrics_addr: SocketAddr = - format!("0.0.0.0:{}", &config().metrics_server_listen_port).parse()?; + format!("0.0.0.0:{}", &settings.metrics_server_listen_port).parse()?; let metrics_server_handle = tokio::spawn(start_https_metrics_server(metrics_addr, tls_config.clone())); - let app_addr: SocketAddr = format!("0.0.0.0:{}", &config().app_listen_port).parse()?; - // Start the main server, which serves the application. - let app_server_handle = tokio::spawn(start_main_server(app_addr, tls_config)); + let app_server_handle = tokio::spawn(start_main_server(settings, tls_config, pipeline_spec)); // TODO: is try_join the best? 
we need to short-circuit at the first failure tokio::try_join!(flatten(app_server_handle), flatten(metrics_server_handle))?; diff --git a/rust/serving/src/metrics.rs b/rust/serving/src/metrics.rs index 830a37c0c5..4c64760d4d 100644 --- a/rust/serving/src/metrics.rs +++ b/rust/serving/src/metrics.rs @@ -97,6 +97,7 @@ pub(crate) async fn start_https_metrics_server( ) -> crate::Result<()> { let metrics_app = Router::new().route("/metrics", get(metrics_handler)); + tracing::info!(?addr, "Starting metrics server"); axum_server::bind_rustls(addr, tls_config) .serve(metrics_app.into_make_service()) .await diff --git a/rust/serving/src/pipeline.rs b/rust/serving/src/pipeline.rs index 042e5923b4..d782e3d73a 100644 --- a/rust/serving/src/pipeline.rs +++ b/rust/serving/src/pipeline.rs @@ -1,5 +1,4 @@ -use std::env; -use std::sync::OnceLock; +use std::str::FromStr; use base64::prelude::BASE64_STANDARD; use base64::Engine; @@ -8,16 +7,6 @@ use serde::{Deserialize, Serialize}; use crate::Error::ParseConfig; -const ENV_MIN_PIPELINE_SPEC: &str = "NUMAFLOW_SERVING_MIN_PIPELINE_SPEC"; - -pub fn min_pipeline_spec() -> &'static PipelineDCG { - static PIPELINE: OnceLock = OnceLock::new(); - PIPELINE.get_or_init(|| match PipelineDCG::load() { - Ok(pipeline) => pipeline, - Err(e) => panic!("Failed to load minimum pipeline spec: {:?}", e), - }) -} - // OperatorType is an enum that contains the types of operators // that can be used in the conditions for the edge. 
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] @@ -53,6 +42,7 @@ impl From for OperatorType { } // Tag is a struct that contains the information about the tags for the edge +#[cfg_attr(test, derive(PartialEq))] #[derive(Serialize, Deserialize, Debug, Clone)] pub struct Tag { pub operator: Option, @@ -60,6 +50,7 @@ pub struct Tag { } // Conditions is a struct that contains the information about the conditions for the edge +#[cfg_attr(test, derive(PartialEq))] #[derive(Serialize, Deserialize, Debug, Clone)] pub struct Conditions { pub tags: Option, @@ -87,27 +78,17 @@ pub struct Vertex { pub name: String, } -impl PipelineDCG { - pub fn load() -> crate::Result { - let full_pipeline_spec = match env::var(ENV_MIN_PIPELINE_SPEC) { - Ok(env_value) => { - // If the environment variable is set, decode and parse the pipeline - let decoded = BASE64_STANDARD - .decode(env_value.as_bytes()) - .map_err(|e| ParseConfig(format!("decoding pipeline from env: {e:?}")))?; - - serde_json::from_slice::(&decoded) - .map_err(|e| ParseConfig(format!("parsing pipeline from env: {e:?}")))? - } - Err(_) => { - // If the environment variable is not set, read the pipeline from a file - let file_path = "./config/pipeline_spec.json"; - let file_contents = std::fs::read_to_string(file_path) - .map_err(|e| ParseConfig(format!("reading pipeline file: {e:?}")))?; - serde_json::from_str::(&file_contents) - .map_err(|e| ParseConfig(format!("parsing pipeline file: {e:?}")))? 
- } - }; +impl FromStr for PipelineDCG { + type Err = crate::Error; + + fn from_str(pipeline_spec_encoded: &str) -> Result { + let full_pipeline_spec_decoded = BASE64_STANDARD + .decode(pipeline_spec_encoded) + .map_err(|e| ParseConfig(format!("Decoding pipeline from env: {e:?}")))?; + + let full_pipeline_spec = + serde_json::from_slice::(&full_pipeline_spec_decoded) + .map_err(|e| ParseConfig(format!("parsing pipeline from env: {e:?}")))?; let vertices: Vec = full_pipeline_spec .vertices @@ -148,17 +129,29 @@ mod tests { #[test] fn test_pipeline_load() { - let pipeline = min_pipeline_spec(); - assert_eq!(pipeline.vertices.len(), 3); - assert_eq!(pipeline.edges.len(), 2); + let pipeline: PipelineDCG = "eyJ2ZXJ0aWNlcyI6W3sibmFtZSI6ImluIiwic291cmNlIjp7InNlcnZpbmciOnsiYXV0aCI6bnVsbCwic2VydmljZSI6dHJ1ZSwibXNnSURIZWFkZXJLZXkiOiJYLU51bWFmbG93LUlkIiwic3RvcmUiOnsidXJsIjoicmVkaXM6Ly9yZWRpczo2Mzc5In19fSwiY29udGFpbmVyVGVtcGxhdGUiOnsicmVzb3VyY2VzIjp7fSwiaW1hZ2VQdWxsUG9saWN5IjoiTmV2ZXIiLCJlbnYiOlt7Im5hbWUiOiJSVVNUX0xPRyIsInZhbHVlIjoiZGVidWcifV19LCJzY2FsZSI6eyJtaW4iOjF9LCJ1cGRhdGVTdHJhdGVneSI6eyJ0eXBlIjoiUm9sbGluZ1VwZGF0ZSIsInJvbGxpbmdVcGRhdGUiOnsibWF4VW5hdmFpbGFibGUiOiIyNSUifX19LHsibmFtZSI6InBsYW5uZXIiLCJ1ZGYiOnsiY29udGFpbmVyIjp7ImltYWdlIjoiYXNjaWk6MC4xIiwiYXJncyI6WyJwbGFubmVyIl0sInJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn0sImJ1aWx0aW4iOm51bGwsImdyb3VwQnkiOm51bGx9LCJjb250YWluZXJUZW1wbGF0ZSI6eyJyZXNvdXJjZXMiOnt9LCJpbWFnZVB1bGxQb2xpY3kiOiJOZXZlciJ9LCJzY2FsZSI6eyJtaW4iOjF9LCJ1cGRhdGVTdHJhdGVneSI6eyJ0eXBlIjoiUm9sbGluZ1VwZGF0ZSIsInJvbGxpbmdVcGRhdGUiOnsibWF4VW5hdmFpbGFibGUiOiIyNSUifX19LHsibmFtZSI6InRpZ2VyIiwidWRmIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6ImFzY2lpOjAuMSIsImFyZ3MiOlsidGlnZXIiXSwicmVzb3VyY2VzIjp7fSwiaW1hZ2VQdWxsUG9saWN5IjoiTmV2ZXIifSwiYnVpbHRpbiI6bnVsbCwiZ3JvdXBCeSI6bnVsbH0sImNvbnRhaW5lclRlbXBsYXRlIjp7InJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn0sInNjYWxlIjp7Im1pbiI6MX0sInVwZGF0ZVN0cmF0ZWd5Ijp7InR5cGUiOiJSb2xsaW5nVXBkYXRlIiwicm9sbGluZ1VwZGF0ZSI6eyJtYXhVbmF2YWls
YWJsZSI6IjI1JSJ9fX0seyJuYW1lIjoiZG9nIiwidWRmIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6ImFzY2lpOjAuMSIsImFyZ3MiOlsiZG9nIl0sInJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn0sImJ1aWx0aW4iOm51bGwsImdyb3VwQnkiOm51bGx9LCJjb250YWluZXJUZW1wbGF0ZSI6eyJyZXNvdXJjZXMiOnt9LCJpbWFnZVB1bGxQb2xpY3kiOiJOZXZlciJ9LCJzY2FsZSI6eyJtaW4iOjF9LCJ1cGRhdGVTdHJhdGVneSI6eyJ0eXBlIjoiUm9sbGluZ1VwZGF0ZSIsInJvbGxpbmdVcGRhdGUiOnsibWF4VW5hdmFpbGFibGUiOiIyNSUifX19LHsibmFtZSI6ImVsZXBoYW50IiwidWRmIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6ImFzY2lpOjAuMSIsImFyZ3MiOlsiZWxlcGhhbnQiXSwicmVzb3VyY2VzIjp7fSwiaW1hZ2VQdWxsUG9saWN5IjoiTmV2ZXIifSwiYnVpbHRpbiI6bnVsbCwiZ3JvdXBCeSI6bnVsbH0sImNvbnRhaW5lclRlbXBsYXRlIjp7InJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn0sInNjYWxlIjp7Im1pbiI6MX0sInVwZGF0ZVN0cmF0ZWd5Ijp7InR5cGUiOiJSb2xsaW5nVXBkYXRlIiwicm9sbGluZ1VwZGF0ZSI6eyJtYXhVbmF2YWlsYWJsZSI6IjI1JSJ9fX0seyJuYW1lIjoiYXNjaWlhcnQiLCJ1ZGYiOnsiY29udGFpbmVyIjp7ImltYWdlIjoiYXNjaWk6MC4xIiwiYXJncyI6WyJhc2NpaWFydCJdLCJyZXNvdXJjZXMiOnt9LCJpbWFnZVB1bGxQb2xpY3kiOiJOZXZlciJ9LCJidWlsdGluIjpudWxsLCJncm91cEJ5IjpudWxsfSwiY29udGFpbmVyVGVtcGxhdGUiOnsicmVzb3VyY2VzIjp7fSwiaW1hZ2VQdWxsUG9saWN5IjoiTmV2ZXIifSwic2NhbGUiOnsibWluIjoxfSwidXBkYXRlU3RyYXRlZ3kiOnsidHlwZSI6IlJvbGxpbmdVcGRhdGUiLCJyb2xsaW5nVXBkYXRlIjp7Im1heFVuYXZhaWxhYmxlIjoiMjUlIn19fSx7Im5hbWUiOiJzZXJ2ZS1zaW5rIiwic2luayI6eyJ1ZHNpbmsiOnsiY29udGFpbmVyIjp7ImltYWdlIjoic2VydmVzaW5rOjAuMSIsImVudiI6W3sibmFtZSI6Ik5VTUFGTE9XX0NBTExCQUNLX1VSTF9LRVkiLCJ2YWx1ZSI6IlgtTnVtYWZsb3ctQ2FsbGJhY2stVXJsIn0seyJuYW1lIjoiTlVNQUZMT1dfTVNHX0lEX0hFQURFUl9LRVkiLCJ2YWx1ZSI6IlgtTnVtYWZsb3ctSWQifV0sInJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn19LCJyZXRyeVN0cmF0ZWd5Ijp7fX0sImNvbnRhaW5lclRlbXBsYXRlIjp7InJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn0sInNjYWxlIjp7Im1pbiI6MX0sInVwZGF0ZVN0cmF0ZWd5Ijp7InR5cGUiOiJSb2xsaW5nVXBkYXRlIiwicm9sbGluZ1VwZGF0ZSI6eyJtYXhVbmF2YWlsYWJsZSI6IjI1JSJ9fX0seyJuYW1lIjoiZXJyb3Itc2luayIsInNpbmsiOnsidWRzaW5rIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6InNlcnZlc2luazowLjEiLCJlbnYiOlt7Im5hbWUiOiJOVU1BRkxP
V19DQUxMQkFDS19VUkxfS0VZIiwidmFsdWUiOiJYLU51bWFmbG93LUNhbGxiYWNrLVVybCJ9LHsibmFtZSI6Ik5VTUFGTE9XX01TR19JRF9IRUFERVJfS0VZIiwidmFsdWUiOiJYLU51bWFmbG93LUlkIn1dLCJyZXNvdXJjZXMiOnt9LCJpbWFnZVB1bGxQb2xpY3kiOiJOZXZlciJ9fSwicmV0cnlTdHJhdGVneSI6e319LCJjb250YWluZXJUZW1wbGF0ZSI6eyJyZXNvdXJjZXMiOnt9LCJpbWFnZVB1bGxQb2xpY3kiOiJOZXZlciJ9LCJzY2FsZSI6eyJtaW4iOjF9LCJ1cGRhdGVTdHJhdGVneSI6eyJ0eXBlIjoiUm9sbGluZ1VwZGF0ZSIsInJvbGxpbmdVcGRhdGUiOnsibWF4VW5hdmFpbGFibGUiOiIyNSUifX19XSwiZWRnZXMiOlt7ImZyb20iOiJpbiIsInRvIjoicGxhbm5lciIsImNvbmRpdGlvbnMiOm51bGx9LHsiZnJvbSI6InBsYW5uZXIiLCJ0byI6ImFzY2lpYXJ0IiwiY29uZGl0aW9ucyI6eyJ0YWdzIjp7Im9wZXJhdG9yIjoib3IiLCJ2YWx1ZXMiOlsiYXNjaWlhcnQiXX19fSx7ImZyb20iOiJwbGFubmVyIiwidG8iOiJ0aWdlciIsImNvbmRpdGlvbnMiOnsidGFncyI6eyJvcGVyYXRvciI6Im9yIiwidmFsdWVzIjpbInRpZ2VyIl19fX0seyJmcm9tIjoicGxhbm5lciIsInRvIjoiZG9nIiwiY29uZGl0aW9ucyI6eyJ0YWdzIjp7Im9wZXJhdG9yIjoib3IiLCJ2YWx1ZXMiOlsiZG9nIl19fX0seyJmcm9tIjoicGxhbm5lciIsInRvIjoiZWxlcGhhbnQiLCJjb25kaXRpb25zIjp7InRhZ3MiOnsib3BlcmF0b3IiOiJvciIsInZhbHVlcyI6WyJlbGVwaGFudCJdfX19LHsiZnJvbSI6InRpZ2VyIiwidG8iOiJzZXJ2ZS1zaW5rIiwiY29uZGl0aW9ucyI6bnVsbH0seyJmcm9tIjoiZG9nIiwidG8iOiJzZXJ2ZS1zaW5rIiwiY29uZGl0aW9ucyI6bnVsbH0seyJmcm9tIjoiZWxlcGhhbnQiLCJ0byI6InNlcnZlLXNpbmsiLCJjb25kaXRpb25zIjpudWxsfSx7ImZyb20iOiJhc2NpaWFydCIsInRvIjoic2VydmUtc2luayIsImNvbmRpdGlvbnMiOm51bGx9LHsiZnJvbSI6InBsYW5uZXIiLCJ0byI6ImVycm9yLXNpbmsiLCJjb25kaXRpb25zIjp7InRhZ3MiOnsib3BlcmF0b3IiOiJvciIsInZhbHVlcyI6WyJlcnJvciJdfX19XSwibGlmZWN5Y2xlIjp7fSwid2F0ZXJtYXJrIjp7fX0=".parse().unwrap(); + + assert_eq!(pipeline.vertices.len(), 8); + assert_eq!(pipeline.edges.len(), 10); assert_eq!(pipeline.vertices[0].name, "in"); assert_eq!(pipeline.edges[0].from, "in"); - assert_eq!(pipeline.edges[0].to, "cat"); + assert_eq!(pipeline.edges[0].to, "planner"); assert!(pipeline.edges[0].conditions.is_none()); - assert_eq!(pipeline.vertices[1].name, "cat"); - assert_eq!(pipeline.vertices[2].name, "out"); - assert_eq!(pipeline.edges[1].from, "cat"); - assert_eq!(pipeline.edges[1].to, 
"out"); + assert_eq!(pipeline.vertices[1].name, "planner"); + assert_eq!(pipeline.edges[1].from, "planner"); + assert_eq!(pipeline.edges[1].to, "asciiart"); + assert_eq!( + pipeline.edges[1].conditions, + Some(Conditions { + tags: Some(Tag { + operator: Some(OperatorType::Or), + values: vec!["asciiart".to_owned()] + }) + }) + ); + + assert_eq!(pipeline.vertices[2].name, "tiger"); + assert_eq!(pipeline.vertices[3].name, "dog"); } } From dfae9893d74c8803b48cf5317fd757a2793de722 Mon Sep 17 00:00:00 2001 From: Yashash H L Date: Thu, 19 Dec 2024 22:18:45 +0530 Subject: [PATCH 175/188] chore: Expose env to disable read ahead in source (#2297) Signed-off-by: Yashash H L Signed-off-by: Vigith Maurice Co-authored-by: Vigith Maurice --- rust/numaflow-core/src/config/components.rs | 6 +++++ rust/numaflow-core/src/config/monovertex.rs | 6 +++++ rust/numaflow-core/src/config/pipeline.rs | 6 +++++ rust/numaflow-core/src/monovertex.rs | 1 + .../numaflow-core/src/monovertex/forwarder.rs | 2 ++ rust/numaflow-core/src/pipeline.rs | 1 + .../pipeline/forwarder/source_forwarder.rs | 1 + .../src/shared/create_components.rs | 3 +++ rust/numaflow-core/src/source.rs | 26 +++++++++++++++++++ 9 files changed, 52 insertions(+) diff --git a/rust/numaflow-core/src/config/components.rs b/rust/numaflow-core/src/config/components.rs index f17331ddaa..a49692060f 100644 --- a/rust/numaflow-core/src/config/components.rs +++ b/rust/numaflow-core/src/config/components.rs @@ -17,12 +17,16 @@ pub(crate) mod source { #[derive(Debug, Clone, PartialEq)] pub(crate) struct SourceConfig { + /// for high-throughput use-cases we read-ahead the next batch before the previous batch has + /// been acked (or completed). For most cases it should be set to false. 
+ pub(crate) read_ahead: bool, pub(crate) source_type: SourceType, } impl Default for SourceConfig { fn default() -> Self { Self { + read_ahead: false, source_type: SourceType::Generator(GeneratorConfig::default()), } } @@ -518,6 +522,7 @@ mod source_tests { fn test_source_config_generator() { let generator_config = GeneratorConfig::default(); let source_config = SourceConfig { + read_ahead: false, source_type: SourceType::Generator(generator_config.clone()), }; if let SourceType::Generator(config) = source_config.source_type { @@ -531,6 +536,7 @@ mod source_tests { fn test_source_config_user_defined() { let user_defined_config = UserDefinedConfig::default(); let source_config = SourceConfig { + read_ahead: false, source_type: SourceType::UserDefined(user_defined_config.clone()), }; if let SourceType::UserDefined(config) = source_config.source_type { diff --git a/rust/numaflow-core/src/config/monovertex.rs b/rust/numaflow-core/src/config/monovertex.rs index c5f4f2622b..75f5a8cb9a 100644 --- a/rust/numaflow-core/src/config/monovertex.rs +++ b/rust/numaflow-core/src/config/monovertex.rs @@ -1,3 +1,4 @@ +use std::env; use std::time::Duration; use base64::prelude::BASE64_STANDARD; @@ -42,6 +43,7 @@ impl Default for MonovertexConfig { read_timeout: Duration::from_millis(DEFAULT_TIMEOUT_IN_MS as u64), replica: 0, source_config: SourceConfig { + read_ahead: false, source_type: source::SourceType::Generator(GeneratorConfig::default()), }, sink_config: SinkConfig { @@ -107,6 +109,10 @@ impl MonovertexConfig { .ok_or_else(|| Error::Config("Source not found".to_string()))?; let source_config = SourceConfig { + read_ahead: env::var("READ_AHEAD") + .unwrap_or("false".to_string()) + .parse() + .unwrap(), source_type: source.try_into()?, }; diff --git a/rust/numaflow-core/src/config/pipeline.rs b/rust/numaflow-core/src/config/pipeline.rs index 6c0a4a08bc..9509e8f4af 100644 --- a/rust/numaflow-core/src/config/pipeline.rs +++ b/rust/numaflow-core/src/config/pipeline.rs @@ -157,6 
+157,10 @@ impl PipelineConfig { VertexType::Source(SourceVtxConfig { source_config: SourceConfig { + read_ahead: env::var("READ_AHEAD") + .unwrap_or("false".to_string()) + .parse() + .unwrap(), source_type: source.try_into()?, }, transformer_config, @@ -424,6 +428,7 @@ mod tests { }], vertex_config: VertexType::Source(SourceVtxConfig { source_config: SourceConfig { + read_ahead: false, source_type: SourceType::Generator(GeneratorConfig { rpu: 100000, content: Default::default(), @@ -476,6 +481,7 @@ mod tests { }], vertex_config: VertexType::Source(SourceVtxConfig { source_config: SourceConfig { + read_ahead: false, source_type: SourceType::Pulsar(PulsarSourceConfig { pulsar_server_addr: "pulsar://pulsar-service:6650".to_string(), topic: "test_persistent".to_string(), diff --git a/rust/numaflow-core/src/monovertex.rs b/rust/numaflow-core/src/monovertex.rs index 8085658db8..ba488cc8fd 100644 --- a/rust/numaflow-core/src/monovertex.rs +++ b/rust/numaflow-core/src/monovertex.rs @@ -221,6 +221,7 @@ mod tests { let config = MonovertexConfig { source_config: components::source::SourceConfig { + read_ahead: false, source_type: components::source::SourceType::UserDefined( components::source::UserDefinedConfig { socket_path: src_sock_file.to_str().unwrap().to_string(), diff --git a/rust/numaflow-core/src/monovertex/forwarder.rs b/rust/numaflow-core/src/monovertex/forwarder.rs index dcdfdb21a5..b048680482 100644 --- a/rust/numaflow-core/src/monovertex/forwarder.rs +++ b/rust/numaflow-core/src/monovertex/forwarder.rs @@ -266,6 +266,7 @@ mod tests { 5, SourceType::UserDefinedSource(src_read, src_ack, lag_reader), tracker_handle.clone(), + true, ); // create a transformer @@ -395,6 +396,7 @@ mod tests { 5, SourceType::UserDefinedSource(src_read, src_ack, lag_reader), tracker_handle.clone(), + true, ); // create a transformer diff --git a/rust/numaflow-core/src/pipeline.rs b/rust/numaflow-core/src/pipeline.rs index d59cd182b6..434b9aa6d2 100644 --- 
a/rust/numaflow-core/src/pipeline.rs +++ b/rust/numaflow-core/src/pipeline.rs @@ -329,6 +329,7 @@ mod tests { }], vertex_config: VertexType::Source(SourceVtxConfig { source_config: SourceConfig { + read_ahead: false, source_type: SourceType::Generator(GeneratorConfig { rpu: 10, content: bytes::Bytes::new(), diff --git a/rust/numaflow-core/src/pipeline/forwarder/source_forwarder.rs b/rust/numaflow-core/src/pipeline/forwarder/source_forwarder.rs index e2b009e7d9..d494cbbd93 100644 --- a/rust/numaflow-core/src/pipeline/forwarder/source_forwarder.rs +++ b/rust/numaflow-core/src/pipeline/forwarder/source_forwarder.rs @@ -243,6 +243,7 @@ mod tests { 5, SourceType::UserDefinedSource(src_read, src_ack, lag_reader), tracker_handle.clone(), + true, ); // create a js writer diff --git a/rust/numaflow-core/src/shared/create_components.rs b/rust/numaflow-core/src/shared/create_components.rs index 0e5fade691..9dd0f39596 100644 --- a/rust/numaflow-core/src/shared/create_components.rs +++ b/rust/numaflow-core/src/shared/create_components.rs @@ -214,6 +214,7 @@ pub async fn create_source( batch_size, source::SourceType::Generator(generator_read, generator_ack, generator_lag), tracker_handle, + source_config.read_ahead, ), None, )) @@ -251,6 +252,7 @@ pub async fn create_source( batch_size, source::SourceType::UserDefinedSource(ud_read, ud_ack, ud_lag), tracker_handle, + source_config.read_ahead, ), Some(source_grpc_client), )) @@ -262,6 +264,7 @@ pub async fn create_source( batch_size, source::SourceType::Pulsar(pulsar), tracker_handle, + source_config.read_ahead, ), None, )) diff --git a/rust/numaflow-core/src/source.rs b/rust/numaflow-core/src/source.rs index 66361d84ac..8be9d85493 100644 --- a/rust/numaflow-core/src/source.rs +++ b/rust/numaflow-core/src/source.rs @@ -1,4 +1,7 @@ use numaflow_pulsar::source::PulsarSource; +use std::sync::Arc; +use tokio::sync::OwnedSemaphorePermit; +use tokio::sync::Semaphore; use tokio::sync::{mpsc, oneshot}; use tokio::task::JoinHandle; use 
tokio::time; @@ -143,6 +146,7 @@ pub(crate) struct Source { read_batch_size: usize, sender: mpsc::Sender, tracker_handle: TrackerHandle, + read_ahead: bool, } impl Source { @@ -151,6 +155,7 @@ impl Source { batch_size: usize, src_type: SourceType, tracker_handle: TrackerHandle, + read_ahead: bool, ) -> Self { let (sender, receiver) = mpsc::channel(batch_size); match src_type { @@ -182,6 +187,7 @@ impl Source { read_batch_size: batch_size, sender, tracker_handle, + read_ahead, } } @@ -234,6 +240,7 @@ impl Source { let (messages_tx, messages_rx) = mpsc::channel(batch_size); let source_handle = self.sender.clone(); let tracker_handle = self.tracker_handle.clone(); + let read_ahead_enabled = self.read_ahead; let pipeline_labels = pipeline_forward_metric_labels("Source", Some(get_vertex_name())); let mvtx_labels = mvtx_forward_metric_labels(); @@ -242,12 +249,21 @@ impl Source { let handle = tokio::spawn(async move { let mut processed_msgs_count: usize = 0; let mut last_logged_at = time::Instant::now(); + // this semaphore is used only if read-ahead is disabled. we hold this semaphore to + // make sure we can read only if the current inflight ones are ack'ed. + let semaphore = Arc::new(Semaphore::new(1)); loop { if cln_token.is_cancelled() { info!("Cancellation token is cancelled. Stopping the source."); return Ok(()); } + + if !read_ahead_enabled { + // Acquire the semaphore permit before reading the next batch to make + // sure we are not reading ahead and all the inflight messages are acked. + let _permit = Arc::clone(&semaphore).acquire_owned().await.unwrap(); + } // Reserve the permits before invoking the read method. let mut permit = match messages_tx.reserve_many(batch_size).await { Ok(permit) => permit, @@ -265,6 +281,7 @@ impl Source { return Err(e); } }; + let n = messages.len(); if is_mono_vertex() { monovertex_metrics() @@ -314,10 +331,17 @@ impl Source { } // start a background task to invoke ack on the source for the offsets that are acked. 
+ // if read ahead is disabled, acquire the semaphore permit before invoking ack so that + // we wait for all the inflight messages to be acked before reading the next batch. tokio::spawn(Self::invoke_ack( read_start_time, source_handle.clone(), ack_batch, + if !read_ahead_enabled { + Some(Arc::clone(&semaphore).acquire_owned().await.unwrap()) + } else { + None + }, )); processed_msgs_count += n; @@ -340,6 +364,7 @@ impl Source { e2e_start_time: time::Instant, source_handle: mpsc::Sender, ack_rx_batch: Vec<(Offset, oneshot::Receiver)>, + _permit: Option, // permit to release after acking the offsets. ) -> Result<()> { let n = ack_rx_batch.len(); let mut offsets_to_ack = Vec::with_capacity(n); @@ -523,6 +548,7 @@ mod tests { 5, SourceType::UserDefinedSource(src_read, src_ack, lag_reader), TrackerHandle::new(), + true, ); let cln_token = CancellationToken::new(); From 8128476528bf49ae742ff9259c5291bd0a6473e3 Mon Sep 17 00:00:00 2001 From: Yashash H L Date: Tue, 24 Dec 2024 09:11:14 +0530 Subject: [PATCH 176/188] feat: Asynchronous Map Implementation for Pipeline (#2295) Signed-off-by: Yashash H L Signed-off-by: Vigith Maurice Co-authored-by: Vigith Maurice --- pkg/apis/numaflow/v1alpha1/udf.go | 3 + rust/Cargo.lock | 24 +- rust/numaflow-core/Cargo.toml | 2 +- rust/numaflow-core/src/config/pipeline.rs | 221 +++- rust/numaflow-core/src/error.rs | 3 + rust/numaflow-core/src/lib.rs | 3 + rust/numaflow-core/src/mapper.rs | 31 + rust/numaflow-core/src/mapper/map.rs | 1176 +++++++++++++++++ .../src/mapper/map/user_defined.rs | 721 ++++++++++ rust/numaflow-core/src/message.rs | 23 +- rust/numaflow-core/src/metrics.rs | 14 +- rust/numaflow-core/src/monovertex.rs | 4 +- .../numaflow-core/src/monovertex/forwarder.rs | 12 +- rust/numaflow-core/src/pipeline.rs | 366 ++++- rust/numaflow-core/src/pipeline/forwarder.rs | 4 + .../src/pipeline/forwarder/map_forwarder.rs | 63 + .../src/pipeline/forwarder/sink_forwarder.rs | 7 +- .../pipeline/forwarder/source_forwarder.rs | 14 +- 
.../src/pipeline/isb/jetstream/reader.rs | 51 +- .../src/pipeline/isb/jetstream/writer.rs | 17 +- .../src/shared/create_components.rs | 71 +- rust/numaflow-core/src/shared/grpc.rs | 21 + rust/numaflow-core/src/shared/server_info.rs | 17 + rust/numaflow-core/src/source.rs | 19 +- rust/numaflow-core/src/source/user_defined.rs | 4 +- rust/numaflow-core/src/tracker.rs | 47 +- rust/numaflow-core/src/transformer.rs | 206 ++- .../src/transformer/user_defined.rs | 69 +- 28 files changed, 3051 insertions(+), 162 deletions(-) create mode 100644 rust/numaflow-core/src/mapper.rs create mode 100644 rust/numaflow-core/src/mapper/map.rs create mode 100644 rust/numaflow-core/src/mapper/map/user_defined.rs create mode 100644 rust/numaflow-core/src/pipeline/forwarder/map_forwarder.rs diff --git a/pkg/apis/numaflow/v1alpha1/udf.go b/pkg/apis/numaflow/v1alpha1/udf.go index 573ddcbca1..7a1a44c702 100644 --- a/pkg/apis/numaflow/v1alpha1/udf.go +++ b/pkg/apis/numaflow/v1alpha1/udf.go @@ -51,6 +51,9 @@ func (in UDF) getContainers(req getContainerReq) ([]corev1.Container, []corev1.C func (in UDF) getMainContainer(req getContainerReq) corev1.Container { if in.GroupBy == nil { + if req.executeRustBinary { + return containerBuilder{}.init(req).command(NumaflowRustBinary).args("processor", "--type="+string(VertexTypeMapUDF), "--isbsvc-type="+string(req.isbSvcType), "--rust").build() + } args := []string{"processor", "--type=" + string(VertexTypeMapUDF), "--isbsvc-type=" + string(req.isbSvcType)} return containerBuilder{}. 
init(req).args(args...).build() diff --git a/rust/Cargo.lock b/rust/Cargo.lock index a210284fcd..beec59aa4b 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -1722,6 +1722,28 @@ dependencies = [ "uuid", ] +[[package]] +name = "numaflow" +version = "0.2.1" +source = "git+https://github.com/numaproj/numaflow-rs.git?rev=9ca9362ad511084501520e5a37d40cdcd0cdc9d9#9ca9362ad511084501520e5a37d40cdcd0cdc9d9" +dependencies = [ + "chrono", + "futures-util", + "hyper-util", + "prost 0.13.3", + "prost-types 0.13.3", + "serde", + "serde_json", + "thiserror 1.0.69", + "tokio", + "tokio-stream", + "tokio-util", + "tonic", + "tonic-build", + "tracing", + "uuid", +] + [[package]] name = "numaflow-core" version = "0.1.0" @@ -1736,7 +1758,7 @@ dependencies = [ "futures", "hyper-util", "kube", - "numaflow 0.1.1", + "numaflow 0.2.1", "numaflow-models", "numaflow-pb", "numaflow-pulsar", diff --git a/rust/numaflow-core/Cargo.toml b/rust/numaflow-core/Cargo.toml index b4688a135b..38cabb704f 100644 --- a/rust/numaflow-core/Cargo.toml +++ b/rust/numaflow-core/Cargo.toml @@ -49,7 +49,7 @@ async-nats = "0.38.0" [dev-dependencies] tempfile = "3.11.0" -numaflow = { git = "https://github.com/numaproj/numaflow-rs.git", rev = "ddd879588e11455921f1ca958ea2b3c076689293" } +numaflow = { git = "https://github.com/numaproj/numaflow-rs.git", rev = "9ca9362ad511084501520e5a37d40cdcd0cdc9d9" } pulsar = { version = "6.3.0", default-features = false, features = ["tokio-rustls-runtime"] } [build-dependencies] diff --git a/rust/numaflow-core/src/config/pipeline.rs b/rust/numaflow-core/src/config/pipeline.rs index 9509e8f4af..1368b0b32d 100644 --- a/rust/numaflow-core/src/config/pipeline.rs +++ b/rust/numaflow-core/src/config/pipeline.rs @@ -14,6 +14,8 @@ use crate::config::components::source::SourceConfig; use crate::config::components::transformer::{TransformerConfig, TransformerType}; use crate::config::get_vertex_replica; use crate::config::pipeline::isb::{BufferReaderConfig, BufferWriterConfig}; +use 
crate::config::pipeline::map::MapMode; +use crate::config::pipeline::map::MapVtxConfig; use crate::error::Error; use crate::Result; @@ -23,6 +25,11 @@ const DEFAULT_LOOKBACK_WINDOW_IN_SECS: u16 = 120; const ENV_NUMAFLOW_SERVING_JETSTREAM_URL: &str = "NUMAFLOW_ISBSVC_JETSTREAM_URL"; const ENV_NUMAFLOW_SERVING_JETSTREAM_USER: &str = "NUMAFLOW_ISBSVC_JETSTREAM_USER"; const ENV_NUMAFLOW_SERVING_JETSTREAM_PASSWORD: &str = "NUMAFLOW_ISBSVC_JETSTREAM_PASSWORD"; +const DEFAULT_GRPC_MAX_MESSAGE_SIZE: usize = 64 * 1024 * 1024; // 64 MB +const DEFAULT_MAP_SOCKET: &str = "/var/run/numaflow/map.sock"; +pub(crate) const DEFAULT_BATCH_MAP_SOCKET: &str = "/var/run/numaflow/batchmap.sock"; +pub(crate) const DEFAULT_STREAM_MAP_SOCKET: &str = "/var/run/numaflow/mapstream.sock"; +const DEFAULT_MAP_SERVER_INFO_FILE: &str = "/var/run/numaflow/mapper-server-info"; pub(crate) mod isb; @@ -69,6 +76,84 @@ pub(crate) struct SourceVtxConfig { pub(crate) transformer_config: Option, } +pub(crate) mod map { + use std::collections::HashMap; + + use numaflow_models::models::Udf; + + use crate::config::pipeline::{ + DEFAULT_GRPC_MAX_MESSAGE_SIZE, DEFAULT_MAP_SERVER_INFO_FILE, DEFAULT_MAP_SOCKET, + }; + use crate::error::Error; + + /// A map can be run in different modes. 
+ #[derive(Debug, Clone, PartialEq)] + pub enum MapMode { + Unary, + Batch, + Stream, + } + + impl MapMode { + pub(crate) fn from_str(s: &str) -> Option { + match s { + "unary-map" => Some(MapMode::Unary), + "stream-map" => Some(MapMode::Stream), + "batch-map" => Some(MapMode::Batch), + _ => None, + } + } + } + + #[derive(Debug, Clone, PartialEq)] + pub(crate) struct MapVtxConfig { + pub(crate) concurrency: usize, + pub(crate) map_type: MapType, + pub(crate) map_mode: MapMode, + } + + #[derive(Debug, Clone, PartialEq)] + pub(crate) enum MapType { + UserDefined(UserDefinedConfig), + Builtin(BuiltinConfig), + } + + impl TryFrom> for MapType { + type Error = Error; + fn try_from(udf: Box) -> std::result::Result { + if let Some(builtin) = udf.builtin { + Ok(MapType::Builtin(BuiltinConfig { + name: builtin.name, + kwargs: builtin.kwargs, + args: builtin.args, + })) + } else if let Some(_container) = udf.container { + Ok(MapType::UserDefined(UserDefinedConfig { + grpc_max_message_size: DEFAULT_GRPC_MAX_MESSAGE_SIZE, + socket_path: DEFAULT_MAP_SOCKET.to_string(), + server_info_path: DEFAULT_MAP_SERVER_INFO_FILE.to_string(), + })) + } else { + Err(Error::Config("Invalid UDF".to_string())) + } + } + } + + #[derive(Debug, Clone, PartialEq)] + pub(crate) struct UserDefinedConfig { + pub grpc_max_message_size: usize, + pub socket_path: String, + pub server_info_path: String, + } + + #[derive(Debug, Clone, PartialEq)] + pub(crate) struct BuiltinConfig { + pub(crate) name: String, + pub(crate) kwargs: Option>, + pub(crate) args: Option>, + } +} + #[derive(Debug, Clone, PartialEq)] pub(crate) struct SinkVtxConfig { pub(crate) sink_config: SinkConfig, @@ -79,6 +164,7 @@ pub(crate) struct SinkVtxConfig { pub(crate) enum VertexType { Source(SourceVtxConfig), Sink(SinkVtxConfig), + Map(MapVtxConfig), } impl std::fmt::Display for VertexType { @@ -86,6 +172,7 @@ impl std::fmt::Display for VertexType { match self { VertexType::Source(_) => write!(f, "Source"), VertexType::Sink(_) => 
write!(f, "Sink"), + VertexType::Map(_) => write!(f, "Map"), } } } @@ -182,6 +269,12 @@ impl PipelineConfig { }, fb_sink_config, }) + } else if let Some(map) = vertex_obj.spec.udf { + VertexType::Map(MapVtxConfig { + concurrency: batch_size as usize, + map_type: map.try_into()?, + map_mode: MapMode::Unary, + }) } else { return Err(Error::Config( "Only source and sink are supported ATM".to_string(), @@ -283,7 +376,7 @@ impl PipelineConfig { Ok(PipelineConfig { batch_size: batch_size as usize, paf_concurrency: env::var("PAF_BATCH_SIZE") - .unwrap_or("30000".to_string()) + .unwrap_or((DEFAULT_BATCH_SIZE * 2).to_string()) .parse() .unwrap(), read_timeout: Duration::from_millis(timeout_in_ms as u64), @@ -301,11 +394,13 @@ impl PipelineConfig { #[cfg(test)] mod tests { + use numaflow_models::models::{Container, Function, Udf}; use numaflow_pulsar::source::PulsarSourceConfig; use super::*; use crate::config::components::sink::{BlackholeConfig, LogConfig, SinkType}; use crate::config::components::source::{GeneratorConfig, SourceType}; + use crate::config::pipeline::map::{MapType, UserDefinedConfig}; #[test] fn test_default_pipeline_config() { @@ -360,7 +455,7 @@ mod tests { vertex_name: "out".to_string(), replica: 0, batch_size: 500, - paf_concurrency: 30000, + paf_concurrency: 1000, read_timeout: Duration::from_secs(1), js_client_config: isb::jetstream::ClientConfig { url: "localhost:4222".to_string(), @@ -371,7 +466,7 @@ mod tests { name: "in".to_string(), reader_config: BufferReaderConfig { partitions: 1, - streams: vec![("default-simple-pipeline-out-0".into(), 0)], + streams: vec![("default-simple-pipeline-out-0", 0)], wip_ack_interval: Duration::from_secs(1), }, partitions: 0, @@ -407,7 +502,7 @@ mod tests { vertex_name: "in".to_string(), replica: 0, batch_size: 1000, - paf_concurrency: 30000, + paf_concurrency: 1000, read_timeout: Duration::from_secs(1), js_client_config: isb::jetstream::ClientConfig { url: "localhost:4222".to_string(), @@ -460,7 +555,7 @@ mod tests 
{ vertex_name: "in".to_string(), replica: 0, batch_size: 50, - paf_concurrency: 30000, + paf_concurrency: 1000, read_timeout: Duration::from_secs(1), js_client_config: isb::jetstream::ClientConfig { url: "localhost:4222".to_string(), @@ -498,4 +593,120 @@ mod tests { assert_eq!(pipeline_config, expected); } + + #[test] + fn test_map_vertex_config_user_defined() { + let udf = Udf { + builtin: None, + container: Some(Box::from(Container { + args: None, + command: None, + env: None, + env_from: None, + image: None, + image_pull_policy: None, + liveness_probe: None, + ports: None, + readiness_probe: None, + resources: None, + security_context: None, + volume_mounts: None, + })), + group_by: None, + }; + + let map_type = MapType::try_from(Box::new(udf)).unwrap(); + assert!(matches!(map_type, MapType::UserDefined(_))); + + let map_vtx_config = MapVtxConfig { + concurrency: 10, + map_type, + map_mode: MapMode::Unary, + }; + + assert_eq!(map_vtx_config.concurrency, 10); + if let MapType::UserDefined(config) = map_vtx_config.map_type { + assert_eq!(config.grpc_max_message_size, DEFAULT_GRPC_MAX_MESSAGE_SIZE); + assert_eq!(config.socket_path, DEFAULT_MAP_SOCKET); + assert_eq!(config.server_info_path, DEFAULT_MAP_SERVER_INFO_FILE); + } else { + panic!("Expected UserDefined map type"); + } + } + + #[test] + fn test_map_vertex_config_builtin() { + let udf = Udf { + builtin: Some(Box::from(Function { + args: None, + kwargs: None, + name: "cat".to_string(), + })), + container: None, + group_by: None, + }; + + let map_type = MapType::try_from(Box::new(udf)).unwrap(); + assert!(matches!(map_type, MapType::Builtin(_))); + + let map_vtx_config = MapVtxConfig { + concurrency: 5, + map_type, + map_mode: MapMode::Unary, + }; + + assert_eq!(map_vtx_config.concurrency, 5); + if let MapType::Builtin(config) = map_vtx_config.map_type { + assert_eq!(config.name, "cat"); + assert!(config.kwargs.is_none()); + assert!(config.args.is_none()); + } else { + panic!("Expected Builtin map type"); + } 
+ } + + #[test] + fn test_pipeline_config_load_map_vertex() { + let pipeline_cfg_base64 = "eyJtZXRhZGF0YSI6eyJuYW1lIjoic2ltcGxlLXBpcGVsaW5lLW1hcCIsIm5hbWVzcGFjZSI6ImRlZmF1bHQiLCJjcmVhdGlvblRpbWVzdGFtcCI6bnVsbH0sInNwZWMiOnsibmFtZSI6Im1hcCIsInVkZiI6eyJjb250YWluZXIiOnsidGVtcGxhdGUiOiJkZWZhdWx0In19LCJsaW1pdHMiOnsicmVhZEJhdGNoU2l6ZSI6NTAwLCJyZWFkVGltZW91dCI6IjFzIiwiYnVmZmVyTWF4TGVuZ3RoIjozMDAwMCwiYnVmZmVyVXNhZ2VMaW1pdCI6ODB9LCJzY2FsZSI6eyJtaW4iOjF9LCJwaXBlbGluZU5hbWUiOiJzaW1wbGUtcGlwZWxpbmUiLCJpbnRlclN0ZXBCdWZmZXJTZXJ2aWNlTmFtZSI6IiIsInJlcGxpY2FzIjowLCJmcm9tRWRnZXMiOlt7ImZyb20iOiJpbiIsInRvIjoibWFwIiwiY29uZGl0aW9ucyI6bnVsbCwiZnJvbVZlcnRleFR5cGUiOiJTb3VyY2UiLCJmcm9tVmVydGV4UGFydGl0aW9uQ291bnQiOjEsImZyb21WZXJ0ZXhMaW1pdHMiOnsicmVhZEJhdGNoU2l6ZSI6NTAwLCJyZWFkVGltZW91dCI6IjFzIiwiYnVmZmVyTWF4TGVuZ3RoIjozMDAwMCwiYnVmZmVyVXNhZ2VMaW1pdCI6ODB9LCJ0b1ZlcnRleFR5cGUiOiJNYXAiLCJ0b1ZlcnRleFBhcnRpdGlvbkNvdW50IjoxLCJ0b1ZlcnRleExpbWl0cyI6eyJyZWFkQmF0Y2hTaXplIjo1MDAsInJlYWRUaW1lb3V0IjoiMXMiLCJidWZmZXJNYXhMZW5ndGgiOjMwMDAwLCJidWZmZXJVc2FnZUxpbWl0Ijo4MH19XSwid2F0ZXJtYXJrIjp7Im1heERlbGF5IjoiMHMifX0sInN0YXR1cyI6eyJwaGFzZSI6IiIsInJlcGxpY2FzIjowLCJkZXNpcmVkUmVwbGljYXMiOjAsImxhc3RTY2FsZWRBdCI6bnVsbH19"; + + let env_vars = [("NUMAFLOW_ISBSVC_JETSTREAM_URL", "localhost:4222")]; + let pipeline_config = + PipelineConfig::load(pipeline_cfg_base64.to_string(), env_vars).unwrap(); + + let expected = PipelineConfig { + pipeline_name: "simple-pipeline".to_string(), + vertex_name: "map".to_string(), + replica: 0, + batch_size: 500, + paf_concurrency: 1000, + read_timeout: Duration::from_secs(1), + js_client_config: isb::jetstream::ClientConfig { + url: "localhost:4222".to_string(), + user: None, + password: None, + }, + from_vertex_config: vec![FromVertexConfig { + name: "in".to_string(), + reader_config: BufferReaderConfig { + partitions: 1, + streams: vec![("default-simple-pipeline-map-0", 0)], + wip_ack_interval: Duration::from_secs(1), + }, + partitions: 0, + }], + to_vertex_config: vec![], + 
vertex_config: VertexType::Map(MapVtxConfig { + concurrency: 500, + map_type: MapType::UserDefined(UserDefinedConfig { + grpc_max_message_size: DEFAULT_GRPC_MAX_MESSAGE_SIZE, + socket_path: DEFAULT_MAP_SOCKET.to_string(), + server_info_path: DEFAULT_MAP_SERVER_INFO_FILE.to_string(), + }), + map_mode: MapMode::Unary, + }), + metrics_config: MetricsConfig::default(), + }; + + assert_eq!(pipeline_config, expected); + } } diff --git a/rust/numaflow-core/src/error.rs b/rust/numaflow-core/src/error.rs index e82a93e2d8..0e499d0689 100644 --- a/rust/numaflow-core/src/error.rs +++ b/rust/numaflow-core/src/error.rs @@ -16,6 +16,9 @@ pub enum Error { #[error("Transformer Error - {0}")] Transformer(String), + #[error("Mapper Error - {0}")] + Mapper(String), + #[error("Forwarder Error - {0}")] Forwarder(String), diff --git a/rust/numaflow-core/src/lib.rs b/rust/numaflow-core/src/lib.rs index 727a119f1b..d65380f8d2 100644 --- a/rust/numaflow-core/src/lib.rs +++ b/rust/numaflow-core/src/lib.rs @@ -51,6 +51,9 @@ mod pipeline; /// Tracker to track the completeness of message processing. mod tracker; +/// Map is a feature that allows users to execute custom code to transform their data. +mod mapper; + pub async fn run() -> Result<()> { let cln_token = CancellationToken::new(); let shutdown_cln_token = cln_token.clone(); diff --git a/rust/numaflow-core/src/mapper.rs b/rust/numaflow-core/src/mapper.rs new file mode 100644 index 0000000000..56d0f51f3a --- /dev/null +++ b/rust/numaflow-core/src/mapper.rs @@ -0,0 +1,31 @@ +//! Numaflow supports flatmap operation through [map::MapHandle] an actor interface. +//! +//! The [map::MapHandle] orchestrates reading messages from the input stream, invoking the map operation, +//! and sending the mapped messages to the output stream. +//! +//! The [map::MapHandle] reads messages from the input stream and invokes the map operation based on the +//! mode: +//! - Unary: Concurrent operations controlled using permits and `tokio::spawn`. +//! 
- Batch: Synchronous operations, one batch at a time, followed by an invoke. +//! - Stream: Concurrent operations controlled using permits and `tokio::spawn`, followed by an +//! invoke. +//! +//! Error handling in unary and stream operations with concurrency N: +//! ```text +//! (Read) <----- (error_tx) <-------- + +//! | | +//! + -->-- (tokio map task 1) -->--- + +//! | | +//! + -->-- (tokio map task 2) -->--- + +//! | | +//! : : +//! | | +//! + -->-- (tokio map task N) -->--- + +//! ``` +//! In case of errors in unary/stream, tasks will write to the error channel (`error_tx`), and the `MapHandle` +//! will stop reading new requests and return an error. +//! +//! Error handling in batch operation is easier because it is synchronous and one batch at a time. If there +//! is an error, the [map::MapHandle] will stop reading new requests and return an error. + +pub(crate) mod map; diff --git a/rust/numaflow-core/src/mapper/map.rs b/rust/numaflow-core/src/mapper/map.rs new file mode 100644 index 0000000000..8c279376ac --- /dev/null +++ b/rust/numaflow-core/src/mapper/map.rs @@ -0,0 +1,1176 @@ +use crate::config::pipeline::map::MapMode; +use crate::error; +use crate::error::Error; +use crate::mapper::map::user_defined::{ + UserDefinedBatchMap, UserDefinedStreamMap, UserDefinedUnaryMap, +}; +use crate::message::Message; +use crate::tracker::TrackerHandle; +use numaflow_pb::clients::map::map_client::MapClient; +use std::sync::Arc; +use std::time::Duration; +use tokio::sync::{mpsc, oneshot, OwnedSemaphorePermit, Semaphore}; +use tokio::task::JoinHandle; +use tokio_stream::wrappers::ReceiverStream; +use tokio_stream::StreamExt; +use tonic::transport::Channel; +pub(super) mod user_defined; + +/// UnaryActorMessage is a message that is sent to the UnaryMapperActor. +struct UnaryActorMessage { + message: Message, + respond_to: oneshot::Sender>>, +} + +/// BatchActorMessage is a message that is sent to the BatchMapperActor. 
+struct BatchActorMessage { + messages: Vec, + respond_to: Vec>>>, +} + +/// StreamActorMessage is a message that is sent to the StreamMapperActor. +struct StreamActorMessage { + message: Message, + respond_to: mpsc::Sender>, +} + +/// UnaryMapperActor is responsible for handling the unary map operation. +struct UnaryMapperActor { + receiver: mpsc::Receiver, + mapper: UserDefinedUnaryMap, +} + +impl UnaryMapperActor { + fn new(receiver: mpsc::Receiver, mapper: UserDefinedUnaryMap) -> Self { + Self { receiver, mapper } + } + + async fn handle_message(&mut self, msg: UnaryActorMessage) { + self.mapper.unary_map(msg.message, msg.respond_to).await; + } + + async fn run(mut self) { + while let Some(msg) = self.receiver.recv().await { + self.handle_message(msg).await; + } + } +} + +/// BatchMapActor is responsible for handling the batch map operation. +struct BatchMapActor { + receiver: mpsc::Receiver, + mapper: UserDefinedBatchMap, +} + +impl BatchMapActor { + fn new(receiver: mpsc::Receiver, mapper: UserDefinedBatchMap) -> Self { + Self { receiver, mapper } + } + + async fn handle_message(&mut self, msg: BatchActorMessage) { + self.mapper.batch_map(msg.messages, msg.respond_to).await; + } + + async fn run(mut self) { + while let Some(msg) = self.receiver.recv().await { + self.handle_message(msg).await; + } + } +} + +/// StreamMapActor is responsible for handling the stream map operation. +struct StreamMapActor { + receiver: mpsc::Receiver, + mapper: UserDefinedStreamMap, +} + +impl StreamMapActor { + fn new(receiver: mpsc::Receiver, mapper: UserDefinedStreamMap) -> Self { + Self { receiver, mapper } + } + + async fn handle_message(&mut self, msg: StreamActorMessage) { + self.mapper.stream_map(msg.message, msg.respond_to).await; + } + + async fn run(mut self) { + while let Some(msg) = self.receiver.recv().await { + self.handle_message(msg).await; + } + } +} + +/// ActorSender is an enum to store the handles to different types of actors. 
+#[derive(Clone)] +enum ActorSender { + Unary(mpsc::Sender), + Batch(mpsc::Sender), + Stream(mpsc::Sender), +} + +/// MapHandle is responsible for reading messages from the stream and invoke the map operation +/// on those messages and send the mapped messages to the output stream. +pub(crate) struct MapHandle { + batch_size: usize, + read_timeout: Duration, + concurrency: usize, + tracker: TrackerHandle, + actor_sender: ActorSender, + task_handle: JoinHandle<()>, +} + +/// Abort all the background tasks when the mapper is dropped. +impl Drop for MapHandle { + fn drop(&mut self) { + self.task_handle.abort(); + } +} + +/// Response channel size for streaming map. +const STREAMING_MAP_RESP_CHANNEL_SIZE: usize = 10; + +impl MapHandle { + /// Creates a new mapper with the given batch size, concurrency, client, and tracker handle. + /// It spawns the appropriate actor based on the map mode. + pub(crate) async fn new( + map_mode: MapMode, + batch_size: usize, + read_timeout: Duration, + concurrency: usize, + client: MapClient, + tracker_handle: TrackerHandle, + ) -> error::Result { + let task_handle; + + // Based on the map mode, spawn the appropriate map actor + // and store the sender handle in the actor_sender. 
+ let actor_sender = match map_mode { + MapMode::Unary => { + let (sender, receiver) = mpsc::channel(batch_size); + let mapper_actor = UnaryMapperActor::new( + receiver, + UserDefinedUnaryMap::new(batch_size, client).await?, + ); + + let handle = tokio::spawn(async move { + mapper_actor.run().await; + }); + task_handle = handle; + ActorSender::Unary(sender) + } + MapMode::Batch => { + let (batch_sender, batch_receiver) = mpsc::channel(batch_size); + let batch_mapper_actor = BatchMapActor::new( + batch_receiver, + UserDefinedBatchMap::new(batch_size, client).await?, + ); + + let handle = tokio::spawn(async move { + batch_mapper_actor.run().await; + }); + task_handle = handle; + ActorSender::Batch(batch_sender) + } + MapMode::Stream => { + let (stream_sender, stream_receiver) = mpsc::channel(batch_size); + let stream_mapper_actor = StreamMapActor::new( + stream_receiver, + UserDefinedStreamMap::new(batch_size, client).await?, + ); + + let handle = tokio::spawn(async move { + stream_mapper_actor.run().await; + }); + task_handle = handle; + ActorSender::Stream(stream_sender) + } + }; + + Ok(Self { + actor_sender, + batch_size, + read_timeout, + concurrency, + tracker: tracker_handle, + task_handle, + }) + } + + /// Maps the input stream of messages and returns the output stream and the handle to the + /// background task. In case of critical errors it stops reading from the input stream and + /// returns the error using the join handle. 
+ pub(crate) async fn streaming_map( + &self, + input_stream: ReceiverStream, + ) -> error::Result<(ReceiverStream, JoinHandle>)> { + let (output_tx, output_rx) = mpsc::channel(self.batch_size); + let (error_tx, mut error_rx) = mpsc::channel(1); + + let actor_handle = self.actor_sender.clone(); + let tracker = self.tracker.clone(); + let semaphore = Arc::new(Semaphore::new(self.concurrency)); + let batch_size = self.batch_size; + let read_timeout = self.read_timeout; + + let handle = tokio::spawn(async move { + let mut input_stream = input_stream; + + // based on the map mode, send the message to the appropriate actor handle. + match actor_handle { + ActorSender::Unary(map_handle) => loop { + // we need tokio select here because we have to listen to both the input stream + // and the error channel. If there is an error, we need to discard all the messages + // in the tracker and stop processing the input stream. + tokio::select! { + read_msg = input_stream.next() => { + if let Some(read_msg) = read_msg { + let permit = Arc::clone(&semaphore).acquire_owned().await.map_err(|e| Error::Mapper(format!("failed to acquire semaphore: {}", e)))?; + let error_tx = error_tx.clone(); + Self::unary( + map_handle.clone(), + permit, + read_msg, + output_tx.clone(), + tracker.clone(), + error_tx, + ).await; + } else { + break; + } + }, + Some(error) = error_rx.recv() => { + // if there is an error, discard all the messages in the tracker and return the error. + tracker.discard_all().await?; + return Err(error); + }, + } + }, + + ActorSender::Batch(map_handle) => { + let timeout_duration = read_timeout; + let chunked_stream = input_stream.chunks_timeout(batch_size, timeout_duration); + tokio::pin!(chunked_stream); + // we don't need to tokio spawn here because, unlike unary and stream, batch is a blocking operation, + // and we process one batch at a time. 
+ while let Some(batch) = chunked_stream.next().await { + if !batch.is_empty() { + if let Err(e) = Self::batch( + map_handle.clone(), + batch, + output_tx.clone(), + tracker.clone(), + ) + .await + { + // if there is an error, discard all the messages in the tracker and return the error. + tracker.discard_all().await?; + return Err(e); + } + } + } + } + + ActorSender::Stream(map_handle) => loop { + // we need tokio select here because we have to listen to both the input stream + // and the error channel. If there is an error, we need to discard all the messages + // in the tracker and stop processing the input stream. + tokio::select! { + read_msg = input_stream.next() => { + if let Some(read_msg) = read_msg { + let permit = Arc::clone(&semaphore).acquire_owned().await.map_err(|e| Error::Mapper(format!("failed to acquire semaphore: {}", e)))?; + let error_tx = error_tx.clone(); + Self::stream( + map_handle.clone(), + permit, + read_msg, + output_tx.clone(), + tracker.clone(), + error_tx, + ).await; + } else { + break; + } + }, + Some(error) = error_rx.recv() => { + // if there is an error, discard all the messages in the tracker and return the error. + tracker.discard_all().await?; + return Err(error); + }, + } + }, + } + Ok(()) + }); + + Ok((ReceiverStream::new(output_rx), handle)) + } + + /// performs unary map operation on the given message and sends the mapped messages to the output + /// stream. It updates the tracker with the number of messages sent. If there are any errors, it + /// sends the error to the error channel. + /// + /// We use permit to limit the number of concurrent map unary operations, so that at any point in time + /// we don't have more than `concurrency` number of map operations running. 
+ async fn unary( + map_handle: mpsc::Sender, + permit: OwnedSemaphorePermit, + read_msg: Message, + output_tx: mpsc::Sender, + tracker_handle: TrackerHandle, + error_tx: mpsc::Sender, + ) { + let output_tx = output_tx.clone(); + + // short-lived tokio spawns we don't need structured concurrency here + tokio::spawn(async move { + let _permit = permit; + + let (sender, receiver) = oneshot::channel(); + let msg = UnaryActorMessage { + message: read_msg.clone(), + respond_to: sender, + }; + + if let Err(e) = map_handle.send(msg).await { + let _ = error_tx + .send(Error::Mapper(format!("failed to send message: {}", e))) + .await; + return; + } + + match receiver.await { + Ok(Ok(mut mapped_messages)) => { + // update the tracker with the number of messages sent and send the mapped messages + if let Err(e) = tracker_handle + .update( + read_msg.id.offset.clone(), + mapped_messages.len() as u32, + true, + ) + .await + { + error_tx.send(e).await.expect("failed to send error"); + return; + } + for mapped_message in mapped_messages.drain(..) { + output_tx + .send(mapped_message) + .await + .expect("failed to send response"); + } + } + Ok(Err(e)) => { + error_tx.send(e).await.expect("failed to send error"); + } + Err(e) => { + error_tx + .send(Error::Mapper(format!("failed to receive message: {}", e))) + .await + .expect("failed to send error"); + } + } + }); + } + + /// performs batch map operation on the given batch of messages and sends the mapped messages to + /// the output stream. It updates the tracker with the number of messages sent. 
+ async fn batch( + map_handle: mpsc::Sender, + batch: Vec, + output_tx: mpsc::Sender, + tracker_handle: TrackerHandle, + ) -> error::Result<()> { + let (senders, receivers): (Vec<_>, Vec<_>) = + batch.iter().map(|_| oneshot::channel()).unzip(); + let msg = BatchActorMessage { + messages: batch, + respond_to: senders, + }; + + map_handle + .send(msg) + .await + .map_err(|e| Error::Mapper(format!("failed to send message: {}", e)))?; + + for receiver in receivers { + match receiver.await { + Ok(Ok(mut mapped_messages)) => { + let offset = mapped_messages.first().unwrap().id.offset.clone(); + tracker_handle + .update(offset.clone(), mapped_messages.len() as u32, true) + .await?; + for mapped_message in mapped_messages.drain(..) { + output_tx + .send(mapped_message) + .await + .expect("failed to send response"); + } + } + Ok(Err(e)) => { + return Err(e); + } + Err(e) => { + return Err(Error::Mapper(format!("failed to receive message: {}", e))); + } + } + } + Ok(()) + } + + /// performs stream map operation on the given message and sends the mapped messages to the output + /// stream. It updates the tracker with the number of messages sent. If there are any errors, + /// it sends the error to the error channel. + /// + /// We use permit to limit the number of concurrent map unary operations, so that at any point in time + /// we don't have more than `concurrency` number of map operations running. 
+ async fn stream( + map_handle: mpsc::Sender, + permit: OwnedSemaphorePermit, + read_msg: Message, + output_tx: mpsc::Sender, + tracker_handle: TrackerHandle, + error_tx: mpsc::Sender, + ) { + let output_tx = output_tx.clone(); + + tokio::spawn(async move { + let _permit = permit; + + let (sender, mut receiver) = mpsc::channel(STREAMING_MAP_RESP_CHANNEL_SIZE); + let msg = StreamActorMessage { + message: read_msg.clone(), + respond_to: sender, + }; + + if let Err(e) = map_handle.send(msg).await { + let _ = error_tx + .send(Error::Mapper(format!("failed to send message: {}", e))) + .await; + return; + } + + while let Some(result) = receiver.recv().await { + match result { + Ok(mapped_message) => { + let offset = mapped_message.id.offset.clone(); + if let Err(e) = tracker_handle.update(offset.clone(), 1, false).await { + error_tx.send(e).await.expect("failed to send error"); + return; + } + if let Err(e) = output_tx.send(mapped_message).await { + error_tx + .send(Error::Mapper(format!("failed to send message: {}", e))) + .await + .expect("failed to send error"); + return; + } + } + Err(e) => { + error_tx.send(e).await.expect("failed to send error"); + return; + } + } + } + + if let Err(e) = tracker_handle.update(read_msg.id.offset, 0, true).await { + error_tx.send(e).await.expect("failed to send error"); + } + }); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::Result; + use std::time::Duration; + + use crate::message::{MessageID, Offset, StringOffset}; + use crate::shared::grpc::create_rpc_channel; + use numaflow::mapstream; + use numaflow::{batchmap, map}; + use numaflow_pb::clients::map::map_client::MapClient; + use tempfile::TempDir; + use tokio::sync::mpsc::Sender; + use tokio::sync::oneshot; + + struct SimpleMapper; + + #[tonic::async_trait] + impl map::Mapper for SimpleMapper { + async fn map(&self, input: map::MapRequest) -> Vec { + let message = map::Message::new(input.value) + .keys(input.keys) + .tags(vec!["test".to_string()]); + 
vec![message] + } + } + + #[tokio::test] + async fn mapper_operations() -> Result<()> { + let (shutdown_tx, shutdown_rx) = oneshot::channel(); + let tmp_dir = TempDir::new().unwrap(); + let sock_file = tmp_dir.path().join("map.sock"); + let server_info_file = tmp_dir.path().join("map-server-info"); + + let server_info = server_info_file.clone(); + let server_socket = sock_file.clone(); + let handle = tokio::spawn(async move { + map::Server::new(SimpleMapper) + .with_socket_file(server_socket) + .with_server_info_file(server_info) + .start_with_shutdown(shutdown_rx) + .await + .expect("server failed"); + }); + + // wait for the server to start + tokio::time::sleep(Duration::from_millis(100)).await; + let tracker_handle = TrackerHandle::new(); + + let client = MapClient::new(create_rpc_channel(sock_file).await?); + let mapper = MapHandle::new( + MapMode::Unary, + 500, + Duration::from_millis(1000), + 10, + client, + tracker_handle.clone(), + ) + .await?; + + let message = Message { + keys: Arc::from(vec!["first".into()]), + tags: None, + value: "hello".into(), + offset: Some(Offset::String(crate::message::StringOffset::new( + "0".to_string(), + 0, + ))), + event_time: chrono::Utc::now(), + id: MessageID { + vertex_name: "vertex_name".to_string().into(), + offset: "0".to_string().into(), + index: 0, + }, + headers: Default::default(), + }; + + let (output_tx, mut output_rx) = mpsc::channel(10); + + let semaphore = Arc::new(Semaphore::new(10)); + let permit = semaphore.acquire_owned().await.unwrap(); + let (error_tx, mut error_rx) = mpsc::channel(1); + + let ActorSender::Unary(input_tx) = mapper.actor_sender.clone() else { + panic!("Expected Unary actor sender"); + }; + + MapHandle::unary( + input_tx, + permit, + message, + output_tx, + tracker_handle, + error_tx, + ) + .await; + + // check for errors + assert!(error_rx.recv().await.is_none()); + + let mapped_message = output_rx.recv().await.unwrap(); + assert_eq!(mapped_message.value, "hello"); + + // we need to drop 
the mapper, because if there are any in-flight requests + // server fails to shut down. https://github.com/numaproj/numaflow-rs/issues/85 + drop(mapper); + + shutdown_tx + .send(()) + .expect("failed to send shutdown signal"); + tokio::time::sleep(Duration::from_millis(50)).await; + assert!( + handle.is_finished(), + "Expected gRPC server to have shut down" + ); + Ok(()) + } + + #[tokio::test] + async fn test_map_stream() -> Result<()> { + let (shutdown_tx, shutdown_rx) = oneshot::channel(); + let tmp_dir = TempDir::new().unwrap(); + let sock_file = tmp_dir.path().join("map.sock"); + let server_info_file = tmp_dir.path().join("map-server-info"); + + let server_info = server_info_file.clone(); + let server_socket = sock_file.clone(); + let handle = tokio::spawn(async move { + map::Server::new(SimpleMapper) + .with_socket_file(server_socket) + .with_server_info_file(server_info) + .start_with_shutdown(shutdown_rx) + .await + .expect("server failed"); + }); + + // wait for the server to start + tokio::time::sleep(Duration::from_millis(100)).await; + + let tracker_handle = TrackerHandle::new(); + let client = MapClient::new(create_rpc_channel(sock_file).await?); + let mapper = MapHandle::new( + MapMode::Unary, + 10, + Duration::from_millis(10), + 10, + client, + tracker_handle.clone(), + ) + .await?; + + let (input_tx, input_rx) = mpsc::channel(10); + let input_stream = ReceiverStream::new(input_rx); + + for i in 0..5 { + let message = Message { + keys: Arc::from(vec![format!("key_{}", i)]), + tags: None, + value: format!("value_{}", i).into(), + offset: Some(Offset::String(StringOffset::new(i.to_string(), 0))), + event_time: chrono::Utc::now(), + id: MessageID { + vertex_name: "vertex_name".to_string().into(), + offset: i.to_string().into(), + index: i, + }, + headers: Default::default(), + }; + input_tx.send(message).await.unwrap(); + } + drop(input_tx); + + let (output_stream, map_handle) = mapper.streaming_map(input_stream).await?; + + let mut output_rx = 
output_stream.into_inner(); + + for i in 0..5 { + let mapped_message = output_rx.recv().await.unwrap(); + assert_eq!(mapped_message.value, format!("value_{}", i)); + } + + // we need to drop the mapper, because if there are any in-flight requests + // server fails to shut down. https://github.com/numaproj/numaflow-rs/issues/85 + drop(mapper); + + shutdown_tx + .send(()) + .expect("failed to send shutdown signal"); + tokio::time::sleep(Duration::from_millis(50)).await; + assert!( + handle.is_finished(), + "Expected gRPC server to have shut down" + ); + assert!( + map_handle.is_finished(), + "Expected mapper to have shut down" + ); + Ok(()) + } + + struct PanicCat; + + #[tonic::async_trait] + impl map::Mapper for PanicCat { + async fn map(&self, _input: map::MapRequest) -> Vec { + panic!("PanicCat panicked!"); + } + } + + #[tokio::test] + async fn test_map_stream_with_panic() -> Result<()> { + let tmp_dir = TempDir::new().unwrap(); + let sock_file = tmp_dir.path().join("map.sock"); + let server_info_file = tmp_dir.path().join("map-server-info"); + + let server_info = server_info_file.clone(); + let server_socket = sock_file.clone(); + let handle = tokio::spawn(async move { + map::Server::new(PanicCat) + .with_socket_file(server_socket) + .with_server_info_file(server_info) + .start() + .await + .expect("server failed"); + }); + + // wait for the server to start + tokio::time::sleep(Duration::from_millis(100)).await; + + let tracker_handle = TrackerHandle::new(); + let client = MapClient::new(create_rpc_channel(sock_file).await?); + let mapper = MapHandle::new( + MapMode::Unary, + 500, + Duration::from_millis(1000), + 10, + client, + tracker_handle.clone(), + ) + .await?; + + let (input_tx, input_rx) = mpsc::channel(10); + let input_stream = ReceiverStream::new(input_rx); + + let message = Message { + keys: Arc::from(vec!["first".into()]), + tags: None, + value: "hello".into(), + offset: Some(Offset::String(StringOffset::new("0".to_string(), 0))), + event_time: 
chrono::Utc::now(), + id: MessageID { + vertex_name: "vertex_name".to_string().into(), + offset: "0".to_string().into(), + index: 0, + }, + headers: Default::default(), + }; + + input_tx.send(message).await.unwrap(); + + let (_output_stream, map_handle) = mapper.streaming_map(input_stream).await?; + + // Await the join handle and expect an error due to the panic + let result = map_handle.await.unwrap(); + assert!(result.is_err(), "Expected an error due to panic"); + assert!(result + .unwrap_err() + .to_string() + .contains("PanicCat panicked!")); + + // we need to drop the mapper, because if there are any in-flight requests + // server fails to shut down. https://github.com/numaproj/numaflow-rs/issues/85 + drop(mapper); + + tokio::time::sleep(Duration::from_millis(50)).await; + assert!( + handle.is_finished(), + "Expected gRPC server to have shut down" + ); + Ok(()) + } + + struct SimpleBatchMap; + + #[tonic::async_trait] + impl batchmap::BatchMapper for SimpleBatchMap { + async fn batchmap( + &self, + mut input: tokio::sync::mpsc::Receiver, + ) -> Vec { + let mut responses: Vec = Vec::new(); + while let Some(datum) = input.recv().await { + let mut response = batchmap::BatchResponse::from_id(datum.id); + response.append(batchmap::Message { + keys: Option::from(datum.keys), + value: datum.value, + tags: None, + }); + responses.push(response); + } + responses + } + } + + #[tokio::test] + async fn batch_mapper_operations() -> Result<()> { + let (shutdown_tx, shutdown_rx) = tokio::sync::oneshot::channel(); + let tmp_dir = TempDir::new().unwrap(); + let sock_file = tmp_dir.path().join("batch_map.sock"); + let server_info_file = tmp_dir.path().join("batch_map-server-info"); + + let server_info = server_info_file.clone(); + let server_socket = sock_file.clone(); + let handle = tokio::spawn(async move { + batchmap::Server::new(SimpleBatchMap) + .with_socket_file(server_socket) + .with_server_info_file(server_info) + .start_with_shutdown(shutdown_rx) + .await + 
.expect("server failed"); + }); + + // wait for the server to start + tokio::time::sleep(Duration::from_millis(100)).await; + let tracker_handle = TrackerHandle::new(); + + let client = MapClient::new(create_rpc_channel(sock_file).await?); + let mapper = MapHandle::new( + MapMode::Batch, + 500, + Duration::from_millis(1000), + 10, + client, + tracker_handle.clone(), + ) + .await?; + + let messages = vec![ + Message { + keys: Arc::from(vec!["first".into()]), + tags: None, + value: "hello".into(), + offset: Some(Offset::String(StringOffset::new("0".to_string(), 0))), + event_time: chrono::Utc::now(), + id: MessageID { + vertex_name: "vertex_name".to_string().into(), + offset: "0".to_string().into(), + index: 0, + }, + headers: Default::default(), + }, + Message { + keys: Arc::from(vec!["second".into()]), + tags: None, + value: "world".into(), + offset: Some(Offset::String(StringOffset::new("1".to_string(), 1))), + event_time: chrono::Utc::now(), + id: MessageID { + vertex_name: "vertex_name".to_string().into(), + offset: "1".to_string().into(), + index: 1, + }, + headers: Default::default(), + }, + ]; + + let (input_tx, input_rx) = mpsc::channel(10); + let input_stream = ReceiverStream::new(input_rx); + + for message in messages { + input_tx.send(message).await.unwrap(); + } + drop(input_tx); + + let (output_stream, map_handle) = mapper.streaming_map(input_stream).await?; + let mut output_rx = output_stream.into_inner(); + + let mapped_message1 = output_rx.recv().await.unwrap(); + assert_eq!(mapped_message1.value, "hello"); + + let mapped_message2 = output_rx.recv().await.unwrap(); + assert_eq!(mapped_message2.value, "world"); + + // we need to drop the mapper, because if there are any in-flight requests + // server fails to shut down. 
https://github.com/numaproj/numaflow-rs/issues/85 + drop(mapper); + + shutdown_tx + .send(()) + .expect("failed to send shutdown signal"); + tokio::time::sleep(Duration::from_millis(50)).await; + assert!( + handle.is_finished(), + "Expected gRPC server to have shut down" + ); + assert!( + map_handle.is_finished(), + "Expected mapper to have shut down" + ); + Ok(()) + } + + struct PanicBatchMap; + + #[tonic::async_trait] + impl batchmap::BatchMapper for PanicBatchMap { + async fn batchmap( + &self, + _input: mpsc::Receiver, + ) -> Vec { + panic!("PanicBatchMap panicked!"); + } + } + + #[tokio::test] + async fn test_batch_map_with_panic() -> Result<()> { + let (_shutdown_tx, shutdown_rx) = oneshot::channel(); + let tmp_dir = TempDir::new().unwrap(); + let sock_file = tmp_dir.path().join("batch_map_panic.sock"); + let server_info_file = tmp_dir.path().join("batch_map_panic-server-info"); + + let server_info = server_info_file.clone(); + let server_socket = sock_file.clone(); + let handle = tokio::spawn(async move { + batchmap::Server::new(PanicBatchMap) + .with_socket_file(server_socket) + .with_server_info_file(server_info) + .start_with_shutdown(shutdown_rx) + .await + .expect("server failed"); + }); + + // wait for the server to start + tokio::time::sleep(Duration::from_millis(100)).await; + + let tracker_handle = TrackerHandle::new(); + let client = MapClient::new(create_rpc_channel(sock_file).await?); + let mapper = MapHandle::new( + MapMode::Batch, + 500, + Duration::from_millis(1000), + 10, + client, + tracker_handle.clone(), + ) + .await?; + + let messages = vec![ + Message { + keys: Arc::from(vec!["first".into()]), + tags: None, + value: "hello".into(), + offset: Some(Offset::String(StringOffset::new("0".to_string(), 0))), + event_time: chrono::Utc::now(), + id: MessageID { + vertex_name: "vertex_name".to_string().into(), + offset: "0".to_string().into(), + index: 0, + }, + headers: Default::default(), + }, + Message { + keys: 
Arc::from(vec!["second".into()]), + tags: None, + value: "world".into(), + offset: Some(Offset::String(StringOffset::new("1".to_string(), 1))), + event_time: chrono::Utc::now(), + id: MessageID { + vertex_name: "vertex_name".to_string().into(), + offset: "1".to_string().into(), + index: 1, + }, + headers: Default::default(), + }, + ]; + + let (input_tx, input_rx) = mpsc::channel(10); + let input_stream = ReceiverStream::new(input_rx); + + for message in messages { + input_tx.send(message).await.unwrap(); + } + drop(input_tx); + + let (_output_stream, map_handle) = mapper.streaming_map(input_stream).await?; + + // Await the join handle and expect an error due to the panic + let result = map_handle.await.unwrap(); + assert!(result.is_err(), "Expected an error due to panic"); + + // we need to drop the mapper, because if there are any in-flight requests + // server fails to shut down. https://github.com/numaproj/numaflow-rs/issues/85 + drop(mapper); + + tokio::time::sleep(Duration::from_millis(50)).await; + assert!( + handle.is_finished(), + "Expected gRPC server to have shut down" + ); + Ok(()) + } + + struct FlatmapStream; + + #[tonic::async_trait] + impl mapstream::MapStreamer for FlatmapStream { + async fn map_stream( + &self, + input: mapstream::MapStreamRequest, + tx: Sender, + ) { + let payload_str = String::from_utf8(input.value).unwrap_or_default(); + let splits: Vec<&str> = payload_str.split(',').collect(); + + for split in splits { + let message = mapstream::Message::new(split.as_bytes().to_vec()) + .keys(input.keys.clone()) + .tags(vec![]); + if tx.send(message).await.is_err() { + break; + } + } + } + } + + #[tokio::test] + async fn map_stream_operations() -> Result<()> { + let (shutdown_tx, shutdown_rx) = oneshot::channel(); + let tmp_dir = TempDir::new().unwrap(); + let sock_file = tmp_dir.path().join("map_stream.sock"); + let server_info_file = tmp_dir.path().join("map_stream-server-info"); + + let server_info = server_info_file.clone(); + let 
server_socket = sock_file.clone(); + let _handle = tokio::spawn(async move { + mapstream::Server::new(FlatmapStream) + .with_socket_file(server_socket) + .with_server_info_file(server_info) + .start_with_shutdown(shutdown_rx) + .await + .expect("server failed"); + }); + + // wait for the server to start + tokio::time::sleep(Duration::from_millis(100)).await; + let tracker_handle = TrackerHandle::new(); + + let client = MapClient::new(create_rpc_channel(sock_file).await?); + let mapper = MapHandle::new( + MapMode::Stream, + 500, + Duration::from_millis(1000), + 10, + client, + tracker_handle.clone(), + ) + .await?; + + let message = Message { + keys: Arc::from(vec!["first".into()]), + tags: None, + value: "test,map,stream".into(), + offset: Some(Offset::String(StringOffset::new("0".to_string(), 0))), + event_time: chrono::Utc::now(), + id: MessageID { + vertex_name: "vertex_name".to_string().into(), + offset: "0".to_string().into(), + index: 0, + }, + headers: Default::default(), + }; + + let (input_tx, input_rx) = mpsc::channel(10); + let input_stream = ReceiverStream::new(input_rx); + + input_tx.send(message).await.unwrap(); + drop(input_tx); + + let (mut output_stream, map_handle) = mapper.streaming_map(input_stream).await?; + + let mut responses = vec![]; + while let Some(response) = output_stream.next().await { + responses.push(response); + } + + assert_eq!(responses.len(), 3); + // convert the bytes value to string and compare + let values: Vec = responses + .iter() + .map(|r| String::from_utf8(Vec::from(r.value.clone())).unwrap()) + .collect(); + assert_eq!(values, vec!["test", "map", "stream"]); + + // we need to drop the client, because if there are any in-flight requests + // server fails to shut down. 
https://github.com/numaproj/numaflow-rs/issues/85 + drop(mapper); + + shutdown_tx + .send(()) + .expect("failed to send shutdown signal"); + tokio::time::sleep(Duration::from_millis(50)).await; + assert!( + map_handle.is_finished(), + "Expected mapper to have shut down" + ); + Ok(()) + } + + struct PanicFlatmapStream; + + #[tonic::async_trait] + impl mapstream::MapStreamer for PanicFlatmapStream { + async fn map_stream( + &self, + _input: mapstream::MapStreamRequest, + _tx: Sender, + ) { + panic!("PanicFlatmapStream panicked!"); + } + } + + #[tokio::test] + async fn map_stream_panic_case() -> Result<()> { + let (_shutdown_tx, shutdown_rx) = oneshot::channel(); + let tmp_dir = TempDir::new().unwrap(); + let sock_file = tmp_dir.path().join("map_stream_panic.sock"); + let server_info_file = tmp_dir.path().join("map_stream_panic-server-info"); + + let server_info = server_info_file.clone(); + let server_socket = sock_file.clone(); + let handle = tokio::spawn(async move { + mapstream::Server::new(PanicFlatmapStream) + .with_socket_file(server_socket) + .with_server_info_file(server_info) + .start_with_shutdown(shutdown_rx) + .await + .expect("server failed"); + }); + + // wait for the server to start + tokio::time::sleep(Duration::from_millis(100)).await; + + let client = MapClient::new(create_rpc_channel(sock_file).await?); + let tracker_handle = TrackerHandle::new(); + let mapper = MapHandle::new( + MapMode::Stream, + 500, + Duration::from_millis(1000), + 10, + client, + tracker_handle, + ) + .await?; + + let message = Message { + keys: Arc::from(vec!["first".into()]), + tags: None, + value: "panic".into(), + offset: Some(Offset::String(StringOffset::new("0".to_string(), 0))), + event_time: chrono::Utc::now(), + id: MessageID { + vertex_name: "vertex_name".to_string().into(), + offset: "0".to_string().into(), + index: 0, + }, + headers: Default::default(), + }; + + let (input_tx, input_rx) = mpsc::channel(10); + let input_stream = ReceiverStream::new(input_rx); + + 
input_tx.send(message).await.unwrap(); + + let (_output_stream, map_handle) = mapper.streaming_map(input_stream).await?; + + // Await the join handle and expect an error due to the panic + let result = map_handle.await.unwrap(); + assert!(result.is_err(), "Expected an error due to panic"); + assert!(result + .unwrap_err() + .to_string() + .contains("PanicFlatmapStream panicked!")); + + // we need to drop the client, because if there are any in-flight requests + // server fails to shut down. https://github.com/numaproj/numaflow-rs/issues/85 + drop(mapper); + + tokio::time::sleep(Duration::from_millis(50)).await; + assert!( + handle.is_finished(), + "Expected gRPC server to have shut down" + ); + Ok(()) + } +} diff --git a/rust/numaflow-core/src/mapper/map/user_defined.rs b/rust/numaflow-core/src/mapper/map/user_defined.rs new file mode 100644 index 0000000000..6bc816c405 --- /dev/null +++ b/rust/numaflow-core/src/mapper/map/user_defined.rs @@ -0,0 +1,721 @@ +use std::collections::HashMap; +use std::sync::Arc; + +use chrono::{DateTime, Utc}; +use numaflow_pb::clients::map::{self, map_client::MapClient, MapRequest, MapResponse}; +use tokio::sync::Mutex; +use tokio::sync::{mpsc, oneshot}; +use tokio_stream::wrappers::ReceiverStream; +use tonic::transport::Channel; +use tonic::{Request, Streaming}; +use tracing::error; + +use crate::config::get_vertex_name; +use crate::error::{Error, Result}; +use crate::message::{Message, MessageID, Offset}; + +type ResponseSenderMap = + Arc>>)>>>; + +type StreamResponseSenderMap = + Arc>)>>>; + +struct ParentMessageInfo { + offset: Offset, + event_time: DateTime, + headers: HashMap, +} + +/// UserDefinedUnaryMap is a grpc client that sends unary requests to the map server +/// and forwards the responses. 
+pub(in crate::mapper) struct UserDefinedUnaryMap { + read_tx: mpsc::Sender, + senders: ResponseSenderMap, + task_handle: tokio::task::JoinHandle<()>, +} + +/// Abort the background task that receives responses when the UserDefinedBatchMap is dropped. +impl Drop for UserDefinedUnaryMap { + fn drop(&mut self) { + self.task_handle.abort(); + } +} + +impl UserDefinedUnaryMap { + /// Performs handshake with the server and creates a new UserDefinedMap. + pub(in crate::mapper) async fn new( + batch_size: usize, + mut client: MapClient, + ) -> Result { + let (read_tx, read_rx) = mpsc::channel(batch_size); + let resp_stream = create_response_stream(read_tx.clone(), read_rx, &mut client).await?; + + // map to track the oneshot sender for each request along with the message info + let sender_map = Arc::new(Mutex::new(HashMap::new())); + + // background task to receive responses from the server and send them to the appropriate + // oneshot sender based on the message id + let task_handle = tokio::spawn(Self::receive_unary_responses( + Arc::clone(&sender_map), + resp_stream, + )); + + let mapper = Self { + read_tx, + senders: sender_map, + task_handle, + }; + + Ok(mapper) + } + + /// receive responses from the server and gets the corresponding oneshot response sender from the map + /// and sends the response. + async fn receive_unary_responses( + sender_map: ResponseSenderMap, + mut resp_stream: Streaming, + ) { + while let Some(resp) = match resp_stream.message().await { + Ok(message) => message, + Err(e) => { + let error = Error::Mapper(format!("failed to receive map response: {}", e)); + let mut senders = sender_map.lock().await; + for (_, (_, sender)) in senders.drain() { + let _ = sender.send(Err(error.clone())); + } + None + } + } { + process_response(&sender_map, resp).await + } + } + + /// Handles the incoming message and sends it to the server for mapping. 
+ pub(in crate::mapper) async fn unary_map( + &mut self, + message: Message, + respond_to: oneshot::Sender>>, + ) { + let key = message.offset.clone().unwrap().to_string(); + let msg_info = ParentMessageInfo { + offset: message.offset.clone().expect("offset can never be none"), + event_time: message.event_time, + headers: message.headers.clone(), + }; + + self.senders + .lock() + .await + .insert(key, (msg_info, respond_to)); + + self.read_tx + .send(message.into()) + .await + .expect("failed to send message"); + } +} + +/// UserDefinedBatchMap is a grpc client that sends batch requests to the map server +/// and forwards the responses. +pub(in crate::mapper) struct UserDefinedBatchMap { + read_tx: mpsc::Sender, + senders: ResponseSenderMap, + task_handle: tokio::task::JoinHandle<()>, +} + +/// Abort the background task that receives responses when the UserDefinedBatchMap is dropped. +impl Drop for UserDefinedBatchMap { + fn drop(&mut self) { + self.task_handle.abort(); + } +} + +impl UserDefinedBatchMap { + /// Performs handshake with the server and creates a new UserDefinedMap. + pub(in crate::mapper) async fn new( + batch_size: usize, + mut client: MapClient, + ) -> Result { + let (read_tx, read_rx) = mpsc::channel(batch_size); + let resp_stream = create_response_stream(read_tx.clone(), read_rx, &mut client).await?; + + // map to track the oneshot response sender for each request along with the message info + let sender_map = Arc::new(Mutex::new(HashMap::new())); + + // background task to receive responses from the server and send them to the appropriate + // oneshot response sender based on the id + let task_handle = tokio::spawn(Self::receive_batch_responses( + Arc::clone(&sender_map), + resp_stream, + )); + + let mapper = Self { + read_tx, + senders: sender_map, + task_handle, + }; + Ok(mapper) + } + + /// receive responses from the server and gets the corresponding oneshot response sender from the map + /// and sends the response. 
+ async fn receive_batch_responses( + sender_map: ResponseSenderMap, + mut resp_stream: Streaming, + ) { + while let Some(resp) = match resp_stream.message().await { + Ok(message) => message, + Err(e) => { + let error = Error::Mapper(format!("failed to receive map response: {}", e)); + let mut senders = sender_map.lock().await; + for (_, (_, sender)) in senders.drain() { + sender + .send(Err(error.clone())) + .expect("failed to send error response"); + } + None + } + } { + if let Some(map::TransmissionStatus { eot: true }) = resp.status { + if !sender_map.lock().await.is_empty() { + error!("received EOT but not all responses have been received"); + } + continue; + } + + process_response(&sender_map, resp).await + } + } + + /// Handles the incoming message and sends it to the server for mapping. + pub(in crate::mapper) async fn batch_map( + &mut self, + messages: Vec, + respond_to: Vec>>>, + ) { + for (message, respond_to) in messages.into_iter().zip(respond_to) { + let key = message.offset.clone().unwrap().to_string(); + let msg_info = ParentMessageInfo { + offset: message.offset.clone().expect("offset can never be none"), + event_time: message.event_time, + headers: message.headers.clone(), + }; + + self.senders + .lock() + .await + .insert(key, (msg_info, respond_to)); + self.read_tx + .send(message.into()) + .await + .expect("failed to send message"); + } + + // send eot request + self.read_tx + .send(MapRequest { + request: None, + id: "".to_string(), + handshake: None, + status: Some(map::TransmissionStatus { eot: true }), + }) + .await + .expect("failed to send eot request"); + } +} + +/// Processes the response from the server and sends it to the appropriate oneshot sender +/// based on the message id entry in the map. 
+async fn process_response(sender_map: &ResponseSenderMap, resp: MapResponse) { + let msg_id = resp.id; + if let Some((msg_info, sender)) = sender_map.lock().await.remove(&msg_id) { + let mut response_messages = vec![]; + for (i, result) in resp.results.into_iter().enumerate() { + let message = Message { + id: MessageID { + vertex_name: get_vertex_name().to_string().into(), + index: i as i32, + offset: msg_info.offset.to_string().into(), + }, + keys: Arc::from(result.keys), + tags: Some(Arc::from(result.tags)), + value: result.value.into(), + offset: Some(msg_info.offset.clone()), + event_time: msg_info.event_time, + headers: msg_info.headers.clone(), + }; + response_messages.push(message); + } + sender + .send(Ok(response_messages)) + .expect("failed to send response"); + } +} + +/// Performs handshake with the server and returns the response stream to receive responses. +async fn create_response_stream( + read_tx: mpsc::Sender, + read_rx: mpsc::Receiver, + client: &mut MapClient, +) -> Result> { + let handshake_request = MapRequest { + request: None, + id: "".to_string(), + handshake: Some(map::Handshake { sot: true }), + status: None, + }; + + read_tx + .send(handshake_request) + .await + .map_err(|e| Error::Mapper(format!("failed to send handshake request: {}", e)))?; + + let mut resp_stream = client + .map_fn(Request::new(ReceiverStream::new(read_rx))) + .await? 
+ .into_inner(); + + let handshake_response = resp_stream.message().await?.ok_or(Error::Mapper( + "failed to receive handshake response".to_string(), + ))?; + + if handshake_response.handshake.map_or(true, |h| !h.sot) { + return Err(Error::Mapper("invalid handshake response".to_string())); + } + + Ok(resp_stream) +} + +/// UserDefinedStreamMap is a grpc client that sends stream requests to the map server +pub(in crate::mapper) struct UserDefinedStreamMap { + read_tx: mpsc::Sender, + senders: StreamResponseSenderMap, + task_handle: tokio::task::JoinHandle<()>, +} + +/// Abort the background task that receives responses when the UserDefinedBatchMap is dropped. +impl Drop for UserDefinedStreamMap { + fn drop(&mut self) { + self.task_handle.abort(); + } +} + +impl UserDefinedStreamMap { + /// Performs handshake with the server and creates a new UserDefinedMap. + pub(in crate::mapper) async fn new( + batch_size: usize, + mut client: MapClient, + ) -> Result { + let (read_tx, read_rx) = mpsc::channel(batch_size); + let resp_stream = create_response_stream(read_tx.clone(), read_rx, &mut client).await?; + + // map to track the oneshot response sender for each request along with the message info + let sender_map = Arc::new(Mutex::new(HashMap::new())); + + // background task to receive responses from the server and send them to the appropriate + // mpsc sender based on the id + let task_handle = tokio::spawn(Self::receive_stream_responses( + Arc::clone(&sender_map), + resp_stream, + )); + + let mapper = Self { + read_tx, + senders: sender_map, + task_handle, + }; + Ok(mapper) + } + + /// receive responses from the server and gets the corresponding oneshot sender from the map + /// and sends the response. 
+ async fn receive_stream_responses( + sender_map: StreamResponseSenderMap, + mut resp_stream: Streaming, + ) { + while let Some(resp) = match resp_stream.message().await { + Ok(message) => message, + Err(e) => { + let error = Error::Mapper(format!("failed to receive map response: {}", e)); + let mut senders = sender_map.lock().await; + for (_, (_, sender)) in senders.drain() { + let _ = sender.send(Err(error.clone())).await; + } + None + } + } { + let (message_info, response_sender) = sender_map + .lock() + .await + .remove(&resp.id) + .expect("map entry should always be present"); + + // once we get eot, we can drop the sender to let the callee + // know that we are done sending responses + if let Some(map::TransmissionStatus { eot: true }) = resp.status { + continue; + } + + for (i, result) in resp.results.into_iter().enumerate() { + let message = Message { + id: MessageID { + vertex_name: get_vertex_name().to_string().into(), + index: i as i32, + offset: message_info.offset.to_string().into(), + }, + keys: Arc::from(result.keys), + tags: Some(Arc::from(result.tags)), + value: result.value.into(), + offset: None, + event_time: message_info.event_time, + headers: message_info.headers.clone(), + }; + response_sender + .send(Ok(message)) + .await + .expect("failed to send response"); + } + + // Write the sender back to the map, because we need to send + // more responses for the same request + sender_map + .lock() + .await + .insert(resp.id, (message_info, response_sender)); + } + } + + /// Handles the incoming message and sends it to the server for mapping. 
+ pub(in crate::mapper) async fn stream_map( + &mut self, + message: Message, + respond_to: mpsc::Sender>, + ) { + let key = message.offset.clone().unwrap().to_string(); + let msg_info = ParentMessageInfo { + offset: message.offset.clone().expect("offset can never be none"), + event_time: message.event_time, + headers: message.headers.clone(), + }; + + self.senders + .lock() + .await + .insert(key, (msg_info, respond_to)); + + self.read_tx + .send(message.into()) + .await + .expect("failed to send message"); + } +} + +#[cfg(test)] +mod tests { + use numaflow::mapstream; + use std::error::Error; + use std::sync::Arc; + use std::time::Duration; + + use numaflow::batchmap::Server; + use numaflow::{batchmap, map}; + use numaflow_pb::clients::map::map_client::MapClient; + use tempfile::TempDir; + + use crate::mapper::map::user_defined::{ + UserDefinedBatchMap, UserDefinedStreamMap, UserDefinedUnaryMap, + }; + use crate::message::{MessageID, StringOffset}; + use crate::shared::grpc::create_rpc_channel; + + struct Cat; + + #[tonic::async_trait] + impl map::Mapper for Cat { + async fn map(&self, input: map::MapRequest) -> Vec { + let message = map::Message::new(input.value).keys(input.keys).tags(vec![]); + vec![message] + } + } + + #[tokio::test] + async fn map_operations() -> Result<(), Box> { + let (shutdown_tx, shutdown_rx) = tokio::sync::oneshot::channel(); + let tmp_dir = TempDir::new()?; + let sock_file = tmp_dir.path().join("map.sock"); + let server_info_file = tmp_dir.path().join("map-server-info"); + + let server_info = server_info_file.clone(); + let server_socket = sock_file.clone(); + let handle = tokio::spawn(async move { + map::Server::new(Cat) + .with_socket_file(server_socket) + .with_server_info_file(server_info) + .start_with_shutdown(shutdown_rx) + .await + .expect("server failed"); + }); + + // wait for the server to start + tokio::time::sleep(Duration::from_millis(100)).await; + + let mut client = + UserDefinedUnaryMap::new(500, 
MapClient::new(create_rpc_channel(sock_file).await?)) + .await?; + + let message = crate::message::Message { + keys: Arc::from(vec!["first".into()]), + tags: None, + value: "hello".into(), + offset: Some(crate::message::Offset::String(StringOffset::new( + "0".to_string(), + 0, + ))), + event_time: chrono::Utc::now(), + id: MessageID { + vertex_name: "vertex_name".to_string().into(), + offset: "0".to_string().into(), + index: 0, + }, + headers: Default::default(), + }; + + let (tx, rx) = tokio::sync::oneshot::channel(); + + tokio::time::timeout(Duration::from_secs(2), client.unary_map(message, tx)) + .await + .unwrap(); + + let messages = rx.await.unwrap(); + assert!(messages.is_ok()); + assert_eq!(messages?.len(), 1); + + // we need to drop the client, because if there are any in-flight requests + // server fails to shut down. https://github.com/numaproj/numaflow-rs/issues/85 + drop(client); + + shutdown_tx + .send(()) + .expect("failed to send shutdown signal"); + tokio::time::sleep(Duration::from_millis(50)).await; + assert!( + handle.is_finished(), + "Expected gRPC server to have shut down" + ); + Ok(()) + } + + struct SimpleBatchMap; + + #[tonic::async_trait] + impl batchmap::BatchMapper for SimpleBatchMap { + async fn batchmap( + &self, + mut input: tokio::sync::mpsc::Receiver, + ) -> Vec { + let mut responses: Vec = Vec::new(); + while let Some(datum) = input.recv().await { + let mut response = batchmap::BatchResponse::from_id(datum.id); + response.append(batchmap::Message { + keys: Option::from(datum.keys), + value: datum.value, + tags: None, + }); + responses.push(response); + } + responses + } + } + + #[tokio::test] + async fn batch_map_operations() -> Result<(), Box> { + let (shutdown_tx, shutdown_rx) = tokio::sync::oneshot::channel(); + let tmp_dir = TempDir::new()?; + let sock_file = tmp_dir.path().join("batch_map.sock"); + let server_info_file = tmp_dir.path().join("batch_map-server-info"); + + let server_info = server_info_file.clone(); + let 
server_socket = sock_file.clone(); + let handle = tokio::spawn(async move { + Server::new(SimpleBatchMap) + .with_socket_file(server_socket) + .with_server_info_file(server_info) + .start_with_shutdown(shutdown_rx) + .await + .expect("server failed"); + }); + + // wait for the server to start + tokio::time::sleep(Duration::from_millis(100)).await; + + let mut client = + UserDefinedBatchMap::new(500, MapClient::new(create_rpc_channel(sock_file).await?)) + .await?; + + let messages = vec![ + crate::message::Message { + keys: Arc::from(vec!["first".into()]), + tags: None, + value: "hello".into(), + offset: Some(crate::message::Offset::String(StringOffset::new( + "0".to_string(), + 0, + ))), + event_time: chrono::Utc::now(), + id: MessageID { + vertex_name: "vertex_name".to_string().into(), + offset: "0".to_string().into(), + index: 0, + }, + headers: Default::default(), + }, + crate::message::Message { + keys: Arc::from(vec!["second".into()]), + tags: None, + value: "world".into(), + offset: Some(crate::message::Offset::String(StringOffset::new( + "1".to_string(), + 1, + ))), + event_time: chrono::Utc::now(), + id: MessageID { + vertex_name: "vertex_name".to_string().into(), + offset: "1".to_string().into(), + index: 1, + }, + headers: Default::default(), + }, + ]; + + let (tx1, rx1) = tokio::sync::oneshot::channel(); + let (tx2, rx2) = tokio::sync::oneshot::channel(); + + tokio::time::timeout( + Duration::from_secs(2), + client.batch_map(messages, vec![tx1, tx2]), + ) + .await + .unwrap(); + + let messages1 = rx1.await.unwrap(); + let messages2 = rx2.await.unwrap(); + + assert!(messages1.is_ok()); + assert!(messages2.is_ok()); + assert_eq!(messages1?.len(), 1); + assert_eq!(messages2?.len(), 1); + + // we need to drop the client, because if there are any in-flight requests + // server fails to shut down. 
https://github.com/numaproj/numaflow-rs/issues/85 + drop(client); + + shutdown_tx + .send(()) + .expect("failed to send shutdown signal"); + tokio::time::sleep(Duration::from_millis(50)).await; + assert!( + handle.is_finished(), + "Expected gRPC server to have shut down" + ); + Ok(()) + } + + struct FlatmapStream; + + #[tonic::async_trait] + impl mapstream::MapStreamer for FlatmapStream { + async fn map_stream( + &self, + input: mapstream::MapStreamRequest, + tx: tokio::sync::mpsc::Sender, + ) { + let payload_str = String::from_utf8(input.value).unwrap_or_default(); + let splits: Vec<&str> = payload_str.split(',').collect(); + + for split in splits { + let message = mapstream::Message::new(split.as_bytes().to_vec()) + .keys(input.keys.clone()) + .tags(vec![]); + if tx.send(message).await.is_err() { + break; + } + } + } + } + + #[tokio::test] + async fn map_stream_operations() -> Result<(), Box> { + let (shutdown_tx, shutdown_rx) = tokio::sync::oneshot::channel(); + let tmp_dir = TempDir::new()?; + let sock_file = tmp_dir.path().join("map_stream.sock"); + let server_info_file = tmp_dir.path().join("map_stream-server-info"); + + let server_info = server_info_file.clone(); + let server_socket = sock_file.clone(); + let handle = tokio::spawn(async move { + mapstream::Server::new(FlatmapStream) + .with_socket_file(server_socket) + .with_server_info_file(server_info) + .start_with_shutdown(shutdown_rx) + .await + .expect("server failed"); + }); + + // wait for the server to start + tokio::time::sleep(Duration::from_millis(100)).await; + + let mut client = + UserDefinedStreamMap::new(500, MapClient::new(create_rpc_channel(sock_file).await?)) + .await?; + + let message = crate::message::Message { + keys: Arc::from(vec!["first".into()]), + tags: None, + value: "test,map,stream".into(), + offset: Some(crate::message::Offset::String(StringOffset::new( + "0".to_string(), + 0, + ))), + event_time: chrono::Utc::now(), + id: MessageID { + vertex_name: 
"vertex_name".to_string().into(), + offset: "0".to_string().into(), + index: 0, + }, + headers: Default::default(), + }; + + let (tx, mut rx) = tokio::sync::mpsc::channel(3); + + tokio::time::timeout(Duration::from_secs(2), client.stream_map(message, tx)) + .await + .unwrap(); + + let mut responses = vec![]; + while let Some(response) = rx.recv().await { + responses.push(response.unwrap()); + } + + assert_eq!(responses.len(), 3); + // convert the bytes value to string and compare + let values: Vec = responses + .iter() + .map(|r| String::from_utf8(Vec::from(r.value.clone())).unwrap()) + .collect(); + assert_eq!(values, vec!["test", "map", "stream"]); + + // we need to drop the client, because if there are any in-flight requests + // server fails to shut down. https://github.com/numaproj/numaflow-rs/issues/85 + drop(client); + + shutdown_tx + .send(()) + .expect("failed to send shutdown signal"); + tokio::time::sleep(Duration::from_millis(50)).await; + assert!( + handle.is_finished(), + "Expected gRPC server to have shut down" + ); + Ok(()) + } +} diff --git a/rust/numaflow-core/src/message.rs b/rust/numaflow-core/src/message.rs index 2b3ca0b5fc..a33b4a7041 100644 --- a/rust/numaflow-core/src/message.rs +++ b/rust/numaflow-core/src/message.rs @@ -8,6 +8,7 @@ use base64::engine::general_purpose::STANDARD as BASE64_STANDARD; use base64::Engine; use bytes::{Bytes, BytesMut}; use chrono::{DateTime, Utc}; +use numaflow_pb::clients::map::MapRequest; use numaflow_pb::clients::sink::sink_request::Request; use numaflow_pb::clients::sink::Status::{Failure, Fallback, Success}; use numaflow_pb::clients::sink::{sink_response, SinkRequest}; @@ -285,7 +286,10 @@ impl From for SourceTransformRequest { Self { request: Some( numaflow_pb::clients::sourcetransformer::source_transform_request::Request { - id: message.id.to_string(), + id: message + .offset + .expect("offset should be present") + .to_string(), keys: message.keys.to_vec(), value: message.value.to_vec(), event_time: 
prost_timestamp_from_utc(message.event_time), @@ -298,6 +302,23 @@ impl From for SourceTransformRequest { } } +impl From for MapRequest { + fn from(message: Message) -> Self { + Self { + request: Some(numaflow_pb::clients::map::map_request::Request { + keys: message.keys.to_vec(), + value: message.value.to_vec(), + event_time: prost_timestamp_from_utc(message.event_time), + watermark: None, + headers: message.headers, + }), + id: message.offset.unwrap().to_string(), + handshake: None, + status: None, + } + } +} + /// Convert [`read_response::Result`] to [`Message`] impl TryFrom for Message { type Error = Error; diff --git a/rust/numaflow-core/src/metrics.rs b/rust/numaflow-core/src/metrics.rs index 866e58f2c2..fa79e457b8 100644 --- a/rust/numaflow-core/src/metrics.rs +++ b/rust/numaflow-core/src/metrics.rs @@ -10,6 +10,7 @@ use axum::http::{Response, StatusCode}; use axum::response::IntoResponse; use axum::{routing::get, Router}; use axum_server::tls_rustls::RustlsConfig; +use numaflow_pb::clients::map::map_client::MapClient; use numaflow_pb::clients::sink::sink_client::SinkClient; use numaflow_pb::clients::source::source_client::SourceClient; use numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; @@ -116,6 +117,7 @@ pub(crate) enum PipelineContainerState { ), ), Sink((Option>, Option>)), + Map(Option>), } /// The global register of all metrics. 
@@ -689,6 +691,14 @@ async fn sidecar_livez(State(state): State) -> impl I } } } + PipelineContainerState::Map(map_client) => { + if let Some(mut map_client) = map_client { + if map_client.is_ready(Request::new(())).await.is_err() { + error!("Pipeline map client is not ready"); + return StatusCode::INTERNAL_SERVER_ERROR; + } + } + } }, } StatusCode::NO_CONTENT @@ -943,8 +953,8 @@ mod tests { async fn ack(&self, _: Vec) {} - async fn pending(&self) -> usize { - 0 + async fn pending(&self) -> Option { + Some(0) } async fn partitions(&self) -> Option> { diff --git a/rust/numaflow-core/src/monovertex.rs b/rust/numaflow-core/src/monovertex.rs index ba488cc8fd..1518a3c9f3 100644 --- a/rust/numaflow-core/src/monovertex.rs +++ b/rust/numaflow-core/src/monovertex.rs @@ -127,8 +127,8 @@ mod tests { async fn ack(&self, _: Vec) {} - async fn pending(&self) -> usize { - 0 + async fn pending(&self) -> Option { + Some(0) } async fn partitions(&self) -> Option> { diff --git a/rust/numaflow-core/src/monovertex/forwarder.rs b/rust/numaflow-core/src/monovertex/forwarder.rs index b048680482..51851e4eeb 100644 --- a/rust/numaflow-core/src/monovertex/forwarder.rs +++ b/rust/numaflow-core/src/monovertex/forwarder.rs @@ -111,9 +111,9 @@ impl Forwarder { sink_writer_handle, ) { Ok((reader_result, transformer_result, sink_writer_result)) => { - reader_result?; - transformer_result?; sink_writer_result?; + transformer_result?; + reader_result?; Ok(()) } Err(e) => Err(Error::Forwarder(format!( @@ -206,9 +206,11 @@ mod tests { } } - async fn pending(&self) -> usize { - self.num - self.sent_count.load(Ordering::SeqCst) - + self.yet_to_ack.read().unwrap().len() + async fn pending(&self) -> Option { + Some( + self.num - self.sent_count.load(Ordering::SeqCst) + + self.yet_to_ack.read().unwrap().len(), + ) } async fn partitions(&self) -> Option> { diff --git a/rust/numaflow-core/src/pipeline.rs b/rust/numaflow-core/src/pipeline.rs index 434b9aa6d2..d2cb77091b 100644 --- 
a/rust/numaflow-core/src/pipeline.rs +++ b/rust/numaflow-core/src/pipeline.rs @@ -7,6 +7,7 @@ use tokio_util::sync::CancellationToken; use tracing::info; use crate::config::pipeline; +use crate::config::pipeline::map::MapVtxConfig; use crate::config::pipeline::{PipelineConfig, SinkVtxConfig, SourceVtxConfig}; use crate::metrics::{PipelineContainerState, UserDefinedContainerState}; use crate::pipeline::forwarder::source_forwarder; @@ -36,6 +37,10 @@ pub(crate) async fn start_forwarder( info!("Starting sink forwarder"); start_sink_forwarder(cln_token, config.clone(), sink.clone()).await?; } + pipeline::VertexType::Map(map) => { + info!("Starting map forwarder"); + start_map_forwarder(cln_token, config.clone(), map.clone()).await?; + } } Ok(()) } @@ -75,8 +80,8 @@ async fn start_source_forwarder( start_metrics_server( config.metrics_config.clone(), UserDefinedContainerState::Pipeline(PipelineContainerState::Source(( - source_grpc_client.clone(), - transformer_grpc_client.clone(), + source_grpc_client, + transformer_grpc_client, ))), ) .await; @@ -94,6 +99,92 @@ async fn start_source_forwarder( Ok(()) } +async fn start_map_forwarder( + cln_token: CancellationToken, + config: PipelineConfig, + map_vtx_config: MapVtxConfig, +) -> Result<()> { + let js_context = create_js_context(config.js_client_config.clone()).await?; + + // Only the reader config of the first "from" vertex is needed, as all "from" vertices currently write + // to a common buffer, in the case of a join. + let reader_config = &config + .from_vertex_config + .first() + .ok_or_else(|| error::Error::Config("No from vertex config found".to_string()))? 
+ .reader_config; + + // Create buffer writers and buffer readers + let mut forwarder_components = vec![]; + let mut mapper_grpc_client = None; + for stream in reader_config.streams.clone() { + let tracker_handle = TrackerHandle::new(); + + let buffer_reader = create_buffer_reader( + stream, + reader_config.clone(), + js_context.clone(), + tracker_handle.clone(), + config.batch_size, + ) + .await?; + + let (mapper, mapper_rpc_client) = create_components::create_mapper( + config.batch_size, + config.read_timeout, + map_vtx_config.clone(), + tracker_handle.clone(), + cln_token.clone(), + ) + .await?; + + if let Some(mapper_rpc_client) = mapper_rpc_client { + mapper_grpc_client = Some(mapper_rpc_client); + } + + let buffer_writer = create_buffer_writer( + &config, + js_context.clone(), + tracker_handle.clone(), + cln_token.clone(), + ) + .await; + forwarder_components.push((buffer_reader, buffer_writer, mapper)); + } + + start_metrics_server( + config.metrics_config.clone(), + UserDefinedContainerState::Pipeline(PipelineContainerState::Map(mapper_grpc_client)), + ) + .await; + + let mut forwarder_tasks = vec![]; + for (buffer_reader, buffer_writer, mapper) in forwarder_components { + info!(%buffer_reader, "Starting forwarder for buffer reader"); + let forwarder = forwarder::map_forwarder::MapForwarder::new( + buffer_reader, + mapper, + buffer_writer, + cln_token.clone(), + ) + .await; + let task = tokio::spawn(async move { forwarder.start().await }); + forwarder_tasks.push(task); + } + + let results = try_join_all(forwarder_tasks) + .await + .map_err(|e| error::Error::Forwarder(e.to_string()))?; + + for result in results { + error!(?result, "Forwarder task failed"); + result?; + } + + info!("All forwarders have stopped successfully"); + Ok(()) +} + async fn start_sink_forwarder( cln_token: CancellationToken, config: PipelineConfig, @@ -120,6 +211,7 @@ async fn start_sink_forwarder( reader_config.clone(), js_context.clone(), tracker_handle.clone(), + config.batch_size, 
) .await?; buffer_readers.push(buffer_reader); @@ -159,17 +251,19 @@ async fn start_sink_forwarder( ) .await; - let task = tokio::spawn({ - let config = config.clone(); - async move { forwarder.start(config.clone()).await } - }); - + let task = tokio::spawn(async move { forwarder.start().await }); forwarder_tasks.push(task); } - try_join_all(forwarder_tasks) + let results = try_join_all(forwarder_tasks) .await .map_err(|e| error::Error::Forwarder(e.to_string()))?; + + for result in results { + error!(?result, "Forwarder task failed"); + result?; + } + info!("All forwarders have stopped successfully"); Ok(()) } @@ -194,6 +288,7 @@ async fn create_buffer_reader( reader_config: BufferReaderConfig, js_context: Context, tracker_handle: TrackerHandle, + batch_size: usize, ) -> Result { JetstreamReader::new( stream.0, @@ -201,6 +296,7 @@ async fn create_buffer_reader( js_context, reader_config, tracker_handle, + batch_size, ) .await } @@ -228,12 +324,15 @@ async fn create_js_context(config: pipeline::isb::jetstream::ClientConfig) -> Re #[cfg(test)] mod tests { + use crate::pipeline::pipeline::map::MapMode; use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; use async_nats::jetstream; use async_nats::jetstream::{consumer, stream}; + use numaflow::map; + use tempfile::TempDir; use tokio_stream::StreamExt; use super::*; @@ -242,6 +341,7 @@ mod tests { use crate::config::components::source::GeneratorConfig; use crate::config::components::source::SourceConfig; use crate::config::components::source::SourceType; + use crate::config::pipeline::map::{MapType, UserDefinedConfig}; use crate::config::pipeline::PipelineConfig; use crate::pipeline::pipeline::isb; use crate::pipeline::pipeline::isb::{BufferReaderConfig, BufferWriterConfig}; @@ -250,6 +350,8 @@ mod tests { use crate::pipeline::pipeline::{SinkVtxConfig, SourceVtxConfig}; use crate::pipeline::tests::isb::BufferFullStrategy::RetryUntilSuccess; + // e2e test for source forwarder, reads from generator 
and writes to + // multi-partitioned buffer. #[cfg(feature = "nats-tests")] #[tokio::test] async fn test_forwarder_for_source_vertex() { @@ -389,6 +491,8 @@ mod tests { } } + // e2e test for sink forwarder, reads from multi-partitioned buffer and + // writes to sink. #[cfg(feature = "nats-tests")] #[tokio::test] async fn test_forwarder_for_sink_vertex() { @@ -407,9 +511,6 @@ mod tests { const MESSAGE_COUNT: usize = 10; let mut consumers = vec![]; - // Create streams to which the generator source vertex we create later will forward - // messages to. The consumers created for the corresponding streams will be used to ensure - // that messages were actually written to the streams. for stream_name in &streams { let stream_name = *stream_name; // Delete stream if it exists @@ -546,4 +647,247 @@ mod tests { context.delete_stream(stream_name).await.unwrap(); } } + + struct SimpleCat; + + #[tonic::async_trait] + impl map::Mapper for SimpleCat { + async fn map(&self, input: map::MapRequest) -> Vec { + let message = map::Message::new(input.value) + .keys(input.keys) + .tags(vec!["test-forwarder".to_string()]); + vec![message] + } + } + + // e2e test for map forwarder, reads from multi-partitioned buffer, invokes map + // and writes to multi-partitioned buffer. 
+ #[cfg(feature = "nats-tests")] + #[tokio::test] + async fn test_forwarder_for_map_vertex() { + let tmp_dir = TempDir::new().unwrap(); + let sock_file = tmp_dir.path().join("map.sock"); + let server_info_file = tmp_dir.path().join("mapper-server-info"); + + let server_info = server_info_file.clone(); + let server_socket = sock_file.clone(); + let _handle = tokio::spawn(async move { + map::Server::new(SimpleCat) + .with_socket_file(server_socket) + .with_server_info_file(server_info) + .start() + .await + .expect("server failed"); + }); + + // wait for the server to start + tokio::time::sleep(Duration::from_millis(100)).await; + + // Unique names for the streams we use in this test + let input_streams = vec![ + "default-test-forwarder-for-map-vertex-in-0", + "default-test-forwarder-for-map-vertex-in-1", + "default-test-forwarder-for-map-vertex-in-2", + "default-test-forwarder-for-map-vertex-in-3", + "default-test-forwarder-for-map-vertex-in-4", + ]; + + let output_streams = vec![ + "default-test-forwarder-for-map-vertex-out-0", + "default-test-forwarder-for-map-vertex-out-1", + "default-test-forwarder-for-map-vertex-out-2", + "default-test-forwarder-for-map-vertex-out-3", + "default-test-forwarder-for-map-vertex-out-4", + ]; + + let js_url = "localhost:4222"; + let client = async_nats::connect(js_url).await.unwrap(); + let context = jetstream::new(client); + + const MESSAGE_COUNT: usize = 10; + let mut input_consumers = vec![]; + let mut output_consumers = vec![]; + for stream_name in &input_streams { + let stream_name = *stream_name; + // Delete stream if it exists + let _ = context.delete_stream(stream_name).await; + let _stream = context + .get_or_create_stream(stream::Config { + name: stream_name.into(), + subjects: vec![stream_name.into()], + max_message_size: 64 * 1024, + max_messages: 10000, + ..Default::default() + }) + .await + .unwrap(); + + // Publish some messages into the stream + use chrono::{TimeZone, Utc}; + + use crate::message::{Message, 
MessageID, Offset, StringOffset}; + let message = Message { + keys: Arc::from(vec!["key1".to_string()]), + tags: None, + value: vec![1, 2, 3].into(), + offset: Some(Offset::String(StringOffset::new("123".to_string(), 0))), + event_time: Utc.timestamp_opt(1627846261, 0).unwrap(), + id: MessageID { + vertex_name: "vertex".to_string().into(), + offset: "123".to_string().into(), + index: 0, + }, + headers: HashMap::new(), + }; + let message: bytes::BytesMut = message.try_into().unwrap(); + + for _ in 0..MESSAGE_COUNT { + context + .publish(stream_name.to_string(), message.clone().into()) + .await + .unwrap() + .await + .unwrap(); + } + + let c: consumer::PullConsumer = context + .create_consumer_on_stream( + consumer::pull::Config { + name: Some(stream_name.to_string()), + ack_policy: consumer::AckPolicy::Explicit, + ..Default::default() + }, + stream_name, + ) + .await + .unwrap(); + + input_consumers.push((stream_name.to_string(), c)); + } + + // Create output streams and consumers + for stream_name in &output_streams { + let stream_name = *stream_name; + // Delete stream if it exists + let _ = context.delete_stream(stream_name).await; + let _stream = context + .get_or_create_stream(stream::Config { + name: stream_name.into(), + subjects: vec![stream_name.into()], + max_message_size: 64 * 1024, + max_messages: 1000, + ..Default::default() + }) + .await + .unwrap(); + + let c: consumer::PullConsumer = context + .create_consumer_on_stream( + consumer::pull::Config { + name: Some(stream_name.to_string()), + ack_policy: consumer::AckPolicy::Explicit, + ..Default::default() + }, + stream_name, + ) + .await + .unwrap(); + output_consumers.push((stream_name.to_string(), c)); + } + + let pipeline_config = PipelineConfig { + pipeline_name: "simple-map-pipeline".to_string(), + vertex_name: "in".to_string(), + replica: 0, + batch_size: 1000, + paf_concurrency: 1000, + read_timeout: Duration::from_secs(1), + js_client_config: isb::jetstream::ClientConfig { + url: 
"localhost:4222".to_string(), + user: None, + password: None, + }, + to_vertex_config: vec![ToVertexConfig { + name: "map-out".to_string(), + writer_config: BufferWriterConfig { + streams: output_streams + .iter() + .enumerate() + .map(|(i, stream_name)| ((*stream_name).to_string(), i as u16)) + .collect(), + partitions: 5, + max_length: 30000, + usage_limit: 0.8, + buffer_full_strategy: RetryUntilSuccess, + }, + conditions: None, + }], + from_vertex_config: vec![FromVertexConfig { + name: "map-in".to_string(), + reader_config: BufferReaderConfig { + partitions: 5, + streams: input_streams + .iter() + .enumerate() + .map(|(i, key)| (*key, i as u16)) + .collect(), + wip_ack_interval: Duration::from_secs(1), + }, + partitions: 0, + }], + vertex_config: VertexType::Map(MapVtxConfig { + concurrency: 10, + map_type: MapType::UserDefined(UserDefinedConfig { + grpc_max_message_size: 4 * 1024 * 1024, + socket_path: sock_file.to_str().unwrap().to_string(), + server_info_path: server_info_file.to_str().unwrap().to_string(), + }), + map_mode: MapMode::Unary, + }), + metrics_config: MetricsConfig { + metrics_server_listen_port: 2469, + lag_check_interval_in_secs: 5, + lag_refresh_interval_in_secs: 3, + lookback_window_in_secs: 120, + }, + }; + + let cancellation_token = CancellationToken::new(); + let forwarder_task = tokio::spawn({ + let cancellation_token = cancellation_token.clone(); + async move { + start_forwarder(cancellation_token, pipeline_config) + .await + .unwrap(); + } + }); + + // Wait for a few messages to be forwarded + tokio::time::sleep(Duration::from_secs(3)).await; + cancellation_token.cancel(); + // token cancellation is not aborting the forwarder since we fetch messages from jetstream + // as a stream of messages (not using `consumer.batch()`). 
+ // See `JetstreamReader::start` method in src/pipeline/isb/jetstream/reader.rs + //forwarder_task.await.unwrap(); + forwarder_task.abort(); + + // make sure we have mapped and written all messages to downstream + let mut written_count = 0; + for (_, mut stream_consumer) in output_consumers { + written_count += stream_consumer.info().await.unwrap().num_pending; + } + assert_eq!(written_count, (MESSAGE_COUNT * input_streams.len()) as u64); + + // make sure all the upstream messages are read and acked + for (_, mut stream_consumer) in input_consumers { + let con_info = stream_consumer.info().await.unwrap(); + assert_eq!(con_info.num_pending, 0); + assert_eq!(con_info.num_ack_pending, 0); + } + + // Delete all streams created in this test + for stream_name in input_streams.iter().chain(output_streams.iter()) { + context.delete_stream(stream_name).await.unwrap(); + } + } } diff --git a/rust/numaflow-core/src/pipeline/forwarder.rs b/rust/numaflow-core/src/pipeline/forwarder.rs index e87a15ef48..3fb39e5a7e 100644 --- a/rust/numaflow-core/src/pipeline/forwarder.rs +++ b/rust/numaflow-core/src/pipeline/forwarder.rs @@ -35,6 +35,10 @@ /// the Write is User-defined Sink or builtin. pub(crate) mod sink_forwarder; +/// Forwarder specific to Mapper where Reader is ISB, UDF is User-defined Mapper, +/// Write is ISB. +pub(crate) mod map_forwarder; + /// Source where the Reader is builtin or User-defined Source, Write is ISB, /// with an optional Transformer. 
pub(crate) mod source_forwarder; diff --git a/rust/numaflow-core/src/pipeline/forwarder/map_forwarder.rs b/rust/numaflow-core/src/pipeline/forwarder/map_forwarder.rs new file mode 100644 index 0000000000..afc08a6672 --- /dev/null +++ b/rust/numaflow-core/src/pipeline/forwarder/map_forwarder.rs @@ -0,0 +1,63 @@ +use tokio_util::sync::CancellationToken; + +use crate::error::Error; +use crate::mapper::map::MapHandle; +use crate::pipeline::isb::jetstream::reader::JetstreamReader; +use crate::pipeline::isb::jetstream::writer::JetstreamWriter; +use crate::Result; + +/// Map forwarder is a component which starts a streaming reader, a mapper, and a writer +/// and manages the lifecycle of these components. +pub(crate) struct MapForwarder { + jetstream_reader: JetstreamReader, + mapper: MapHandle, + jetstream_writer: JetstreamWriter, + cln_token: CancellationToken, +} + +impl MapForwarder { + pub(crate) async fn new( + jetstream_reader: JetstreamReader, + mapper: MapHandle, + jetstream_writer: JetstreamWriter, + cln_token: CancellationToken, + ) -> Self { + Self { + jetstream_reader, + mapper, + jetstream_writer, + cln_token, + } + } + + pub(crate) async fn start(&self) -> Result<()> { + // Create a child cancellation token only for the reader so that we can stop the reader first + let reader_cancellation_token = self.cln_token.child_token(); + let (read_messages_stream, reader_handle) = self + .jetstream_reader + .streaming_read(reader_cancellation_token.clone()) + .await?; + + let (mapped_messages_stream, mapper_handle) = + self.mapper.streaming_map(read_messages_stream).await?; + + let writer_handle = self + .jetstream_writer + .streaming_write(mapped_messages_stream) + .await?; + + // Join the reader, mapper, and writer + match tokio::try_join!(reader_handle, mapper_handle, writer_handle) { + Ok((reader_result, mapper_result, writer_result)) => { + writer_result?; + mapper_result?; + reader_result?; + Ok(()) + } + Err(e) => Err(Error::Forwarder(format!( + "Error while 
joining reader, mapper, and writer: {:?}", + e + ))), + } + } +} diff --git a/rust/numaflow-core/src/pipeline/forwarder/sink_forwarder.rs b/rust/numaflow-core/src/pipeline/forwarder/sink_forwarder.rs index 7153a4ff1d..1d560e94e5 100644 --- a/rust/numaflow-core/src/pipeline/forwarder/sink_forwarder.rs +++ b/rust/numaflow-core/src/pipeline/forwarder/sink_forwarder.rs @@ -1,6 +1,5 @@ use tokio_util::sync::CancellationToken; -use crate::config::pipeline::PipelineConfig; use crate::error::Error; use crate::pipeline::isb::jetstream::reader::JetstreamReader; use crate::sink::SinkWriter; @@ -27,12 +26,12 @@ impl SinkForwarder { } } - pub(crate) async fn start(&self, pipeline_config: PipelineConfig) -> Result<()> { + pub(crate) async fn start(&self) -> Result<()> { // Create a child cancellation token only for the reader so that we can stop the reader first let reader_cancellation_token = self.cln_token.child_token(); let (read_messages_stream, reader_handle) = self .jetstream_reader - .streaming_read(reader_cancellation_token.clone(), &pipeline_config) + .streaming_read(reader_cancellation_token.clone()) .await?; let sink_writer_handle = self @@ -43,8 +42,8 @@ impl SinkForwarder { // Join the reader and sink writer match tokio::try_join!(reader_handle, sink_writer_handle) { Ok((reader_result, sink_writer_result)) => { - reader_result?; sink_writer_result?; + reader_result?; Ok(()) } Err(e) => Err(Error::Forwarder(format!( diff --git a/rust/numaflow-core/src/pipeline/forwarder/source_forwarder.rs b/rust/numaflow-core/src/pipeline/forwarder/source_forwarder.rs index d494cbbd93..b81ddaf80e 100644 --- a/rust/numaflow-core/src/pipeline/forwarder/source_forwarder.rs +++ b/rust/numaflow-core/src/pipeline/forwarder/source_forwarder.rs @@ -81,9 +81,9 @@ impl SourceForwarder { writer_handle, ) { Ok((reader_result, transformer_result, sink_writer_result)) => { - reader_result?; - transformer_result?; sink_writer_result?; + transformer_result?; + reader_result?; Ok(()) } Err(e) => 
Err(Error::Forwarder(format!( @@ -180,9 +180,11 @@ mod tests { } } - async fn pending(&self) -> usize { - self.num - self.sent_count.load(Ordering::SeqCst) - + self.yet_to_ack.read().unwrap().len() + async fn pending(&self) -> Option { + Some( + self.num - self.sent_count.load(Ordering::SeqCst) + + self.yet_to_ack.read().unwrap().len(), + ) } async fn partitions(&self) -> Option> { @@ -212,7 +214,7 @@ mod tests { let cln_token = CancellationToken::new(); let (src_shutdown_tx, src_shutdown_rx) = oneshot::channel(); - let tmp_dir = tempfile::TempDir::new().unwrap(); + let tmp_dir = TempDir::new().unwrap(); let sock_file = tmp_dir.path().join("source.sock"); let server_info_file = tmp_dir.path().join("source-server-info"); diff --git a/rust/numaflow-core/src/pipeline/isb/jetstream/reader.rs b/rust/numaflow-core/src/pipeline/isb/jetstream/reader.rs index 4513cb9182..79b8572efd 100644 --- a/rust/numaflow-core/src/pipeline/isb/jetstream/reader.rs +++ b/rust/numaflow-core/src/pipeline/isb/jetstream/reader.rs @@ -12,8 +12,8 @@ use tokio_stream::StreamExt; use tokio_util::sync::CancellationToken; use tracing::{error, info}; +use crate::config::get_vertex_name; use crate::config::pipeline::isb::BufferReaderConfig; -use crate::config::pipeline::PipelineConfig; use crate::error::Error; use crate::message::{IntOffset, Message, MessageID, Offset, ReadAck}; use crate::metrics::{ @@ -33,6 +33,7 @@ pub(crate) struct JetstreamReader { config: BufferReaderConfig, consumer: PullConsumer, tracker_handle: TrackerHandle, + batch_size: usize, } impl JetstreamReader { @@ -42,6 +43,7 @@ impl JetstreamReader { js_ctx: Context, config: BufferReaderConfig, tracker_handle: TrackerHandle, + batch_size: usize, ) -> Result { let mut config = config; @@ -69,6 +71,7 @@ impl JetstreamReader { config: config.clone(), consumer, tracker_handle, + batch_size, }) } @@ -81,10 +84,8 @@ impl JetstreamReader { pub(crate) async fn streaming_read( &self, cancel_token: CancellationToken, - pipeline_config: 
&PipelineConfig, ) -> Result<(ReceiverStream, JoinHandle>)> { - let (messages_tx, messages_rx) = mpsc::channel(2 * pipeline_config.batch_size); - let pipeline_config = pipeline_config.clone(); + let (messages_tx, messages_rx) = mpsc::channel(2 * self.batch_size); let handle: JoinHandle> = tokio::spawn({ let consumer = self.consumer.clone(); @@ -143,20 +144,23 @@ impl JetstreamReader { } }; - message.offset = Some(Offset::Int(IntOffset::new( + let offset = Offset::Int(IntOffset::new( msg_info.stream_sequence, partition_idx, - ))); + )); - message.id = MessageID { - vertex_name: pipeline_config.vertex_name.clone().into(), - offset: msg_info.stream_sequence.to_string().into(), + let message_id = MessageID { + vertex_name: get_vertex_name().to_string().into(), + offset: offset.to_string().into(), index: 0, }; + message.offset = Some(offset.clone()); + message.id = message_id.clone(); + // Insert the message into the tracker and wait for the ack to be sent back. let (ack_tx, ack_rx) = oneshot::channel(); - tracker_handle.insert(message.id.offset.clone(), ack_tx).await?; + tracker_handle.insert(message_id.offset.clone(), ack_tx).await?; tokio::spawn(Self::start_work_in_progress( jetstream_message, @@ -164,9 +168,14 @@ impl JetstreamReader { config.wip_ack_interval, )); - messages_tx.send(message).await.map_err(|e| { - Error::ISB(format!("Error while sending message to channel: {:?}", e)) - })?; + if let Err(e) = messages_tx.send(message).await { + // nak the read message and return + tracker_handle.discard(message_id.offset.clone()).await?; + return Err(Error::ISB(format!( + "Failed to send message to receiver: {:?}", + e + ))); + } pipeline_metrics() .forwarder @@ -313,17 +322,14 @@ mod tests { context.clone(), buf_reader_config, TrackerHandle::new(), + 500, ) .await .unwrap(); - let pipeline_cfg_base64 = 
"eyJtZXRhZGF0YSI6eyJuYW1lIjoic2ltcGxlLXBpcGVsaW5lLW91dCIsIm5hbWVzcGFjZSI6ImRlZmF1bHQiLCJjcmVhdGlvblRpbWVzdGFtcCI6bnVsbH0sInNwZWMiOnsibmFtZSI6Im91dCIsInNpbmsiOnsiYmxhY2tob2xlIjp7fSwicmV0cnlTdHJhdGVneSI6eyJvbkZhaWx1cmUiOiJyZXRyeSJ9fSwibGltaXRzIjp7InJlYWRCYXRjaFNpemUiOjUwMCwicmVhZFRpbWVvdXQiOiIxcyIsImJ1ZmZlck1heExlbmd0aCI6MzAwMDAsImJ1ZmZlclVzYWdlTGltaXQiOjgwfSwic2NhbGUiOnsibWluIjoxfSwidXBkYXRlU3RyYXRlZ3kiOnsidHlwZSI6IlJvbGxpbmdVcGRhdGUiLCJyb2xsaW5nVXBkYXRlIjp7Im1heFVuYXZhaWxhYmxlIjoiMjUlIn19LCJwaXBlbGluZU5hbWUiOiJzaW1wbGUtcGlwZWxpbmUiLCJpbnRlclN0ZXBCdWZmZXJTZXJ2aWNlTmFtZSI6IiIsInJlcGxpY2FzIjowLCJmcm9tRWRnZXMiOlt7ImZyb20iOiJpbiIsInRvIjoib3V0IiwiY29uZGl0aW9ucyI6bnVsbCwiZnJvbVZlcnRleFR5cGUiOiJTb3VyY2UiLCJmcm9tVmVydGV4UGFydGl0aW9uQ291bnQiOjEsImZyb21WZXJ0ZXhMaW1pdHMiOnsicmVhZEJhdGNoU2l6ZSI6NTAwLCJyZWFkVGltZW91dCI6IjFzIiwiYnVmZmVyTWF4TGVuZ3RoIjozMDAwMCwiYnVmZmVyVXNhZ2VMaW1pdCI6ODB9LCJ0b1ZlcnRleFR5cGUiOiJTaW5rIiwidG9WZXJ0ZXhQYXJ0aXRpb25Db3VudCI6MSwidG9WZXJ0ZXhMaW1pdHMiOnsicmVhZEJhdGNoU2l6ZSI6NTAwLCJyZWFkVGltZW91dCI6IjFzIiwiYnVmZmVyTWF4TGVuZ3RoIjozMDAwMCwiYnVmZmVyVXNhZ2VMaW1pdCI6ODB9fV0sIndhdGVybWFyayI6eyJtYXhEZWxheSI6IjBzIn19LCJzdGF0dXMiOnsicGhhc2UiOiIiLCJyZXBsaWNhcyI6MCwiZGVzaXJlZFJlcGxpY2FzIjowLCJsYXN0U2NhbGVkQXQiOm51bGx9fQ==".to_string(); - - let env_vars = [("NUMAFLOW_ISBSVC_JETSTREAM_URL", "localhost:4222")]; - let pipeline_config = PipelineConfig::load(pipeline_cfg_base64, env_vars).unwrap(); let reader_cancel_token = CancellationToken::new(); let (mut js_reader_rx, js_reader_task) = js_reader - .streaming_read(reader_cancel_token.clone(), &pipeline_config) + .streaming_read(reader_cancel_token.clone()) .await .unwrap(); @@ -413,17 +419,14 @@ mod tests { context.clone(), buf_reader_config, tracker_handle.clone(), + 1, ) .await .unwrap(); - let pipeline_cfg_base64 = 
"eyJtZXRhZGF0YSI6eyJuYW1lIjoic2ltcGxlLXBpcGVsaW5lLW91dCIsIm5hbWVzcGFjZSI6ImRlZmF1bHQiLCJjcmVhdGlvblRpbWVzdGFtcCI6bnVsbH0sInNwZWMiOnsibmFtZSI6Im91dCIsInNpbmsiOnsiYmxhY2tob2xlIjp7fSwicmV0cnlTdHJhdGVneSI6eyJvbkZhaWx1cmUiOiJyZXRyeSJ9fSwibGltaXRzIjp7InJlYWRCYXRjaFNpemUiOjUwMCwicmVhZFRpbWVvdXQiOiIxcyIsImJ1ZmZlck1heExlbmd0aCI6MzAwMDAsImJ1ZmZlclVzYWdlTGltaXQiOjgwfSwic2NhbGUiOnsibWluIjoxfSwidXBkYXRlU3RyYXRlZ3kiOnsidHlwZSI6IlJvbGxpbmdVcGRhdGUiLCJyb2xsaW5nVXBkYXRlIjp7Im1heFVuYXZhaWxhYmxlIjoiMjUlIn19LCJwaXBlbGluZU5hbWUiOiJzaW1wbGUtcGlwZWxpbmUiLCJpbnRlclN0ZXBCdWZmZXJTZXJ2aWNlTmFtZSI6IiIsInJlcGxpY2FzIjowLCJmcm9tRWRnZXMiOlt7ImZyb20iOiJpbiIsInRvIjoib3V0IiwiY29uZGl0aW9ucyI6bnVsbCwiZnJvbVZlcnRleFR5cGUiOiJTb3VyY2UiLCJmcm9tVmVydGV4UGFydGl0aW9uQ291bnQiOjEsImZyb21WZXJ0ZXhMaW1pdHMiOnsicmVhZEJhdGNoU2l6ZSI6NTAwLCJyZWFkVGltZW91dCI6IjFzIiwiYnVmZmVyTWF4TGVuZ3RoIjozMDAwMCwiYnVmZmVyVXNhZ2VMaW1pdCI6ODB9LCJ0b1ZlcnRleFR5cGUiOiJTaW5rIiwidG9WZXJ0ZXhQYXJ0aXRpb25Db3VudCI6MSwidG9WZXJ0ZXhMaW1pdHMiOnsicmVhZEJhdGNoU2l6ZSI6NTAwLCJyZWFkVGltZW91dCI6IjFzIiwiYnVmZmVyTWF4TGVuZ3RoIjozMDAwMCwiYnVmZmVyVXNhZ2VMaW1pdCI6ODB9fV0sIndhdGVybWFyayI6eyJtYXhEZWxheSI6IjBzIn19LCJzdGF0dXMiOnsicGhhc2UiOiIiLCJyZXBsaWNhcyI6MCwiZGVzaXJlZFJlcGxpY2FzIjowLCJsYXN0U2NhbGVkQXQiOm51bGx9fQ==".to_string(); - - let env_vars = [("NUMAFLOW_ISBSVC_JETSTREAM_URL", "localhost:4222")]; - let pipeline_config = PipelineConfig::load(pipeline_cfg_base64, env_vars).unwrap(); let reader_cancel_token = CancellationToken::new(); let (mut js_reader_rx, js_reader_task) = js_reader - .streaming_read(reader_cancel_token.clone(), &pipeline_config) + .streaming_read(reader_cancel_token.clone()) .await .unwrap(); @@ -438,7 +441,7 @@ mod tests { event_time: Utc::now(), id: MessageID { vertex_name: "vertex".to_string().into(), - offset: format!("{}", i + 1).into(), + offset: format!("{}-0", i + 1).into(), index: i, }, headers: HashMap::new(), diff --git a/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs 
b/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs index a99d43856d..e71335a576 100644 --- a/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs +++ b/rust/numaflow-core/src/pipeline/isb/jetstream/writer.rs @@ -12,6 +12,7 @@ use async_nats::jetstream::Context; use bytes::{Bytes, BytesMut}; use tokio::sync::Semaphore; use tokio::task::JoinHandle; +use tokio::time; use tokio::time::{sleep, Instant}; use tokio_stream::wrappers::ReceiverStream; use tokio_stream::StreamExt; @@ -31,11 +32,11 @@ use crate::Result; const DEFAULT_RETRY_INTERVAL_MILLIS: u64 = 10; const DEFAULT_REFRESH_INTERVAL_SECS: u64 = 1; -#[derive(Clone)] /// Writes to JetStream ISB. Exposes both write and blocking methods to write messages. /// It accepts a cancellation token to stop infinite retries during shutdown. /// JetstreamWriter is one to many mapping of streams to write messages to. It also /// maintains the buffer usage metrics for each stream. +#[derive(Clone)] pub(crate) struct JetstreamWriter { config: Arc>, js_ctx: Context, @@ -183,6 +184,9 @@ impl JetstreamWriter { let mut messages_stream = messages_stream; let mut hash = DefaultHasher::new(); + let mut processed_msgs_count: usize = 0; + let mut last_logged_at = time::Instant::now(); + while let Some(message) = messages_stream.next().await { // if message needs to be dropped, ack and continue // TODO: add metric for dropped count @@ -241,6 +245,17 @@ impl JetstreamWriter { offset: message.id.offset, }) .await?; + + processed_msgs_count += 1; + if last_logged_at.elapsed().as_secs() >= 1 { + info!( + "Processed {} messages in {:?}", + processed_msgs_count, + std::time::Instant::now() + ); + processed_msgs_count = 0; + last_logged_at = Instant::now(); + } } Ok(()) }); diff --git a/rust/numaflow-core/src/shared/create_components.rs b/rust/numaflow-core/src/shared/create_components.rs index 9dd0f39596..bde1f6059e 100644 --- a/rust/numaflow-core/src/shared/create_components.rs +++ b/rust/numaflow-core/src/shared/create_components.rs 
@@ -1,5 +1,6 @@ use std::time::Duration; +use numaflow_pb::clients::map::map_client::MapClient; use numaflow_pb::clients::sink::sink_client::SinkClient; use numaflow_pb::clients::source::source_client::SourceClient; use numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; @@ -9,6 +10,10 @@ use tonic::transport::Channel; use crate::config::components::sink::{SinkConfig, SinkType}; use crate::config::components::source::{SourceConfig, SourceType}; use crate::config::components::transformer::TransformerConfig; +use crate::config::pipeline::map::{MapMode, MapType, MapVtxConfig}; +use crate::config::pipeline::{DEFAULT_BATCH_MAP_SOCKET, DEFAULT_STREAM_MAP_SOCKET}; +use crate::error::Error; +use crate::mapper::map::MapHandle; use crate::shared::grpc; use crate::shared::server_info::{sdk_server_info, ContainerType}; use crate::sink::{SinkClientType, SinkWriter, SinkWriterBuilder}; @@ -147,7 +152,7 @@ pub(crate) async fn create_sink_writer( } /// Creates a transformer if it is configured -pub async fn create_transformer( +pub(crate) async fn create_transformer( batch_size: usize, transformer_config: Option, tracker_handle: TrackerHandle, @@ -197,6 +202,66 @@ pub async fn create_transformer( Ok((None, None)) } +pub(crate) async fn create_mapper( + batch_size: usize, + read_timeout: Duration, + map_config: MapVtxConfig, + tracker_handle: TrackerHandle, + cln_token: CancellationToken, +) -> error::Result<(MapHandle, Option>)> { + match map_config.map_type { + MapType::UserDefined(mut config) => { + let server_info = + sdk_server_info(config.server_info_path.clone().into(), cln_token.clone()).await?; + + // based on the map mode that is set in the server info, we will override the socket path + // so that the clients can connect to the appropriate socket. 
+ let config = match server_info.get_map_mode().unwrap_or(MapMode::Unary) { + MapMode::Unary => config, + MapMode::Batch => { + config.socket_path = DEFAULT_BATCH_MAP_SOCKET.into(); + config + } + MapMode::Stream => { + config.socket_path = DEFAULT_STREAM_MAP_SOCKET.into(); + config + } + }; + + let metric_labels = metrics::sdk_info_labels( + config::get_component_type().to_string(), + config::get_vertex_name().to_string(), + server_info.language.clone(), + server_info.version.clone(), + ContainerType::Sourcer.to_string(), + ); + metrics::global_metrics() + .sdk_info + .get_or_create(&metric_labels) + .set(1); + + let mut map_grpc_client = + MapClient::new(grpc::create_rpc_channel(config.socket_path.clone().into()).await?) + .max_encoding_message_size(config.grpc_max_message_size) + .max_decoding_message_size(config.grpc_max_message_size); + grpc::wait_until_mapper_ready(&cln_token, &mut map_grpc_client).await?; + Ok(( + MapHandle::new( + server_info.get_map_mode().unwrap_or(MapMode::Unary), + batch_size, + read_timeout, + map_config.concurrency, + map_grpc_client.clone(), + tracker_handle, + ) + .await?, + Some(map_grpc_client), + )) + } + MapType::Builtin(_) => Err(Error::Mapper("Builtin mapper is not supported".to_string())), + } +} + /// Creates a source type based on the configuration pub async fn create_source( batch_size: usize, @@ -311,8 +376,8 @@ mod tests { async fn ack(&self, _offset: Vec) {} - async fn pending(&self) -> usize { - 0 + async fn pending(&self) -> Option { + Some(0) } async fn partitions(&self) -> Option> { diff --git a/rust/numaflow-core/src/shared/grpc.rs b/rust/numaflow-core/src/shared/grpc.rs index 3500524f02..bedfd2e138 100644 --- a/rust/numaflow-core/src/shared/grpc.rs +++ b/rust/numaflow-core/src/shared/grpc.rs @@ -5,6 +5,7 @@ use axum::http::Uri; use backoff::retry::Retry; use backoff::strategy::fixed; use chrono::{DateTime, TimeZone, Timelike, Utc}; +use numaflow_pb::clients::map::map_client::MapClient; use 
numaflow_pb::clients::sink::sink_client::SinkClient; use numaflow_pb::clients::source::source_client::SourceClient; use numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; @@ -81,6 +82,26 @@ pub(crate) async fn wait_until_transformer_ready( Ok(()) } +/// Waits until the mapper server is ready, by doing health checks +pub(crate) async fn wait_until_mapper_ready( + cln_token: &CancellationToken, + client: &mut MapClient, +) -> error::Result<()> { + loop { + if cln_token.is_cancelled() { + return Err(Error::Forwarder( + "Cancellation token is cancelled".to_string(), + )); + } + match client.is_ready(Request::new(())).await { + Ok(_) => break, + Err(_) => sleep(Duration::from_secs(1)).await, + } + info!("Waiting for mapper client to be ready..."); + } + Ok(()) +} + pub(crate) fn prost_timestamp_from_utc(t: DateTime) -> Option { Some(Timestamp { seconds: t.timestamp(), diff --git a/rust/numaflow-core/src/shared/server_info.rs b/rust/numaflow-core/src/shared/server_info.rs index ee3b1c8d6a..7576368413 100644 --- a/rust/numaflow-core/src/shared/server_info.rs +++ b/rust/numaflow-core/src/shared/server_info.rs @@ -12,12 +12,14 @@ use tokio::time::sleep; use tokio_util::sync::CancellationToken; use tracing::{info, warn}; +use crate::config::pipeline::map::MapMode; use crate::error::{self, Error}; use crate::shared::server_info::version::SdkConstraints; // Constant to represent the end of the server info. // Equivalent to U+005C__END__. 
const END: &str = "U+005C__END__"; +const MAP_MODE_KEY: &str = "MAP_MODE"; #[derive(Debug, Eq, PartialEq, Clone, Hash)] pub enum ContainerType { @@ -88,6 +90,17 @@ pub(crate) struct ServerInfo { pub(crate) metadata: Option>, // Metadata is optional } +impl ServerInfo { + pub(crate) fn get_map_mode(&self) -> Option { + if let Some(metadata) = &self.metadata { + if let Some(map_mode) = metadata.get(MAP_MODE_KEY) { + return MapMode::from_str(map_mode); + } + } + None + } +} + /// sdk_server_info waits until the server info file is ready and check whether the /// server is compatible with Numaflow. pub(crate) async fn sdk_server_info( @@ -415,21 +428,25 @@ mod version { go_version_map.insert(ContainerType::SourceTransformer, "0.9.0-z".to_string()); go_version_map.insert(ContainerType::Sinker, "0.9.0-z".to_string()); go_version_map.insert(ContainerType::FbSinker, "0.9.0-z".to_string()); + go_version_map.insert(ContainerType::Mapper, "0.9.0-z".to_string()); let mut python_version_map = HashMap::new(); python_version_map.insert(ContainerType::Sourcer, "0.9.0rc100".to_string()); python_version_map.insert(ContainerType::SourceTransformer, "0.9.0rc100".to_string()); python_version_map.insert(ContainerType::Sinker, "0.9.0rc100".to_string()); python_version_map.insert(ContainerType::FbSinker, "0.9.0rc100".to_string()); + python_version_map.insert(ContainerType::Mapper, "0.9.0rc100".to_string()); let mut java_version_map = HashMap::new(); java_version_map.insert(ContainerType::Sourcer, "0.9.0-z".to_string()); java_version_map.insert(ContainerType::SourceTransformer, "0.9.0-z".to_string()); java_version_map.insert(ContainerType::Sinker, "0.9.0-z".to_string()); java_version_map.insert(ContainerType::FbSinker, "0.9.0-z".to_string()); + java_version_map.insert(ContainerType::Mapper, "0.9.0-z".to_string()); let mut rust_version_map = HashMap::new(); rust_version_map.insert(ContainerType::Sourcer, "0.1.0-z".to_string()); rust_version_map.insert(ContainerType::SourceTransformer, 
"0.1.0-z".to_string()); rust_version_map.insert(ContainerType::Sinker, "0.1.0-z".to_string()); rust_version_map.insert(ContainerType::FbSinker, "0.1.0-z".to_string()); + rust_version_map.insert(ContainerType::Mapper, "0.1.0-z".to_string()); let mut m = HashMap::new(); m.insert("go".to_string(), go_version_map); diff --git a/rust/numaflow-core/src/source.rs b/rust/numaflow-core/src/source.rs index 8be9d85493..4d280d3725 100644 --- a/rust/numaflow-core/src/source.rs +++ b/rust/numaflow-core/src/source.rs @@ -247,8 +247,6 @@ impl Source { info!("Started streaming source with batch size: {}", batch_size); let handle = tokio::spawn(async move { - let mut processed_msgs_count: usize = 0; - let mut last_logged_at = time::Instant::now(); // this semaphore is used only if read-ahead is disabled. we hold this semaphore to // make sure we can read only if the current inflight ones are ack'ed. let semaphore = Arc::new(Semaphore::new(1)); @@ -312,7 +310,7 @@ impl Source { // insert the offset and the ack one shot in the tracker. tracker_handle - .insert(offset.to_string().into(), resp_ack_tx) + .insert(message.id.offset.clone(), resp_ack_tx) .await?; // store the ack one shot in the batch to invoke ack later. 
@@ -343,17 +341,6 @@ impl Source { None }, )); - - processed_msgs_count += n; - if last_logged_at.elapsed().as_secs() >= 1 { - info!( - "Processed {} messages in {:?}", - processed_msgs_count, - std::time::Instant::now() - ); - processed_msgs_count = 0; - last_logged_at = time::Instant::now(); - } } }); Ok((ReceiverStream::new(messages_rx), handle)) @@ -504,8 +491,8 @@ mod tests { } } - async fn pending(&self) -> usize { - self.yet_to_ack.read().unwrap().len() + async fn pending(&self) -> Option { + Some(self.yet_to_ack.read().unwrap().len()) } async fn partitions(&self) -> Option> { diff --git a/rust/numaflow-core/src/source/user_defined.rs b/rust/numaflow-core/src/source/user_defined.rs index 758f8a6fc2..e5717c12a6 100644 --- a/rust/numaflow-core/src/source/user_defined.rs +++ b/rust/numaflow-core/src/source/user_defined.rs @@ -292,8 +292,8 @@ mod tests { } } - async fn pending(&self) -> usize { - self.yet_to_ack.read().unwrap().len() + async fn pending(&self) -> Option { + Some(self.yet_to_ack.read().unwrap().len()) } async fn partitions(&self) -> Option> { diff --git a/rust/numaflow-core/src/tracker.rs b/rust/numaflow-core/src/tracker.rs index a4ef30e24c..a8ccaca54a 100644 --- a/rust/numaflow-core/src/tracker.rs +++ b/rust/numaflow-core/src/tracker.rs @@ -12,7 +12,6 @@ use std::collections::HashMap; use bytes::Bytes; use tokio::sync::{mpsc, oneshot}; -use tracing::warn; use crate::error::Error; use crate::message::ReadAck; @@ -43,6 +42,7 @@ enum ActorMessage { Discard { offset: String, }, + DiscardAll, // New variant for discarding all messages #[cfg(test)] IsEmpty { respond_to: oneshot::Sender, @@ -56,11 +56,10 @@ struct Tracker { receiver: mpsc::Receiver, } -/// Implementation of Drop for Tracker to send Nak for unacknowledged messages. 
impl Drop for Tracker { fn drop(&mut self) { - for (offset, entry) in self.entries.drain() { - warn!(?offset, "Sending Nak for unacknowledged message"); + // clear the entries from the map and send nak + for (_, entry) in self.entries.drain() { entry .ack_send .send(ReadAck::Nak) @@ -103,6 +102,9 @@ impl Tracker { ActorMessage::Discard { offset } => { self.handle_discard(offset); } + ActorMessage::DiscardAll => { + self.handle_discard_all().await; + } #[cfg(test)] ActorMessage::IsEmpty { respond_to } => { let is_empty = self.entries.is_empty(); @@ -118,7 +120,7 @@ impl Tracker { TrackerEntry { ack_send: respond_to, count: 0, - eof: false, + eof: true, }, ); } @@ -126,8 +128,18 @@ impl Tracker { /// Updates an existing entry in the tracker with the number of expected messages and EOF status. fn handle_update(&mut self, offset: String, count: u32, eof: bool) { if let Some(entry) = self.entries.get_mut(&offset) { - entry.count = count; + entry.count += count; entry.eof = eof; + // if the count is zero, we can send an ack immediately + // this is case where map stream will send eof true after + // receiving all the messages. + if entry.count == 0 { + let entry = self.entries.remove(&offset).unwrap(); + entry + .ack_send + .send(ReadAck::Ack) + .expect("Failed to send ack"); + } } } @@ -138,7 +150,7 @@ impl Tracker { if entry.count > 0 { entry.count -= 1; } - if entry.count == 0 || entry.eof { + if entry.count == 0 && entry.eof { entry .ack_send .send(ReadAck::Ack) @@ -158,6 +170,16 @@ impl Tracker { .expect("Failed to send nak"); } } + + /// Discards all entries from the tracker and sends a nak for each. + async fn handle_discard_all(&mut self) { + for (_, entry) in self.entries.drain() { + entry + .ack_send + .send(ReadAck::Nak) + .expect("Failed to send nak"); + } + } } /// TrackerHandle provides an interface to interact with the Tracker. @@ -231,6 +253,15 @@ impl TrackerHandle { Ok(()) } + /// Discards all messages from the Tracker and sends a nak for each. 
+ pub(crate) async fn discard_all(&self) -> Result<()> { + let message = ActorMessage::DiscardAll; + self.sender + .send(message) + .await + .map_err(|e| Error::Tracker(format!("{:?}", e)))?; + Ok(()) + } /// Checks if the Tracker is empty. Used for testing to make sure all messages are acknowledged. #[cfg(test)] pub(crate) async fn is_empty(&self) -> Result { @@ -293,7 +324,7 @@ mod tests { // Update the message with a count of 3 handle - .update("offset1".to_string().into(), 3, false) + .update("offset1".to_string().into(), 3, true) .await .unwrap(); diff --git a/rust/numaflow-core/src/transformer.rs b/rust/numaflow-core/src/transformer.rs index 0b26a7e76a..6f9298b7c4 100644 --- a/rust/numaflow-core/src/transformer.rs +++ b/rust/numaflow-core/src/transformer.rs @@ -6,7 +6,6 @@ use tokio::task::JoinHandle; use tokio_stream::wrappers::ReceiverStream; use tokio_stream::StreamExt; use tonic::transport::Channel; -use tracing::error; use crate::error::Error; use crate::message::Message; @@ -15,7 +14,7 @@ use crate::tracker::TrackerHandle; use crate::transformer::user_defined::UserDefinedTransformer; use crate::Result; -/// User-Defined Transformer extends Numaflow to add custom sources supported outside the builtins. +/// User-Defined Transformer is a custom transformer that can be built by the user. /// /// [User-Defined Transformer]: https://numaflow.numaproj.io/user-guide/sources/transformer/overview/#build-your-own-transformer pub(crate) mod user_defined; @@ -60,13 +59,22 @@ impl TransformerActor { } } -/// StreamingTransformer, transforms messages in a streaming fashion. +/// Transformer, transforms messages in a streaming fashion. pub(crate) struct Transformer { batch_size: usize, sender: mpsc::Sender, concurrency: usize, tracker_handle: TrackerHandle, + task_handle: JoinHandle<()>, } + +/// Aborts the actor task when the transformer is dropped. 
+impl Drop for Transformer { + fn drop(&mut self) { + self.task_handle.abort(); + } +} + impl Transformer { pub(crate) async fn new( batch_size: usize, @@ -80,7 +88,7 @@ impl Transformer { UserDefinedTransformer::new(batch_size, client).await?, ); - tokio::spawn(async move { + let task_handle = tokio::spawn(async move { transformer_actor.run().await; }); @@ -89,23 +97,25 @@ impl Transformer { concurrency, sender, tracker_handle, + task_handle, }) } /// Applies the transformation on the message and sends it to the next stage, it blocks if the /// concurrency limit is reached. - pub(crate) async fn transform( + async fn transform( transform_handle: mpsc::Sender, permit: OwnedSemaphorePermit, read_msg: Message, output_tx: mpsc::Sender, tracker_handle: TrackerHandle, - ) -> Result<()> { + error_tx: mpsc::Sender, + ) { // only if we have tasks < max_concurrency - let output_tx = output_tx.clone(); // invoke transformer and then wait for the one-shot + // short-lived tokio spawns we don't need structured concurrency here tokio::spawn(async move { let start_time = tokio::time::Instant::now(); let _permit = permit; @@ -117,32 +127,41 @@ impl Transformer { }; // invoke trf - transform_handle - .send(msg) - .await - .expect("failed to send message"); + if let Err(e) = transform_handle.send(msg).await { + let _ = error_tx + .send(Error::Transformer(format!("failed to send message: {}", e))) + .await; + return; + } // wait for one-shot match receiver.await { Ok(Ok(mut transformed_messages)) => { - tracker_handle + if let Err(e) = tracker_handle .update( read_msg.id.offset.clone(), transformed_messages.len() as u32, - false, + true, ) .await - .expect("failed to update tracker"); + { + let _ = error_tx.send(e).await; + return; + } for transformed_message in transformed_messages.drain(..) 
{ let _ = output_tx.send(transformed_message).await; } } - Err(_) | Ok(Err(_)) => { - error!("Failed to transform message"); - tracker_handle - .discard(read_msg.id.offset.clone()) - .await - .expect("failed to discard tracker"); + Ok(Err(e)) => { + let _ = error_tx.send(e).await; + } + Err(e) => { + let _ = error_tx + .send(Error::Transformer(format!( + "failed to receive message: {}", + e + ))) + .await; } } monovertex_metrics() @@ -151,40 +170,59 @@ impl Transformer { .get_or_create(mvtx_forward_metric_labels()) .observe(start_time.elapsed().as_micros() as f64); }); - - Ok(()) } - /// Starts reading messages in the form of chunks and transforms them and - /// sends them to the next stage. + /// Starts the transformation of the stream of messages and returns the transformed stream. pub(crate) fn transform_stream( &self, input_stream: ReceiverStream, ) -> Result<(ReceiverStream, JoinHandle>)> { let (output_tx, output_rx) = mpsc::channel(self.batch_size); + // channel to transmit errors from the transformer tasks to the main task + let (error_tx, mut error_rx) = mpsc::channel(1); + let transform_handle = self.sender.clone(); let tracker_handle = self.tracker_handle.clone(); - // FIXME: batch_size should not be used, introduce a new config called udf concurrency let semaphore = Arc::new(Semaphore::new(self.concurrency)); let handle = tokio::spawn(async move { let mut input_stream = input_stream; - while let Some(read_msg) = input_stream.next().await { - let permit = Arc::clone(&semaphore).acquire_owned().await.map_err(|e| { - Error::Transformer(format!("failed to acquire semaphore: {}", e)) - })?; - - Self::transform( - transform_handle.clone(), - permit, - read_msg, - output_tx.clone(), - tracker_handle.clone(), - ) - .await?; + // we do a tokio::select! loop to handle the input stream and the error channel + // in case of any errors in the transformer tasks we need to shut down the mapper + // and discard all the messages in the tracker. + loop { + tokio::select! 
{ + x = input_stream.next() => { + if let Some(read_msg) = x { + let permit = Arc::clone(&semaphore) + .acquire_owned() + .await + .map_err(|e| Error::Transformer(format!("failed to acquire semaphore: {}", e)))?; + + let error_tx = error_tx.clone(); + Self::transform( + transform_handle.clone(), + permit, + read_msg, + output_tx.clone(), + tracker_handle.clone(), + error_tx, + ).await; + } else { + break; + } + }, + Some(error) = error_rx.recv() => { + // discard all the messages in the tracker since it's a critical error, and + // we are shutting down + tracker_handle.discard_all().await?; + return Err(error); + }, + } } + Ok(()) }); @@ -202,6 +240,7 @@ mod tests { use tokio::sync::oneshot; use super::*; + use crate::message::StringOffset; use crate::message::{Message, MessageID, Offset}; use crate::shared::grpc::create_rpc_channel; @@ -248,10 +287,7 @@ mod tests { keys: Arc::from(vec!["first".into()]), tags: None, value: "hello".into(), - offset: Some(Offset::String(crate::message::StringOffset::new( - "0".to_string(), - 0, - ))), + offset: Some(Offset::String(StringOffset::new("0".to_string(), 0))), event_time: chrono::Utc::now(), id: MessageID { vertex_name: "vertex_name".to_string().into(), @@ -265,14 +301,19 @@ mod tests { let semaphore = Arc::new(Semaphore::new(10)); let permit = semaphore.acquire_owned().await.unwrap(); + let (error_tx, mut error_rx) = mpsc::channel(1); Transformer::transform( transformer.sender.clone(), permit, message, output_tx, tracker_handle, + error_tx, ) - .await?; + .await; + + // check for errors + assert!(error_rx.recv().await.is_none()); let transformed_message = output_rx.recv().await.unwrap(); assert_eq!(transformed_message.value, "hello"); @@ -325,10 +366,7 @@ mod tests { keys: Arc::from(vec![format!("key_{}", i)]), tags: None, value: format!("value_{}", i).into(), - offset: Some(Offset::String(crate::message::StringOffset::new( - i.to_string(), - 0, - ))), + offset: Some(Offset::String(StringOffset::new(i.to_string(), 0))), 
event_time: chrono::Utc::now(), id: MessageID { vertex_name: "vertex_name".to_string().into(), @@ -368,4 +406,78 @@ mod tests { ); Ok(()) } + + struct SimpleTransformerPanic; + + #[tonic::async_trait] + impl sourcetransform::SourceTransformer for SimpleTransformerPanic { + async fn transform( + &self, + _input: sourcetransform::SourceTransformRequest, + ) -> Vec { + panic!("SimpleTransformerPanic panicked!"); + } + } + + #[tokio::test] + async fn test_transform_stream_with_panic() -> Result<()> { + let tmp_dir = TempDir::new().unwrap(); + let sock_file = tmp_dir.path().join("sourcetransform.sock"); + let server_info_file = tmp_dir.path().join("sourcetransformer-server-info"); + + let server_info = server_info_file.clone(); + let server_socket = sock_file.clone(); + let handle = tokio::spawn(async move { + sourcetransform::Server::new(SimpleTransformerPanic) + .with_socket_file(server_socket) + .with_server_info_file(server_info) + .start() + .await + .expect("server failed"); + }); + + // wait for the server to start + tokio::time::sleep(Duration::from_millis(100)).await; + + let tracker_handle = TrackerHandle::new(); + let client = SourceTransformClient::new(create_rpc_channel(sock_file).await?); + let transformer = Transformer::new(500, 10, client, tracker_handle.clone()).await?; + + let (input_tx, input_rx) = mpsc::channel(10); + let input_stream = ReceiverStream::new(input_rx); + + let message = Message { + keys: Arc::from(vec!["first".into()]), + tags: None, + value: "hello".into(), + offset: Some(Offset::String(StringOffset::new("0".to_string(), 0))), + event_time: chrono::Utc::now(), + id: MessageID { + vertex_name: "vertex_name".to_string().into(), + offset: "0".to_string().into(), + index: 0, + }, + headers: Default::default(), + }; + + input_tx.send(message).await.unwrap(); + + let (_output_stream, transform_handle) = transformer.transform_stream(input_stream)?; + + // Await the join handle and expect an error due to the panic + let result = 
transform_handle.await.unwrap(); + assert!(result.is_err(), "Expected an error due to panic"); + assert!(result.unwrap_err().to_string().contains("panic")); + + // we need to drop the transformer, because if there are any in-flight requests + // server fails to shut down. https://github.com/numaproj/numaflow-rs/issues/85 + drop(transformer); + + tokio::time::sleep(Duration::from_millis(50)).await; + assert!( + handle.is_finished(), + "Expected gRPC server to have shut down" + ); + Ok(()) + } } diff --git a/rust/numaflow-core/src/transformer/user_defined.rs b/rust/numaflow-core/src/transformer/user_defined.rs index 9a82275ac8..398d5a4bcb 100644 --- a/rust/numaflow-core/src/transformer/user_defined.rs +++ b/rust/numaflow-core/src/transformer/user_defined.rs @@ -1,11 +1,11 @@ use std::collections::HashMap; -use std::sync::{Arc, Mutex}; +use std::sync::Arc; use numaflow_pb::clients::sourcetransformer::{ self, source_transform_client::SourceTransformClient, SourceTransformRequest, SourceTransformResponse, }; -use tokio::sync::{mpsc, oneshot}; +use tokio::sync::{mpsc, oneshot, Mutex}; use tokio_stream::wrappers::ReceiverStream; use tonic::transport::Channel; use tonic::{Request, Streaming}; @@ -28,6 +28,14 @@ struct ParentMessageInfo { pub(super) struct UserDefinedTransformer { read_tx: mpsc::Sender, senders: ResponseSenderMap, + task_handle: tokio::task::JoinHandle<()>, +} + +/// Aborts the background task when the UserDefinedTransformer is dropped. 
+impl Drop for UserDefinedTransformer { + fn drop(&mut self) { + self.task_handle.abort(); + } } impl UserDefinedTransformer { @@ -65,15 +73,19 @@ impl UserDefinedTransformer { // map to track the oneshot sender for each request along with the message info let sender_map = Arc::new(Mutex::new(HashMap::new())); + // background task to receive responses from the server and send them to the appropriate + // oneshot sender based on the message id + let task_handle = tokio::spawn(Self::receive_responses( + Arc::clone(&sender_map), + resp_stream, + )); + let transformer = Self { read_tx, - senders: Arc::clone(&sender_map), + senders: sender_map, + task_handle, }; - // background task to receive responses from the server and send them to the appropriate - // oneshot sender based on the message id - tokio::spawn(Self::receive_responses(sender_map, resp_stream)); - Ok(transformer) } @@ -83,29 +95,32 @@ impl UserDefinedTransformer { sender_map: ResponseSenderMap, mut resp_stream: Streaming, ) { - while let Some(resp) = resp_stream - .message() - .await - .expect("failed to receive response") - { + while let Some(resp) = match resp_stream.message().await { + Ok(message) => message, + Err(e) => { + let error = + Error::Transformer(format!("failed to receive transformer response: {}", e)); + let mut senders = sender_map.lock().await; + for (_, (_, sender)) in senders.drain() { + let _ = sender.send(Err(error.clone())); + } + None + } + } { let msg_id = resp.id; - if let Some((msg_info, sender)) = sender_map - .lock() - .expect("map entry should always be present") - .remove(&msg_id) - { + if let Some((msg_info, sender)) = sender_map.lock().await.remove(&msg_id) { let mut response_messages = vec![]; for (i, result) in resp.results.into_iter().enumerate() { let message = Message { id: MessageID { vertex_name: get_vertex_name().to_string().into(), index: i as i32, - offset: msg_info.offset.to_string().into(), + offset: msg_info.offset.clone().to_string().into(), }, keys: 
Arc::from(result.keys), tags: Some(Arc::from(result.tags)), value: result.value.into(), - offset: None, + offset: Some(msg_info.offset.clone()), event_time: utc_from_timestamp(result.event_time), headers: msg_info.headers.clone(), }; @@ -124,7 +139,12 @@ impl UserDefinedTransformer { message: Message, respond_to: oneshot::Sender>>, ) { - let msg_id = message.id.to_string(); + let key = message + .offset + .clone() + .expect("offset should be present") + .to_string(); + let msg_info = ParentMessageInfo { offset: message.offset.clone().expect("offset can never be none"), headers: message.headers.clone(), @@ -132,10 +152,13 @@ impl UserDefinedTransformer { self.senders .lock() - .unwrap() - .insert(msg_id, (msg_info, respond_to)); + .await + .insert(key, (msg_info, respond_to)); - self.read_tx.send(message.into()).await.unwrap(); + self.read_tx + .send(message.into()) + .await + .expect("failed to send message"); } } From faa27a6f2ec33ec93b3a542debef4eb6cead88ea Mon Sep 17 00:00:00 2001 From: Keran Yang Date: Mon, 23 Dec 2024 23:00:07 -0500 Subject: [PATCH 177/188] docs: update fallback sink docs (#2303) Signed-off-by: Keran Yang --- api/json-schema/schema.json | 2 +- api/openapi-spec/swagger.json | 2 +- docs/APIs.md | 7 ++- docs/user-guide/sinks/fallback.md | 48 ++++++++++++------- docs/user-guide/sinks/retry-strategy.md | 8 ++-- pkg/apis/numaflow/v1alpha1/generated.proto | 7 ++- pkg/apis/numaflow/v1alpha1/retry_strategy.go | 7 ++- .../numaflow/v1alpha1/zz_generated.openapi.go | 2 +- .../src/models/retry_strategy.rs | 2 +- 9 files changed, 58 insertions(+), 27 deletions(-) diff --git a/api/json-schema/schema.json b/api/json-schema/schema.json index 84ed58ed90..5255c9b514 100644 --- a/api/json-schema/schema.json +++ b/api/json-schema/schema.json @@ -21764,7 +21764,7 @@ "description": "BackOff specifies the parameters for the backoff strategy, controlling how delays between retries should increase." 
}, "onFailure": { - "description": "OnFailure specifies the action to take when a retry fails. The default action is to retry.", + "description": "OnFailure specifies the action to take when the specified retry strategy fails. The possible values are: 1. \"retry\": start another round of retrying the operation, 2. \"fallback\": re-route the operation to a fallback sink and 3. \"drop\": drop the operation and perform no further action. The default action is to retry.", "type": "string" } }, diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 19c65fe5cf..f85fb66f1e 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -21751,7 +21751,7 @@ "$ref": "#/definitions/io.numaproj.numaflow.v1alpha1.Backoff" }, "onFailure": { - "description": "OnFailure specifies the action to take when a retry fails. The default action is to retry.", + "description": "OnFailure specifies the action to take when the specified retry strategy fails. The possible values are: 1. \"retry\": start another round of retrying the operation, 2. \"fallback\": re-route the operation to a fallback sink and 3. \"drop\": drop the operation and perform no further action. The default action is to retry.", "type": "string" } } diff --git a/docs/APIs.md b/docs/APIs.md index bcda2eb8d4..fb07d8291d 100644 --- a/docs/APIs.md +++ b/docs/APIs.md @@ -9077,8 +9077,11 @@ OnFailureRetryStrategy (Optional)

-OnFailure specifies the action to take when a retry fails. The default -action is to retry. +OnFailure specifies the action to take when the specified retry strategy +fails. The possible values are: 1. “retry”: start another round of +retrying the operation, 2. “fallback”: re-route the operation to a +fallback sink and 3. “drop”: drop the operation and perform no further +action. The default action is to retry.

diff --git a/docs/user-guide/sinks/fallback.md b/docs/user-guide/sinks/fallback.md index 576206f1b2..af0da98f5a 100644 --- a/docs/user-guide/sinks/fallback.md +++ b/docs/user-guide/sinks/fallback.md @@ -1,25 +1,32 @@ # Fallback Sink -A `Fallback` Sink functions as a `Dead Letter Queue (DLQ)` Sink and can be configured to serve as a backup when the primary sink is down, -unavailable, or under maintenance. This is particularly useful when multiple sinks are in a pipeline; if a sink fails, the resulting -back-pressure will back-propagate and stop the source vertex from reading more data. A `Fallback` Sink can beset up to prevent this from happening. -This backup sink stores data while the primary sink is offline. The stored data can be replayed once the primary sink is back online. +A `Fallback` Sink functions as a `Dead Letter Queue (DLQ)` Sink. +It can be configured to serve as a backup sink when the primary sink fails processing messages. -Note: The `fallback` field is optional. +## The Use Case -Users are required to return a fallback response from the [user-defined sink](https://numaflow.numaproj.io/user-guide/sinks/user-defined-sinks/) when the primary sink fails; only -then the messages will be directed to the fallback sink. +Fallback Sink is useful to prevent back pressures caused by failed messages in the primary sink. -Example of a fallback response in a user-defined sink: [here](https://github.com/numaproj/numaflow-go/blob/main/pkg/sinker/examples/fallback/main.go) +In a pipeline without fallback sinks, if a sink fails to process certain messages, +the failed messages, by default, can get retried indefinitely, +causing back pressures propagated all the way back to the source vertex. +Eventually, the pipeline will be blocked, and no new messages will be processed. +A fallback sink can be set up to prevent this from happening, by storing the failed messages in a separate sink. 
-## CAVEATs -The `fallback` field can only be utilized when the primary sink is a `User Defined Sink.` +## Caveats +A fallback sink can only be configured when the primary sink is a user-defined sink. -## Example +## How to use -### Builtin Kafka -An example using builtin kafka as fallback sink: +To configure a fallback sink, +changes need to be made on both the pipeline specification and the user-defined sink implementation. + +### Step 1 - update the specification + +Add a `fallback` field to the sink configuration in the pipeline specification file. + +The following example uses the builtin kafka as a fallback sink. ```yaml - name: out @@ -34,10 +41,9 @@ An example using builtin kafka as fallback sink: - my-broker2:19700 topic: my-topic ``` -### UD Sink -An example using custom user-defined sink as fallback sink. -User Defined Sink as a fallback sink: +A fallback sink can also be a user-defined sink. + ```yaml - name: out sink: @@ -49,3 +55,13 @@ User Defined Sink as a fallback sink: container: image: my-sink:latest ``` +### Step 2 - update the user-defined sink implementation + +Code changes have to be made in the primary sink to generate either a **failed** response or a **fallback** response, +based on the use case. + +* a **failed** response gets processed following the [retry strategy](https://numaflow.numaproj.io/user-guide/sinks/retry-strategy/), and if the retry strategy is set to `fallback`, the message will be directed to the fallback sink after the retries are exhausted. +* a **fallback** response doesn't respect the sink retry strategy. It gets immediately directed to the fallback sink without getting retried. 
+ +SDK methods to generate either a fallback or a failed response in a primary user-defined sink can be found here: +[Golang](https://github.com/numaproj/numaflow-go/blob/main/pkg/sinker/types.go), [Java](https://github.com/numaproj/numaflow-java/blob/main/src/main/java/io/numaproj/numaflow/sinker/Response.java), [Python](https://github.com/numaproj/numaflow-python/blob/main/pynumaflow/sinker/_dtypes.py) diff --git a/docs/user-guide/sinks/retry-strategy.md b/docs/user-guide/sinks/retry-strategy.md index a5b2a7264b..486f623461 100644 --- a/docs/user-guide/sinks/retry-strategy.md +++ b/docs/user-guide/sinks/retry-strategy.md @@ -1,18 +1,20 @@ # Retry Strategy ### Overview + The `RetryStrategy` is used to configure the behavior for a sink after encountering failures during a write operation. This structure allows the user to specify how Numaflow should respond to different fail-over scenarios for Sinks, ensuring that the writing can be resilient and handle unexpected issues efficiently. +`RetryStrategy` ONLY gets applied to failed messages. To return a failed messages, use the methods provided by the SDKs. +- `ResponseFailure`for [Golang](https://github.com/numaproj/numaflow-go/blob/main/pkg/sinker/types.go) +- `responseFailure` for [Java](https://github.com/numaproj/numaflow-java/blob/main/src/main/java/io/numaproj/numaflow/sinker/Response.java#L40) +- `as_fallback` for [Python](https://github.com/numaproj/numaflow-python/blob/main/pynumaflow/sinker/_dtypes.py) ### Struct Explanation - `retryStrategy` is optional, and can be added to the Sink spec configurations where retry logic is necessary. 
- - ```yaml sink: retryStrategy: diff --git a/pkg/apis/numaflow/v1alpha1/generated.proto b/pkg/apis/numaflow/v1alpha1/generated.proto index 7b81e2235e..1a677e81af 100644 --- a/pkg/apis/numaflow/v1alpha1/generated.proto +++ b/pkg/apis/numaflow/v1alpha1/generated.proto @@ -1397,7 +1397,12 @@ message RetryStrategy { // +optional optional Backoff backoff = 1; - // OnFailure specifies the action to take when a retry fails. The default action is to retry. + // OnFailure specifies the action to take when the specified retry strategy fails. + // The possible values are: + // 1. "retry": start another round of retrying the operation, + // 2. "fallback": re-route the operation to a fallback sink and + // 3. "drop": drop the operation and perform no further action. + // The default action is to retry. // +optional // +kubebuilder:default="retry" optional string onFailure = 2; diff --git a/pkg/apis/numaflow/v1alpha1/retry_strategy.go b/pkg/apis/numaflow/v1alpha1/retry_strategy.go index 12c9daab4b..c21be28a57 100644 --- a/pkg/apis/numaflow/v1alpha1/retry_strategy.go +++ b/pkg/apis/numaflow/v1alpha1/retry_strategy.go @@ -36,7 +36,12 @@ type RetryStrategy struct { // BackOff specifies the parameters for the backoff strategy, controlling how delays between retries should increase. // +optional BackOff *Backoff `json:"backoff,omitempty" protobuf:"bytes,1,opt,name=backoff"` - // OnFailure specifies the action to take when a retry fails. The default action is to retry. + // OnFailure specifies the action to take when the specified retry strategy fails. + // The possible values are: + // 1. "retry": start another round of retrying the operation, + // 2. "fallback": re-route the operation to a fallback sink and + // 3. "drop": drop the operation and perform no further action. + // The default action is to retry. 
// +optional // +kubebuilder:default="retry" OnFailure *OnFailureRetryStrategy `json:"onFailure,omitempty" protobuf:"bytes,2,opt,name=onFailure"` diff --git a/pkg/apis/numaflow/v1alpha1/zz_generated.openapi.go b/pkg/apis/numaflow/v1alpha1/zz_generated.openapi.go index b5cff624f8..3b20e1948c 100644 --- a/pkg/apis/numaflow/v1alpha1/zz_generated.openapi.go +++ b/pkg/apis/numaflow/v1alpha1/zz_generated.openapi.go @@ -4524,7 +4524,7 @@ func schema_pkg_apis_numaflow_v1alpha1_RetryStrategy(ref common.ReferenceCallbac }, "onFailure": { SchemaProps: spec.SchemaProps{ - Description: "OnFailure specifies the action to take when a retry fails. The default action is to retry.", + Description: "OnFailure specifies the action to take when the specified retry strategy fails. The possible values are: 1. \"retry\": start another round of retrying the operation, 2. \"fallback\": re-route the operation to a fallback sink and 3. \"drop\": drop the operation and perform no further action. The default action is to retry.", Type: []string{"string"}, Format: "", }, diff --git a/rust/numaflow-models/src/models/retry_strategy.rs b/rust/numaflow-models/src/models/retry_strategy.rs index 0b1a52a654..22cfc4809a 100644 --- a/rust/numaflow-models/src/models/retry_strategy.rs +++ b/rust/numaflow-models/src/models/retry_strategy.rs @@ -22,7 +22,7 @@ limitations under the License. pub struct RetryStrategy { #[serde(rename = "backoff", skip_serializing_if = "Option::is_none")] pub backoff: Option>, - /// OnFailure specifies the action to take when a retry fails. The default action is to retry. + /// OnFailure specifies the action to take when the specified retry strategy fails. The possible values are: 1. \"retry\": start another round of retrying the operation, 2. \"fallback\": re-route the operation to a fallback sink and 3. \"drop\": drop the operation and perform no further action. The default action is to retry. 
#[serde(rename = "onFailure", skip_serializing_if = "Option::is_none")] pub on_failure: Option, } From d387d3fe5bbcc9c5a2f181cebfa2c0ef3b15eb87 Mon Sep 17 00:00:00 2001 From: Adarsh Jain Date: Wed, 25 Dec 2024 06:28:48 +0530 Subject: [PATCH 178/188] feat: enhance time range selector for metrics - include presets (#2292) Signed-off-by: adarsh0728 --- ui/package.json | 13 +- .../Metrics/partials/LineChart/index.tsx | 19 +- .../partials/common/Dropdown/index.tsx | 7 +- .../common/TimeRange/TimeSelector.css | 49 +++++ .../partials/common/TimeRange/index.tsx | 104 ++++----- ui/yarn.lock | 203 +++++++++++++++++- 6 files changed, 319 insertions(+), 76 deletions(-) create mode 100644 ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/common/TimeRange/TimeSelector.css diff --git a/ui/package.json b/ui/package.json index c65f7419e6..70a3986112 100644 --- a/ui/package.json +++ b/ui/package.json @@ -29,7 +29,7 @@ "@monaco-editor/react": "^4.5.2", "@mui/icons-material": "^5.6.2", "@mui/material": "^5.6.3", - "@mui/x-date-pickers": "^7.21.0", + "@mui/x-date-pickers": "^7.23.2", "@stardazed/streams-polyfill": "^2.4.0", "@testing-library/jest-dom": "^6.1.4", "@testing-library/react": "^14.0.0", @@ -37,9 +37,11 @@ "@types/d3-selection": "^3.0.2", "@types/dagre": "^0.7.47", "@types/jest": "^27.0.1", + "@types/jquery": "^3.5.32", "@types/lodash": "^4.14.195", "@types/node": "^16.7.13", "@types/react": "^18.0.0", + "@types/react-bootstrap-daterangepicker": "^7.0.0", "@types/react-dom": "^18.0.0", "@types/react-router-dom": "^5.3.3", "@types/react-test-renderer": "^18.0.0", @@ -48,15 +50,22 @@ "@visx/responsive": "^2.8.0", "@visx/shape": "^2.4.0", "@visx/tooltip": "^2.8.0", + "bootstrap": "^5.3.3", + "bootstrap-daterangepicker": "^3.1.0", "d3-color": "^3.1.0", "d3-scale": "^4.0.2", "d3-selection": "^3.0.0", "dagre": "^0.8.5", + "date-fns": "^4.1.0", "dayjs": "^1.11.13", - "moment": "^2.29.4", + "jquery": 
"^3.7.1", + "moment": "^2.30.1", "monaco-editor": "0.40.0", "msw": "^0.47.4", "react": "^18.0.0", + "react-bootstrap-daterangepicker": "^8.0.0", + "react-datetime": "^3.3.1", + "react-datetime-picker": "^6.0.1", "react-dom": "^18.0.0", "react-highlight-words": "^0.18.0", "react-json-view": "^1.21.3", diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/LineChart/index.tsx b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/LineChart/index.tsx index 462428cce6..9a8f4a4434 100644 --- a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/LineChart/index.tsx +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/LineChart/index.tsx @@ -12,10 +12,10 @@ import { import Box from "@mui/material/Box"; import CircularProgress from "@mui/material/CircularProgress"; import Dropdown from "../common/Dropdown"; -import TimeRange from "../common/TimeRange"; import FiltersDropdown from "../common/FiltersDropdown"; import EmptyChart from "../EmptyChart"; import { useMetricsFetch } from "../../../../../../../../../../../../../../../utils/fetchWrappers/metricsFetch"; +import TimeSelector from "../common/TimeRange"; // TODO have a check for metricReq against metric object to ensure required fields are passed const LineChartComponent = ({ @@ -188,6 +188,8 @@ const LineChartComponent = ({ if (paramsList?.length === 0) return <>; + const hasTimeParams = paramsList?.some((param) => ["start_time", "end_time"].includes(param.name)); + return ( ); })} - - {paramsList - ?.filter((param) => ["start_time", "end_time"]?.includes(param.name)) - ?.map((param: any) => { - return ( - - - - ); - })} + {hasTimeParams && ( + + + + )} {filtersList?.filter((filterEle: any) => 
!filterEle?.required)?.length > diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/common/Dropdown/index.tsx b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/common/Dropdown/index.tsx index 4f950836f5..4b2bb0c2a7 100644 --- a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/common/Dropdown/index.tsx +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/common/Dropdown/index.tsx @@ -39,7 +39,10 @@ const Dropdown = ({ }, [field, dimensionReverseMap, type, quantileOptions, durationOptions]); const [value, setValue] = useState(getInitialValue); - const fieldName = field.charAt(0).toUpperCase() + field.slice(1); + let fieldName = field.charAt(0).toUpperCase() + field.slice(1); + if (fieldName == "Duration"){ + fieldName = "Query Window" + } // Update metricsReq with the initial value useEffect(() => { @@ -105,7 +108,7 @@ const Dropdown = ({ setValue(e.target.value); setMetricReq((prev: any) => ({ ...prev, [field]: e.target.value })); }} - sx={{ fontSize: "1.6rem" }} + sx={{ fontSize: "1.6rem", height: '50px' }} > {getDropDownEntries} diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/common/TimeRange/TimeSelector.css b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/common/TimeRange/TimeSelector.css new file mode 100644 index 0000000000..f0ba66b5b0 --- /dev/null +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/common/TimeRange/TimeSelector.css @@ -0,0 +1,49 @@ +.date-range-label { + font-size: 1.4rem; + 
position: absolute; + top: -10px; /* Adjust as needed */ + left: 10px; /* Adjust as needed */ + font-size: 1.2rem; /* Adjust as needed */ + color: #999; /* Adjust color as needed */ + transition: 0.2s ease all; + background-color: white; /* Match the background of the input */ + padding: 0 5px; /* Add padding to prevent overlap with the border */ + z-index: 1; /* Ensure the label is above the input */ + } + +.date-range-picker-container { + position: relative; + display: inline-block; + } + + .date-input { + font-size: 1.6rem; + cursor: pointer; + outline: none; + border: 1px solid rgba(255, 255, 255, 0); + transition: border-color 0.3s; + } + + .date-input:hover { + border-color: #000000; + } + + .date-input:focus { + border-color: #3492EF; + outline: none; + box-shadow: none; + border-width: 2px; + } + .caret { + position: absolute; + right: 10px; + top: 50%; + transform: translateY(-50%); + pointer-events: none; + font-size: 1.6rem; + color: #00000060; + } + +.date-range-picker-container:focus-within .date-range-label { + color: #3492EF; /* Change to the desired color on focus */ +} \ No newline at end of file diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/common/TimeRange/index.tsx b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/common/TimeRange/index.tsx index 9f27417960..13415a96da 100644 --- a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/common/TimeRange/index.tsx +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/common/TimeRange/index.tsx @@ -1,62 +1,66 @@ -import { useCallback, useEffect, useMemo, useState } from "react"; -import Box from "@mui/material/Box"; -import { DateTimePicker, LocalizationProvider } from 
"@mui/x-date-pickers"; -import { AdapterDayjs } from "@mui/x-date-pickers/AdapterDayjs"; -import dayjs from "dayjs"; +import moment from 'moment'; +import React, { useState } from 'react'; +import 'bootstrap/dist/css/bootstrap.min.css'; +import 'bootstrap-daterangepicker/daterangepicker.css'; +import DateRangePicker from 'react-bootstrap-daterangepicker'; +import 'jquery'; +import './TimeSelector.css'; +import { ArrowDropDown } from '@mui/icons-material'; -export interface MetricTimeRangeProps { - field: string; +interface TimeSelectorProps { setMetricReq: any; } -const TimeRange = ({ field, setMetricReq }: MetricTimeRangeProps) => { - const getInitialValue = useMemo(() => { - switch (field) { - case "start_time": - return dayjs().subtract(1, "hour"); - case "end_time": - return dayjs(); - default: - return null; - } - }, [field]); +const TimeSelector = ({setMetricReq}: TimeSelectorProps) => { + const [startDate, setStartDate] = useState(moment().subtract(1, 'hour')); + const [endDate, setEndDate] = useState(moment()); - const [time, setTime] = useState(getInitialValue); + const handleCallback = (start: moment.Moment, end: moment.Moment) => { + setStartDate(start); + setEndDate(end); + setMetricReq((prev: any) => ({ + ...prev, + start_time: start.format(), + end_time: end.format() + })); + }; - // Update metricsReq with the initial value - useEffect(() => { - setMetricReq((prev: any) => ({ ...prev, [field]: getInitialValue })); - }, [getInitialValue, field, setMetricReq]); - - const handleTimeChange = useCallback( - (newValue: dayjs.Dayjs | null) => { - if (newValue && newValue.isValid()) { - setTime(newValue); - setMetricReq((prev: any) => ({ ...prev, [field]: newValue })); - } - }, - [setTime] - ); + const ranges: { [key: string]: [moment.Moment, moment.Moment] } = { + 'Last 10 Minutes': [moment().subtract(10, 'minutes'), moment()], + 'Last 30 Minutes': [moment().subtract(30, 'minutes'), moment()], + 'Last Hour': [moment().subtract(1, 'hour'), moment()], + 'Last 
2 Hours': [moment().subtract(2, 'hours'), moment()], + 'Last 6 Hours': [moment().subtract(6, 'hours'), moment()], + 'Last 12 Hours': [moment().subtract(12, 'hours'), moment()], + }; return ( - - - + + + - - + + +
); }; -export default TimeRange; +export default TimeSelector; diff --git a/ui/yarn.lock b/ui/yarn.lock index 5779331c0a..f3d7a1e6d2 100644 --- a/ui/yarn.lock +++ b/ui/yarn.lock @@ -1980,23 +1980,23 @@ prop-types "^15.8.1" react-is "^18.3.1" -"@mui/x-date-pickers@^7.21.0": - version "7.21.0" - resolved "https://registry.yarnpkg.com/@mui/x-date-pickers/-/x-date-pickers-7.21.0.tgz#78de7e81bdf863d443d7963777dfc3052ae3c320" - integrity sha512-WLpuTu3PvhYwd7IAJSuDWr1Zd8c5C8Cc7rpAYCaV5+tGBoEP0C2UKqClMR4F1wTiU2a7x3dzgQzkcgK72yyqDw== +"@mui/x-date-pickers@^7.23.2": + version "7.23.2" + resolved "https://registry.yarnpkg.com/@mui/x-date-pickers/-/x-date-pickers-7.23.2.tgz#9c447104f21a82abab17a954a4095ad2675a6800" + integrity sha512-Kt9VsEnShaBKiaastTYku66UIWptgc9UMA16d0G/0TkfIsvZrAD3iacQR6HHAXWspaFshdfsRmW2JAoFhzKZsg== dependencies: "@babel/runtime" "^7.25.7" "@mui/utils" "^5.16.6 || ^6.0.0" - "@mui/x-internals" "7.21.0" + "@mui/x-internals" "7.23.0" "@types/react-transition-group" "^4.4.11" clsx "^2.1.1" prop-types "^15.8.1" react-transition-group "^4.4.5" -"@mui/x-internals@7.21.0": - version "7.21.0" - resolved "https://registry.yarnpkg.com/@mui/x-internals/-/x-internals-7.21.0.tgz#daca984059015b27efdb47bb44dc7ff4a6816673" - integrity sha512-94YNyZ0BhK5Z+Tkr90RKf47IVCW8R/1MvdUhh6MCQg6sZa74jsX+x+gEZ4kzuCqOsuyTyxikeQ8vVuCIQiP7UQ== +"@mui/x-internals@7.23.0": + version "7.23.0" + resolved "https://registry.yarnpkg.com/@mui/x-internals/-/x-internals-7.23.0.tgz#3b1d0e47f1504cbd74c60b6a514eb18c108cc6dd" + integrity sha512-bPclKpqUiJYIHqmTxSzMVZi6MH51cQsn5U+8jskaTlo3J4QiMeCYJn/gn7YbeR9GOZFp8hetyHjoQoVHKRXCig== dependencies: "@babel/runtime" "^7.25.7" "@mui/utils" "^5.16.6 || ^6.0.0" @@ -2784,6 +2784,13 @@ jest-matcher-utils "^27.0.0" pretty-format "^27.0.0" +"@types/jquery@^3.5.32": + version "3.5.32" + resolved "https://registry.yarnpkg.com/@types/jquery/-/jquery-3.5.32.tgz#3eb0da20611b92c7c49ebed6163b52a4fdc57def" + integrity 
sha512-b9Xbf4CkMqS02YH8zACqN1xzdxc3cO735Qe5AbSUFmyOiaWAbcpqh9Wna+Uk0vgACvoQHpWDg2rGdHkYPLmCiQ== + dependencies: + "@types/sizzle" "*" + "@types/js-levenshtein@^1.1.1": version "1.1.3" resolved "https://registry.yarnpkg.com/@types/js-levenshtein/-/js-levenshtein-1.1.3.tgz#a6fd0bdc8255b274e5438e0bfb25f154492d1106" @@ -2868,6 +2875,13 @@ resolved "https://registry.yarnpkg.com/@types/range-parser/-/range-parser-1.2.7.tgz#50ae4353eaaddc04044279812f52c8c65857dbcb" integrity sha512-hKormJbkJqzQGhziax5PItDUTMAM9uE2XXQmM37dyd4hVM+5aVl7oVxMVUiVQn2oCQFN/LKCZdvSM0pFRqbSmQ== +"@types/react-bootstrap-daterangepicker@^7.0.0": + version "7.0.0" + resolved "https://registry.yarnpkg.com/@types/react-bootstrap-daterangepicker/-/react-bootstrap-daterangepicker-7.0.0.tgz#445ddaaa5e38c7d98130ed9c43d4c391c027cc4a" + integrity sha512-x6d7FBbW6kDNI84UgB+l1GpPiRRPuHaXokpc+JHayvoCFVRYwB1NHCD9n0JsC9aUA1Zuvgy4Mo4wjgK9g+fDsg== + dependencies: + react-bootstrap-daterangepicker "*" + "@types/react-dom@*", "@types/react-dom@^18.0.0": version "18.3.0" resolved "https://registry.yarnpkg.com/@types/react-dom/-/react-dom-18.3.0.tgz#0cbc818755d87066ab6ca74fbedb2547d74a82b0" @@ -2969,6 +2983,11 @@ dependencies: "@types/node" "*" +"@types/sizzle@*": + version "2.3.9" + resolved "https://registry.yarnpkg.com/@types/sizzle/-/sizzle-2.3.9.tgz#d4597dbd4618264c414d7429363e3f50acb66ea2" + integrity sha512-xzLEyKB50yqCUPUJkIsrVvoWNfFUbIZI+RspLWt8u+tIW/BetMBZtgV2LY/2o+tYH8dRvQ+eoPf3NdhQCcLE2w== + "@types/sockjs@^0.3.33": version "0.3.36" resolved "https://registry.yarnpkg.com/@types/sockjs/-/sockjs-0.3.36.tgz#ce322cf07bcc119d4cbf7f88954f3a3bd0f67535" @@ -3320,6 +3339,11 @@ "@webassemblyjs/ast" "1.12.1" "@xtuc/long" "4.2.2" +"@wojtekmaj/date-utils@^1.1.3", "@wojtekmaj/date-utils@^1.5.0": + version "1.5.1" + resolved "https://registry.yarnpkg.com/@wojtekmaj/date-utils/-/date-utils-1.5.1.tgz#c3cd67177ac781cfa5736219d702a55a2aea5f2b" + integrity 
sha512-+i7+JmNiE/3c9FKxzWFi2IjRJ+KzZl1QPu6QNrsgaa2MuBgXvUy4gA1TVzf/JMdIIloB76xSKikTWuyYAIVLww== + "@xmldom/xmldom@^0.8.3": version "0.8.10" resolved "https://registry.yarnpkg.com/@xmldom/xmldom/-/xmldom-0.8.10.tgz#a1337ca426aa61cef9fe15b5b28e340a72f6fa99" @@ -3945,6 +3969,19 @@ boolbase@^1.0.0: resolved "https://registry.yarnpkg.com/boolbase/-/boolbase-1.0.0.tgz#68dff5fbe60c51eb37725ea9e3ed310dcc1e776e" integrity sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww== +bootstrap-daterangepicker@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/bootstrap-daterangepicker/-/bootstrap-daterangepicker-3.1.0.tgz#632e6fb2de4b6360c5c0a9d5f6adb9aace051fe8" + integrity sha512-oaQZx6ZBDo/dZNyXGVi2rx5GmFXThyQLAxdtIqjtLlYVaQUfQALl5JZMJJZzyDIX7blfy4ppZPAJ10g8Ma4d/g== + dependencies: + jquery ">=1.10" + moment "^2.9.0" + +bootstrap@^5.3.3: + version "5.3.3" + resolved "https://registry.yarnpkg.com/bootstrap/-/bootstrap-5.3.3.tgz#de35e1a765c897ac940021900fcbb831602bac38" + integrity sha512-8HLCdWgyoMguSO9o+aH+iuZ+aht+mzW0u3HIMzVu7Srrpv7EBBxTnrFlSCskwdY1+EOFQSm7uMJhNQHkdPcmjg== + brace-expansion@^1.1.7: version "1.1.11" resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd" @@ -4894,6 +4931,11 @@ data-view-byte-offset@^1.0.0: es-errors "^1.3.0" is-data-view "^1.0.1" +date-fns@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/date-fns/-/date-fns-4.1.0.tgz#64b3d83fff5aa80438f5b1a633c2e83b8a1c2d14" + integrity sha512-Ukq0owbQXxa/U3EGtsdVBkR1w7KOQ5gIBqdH2hkvknzZPYvBxb/aa6E8L7tmjFtkwZBu3UXBbjIgPo/Ez4xaNg== + dayjs@^1.11.13: version "1.11.13" resolved "https://registry.yarnpkg.com/dayjs/-/dayjs-1.11.13.tgz#92430b0139055c3ebb60150aa13e860a4b5a366c" @@ -5036,6 +5078,11 @@ destroy@1.2.0: resolved "https://registry.yarnpkg.com/destroy/-/destroy-1.2.0.tgz#4803735509ad8be552934c67df614f94e66fa015" integrity 
sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg== +detect-element-overflow@^1.4.0: + version "1.4.2" + resolved "https://registry.yarnpkg.com/detect-element-overflow/-/detect-element-overflow-1.4.2.tgz#2e48509e5aa07647f4335b5f4f52c146b92f99c5" + integrity sha512-4m6cVOtvm/GJLjo7WFkPfwXoEIIbM7GQwIh4WEa4g7IsNi1YzwUsGL5ApNLrrHL29bHeNeQ+/iZhw+YHqgE2Fw== + detect-newline@^3.0.0: version "3.1.0" resolved "https://registry.yarnpkg.com/detect-newline/-/detect-newline-3.1.0.tgz#576f5dfc63ae1a192ff192d8ad3af6308991b651" @@ -6287,6 +6334,13 @@ get-symbol-description@^1.0.2: es-errors "^1.3.0" get-intrinsic "^1.2.4" +get-user-locale@^2.2.1: + version "2.3.2" + resolved "https://registry.yarnpkg.com/get-user-locale/-/get-user-locale-2.3.2.tgz#d37ae6e670c2b57d23a96fb4d91e04b2059d52cf" + integrity sha512-O2GWvQkhnbDoWFUJfaBlDIKUEdND8ATpBXD6KXcbhxlfktyD/d8w6mkzM/IlQEqGZAMz/PW6j6Hv53BiigKLUQ== + dependencies: + mem "^8.0.0" + glob-parent@^5.1.2, glob-parent@~5.1.2: version "5.1.2" resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-5.1.2.tgz#869832c58034fe68a4093c17dc15e8340d8401c4" @@ -7672,6 +7726,11 @@ jiti@^1.21.0: resolved "https://registry.yarnpkg.com/jiti/-/jiti-1.21.6.tgz#6c7f7398dd4b3142767f9a168af2f317a428d268" integrity sha512-2yTgeWTWzMWkHu6Jp9NKgePDaYHbntiwvYuuJLbbN9vl7DC9DvXKOB2BC3ZZ92D3cvV/aflH0osDfwpHepQ53w== +jquery@>=1.10, jquery@^3.7.1: + version "3.7.1" + resolved "https://registry.yarnpkg.com/jquery/-/jquery-3.7.1.tgz#083ef98927c9a6a74d05a6af02806566d16274de" + integrity sha512-m4avr8yL8kmFN8psrbFFFmB/If14iN5o9nw/NgnnM+kybDJpRsAynV2BsfpTYrTRysYUdADVD7CkUUizgkpLfg== + js-levenshtein@^1.1.6: version "1.1.6" resolved "https://registry.yarnpkg.com/js-levenshtein/-/js-levenshtein-1.1.6.tgz#c6cee58eb3550372df8deb85fad5ce66ce01d59d" @@ -8109,6 +8168,11 @@ make-dir@^4.0.0: dependencies: semver "^7.5.3" +make-event-props@^1.6.0: + version "1.6.2" + resolved 
"https://registry.yarnpkg.com/make-event-props/-/make-event-props-1.6.2.tgz#c8e0e48eb28b9b808730de38359f6341de7ec5a2" + integrity sha512-iDwf7mA03WPiR8QxvcVHmVWEPfMY1RZXerDVNCRYW7dUr2ppH3J58Rwb39/WG39yTZdRSxr3x+2v22tvI0VEvA== + makeerror@1.0.12: version "1.0.12" resolved "https://registry.yarnpkg.com/makeerror/-/makeerror-1.0.12.tgz#3e5dd2079a82e812e983cc6610c4a2cb0eaa801a" @@ -8116,6 +8180,13 @@ makeerror@1.0.12: dependencies: tmpl "1.0.5" +map-age-cleaner@^0.1.3: + version "0.1.3" + resolved "https://registry.yarnpkg.com/map-age-cleaner/-/map-age-cleaner-0.1.3.tgz#7d583a7306434c055fe474b0f45078e6e1b4b92a" + integrity sha512-bJzx6nMoP6PDLPBFmg7+xRKeFZvFboMrGlxmNj9ClvX53KrmvM5bXFXEWjbz4cz1AFn+jWJ9z/DJSz7hrs0w3w== + dependencies: + p-defer "^1.0.0" + mdn-data@2.0.14: version "2.0.14" resolved "https://registry.yarnpkg.com/mdn-data/-/mdn-data-2.0.14.tgz#7113fc4281917d63ce29b43446f701e68c25ba50" @@ -8131,6 +8202,14 @@ media-typer@0.3.0: resolved "https://registry.yarnpkg.com/media-typer/-/media-typer-0.3.0.tgz#8710d7af0aa626f8fffa1ce00168545263255748" integrity sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ== +mem@^8.0.0: + version "8.1.1" + resolved "https://registry.yarnpkg.com/mem/-/mem-8.1.1.tgz#cf118b357c65ab7b7e0817bdf00c8062297c0122" + integrity sha512-qFCFUDs7U3b8mBDPyz5EToEKoAkgCzqquIgi9nkkR9bixxOVOre+09lbuH7+9Kn2NFpm56M3GUWVbU2hQgdACA== + dependencies: + map-age-cleaner "^0.1.3" + mimic-fn "^3.1.0" + memfs@^3.1.2, memfs@^3.4.3: version "3.6.0" resolved "https://registry.yarnpkg.com/memfs/-/memfs-3.6.0.tgz#d7a2110f86f79dd950a8b6df6d57bc984aa185f6" @@ -8198,6 +8277,11 @@ mimic-fn@^2.1.0: resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-2.1.0.tgz#7ed2c2ccccaf84d3ffcb7a69b57711fc2083401b" integrity sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg== +mimic-fn@^3.1.0: + version "3.1.0" + resolved 
"https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-3.1.0.tgz#65755145bbf3e36954b949c16450427451d5ca74" + integrity sha512-Ysbi9uYW9hFyfrThdDEQuykN4Ey6BuwPD2kpI5ES/nFTDn/98yxYNLZJcgUAKPT/mcrLLKaGzJR9YVxJrIdASQ== + min-indent@^1.0.0: version "1.0.1" resolved "https://registry.yarnpkg.com/min-indent/-/min-indent-1.0.1.tgz#a63f681673b30571fbe8bc25686ae746eefa9869" @@ -8259,7 +8343,7 @@ mkdirp@~0.5.1: dependencies: minimist "^1.2.6" -moment@^2.29.4: +moment@^2.30.1, moment@^2.9.0: version "2.30.1" resolved "https://registry.yarnpkg.com/moment/-/moment-2.30.1.tgz#f8c91c07b7a786e30c59926df530b4eac96974ae" integrity sha512-uEmtNhbDOrWPFS+hdjFCBfy9f2YoyzRpwcl+DqpC6taX21FzsTLQVbMV/W7PzNSX6x/bhC1zA3c2UQ5NzH6how== @@ -8598,6 +8682,11 @@ outvariant@^1.2.1, outvariant@^1.3.0: resolved "https://registry.yarnpkg.com/outvariant/-/outvariant-1.4.3.tgz#221c1bfc093e8fec7075497e7799fdbf43d14873" integrity sha512-+Sl2UErvtsoajRDKCE5/dBz4DIvHXQQnAxtQTF04OJxY0+DyZXSo5P5Bb7XYWOh81syohlYL24hbDwxedPUJCA== +p-defer@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/p-defer/-/p-defer-1.0.0.tgz#9f6eb182f6c9aa8cd743004a7d4f96b196b0fb0c" + integrity sha512-wB3wfAxZpk2AzOfUMJNL+d36xothRSyj8EXOa4f6GMqYDN9BJaaSISbsk+wS9abmnebVw95C2Kb5t85UmpCxuw== + p-limit@^2.0.0, p-limit@^2.2.0: version "2.3.0" resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-2.3.0.tgz#3dd33c647a214fdfffd835933eb086da0dc21db1" @@ -9446,7 +9535,7 @@ prompts@^2.0.1, prompts@^2.4.2: kleur "^3.0.3" sisteransi "^1.0.5" -prop-types@^15.5.10, prop-types@^15.5.8, prop-types@^15.6.1, prop-types@^15.6.2, prop-types@^15.7.2, prop-types@^15.8.1: +prop-types@^15.5.10, prop-types@^15.5.7, prop-types@^15.5.8, prop-types@^15.6.1, prop-types@^15.6.2, prop-types@^15.7.2, prop-types@^15.8.1: version "15.8.1" resolved "https://registry.yarnpkg.com/prop-types/-/prop-types-15.8.1.tgz#67d87bf1a694f48435cf332c24af10214a3140b5" integrity sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg== 
@@ -9551,6 +9640,65 @@ react-base16-styling@^0.6.0: lodash.flow "^3.3.0" pure-color "^1.2.0" +react-bootstrap-daterangepicker@*, react-bootstrap-daterangepicker@^8.0.0: + version "8.0.0" + resolved "https://registry.yarnpkg.com/react-bootstrap-daterangepicker/-/react-bootstrap-daterangepicker-8.0.0.tgz#5c60670fae3cf9193fa274e4e12d9c878cb40d63" + integrity sha512-zwEMHq93/a0f2C2Cc/Q1zxN+jYWF4JsWEwVkJ2xVGp++Oc3Ck/fI2F9kiEqY1n8oKV0WFT4+cTcoagG7sWuXXw== + +react-calendar@^5.0.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/react-calendar/-/react-calendar-5.1.0.tgz#f5d3342a872cbb8907099ca5651bc936046033b8" + integrity sha512-09o/rQHPZGEi658IXAJtWfra1N69D1eFnuJ3FQm9qUVzlzNnos1+GWgGiUeSs22QOpNm32aoVFOimq0p3Ug9Eg== + dependencies: + "@wojtekmaj/date-utils" "^1.1.3" + clsx "^2.0.0" + get-user-locale "^2.2.1" + warning "^4.0.0" + +react-clock@^5.0.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/react-clock/-/react-clock-5.1.0.tgz#e47425ebb6cdfdda4741441576b2c386e17c3a19" + integrity sha512-DKmr29VOK6M8wpbzGUZZa9PwGnG9uC6QXtDLwGwcc2r3vdS/HxNhf5xMMjudXLk7m096mNJQf7AgfjiDpzAYYw== + dependencies: + "@wojtekmaj/date-utils" "^1.5.0" + clsx "^2.0.0" + get-user-locale "^2.2.1" + +react-date-picker@^11.0.0: + version "11.0.0" + resolved "https://registry.yarnpkg.com/react-date-picker/-/react-date-picker-11.0.0.tgz#f7dc25e9a679f94ad44f11644ea0fdc541be1834" + integrity sha512-l+siu5HSZ/ciGL1293KCAHl4o9aD5rw16V4tB0C43h7QbMv2dWGgj7Dxgt8iztLaPVtEfOt/+sxNiTYw4WVq6A== + dependencies: + "@wojtekmaj/date-utils" "^1.1.3" + clsx "^2.0.0" + get-user-locale "^2.2.1" + make-event-props "^1.6.0" + react-calendar "^5.0.0" + react-fit "^2.0.0" + update-input-width "^1.4.0" + +react-datetime-picker@^6.0.1: + version "6.0.1" + resolved "https://registry.yarnpkg.com/react-datetime-picker/-/react-datetime-picker-6.0.1.tgz#7a88ba84cdccd5096029965966723d74a0461535" + integrity sha512-G7W8bK0SLuO66RVWYGD2q1bD4Wk4pUOpJCq9r44A4P33uq0aAtd3dT1HNEu2fvlmMpYxC4J571ZPI9bUG46pDA== + 
dependencies: + "@wojtekmaj/date-utils" "^1.1.3" + clsx "^2.0.0" + get-user-locale "^2.2.1" + make-event-props "^1.6.0" + react-calendar "^5.0.0" + react-clock "^5.0.0" + react-date-picker "^11.0.0" + react-fit "^2.0.0" + react-time-picker "^7.0.0" + +react-datetime@^3.3.1: + version "3.3.1" + resolved "https://registry.yarnpkg.com/react-datetime/-/react-datetime-3.3.1.tgz#60870ef7cb70f3a98545385e068f16344a50b1db" + integrity sha512-CMgQFLGidYu6CAlY6S2Om2UZiTfZsjC6j4foXcZ0kb4cSmPomdJ2S1PhK0v3fwflGGVuVARGxwkEUWtccHapJA== + dependencies: + prop-types "^15.5.7" + react-dev-utils@^12.0.1: version "12.0.1" resolved "https://registry.yarnpkg.com/react-dev-utils/-/react-dev-utils-12.0.1.tgz#ba92edb4a1f379bd46ccd6bcd4e7bc398df33e73" @@ -9594,6 +9742,14 @@ react-error-overlay@^6.0.11: resolved "https://registry.yarnpkg.com/react-error-overlay/-/react-error-overlay-6.0.11.tgz#92835de5841c5cf08ba00ddd2d677b6d17ff9adb" integrity sha512-/6UZ2qgEyH2aqzYZgQPxEnz33NJ2gNsnHA2o5+o4wW9bLM/JYQitNP9xPhsXwC08hMMovfGe/8retsdDsczPRg== +react-fit@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/react-fit/-/react-fit-2.0.1.tgz#4bcb4de7aa94c9fdf452b0c63de0889496f50244" + integrity sha512-Eip6ALs/+6Jv82Si0I9UnfysdwVlAhkkZRycgmMdnj7jwUg69SVFp84ICxwB8zszkfvJJ2MGAAo9KAYM8ZUykQ== + dependencies: + detect-element-overflow "^1.4.0" + warning "^4.0.0" + react-highlight-words@^0.18.0: version "0.18.0" resolved "https://registry.yarnpkg.com/react-highlight-words/-/react-highlight-words-0.18.0.tgz#ff3b3ef7cb497fa2e8fa4d54c1a1a98ac6390d0e" @@ -9756,6 +9912,19 @@ react-textarea-autosize@^8.3.2: use-composed-ref "^1.3.0" use-latest "^1.2.1" +react-time-picker@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/react-time-picker/-/react-time-picker-7.0.0.tgz#3f9d80d2de8a8ffc57c41dd71095477f6a7ffc03" + integrity sha512-k6mUjkI+OsY73mg0yjMxqkLXv/UXR1LN7AARNqfyGZOwqHqo1JrjL3lLHTHWQ86HmPTBL/dZACbIX/fV1NLmWg== + dependencies: + "@wojtekmaj/date-utils" "^1.1.3" + clsx "^2.0.0" + 
get-user-locale "^2.2.1" + make-event-props "^1.6.0" + react-clock "^5.0.0" + react-fit "^2.0.0" + update-input-width "^1.4.0" + react-toastify@^9.1.1: version "9.1.3" resolved "https://registry.yarnpkg.com/react-toastify/-/react-toastify-9.1.3.tgz#1e798d260d606f50e0fab5ee31daaae1d628c5ff" @@ -11266,6 +11435,11 @@ update-browserslist-db@^1.1.0: escalade "^3.1.2" picocolors "^1.0.1" +update-input-width@^1.4.0: + version "1.4.2" + resolved "https://registry.yarnpkg.com/update-input-width/-/update-input-width-1.4.2.tgz#49d327a39395185b0fd440b9c3b1d6f81173655c" + integrity sha512-/p0XLhrQQQ4bMWD7bL9duYObwYCO1qGr8R19xcMmoMSmXuQ7/1//veUnCObQ7/iW6E2pGS6rFkS4TfH4ur7e/g== + uri-js@^4.2.2: version "4.4.1" resolved "https://registry.yarnpkg.com/uri-js/-/uri-js-4.4.1.tgz#9b1a52595225859e55f669d928f88c6c57f2a77e" @@ -11409,6 +11583,13 @@ walker@^1.0.7: dependencies: makeerror "1.0.12" +warning@^4.0.0: + version "4.0.3" + resolved "https://registry.yarnpkg.com/warning/-/warning-4.0.3.tgz#16e9e077eb8a86d6af7d64aa1e05fd85b4678ca3" + integrity sha512-rpJyN222KWIvHJ/F53XSZv0Zl/accqHR8et1kpaMTD/fLCRxtV8iX8czMzY7sVZupTI3zcUTg8eycS2kNF9l6w== + dependencies: + loose-envify "^1.0.0" + watchpack@^2.4.1: version "2.4.1" resolved "https://registry.yarnpkg.com/watchpack/-/watchpack-2.4.1.tgz#29308f2cac150fa8e4c92f90e0ec954a9fed7fff" From 677cc59dc6ba078222cdaaf43394d3fd427acc05 Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Tue, 24 Dec 2024 17:25:28 -0800 Subject: [PATCH 179/188] chore: use domain-qualified finalizer name (#2304) Signed-off-by: Derek Wang --- pkg/reconciler/isbsvc/controller.go | 10 ++++++++-- pkg/reconciler/pipeline/controller.go | 11 +++++++++-- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/pkg/reconciler/isbsvc/controller.go b/pkg/reconciler/isbsvc/controller.go index 7b04913d17..e1998f7410 100644 --- a/pkg/reconciler/isbsvc/controller.go +++ b/pkg/reconciler/isbsvc/controller.go @@ -41,7 +41,9 @@ import ( ) const ( - finalizerName = 
dfv1.ControllerISBSvc + finalizerName = "numaflow.numaproj.io/" + dfv1.ControllerISBSvc + // TODO: clean up the deprecated finalizer in v1.7 + deprecatedFinalizerName = dfv1.ControllerISBSvc ) // interStepBufferReconciler reconciles an Inter-Step Buffer Service object. @@ -97,7 +99,7 @@ func (r *interStepBufferServiceReconciler) reconcile(ctx context.Context, isbSvc log := logging.FromContext(ctx) if !isbSvc.DeletionTimestamp.IsZero() { log.Info("Deleting ISB Service") - if controllerutil.ContainsFinalizer(isbSvc, finalizerName) { + if controllerutil.ContainsFinalizer(isbSvc, finalizerName) || controllerutil.ContainsFinalizer(isbSvc, deprecatedFinalizerName) { // Finalizer logic should be added here. if err := installer.Uninstall(ctx, isbSvc, r.client, r.kubeClient, r.config, log, r.recorder); err != nil { log.Errorw("Failed to uninstall", zap.Error(err)) @@ -105,11 +107,15 @@ func (r *interStepBufferServiceReconciler) reconcile(ctx context.Context, isbSvc return err } controllerutil.RemoveFinalizer(isbSvc, finalizerName) + controllerutil.RemoveFinalizer(isbSvc, deprecatedFinalizerName) // Clean up metrics _ = reconciler.ISBSvcHealth.DeleteLabelValues(isbSvc.Namespace, isbSvc.Name) } return nil } + if controllerutil.ContainsFinalizer(isbSvc, deprecatedFinalizerName) { // Remove deprecated finalizer if exists + controllerutil.RemoveFinalizer(isbSvc, deprecatedFinalizerName) + } if needsFinalizer(isbSvc) { controllerutil.AddFinalizer(isbSvc, finalizerName) } diff --git a/pkg/reconciler/pipeline/controller.go b/pkg/reconciler/pipeline/controller.go index cb7e7a2bfd..8b7a7c8ad0 100644 --- a/pkg/reconciler/pipeline/controller.go +++ b/pkg/reconciler/pipeline/controller.go @@ -52,7 +52,9 @@ import ( ) const ( - finalizerName = dfv1.ControllerPipeline + finalizerName = "numaflow.numaproj.io/" + dfv1.ControllerPipeline + // TODO: clean up the deprecated finalizer in v1.7 + deprecatedFinalizerName = dfv1.ControllerPipeline pauseTimestampPath = 
`/metadata/annotations/numaflow.numaproj.io~1pause-timestamp` ) @@ -111,7 +113,7 @@ func (r *pipelineReconciler) reconcile(ctx context.Context, pl *dfv1.Pipeline) ( log := logging.FromContext(ctx) if !pl.DeletionTimestamp.IsZero() { log.Info("Deleting pipeline") - if controllerutil.ContainsFinalizer(pl, finalizerName) { + if controllerutil.ContainsFinalizer(pl, finalizerName) || controllerutil.ContainsFinalizer(pl, deprecatedFinalizerName) { if time.Now().Before(pl.DeletionTimestamp.Add(time.Duration(pl.GetTerminationGracePeriodSeconds()) * time.Second)) { safeToDelete, err := r.safeToDelete(ctx, pl) if err != nil { @@ -135,6 +137,7 @@ func (r *pipelineReconciler) reconcile(ctx context.Context, pl *dfv1.Pipeline) ( } controllerutil.RemoveFinalizer(pl, finalizerName) + controllerutil.RemoveFinalizer(pl, deprecatedFinalizerName) // Clean up metrics _ = reconciler.PipelineHealth.DeleteLabelValues(pl.Namespace, pl.Name) // Delete corresponding vertex metrics @@ -155,6 +158,10 @@ func (r *pipelineReconciler) reconcile(ctx context.Context, pl *dfv1.Pipeline) ( pl.Status.InitConditions() pl.Status.SetObservedGeneration(pl.Generation) + if controllerutil.ContainsFinalizer(pl, deprecatedFinalizerName) { // Remove deprecated finalizer if exists + controllerutil.RemoveFinalizer(pl, deprecatedFinalizerName) + } + if !controllerutil.ContainsFinalizer(pl, finalizerName) { controllerutil.AddFinalizer(pl, finalizerName) } From bb4a0de61da60dc105439943ca681a3360b42df9 Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Wed, 25 Dec 2024 11:44:20 -0800 Subject: [PATCH 180/188] chore: cascade deletion on isbsvc pvc (#2305) --- pkg/apis/numaflow/v1alpha1/deprecated.go | 11 +++++++++++ .../numaflow/v1alpha1/jetstream_buffer_service.go | 4 ++++ .../v1alpha1/jetstream_buffer_service_test.go | 4 ++++ pkg/apis/numaflow/v1alpha1/redis_buffer_service.go | 4 ++++ .../numaflow/v1alpha1/redis_buffer_service_test.go | 4 ++++ pkg/reconciler/isbsvc/installer/jetstream.go | 6 +++++- 
pkg/reconciler/isbsvc/installer/native_redis.go | 6 +++++- 7 files changed, 37 insertions(+), 2 deletions(-) diff --git a/pkg/apis/numaflow/v1alpha1/deprecated.go b/pkg/apis/numaflow/v1alpha1/deprecated.go index 9fd3152115..978c8d60b7 100644 --- a/pkg/apis/numaflow/v1alpha1/deprecated.go +++ b/pkg/apis/numaflow/v1alpha1/deprecated.go @@ -31,3 +31,14 @@ func isSidecarSupported() bool { k8sVersion, _ := strconv.ParseFloat(v, 32) return k8sVersion >= 1.29 } + +// TODO: (k8s 1.27) Remove this once we deprecate the support for k8s < 1.27 +func IsPVCRetentionPolicySupported() bool { + v := os.Getenv(EnvK8sServerVersion) + if v == "" { + return true // default to true if the env var is not found + } + // e.g. 1.31 + k8sVersion, _ := strconv.ParseFloat(v, 32) + return k8sVersion >= 1.27 +} diff --git a/pkg/apis/numaflow/v1alpha1/jetstream_buffer_service.go b/pkg/apis/numaflow/v1alpha1/jetstream_buffer_service.go index cd0a1440df..b39d78ea65 100644 --- a/pkg/apis/numaflow/v1alpha1/jetstream_buffer_service.go +++ b/pkg/apis/numaflow/v1alpha1/jetstream_buffer_service.go @@ -255,6 +255,10 @@ func (j JetStreamBufferService) GetStatefulSetSpec(req GetJetStreamStatefulSetSp } j.AbstractPodTemplate.ApplyToPodSpec(podSpec) spec := appv1.StatefulSetSpec{ + PersistentVolumeClaimRetentionPolicy: &appv1.StatefulSetPersistentVolumeClaimRetentionPolicy{ + WhenDeleted: appv1.DeletePersistentVolumeClaimRetentionPolicyType, + WhenScaled: appv1.RetainPersistentVolumeClaimRetentionPolicyType, + }, PodManagementPolicy: appv1.ParallelPodManagement, Replicas: &replicas, ServiceName: req.ServiceName, diff --git a/pkg/apis/numaflow/v1alpha1/jetstream_buffer_service_test.go b/pkg/apis/numaflow/v1alpha1/jetstream_buffer_service_test.go index becf05d86a..de87ab6ec5 100644 --- a/pkg/apis/numaflow/v1alpha1/jetstream_buffer_service_test.go +++ b/pkg/apis/numaflow/v1alpha1/jetstream_buffer_service_test.go @@ -20,6 +20,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + appv1 
"k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" ) @@ -74,6 +75,9 @@ func TestJetStreamGetStatefulSetSpec(t *testing.T) { }, } spec := s.GetStatefulSetSpec(req) + assert.NotNil(t, spec.PersistentVolumeClaimRetentionPolicy) + assert.Equal(t, appv1.DeletePersistentVolumeClaimRetentionPolicyType, spec.PersistentVolumeClaimRetentionPolicy.WhenDeleted) + assert.Equal(t, appv1.RetainPersistentVolumeClaimRetentionPolicyType, spec.PersistentVolumeClaimRetentionPolicy.WhenScaled) assert.True(t, len(spec.VolumeClaimTemplates) > 0) }) diff --git a/pkg/apis/numaflow/v1alpha1/redis_buffer_service.go b/pkg/apis/numaflow/v1alpha1/redis_buffer_service.go index 258388ab09..c9632cecad 100644 --- a/pkg/apis/numaflow/v1alpha1/redis_buffer_service.go +++ b/pkg/apis/numaflow/v1alpha1/redis_buffer_service.go @@ -338,6 +338,10 @@ redis_exporter`}, nr.AbstractPodTemplate.ApplyToPodSpec(podSpec) spec := appv1.StatefulSetSpec{ + PersistentVolumeClaimRetentionPolicy: &appv1.StatefulSetPersistentVolumeClaimRetentionPolicy{ + WhenDeleted: appv1.DeletePersistentVolumeClaimRetentionPolicyType, + WhenScaled: appv1.RetainPersistentVolumeClaimRetentionPolicyType, + }, Replicas: &replicas, ServiceName: req.ServiceName, Selector: &metav1.LabelSelector{ diff --git a/pkg/apis/numaflow/v1alpha1/redis_buffer_service_test.go b/pkg/apis/numaflow/v1alpha1/redis_buffer_service_test.go index 0fd1c821fd..348fce305f 100644 --- a/pkg/apis/numaflow/v1alpha1/redis_buffer_service_test.go +++ b/pkg/apis/numaflow/v1alpha1/redis_buffer_service_test.go @@ -20,6 +20,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + appv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" ) @@ -66,6 +67,9 @@ func TestRedisGetStatefulSetSpec(t *testing.T) { }, } spec := s.GetStatefulSetSpec(req) + assert.NotNil(t, spec.PersistentVolumeClaimRetentionPolicy) + assert.Equal(t, appv1.DeletePersistentVolumeClaimRetentionPolicyType, 
spec.PersistentVolumeClaimRetentionPolicy.WhenDeleted) + assert.Equal(t, appv1.RetainPersistentVolumeClaimRetentionPolicyType, spec.PersistentVolumeClaimRetentionPolicy.WhenScaled) assert.True(t, len(spec.VolumeClaimTemplates) > 0) assert.True(t, len(spec.Template.Spec.InitContainers) > 0) assert.NotNil(t, spec.Template.Spec.SecurityContext) diff --git a/pkg/reconciler/isbsvc/installer/jetstream.go b/pkg/reconciler/isbsvc/installer/jetstream.go index f0faa09caa..e9d92ce82c 100644 --- a/pkg/reconciler/isbsvc/installer/jetstream.go +++ b/pkg/reconciler/isbsvc/installer/jetstream.go @@ -518,7 +518,11 @@ func (r *jetStreamInstaller) createConfigMap(ctx context.Context) error { func (r *jetStreamInstaller) Uninstall(ctx context.Context) error { // Clean up metrics _ = reconciler.JetStreamISBSvcReplicas.DeleteLabelValues(r.isbSvc.Namespace, r.isbSvc.Name) - return r.uninstallPVCs(ctx) + // TODO: (k8s 1.27) Remove this once we deprecate the support for k8s < 1.27 + if !dfv1.IsPVCRetentionPolicySupported() { + return r.uninstallPVCs(ctx) + } + return nil } func (r *jetStreamInstaller) uninstallPVCs(ctx context.Context) error { diff --git a/pkg/reconciler/isbsvc/installer/native_redis.go b/pkg/reconciler/isbsvc/installer/native_redis.go index 495d24b03b..84b6ea4a78 100644 --- a/pkg/reconciler/isbsvc/installer/native_redis.go +++ b/pkg/reconciler/isbsvc/installer/native_redis.go @@ -585,7 +585,11 @@ func (r *redisInstaller) createStatefulSet(ctx context.Context) error { func (r *redisInstaller) Uninstall(ctx context.Context) error { // Clean up metrics _ = reconciler.RedisISBSvcReplicas.DeleteLabelValues(r.isbSvc.Namespace, r.isbSvc.Name) - return r.uninstallPVCs(ctx) + // TODO: (k8s 1.27) Remove this once we deprecate the support for k8s < 1.27 + if !dfv1.IsPVCRetentionPolicySupported() { + return r.uninstallPVCs(ctx) + } + return nil } func (r *redisInstaller) uninstallPVCs(ctx context.Context) error { From 0c86e820e6a47780dceab143175e747d28ab4e47 Mon Sep 17 00:00:00 2001 
From: Yashash H L Date: Thu, 2 Jan 2025 12:19:42 +0530 Subject: [PATCH 181/188] chore: move message conversions to appropriate modules (#2306) Signed-off-by: Yashash H L Signed-off-by: Vigith Maurice Co-authored-by: Vigith Maurice --- rust/numaflow-core/src/config/monovertex.rs | 2 +- .../src/mapper/map/user_defined.rs | 18 + rust/numaflow-core/src/message.rs | 337 +----------------- rust/numaflow-core/src/sink.rs | 132 ++++++- rust/numaflow-core/src/sink/blackhole.rs | 8 +- rust/numaflow-core/src/sink/log.rs | 11 +- rust/numaflow-core/src/sink/user_defined.rs | 23 +- rust/numaflow-core/src/source/user_defined.rs | 109 +++++- .../src/transformer/user_defined.rs | 58 ++- 9 files changed, 330 insertions(+), 368 deletions(-) diff --git a/rust/numaflow-core/src/config/monovertex.rs b/rust/numaflow-core/src/config/monovertex.rs index 75f5a8cb9a..c6f18e3c8e 100644 --- a/rust/numaflow-core/src/config/monovertex.rs +++ b/rust/numaflow-core/src/config/monovertex.rs @@ -47,7 +47,7 @@ impl Default for MonovertexConfig { source_type: source::SourceType::Generator(GeneratorConfig::default()), }, sink_config: SinkConfig { - sink_type: sink::SinkType::Log(sink::LogConfig::default()), + sink_type: SinkType::Log(sink::LogConfig::default()), retry_config: None, }, transformer_config: None, diff --git a/rust/numaflow-core/src/mapper/map/user_defined.rs b/rust/numaflow-core/src/mapper/map/user_defined.rs index 6bc816c405..0799eb6542 100644 --- a/rust/numaflow-core/src/mapper/map/user_defined.rs +++ b/rust/numaflow-core/src/mapper/map/user_defined.rs @@ -13,6 +13,7 @@ use tracing::error; use crate::config::get_vertex_name; use crate::error::{Error, Result}; use crate::message::{Message, MessageID, Offset}; +use crate::shared::grpc::prost_timestamp_from_utc; type ResponseSenderMap = Arc>>)>>>; @@ -26,6 +27,23 @@ struct ParentMessageInfo { headers: HashMap, } +impl From for MapRequest { + fn from(message: Message) -> Self { + Self { + request: Some(map::map_request::Request { + keys: 
message.keys.to_vec(), + value: message.value.to_vec(), + event_time: prost_timestamp_from_utc(message.event_time), + watermark: None, + headers: message.headers, + }), + id: message.offset.unwrap().to_string(), + handshake: None, + status: None, + } + } +} + /// UserDefinedUnaryMap is a grpc client that sends unary requests to the map server /// and forwards the responses. pub(in crate::mapper) struct UserDefinedUnaryMap { diff --git a/rust/numaflow-core/src/message.rs b/rust/numaflow-core/src/message.rs index a33b4a7041..00f5cca663 100644 --- a/rust/numaflow-core/src/message.rs +++ b/rust/numaflow-core/src/message.rs @@ -3,24 +3,14 @@ use std::collections::HashMap; use std::fmt; use std::sync::Arc; -use async_nats::HeaderValue; -use base64::engine::general_purpose::STANDARD as BASE64_STANDARD; -use base64::Engine; use bytes::{Bytes, BytesMut}; use chrono::{DateTime, Utc}; -use numaflow_pb::clients::map::MapRequest; -use numaflow_pb::clients::sink::sink_request::Request; -use numaflow_pb::clients::sink::Status::{Failure, Fallback, Success}; -use numaflow_pb::clients::sink::{sink_response, SinkRequest}; -use numaflow_pb::clients::source::read_response; -use numaflow_pb::clients::sourcetransformer::SourceTransformRequest; use prost::Message as ProtoMessage; use serde::{Deserialize, Serialize}; use crate::shared::grpc::prost_timestamp_from_utc; use crate::shared::grpc::utc_from_timestamp; -use crate::Result; -use crate::{config, Error}; +use crate::Error; const DROP: &str = "U+005C__DROP__"; @@ -62,44 +52,6 @@ impl fmt::Display for Offset { } } -impl TryFrom for Message { - type Error = Error; - - fn try_from(message: async_nats::Message) -> std::result::Result { - let payload = message.payload; - let headers: HashMap = message - .headers - .unwrap_or_default() - .iter() - .map(|(key, value)| { - ( - key.to_string(), - value.first().unwrap_or(&HeaderValue::from("")).to_string(), - ) - }) - .collect(); - // FIXME(cr): we should not be using subject. 
keys are in the payload - let keys = message.subject.split('.').map(|s| s.to_string()).collect(); - let event_time = Utc::now(); - let offset = None; - let id = MessageID { - vertex_name: config::get_vertex_name().to_string().into(), - offset: "0".to_string().into(), - index: 0, - }; - - Ok(Self { - keys, - tags: None, - value: payload, - offset, - event_time, - id, - headers, - }) - } -} - impl Message { // Check if the message should be dropped. pub(crate) fn dropped(&self) -> bool { @@ -135,8 +87,8 @@ impl fmt::Display for IntOffset { #[derive(Debug, Clone, Serialize, Deserialize)] pub(crate) struct StringOffset { /// offset could be a complex base64 string. - offset: Bytes, - partition_idx: u16, + pub(crate) offset: Bytes, + pub(crate) partition_idx: u16, } impl StringOffset { @@ -206,26 +158,10 @@ impl fmt::Display for MessageID { } } -impl TryFrom for numaflow_pb::clients::source::Offset { - type Error = Error; - - fn try_from(offset: Offset) -> std::result::Result { - match offset { - Offset::Int(_) => Err(Error::Source("IntOffset not supported".to_string())), - Offset::String(o) => Ok(numaflow_pb::clients::source::Offset { - offset: BASE64_STANDARD - .decode(o.offset) - .expect("we control the encoding, so this should never fail"), - partition_id: o.partition_idx as i32, - }), - } - } -} - impl TryFrom for BytesMut { type Error = Error; - fn try_from(message: Message) -> std::result::Result { + fn try_from(message: Message) -> Result { let proto_message = numaflow_pb::objects::isb::Message { header: Some(numaflow_pb::objects::isb::Header { message_info: Some(numaflow_pb::objects::isb::MessageInfo { @@ -253,7 +189,7 @@ impl TryFrom for BytesMut { impl TryFrom for Message { type Error = Error; - fn try_from(bytes: Bytes) -> std::result::Result { + fn try_from(bytes: Bytes) -> Result { let proto_message = numaflow_pb::objects::isb::Message::decode(bytes) .map_err(|e| Error::Proto(e.to_string()))?; @@ -280,153 +216,14 @@ impl TryFrom for Message { } } -/// 
Convert the [`Message`] to [`SourceTransformRequest`] -impl From for SourceTransformRequest { - fn from(message: Message) -> Self { - Self { - request: Some( - numaflow_pb::clients::sourcetransformer::source_transform_request::Request { - id: message - .offset - .expect("offset should be present") - .to_string(), - keys: message.keys.to_vec(), - value: message.value.to_vec(), - event_time: prost_timestamp_from_utc(message.event_time), - watermark: None, - headers: message.headers, - }, - ), - handshake: None, - } - } -} - -impl From for MapRequest { - fn from(message: Message) -> Self { - Self { - request: Some(numaflow_pb::clients::map::map_request::Request { - keys: message.keys.to_vec(), - value: message.value.to_vec(), - event_time: prost_timestamp_from_utc(message.event_time), - watermark: None, - headers: message.headers, - }), - id: message.offset.unwrap().to_string(), - handshake: None, - status: None, - } - } -} - -/// Convert [`read_response::Result`] to [`Message`] -impl TryFrom for Message { - type Error = Error; - - fn try_from(result: read_response::Result) -> Result { - let source_offset = match result.offset { - Some(o) => Offset::String(StringOffset { - offset: BASE64_STANDARD.encode(o.offset).into(), - partition_idx: o.partition_id as u16, - }), - None => return Err(Error::Source("Offset not found".to_string())), - }; - - Ok(Message { - keys: Arc::from(result.keys), - tags: None, - value: result.payload.into(), - offset: Some(source_offset.clone()), - event_time: utc_from_timestamp(result.event_time), - id: MessageID { - vertex_name: config::get_vertex_name().to_string().into(), - offset: source_offset.to_string().into(), - index: 0, - }, - headers: result.headers, - }) - } -} - -/// Convert [`Message`] to [`proto::SinkRequest`] -impl From for SinkRequest { - fn from(message: Message) -> Self { - Self { - request: Some(Request { - keys: message.keys.to_vec(), - value: message.value.to_vec(), - event_time: 
prost_timestamp_from_utc(message.event_time), - watermark: None, - id: message.id.to_string(), - headers: message.headers, - }), - status: None, - handshake: None, - } - } -} - -/// Sink's status for each [Message] written to Sink. -#[derive(PartialEq, Debug)] -pub(crate) enum ResponseStatusFromSink { - /// Successfully wrote to the Sink. - Success, - /// Failed with error message. - Failed(String), - /// Write to FallBack Sink. - Fallback, -} - -/// Sink will give a response per [Message]. -#[derive(Debug, PartialEq)] -pub(crate) struct ResponseFromSink { - /// Unique id per [Message]. We need to track per [Message] status. - pub(crate) id: String, - /// Status of the "sink" operation per [Message]. - pub(crate) status: ResponseStatusFromSink, -} - -impl From for sink_response::Result { - fn from(value: ResponseFromSink) -> Self { - let (status, err_msg) = match value.status { - ResponseStatusFromSink::Success => (Success, "".to_string()), - ResponseStatusFromSink::Failed(err) => (Failure, err.to_string()), - ResponseStatusFromSink::Fallback => (Fallback, "".to_string()), - }; - - Self { - id: value.id, - status: status as i32, - err_msg, - } - } -} - -impl From for ResponseFromSink { - fn from(value: sink_response::Result) -> Self { - let status = match value.status() { - Success => ResponseStatusFromSink::Success, - Failure => ResponseStatusFromSink::Failed(value.err_msg), - Fallback => ResponseStatusFromSink::Fallback, - }; - Self { - id: value.id, - status, - } - } -} - #[cfg(test)] mod tests { - use std::collections::HashMap; - + use crate::error::Result; use chrono::TimeZone; - use numaflow_pb::clients::sink::sink_response::Result as SinkResult; - use numaflow_pb::clients::sink::SinkResponse; - use numaflow_pb::clients::source::Offset as SourceOffset; use numaflow_pb::objects::isb::{ Body, Header, Message as ProtoMessage, MessageId, MessageInfo, }; + use std::collections::HashMap; use super::*; @@ -530,116 +327,6 @@ mod tests { ); } - #[test] - fn 
test_message_to_source_transform_request() { - let message = Message { - keys: Arc::from(vec!["key1".to_string()]), - tags: None, - value: vec![1, 2, 3].into(), - offset: Some(Offset::String(StringOffset { - offset: "123".to_string().into(), - partition_idx: 0, - })), - event_time: Utc.timestamp_opt(1627846261, 0).unwrap(), - id: MessageID { - vertex_name: "vertex".to_string().into(), - offset: "123".to_string().into(), - index: 0, - }, - headers: HashMap::new(), - }; - - let request: SourceTransformRequest = message.into(); - assert!(request.request.is_some()); - } - - #[test] - fn test_read_response_result_to_message() { - let result = read_response::Result { - payload: vec![1, 2, 3], - offset: Some(SourceOffset { - offset: BASE64_STANDARD.encode("123").into_bytes(), - partition_id: 0, - }), - event_time: Some( - prost_timestamp_from_utc(Utc.timestamp_opt(1627846261, 0).unwrap()).unwrap(), - ), - keys: vec!["key1".to_string()], - headers: HashMap::new(), - }; - - let message: Result = result.try_into(); - assert!(message.is_ok()); - - let message = message.unwrap(); - assert_eq!(message.keys.to_vec(), vec!["key1".to_string()]); - assert_eq!(message.value, vec![1, 2, 3]); - assert_eq!( - message.event_time, - Utc.timestamp_opt(1627846261, 0).unwrap() - ); - } - - #[test] - fn test_message_to_sink_request() { - let message = Message { - keys: Arc::from(vec!["key1".to_string()]), - tags: None, - value: vec![1, 2, 3].into(), - offset: Some(Offset::String(StringOffset { - offset: "123".to_string().into(), - partition_idx: 0, - })), - event_time: Utc.timestamp_opt(1627846261, 0).unwrap(), - id: MessageID { - vertex_name: "vertex".to_string().into(), - offset: "123".to_string().into(), - index: 0, - }, - headers: HashMap::new(), - }; - - let request: SinkRequest = message.into(); - assert!(request.request.is_some()); - } - - #[test] - fn test_response_from_sink_to_sink_response() { - let response = ResponseFromSink { - id: "123".to_string(), - status: 
ResponseStatusFromSink::Success, - }; - - let sink_result: sink_response::Result = response.into(); - assert_eq!(sink_result.status, Success as i32); - } - - #[test] - fn test_sink_response_to_response_from_sink() { - let sink_response = SinkResponse { - results: vec![SinkResult { - id: "123".to_string(), - status: Success as i32, - err_msg: "".to_string(), - }], - handshake: None, - status: None, - }; - - let results: Vec = sink_response - .results - .into_iter() - .map(Into::into) - .collect::>(); - assert!(!results.is_empty()); - - assert_eq!(results.get(0).unwrap().id, "123"); - assert_eq!( - results.get(0).unwrap().status, - ResponseStatusFromSink::Success - ); - } - #[test] fn test_message_id_from_proto() { let proto_id = MessageId { @@ -683,15 +370,5 @@ mod tests { let offset_string = Offset::String(string_offset); assert_eq!(format!("{}", offset_string), "42-1"); - - // Test conversion from Offset to AckRequest for StringOffset - let offset = Offset::String(StringOffset::new(BASE64_STANDARD.encode("42"), 1)); - let offset: Result = offset.try_into(); - assert_eq!(offset.unwrap().partition_id, 1); - - // Test conversion from Offset to AckRequest for IntOffset (should fail) - let offset = Offset::Int(IntOffset::new(42, 1)); - let result: Result = offset.try_into(); - assert!(result.is_err()); } } diff --git a/rust/numaflow-core/src/sink.rs b/rust/numaflow-core/src/sink.rs index 0b30f4c30a..c60c57bd3e 100644 --- a/rust/numaflow-core/src/sink.rs +++ b/rust/numaflow-core/src/sink.rs @@ -2,6 +2,8 @@ use std::collections::HashMap; use std::time::Duration; use numaflow_pb::clients::sink::sink_client::SinkClient; +use numaflow_pb::clients::sink::sink_response; +use numaflow_pb::clients::sink::Status::{Failure, Fallback, Success}; use tokio::sync::mpsc::Receiver; use tokio::sync::{mpsc, oneshot}; use tokio::task::JoinHandle; @@ -17,7 +19,7 @@ use user_defined::UserDefinedSink; use crate::config::components::sink::{OnFailureStrategy, RetryConfig}; use 
crate::config::is_mono_vertex; use crate::error::Error; -use crate::message::{Message, ResponseFromSink, ResponseStatusFromSink}; +use crate::message::Message; use crate::metrics::{ monovertex_metrics, mvtx_forward_metric_labels, pipeline_forward_metric_labels, pipeline_metrics, @@ -25,8 +27,16 @@ use crate::metrics::{ use crate::tracker::TrackerHandle; use crate::Result; +/// A [blackhole] sink which reads but never writes to anywhere, semantic equivalent of `/dev/null`. +/// +/// [Blackhole]: https://numaflow.numaproj.io/user-guide/sinks/blackhole/ mod blackhole; + +/// [log] sink prints out the read messages on to stdout. +/// +/// [Log]: https://numaflow.numaproj.io/user-guide/sinks/log/ mod log; + /// [User-Defined Sink] extends Numaflow to add custom sources supported outside the builtins. /// /// [User-Defined Sink]: https://numaflow.numaproj.io/user-guide/sinks/user-defined-sinks/ @@ -645,6 +655,56 @@ impl SinkWriter { } } +/// Sink's status for each [Message] written to Sink. +#[derive(PartialEq, Debug)] +pub(crate) enum ResponseStatusFromSink { + /// Successfully wrote to the Sink. + Success, + /// Failed with error message. + Failed(String), + /// Write to FallBack Sink. + Fallback, +} + +/// Sink will give a response per [Message]. +#[derive(Debug, PartialEq)] +pub(crate) struct ResponseFromSink { + /// Unique id per [Message]. We need to track per [Message] status. + pub(crate) id: String, + /// Status of the "sink" operation per [Message]. 
+ pub(crate) status: ResponseStatusFromSink, +} + +impl From for ResponseFromSink { + fn from(value: sink_response::Result) -> Self { + let status = match value.status() { + Success => ResponseStatusFromSink::Success, + Failure => ResponseStatusFromSink::Failed(value.err_msg), + Fallback => ResponseStatusFromSink::Fallback, + }; + Self { + id: value.id, + status, + } + } +} + +impl From for sink_response::Result { + fn from(value: ResponseFromSink) -> Self { + let (status, err_msg) = match value.status { + ResponseStatusFromSink::Success => (Success, "".to_string()), + ResponseStatusFromSink::Failed(err) => (Failure, err.to_string()), + ResponseStatusFromSink::Fallback => (Fallback, "".to_string()), + }; + + Self { + id: value.id, + status: status as i32, + err_msg, + } + } +} + impl Drop for SinkWriter { fn drop(&mut self) {} } @@ -653,15 +713,15 @@ impl Drop for SinkWriter { mod tests { use std::sync::Arc; - use chrono::Utc; + use super::*; + use crate::message::{Message, MessageID, Offset, ReadAck, StringOffset}; + use crate::shared::grpc::create_rpc_channel; + use chrono::{TimeZone, Utc}; use numaflow::sink; + use numaflow_pb::clients::sink::{SinkRequest, SinkResponse}; use tokio::time::Duration; use tokio_util::sync::CancellationToken; - use super::*; - use crate::message::{Message, MessageID, ReadAck}; - use crate::shared::grpc::create_rpc_channel; - struct SimpleSink; #[tonic::async_trait] impl sink::Sinker for SimpleSink { @@ -938,4 +998,64 @@ mod tests { // check if the tracker is empty assert!(tracker_handle.is_empty().await.unwrap()); } + + #[test] + fn test_message_to_sink_request() { + let message = Message { + keys: Arc::from(vec!["key1".to_string()]), + tags: None, + value: vec![1, 2, 3].into(), + offset: Some(Offset::String(StringOffset { + offset: "123".to_string().into(), + partition_idx: 0, + })), + event_time: Utc.timestamp_opt(1627846261, 0).unwrap(), + id: MessageID { + vertex_name: "vertex".to_string().into(), + offset: 
"123".to_string().into(), + index: 0, + }, + headers: HashMap::new(), + }; + + let request: SinkRequest = message.into(); + assert!(request.request.is_some()); + } + + #[test] + fn test_response_from_sink_to_sink_response() { + let response = ResponseFromSink { + id: "123".to_string(), + status: ResponseStatusFromSink::Success, + }; + + let sink_result: sink_response::Result = response.into(); + assert_eq!(sink_result.status, Success as i32); + } + + #[test] + fn test_sink_response_to_response_from_sink() { + let sink_response = SinkResponse { + results: vec![sink_response::Result { + id: "123".to_string(), + status: Success as i32, + err_msg: "".to_string(), + }], + handshake: None, + status: None, + }; + + let results: Vec = sink_response + .results + .into_iter() + .map(Into::into) + .collect::>(); + assert!(!results.is_empty()); + + assert_eq!(results.first().unwrap().id, "123"); + assert_eq!( + results.first().unwrap().status, + ResponseStatusFromSink::Success + ); + } } diff --git a/rust/numaflow-core/src/sink/blackhole.rs b/rust/numaflow-core/src/sink/blackhole.rs index eb2f331360..308a59e35b 100644 --- a/rust/numaflow-core/src/sink/blackhole.rs +++ b/rust/numaflow-core/src/sink/blackhole.rs @@ -1,5 +1,5 @@ -use super::Sink; -use crate::message::{Message, ResponseFromSink, ResponseStatusFromSink}; +use super::{ResponseFromSink, ResponseStatusFromSink, Sink}; +use crate::message::Message; /// Blackhole is a sink to emulate /dev/null pub struct BlackholeSink; @@ -25,8 +25,8 @@ mod tests { use super::BlackholeSink; use crate::message::IntOffset; - use crate::message::{Message, MessageID, Offset, ResponseFromSink, ResponseStatusFromSink}; - use crate::sink::Sink; + use crate::message::{Message, MessageID, Offset}; + use crate::sink::{ResponseFromSink, ResponseStatusFromSink, Sink}; #[tokio::test] async fn test_black_hole() { diff --git a/rust/numaflow-core/src/sink/log.rs b/rust/numaflow-core/src/sink/log.rs index 9ae426f1f2..71bb373741 100644 --- 
a/rust/numaflow-core/src/sink/log.rs +++ b/rust/numaflow-core/src/sink/log.rs @@ -1,8 +1,5 @@ -use crate::sink::Sink; -use crate::{ - error, - message::{Message, ResponseFromSink, ResponseStatusFromSink}, -}; +use crate::sink::{ResponseFromSink, ResponseStatusFromSink, Sink}; +use crate::{error, message::Message}; pub(crate) struct LogSink; @@ -41,8 +38,8 @@ mod tests { use super::LogSink; use crate::message::IntOffset; - use crate::message::{Message, MessageID, Offset, ResponseFromSink, ResponseStatusFromSink}; - use crate::sink::Sink; + use crate::message::{Message, MessageID, Offset}; + use crate::sink::{ResponseFromSink, ResponseStatusFromSink, Sink}; #[tokio::test] async fn test_log_sink() { diff --git a/rust/numaflow-core/src/sink/user_defined.rs b/rust/numaflow-core/src/sink/user_defined.rs index efb9c1178d..0bcb4c6853 100644 --- a/rust/numaflow-core/src/sink/user_defined.rs +++ b/rust/numaflow-core/src/sink/user_defined.rs @@ -6,8 +6,9 @@ use tonic::transport::Channel; use tonic::{Request, Streaming}; use tracing::error; -use crate::message::{Message, ResponseFromSink}; -use crate::sink::Sink; +use crate::message::Message; +use crate::shared::grpc::prost_timestamp_from_utc; +use crate::sink::{ResponseFromSink, Sink}; use crate::Error; use crate::Result; @@ -19,6 +20,24 @@ pub struct UserDefinedSink { resp_stream: Streaming, } +/// Convert [`Message`] to [`proto::SinkRequest`] +impl From for SinkRequest { + fn from(message: Message) -> Self { + Self { + request: Some(numaflow_pb::clients::sink::sink_request::Request { + keys: message.keys.to_vec(), + value: message.value.to_vec(), + event_time: prost_timestamp_from_utc(message.event_time), + watermark: None, + id: message.id.to_string(), + headers: message.headers, + }), + status: None, + handshake: None, + } + } +} + impl UserDefinedSink { pub(crate) async fn new(mut client: SinkClient) -> Result { let (sink_tx, sink_rx) = mpsc::channel(DEFAULT_CHANNEL_SIZE); diff --git 
a/rust/numaflow-core/src/source/user_defined.rs b/rust/numaflow-core/src/source/user_defined.rs index e5717c12a6..5f274119bb 100644 --- a/rust/numaflow-core/src/source/user_defined.rs +++ b/rust/numaflow-core/src/source/user_defined.rs @@ -1,19 +1,22 @@ -use std::time::Duration; - +use base64::prelude::BASE64_STANDARD; +use base64::Engine; use numaflow_pb::clients::source; use numaflow_pb::clients::source::source_client::SourceClient; use numaflow_pb::clients::source::{ - read_request, AckRequest, AckResponse, ReadRequest, ReadResponse, + read_request, read_response, AckRequest, AckResponse, ReadRequest, ReadResponse, }; +use std::sync::Arc; +use std::time::Duration; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; use tonic::transport::Channel; use tonic::{Request, Streaming}; -use crate::message::{Message, Offset}; +use crate::message::{Message, MessageID, Offset, StringOffset}; use crate::reader::LagReader; +use crate::shared::grpc::utc_from_timestamp; use crate::source::{SourceAcker, SourceReader}; -use crate::{Error, Result}; +use crate::{config, Error, Result}; /// User-Defined Source to operative on custom sources. 
#[derive(Debug)] @@ -100,6 +103,51 @@ impl UserDefinedSourceRead { } } +/// Convert [`read_response::Result`] to [`Message`] +impl TryFrom for Message { + type Error = Error; + + fn try_from(result: read_response::Result) -> Result { + let source_offset = match result.offset { + Some(o) => Offset::String(StringOffset { + offset: BASE64_STANDARD.encode(o.offset).into(), + partition_idx: o.partition_id as u16, + }), + None => return Err(Error::Source("Offset not found".to_string())), + }; + + Ok(Message { + keys: Arc::from(result.keys), + tags: None, + value: result.payload.into(), + offset: Some(source_offset.clone()), + event_time: utc_from_timestamp(result.event_time), + id: MessageID { + vertex_name: config::get_vertex_name().to_string().into(), + offset: source_offset.to_string().into(), + index: 0, + }, + headers: result.headers, + }) + } +} + +impl TryFrom for source::Offset { + type Error = Error; + + fn try_from(offset: Offset) -> std::result::Result { + match offset { + Offset::Int(_) => Err(Error::Source("IntOffset not supported".to_string())), + Offset::String(o) => Ok(numaflow_pb::clients::source::Offset { + offset: BASE64_STANDARD + .decode(o.offset) + .expect("we control the encoding, so this should never fail"), + partition_id: o.partition_idx as i32, + }), + } + } +} + impl SourceReader for UserDefinedSourceRead { fn name(&self) -> &'static str { "user-defined-source" @@ -233,17 +281,17 @@ impl LagReader for UserDefinedSourceLagReader { #[cfg(test)] mod tests { - use std::collections::HashSet; + use std::collections::{HashMap, HashSet}; - use chrono::Utc; + use super::*; + use crate::message::IntOffset; + use crate::shared::grpc::{create_rpc_channel, prost_timestamp_from_utc}; + use chrono::{TimeZone, Utc}; use numaflow::source; use numaflow::source::{Message, Offset, SourceReadRequest}; use numaflow_pb::clients::source::source_client::SourceClient; use tokio::sync::mpsc::Sender; - use super::*; - use crate::shared::grpc::create_rpc_channel; - struct 
SimpleSource { num: usize, yet_to_ack: std::sync::RwLock>, @@ -353,4 +401,45 @@ mod tests { .expect("failed to send shutdown signal"); server_handle.await.expect("failed to join server task"); } + + #[test] + fn test_read_response_result_to_message() { + let result = read_response::Result { + payload: vec![1, 2, 3], + offset: Some(numaflow_pb::clients::source::Offset { + offset: BASE64_STANDARD.encode("123").into_bytes(), + partition_id: 0, + }), + event_time: Some( + prost_timestamp_from_utc(Utc.timestamp_opt(1627846261, 0).unwrap()).unwrap(), + ), + keys: vec!["key1".to_string()], + headers: HashMap::new(), + }; + + let message: Result = result.try_into(); + assert!(message.is_ok()); + + let message = message.unwrap(); + assert_eq!(message.keys.to_vec(), vec!["key1".to_string()]); + assert_eq!(message.value, vec![1, 2, 3]); + assert_eq!( + message.event_time, + Utc.timestamp_opt(1627846261, 0).unwrap() + ); + } + + #[test] + fn test_offset_conversion() { + // Test conversion from Offset to AckRequest for StringOffset + let offset = + crate::message::Offset::String(StringOffset::new(BASE64_STANDARD.encode("42"), 1)); + let offset: Result = offset.try_into(); + assert_eq!(offset.unwrap().partition_id, 1); + + // Test conversion from Offset to AckRequest for IntOffset (should fail) + let offset = crate::message::Offset::Int(IntOffset::new(42, 1)); + let result: Result = offset.try_into(); + assert!(result.is_err()); + } } diff --git a/rust/numaflow-core/src/transformer/user_defined.rs b/rust/numaflow-core/src/transformer/user_defined.rs index 398d5a4bcb..78518e4c01 100644 --- a/rust/numaflow-core/src/transformer/user_defined.rs +++ b/rust/numaflow-core/src/transformer/user_defined.rs @@ -13,7 +13,7 @@ use tonic::{Request, Streaming}; use crate::config::get_vertex_name; use crate::error::{Error, Result}; use crate::message::{Message, MessageID, Offset}; -use crate::shared::grpc::utc_from_timestamp; +use crate::shared::grpc::{prost_timestamp_from_utc, 
utc_from_timestamp}; type ResponseSenderMap = Arc>>)>>>; @@ -38,6 +38,26 @@ impl Drop for UserDefinedTransformer { } } +/// Convert the [`Message`] to [`SourceTransformRequest`] +impl From for SourceTransformRequest { + fn from(message: Message) -> Self { + Self { + request: Some(sourcetransformer::source_transform_request::Request { + id: message + .offset + .expect("offset should be present") + .to_string(), + keys: message.keys.to_vec(), + value: message.value.to_vec(), + event_time: prost_timestamp_from_utc(message.event_time), + watermark: None, + headers: message.headers, + }), + handshake: None, + } + } +} + impl UserDefinedTransformer { /// Performs handshake with the server and creates a new UserDefinedTransformer. pub(super) async fn new( @@ -164,17 +184,16 @@ impl UserDefinedTransformer { #[cfg(test)] mod tests { + use super::*; + use crate::message::StringOffset; + use crate::shared::grpc::create_rpc_channel; + use chrono::{TimeZone, Utc}; + use numaflow::sourcetransform; use std::error::Error; - use std::sync::Arc; + use std::result::Result; use std::time::Duration; - - use numaflow::sourcetransform; - use numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; use tempfile::TempDir; - use crate::message::{MessageID, StringOffset}; - use crate::shared::grpc::create_rpc_channel; - use crate::transformer::user_defined::UserDefinedTransformer; struct NowCat; #[tonic::async_trait] @@ -258,4 +277,27 @@ mod tests { ); Ok(()) } + + #[test] + fn test_message_to_source_transform_request() { + let message = Message { + keys: Arc::from(vec!["key1".to_string()]), + tags: None, + value: vec![1, 2, 3].into(), + offset: Some(Offset::String(StringOffset { + offset: "123".to_string().into(), + partition_idx: 0, + })), + event_time: Utc.timestamp_opt(1627846261, 0).unwrap(), + id: MessageID { + vertex_name: "vertex".to_string().into(), + offset: "123".to_string().into(), + index: 0, + }, + headers: HashMap::new(), + }; + + let request: 
SourceTransformRequest = message.into(); + assert!(request.request.is_some()); + } } From 8d8340c725e39031ef56e04e603c3b9241f15399 Mon Sep 17 00:00:00 2001 From: Sreekanth Date: Mon, 6 Jan 2025 11:27:53 +0530 Subject: [PATCH 182/188] feat: Implement `Sourcer` traits for serving source (#2301) Signed-off-by: Sreekanth Signed-off-by: Vigith Maurice Co-authored-by: Vigith Maurice --- rust/Cargo.lock | 212 +++++++++--- rust/Cargo.toml | 6 +- rust/numaflow-core/Cargo.toml | 11 +- rust/numaflow-core/src/config/components.rs | 16 +- rust/numaflow-core/src/message.rs | 8 +- rust/numaflow-core/src/metrics.rs | 2 - .../src/shared/create_components.rs | 22 +- rust/numaflow-core/src/source.rs | 11 + rust/numaflow-core/src/source/serving.rs | 206 ++++++++++++ rust/numaflow/src/main.rs | 13 +- rust/serving/Cargo.toml | 15 +- rust/serving/src/app.rs | 304 +++++------------- rust/serving/src/app/jetstream_proxy.rs | 284 ++++++++-------- rust/serving/src/config.rs | 144 +++------ rust/serving/src/error.rs | 7 + rust/serving/src/lib.rs | 47 +-- rust/serving/src/metrics.rs | 2 + rust/serving/src/pipeline.rs | 43 ++- rust/serving/src/source.rs | 292 +++++++++++++++++ 19 files changed, 1048 insertions(+), 597 deletions(-) create mode 100644 rust/numaflow-core/src/source/serving.rs create mode 100644 rust/serving/src/source.rs diff --git a/rust/Cargo.lock b/rust/Cargo.lock index beec59aa4b..e3d90e2f05 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -53,39 +53,6 @@ version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" -[[package]] -name = "async-nats" -version = "0.35.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab8df97cb8fc4a884af29ab383e9292ea0939cfcdd7d2a17179086dc6c427e7f" -dependencies = [ - "base64 0.22.1", - "bytes", - "futures", - "memchr", - "nkeys", - "nuid", - "once_cell", - "portable-atomic", - "rand", - "regex", - "ring", - 
"rustls-native-certs 0.7.3", - "rustls-pemfile 2.2.0", - "rustls-webpki 0.102.8", - "serde", - "serde_json", - "serde_nanos", - "serde_repr", - "thiserror 1.0.69", - "time", - "tokio", - "tokio-rustls 0.26.0", - "tracing", - "tryhard", - "url", -] - [[package]] name = "async-nats" version = "0.38.0" @@ -221,7 +188,7 @@ dependencies = [ "serde_urlencoded", "sync_wrapper 1.0.2", "tokio", - "tower 0.5.1", + "tower 0.5.2", "tower-layer", "tower-service", "tracing", @@ -705,6 +672,21 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + [[package]] name = "form_urlencoded" version = "1.2.1" @@ -1125,6 +1107,22 @@ dependencies = [ "tower-service", ] +[[package]] +name = "hyper-tls" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" +dependencies = [ + "bytes", + "http-body-util", + "hyper 1.5.1", + "hyper-util", + "native-tls", + "tokio", + "tokio-native-tls", + "tower-service", +] + [[package]] name = "hyper-util" version = "0.1.10" @@ -1608,6 +1606,23 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" +[[package]] +name = "native-tls" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466" +dependencies = [ + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework 2.11.1", + "security-framework-sys", + "tempfile", +] + [[package]] name = "nkeys" version = "0.4.4" @@ -1748,7 +1763,7 @@ dependencies = [ name = "numaflow-core" version = "0.1.0" dependencies = [ - "async-nats 0.38.0", + "async-nats", "axum", "axum-server", "backoff", @@ -1771,13 +1786,14 @@ dependencies = [ "pulsar", "rand", "rcgen", + "reqwest 0.12.12", "rustls 0.23.19", "semver", "serde", "serde_json", "serving", "tempfile", - "thiserror 2.0.3", + "thiserror 2.0.8", "tokio", "tokio-stream", "tokio-util", @@ -1821,7 +1837,7 @@ dependencies = [ "prost 0.11.9", "pulsar", "serde", - "thiserror 2.0.3", + "thiserror 2.0.8", "tokio", "tonic", "tracing", @@ -1842,12 +1858,50 @@ version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" +[[package]] +name = "openssl" +version = "0.10.68" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6174bc48f102d208783c2c84bf931bb75927a617866870de8a4ea85597f871f5" +dependencies = [ + "bitflags 2.6.0", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + [[package]] name = "openssl-probe" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +[[package]] +name = "openssl-sys" +version = "0.9.104" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"45abf306cbf99debc8195b66b7346498d7b10c210de50418b5ccd7ceba08c741" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + [[package]] name = "ordered-float" version = "2.10.1" @@ -2025,6 +2079,12 @@ dependencies = [ "spki", ] +[[package]] +name = "pkg-config" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" + [[package]] name = "portable-atomic" version = "1.10.0" @@ -2249,7 +2309,7 @@ dependencies = [ "rustc-hash 2.1.0", "rustls 0.23.19", "socket2", - "thiserror 2.0.3", + "thiserror 2.0.8", "tokio", "tracing", ] @@ -2268,7 +2328,7 @@ dependencies = [ "rustls 0.23.19", "rustls-pki-types", "slab", - "thiserror 2.0.3", + "thiserror 2.0.8", "tinyvec", "tracing", "web-time", @@ -2448,7 +2508,7 @@ dependencies = [ "serde_json", "serde_urlencoded", "sync_wrapper 0.1.2", - "system-configuration", + "system-configuration 0.5.1", "tokio", "tokio-rustls 0.24.1", "tower-service", @@ -2462,24 +2522,28 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.9" +version = "0.12.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a77c62af46e79de0a562e1a9849205ffcb7fc1238876e9bd743357570e04046f" +checksum = "43e734407157c3c2034e0258f5e4473ddb361b1e85f95a66690d67264d7cd1da" dependencies = [ "base64 0.22.1", "bytes", + "encoding_rs", "futures-core", "futures-util", + "h2 0.4.7", "http 1.1.0", "http-body 1.0.1", "http-body-util", "hyper 1.5.1", "hyper-rustls 0.27.3", + "hyper-tls", "hyper-util", "ipnet", "js-sys", "log", "mime", + "native-tls", "once_cell", "percent-encoding", "pin-project-lite", @@ -2491,8 +2555,11 @@ dependencies = [ "serde_json", "serde_urlencoded", "sync_wrapper 1.0.2", + "system-configuration 0.6.1", "tokio", + "tokio-native-tls", "tokio-rustls 0.26.0", + "tower 0.5.2", "tower-service", "url", "wasm-bindgen", @@ -2850,7 +2917,7 @@ name = "servesink" version = "0.1.0" dependencies = [ "numaflow 
0.1.1", - "reqwest 0.12.9", + "reqwest 0.12.12", "tokio", "tonic", "tracing", @@ -2860,12 +2927,12 @@ dependencies = [ name = "serving" version = "0.1.0" dependencies = [ - "async-nats 0.35.1", "axum", "axum-macros", "axum-server", "backoff", "base64 0.22.1", + "bytes", "chrono", "hyper-util", "numaflow-models", @@ -2873,6 +2940,8 @@ dependencies = [ "prometheus-client", "rcgen", "redis", + "reqwest 0.12.12", + "rustls 0.23.19", "serde", "serde_json", "thiserror 1.0.69", @@ -3067,7 +3136,18 @@ checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ "bitflags 1.3.2", "core-foundation 0.9.4", - "system-configuration-sys", + "system-configuration-sys 0.5.0", +] + +[[package]] +name = "system-configuration" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +dependencies = [ + "bitflags 2.6.0", + "core-foundation 0.9.4", + "system-configuration-sys 0.6.0", ] [[package]] @@ -3080,6 +3160,16 @@ dependencies = [ "libc", ] +[[package]] +name = "system-configuration-sys" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "tempfile" version = "3.14.0" @@ -3104,11 +3194,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.3" +version = "2.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c006c85c7651b3cf2ada4584faa36773bd07bac24acfb39f3c431b36d7e667aa" +checksum = "08f5383f3e0071702bf93ab5ee99b52d26936be9dedd9413067cbdcddcb6141a" dependencies = [ - "thiserror-impl 2.0.3", + "thiserror-impl 2.0.8", ] [[package]] @@ -3124,9 +3214,9 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "2.0.3" +version = "2.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" +checksum = "f2f357fcec90b3caef6623a099691be676d033b40a058ac95d2a6ade6fa0c943" dependencies = [ "proc-macro2", "quote", @@ -3227,6 +3317,16 @@ dependencies = [ "syn 2.0.90", ] +[[package]] +name = "tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", +] + [[package]] name = "tokio-retry" version = "0.3.0" @@ -3370,14 +3470,14 @@ dependencies = [ [[package]] name = "tower" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" dependencies = [ "futures-core", "futures-util", "pin-project-lite", - "sync_wrapper 0.1.2", + "sync_wrapper 1.0.2", "tokio", "tower-layer", "tower-service", @@ -3594,6 +3694,12 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + [[package]] name = "version_check" version = "0.9.5" diff --git a/rust/Cargo.toml b/rust/Cargo.toml index 8a6b41a1a4..75fd036128 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -58,8 +58,12 @@ numaflow-core = { path = "numaflow-core" } numaflow-models = { path = "numaflow-models" } backoff = { path = "backoff" } numaflow-pb = { path = "numaflow-pb" } -numaflow-pulsar = {path = "extns/numaflow-pulsar"} +numaflow-pulsar = { path = "extns/numaflow-pulsar" } tokio = "1.41.1" +bytes = "1.7.1" tracing = "0.1.40" axum = "0.7.5" axum-server = { version = "0.7.1", 
features = ["tls-rustls"] } +serde = { version = "1.0.204", features = ["derive"] } +rustls = { version = "0.23.12", features = ["aws_lc_rs"] } +reqwest = "0.12.12" diff --git a/rust/numaflow-core/Cargo.toml b/rust/numaflow-core/Cargo.toml index 38cabb704f..4a98303a1e 100644 --- a/rust/numaflow-core/Cargo.toml +++ b/rust/numaflow-core/Cargo.toml @@ -21,8 +21,10 @@ serving.workspace = true backoff.workspace = true axum.workspace = true axum-server.workspace = true +bytes.workspace = true +serde.workspace = true +rustls.workspace = true tonic = "0.12.3" -bytes = "1.7.1" thiserror = "2.0.3" tokio-util = "0.7.11" tokio-stream = "0.1.15" @@ -35,8 +37,6 @@ tower = "0.4.13" serde_json = "1.0.122" trait-variant = "0.1.2" rcgen = "0.13.1" -rustls = { version = "0.23.12", features = ["aws_lc_rs"] } -serde = { version = "1.0.204", features = ["derive"] } semver = "1.0" pep440_rs = "0.6.6" parking_lot = "0.12.3" @@ -50,6 +50,9 @@ async-nats = "0.38.0" [dev-dependencies] tempfile = "3.11.0" numaflow = { git = "https://github.com/numaproj/numaflow-rs.git", rev = "9ca9362ad511084501520e5a37d40cdcd0cdc9d9" } -pulsar = { version = "6.3.0", default-features = false, features = ["tokio-rustls-runtime"] } +pulsar = { version = "6.3.0", default-features = false, features = [ + "tokio-rustls-runtime", +] } +reqwest = { workspace = true, features = ["json"] } [build-dependencies] diff --git a/rust/numaflow-core/src/config/components.rs b/rust/numaflow-core/src/config/components.rs index a49692060f..3dc0bf2a66 100644 --- a/rust/numaflow-core/src/config/components.rs +++ b/rust/numaflow-core/src/config/components.rs @@ -5,6 +5,7 @@ pub(crate) mod source { use std::collections::HashMap; use std::env; + use std::sync::Arc; use std::{fmt::Debug, time::Duration}; use bytes::Bytes; @@ -37,7 +38,9 @@ pub(crate) mod source { Generator(GeneratorConfig), UserDefined(UserDefinedConfig), Pulsar(PulsarSourceConfig), - Serving(serving::Settings), + // Serving source starts an Axum HTTP server in the 
background. + // The settings will be used as application state which gets cloned in each handler on each request. + Serving(Arc), } impl From> for SourceType { @@ -110,10 +113,7 @@ pub(crate) mod source { // There should be only one option (user-defined) to define the settings. fn try_from(cfg: Box) -> Result { let env_vars = env::vars().collect::>(); - - let mut settings: serving::Settings = env_vars - .try_into() - .map_err(|e: serving::Error| Error::Config(e.to_string()))?; + let mut settings: serving::Settings = env_vars.try_into()?; settings.tid_header = cfg.msg_id_header_key; @@ -148,7 +148,7 @@ pub(crate) mod source { } settings.redis.addr = cfg.store.url; - Ok(SourceType::Serving(settings)) + Ok(SourceType::Serving(Arc::new(settings))) } } @@ -168,6 +168,10 @@ pub(crate) mod source { return pulsar.try_into(); } + if let Some(serving) = source.serving.take() { + return serving.try_into(); + } + Err(Error::Config(format!("Invalid source type: {source:?}"))) } } diff --git a/rust/numaflow-core/src/message.rs b/rust/numaflow-core/src/message.rs index 00f5cca663..fe20613dad 100644 --- a/rust/numaflow-core/src/message.rs +++ b/rust/numaflow-core/src/message.rs @@ -37,7 +37,7 @@ pub(crate) struct Message { } /// Offset of the message which will be used to acknowledge the message. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub(crate) enum Offset { Int(IntOffset), String(StringOffset), @@ -62,7 +62,7 @@ impl Message { } /// IntOffset is integer based offset enum type. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct IntOffset { pub(crate) offset: u64, pub(crate) partition_idx: u16, @@ -84,7 +84,7 @@ impl fmt::Display for IntOffset { } /// StringOffset is string based offset enum type. 
-#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub(crate) struct StringOffset { /// offset could be a complex base64 string. pub(crate) offset: Bytes, @@ -120,7 +120,7 @@ pub(crate) enum ReadAck { } /// Message ID which is used to uniquely identify a message. It cheap to clone this. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub(crate) struct MessageID { pub(crate) vertex_name: Bytes, pub(crate) offset: Bytes, diff --git a/rust/numaflow-core/src/metrics.rs b/rust/numaflow-core/src/metrics.rs index fa79e457b8..2a672ec31d 100644 --- a/rust/numaflow-core/src/metrics.rs +++ b/rust/numaflow-core/src/metrics.rs @@ -600,8 +600,6 @@ pub(crate) async fn start_metrics_https_server( addr: SocketAddr, metrics_state: UserDefinedContainerState, ) -> crate::Result<()> { - let _ = rustls::crypto::aws_lc_rs::default_provider().install_default(); - // Generate a self-signed certificate let CertifiedKey { cert, key_pair } = generate_simple_self_signed(vec!["localhost".into()]) .map_err(|e| Error::Metrics(format!("Generating self-signed certificate: {}", e)))?; diff --git a/rust/numaflow-core/src/shared/create_components.rs b/rust/numaflow-core/src/shared/create_components.rs index bde1f6059e..b28f4caeee 100644 --- a/rust/numaflow-core/src/shared/create_components.rs +++ b/rust/numaflow-core/src/shared/create_components.rs @@ -1,15 +1,18 @@ +use std::sync::Arc; use std::time::Duration; use numaflow_pb::clients::map::map_client::MapClient; use numaflow_pb::clients::sink::sink_client::SinkClient; use numaflow_pb::clients::source::source_client::SourceClient; use numaflow_pb::clients::sourcetransformer::source_transform_client::SourceTransformClient; +use serving::ServingSource; use tokio_util::sync::CancellationToken; use tonic::transport::Channel; use crate::config::components::sink::{SinkConfig, SinkType}; use crate::config::components::source::{SourceConfig, 
SourceType}; use crate::config::components::transformer::TransformerConfig; +use crate::config::get_vertex_replica; use crate::config::pipeline::map::{MapMode, MapType, MapVtxConfig}; use crate::config::pipeline::{DEFAULT_BATCH_MAP_SOCKET, DEFAULT_STREAM_MAP_SOCKET}; use crate::error::Error; @@ -334,8 +337,23 @@ pub async fn create_source( None, )) } - SourceType::Serving(_) => { - unimplemented!("Serving as built-in source is not yet implemented") + SourceType::Serving(config) => { + let serving = ServingSource::new( + Arc::clone(config), + batch_size, + read_timeout, + *get_vertex_replica(), + ) + .await?; + Ok(( + Source::new( + batch_size, + source::SourceType::Serving(serving), + tracker_handle, + source_config.read_ahead, + ), + None, + )) } } } diff --git a/rust/numaflow-core/src/source.rs b/rust/numaflow-core/src/source.rs index 4d280d3725..a30fc9777e 100644 --- a/rust/numaflow-core/src/source.rs +++ b/rust/numaflow-core/src/source.rs @@ -37,6 +37,9 @@ pub(crate) mod generator; /// [Pulsar]: https://numaflow.numaproj.io/user-guide/sources/pulsar/ pub(crate) mod pulsar; +pub(crate) mod serving; +use serving::ServingSource; + /// Set of Read related items that has to be implemented to become a Source. 
pub(crate) trait SourceReader { #[allow(dead_code)] @@ -68,6 +71,7 @@ pub(crate) enum SourceType { generator::GeneratorLagReader, ), Pulsar(PulsarSource), + Serving(ServingSource), } enum ActorMessage { @@ -182,6 +186,13 @@ impl Source { actor.run().await; }); } + SourceType::Serving(serving) => { + tokio::spawn(async move { + let actor = + SourceActor::new(receiver, serving.clone(), serving.clone(), serving); + actor.run().await; + }); + } }; Self { read_batch_size: batch_size, diff --git a/rust/numaflow-core/src/source/serving.rs b/rust/numaflow-core/src/source/serving.rs new file mode 100644 index 0000000000..b9fb6c72ed --- /dev/null +++ b/rust/numaflow-core/src/source/serving.rs @@ -0,0 +1,206 @@ +use std::sync::Arc; + +pub(crate) use serving::ServingSource; + +use crate::config::get_vertex_replica; +use crate::message::{MessageID, StringOffset}; +use crate::Error; +use crate::Result; + +use super::{get_vertex_name, Message, Offset}; + +impl TryFrom for Message { + type Error = Error; + + fn try_from(message: serving::Message) -> Result { + let offset = Offset::String(StringOffset::new(message.id.clone(), *get_vertex_replica())); + + Ok(Message { + // we do not support keys from HTTP client + keys: Arc::from(vec![]), + tags: None, + value: message.value, + offset: Some(offset.clone()), + event_time: Default::default(), + id: MessageID { + vertex_name: get_vertex_name().to_string().into(), + offset: offset.to_string().into(), + index: 0, + }, + headers: message.headers, + }) + } +} + +impl From for Error { + fn from(value: serving::Error) -> Self { + Error::Source(value.to_string()) + } +} + +impl super::SourceReader for ServingSource { + fn name(&self) -> &'static str { + "serving" + } + + async fn read(&mut self) -> Result> { + self.read_messages() + .await? 
+ .into_iter() + .map(|msg| msg.try_into()) + .collect() + } + + fn partitions(&self) -> Vec { + vec![*get_vertex_replica()] + } +} + +impl super::SourceAcker for ServingSource { + /// HTTP response is sent only once we have confirmation that the message has been written to the ISB. + // TODO: Current implementation only works for `/v1/process/async` endpoint. + // For `/v1/process/{sync,sync_serve}` endpoints: https://github.com/numaproj/numaflow/issues/2308 + async fn ack(&mut self, offsets: Vec) -> Result<()> { + let mut serving_offsets = vec![]; + for offset in offsets { + let Offset::String(offset) = offset else { + return Err(Error::Source(format!( + "Expected string offset for Serving source. Got {offset:?}" + ))); + }; + serving_offsets.push(offset.to_string()); + } + self.ack_messages(serving_offsets).await?; + Ok(()) + } +} + +impl super::LagReader for ServingSource { + async fn pending(&mut self) -> Result> { + Ok(None) + } +} + +#[cfg(test)] +mod tests { + use crate::{ + message::{Message, MessageID, Offset, StringOffset}, + source::{SourceAcker, SourceReader}, + }; + use std::{collections::HashMap, sync::Arc, time::Duration}; + + use bytes::Bytes; + use serving::{ServingSource, Settings}; + + use super::get_vertex_replica; + + type Result = std::result::Result>; + + #[test] + fn test_message_conversion() -> Result<()> { + const MSG_ID: &str = "b149ad7a-5690-4f0a"; + + let mut headers = HashMap::new(); + headers.insert("header-key".to_owned(), "header-value".to_owned()); + + let serving_message = serving::Message { + value: Bytes::from_static(b"test"), + id: MSG_ID.into(), + headers: headers.clone(), + }; + let message: Message = serving_message.try_into()?; + assert_eq!(message.value, Bytes::from_static(b"test")); + assert_eq!( + message.offset, + Some(Offset::String(StringOffset::new(MSG_ID.into(), 0))) + ); + assert_eq!( + message.id, + MessageID { + vertex_name: Bytes::new(), + offset: format!("{MSG_ID}-0").into(), + index: 0 + } + ); + + 
assert_eq!(message.headers, headers); + + Ok(()) + } + + #[test] + fn test_error_conversion() { + use crate::error::Error; + let error: Error = serving::Error::ParseConfig("Invalid config".to_owned()).into(); + if let Error::Source(val) = error { + assert_eq!(val, "ParseConfig Error - Invalid config".to_owned()); + } else { + panic!("Expected Error::Source() variant"); + } + } + + #[tokio::test] + async fn test_serving_source_reader_acker() -> Result<()> { + let settings = Settings { + app_listen_port: 2000, + ..Default::default() + }; + let settings = Arc::new(settings); + let mut serving_source = ServingSource::new( + Arc::clone(&settings), + 10, + Duration::from_millis(1), + *get_vertex_replica(), + ) + .await?; + + let client = reqwest::Client::builder() + .timeout(Duration::from_secs(2)) + .danger_accept_invalid_certs(true) + .build() + .unwrap(); + + // Wait for the server + for _ in 0..10 { + let resp = client + .get(format!( + "https://localhost:{}/livez", + settings.app_listen_port + )) + .send() + .await; + if resp.is_ok() { + break; + } + tokio::time::sleep(Duration::from_millis(10)).await; + } + + let task_handle = tokio::spawn(async move { + loop { + tokio::time::sleep(Duration::from_millis(10)).await; + let mut messages = serving_source.read().await.unwrap(); + if messages.is_empty() { + // Server has not received any requests yet + continue; + } + assert_eq!(messages.len(), 1); + let msg = messages.remove(0); + serving_source.ack(vec![msg.offset.unwrap()]).await.unwrap(); + break; + } + }); + + let resp = client + .post(format!( + "https://localhost:{}/v1/process/async", + settings.app_listen_port + )) + .json("test-payload") + .send() + .await?; + + assert!(resp.status().is_success()); + assert!(task_handle.await.is_ok()); + Ok(()) + } +} diff --git a/rust/numaflow/src/main.rs b/rust/numaflow/src/main.rs index 60e26ef850..9a5ab6fe82 100644 --- a/rust/numaflow/src/main.rs +++ b/rust/numaflow/src/main.rs @@ -1,7 +1,5 @@ -use std::collections::HashMap; 
use std::env; use std::error::Error; -use std::sync::Arc; use tracing::error; use tracing_subscriber::layer::SubscriberExt; @@ -31,14 +29,7 @@ async fn main() -> Result<(), Box> { async fn run() -> Result<(), Box> { let args: Vec = env::args().collect(); // Based on the argument, run the appropriate component. - if args.contains(&"--serving".to_string()) { - let env_vars: HashMap = env::vars().collect(); - let settings: serving::Settings = env_vars.try_into()?; - let settings = Arc::new(settings); - serving::serve(settings) - .await - .map_err(|e| format!("Error running serving: {e:?}"))?; - } else if args.contains(&"--servesink".to_string()) { + if args.contains(&"--servesink".to_string()) { servesink::servesink() .await .map_err(|e| format!("Error running servesink: {e:?}"))?; @@ -47,5 +38,5 @@ async fn run() -> Result<(), Box> { .await .map_err(|e| format!("Error running rust binary: {e:?}"))? } - Err("Invalid argument. Use --serving, --servesink, or --rust".into()) + Err("Invalid argument. 
Use --servesink, or --rust".into()) } diff --git a/rust/serving/Cargo.toml b/rust/serving/Cargo.toml index de2f8bb820..857d69db77 100644 --- a/rust/serving/Cargo.toml +++ b/rust/serving/Cargo.toml @@ -5,8 +5,7 @@ edition = "2021" [features] redis-tests = [] -nats-tests = [] -all-tests = ["redis-tests", "nats-tests"] +all-tests = ["redis-tests"] [lints] workspace = true @@ -18,7 +17,8 @@ numaflow-models.workspace = true backoff.workspace = true axum.workspace = true axum-server.workspace = true -async-nats = "0.35.1" +bytes.workspace = true +rustls.workspace = true axum-macros = "0.4.1" hyper-util = { version = "0.1.6", features = ["client-legacy"] } serde = { version = "1.0.204", features = ["derive"] } @@ -26,7 +26,11 @@ serde_json = "1.0.120" tower = "0.4.13" tower-http = { version = "0.5.2", features = ["trace", "timeout"] } uuid = { version = "1.10.0", features = ["v4"] } -redis = { version = "0.26.0", features = ["tokio-comp", "aio", "connection-manager"] } +redis = { version = "0.26.0", features = [ + "tokio-comp", + "aio", + "connection-manager", +] } trait-variant = "0.1.2" chrono = { version = "0.4", features = ["serde"] } base64 = "0.22.1" @@ -35,3 +39,6 @@ parking_lot = "0.12.3" prometheus-client = "0.22.3" thiserror = "1.0.63" +[dev-dependencies] +reqwest = { workspace = true, features = ["json"] } +rustls.workspace = true diff --git a/rust/serving/src/app.rs b/rust/serving/src/app.rs index 56d4a33cb3..82ef1ef62e 100644 --- a/rust/serving/src/app.rs +++ b/rust/serving/src/app.rs @@ -1,9 +1,6 @@ use std::net::SocketAddr; -use std::sync::Arc; use std::time::Duration; -use async_nats::jetstream; -use async_nats::jetstream::Context; use axum::extract::{MatchedPath, State}; use axum::http::StatusCode; use axum::middleware::Next; @@ -25,12 +22,9 @@ use self::{ message_path::get_message_path, }; use crate::app::callback::store::Store; -use crate::app::tracker::MessageGraph; -use crate::config::JetStreamConfig; -use crate::pipeline::PipelineDCG; +use 
crate::metrics::capture_metrics; +use crate::AppState; use crate::Error::InitError; -use crate::Settings; -use crate::{app::callback::state::State as CallbackState, metrics::capture_metrics}; /// manage callbacks pub(crate) mod callback; @@ -41,7 +35,7 @@ mod jetstream_proxy; /// Return message path in response to UI requests mod message_path; // TODO: merge message_path and tracker mod response; -mod tracker; +pub(crate) mod tracker; /// Everything for numaserve starts here. The routing, middlewares, proxying, etc. // TODO @@ -49,16 +43,39 @@ mod tracker; // - [ ] outer fallback for /v1/direct /// Start the main application Router and the axum server. -pub(crate) async fn start_main_server( - settings: Arc, +pub(crate) async fn start_main_server( + app: AppState, tls_config: RustlsConfig, - pipeline_spec: PipelineDCG, -) -> crate::Result<()> { - let app_addr: SocketAddr = format!("0.0.0.0:{}", &settings.app_listen_port) +) -> crate::Result<()> +where + T: Clone + Send + Sync + Store + 'static, +{ + let app_addr: SocketAddr = format!("0.0.0.0:{}", &app.settings.app_listen_port) .parse() .map_err(|e| InitError(format!("{e:?}")))?; - let tid_header = settings.tid_header.clone(); + let handle = Handle::new(); + // Spawn a task to gracefully shutdown server. 
+ tokio::spawn(graceful_shutdown(handle.clone())); + + info!(?app_addr, "Starting application server"); + + let router = router_with_auth(app).await?; + + axum_server::bind_rustls(app_addr, tls_config) + .handle(handle) + .serve(router.into_make_service()) + .await + .map_err(|e| InitError(format!("Starting web server for metrics: {}", e)))?; + + Ok(()) +} + +pub(crate) async fn router_with_auth(app: AppState) -> crate::Result +where + T: Clone + Send + Sync + Store + 'static, +{ + let tid_header = app.settings.tid_header.clone(); let layers = ServiceBuilder::new() // Add tracing to all requests .layer( @@ -85,45 +102,14 @@ pub(crate) async fn start_main_server( .layer( // Graceful shutdown will wait for outstanding requests to complete. Add a timeout so // requests don't hang forever. - TimeoutLayer::new(Duration::from_secs(settings.drain_timeout_secs)), + TimeoutLayer::new(Duration::from_secs(app.settings.drain_timeout_secs)), ) // Add auth middleware to all user facing routes .layer(middleware::from_fn_with_state( - settings.api_auth_token.clone(), + app.settings.api_auth_token.clone(), auth_middleware, )); - - // Create the message graph from the pipeline spec and the redis store - let msg_graph = MessageGraph::from_pipeline(&pipeline_spec).map_err(|e| { - InitError(format!( - "Creating message graph from pipeline spec: {:?}", - e - )) - })?; - - // Create a redis store to store the callbacks and the custom responses - let redis_store = - callback::store::redisstore::RedisConnection::new(settings.redis.clone()).await?; - let state = CallbackState::new(msg_graph, redis_store).await?; - - let handle = Handle::new(); - // Spawn a task to gracefully shutdown server. 
- tokio::spawn(graceful_shutdown(handle.clone())); - - // Create a Jetstream context - let js_context = create_js_context(&settings.jetstream).await?; - - let router = setup_app(settings, js_context, state).await?.layer(layers); - - info!(?app_addr, "Starting application server"); - - axum_server::bind_rustls(app_addr, tls_config) - .handle(handle) - .serve(router.into_make_service()) - .await - .map_err(|e| InitError(format!("Starting web server for metrics: {}", e)))?; - - Ok(()) + Ok(setup_app(app).await?.layer(layers)) } // Gracefully shutdown the server on receiving SIGINT or SIGTERM @@ -154,30 +140,6 @@ async fn graceful_shutdown(handle: Handle) { handle.graceful_shutdown(Some(Duration::from_secs(30))); } -async fn create_js_context(js_config: &JetStreamConfig) -> crate::Result { - // Connect to Jetstream with user and password if they are set - let js_client = match js_config.auth.as_ref() { - Some(auth) => { - async_nats::connect_with_options( - &js_config.url, - async_nats::ConnectOptions::with_user_and_password( - auth.username.clone(), - auth.password.clone(), - ), - ) - .await - } - _ => async_nats::connect(&js_config.url).await, - } - .map_err(|e| { - InitError(format!( - "Connecting to jetstream server {}: {}", - &js_config.url, e - )) - })?; - Ok(jetstream::new(js_client)) -} - const PUBLISH_ENDPOINTS: [&str; 3] = [ "/v1/process/sync", "/v1/process/sync_serve", @@ -228,28 +190,14 @@ async fn auth_middleware( } } -#[derive(Clone)] -pub(crate) struct AppState { - pub(crate) settings: Arc, - pub(crate) callback_state: CallbackState, - pub(crate) context: Context, -} - async fn setup_app( - settings: Arc, - context: Context, - callback_state: CallbackState, + app: AppState, ) -> crate::Result { - let app_state = AppState { - settings, - callback_state: callback_state.clone(), - context: context.clone(), - }; let parent = Router::new() .route("/health", get(health_check)) .route("/livez", get(livez)) // Liveliness check .route("/readyz", get(readyz)) - 
.with_state(app_state.clone()); // Readiness check + .with_state(app.clone()); // Readiness check // a pool based client implementation for direct proxy, this client is cloneable. let client: direct_proxy::Client = @@ -260,9 +208,9 @@ async fn setup_app( let app = parent .nest( "/v1/direct", - direct_proxy(client, app_state.settings.upstream_addr.clone()), + direct_proxy(client, app.settings.upstream_addr.clone()), ) - .nest("/v1/process", routes(app_state).await?); + .nest("/v1/process", routes(app).await?); Ok(app) } @@ -278,13 +226,7 @@ async fn livez() -> impl IntoResponse { async fn readyz( State(app): State>, ) -> impl IntoResponse { - if app.callback_state.clone().ready().await - && app - .context - .get_stream(&app.settings.jetstream.stream) - .await - .is_ok() - { + if app.callback_state.clone().ready().await { StatusCode::NO_CONTENT } else { StatusCode::INTERNAL_SERVER_ERROR @@ -308,188 +250,100 @@ async fn routes( #[cfg(test)] mod tests { - use async_nats::jetstream::stream; + use std::sync::Arc; + use axum::http::StatusCode; - use tokio::time::{sleep, Duration}; use tower::ServiceExt; use super::*; use crate::app::callback::store::memstore::InMemoryStore; - use crate::config::generate_certs; + use crate::Settings; + use callback::state::State as CallbackState; + use tokio::sync::mpsc; + use tracker::MessageGraph; const PIPELINE_SPEC_ENCODED: &str = 
"eyJ2ZXJ0aWNlcyI6W3sibmFtZSI6ImluIiwic291cmNlIjp7InNlcnZpbmciOnsiYXV0aCI6bnVsbCwic2VydmljZSI6dHJ1ZSwibXNnSURIZWFkZXJLZXkiOiJYLU51bWFmbG93LUlkIiwic3RvcmUiOnsidXJsIjoicmVkaXM6Ly9yZWRpczo2Mzc5In19fSwiY29udGFpbmVyVGVtcGxhdGUiOnsicmVzb3VyY2VzIjp7fSwiaW1hZ2VQdWxsUG9saWN5IjoiTmV2ZXIiLCJlbnYiOlt7Im5hbWUiOiJSVVNUX0xPRyIsInZhbHVlIjoiZGVidWcifV19LCJzY2FsZSI6eyJtaW4iOjF9LCJ1cGRhdGVTdHJhdGVneSI6eyJ0eXBlIjoiUm9sbGluZ1VwZGF0ZSIsInJvbGxpbmdVcGRhdGUiOnsibWF4VW5hdmFpbGFibGUiOiIyNSUifX19LHsibmFtZSI6InBsYW5uZXIiLCJ1ZGYiOnsiY29udGFpbmVyIjp7ImltYWdlIjoiYXNjaWk6MC4xIiwiYXJncyI6WyJwbGFubmVyIl0sInJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn0sImJ1aWx0aW4iOm51bGwsImdyb3VwQnkiOm51bGx9LCJjb250YWluZXJUZW1wbGF0ZSI6eyJyZXNvdXJjZXMiOnt9LCJpbWFnZVB1bGxQb2xpY3kiOiJOZXZlciJ9LCJzY2FsZSI6eyJtaW4iOjF9LCJ1cGRhdGVTdHJhdGVneSI6eyJ0eXBlIjoiUm9sbGluZ1VwZGF0ZSIsInJvbGxpbmdVcGRhdGUiOnsibWF4VW5hdmFpbGFibGUiOiIyNSUifX19LHsibmFtZSI6InRpZ2VyIiwidWRmIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6ImFzY2lpOjAuMSIsImFyZ3MiOlsidGlnZXIiXSwicmVzb3VyY2VzIjp7fSwiaW1hZ2VQdWxsUG9saWN5IjoiTmV2ZXIifSwiYnVpbHRpbiI6bnVsbCwiZ3JvdXBCeSI6bnVsbH0sImNvbnRhaW5lclRlbXBsYXRlIjp7InJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn0sInNjYWxlIjp7Im1pbiI6MX0sInVwZGF0ZVN0cmF0ZWd5Ijp7InR5cGUiOiJSb2xsaW5nVXBkYXRlIiwicm9sbGluZ1VwZGF0ZSI6eyJtYXhVbmF2YWlsYWJsZSI6IjI1JSJ9fX0seyJuYW1lIjoiZG9nIiwidWRmIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6ImFzY2lpOjAuMSIsImFyZ3MiOlsiZG9nIl0sInJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn0sImJ1aWx0aW4iOm51bGwsImdyb3VwQnkiOm51bGx9LCJjb250YWluZXJUZW1wbGF0ZSI6eyJyZXNvdXJjZXMiOnt9LCJpbWFnZVB1bGxQb2xpY3kiOiJOZXZlciJ9LCJzY2FsZSI6eyJtaW4iOjF9LCJ1cGRhdGVTdHJhdGVneSI6eyJ0eXBlIjoiUm9sbGluZ1VwZGF0ZSIsInJvbGxpbmdVcGRhdGUiOnsibWF4VW5hdmFpbGFibGUiOiIyNSUifX19LHsibmFtZSI6ImVsZXBoYW50IiwidWRmIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6ImFzY2lpOjAuMSIsImFyZ3MiOlsiZWxlcGhhbnQiXSwicmVzb3VyY2VzIjp7fSwiaW1hZ2VQdWxsUG9saWN5IjoiTmV2ZXIifSwiYnVpbHRpbiI6bnVsbCwiZ3JvdXBCeSI6bnVsbH0sImNvbnRhaW5lclRlbXBsYXRlIjp7InJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmV
yIn0sInNjYWxlIjp7Im1pbiI6MX0sInVwZGF0ZVN0cmF0ZWd5Ijp7InR5cGUiOiJSb2xsaW5nVXBkYXRlIiwicm9sbGluZ1VwZGF0ZSI6eyJtYXhVbmF2YWlsYWJsZSI6IjI1JSJ9fX0seyJuYW1lIjoiYXNjaWlhcnQiLCJ1ZGYiOnsiY29udGFpbmVyIjp7ImltYWdlIjoiYXNjaWk6MC4xIiwiYXJncyI6WyJhc2NpaWFydCJdLCJyZXNvdXJjZXMiOnt9LCJpbWFnZVB1bGxQb2xpY3kiOiJOZXZlciJ9LCJidWlsdGluIjpudWxsLCJncm91cEJ5IjpudWxsfSwiY29udGFpbmVyVGVtcGxhdGUiOnsicmVzb3VyY2VzIjp7fSwiaW1hZ2VQdWxsUG9saWN5IjoiTmV2ZXIifSwic2NhbGUiOnsibWluIjoxfSwidXBkYXRlU3RyYXRlZ3kiOnsidHlwZSI6IlJvbGxpbmdVcGRhdGUiLCJyb2xsaW5nVXBkYXRlIjp7Im1heFVuYXZhaWxhYmxlIjoiMjUlIn19fSx7Im5hbWUiOiJzZXJ2ZS1zaW5rIiwic2luayI6eyJ1ZHNpbmsiOnsiY29udGFpbmVyIjp7ImltYWdlIjoic2VydmVzaW5rOjAuMSIsImVudiI6W3sibmFtZSI6Ik5VTUFGTE9XX0NBTExCQUNLX1VSTF9LRVkiLCJ2YWx1ZSI6IlgtTnVtYWZsb3ctQ2FsbGJhY2stVXJsIn0seyJuYW1lIjoiTlVNQUZMT1dfTVNHX0lEX0hFQURFUl9LRVkiLCJ2YWx1ZSI6IlgtTnVtYWZsb3ctSWQifV0sInJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn19LCJyZXRyeVN0cmF0ZWd5Ijp7fX0sImNvbnRhaW5lclRlbXBsYXRlIjp7InJlc291cmNlcyI6e30sImltYWdlUHVsbFBvbGljeSI6Ik5ldmVyIn0sInNjYWxlIjp7Im1pbiI6MX0sInVwZGF0ZVN0cmF0ZWd5Ijp7InR5cGUiOiJSb2xsaW5nVXBkYXRlIiwicm9sbGluZ1VwZGF0ZSI6eyJtYXhVbmF2YWlsYWJsZSI6IjI1JSJ9fX0seyJuYW1lIjoiZXJyb3Itc2luayIsInNpbmsiOnsidWRzaW5rIjp7ImNvbnRhaW5lciI6eyJpbWFnZSI6InNlcnZlc2luazowLjEiLCJlbnYiOlt7Im5hbWUiOiJOVU1BRkxPV19DQUxMQkFDS19VUkxfS0VZIiwidmFsdWUiOiJYLU51bWFmbG93LUNhbGxiYWNrLVVybCJ9LHsibmFtZSI6Ik5VTUFGTE9XX01TR19JRF9IRUFERVJfS0VZIiwidmFsdWUiOiJYLU51bWFmbG93LUlkIn1dLCJyZXNvdXJjZXMiOnt9LCJpbWFnZVB1bGxQb2xpY3kiOiJOZXZlciJ9fSwicmV0cnlTdHJhdGVneSI6e319LCJjb250YWluZXJUZW1wbGF0ZSI6eyJyZXNvdXJjZXMiOnt9LCJpbWFnZVB1bGxQb2xpY3kiOiJOZXZlciJ9LCJzY2FsZSI6eyJtaW4iOjF9LCJ1cGRhdGVTdHJhdGVneSI6eyJ0eXBlIjoiUm9sbGluZ1VwZGF0ZSIsInJvbGxpbmdVcGRhdGUiOnsibWF4VW5hdmFpbGFibGUiOiIyNSUifX19XSwiZWRnZXMiOlt7ImZyb20iOiJpbiIsInRvIjoicGxhbm5lciIsImNvbmRpdGlvbnMiOm51bGx9LHsiZnJvbSI6InBsYW5uZXIiLCJ0byI6ImFzY2lpYXJ0IiwiY29uZGl0aW9ucyI6eyJ0YWdzIjp7Im9wZXJhdG9yIjoib3IiLCJ2YWx1ZXMiOlsiYXNjaWlhcnQiXX19fSx7ImZyb20iOiJwbGFubmVyIiwidG8iOiJ0aWdlciIsImN
vbmRpdGlvbnMiOnsidGFncyI6eyJvcGVyYXRvciI6Im9yIiwidmFsdWVzIjpbInRpZ2VyIl19fX0seyJmcm9tIjoicGxhbm5lciIsInRvIjoiZG9nIiwiY29uZGl0aW9ucyI6eyJ0YWdzIjp7Im9wZXJhdG9yIjoib3IiLCJ2YWx1ZXMiOlsiZG9nIl19fX0seyJmcm9tIjoicGxhbm5lciIsInRvIjoiZWxlcGhhbnQiLCJjb25kaXRpb25zIjp7InRhZ3MiOnsib3BlcmF0b3IiOiJvciIsInZhbHVlcyI6WyJlbGVwaGFudCJdfX19LHsiZnJvbSI6InRpZ2VyIiwidG8iOiJzZXJ2ZS1zaW5rIiwiY29uZGl0aW9ucyI6bnVsbH0seyJmcm9tIjoiZG9nIiwidG8iOiJzZXJ2ZS1zaW5rIiwiY29uZGl0aW9ucyI6bnVsbH0seyJmcm9tIjoiZWxlcGhhbnQiLCJ0byI6InNlcnZlLXNpbmsiLCJjb25kaXRpb25zIjpudWxsfSx7ImZyb20iOiJhc2NpaWFydCIsInRvIjoic2VydmUtc2luayIsImNvbmRpdGlvbnMiOm51bGx9LHsiZnJvbSI6InBsYW5uZXIiLCJ0byI6ImVycm9yLXNpbmsiLCJjb25kaXRpb25zIjp7InRhZ3MiOnsib3BlcmF0b3IiOiJvciIsInZhbHVlcyI6WyJlcnJvciJdfX19XSwibGlmZWN5Y2xlIjp7fSwid2F0ZXJtYXJrIjp7fX0="; type Result = core::result::Result; type Error = Box; - #[tokio::test] - async fn test_start_main_server() -> Result<()> { - let (cert, key) = generate_certs()?; - - let tls_config = RustlsConfig::from_pem(cert.pem().into(), key.serialize_pem().into()) - .await - .unwrap(); - - let settings = Arc::new(Settings { - app_listen_port: 0, - ..Settings::default() - }); - - let server = tokio::spawn(async move { - let pipeline_spec = PIPELINE_SPEC_ENCODED.parse().unwrap(); - let result = start_main_server(settings, tls_config, pipeline_spec).await; - assert!(result.is_ok()) - }); - - // Give the server a little bit of time to start - sleep(Duration::from_millis(50)).await; - - // Stop the server - server.abort(); - Ok(()) - } - - #[cfg(feature = "all-tests")] #[tokio::test] async fn test_setup_app() -> Result<()> { let settings = Arc::new(Settings::default()); - let client = async_nats::connect(&settings.jetstream.url).await?; - let context = jetstream::new(client); - let stream_name = &settings.jetstream.stream; - - let stream = context - .get_or_create_stream(stream::Config { - name: stream_name.into(), - subjects: vec![stream_name.into()], - ..Default::default() - }) - .await; - - 
assert!(stream.is_ok()); let mem_store = InMemoryStore::new(); let pipeline_spec = PIPELINE_SPEC_ENCODED.parse().unwrap(); let msg_graph = MessageGraph::from_pipeline(&pipeline_spec)?; let callback_state = CallbackState::new(msg_graph, mem_store).await?; + let (tx, _) = mpsc::channel(10); + let app = AppState { + message: tx, + settings, + callback_state, + }; - let result = setup_app(settings, context, callback_state).await; + let result = setup_app(app).await; assert!(result.is_ok()); Ok(()) } - #[cfg(feature = "all-tests")] #[tokio::test] - async fn test_livez() -> Result<()> { + async fn test_health_check_endpoints() -> Result<()> { let settings = Arc::new(Settings::default()); - let client = async_nats::connect(&settings.jetstream.url).await?; - let context = jetstream::new(client); - let stream_name = &settings.jetstream.stream; - - let stream = context - .get_or_create_stream(stream::Config { - name: stream_name.into(), - subjects: vec![stream_name.into()], - ..Default::default() - }) - .await; - - assert!(stream.is_ok()); let mem_store = InMemoryStore::new(); - let pipeline_spec = PIPELINE_SPEC_ENCODED.parse().unwrap(); - let msg_graph = MessageGraph::from_pipeline(&pipeline_spec)?; - + let msg_graph = MessageGraph::from_pipeline(&settings.pipeline_spec)?; let callback_state = CallbackState::new(msg_graph, mem_store).await?; - let result = setup_app(settings, context, callback_state).await; + let (messages_tx, _messages_rx) = mpsc::channel(10); + let app = AppState { + message: messages_tx, + settings, + callback_state, + }; + + let router = setup_app(app).await.unwrap(); let request = Request::builder().uri("/livez").body(Body::empty())?; - - let response = result?.oneshot(request).await?; + let response = router.clone().oneshot(request).await?; assert_eq!(response.status(), StatusCode::NO_CONTENT); - Ok(()) - } - - #[cfg(feature = "all-tests")] - #[tokio::test] - async fn test_readyz() -> Result<()> { - let settings = Arc::new(Settings::default()); - let 
client = async_nats::connect(&settings.jetstream.url).await?; - let context = jetstream::new(client); - let stream_name = &settings.jetstream.stream; - - let stream = context - .get_or_create_stream(stream::Config { - name: stream_name.into(), - subjects: vec![stream_name.into()], - ..Default::default() - }) - .await; - - assert!(stream.is_ok()); - - let mem_store = InMemoryStore::new(); - let pipeline_spec = PIPELINE_SPEC_ENCODED.parse().unwrap(); - let msg_graph = MessageGraph::from_pipeline(&pipeline_spec)?; - - let callback_state = CallbackState::new(msg_graph, mem_store).await?; - - let result = setup_app(settings, context, callback_state).await; let request = Request::builder().uri("/readyz").body(Body::empty())?; - - let response = result.unwrap().oneshot(request).await?; + let response = router.clone().oneshot(request).await?; assert_eq!(response.status(), StatusCode::NO_CONTENT); - Ok(()) - } - #[tokio::test] - async fn test_health_check() { - let response = health_check().await; - let response = response.into_response(); + let request = Request::builder().uri("/health").body(Body::empty())?; + let response = router.clone().oneshot(request).await?; assert_eq!(response.status(), StatusCode::OK); + Ok(()) } - #[cfg(feature = "all-tests")] #[tokio::test] async fn test_auth_middleware() -> Result<()> { - let settings = Arc::new(Settings::default()); - let client = async_nats::connect(&settings.jetstream.url).await?; - let context = jetstream::new(client); - let stream_name = &settings.jetstream.stream; - - let stream = context - .get_or_create_stream(stream::Config { - name: stream_name.into(), - subjects: vec![stream_name.into()], - ..Default::default() - }) - .await; - - assert!(stream.is_ok()); + let settings = Settings { + api_auth_token: Some("test-token".into()), + ..Default::default() + }; let mem_store = InMemoryStore::new(); let pipeline_spec = PIPELINE_SPEC_ENCODED.parse().unwrap(); let msg_graph = MessageGraph::from_pipeline(&pipeline_spec)?; let 
callback_state = CallbackState::new(msg_graph, mem_store).await?; + let (messages_tx, _messages_rx) = mpsc::channel(10); + let app_state = AppState { - settings, + message: messages_tx, + settings: Arc::new(settings), callback_state, - context, }; - let app = Router::new() - .nest("/v1/process", routes(app_state).await.unwrap()) - .layer(middleware::from_fn_with_state( - Some("test_token".to_owned()), - auth_middleware, - )); - - let res = app + let router = router_with_auth(app_state).await.unwrap(); + let res = router .oneshot( axum::extract::Request::builder() + .method("POST") .uri("/v1/process/sync") .body(Body::empty()) .unwrap(), diff --git a/rust/serving/src/app/jetstream_proxy.rs b/rust/serving/src/app/jetstream_proxy.rs index af7d3917ff..6f61a0530f 100644 --- a/rust/serving/src/app/jetstream_proxy.rs +++ b/rust/serving/src/app/jetstream_proxy.rs @@ -1,6 +1,5 @@ -use std::{borrow::Borrow, sync::Arc}; +use std::{collections::HashMap, sync::Arc}; -use async_nats::{jetstream::Context, HeaderMap as JSHeaderMap}; use axum::{ body::Bytes, extract::State, @@ -9,12 +8,13 @@ use axum::{ routing::post, Json, Router, }; +use tokio::sync::{mpsc, oneshot}; use tracing::error; use uuid::Uuid; use super::{callback::store::Store, AppState}; -use crate::app::callback::state; use crate::app::response::{ApiError, ServeResponse}; +use crate::{app::callback::state, Message, MessageWrapper}; // TODO: // - [ ] better health check @@ -37,10 +37,9 @@ const NUMAFLOW_RESP_ARRAY_LEN: &str = "Numaflow-Array-Len"; const NUMAFLOW_RESP_ARRAY_IDX_LEN: &str = "Numaflow-Array-Index-Len"; struct ProxyState { + message: mpsc::Sender, tid_header: String, - context: Context, callback: state::State, - stream: String, callback_url: String, } @@ -48,10 +47,9 @@ pub(crate) async fn jetstream_proxy( state: AppState, ) -> crate::Result { let proxy_state = Arc::new(ProxyState { + message: state.message.clone(), tid_header: state.settings.tid_header.clone(), - context: state.context.clone(), callback: 
state.callback_state.clone(), - stream: state.settings.jetstream.stream.clone(), callback_url: format!( "https://{}:{}/v1/process/callback", state.settings.host_ip, state.settings.app_listen_port @@ -76,20 +74,34 @@ async fn sync_publish_serve( // Register the ID in the callback proxy state let notify = proxy_state.callback.clone().register(id.clone()); - if let Err(e) = publish_to_jetstream( - proxy_state.stream.clone(), - &proxy_state.callback_url, - headers, - body, - proxy_state.context.clone(), - proxy_state.tid_header.as_str(), - id.as_str(), - ) - .await - { + let mut msg_headers: HashMap = HashMap::new(); + for (key, value) in headers.iter() { + msg_headers.insert( + key.to_string(), + String::from_utf8_lossy(value.as_bytes()).to_string(), + ); + } + + let (tx, rx) = oneshot::channel(); + let message = MessageWrapper { + confirm_save: tx, + message: Message { + value: body, + id: id.clone(), + headers: msg_headers, + }, + }; + + proxy_state + .message + .send(message) + .await + .expect("Failed to send request payload to Serving channel"); + + if let Err(e) = rx.await { // Deregister the ID in the callback proxy state if writing to Jetstream fails let _ = proxy_state.callback.clone().deregister(&id).await; - error!(error = ?e, "Publishing message to Jetstream for sync serve request"); + error!(error = ?e, "Waiting for acknowledgement for message"); return Err(ApiError::BadGateway( "Failed to write message to Jetstream".to_string(), )); @@ -143,21 +155,30 @@ async fn sync_publish( ) -> Result, ApiError> { let id = extract_id_from_headers(&proxy_state.tid_header, &headers); + let mut msg_headers: HashMap = HashMap::new(); + for (key, value) in headers.iter() { + msg_headers.insert( + key.to_string(), + String::from_utf8_lossy(value.as_bytes()).to_string(), + ); + } + + let (tx, rx) = oneshot::channel(); + let message = MessageWrapper { + confirm_save: tx, + message: Message { + value: body, + id: id.clone(), + headers: msg_headers, + }, + }; + // Register the 
ID in the callback proxy state let notify = proxy_state.callback.clone().register(id.clone()); + proxy_state.message.send(message).await.unwrap(); // FIXME: - if let Err(e) = publish_to_jetstream( - proxy_state.stream.clone(), - &proxy_state.callback_url, - headers, - body, - proxy_state.context.clone(), - &proxy_state.tid_header, - id.as_str(), - ) - .await - { - // Deregister the ID in the callback proxy state if writing to Jetstream fails + if let Err(e) = rx.await { + // Deregister the ID in the callback proxy state if waiting for ack fails let _ = proxy_state.callback.clone().deregister(&id).await; error!(error = ?e, "Publishing message to Jetstream for sync request"); return Err(ApiError::BadGateway( @@ -192,62 +213,40 @@ async fn async_publish( body: Bytes, ) -> Result, ApiError> { let id = extract_id_from_headers(&proxy_state.tid_header, &headers); - let result = publish_to_jetstream( - proxy_state.stream.clone(), - &proxy_state.callback_url, - headers, - body, - proxy_state.context.clone(), - &proxy_state.tid_header, - id.as_str(), - ) - .await; + let mut msg_headers: HashMap = HashMap::new(); + for (key, value) in headers.iter() { + msg_headers.insert( + key.to_string(), + String::from_utf8_lossy(value.as_bytes()).to_string(), + ); + } - match result { + let (tx, rx) = oneshot::channel(); + let message = MessageWrapper { + confirm_save: tx, + message: Message { + value: body, + id: id.clone(), + headers: msg_headers, + }, + }; + + proxy_state.message.send(message).await.unwrap(); // FIXME: + match rx.await { Ok(_) => Ok(Json(ServeResponse::new( "Successfully published message".to_string(), id, StatusCode::OK, ))), Err(e) => { - error!(error = ?e, "Publishing message to Jetstream"); + error!(error = ?e, "Waiting for message save confirmation"); Err(ApiError::InternalServerError( - "Failed to publish message to Jetstream".to_string(), + "Failed to save message".to_string(), )) } } } -/// Write to JetStream and return the metadata. 
It is responsible for getting the ID from the header. -async fn publish_to_jetstream( - stream: String, - callback_url: &str, - headers: HeaderMap, - body: Bytes, - js_context: Context, - id_header: &str, - id_header_value: &str, -) -> Result<(), async_nats::Error> { - let mut js_headers = JSHeaderMap::new(); - - // pass in the HTTP headers as jetstream headers - for (k, v) in headers.iter() { - js_headers.append(k.as_ref(), String::from_utf8_lossy(v.as_bytes()).borrow()) - } - - js_headers.append(id_header, id_header_value); // Use the passed ID - js_headers.append(CALLBACK_URL_KEY, callback_url); - - js_context - .publish_with_headers(stream, js_headers, body) - .await - .map_err(|e| format!("Publishing message to stream: {e:?}"))? - .await - .map_err(|e| format!("Waiting for acknowledgement of published message: {e:?}"))?; - - Ok(()) -} - // extracts the ID from the headers, if not found, generates a new UUID fn extract_id_from_headers(tid_header: &str, headers: &HeaderMap) -> String { headers.get(tid_header).map_or_else( @@ -256,13 +255,10 @@ fn extract_id_from_headers(tid_header: &str, headers: &HeaderMap) -> String { ) } -#[cfg(feature = "nats-tests")] #[cfg(test)] mod tests { use std::sync::Arc; - use async_nats::jetstream; - use async_nats::jetstream::stream; use axum::body::{to_bytes, Body}; use axum::extract::Request; use axum::http::header::{CONTENT_LENGTH, CONTENT_TYPE}; @@ -303,46 +299,47 @@ mod tests { #[tokio::test] async fn test_async_publish() -> Result<(), Box> { - let settings = Settings::default(); - let settings = Arc::new(settings); - let client = async_nats::connect(&settings.jetstream.url) - .await - .map_err(|e| format!("Connecting to Jetstream: {:?}", e))?; - - let context = jetstream::new(client); - let id = "foobar"; - let stream_name = "default"; - - let _stream = context - .get_or_create_stream(stream::Config { - name: stream_name.into(), - subjects: vec![stream_name.into()], - ..Default::default() - }) - .await - .map_err(|e| 
format!("creating stream {}: {}", &settings.jetstream.url, e))?; + const ID_HEADER: &str = "X-Numaflow-ID"; + const ID_VALUE: &str = "foobar"; + let settings = Settings { + tid_header: ID_HEADER.into(), + ..Default::default() + }; let mock_store = MockStore {}; - let pipeline_spec: PipelineDCG = PIPELINE_SPEC_ENCODED.parse().unwrap(); - let msg_graph = MessageGraph::from_pipeline(&pipeline_spec) - .map_err(|e| format!("Failed to create message graph from pipeline spec: {:?}", e))?; - + let pipeline_spec = PIPELINE_SPEC_ENCODED.parse().unwrap(); + let msg_graph = MessageGraph::from_pipeline(&pipeline_spec)?; let callback_state = CallbackState::new(msg_graph, mock_store).await?; + + let (messages_tx, mut messages_rx) = mpsc::channel::(10); + let response_collector = tokio::spawn(async move { + let message = messages_rx.recv().await.unwrap(); + let MessageWrapper { + confirm_save, + message, + } = message; + confirm_save.send(()).unwrap(); + message + }); + let app_state = AppState { + message: messages_tx, + settings: Arc::new(settings), callback_state, - context, - settings, }; + let app = jetstream_proxy(app_state).await?; let res = Request::builder() .method("POST") .uri("/async") .header(CONTENT_TYPE, "text/plain") - .header("id", id) + .header(ID_HEADER, ID_VALUE) .body(Body::from("Test Message")) .unwrap(); let response = app.oneshot(res).await.unwrap(); + let message = response_collector.await.unwrap(); + assert_eq!(message.id, ID_VALUE); assert_eq!(response.status(), StatusCode::OK); let result = extract_response_from_body(response.into_body()).await; @@ -350,7 +347,7 @@ mod tests { result, json!({ "message": "Successfully published message", - "id": id, + "id": ID_VALUE, "code": 200 }) ); @@ -392,20 +389,12 @@ mod tests { #[tokio::test] async fn test_sync_publish() { - let settings = Settings::default(); - let client = async_nats::connect(&settings.jetstream.url).await.unwrap(); - let context = jetstream::new(client); - let id = "foobar"; - let stream_name = 
"sync_pub"; - - let _stream = context - .get_or_create_stream(stream::Config { - name: stream_name.into(), - subjects: vec![stream_name.into()], - ..Default::default() - }) - .await - .map_err(|e| format!("creating stream {}: {}", &settings.jetstream.url, e)); + const ID_HEADER: &str = "X-Numaflow-ID"; + const ID_VALUE: &str = "foobar"; + let settings = Settings { + tid_header: ID_HEADER.into(), + ..Default::default() + }; let mem_store = InMemoryStore::new(); let pipeline_spec: PipelineDCG = PIPELINE_SPEC_ENCODED.parse().unwrap(); @@ -413,16 +402,28 @@ mod tests { let mut callback_state = CallbackState::new(msg_graph, mem_store).await.unwrap(); - let settings = Arc::new(settings); + let (messages_tx, mut messages_rx) = mpsc::channel(10); + + let response_collector = tokio::spawn(async move { + let message = messages_rx.recv().await.unwrap(); + let MessageWrapper { + confirm_save, + message, + } = message; + confirm_save.send(()).unwrap(); + message + }); + let app_state = AppState { - settings, + message: messages_tx, + settings: Arc::new(settings), callback_state: callback_state.clone(), - context, }; + let app = jetstream_proxy(app_state).await.unwrap(); tokio::spawn(async move { - let cbs = create_default_callbacks(id); + let cbs = create_default_callbacks(ID_VALUE); let mut retries = 0; loop { match callback_state.insert_callback_requests(cbs.clone()).await { @@ -442,11 +443,13 @@ mod tests { .method("POST") .uri("/sync") .header("Content-Type", "text/plain") - .header("id", id) + .header(ID_HEADER, ID_VALUE) .body(Body::from("Test Message")) .unwrap(); let response = app.clone().oneshot(res).await.unwrap(); + let message = response_collector.await.unwrap(); + assert_eq!(message.id, ID_VALUE); assert_eq!(response.status(), StatusCode::OK); let result = extract_response_from_body(response.into_body()).await; @@ -454,7 +457,7 @@ mod tests { result, json!({ "message": "Successfully processed the message", - "id": id, + "id": ID_VALUE, "code": 200 }) ); @@ -462,20 
+465,8 @@ mod tests { #[tokio::test] async fn test_sync_publish_serve() { + const ID_VALUE: &str = "foobar"; let settings = Arc::new(Settings::default()); - let client = async_nats::connect(&settings.jetstream.url).await.unwrap(); - let context = jetstream::new(client); - let id = "foobar"; - let stream_name = "sync_serve_pub"; - - let _stream = context - .get_or_create_stream(stream::Config { - name: stream_name.into(), - subjects: vec![stream_name.into()], - ..Default::default() - }) - .await - .map_err(|e| format!("creating stream {}: {}", &settings.jetstream.url, e)); let mem_store = InMemoryStore::new(); let pipeline_spec: PipelineDCG = PIPELINE_SPEC_ENCODED.parse().unwrap(); @@ -483,16 +474,28 @@ mod tests { let mut callback_state = CallbackState::new(msg_graph, mem_store).await.unwrap(); + let (messages_tx, mut messages_rx) = mpsc::channel(10); + + let response_collector = tokio::spawn(async move { + let message = messages_rx.recv().await.unwrap(); + let MessageWrapper { + confirm_save, + message, + } = message; + confirm_save.send(()).unwrap(); + message + }); + let app_state = AppState { + message: messages_tx, settings, callback_state: callback_state.clone(), - context, }; let app = jetstream_proxy(app_state).await.unwrap(); // pipeline is in -> cat -> out, so we will have 3 callback requests - let cbs = create_default_callbacks(id); + let cbs = create_default_callbacks(ID_VALUE); // spawn a tokio task which will insert the callback requests to the callback state // if it fails, sleep for 10ms and retry @@ -531,11 +534,14 @@ mod tests { .method("POST") .uri("/sync_serve") .header("Content-Type", "text/plain") - .header("id", id) + .header("ID", ID_VALUE) .body(Body::from("Test Message")) .unwrap(); let response = app.oneshot(res).await.unwrap(); + let message = response_collector.await.unwrap(); + assert_eq!(message.id, ID_VALUE); + assert_eq!(response.status(), StatusCode::OK); let content_len = response.headers().get(CONTENT_LENGTH).unwrap(); diff --git 
a/rust/serving/src/config.rs b/rust/serving/src/config.rs index 7ba3778d00..16c2ee125c 100644 --- a/rust/serving/src/config.rs +++ b/rust/serving/src/config.rs @@ -1,71 +1,29 @@ use std::collections::HashMap; use std::fmt::Debug; -use async_nats::rustls; use base64::prelude::BASE64_STANDARD; use base64::Engine; use rcgen::{generate_simple_self_signed, Certificate, CertifiedKey, KeyPair}; use serde::{Deserialize, Serialize}; -use crate::Error::ParseConfig; +use crate::{ + pipeline::PipelineDCG, + Error::{self, ParseConfig}, +}; const ENV_NUMAFLOW_SERVING_SOURCE_OBJECT: &str = "NUMAFLOW_SERVING_SOURCE_OBJECT"; -const ENV_NUMAFLOW_SERVING_JETSTREAM_URL: &str = "NUMAFLOW_ISBSVC_JETSTREAM_URL"; -const ENV_NUMAFLOW_SERVING_JETSTREAM_STREAM: &str = "NUMAFLOW_SERVING_JETSTREAM_STREAM"; const ENV_NUMAFLOW_SERVING_STORE_TTL: &str = "NUMAFLOW_SERVING_STORE_TTL"; const ENV_NUMAFLOW_SERVING_HOST_IP: &str = "NUMAFLOW_SERVING_HOST_IP"; const ENV_NUMAFLOW_SERVING_APP_PORT: &str = "NUMAFLOW_SERVING_APP_LISTEN_PORT"; -const ENV_NUMAFLOW_SERVING_JETSTREAM_USER: &str = "NUMAFLOW_ISBSVC_JETSTREAM_USER"; -const ENV_NUMAFLOW_SERVING_JETSTREAM_PASSWORD: &str = "NUMAFLOW_ISBSVC_JETSTREAM_PASSWORD"; const ENV_NUMAFLOW_SERVING_AUTH_TOKEN: &str = "NUMAFLOW_SERVING_AUTH_TOKEN"; +const ENV_MIN_PIPELINE_SPEC: &str = "NUMAFLOW_SERVING_MIN_PIPELINE_SPEC"; pub fn generate_certs() -> std::result::Result<(Certificate, KeyPair), String> { - let _ = rustls::crypto::aws_lc_rs::default_provider().install_default(); let CertifiedKey { cert, key_pair } = generate_simple_self_signed(vec!["localhost".into()]) .map_err(|e| format!("Failed to generate cert {:?}", e))?; Ok((cert, key_pair)) } -#[derive(Deserialize, Clone, PartialEq)] -pub struct BasicAuth { - pub username: String, - pub password: String, -} - -impl Debug for BasicAuth { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let passwd_printable = if self.password.len() > 4 { - let passwd: String = self - .password - .chars() - 
.skip(self.password.len() - 2) - .take(2) - .collect(); - format!("***{}", passwd) - } else { - "*****".to_owned() - }; - write!(f, "{}:{}", self.username, passwd_printable) - } -} - -#[derive(Debug, Deserialize, Clone, PartialEq)] -pub struct JetStreamConfig { - pub stream: String, - pub url: String, - pub auth: Option, -} - -impl Default for JetStreamConfig { - fn default() -> Self { - Self { - stream: "default".to_owned(), - url: "localhost:4222".to_owned(), - auth: None, - } - } -} - #[derive(Debug, Deserialize, Clone, PartialEq)] pub struct RedisConfig { pub addr: String, @@ -95,11 +53,11 @@ pub struct Settings { pub metrics_server_listen_port: u16, pub upstream_addr: String, pub drain_timeout_secs: u64, - pub jetstream: JetStreamConfig, pub redis: RedisConfig, /// The IP address of the numaserve pod. This will be used to construct the value for X-Numaflow-Callback-Url header pub host_ip: String, pub api_auth_token: Option, + pub pipeline_spec: PipelineDCG, } impl Default for Settings { @@ -110,10 +68,10 @@ impl Default for Settings { metrics_server_listen_port: 3001, upstream_addr: "localhost:8888".to_owned(), drain_timeout_secs: 10, - jetstream: JetStreamConfig::default(), redis: RedisConfig::default(), host_ip: "127.0.0.1".to_owned(), api_auth_token: None, + pipeline_spec: Default::default(), } } } @@ -133,7 +91,7 @@ pub struct CallbackStorageConfig { /// This implementation is to load settings from env variables impl TryFrom> for Settings { - type Error = crate::Error; + type Error = Error; fn try_from(env_vars: HashMap) -> std::result::Result { let host_ip = env_vars .get(ENV_NUMAFLOW_SERVING_HOST_IP) @@ -144,19 +102,27 @@ impl TryFrom> for Settings { })? .to_owned(); + let pipeline_spec: PipelineDCG = env_vars + .get(ENV_MIN_PIPELINE_SPEC) + .ok_or_else(|| { + Error::ParseConfig(format!( + "Pipeline spec is not set using environment variable {ENV_MIN_PIPELINE_SPEC}" + )) + })? 
+ .parse() + .map_err(|e| { + Error::ParseConfig(format!( + "Parsing pipeline spec: {}: error={e:?}", + env_vars.get(ENV_MIN_PIPELINE_SPEC).unwrap() + )) + })?; + let mut settings = Settings { host_ip, + pipeline_spec, ..Default::default() }; - if let Some(jetstream_url) = env_vars.get(ENV_NUMAFLOW_SERVING_JETSTREAM_URL) { - settings.jetstream.url = jetstream_url.to_owned(); - } - - if let Some(jetstream_stream) = env_vars.get(ENV_NUMAFLOW_SERVING_JETSTREAM_STREAM) { - settings.jetstream.stream = jetstream_stream.to_owned(); - } - if let Some(api_auth_token) = env_vars.get(ENV_NUMAFLOW_SERVING_AUTH_TOKEN) { settings.api_auth_token = Some(api_auth_token.to_owned()); } @@ -169,17 +135,6 @@ impl TryFrom> for Settings { })?; } - // If username is set, the password also must be set - if let Some(username) = env_vars.get(ENV_NUMAFLOW_SERVING_JETSTREAM_USER) { - let Some(password) = env_vars.get(ENV_NUMAFLOW_SERVING_JETSTREAM_PASSWORD) else { - return Err(ParseConfig(format!("Env variable {ENV_NUMAFLOW_SERVING_JETSTREAM_USER} is set, but {ENV_NUMAFLOW_SERVING_JETSTREAM_PASSWORD} is not set"))); - }; - settings.jetstream.auth = Some(BasicAuth { - username: username.to_owned(), - password: password.to_owned(), - }); - } - // Update redis.ttl_secs from environment variable if let Some(ttl_secs) = env_vars.get(ENV_NUMAFLOW_SERVING_STORE_TTL) { let ttl_secs: u32 = ttl_secs.parse().map_err(|e| { @@ -213,17 +168,9 @@ impl TryFrom> for Settings { #[cfg(test)] mod tests { - use super::*; + use crate::pipeline::{Edge, Vertex}; - #[test] - fn test_basic_auth_debug_print() { - let auth = BasicAuth { - username: "js-auth-user".into(), - password: "js-auth-password".into(), - }; - let auth_debug = format!("{auth:?}"); - assert_eq!(auth_debug, "js-auth-user:***rd"); - } + use super::*; #[test] fn test_default_config() { @@ -234,8 +181,6 @@ mod tests { assert_eq!(settings.metrics_server_listen_port, 3001); assert_eq!(settings.upstream_addr, "localhost:8888"); 
assert_eq!(settings.drain_timeout_secs, 10); - assert_eq!(settings.jetstream.stream, "default"); - assert_eq!(settings.jetstream.url, "localhost:4222"); assert_eq!(settings.redis.addr, "redis://127.0.0.1:6379"); assert_eq!(settings.redis.max_tasks, 50); assert_eq!(settings.redis.retries, 5); @@ -246,21 +191,12 @@ mod tests { fn test_config_parse() { // Set up the environment variables let env_vars = [ - ( - ENV_NUMAFLOW_SERVING_JETSTREAM_URL, - "nats://isbsvc-default-js-svc.default.svc:4222", - ), - ( - ENV_NUMAFLOW_SERVING_JETSTREAM_STREAM, - "ascii-art-pipeline-in-serving-source", - ), - (ENV_NUMAFLOW_SERVING_JETSTREAM_USER, "js-auth-user"), - (ENV_NUMAFLOW_SERVING_JETSTREAM_PASSWORD, "js-user-password"), (ENV_NUMAFLOW_SERVING_HOST_IP, "10.2.3.5"), (ENV_NUMAFLOW_SERVING_AUTH_TOKEN, "api-auth-token"), (ENV_NUMAFLOW_SERVING_APP_PORT, "8443"), (ENV_NUMAFLOW_SERVING_STORE_TTL, "86400"), - (ENV_NUMAFLOW_SERVING_SOURCE_OBJECT, "eyJhdXRoIjpudWxsLCJzZXJ2aWNlIjp0cnVlLCJtc2dJREhlYWRlcktleSI6IlgtTnVtYWZsb3ctSWQiLCJzdG9yZSI6eyJ1cmwiOiJyZWRpczovL3JlZGlzOjYzNzkifX0=") + (ENV_NUMAFLOW_SERVING_SOURCE_OBJECT, "eyJhdXRoIjpudWxsLCJzZXJ2aWNlIjp0cnVlLCJtc2dJREhlYWRlcktleSI6IlgtTnVtYWZsb3ctSWQiLCJzdG9yZSI6eyJ1cmwiOiJyZWRpczovL3JlZGlzOjYzNzkifX0="), + (ENV_MIN_PIPELINE_SPEC, 
"eyJ2ZXJ0aWNlcyI6W3sibmFtZSI6InNlcnZpbmctaW4iLCJzb3VyY2UiOnsic2VydmluZyI6eyJhdXRoIjpudWxsLCJzZXJ2aWNlIjp0cnVlLCJtc2dJREhlYWRlcktleSI6IlgtTnVtYWZsb3ctSWQiLCJzdG9yZSI6eyJ1cmwiOiJyZWRpczovL3JlZGlzOjYzNzkifX19LCJjb250YWluZXJUZW1wbGF0ZSI6eyJyZXNvdXJjZXMiOnt9LCJpbWFnZVB1bGxQb2xpY3kiOiJOZXZlciIsImVudiI6W3sibmFtZSI6IlJVU1RfTE9HIiwidmFsdWUiOiJpbmZvIn1dfSwic2NhbGUiOnsibWluIjoxfSwidXBkYXRlU3RyYXRlZ3kiOnsidHlwZSI6IlJvbGxpbmdVcGRhdGUiLCJyb2xsaW5nVXBkYXRlIjp7Im1heFVuYXZhaWxhYmxlIjoiMjUlIn19fSx7Im5hbWUiOiJzZXJ2aW5nLXNpbmsiLCJzaW5rIjp7InVkc2luayI6eyJjb250YWluZXIiOnsiaW1hZ2UiOiJxdWF5LmlvL251bWFpby9udW1hZmxvdy1ycy9zaW5rLWxvZzpzdGFibGUiLCJlbnYiOlt7Im5hbWUiOiJOVU1BRkxPV19DQUxMQkFDS19VUkxfS0VZIiwidmFsdWUiOiJYLU51bWFmbG93LUNhbGxiYWNrLVVybCJ9LHsibmFtZSI6Ik5VTUFGTE9XX01TR19JRF9IRUFERVJfS0VZIiwidmFsdWUiOiJYLU51bWFmbG93LUlkIn1dLCJyZXNvdXJjZXMiOnt9fX0sInJldHJ5U3RyYXRlZ3kiOnt9fSwiY29udGFpbmVyVGVtcGxhdGUiOnsicmVzb3VyY2VzIjp7fSwiaW1hZ2VQdWxsUG9saWN5IjoiTmV2ZXIifSwic2NhbGUiOnsibWluIjoxfSwidXBkYXRlU3RyYXRlZ3kiOnsidHlwZSI6IlJvbGxpbmdVcGRhdGUiLCJyb2xsaW5nVXBkYXRlIjp7Im1heFVuYXZhaWxhYmxlIjoiMjUlIn19fV0sImVkZ2VzIjpbeyJmcm9tIjoic2VydmluZy1pbiIsInRvIjoic2VydmluZy1zaW5rIiwiY29uZGl0aW9ucyI6bnVsbH1dLCJsaWZlY3ljbGUiOnt9LCJ3YXRlcm1hcmsiOnt9fQ==") ]; // Call the config method @@ -277,14 +213,6 @@ mod tests { metrics_server_listen_port: 3001, upstream_addr: "localhost:8888".into(), drain_timeout_secs: 10, - jetstream: JetStreamConfig { - stream: "ascii-art-pipeline-in-serving-source".into(), - url: "nats://isbsvc-default-js-svc.default.svc:4222".into(), - auth: Some(BasicAuth { - username: "js-auth-user".into(), - password: "js-user-password".into(), - }), - }, redis: RedisConfig { addr: "redis://redis:6379".into(), max_tasks: 50, @@ -294,8 +222,22 @@ mod tests { }, host_ip: "10.2.3.5".into(), api_auth_token: Some("api-auth-token".into()), + pipeline_spec: PipelineDCG { + vertices: vec![ + Vertex { + name: "serving-in".into(), + }, + Vertex { + name: "serving-sink".into(), + }, + ], + edges: vec![Edge { + 
from: "serving-in".into(), + to: "serving-sink".into(), + conditions: None, + }], + }, }; - assert_eq!(settings, expected_config); } } diff --git a/rust/serving/src/error.rs b/rust/serving/src/error.rs index d53509c939..8d03c48234 100644 --- a/rust/serving/src/error.rs +++ b/rust/serving/src/error.rs @@ -1,4 +1,5 @@ use thiserror::Error; +use tokio::sync::oneshot; // TODO: introduce module level error handling @@ -44,6 +45,12 @@ pub enum Error { #[error("Init Error - {0}")] InitError(String), + #[error("Failed to receive message from channel. Actor task is terminated: {0:?}")] + ActorTaskTerminated(oneshot::error::RecvError), + + #[error("Serving source error - {0}")] + Source(String), + #[error("Other Error - {0}")] // catch-all variant for now Other(String), diff --git a/rust/serving/src/lib.rs b/rust/serving/src/lib.rs index 796313bdb2..001065ddfe 100644 --- a/rust/serving/src/lib.rs +++ b/rust/serving/src/lib.rs @@ -1,12 +1,13 @@ -use std::env; use std::net::SocketAddr; use std::sync::Arc; +use crate::app::callback::state::State as CallbackState; +use app::callback::store::Store; use axum_server::tls_rustls::RustlsConfig; +use tokio::sync::mpsc; use tracing::info; pub use self::error::{Error, Result}; -use self::pipeline::PipelineDCG; use crate::app::start_main_server; use crate::config::generate_certs; use crate::metrics::start_https_metrics_server; @@ -21,41 +22,43 @@ mod error; mod metrics; mod pipeline; -const ENV_MIN_PIPELINE_SPEC: &str = "NUMAFLOW_SERVING_MIN_PIPELINE_SPEC"; +pub mod source; +use crate::source::MessageWrapper; +pub use source::{Message, ServingSource}; + +#[derive(Clone)] +pub(crate) struct AppState { + pub(crate) message: mpsc::Sender, + pub(crate) settings: Arc, + pub(crate) callback_state: CallbackState, +} + +pub(crate) async fn serve( + app: AppState, +) -> std::result::Result<(), Box> +where + T: Clone + Send + Sync + Store + 'static, +{ + // Setup the CryptoProvider (controls core cryptography used by rustls) for the process + let 
_ = rustls::crypto::aws_lc_rs::default_provider().install_default(); -pub async fn serve( - settings: Arc, -) -> std::result::Result<(), Box> { let (cert, key) = generate_certs()?; let tls_config = RustlsConfig::from_pem(cert.pem().into(), key.serialize_pem().into()) .await .map_err(|e| format!("Failed to create tls config {:?}", e))?; - // TODO: Move all env variables into one place. Some env variables are loaded when Settings is initialized - let pipeline_spec: PipelineDCG = env::var(ENV_MIN_PIPELINE_SPEC) - .map_err(|_| { - format!("Pipeline spec is not set using environment variable {ENV_MIN_PIPELINE_SPEC}") - })? - .parse() - .map_err(|e| { - format!( - "Parsing pipeline spec: {}: error={e:?}", - env::var(ENV_MIN_PIPELINE_SPEC).unwrap() - ) - })?; - - info!(config = ?settings, ?pipeline_spec, "Starting server with config and pipeline spec"); + info!(config = ?app.settings, "Starting server with config and pipeline spec"); // Start the metrics server, which serves the prometheus metrics. let metrics_addr: SocketAddr = - format!("0.0.0.0:{}", &settings.metrics_server_listen_port).parse()?; + format!("0.0.0.0:{}", &app.settings.metrics_server_listen_port).parse()?; let metrics_server_handle = tokio::spawn(start_https_metrics_server(metrics_addr, tls_config.clone())); // Start the main server, which serves the application. - let app_server_handle = tokio::spawn(start_main_server(settings, tls_config, pipeline_spec)); + let app_server_handle = tokio::spawn(start_main_server(app, tls_config)); // TODO: is try_join the best? 
we need to short-circuit at the first failure tokio::try_join!(flatten(app_server_handle), flatten(metrics_server_handle))?; diff --git a/rust/serving/src/metrics.rs b/rust/serving/src/metrics.rs index 4c64760d4d..a605cc9988 100644 --- a/rust/serving/src/metrics.rs +++ b/rust/serving/src/metrics.rs @@ -175,6 +175,8 @@ mod tests { #[tokio::test] async fn test_start_metrics_server() -> Result<()> { + // Setup the CryptoProvider (controls core cryptography used by rustls) for the process + let _ = rustls::crypto::aws_lc_rs::default_provider().install_default(); let (cert, key) = generate_certs()?; let tls_config = RustlsConfig::from_pem(cert.pem().into(), key.serialize_pem().into()) diff --git a/rust/serving/src/pipeline.rs b/rust/serving/src/pipeline.rs index d782e3d73a..cb491d7d88 100644 --- a/rust/serving/src/pipeline.rs +++ b/rust/serving/src/pipeline.rs @@ -10,7 +10,7 @@ use crate::Error::ParseConfig; // OperatorType is an enum that contains the types of operators // that can be used in the conditions for the edge. 
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] -pub enum OperatorType { +pub(crate) enum OperatorType { #[serde(rename = "and")] And, #[serde(rename = "or")] @@ -42,40 +42,37 @@ impl From for OperatorType { } // Tag is a struct that contains the information about the tags for the edge -#[cfg_attr(test, derive(PartialEq))] -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct Tag { - pub operator: Option, - pub values: Vec, +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] +pub(crate) struct Tag { + pub(crate) operator: Option, + pub(crate) values: Vec, } // Conditions is a struct that contains the information about the conditions for the edge -#[cfg_attr(test, derive(PartialEq))] -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct Conditions { - pub tags: Option, +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] +pub(crate) struct Conditions { + pub(crate) tags: Option, } // Edge is a struct that contains the information about the edge in the pipeline. 
-#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct Edge { - pub from: String, - pub to: String, - pub conditions: Option, +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] +pub(crate) struct Edge { + pub(crate) from: String, + pub(crate) to: String, + pub(crate) conditions: Option, } /// DCG (directed compute graph) of the pipeline with minimal information build using vertices and edges /// from the pipeline spec -#[derive(Serialize, Deserialize, Debug, Clone)] -#[serde()] -pub struct PipelineDCG { - pub vertices: Vec, - pub edges: Vec, +#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq)] +pub(crate) struct PipelineDCG { + pub(crate) vertices: Vec, + pub(crate) edges: Vec, } -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct Vertex { - pub name: String, +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] +pub(crate) struct Vertex { + pub(crate) name: String, } impl FromStr for PipelineDCG { diff --git a/rust/serving/src/source.rs b/rust/serving/src/source.rs new file mode 100644 index 0000000000..d038179672 --- /dev/null +++ b/rust/serving/src/source.rs @@ -0,0 +1,292 @@ +use std::collections::HashMap; +use std::sync::Arc; +use std::time::Duration; + +use bytes::Bytes; +use tokio::sync::{mpsc, oneshot}; +use tokio::time::Instant; + +use crate::app::callback::state::State as CallbackState; +use crate::app::callback::store::redisstore::RedisConnection; +use crate::app::tracker::MessageGraph; +use crate::Settings; +use crate::{Error, Result}; + +/// [Message] with a oneshot for notifying when the message has been completed processed. +pub(crate) struct MessageWrapper { + // TODO: this might be more that saving to ISB. + pub(crate) confirm_save: oneshot::Sender<()>, + pub(crate) message: Message, +} + +/// Serving payload passed on to Numaflow. 
+#[derive(Debug)] +pub struct Message { + pub value: Bytes, + pub id: String, + pub headers: HashMap, +} + +enum ActorMessage { + Read { + batch_size: usize, + timeout_at: Instant, + reply_to: oneshot::Sender>>, + }, + Ack { + offsets: Vec, + reply_to: oneshot::Sender>, + }, +} + +/// Background actor that starts Axum server for accepting HTTP requests. +struct ServingSourceActor { + /// The HTTP handlers will put the message received from the payload to this channel + messages: mpsc::Receiver, + /// Channel for the actor handle to communicate with this actor + handler_rx: mpsc::Receiver, + /// Mapping from request's ID header (usually `X-Numaflow-Id` header) to a channel. + /// This sending a message on this channel notifies the HTTP handler function that the message + /// has been successfully processed. + tracker: HashMap>, + vertex_replica_id: u16, +} + +impl ServingSourceActor { + async fn start( + settings: Arc, + handler_rx: mpsc::Receiver, + request_channel_buffer_size: usize, + vertex_replica_id: u16, + ) -> Result<()> { + // Channel to which HTTP handlers will send request payload + let (messages_tx, messages_rx) = mpsc::channel(request_channel_buffer_size); + // Create a redis store to store the callbacks and the custom responses + let redis_store = RedisConnection::new(settings.redis.clone()).await?; + // Create the message graph from the pipeline spec and the redis store + let msg_graph = MessageGraph::from_pipeline(&settings.pipeline_spec).map_err(|e| { + Error::InitError(format!( + "Creating message graph from pipeline spec: {:?}", + e + )) + })?; + let callback_state = CallbackState::new(msg_graph, redis_store).await?; + + tokio::spawn(async move { + let mut serving_actor = ServingSourceActor { + messages: messages_rx, + handler_rx, + tracker: HashMap::new(), + vertex_replica_id, + }; + serving_actor.run().await; + }); + let app = crate::AppState { + message: messages_tx, + settings, + callback_state, + }; + tokio::spawn(async move { + 
crate::serve(app).await.unwrap(); + }); + Ok(()) + } + + async fn run(&mut self) { + while let Some(msg) = self.handler_rx.recv().await { + self.handle_message(msg).await; + } + } + + async fn handle_message(&mut self, actor_msg: ActorMessage) { + match actor_msg { + ActorMessage::Read { + batch_size, + timeout_at, + reply_to, + } => { + let messages = self.read(batch_size, timeout_at).await; + let _ = reply_to.send(messages); + } + ActorMessage::Ack { offsets, reply_to } => { + let status = self.ack(offsets).await; + let _ = reply_to.send(status); + } + } + } + + async fn read(&mut self, count: usize, timeout_at: Instant) -> Result> { + let mut messages = vec![]; + loop { + // Stop if the read timeout has reached or if we have collected the requested number of messages + if messages.len() >= count || Instant::now() >= timeout_at { + break; + } + let next_msg = self.messages.recv(); + let message = match tokio::time::timeout_at(timeout_at, next_msg).await { + Ok(Some(msg)) => msg, + Ok(None) => { + // If we have collected at-least one message, we return those messages. + // The error will happen on all the subsequent read attempts too. 
+ if messages.is_empty() { + return Err(Error::Other( + "Sending half of the Serving channel has disconnected".into(), + )); + } + tracing::error!("Sending half of the Serving channel has disconnected"); + return Ok(messages); + } + Err(_) => return Ok(messages), + }; + let MessageWrapper { + confirm_save, + message, + } = message; + + self.tracker.insert(message.id.clone(), confirm_save); + messages.push(message); + } + Ok(messages) + } + + async fn ack(&mut self, offsets: Vec) -> Result<()> { + let offset_suffix = format!("-{}", self.vertex_replica_id); + for offset in offsets { + let offset = offset.strip_suffix(&offset_suffix).ok_or_else(|| { + Error::Source(format!("offset does not end with '{}'", &offset_suffix)) + })?; + let confirm_save_tx = self + .tracker + .remove(offset) + .ok_or_else(|| Error::Source("offset was not found in the tracker".into()))?; + confirm_save_tx + .send(()) + .map_err(|e| Error::Source(format!("Sending on confirm_save channel: {e:?}")))?; + } + Ok(()) + } +} + +#[derive(Clone)] +pub struct ServingSource { + batch_size: usize, + // timeout for each batch read request + timeout: Duration, + actor_tx: mpsc::Sender, +} + +impl ServingSource { + pub async fn new( + settings: Arc, + batch_size: usize, + timeout: Duration, + vertex_replica_id: u16, + ) -> Result { + let (actor_tx, actor_rx) = mpsc::channel(2 * batch_size); + ServingSourceActor::start(settings, actor_rx, 2 * batch_size, vertex_replica_id).await?; + Ok(Self { + batch_size, + timeout, + actor_tx, + }) + } + + pub async fn read_messages(&self) -> Result> { + let start = Instant::now(); + let (tx, rx) = oneshot::channel(); + let actor_msg = ActorMessage::Read { + reply_to: tx, + batch_size: self.batch_size, + timeout_at: Instant::now() + self.timeout, + }; + let _ = self.actor_tx.send(actor_msg).await; + let messages = rx.await.map_err(Error::ActorTaskTerminated)??; + tracing::debug!( + count = messages.len(), + requested_count = self.batch_size, + time_taken_ms = 
start.elapsed().as_millis(), + "Got messages from Serving source" + ); + Ok(messages) + } + + pub async fn ack_messages(&self, offsets: Vec) -> Result<()> { + let (tx, rx) = oneshot::channel(); + let actor_msg = ActorMessage::Ack { + offsets, + reply_to: tx, + }; + let _ = self.actor_tx.send(actor_msg).await; + rx.await.map_err(Error::ActorTaskTerminated)??; + Ok(()) + } +} + +#[cfg(feature = "redis-tests")] +#[cfg(test)] +mod tests { + use std::{sync::Arc, time::Duration}; + + use crate::Settings; + + use super::ServingSource; + + type Result = std::result::Result>; + #[tokio::test] + async fn test_serving_source() -> Result<()> { + let settings = Arc::new(Settings::default()); + let serving_source = + ServingSource::new(Arc::clone(&settings), 10, Duration::from_millis(1), 0).await?; + + let client = reqwest::Client::builder() + .timeout(Duration::from_secs(2)) + .danger_accept_invalid_certs(true) + .build() + .unwrap(); + + // Wait for the server + for _ in 0..10 { + let resp = client + .get(format!( + "https://localhost:{}/livez", + settings.app_listen_port + )) + .send() + .await; + if resp.is_ok() { + break; + } + tokio::time::sleep(Duration::from_millis(10)).await; + } + + tokio::spawn(async move { + loop { + tokio::time::sleep(Duration::from_millis(10)).await; + let mut messages = serving_source.read_messages().await.unwrap(); + if messages.is_empty() { + // Server has not received any requests yet + continue; + } + assert_eq!(messages.len(), 1); + let msg = messages.remove(0); + serving_source + .ack_messages(vec![format!("{}-0", msg.id)]) + .await + .unwrap(); + break; + } + }); + + let resp = client + .post(format!( + "https://localhost:{}/v1/process/async", + settings.app_listen_port + )) + .json("test-payload") + .send() + .await?; + + assert!(resp.status().is_success()); + Ok(()) + } +} From 5f5af1baddce04d1e93b5ef6e1976ac1828d1a85 Mon Sep 17 00:00:00 2001 From: Adarsh Jain Date: Wed, 8 Jan 2025 15:56:21 +0530 Subject: [PATCH 183/188] feat: make unit 
and display name configurable (#2269) Signed-off-by: adarsh0728 --- .../namespaced-numaflow-server.yaml | 20 +++ config/advanced-install/numaflow-server.yaml | 20 +++ .../numaflow-server-metrics-proxy-config.yaml | 20 +++ config/install.yaml | 20 +++ config/namespace-install.yaml | 20 +++ server/apis/v1/handler.go | 2 +- server/apis/v1/response_metrics.go | 4 +- server/apis/v1/response_metrics_discovery.go | 14 +- .../PodDetails/partials/Metrics/index.tsx | 2 +- .../Metrics/partials/LineChart/index.tsx | 151 ++++++++++++++---- .../partials/Metrics/utils/constants.ts | 12 +- 11 files changed, 243 insertions(+), 42 deletions(-) diff --git a/config/advanced-install/namespaced-numaflow-server.yaml b/config/advanced-install/namespaced-numaflow-server.yaml index 7cb350b073..afad0b4972 100644 --- a/config/advanced-install/namespaced-numaflow-server.yaml +++ b/config/advanced-install/namespaced-numaflow-server.yaml @@ -156,6 +156,9 @@ data: required: false metrics: - metric_name: vertex_pending_messages + display_name: Vertex Pending Messages + # set "Units" or unset for default behaviour + # unit: Units required_filters: - namespace - pipeline @@ -189,6 +192,9 @@ data: required: false metrics: - metric_name: monovtx_pending + display_name: Mono Vertex Pending Messages + # set "Units" or unset for default behaviour + # unit: Units required_filters: - namespace - mvtx_name @@ -225,6 +231,10 @@ data: required: false metrics: - metric_name: monovtx_processing_time_bucket + display_name: Mono Vertex Processing Time Latency + # set "Units" or unset for default behaviour otherwise set "s" or "ms" for latency metrics + # Note: latency values are in μs + # unit: s required_filters: - namespace - mvtx_name @@ -235,6 +245,10 @@ data: - name: pod required: false - metric_name: monovtx_sink_time_bucket + display_name: Mono Vertex Sink Write Time Latency + # set "Units" or unset for default behaviour otherwise set "s" or "ms" for latency metrics + # Note: latency values are in μs + # 
unit: ms required_filters: - namespace - mvtx_name @@ -259,6 +273,9 @@ data: required: false metrics: - metric_name: forwarder_data_read_total + display_name: Vertex Read Processing Rate + # set "Units" or unset for default behaviour + # unit: Units required_filters: - namespace - pipeline @@ -284,6 +301,9 @@ data: required: false metrics: - metric_name: monovtx_read_total + display_name: Mono Vertex Read Processing Rate + # set "Units" or unset for default behaviour + # unit: Units required_filters: - namespace - mvtx_name diff --git a/config/advanced-install/numaflow-server.yaml b/config/advanced-install/numaflow-server.yaml index fcb283f11e..ffce78124d 100644 --- a/config/advanced-install/numaflow-server.yaml +++ b/config/advanced-install/numaflow-server.yaml @@ -163,6 +163,9 @@ data: required: false metrics: - metric_name: vertex_pending_messages + display_name: Vertex Pending Messages + # set "Units" or unset for default behaviour + # unit: Units required_filters: - namespace - pipeline @@ -196,6 +199,9 @@ data: required: false metrics: - metric_name: monovtx_pending + display_name: Mono Vertex Pending Messages + # set "Units" or unset for default behaviour + # unit: Units required_filters: - namespace - mvtx_name @@ -232,6 +238,10 @@ data: required: false metrics: - metric_name: monovtx_processing_time_bucket + display_name: Mono Vertex Processing Time Latency + # set "Units" or unset for default behaviour otherwise set "s" or "ms" for latency metrics + # Note: latency values are in μs + # unit: s required_filters: - namespace - mvtx_name @@ -242,6 +252,10 @@ data: - name: pod required: false - metric_name: monovtx_sink_time_bucket + display_name: Mono Vertex Sink Write Time Latency + # set "Units" or unset for default behaviour otherwise set "s" or "ms" for latency metrics + # Note: latency values are in μs + # unit: ms required_filters: - namespace - mvtx_name @@ -266,6 +280,9 @@ data: required: false metrics: - metric_name: forwarder_data_read_total + 
display_name: Vertex Read Processing Rate + # set "Units" or unset for default behaviour + # unit: Units required_filters: - namespace - pipeline @@ -291,6 +308,9 @@ data: required: false metrics: - metric_name: monovtx_read_total + display_name: Mono Vertex Read Processing Rate + # set "Units" or unset for default behaviour + # unit: Units required_filters: - namespace - mvtx_name diff --git a/config/base/numaflow-server/numaflow-server-metrics-proxy-config.yaml b/config/base/numaflow-server/numaflow-server-metrics-proxy-config.yaml index fe634f5f17..9d1abf171e 100644 --- a/config/base/numaflow-server/numaflow-server-metrics-proxy-config.yaml +++ b/config/base/numaflow-server/numaflow-server-metrics-proxy-config.yaml @@ -22,6 +22,9 @@ data: required: false metrics: - metric_name: vertex_pending_messages + display_name: Vertex Pending Messages + # set "Units" or unset for default behaviour + # unit: Units required_filters: - namespace - pipeline @@ -55,6 +58,9 @@ data: required: false metrics: - metric_name: monovtx_pending + display_name: Mono Vertex Pending Messages + # set "Units" or unset for default behaviour + # unit: Units required_filters: - namespace - mvtx_name @@ -91,6 +97,10 @@ data: required: false metrics: - metric_name: monovtx_processing_time_bucket + display_name: Mono Vertex Processing Time Latency + # set "Units" or unset for default behaviour otherwise set "s" or "ms" for latency metrics + # Note: latency values are in μs + # unit: s required_filters: - namespace - mvtx_name @@ -101,6 +111,10 @@ data: - name: pod required: false - metric_name: monovtx_sink_time_bucket + display_name: Mono Vertex Sink Write Time Latency + # set "Units" or unset for default behaviour otherwise set "s" or "ms" for latency metrics + # Note: latency values are in μs + # unit: ms required_filters: - namespace - mvtx_name @@ -125,6 +139,9 @@ data: required: false metrics: - metric_name: forwarder_data_read_total + display_name: Vertex Read Processing Rate + # set 
"Units" or unset for default behaviour + # unit: Units required_filters: - namespace - pipeline @@ -150,6 +167,9 @@ data: required: false metrics: - metric_name: monovtx_read_total + display_name: Mono Vertex Read Processing Rate + # set "Units" or unset for default behaviour + # unit: Units required_filters: - namespace - mvtx_name diff --git a/config/install.yaml b/config/install.yaml index 69fbd4ca4d..6410435bef 100644 --- a/config/install.yaml +++ b/config/install.yaml @@ -28576,6 +28576,9 @@ data: required: false metrics: - metric_name: vertex_pending_messages + display_name: Vertex Pending Messages + # set "Units" or unset for default behaviour + # unit: Units required_filters: - namespace - pipeline @@ -28609,6 +28612,9 @@ data: required: false metrics: - metric_name: monovtx_pending + display_name: Mono Vertex Pending Messages + # set "Units" or unset for default behaviour + # unit: Units required_filters: - namespace - mvtx_name @@ -28645,6 +28651,10 @@ data: required: false metrics: - metric_name: monovtx_processing_time_bucket + display_name: Mono Vertex Processing Time Latency + # set "Units" or unset for default behaviour otherwise set "s" or "ms" for latency metrics + # Note: latency values are in μs + # unit: s required_filters: - namespace - mvtx_name @@ -28655,6 +28665,10 @@ data: - name: pod required: false - metric_name: monovtx_sink_time_bucket + display_name: Mono Vertex Sink Write Time Latency + # set "Units" or unset for default behaviour otherwise set "s" or "ms" for latency metrics + # Note: latency values are in μs + # unit: ms required_filters: - namespace - mvtx_name @@ -28679,6 +28693,9 @@ data: required: false metrics: - metric_name: forwarder_data_read_total + display_name: Vertex Read Processing Rate + # set "Units" or unset for default behaviour + # unit: Units required_filters: - namespace - pipeline @@ -28704,6 +28721,9 @@ data: required: false metrics: - metric_name: monovtx_read_total + display_name: Mono Vertex Read Processing 
Rate + # set "Units" or unset for default behaviour + # unit: Units required_filters: - namespace - mvtx_name diff --git a/config/namespace-install.yaml b/config/namespace-install.yaml index 810422a7cc..25de004c48 100644 --- a/config/namespace-install.yaml +++ b/config/namespace-install.yaml @@ -28464,6 +28464,9 @@ data: required: false metrics: - metric_name: vertex_pending_messages + display_name: Vertex Pending Messages + # set "Units" or unset for default behaviour + # unit: Units required_filters: - namespace - pipeline @@ -28497,6 +28500,9 @@ data: required: false metrics: - metric_name: monovtx_pending + display_name: Mono Vertex Pending Messages + # set "Units" or unset for default behaviour + # unit: Units required_filters: - namespace - mvtx_name @@ -28533,6 +28539,10 @@ data: required: false metrics: - metric_name: monovtx_processing_time_bucket + display_name: Mono Vertex Processing Time Latency + # set "Units" or unset for default behaviour otherwise set "s" or "ms" for latency metrics + # Note: latency values are in μs + # unit: s required_filters: - namespace - mvtx_name @@ -28543,6 +28553,10 @@ data: - name: pod required: false - metric_name: monovtx_sink_time_bucket + display_name: Mono Vertex Sink Write Time Latency + # set "Units" or unset for default behaviour otherwise set "s" or "ms" for latency metrics + # Note: latency values are in μs + # unit: ms required_filters: - namespace - mvtx_name @@ -28567,6 +28581,9 @@ data: required: false metrics: - metric_name: forwarder_data_read_total + display_name: Vertex Read Processing Rate + # set "Units" or unset for default behaviour + # unit: Units required_filters: - namespace - pipeline @@ -28592,6 +28609,9 @@ data: required: false metrics: - metric_name: monovtx_read_total + display_name: Mono Vertex Read Processing Rate + # set "Units" or unset for default behaviour + # unit: Units required_filters: - namespace - mvtx_name diff --git a/server/apis/v1/handler.go b/server/apis/v1/handler.go index 
17f0322c2a..8f50136b11 100644 --- a/server/apis/v1/handler.go +++ b/server/apis/v1/handler.go @@ -1340,7 +1340,7 @@ func (h *handler) DiscoverMetrics(c *gin.Context) { }) } - discoveredMetrics = append(discoveredMetrics, NewDiscoveryResponse(metric.Name, dimensionData)) + discoveredMetrics = append(discoveredMetrics, NewDiscoveryResponse(metric.Name, metric.DisplayName, metric.Unit, dimensionData)) } } } diff --git a/server/apis/v1/response_metrics.go b/server/apis/v1/response_metrics.go index 3b4a26507d..685e9f0d4e 100644 --- a/server/apis/v1/response_metrics.go +++ b/server/apis/v1/response_metrics.go @@ -32,7 +32,9 @@ type Dimension struct { } type Metric struct { - Name string `yaml:"metric_name"` + Name string `yaml:"metric_name"` + DisplayName string `yaml:"display_name"` + Unit string `yaml:"unit"` // array of required labels. Filters []string `yaml:"required_filters"` //array of dimensions and their data diff --git a/server/apis/v1/response_metrics_discovery.go b/server/apis/v1/response_metrics_discovery.go index ef2a160e1f..9c0e09acc8 100644 --- a/server/apis/v1/response_metrics_discovery.go +++ b/server/apis/v1/response_metrics_discovery.go @@ -7,17 +7,21 @@ type Dimensions struct { } type DiscoveryResponse struct { - MetricName string `json:"metric_name"` - Dimensions []Dimensions `json:"dimensions"` + MetricName string `json:"metric_name"` + DisplayName string `json:"display_name"` + Unit string `json:"unit"` + Dimensions []Dimensions `json:"dimensions"` } // MetricsDiscoveryResponse is a list of DiscoveryResponse type MetricsDiscoveryResponse []DiscoveryResponse // NewDiscoveryResponse creates a new DiscoveryResponse object for each metric. 
-func NewDiscoveryResponse(metricName string, dimensions []Dimensions) DiscoveryResponse { +func NewDiscoveryResponse(metricName, displayName, unitName string, dimensions []Dimensions) DiscoveryResponse { return DiscoveryResponse{ - MetricName: metricName, - Dimensions: dimensions, + MetricName: metricName, + DisplayName: displayName, + Unit: unitName, + Dimensions: dimensions, } } diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/index.tsx b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/index.tsx index 71f5ee862d..fdb32539f4 100644 --- a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/index.tsx +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/index.tsx @@ -83,7 +83,7 @@ export function Metrics({ namespaceId, pipelineId, type, vertexId }: MetricsProp id={`${metric?.metric_name}-header`} > - {metricNameMap[metric?.metric_name] || metric?.metric_name} + {metric?.display_name || metricNameMap[metric?.metric_name] || metric?.metric_name} diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/LineChart/index.tsx b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/LineChart/index.tsx index 9a8f4a4434..403cd082f6 100644 --- a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/LineChart/index.tsx +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/partials/LineChart/index.tsx @@ -8,6 +8,7 @@ import { Tooltip, XAxis, YAxis, + Text } from "recharts"; import Box from "@mui/material/Box"; 
import CircularProgress from "@mui/material/CircularProgress"; @@ -17,6 +18,94 @@ import EmptyChart from "../EmptyChart"; import { useMetricsFetch } from "../../../../../../../../../../../../../../../utils/fetchWrappers/metricsFetch"; import TimeSelector from "../common/TimeRange"; +interface TooltipProps { + payload?: any[]; + label?: string; + active?: boolean; +} + +function CustomTooltip({ payload, label, active }: TooltipProps) { + if (active && payload && payload.length) { + const maxWidth = Math.max(...payload.map(entry => entry.name.length)) * 9.5; + console.log("max width: ", maxWidth) + return ( +
+

{label}

+ {payload.map((entry: any, index: any) => ( +
+ {entry.name}: + {entry.value} +
+ ))} +
+ ); + } + + return null; +} + +const getYAxisLabel = (unit: string) => { + if (unit !== "") { + return unit + } + return "Units" +}; + +const getDefaultFormatter = (value: number, metricName: string) => { + const formatValue = (value: number, suffix: string) => { + const formattedValue = parseFloat(value.toFixed(2)); + return formattedValue % 1 === 0 + ? `${Math.floor(formattedValue)}${suffix}` + : `${formattedValue}${suffix}`; + }; + switch(metricName){ + case "monovtx_ack_time_bucket": + case "monovtx_read_time_bucket": + case "monovtx_processing_time_bucket": + case "monovtx_sink_time_bucket": + case "numaflow_monovtx_processing_time_bucket": + case "numaflow_monovtx_sink_time_bucket": + case "numaflow_monovtx_read_time_bucket": + case "numaflow_monovtx_ack_time_bucket": + if (value === 0){ + return "0"; + } else if (value < 1000) { + return `${value} μs`; + } else if (value < 1000000) { + return formatValue(value / 1000, " ms"); + } else { + return formatValue(value / 1000000, " s"); + } + default: + if (value === 0){ + return "0"; + } else if (value < 1000) { + return `${value}`; + } else if (value < 1000000) { + return formatValue(value / 1000, " k"); + } else { + return formatValue(value / 1000000, " M"); + } + } +} + +const getTickFormatter = (unit: string, metricName: string) => { + const formatValue = (value: number) => { + const formattedValue = parseFloat(value.toFixed(2)); // Format to 2 decimal places + return formattedValue % 1 === 0 ? 
Math.floor(formattedValue) : formattedValue; // Remove trailing .0 + }; + return (value: number) => { + switch (unit) { + case 's': + return `${formatValue(value / 1000000)}`; + case 'ms': + return `${formatValue(value / 1000)}`; + default: + return getDefaultFormatter(value, metricName); + } + } +}; + // TODO have a check for metricReq against metric object to ensure required fields are passed const LineChartComponent = ({ namespaceId, @@ -40,7 +129,7 @@ const LineChartComponent = ({ const getRandomColor = useCallback((index: number) => { const hue = (index * 137.508) % 360; - return `hsl(${hue}, 70%, 50%)`; + return `hsl(${hue}, 50%, 50%)`; }, []); const getFilterValue = useCallback( @@ -226,29 +315,29 @@ const LineChartComponent = ({ {filtersList?.filter((filterEle: any) => !filterEle?.required)?.length > 0 && ( - - Filters - !filterEle?.required - )} - namespaceId={namespaceId} - pipelineId={pipelineId} - type={type} - vertexId={vertexId} - setFilters={setFilters} - /> - - )} + + Filters + !filterEle?.required + )} + namespaceId={namespaceId} + pipelineId={pipelineId} + type={type} + vertexId={vertexId} + setFilters={setFilters} + /> + + )} {isLoading && ( - - - + + + {getYAxisLabel(metric?.unit)}} + tickFormatter={getTickFormatter(metric?.unit, metric?.metric_name)} + /> + + + {chartLabels?.map((value, index) => ( ))} - + }/> diff --git a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/utils/constants.ts b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/utils/constants.ts index b953f217e3..723c49ab0c 100644 --- a/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/utils/constants.ts +++ b/ui/src/components/pages/Pipeline/partials/Graph/partials/NodeInfo/partials/Pods/partials/PodDetails/partials/Metrics/utils/constants.ts @@ -32,15 +32,15 @@ export const 
dimensionReverseMap: { [p: string]: string } = { }; export const metricNameMap: { [p: string]: string } = { - monovtx_ack_time_bucket: "Mono Vertex Ack Time Latency (in micro seconds)", - monovtx_read_time_bucket: "Mono Vertex Read Time Latency (in micro seconds)", + monovtx_ack_time_bucket: "Mono Vertex Ack Time Latency", + monovtx_read_time_bucket: "Mono Vertex Read Time Latency", monovtx_processing_time_bucket: - "Mono Vertex Processing Time Latency (in micro seconds)", + "Mono Vertex Processing Time Latency", monovtx_sink_time_bucket: - "Mono Vertex Sink Write Time Latency (in micro seconds)", + "Mono Vertex Sink Write Time Latency", forwarder_data_read_total: - "Vertex Read Processing Rate (messages per second)", - monovtx_read_total: "Mono Vertex Read Processing Rate (messages per second)", + "Vertex Read Processing Rate", + monovtx_read_total: "Mono Vertex Read Processing Rate", monovtx_pending: "Mono Vertex Pending Messages", vertex_pending_messages: "Vertex Pending Messages", }; From b6c4de1832f1f6b4c42338b119dc2470383177f5 Mon Sep 17 00:00:00 2001 From: Sreekanth Date: Wed, 8 Jan 2025 18:38:34 +0530 Subject: [PATCH 184/188] fix: Initialize rustls's CryptoProvider early in the code (#2312) Signed-off-by: Sreekanth --- rust/Cargo.lock | 1 + rust/numaflow-core/Cargo.toml | 3 ++- rust/numaflow-core/src/source/serving.rs | 5 +++++ rust/numaflow/Cargo.toml | 1 + rust/numaflow/src/main.rs | 6 ++++++ rust/serving/src/app/jetstream_proxy.rs | 6 ------ rust/serving/src/lib.rs | 3 --- rust/serving/src/pipeline.rs | 2 +- 8 files changed, 16 insertions(+), 11 deletions(-) diff --git a/rust/Cargo.lock b/rust/Cargo.lock index e3d90e2f05..3eddaaa6c5 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -1708,6 +1708,7 @@ dependencies = [ "backoff", "numaflow-core", "numaflow-models", + "rustls 0.23.19", "servesink", "serving", "tokio", diff --git a/rust/numaflow-core/Cargo.toml b/rust/numaflow-core/Cargo.toml index 4a98303a1e..33c7e1cbe0 100644 --- 
a/rust/numaflow-core/Cargo.toml +++ b/rust/numaflow-core/Cargo.toml @@ -6,7 +6,8 @@ edition = "2021" [features] nats-tests = [] pulsar-tests = [] -all-tests = ["nats-tests", "pulsar-tests"] +redis-tests = [] +all-tests = ["nats-tests", "pulsar-tests", "redis-tests"] [lints] workspace = true diff --git a/rust/numaflow-core/src/source/serving.rs b/rust/numaflow-core/src/source/serving.rs index b9fb6c72ed..431cfbba35 100644 --- a/rust/numaflow-core/src/source/serving.rs +++ b/rust/numaflow-core/src/source/serving.rs @@ -139,6 +139,7 @@ mod tests { } } + #[cfg(feature = "redis-tests")] #[tokio::test] async fn test_serving_source_reader_acker() -> Result<()> { let settings = Settings { @@ -146,6 +147,10 @@ mod tests { ..Default::default() }; let settings = Arc::new(settings); + // Setup the CryptoProvider (controls core cryptography used by rustls) for the process + // ServingSource starts an Axum HTTPS server in the background. Rustls is used to generate + // self-signed certs when starting the server. 
+ let _ = rustls::crypto::aws_lc_rs::default_provider().install_default(); let mut serving_source = ServingSource::new( Arc::clone(&settings), 10, diff --git a/rust/numaflow/Cargo.toml b/rust/numaflow/Cargo.toml index 6d5fc0dd60..58de96a9c0 100644 --- a/rust/numaflow/Cargo.toml +++ b/rust/numaflow/Cargo.toml @@ -14,4 +14,5 @@ numaflow-models.workspace = true backoff.workspace = true tokio.workspace = true tracing.workspace = true +rustls.workspace = true tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } \ No newline at end of file diff --git a/rust/numaflow/src/main.rs b/rust/numaflow/src/main.rs index 9a5ab6fe82..e0836ce21c 100644 --- a/rust/numaflow/src/main.rs +++ b/rust/numaflow/src/main.rs @@ -19,6 +19,12 @@ async fn main() -> Result<(), Box> { ) .with(tracing_subscriber::fmt::layer().with_ansi(false)) .init(); + + // Setup the CryptoProvider (controls core cryptography used by rustls) for the process + rustls::crypto::aws_lc_rs::default_provider() + .install_default() + .expect("Installing default CryptoProvider"); + if let Err(e) = run().await { error!("{e:?}"); return Err(e); diff --git a/rust/serving/src/app/jetstream_proxy.rs b/rust/serving/src/app/jetstream_proxy.rs index 6f61a0530f..eb083d57e9 100644 --- a/rust/serving/src/app/jetstream_proxy.rs +++ b/rust/serving/src/app/jetstream_proxy.rs @@ -32,7 +32,6 @@ use crate::{app::callback::state, Message, MessageWrapper}; // "from_vertex": "a" // } -const CALLBACK_URL_KEY: &str = "X-Numaflow-Callback-Url"; const NUMAFLOW_RESP_ARRAY_LEN: &str = "Numaflow-Array-Len"; const NUMAFLOW_RESP_ARRAY_IDX_LEN: &str = "Numaflow-Array-Index-Len"; @@ -40,7 +39,6 @@ struct ProxyState { message: mpsc::Sender, tid_header: String, callback: state::State, - callback_url: String, } pub(crate) async fn jetstream_proxy( @@ -50,10 +48,6 @@ pub(crate) async fn jetstream_proxy( message: state.message.clone(), tid_header: state.settings.tid_header.clone(), callback: state.callback_state.clone(), - callback_url: 
format!( - "https://{}:{}/v1/process/callback", - state.settings.host_ip, state.settings.app_listen_port - ), }); let router = Router::new() diff --git a/rust/serving/src/lib.rs b/rust/serving/src/lib.rs index 001065ddfe..bdc3aeab9c 100644 --- a/rust/serving/src/lib.rs +++ b/rust/serving/src/lib.rs @@ -39,9 +39,6 @@ pub(crate) async fn serve( where T: Clone + Send + Sync + Store + 'static, { - // Setup the CryptoProvider (controls core cryptography used by rustls) for the process - let _ = rustls::crypto::aws_lc_rs::default_provider().install_default(); - let (cert, key) = generate_certs()?; let tls_config = RustlsConfig::from_pem(cert.pem().into(), key.serialize_pem().into()) diff --git a/rust/serving/src/pipeline.rs b/rust/serving/src/pipeline.rs index cb491d7d88..cc0c2298b8 100644 --- a/rust/serving/src/pipeline.rs +++ b/rust/serving/src/pipeline.rs @@ -65,7 +65,7 @@ pub(crate) struct Edge { /// DCG (directed compute graph) of the pipeline with minimal information build using vertices and edges /// from the pipeline spec #[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq)] -pub(crate) struct PipelineDCG { +pub struct PipelineDCG { pub(crate) vertices: Vec, pub(crate) edges: Vec, } From 97b84cffa30a3258084a11fa0ae32dac4d283de8 Mon Sep 17 00:00:00 2001 From: Sreekanth Date: Wed, 8 Jan 2025 18:57:43 +0530 Subject: [PATCH 185/188] chore: Migrate from upload/download artifacts action v3 to v4 (#2314) --- .github/workflows/nightly-build.yml | 12 ++++++------ .github/workflows/release.yml | 18 +++++++++--------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/.github/workflows/nightly-build.yml b/.github/workflows/nightly-build.yml index 769d7641a7..072241487f 100644 --- a/.github/workflows/nightly-build.yml +++ b/.github/workflows/nightly-build.yml @@ -35,7 +35,7 @@ jobs: - name: Make checksums run: make checksums - name: store artifacts - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: binaries path: dist 
@@ -65,7 +65,7 @@ jobs: - name: Rename binary run: cp -pv target/x86_64-unknown-linux-gnu/release/numaflow numaflow-rs-linux-amd64 - name: Upload numaflow binary - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: numaflow-rs-linux-amd64 path: rust/numaflow-rs-linux-amd64 @@ -98,7 +98,7 @@ jobs: - name: Rename binary run: cp -pv target/aarch64-unknown-linux-gnu/release/numaflow numaflow-rs-linux-arm64 - name: Upload numaflow binary - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: numaflow-rs-linux-arm64 path: rust/numaflow-rs-linux-arm64 @@ -127,19 +127,19 @@ jobs: uses: docker/setup-buildx-action@v2 - name: Download Go binaries - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: binaries path: dist/ - name: Download Rust amd64 binaries - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: numaflow-rs-linux-amd64 path: dist/ - name: Download Rust arm64 binaries - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: numaflow-rs-linux-arm64 path: dist/ diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index af459314e6..b30fc9ca5c 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -33,7 +33,7 @@ jobs: - name: Make checksums run: make checksums - name: store artifacts - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: binaries path: dist @@ -56,7 +56,7 @@ jobs: - name: Rename binary run: cp -pv target/x86_64-unknown-linux-gnu/release/numaflow numaflow-rs-linux-amd64 - name: Upload numaflow binary - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: numaflow-rs-linux-amd64 path: rust/numaflow-rs-linux-amd64 @@ -82,7 +82,7 @@ jobs: - name: Rename binary run: cp -pv target/aarch64-unknown-linux-gnu/release/numaflow numaflow-rs-linux-arm64 - name: Upload numaflow binary - uses: 
actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: numaflow-rs-linux-arm64 path: rust/numaflow-rs-linux-arm64 @@ -111,19 +111,19 @@ jobs: uses: docker/setup-buildx-action@v2 - name: Download Go binaries - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: binaries path: dist/ - name: Download Rust amd64 binaries - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: numaflow-rs-linux-amd64 path: dist/ - name: Download Rust arm64 binaries - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: numaflow-rs-linux-arm64 path: dist/ @@ -171,7 +171,7 @@ jobs: - run: bom generate --image quay.io/numaproj/numaflow:$VERSION -o /tmp/numaflow.spdx # pack the boms into one file to make it easy to download - run: cd /tmp && tar -zcf sbom.tar.gz *.spdx - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 with: name: sbom.tar.gz path: /tmp/sbom.tar.gz @@ -191,11 +191,11 @@ jobs: echo "VERSION=${GITHUB_REF##*/}" >> $GITHUB_ENV fi - name: Download binaries - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: binaries path: dist/ - - uses: actions/download-artifact@v3 + - uses: actions/download-artifact@v4 with: name: sbom.tar.gz path: /tmp From cd1fcb4110d849c20c23bc0b15be1e01bd984b1d Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Wed, 8 Jan 2025 10:42:20 -0800 Subject: [PATCH 186/188] test: more e2e test for monovertex (#2313) Signed-off-by: Derek Wang --- test/fixtures/expect.go | 9 ++++++ test/fixtures/util.go | 22 ++++++++++++- test/fixtures/when.go | 44 ++++++++++++++++++++++++++ test/monovertex-e2e/monovertex_test.go | 16 +++++++++- 4 files changed, 89 insertions(+), 2 deletions(-) diff --git a/test/fixtures/expect.go b/test/fixtures/expect.go index afa30447d9..996d762bd6 100644 --- a/test/fixtures/expect.go +++ b/test/fixtures/expect.go @@ -204,6 +204,15 @@ func (t *Expect) 
DaemonPodLogContains(pipelineName, regex string, opts ...PodLog return t } +func (t *Expect) MvtxDaemonPodsRunning() *Expect { + t.t.Helper() + timeout := 2 * time.Minute + if err := WaitForMvtxDaemonPodsRunning(t.kubeClient, Namespace, t.monoVertex.Name, timeout); err != nil { + t.t.Fatalf("Expected mvtx daemon pods of %q running: %v", t.monoVertex.Name, err) + } + return t +} + func (t *Expect) When() *When { return &When{ t: t.t, diff --git a/test/fixtures/util.go b/test/fixtures/util.go index 13a4026384..d2f27dc5f7 100644 --- a/test/fixtures/util.go +++ b/test/fixtures/util.go @@ -280,7 +280,7 @@ func WaitForMonoVertexPodRunning(kubeClient kubernetes.Interface, monoVertexClie } ok := len(podList.Items) > 0 && len(podList.Items) == monoVertex.CalculateReplicas() // pod number should equal to desired replicas for _, p := range podList.Items { - ok = ok && p.Status.Phase == corev1.PodRunning + ok = ok && isPodReady(p) } if ok { return nil @@ -378,6 +378,26 @@ func WaitForDaemonPodsRunning(kubeClient kubernetes.Interface, namespace, pipeli } } +func WaitForMvtxDaemonPodsRunning(kubeClient kubernetes.Interface, namespace, mvtx string, timeout time.Duration) error { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + labelSelector := fmt.Sprintf("%s=%s,%s=%s", dfv1.KeyMonoVertexName, mvtx, dfv1.KeyComponent, dfv1.ComponentMonoVertexDaemon) + for { + podList, err := kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector, FieldSelector: "status.phase=Running"}) + if err != nil { + return fmt.Errorf("error getting mvtx daemon pod name: %w", err) + } + ok := len(podList.Items) > 0 + for _, p := range podList.Items { + ok = ok && p.Status.Phase == corev1.PodRunning + } + if ok { + return nil + } + time.Sleep(2 * time.Second) + } +} + func VertexPodLogNotContains(ctx context.Context, kubeClient kubernetes.Interface, namespace, pipelineName, vertexName, regex string, opts ...PodLogCheckOption) (bool, 
error) { labelSelector := fmt.Sprintf("%s=%s,%s=%s", dfv1.KeyPipelineName, pipelineName, dfv1.KeyVertexName, vertexName) podList, err := kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector, FieldSelector: "status.phase=Running"}) diff --git a/test/fixtures/when.go b/test/fixtures/when.go index 986085bdd6..154c7cfda0 100644 --- a/test/fixtures/when.go +++ b/test/fixtures/when.go @@ -243,6 +243,50 @@ func (w *When) DaemonPodPortForward(pipelineName string, localPort, remotePort i return w } +func (w *When) MonoVertexPodPortForward(localPort, remotePort int) *When { + w.t.Helper() + labelSelector := fmt.Sprintf("%s=%s,%s=%s", dfv1.KeyComponent, dfv1.ComponentMonoVertex, dfv1.KeyMonoVertexName, w.monoVertex.Name) + ctx := context.Background() + podList, err := w.kubeClient.CoreV1().Pods(Namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector, FieldSelector: "status.phase=Running"}) + if err != nil { + w.t.Fatalf("Error getting mvtx pod name: %v", err) + } + podName := podList.Items[0].GetName() + w.t.Logf("MonoVertex POD name: %s", podName) + + stopCh := make(chan struct{}, 1) + if err = PodPortForward(w.restConfig, Namespace, podName, localPort, remotePort, stopCh); err != nil { + w.t.Fatalf("Expected mvtx pod port-forward: %v", err) + } + if w.portForwarderStopChannels == nil { + w.portForwarderStopChannels = make(map[string]chan struct{}) + } + w.portForwarderStopChannels[podName] = stopCh + return w +} + +func (w *When) MvtxDaemonPodPortForward(localPort, remotePort int) *When { + w.t.Helper() + labelSelector := fmt.Sprintf("%s=%s,%s=%s", dfv1.KeyComponent, dfv1.ComponentMonoVertexDaemon, dfv1.KeyMonoVertexName, w.monoVertex.Name) + ctx := context.Background() + podList, err := w.kubeClient.CoreV1().Pods(Namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector, FieldSelector: "status.phase=Running"}) + if err != nil { + w.t.Fatalf("Error getting mvtx daemon pod name: %v", err) + } + podName := 
podList.Items[0].GetName() + w.t.Logf("MonoVertex Daemon POD name: %s", podName) + + stopCh := make(chan struct{}, 1) + if err = PodPortForward(w.restConfig, Namespace, podName, localPort, remotePort, stopCh); err != nil { + w.t.Fatalf("Expected mvtx daemon pod port-forward: %v", err) + } + if w.portForwarderStopChannels == nil { + w.portForwarderStopChannels = make(map[string]chan struct{}) + } + w.portForwarderStopChannels[podName] = stopCh + return w +} + func (w *When) UXServerPodPortForward(localPort, remotePort int) *When { w.t.Helper() labelSelector := fmt.Sprintf("%s=%s", dfv1.KeyComponent, dfv1.ComponentUXServer) diff --git a/test/monovertex-e2e/monovertex_test.go b/test/monovertex-e2e/monovertex_test.go index 3fd72d6554..ba0c640527 100644 --- a/test/monovertex-e2e/monovertex_test.go +++ b/test/monovertex-e2e/monovertex_test.go @@ -23,6 +23,7 @@ import ( "github.com/stretchr/testify/suite" + dfv1 "github.com/numaproj/numaflow/pkg/apis/numaflow/v1alpha1" . "github.com/numaproj/numaflow/test/fixtures" ) @@ -35,7 +36,20 @@ func (s *MonoVertexSuite) TestMonoVertexWithTransformer() { When().CreateMonoVertexAndWait() defer w.DeleteMonoVertexAndWait() - w.Expect().MonoVertexPodsRunning() + w.Expect().MonoVertexPodsRunning().MvtxDaemonPodsRunning() + + defer w.MonoVertexPodPortForward(8931, dfv1.MonoVertexMetricsPort). + MvtxDaemonPodPortForward(3232, dfv1.MonoVertexDaemonServicePort). + TerminateAllPodPortForwards() + + // Check metrics endpoints + HTTPExpect(s.T(), "https://localhost:8931").GET("/metrics"). + Expect(). + Status(200) + + HTTPExpect(s.T(), "https://localhost:3232").GET("/metrics"). + Expect(). + Status(200) // Expect the messages to be processed by the transformer. 
w.Expect().MonoVertexPodLogContains("AssignEventTime", PodLogCheckOptionWithContainer("transformer")) From ade70d8da310729129708781119a5634c65f9df0 Mon Sep 17 00:00:00 2001 From: Derek Wang Date: Sun, 12 Jan 2025 14:28:54 -0800 Subject: [PATCH 187/188] fix: lastScaledAt not updated during autoscaling down (#2323) --- pkg/reconciler/monovertex/controller.go | 4 ++-- pkg/reconciler/vertex/controller.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/reconciler/monovertex/controller.go b/pkg/reconciler/monovertex/controller.go index dbe63c6ca8..e0ba7cac4c 100644 --- a/pkg/reconciler/monovertex/controller.go +++ b/pkg/reconciler/monovertex/controller.go @@ -206,7 +206,8 @@ func (mr *monoVertexReconciler) orchestratePods(ctx context.Context, monoVtx *df if err := mr.cleanUpPodsFromTo(ctx, monoVtx, desiredReplicas, math.MaxInt); err != nil { return fmt.Errorf("failed to clean up mono vertex pods [%v, ∞): %w", desiredReplicas, err) } - if currentReplicas := int(monoVtx.Status.Replicas); currentReplicas > desiredReplicas { + currentReplicas := int(monoVtx.Status.Replicas) + if currentReplicas > desiredReplicas { monoVtx.Status.Replicas = uint32(desiredReplicas) } updatedReplicas := int(monoVtx.Status.UpdatedReplicas) @@ -286,7 +287,6 @@ func (mr *monoVertexReconciler) orchestratePods(ctx context.Context, monoVtx *df } } - currentReplicas := int(monoVtx.Status.Replicas) if currentReplicas != desiredReplicas { log.Infow("MonoVertex replicas changed", "currentReplicas", currentReplicas, "desiredReplicas", desiredReplicas) mr.recorder.Eventf(monoVtx, corev1.EventTypeNormal, "ReplicasScaled", "Replicas changed from %d to %d", currentReplicas, desiredReplicas) diff --git a/pkg/reconciler/vertex/controller.go b/pkg/reconciler/vertex/controller.go index c5e7fdcfeb..393a573958 100644 --- a/pkg/reconciler/vertex/controller.go +++ b/pkg/reconciler/vertex/controller.go @@ -220,7 +220,8 @@ func (r *vertexReconciler) orchestratePods(ctx context.Context, vertex 
*dfv1.Ver if err := r.cleanUpPodsFromTo(ctx, vertex, desiredReplicas, math.MaxInt); err != nil { return fmt.Errorf("failed to clean up vertex pods [%v, ∞): %w", desiredReplicas, err) } - if currentReplicas := int(vertex.Status.Replicas); currentReplicas > desiredReplicas { + currentReplicas := int(vertex.Status.Replicas) + if currentReplicas > desiredReplicas { vertex.Status.Replicas = uint32(desiredReplicas) } updatedReplicas := int(vertex.Status.UpdatedReplicas) @@ -300,7 +301,6 @@ func (r *vertexReconciler) orchestratePods(ctx context.Context, vertex *dfv1.Ver } } - currentReplicas := int(vertex.Status.Replicas) if currentReplicas != desiredReplicas { log.Infow("Pipeline Vertex replicas changed", "currentReplicas", currentReplicas, "desiredReplicas", desiredReplicas) r.recorder.Eventf(vertex, corev1.EventTypeNormal, "ReplicasScaled", "Replicas changed from %d to %d", currentReplicas, desiredReplicas) From 292217375f62e09e8c0ec048395a6fb32b3fb69d Mon Sep 17 00:00:00 2001 From: Keran Yang Date: Mon, 13 Jan 2025 18:33:27 -0500 Subject: [PATCH 188/188] chore: skip publishing the last empty struct when generator stops (#2327) --- pkg/sources/generator/tickgen.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pkg/sources/generator/tickgen.go b/pkg/sources/generator/tickgen.go index e2e96fec9c..a922e6557b 100644 --- a/pkg/sources/generator/tickgen.go +++ b/pkg/sources/generator/tickgen.go @@ -197,7 +197,11 @@ loop: // since the Read call is blocking, and runs in an infinite loop, // we implement Read With Wait semantics select { - case r := <-mg.srcChan: + case r, ok := <-mg.srcChan: + if !ok { + mg.logger.Info("All the messages have been read. returning.") + break loop + } msgs = append(msgs, mg.newReadMessage(r.key, r.data, r.offset, r.ts)) case <-timeout: break loop